blob: 0cc120637046ee85c97946b3d6609d3faa118d86 [file] [log] [blame]
Ghanim Fodi79ee8d82017-02-27 16:39:25 +02001/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Amir Levy9659e592016-10-27 18:08:27 +03002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include "ipa_i.h"
14#include "ipahal/ipahal.h"
15#include "ipahal/ipahal_fltrt.h"
16
Amir Levy9659e592016-10-27 18:08:27 +030017#define IPA_FLT_STATUS_OF_ADD_FAILED (-1)
18#define IPA_FLT_STATUS_OF_DEL_FAILED (-1)
19#define IPA_FLT_STATUS_OF_MDFY_FAILED (-1)
20
21#define IPA_FLT_GET_RULE_TYPE(__entry) \
22 ( \
23 ((__entry)->rule.hashable) ? \
24 (IPA_RULE_HASHABLE):(IPA_RULE_NON_HASHABLE) \
25 )
26
27/**
28 * ipa3_generate_flt_hw_rule() - generates the filtering hardware rule
29 * @ip: the ip address family type
30 * @entry: filtering entry
31 * @buf: output buffer, buf == NULL means
32 * caller wants to know the size of the rule as seen
33 * by HW so they did not pass a valid buffer, we will use a
34 * scratch buffer instead.
35 * With this scheme we are going to
36 * generate the rule twice, once to know size using scratch
37 * buffer and second to write the rule to the actual caller
38 * supplied buffer which is of required size
39 *
40 * Returns: 0 on success, negative on failure
41 *
42 * caller needs to hold any needed locks to ensure integrity
43 *
44 */
45static int ipa3_generate_flt_hw_rule(enum ipa_ip_type ip,
46 struct ipa3_flt_entry *entry, u8 *buf)
47{
48 struct ipahal_flt_rule_gen_params gen_params;
49 int res = 0;
50
51 memset(&gen_params, 0, sizeof(gen_params));
52
53 gen_params.ipt = ip;
54 if (entry->rt_tbl)
55 gen_params.rt_tbl_idx = entry->rt_tbl->idx;
56 else
57 gen_params.rt_tbl_idx = entry->rule.rt_tbl_idx;
58
59 gen_params.priority = entry->prio;
60 gen_params.id = entry->rule_id;
61 gen_params.rule = (const struct ipa_flt_rule *)&entry->rule;
62
63 res = ipahal_flt_generate_hw_rule(&gen_params, &entry->hw_len, buf);
64 if (res)
65 IPAERR("failed to generate flt h/w rule\n");
66
67 return 0;
68}
69
70static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip, enum ipa_rule_type rlt)
71{
72 struct ipa3_flt_tbl *tbl;
73 int i;
74
75 IPADBG_LOW("reaping sys flt tbls ip=%d rlt=%d\n", ip, rlt);
76
77 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
78 if (!ipa_is_ep_support_flt(i))
79 continue;
80
81 tbl = &ipa3_ctx->flt_tbl[i][ip];
82 if (tbl->prev_mem[rlt].phys_base) {
83 IPADBG_LOW("reaping flt tbl (prev) pipe=%d\n", i);
84 ipahal_free_dma_mem(&tbl->prev_mem[rlt]);
85 }
86
87 if (list_empty(&tbl->head_flt_rule_list)) {
88 if (tbl->curr_mem[rlt].phys_base) {
89 IPADBG_LOW("reaping flt tbl (curr) pipe=%d\n",
90 i);
91 ipahal_free_dma_mem(&tbl->curr_mem[rlt]);
92 }
93 }
94 }
95}
96
/**
 * ipa_prep_flt_tbl_for_cmt() - preparing the flt table for commit
 * assign priorities to the rules, calculate their sizes and calculate
 * the overall table size
 * @ip: the ip address family type
 * @tbl: the flt tbl to be prepared
 * @pipe_idx: the ep pipe appropriate for the given tbl
 *
 * Return: 0 on success, negative on failure
 */
static int ipa_prep_flt_tbl_for_cmt(enum ipa_ip_type ip,
	struct ipa3_flt_tbl *tbl, int pipe_idx)
{
	struct ipa3_flt_entry *entry;
	int prio_i;
	int max_prio;
	u32 hdr_width;

	/* recompute both rule-type sizes from scratch */
	tbl->sz[IPA_RULE_HASHABLE] = 0;
	tbl->sz[IPA_RULE_NON_HASHABLE] = 0;

	max_prio = ipahal_get_rule_max_priority();

	/* rules flagged max_prio all share the maximum priority; the rest
	 * get strictly decreasing priorities in list order, so earlier
	 * rules in the list win ties in HW
	 */
	prio_i = max_prio;
	list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {

		if (entry->rule.max_prio) {
			entry->prio = max_prio;
		} else {
			if (ipahal_rule_decrease_priority(&prio_i)) {
				IPAERR("cannot decrease rule priority - %d\n",
					prio_i);
				return -EPERM;
			}
			entry->prio = prio_i;
		}

		/* NULL buf: size-only pass, fills entry->hw_len */
		if (ipa3_generate_flt_hw_rule(ip, entry, NULL)) {
			IPAERR("failed to calculate HW FLT rule size\n");
			return -EPERM;
		}
		IPADBG_LOW("pipe %d rule_id(handle) %u hw_len %d priority %u\n",
			pipe_idx, entry->rule_id, entry->hw_len, entry->prio);

		if (entry->rule.hashable)
			tbl->sz[IPA_RULE_HASHABLE] += entry->hw_len;
		else
			tbl->sz[IPA_RULE_NON_HASHABLE] += entry->hw_len;
	}

	if ((tbl->sz[IPA_RULE_HASHABLE] +
		tbl->sz[IPA_RULE_NON_HASHABLE]) == 0) {
		IPADBG_LOW("flt tbl pipe %d is with zero total size\n",
			pipe_idx);
		return 0;
	}

	hdr_width = ipahal_get_hw_tbl_hdr_width();

	/* for the header word */
	if (tbl->sz[IPA_RULE_HASHABLE])
		tbl->sz[IPA_RULE_HASHABLE] += hdr_width;
	if (tbl->sz[IPA_RULE_NON_HASHABLE])
		tbl->sz[IPA_RULE_NON_HASHABLE] += hdr_width;

	IPADBG_LOW("FLT tbl pipe idx %d hash sz %u non-hash sz %u\n", pipe_idx,
		tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]);

	return 0;
}
167
/**
 * ipa_translate_flt_tbl_to_hw_fmt() - translate the flt driver structures
 * (rules and tables) to HW format and fill it in the given buffers
 * @ip: the ip address family type
 * @rlt: the type of the rules to translate (hashable or non-hashable)
 * @base: the rules body buffer to be filled
 * @hdr: the rules header (addresses/offsets) buffer to be filled
 * @body_ofst: the offset of the rules body from the rules header at
 * ipa sram
 *
 * Returns: 0 on success, negative on failure
 *
 * caller needs to hold any needed locks to ensure integrity
 *
 */
static int ipa_translate_flt_tbl_to_hw_fmt(enum ipa_ip_type ip,
	enum ipa_rule_type rlt, u8 *base, u8 *hdr, u32 body_ofst)
{
	u64 offset;
	u8 *body_i;
	int res;
	struct ipa3_flt_entry *entry;
	u8 *tbl_mem_buf;
	struct ipa_mem_buffer tbl_mem;
	struct ipa3_flt_tbl *tbl;
	int i;
	int hdr_idx = 0;

	body_i = base;
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i))
			continue;
		tbl = &ipa3_ctx->flt_tbl[i][ip];
		/* empty tables still consume a header slot */
		if (tbl->sz[rlt] == 0) {
			hdr_idx++;
			continue;
		}
		if (tbl->in_sys[rlt]) {
			/* system-memory table: allocate a dedicated DMA
			 * buffer for the body and point the sram header
			 * at its physical address
			 */
			/* only body (no header) */
			tbl_mem.size = tbl->sz[rlt] -
				ipahal_get_hw_tbl_hdr_width();
			if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) {
				IPAERR("fail to alloc sys tbl of size %d\n",
					tbl_mem.size);
				goto err;
			}

			if (ipahal_fltrt_write_addr_to_hdr(tbl_mem.phys_base,
				hdr, hdr_idx, true)) {
				IPAERR("fail to wrt sys tbl addr to hdr\n");
				goto hdr_update_fail;
			}

			tbl_mem_buf = tbl_mem.base;

			/* generate the rule-set */
			list_for_each_entry(entry, &tbl->head_flt_rule_list,
					link) {
				/* only rules of the requested type */
				if (IPA_FLT_GET_RULE_TYPE(entry) != rlt)
					continue;
				res = ipa3_generate_flt_hw_rule(
					ip, entry, tbl_mem_buf);
				if (res) {
					IPAERR("failed to gen HW FLT rule\n");
					goto hdr_update_fail;
				}
				tbl_mem_buf += entry->hw_len;
			}

			/* keep the old table alive until commit completes;
			 * it is reaped later by __ipa_reap_sys_flt_tbls()
			 */
			if (tbl->curr_mem[rlt].phys_base) {
				WARN_ON(tbl->prev_mem[rlt].phys_base);
				tbl->prev_mem[rlt] = tbl->curr_mem[rlt];
			}
			tbl->curr_mem[rlt] = tbl_mem;
		} else {
			/* local (sram) table: body lives inside the shared
			 * body buffer; header gets its sram-relative offset
			 */
			offset = body_i - base + body_ofst;

			/* update the hdr at the right index */
			if (ipahal_fltrt_write_addr_to_hdr(offset, hdr,
				hdr_idx, true)) {
				IPAERR("fail to wrt lcl tbl ofst to hdr\n");
				goto hdr_update_fail;
			}

			/* generate the rule-set */
			list_for_each_entry(entry, &tbl->head_flt_rule_list,
					link) {
				if (IPA_FLT_GET_RULE_TYPE(entry) != rlt)
					continue;
				res = ipa3_generate_flt_hw_rule(
					ip, entry, body_i);
				if (res) {
					IPAERR("failed to gen HW FLT rule\n");
					goto err;
				}
				body_i += entry->hw_len;
			}

			/**
			 * advance body_i to next table alignment as local
			 * tables are order back-to-back
			 */
			/* NOTE(review): this assumes
			 * ipahal_get_lcl_tbl_addr_alignment() returns an
			 * alignment mask (2^N - 1), not the alignment
			 * itself — confirm against the ipahal definition
			 */
			body_i += ipahal_get_lcl_tbl_addr_alignment();
			body_i = (u8 *)((long)body_i &
				~ipahal_get_lcl_tbl_addr_alignment());
		}
		hdr_idx++;
	}

	return 0;

hdr_update_fail:
	ipahal_free_dma_mem(&tbl_mem);
err:
	return -EPERM;
}
284
/**
 * ipa_generate_flt_hw_tbl_img() - generates the flt hw tbls.
 * headers and bodies are being created into buffers that will be filled into
 * the local memory (sram)
 * @ip: the ip address family type
 * @alloc_params: In and Out parameters for the allocations of the buffers
 *  4 buffers: hdr and bdy, each hashable and non-hashable
 *
 * Return: 0 on success, negative on failure
 */
static int ipa_generate_flt_hw_tbl_img(enum ipa_ip_type ip,
	struct ipahal_fltrt_alloc_imgs_params *alloc_params)
{
	u32 hash_bdy_start_ofst, nhash_bdy_start_ofst;
	int rc = 0;

	/* body offsets are relative to the start of the per-family flt
	 * region in sram (apps area minus region base)
	 */
	if (ip == IPA_IP_v4) {
		nhash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_nhash_ofst) -
			IPA_MEM_PART(v4_flt_nhash_ofst);
		hash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_hash_ofst) -
			IPA_MEM_PART(v4_flt_hash_ofst);
	} else {
		nhash_bdy_start_ofst = IPA_MEM_PART(apps_v6_flt_nhash_ofst) -
			IPA_MEM_PART(v6_flt_nhash_ofst);
		hash_bdy_start_ofst = IPA_MEM_PART(apps_v6_flt_hash_ofst) -
			IPA_MEM_PART(v6_flt_hash_ofst);
	}

	if (ipahal_fltrt_allocate_hw_tbl_imgs(alloc_params)) {
		IPAERR("fail to allocate FLT HW TBL images. IP %d\n", ip);
		rc = -ENOMEM;
		goto allocate_failed;
	}

	if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_HASHABLE,
		alloc_params->hash_bdy.base, alloc_params->hash_hdr.base,
		hash_bdy_start_ofst)) {
		IPAERR("fail to translate hashable flt tbls to hw format\n");
		rc = -EPERM;
		goto translate_fail;
	}
	if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_NON_HASHABLE,
		alloc_params->nhash_bdy.base, alloc_params->nhash_hdr.base,
		nhash_bdy_start_ofst)) {
		IPAERR("fail to translate non-hash flt tbls to hw format\n");
		rc = -EPERM;
		goto translate_fail;
	}

	return rc;

translate_fail:
	/* on failure all four image buffers are released here; the nhash
	 * header is always allocated, the others only if non-empty
	 */
	if (alloc_params->hash_hdr.size)
		ipahal_free_dma_mem(&alloc_params->hash_hdr);
	ipahal_free_dma_mem(&alloc_params->nhash_hdr);
	if (alloc_params->hash_bdy.size)
		ipahal_free_dma_mem(&alloc_params->hash_bdy);
	if (alloc_params->nhash_bdy.size)
		ipahal_free_dma_mem(&alloc_params->nhash_bdy);
allocate_failed:
	return rc;
}
347
348/**
349 * ipa_flt_valid_lcl_tbl_size() - validate if the space allocated for flt
350 * tbl bodies at the sram is enough for the commit
351 * @ipt: the ip address family type
352 * @rlt: the rule type (hashable or non-hashable)
353 *
354 * Return: true if enough space available or false in other cases
355 */
356static bool ipa_flt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
357 enum ipa_rule_type rlt, struct ipa_mem_buffer *bdy)
358{
359 u16 avail;
360
361 if (!bdy) {
362 IPAERR("Bad parameters, bdy = NULL\n");
363 return false;
364 }
365
366 if (ipt == IPA_IP_v4)
367 avail = (rlt == IPA_RULE_HASHABLE) ?
368 IPA_MEM_PART(apps_v4_flt_hash_size) :
369 IPA_MEM_PART(apps_v4_flt_nhash_size);
370 else
371 avail = (rlt == IPA_RULE_HASHABLE) ?
372 IPA_MEM_PART(apps_v6_flt_hash_size) :
373 IPA_MEM_PART(apps_v6_flt_nhash_size);
374
375 if (bdy->size <= avail)
376 return true;
377
378 IPAERR("tbl too big, needed %d avail %d ipt %d rlt %d\n",
379 bdy->size, avail, ipt, rlt);
380 return false;
381}
382
383/**
384 * ipa_flt_alloc_cmd_buffers() - alloc descriptors and imm cmds
385 * payload pointers buffers for headers and bodies of flt structure
386 * as well as place for flush imm.
387 * @ipt: the ip address family type
388 * @desc: [OUT] descriptor buffer
389 * @cmd: [OUT] imm commands payload pointers buffer
390 *
391 * Return: 0 on success, negative on failure
392 */
393static int ipa_flt_alloc_cmd_buffers(enum ipa_ip_type ip,
394 struct ipa3_desc **desc, struct ipahal_imm_cmd_pyld ***cmd_pyld)
395{
396 u16 entries;
397
398 /* +3: 2 for bodies (hashable and non-hashable) and 1 for flushing */
399 entries = (ipa3_ctx->ep_flt_num) * 2 + 3;
400
401 *desc = kcalloc(entries, sizeof(**desc), GFP_ATOMIC);
402 if (*desc == NULL) {
403 IPAERR("fail to alloc desc blob ip %d\n", ip);
404 goto fail_desc_alloc;
405 }
406
407 *cmd_pyld = kcalloc(entries, sizeof(**cmd_pyld), GFP_ATOMIC);
408 if (*cmd_pyld == NULL) {
409 IPAERR("fail to alloc cmd pyld blob ip %d\n", ip);
410 goto fail_cmd_alloc;
411 }
412
413 return 0;
414
415fail_cmd_alloc:
416 kfree(*desc);
417fail_desc_alloc:
418 return -ENOMEM;
419}
420
421/**
422 * ipa_flt_skip_pipe_config() - skip ep flt configuration or not?
423 * will skip according to pre-configuration or modem pipes
424 * @pipe: the EP pipe index
425 *
426 * Return: true if to skip, false otherwize
427 */
428static bool ipa_flt_skip_pipe_config(int pipe)
429{
430 if (ipa_is_modem_pipe(pipe)) {
431 IPADBG_LOW("skip %d - modem owned pipe\n", pipe);
432 return true;
433 }
434
435 if (ipa3_ctx->skip_ep_cfg_shadow[pipe]) {
436 IPADBG_LOW("skip %d\n", pipe);
437 return true;
438 }
439
Ghanim Fodic6b67492017-03-15 14:19:56 +0200440 if ((ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD) == pipe
Amir Levy9659e592016-10-27 18:08:27 +0300441 && ipa3_ctx->modem_cfg_emb_pipe_flt)) {
442 IPADBG_LOW("skip %d\n", pipe);
443 return true;
444 }
445
446 return false;
447}
448
449/**
450 * __ipa_commit_flt_v3() - commit flt tables to the hw
451 * commit the headers and the bodies if are local with internal cache flushing.
452 * The headers (and local bodies) will first be created into dma buffers and
453 * then written via IC to the SRAM
454 * @ipt: the ip address family type
455 *
456 * Return: 0 on success, negative on failure
457 */
458int __ipa_commit_flt_v3(enum ipa_ip_type ip)
459{
460 struct ipahal_fltrt_alloc_imgs_params alloc_params;
461 int rc = 0;
462 struct ipa3_desc *desc;
463 struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
464 struct ipahal_imm_cmd_dma_shared_mem mem_cmd = {0};
465 struct ipahal_imm_cmd_pyld **cmd_pyld;
466 int num_cmd = 0;
467 int i;
468 int hdr_idx;
469 u32 lcl_hash_hdr, lcl_nhash_hdr;
470 u32 lcl_hash_bdy, lcl_nhash_bdy;
471 bool lcl_hash, lcl_nhash;
472 struct ipahal_reg_fltrt_hash_flush flush;
473 struct ipahal_reg_valmask valmask;
474 u32 tbl_hdr_width;
475 struct ipa3_flt_tbl *tbl;
476
477 tbl_hdr_width = ipahal_get_hw_tbl_hdr_width();
478 memset(&alloc_params, 0, sizeof(alloc_params));
479 alloc_params.ipt = ip;
480 alloc_params.tbls_num = ipa3_ctx->ep_flt_num;
481
482 if (ip == IPA_IP_v4) {
483 lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
484 IPA_MEM_PART(v4_flt_hash_ofst) +
485 tbl_hdr_width; /* to skip the bitmap */
486 lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
487 IPA_MEM_PART(v4_flt_nhash_ofst) +
488 tbl_hdr_width; /* to skip the bitmap */
489 lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
490 IPA_MEM_PART(apps_v4_flt_hash_ofst);
491 lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
492 IPA_MEM_PART(apps_v4_flt_nhash_ofst);
493 lcl_hash = ipa3_ctx->ip4_flt_tbl_hash_lcl;
494 lcl_nhash = ipa3_ctx->ip4_flt_tbl_nhash_lcl;
495 } else {
496 lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
497 IPA_MEM_PART(v6_flt_hash_ofst) +
498 tbl_hdr_width; /* to skip the bitmap */
499 lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
500 IPA_MEM_PART(v6_flt_nhash_ofst) +
501 tbl_hdr_width; /* to skip the bitmap */
502 lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
503 IPA_MEM_PART(apps_v6_flt_hash_ofst);
504 lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
505 IPA_MEM_PART(apps_v6_flt_nhash_ofst);
506 lcl_hash = ipa3_ctx->ip6_flt_tbl_hash_lcl;
507 lcl_nhash = ipa3_ctx->ip6_flt_tbl_nhash_lcl;
508 }
509
510 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
511 if (!ipa_is_ep_support_flt(i))
512 continue;
513 tbl = &ipa3_ctx->flt_tbl[i][ip];
514 if (ipa_prep_flt_tbl_for_cmt(ip, tbl, i)) {
515 rc = -EPERM;
516 goto prep_failed;
517 }
518 if (!tbl->in_sys[IPA_RULE_HASHABLE] &&
519 tbl->sz[IPA_RULE_HASHABLE]) {
520 alloc_params.num_lcl_hash_tbls++;
521 alloc_params.total_sz_lcl_hash_tbls +=
522 tbl->sz[IPA_RULE_HASHABLE];
523 alloc_params.total_sz_lcl_hash_tbls -= tbl_hdr_width;
524
525 }
526 if (!tbl->in_sys[IPA_RULE_NON_HASHABLE] &&
527 tbl->sz[IPA_RULE_NON_HASHABLE]) {
528 alloc_params.num_lcl_nhash_tbls++;
529 alloc_params.total_sz_lcl_nhash_tbls +=
530 tbl->sz[IPA_RULE_NON_HASHABLE];
531 alloc_params.total_sz_lcl_nhash_tbls -= tbl_hdr_width;
532 }
533 }
534
535 if (ipa_generate_flt_hw_tbl_img(ip, &alloc_params)) {
536 IPAERR("fail to generate FLT HW TBL image. IP %d\n", ip);
537 rc = -EFAULT;
538 goto prep_failed;
539 }
540
541 if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE,
542 &alloc_params.hash_bdy)) {
543 rc = -EFAULT;
544 goto fail_size_valid;
545 }
546 if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_NON_HASHABLE,
547 &alloc_params.nhash_bdy)) {
548 rc = -EFAULT;
549 goto fail_size_valid;
550 }
551
552 if (ipa_flt_alloc_cmd_buffers(ip, &desc, &cmd_pyld)) {
553 rc = -ENOMEM;
554 goto fail_size_valid;
555 }
556
557 /* flushing ipa internal hashable flt rules cache */
558 memset(&flush, 0, sizeof(flush));
559 if (ip == IPA_IP_v4)
560 flush.v4_flt = true;
561 else
562 flush.v6_flt = true;
563 ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
564 reg_write_cmd.skip_pipeline_clear = false;
565 reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
566 reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
567 reg_write_cmd.value = valmask.val;
568 reg_write_cmd.value_mask = valmask.mask;
569 cmd_pyld[0] = ipahal_construct_imm_cmd(
570 IPA_IMM_CMD_REGISTER_WRITE, &reg_write_cmd, false);
571 if (!cmd_pyld[0]) {
572 IPAERR("fail construct register_write imm cmd: IP %d\n", ip);
573 rc = -EFAULT;
574 goto fail_reg_write_construct;
575 }
576 desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
577 desc[0].pyld = cmd_pyld[0]->data;
578 desc[0].len = cmd_pyld[0]->len;
579 desc[0].type = IPA_IMM_CMD_DESC;
580 num_cmd++;
581
582 hdr_idx = 0;
583 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
584 if (!ipa_is_ep_support_flt(i)) {
585 IPADBG_LOW("skip %d - not filtering pipe\n", i);
586 continue;
587 }
588
589 if (ipa_flt_skip_pipe_config(i)) {
590 hdr_idx++;
591 continue;
592 }
593
594 IPADBG_LOW("Prepare imm cmd for hdr at index %d for pipe %d\n",
595 hdr_idx, i);
596
597 mem_cmd.is_read = false;
598 mem_cmd.skip_pipeline_clear = false;
599 mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
600 mem_cmd.size = tbl_hdr_width;
601 mem_cmd.system_addr = alloc_params.nhash_hdr.phys_base +
602 hdr_idx * tbl_hdr_width;
603 mem_cmd.local_addr = lcl_nhash_hdr +
604 hdr_idx * tbl_hdr_width;
605 cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
606 IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
607 if (!cmd_pyld[num_cmd]) {
608 IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
609 ip);
610 goto fail_imm_cmd_construct;
611 }
612 desc[num_cmd].opcode =
613 ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
614 desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
615 desc[num_cmd].len = cmd_pyld[num_cmd]->len;
616 desc[num_cmd++].type = IPA_IMM_CMD_DESC;
617
618 mem_cmd.is_read = false;
619 mem_cmd.skip_pipeline_clear = false;
620 mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
621 mem_cmd.size = tbl_hdr_width;
622 mem_cmd.system_addr = alloc_params.hash_hdr.phys_base +
623 hdr_idx * tbl_hdr_width;
624 mem_cmd.local_addr = lcl_hash_hdr +
625 hdr_idx * tbl_hdr_width;
626 cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
627 IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
628 if (!cmd_pyld[num_cmd]) {
629 IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
630 ip);
631 goto fail_imm_cmd_construct;
632 }
633 desc[num_cmd].opcode =
634 ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
635 desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
636 desc[num_cmd].len = cmd_pyld[num_cmd]->len;
637 desc[num_cmd++].type = IPA_IMM_CMD_DESC;
638
639 hdr_idx++;
640 }
641
642 if (lcl_nhash) {
643 mem_cmd.is_read = false;
644 mem_cmd.skip_pipeline_clear = false;
645 mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
646 mem_cmd.size = alloc_params.nhash_bdy.size;
647 mem_cmd.system_addr = alloc_params.nhash_bdy.phys_base;
648 mem_cmd.local_addr = lcl_nhash_bdy;
649 cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
650 IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
651 if (!cmd_pyld[num_cmd]) {
652 IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
653 ip);
654 goto fail_imm_cmd_construct;
655 }
656 desc[num_cmd].opcode =
657 ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
658 desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
659 desc[num_cmd].len = cmd_pyld[num_cmd]->len;
660 desc[num_cmd++].type = IPA_IMM_CMD_DESC;
661 }
662 if (lcl_hash) {
663 mem_cmd.is_read = false;
664 mem_cmd.skip_pipeline_clear = false;
665 mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
666 mem_cmd.size = alloc_params.hash_bdy.size;
667 mem_cmd.system_addr = alloc_params.hash_bdy.phys_base;
668 mem_cmd.local_addr = lcl_hash_bdy;
669 cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
670 IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
671 if (!cmd_pyld[num_cmd]) {
672 IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
673 ip);
674 goto fail_imm_cmd_construct;
675 }
676 desc[num_cmd].opcode =
677 ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
678 desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
679 desc[num_cmd].len = cmd_pyld[num_cmd]->len;
680 desc[num_cmd++].type = IPA_IMM_CMD_DESC;
681 }
682
683 if (ipa3_send_cmd(num_cmd, desc)) {
684 IPAERR("fail to send immediate command\n");
685 rc = -EFAULT;
686 goto fail_imm_cmd_construct;
687 }
688
689 IPADBG_LOW("Hashable HEAD\n");
690 IPA_DUMP_BUFF(alloc_params.hash_hdr.base,
691 alloc_params.hash_hdr.phys_base, alloc_params.hash_hdr.size);
692
693 IPADBG_LOW("Non-Hashable HEAD\n");
694 IPA_DUMP_BUFF(alloc_params.nhash_hdr.base,
695 alloc_params.nhash_hdr.phys_base, alloc_params.nhash_hdr.size);
696
697 if (alloc_params.hash_bdy.size) {
698 IPADBG_LOW("Hashable BODY\n");
699 IPA_DUMP_BUFF(alloc_params.hash_bdy.base,
700 alloc_params.hash_bdy.phys_base,
701 alloc_params.hash_bdy.size);
702 }
703
704 if (alloc_params.nhash_bdy.size) {
705 IPADBG_LOW("Non-Hashable BODY\n");
706 IPA_DUMP_BUFF(alloc_params.nhash_bdy.base,
707 alloc_params.nhash_bdy.phys_base,
708 alloc_params.nhash_bdy.size);
709 }
710
711 __ipa_reap_sys_flt_tbls(ip, IPA_RULE_HASHABLE);
712 __ipa_reap_sys_flt_tbls(ip, IPA_RULE_NON_HASHABLE);
713
714fail_imm_cmd_construct:
715 for (i = 0 ; i < num_cmd ; i++)
716 ipahal_destroy_imm_cmd(cmd_pyld[i]);
717fail_reg_write_construct:
718 kfree(desc);
719 kfree(cmd_pyld);
720fail_size_valid:
721 if (alloc_params.hash_hdr.size)
722 ipahal_free_dma_mem(&alloc_params.hash_hdr);
723 ipahal_free_dma_mem(&alloc_params.nhash_hdr);
724 if (alloc_params.hash_bdy.size)
725 ipahal_free_dma_mem(&alloc_params.hash_bdy);
726 if (alloc_params.nhash_bdy.size)
727 ipahal_free_dma_mem(&alloc_params.nhash_bdy);
728prep_failed:
729 return rc;
730}
731
732static int __ipa_validate_flt_rule(const struct ipa_flt_rule *rule,
733 struct ipa3_rt_tbl **rt_tbl, enum ipa_ip_type ip)
734{
735 if (rule->action != IPA_PASS_TO_EXCEPTION) {
736 if (!rule->eq_attrib_type) {
737 if (!rule->rt_tbl_hdl) {
738 IPAERR("invalid RT tbl\n");
739 goto error;
740 }
741
742 *rt_tbl = ipa3_id_find(rule->rt_tbl_hdl);
743 if (*rt_tbl == NULL) {
744 IPAERR("RT tbl not found\n");
745 goto error;
746 }
747
748 if ((*rt_tbl)->cookie != IPA_COOKIE) {
749 IPAERR("RT table cookie is invalid\n");
750 goto error;
751 }
752 } else {
753 if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ?
754 IPA_MEM_PART(v4_modem_rt_index_hi) :
755 IPA_MEM_PART(v6_modem_rt_index_hi))) {
756 IPAERR("invalid RT tbl\n");
757 goto error;
758 }
759 }
760 }
761
762 if (rule->rule_id) {
763 if (!(rule->rule_id & ipahal_get_rule_id_hi_bit())) {
764 IPAERR("invalid rule_id provided 0x%x\n"
765 "rule_id with bit 0x%x are auto generated\n",
766 rule->rule_id, ipahal_get_rule_id_hi_bit());
767 goto error;
768 }
769 }
770
771 return 0;
772
773error:
774 return -EPERM;
775}
776
777static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry,
778 const struct ipa_flt_rule *rule, struct ipa3_rt_tbl *rt_tbl,
779 struct ipa3_flt_tbl *tbl)
780{
781 int id;
782
783 *entry = kmem_cache_zalloc(ipa3_ctx->flt_rule_cache, GFP_KERNEL);
784 if (!*entry) {
785 IPAERR("failed to alloc FLT rule object\n");
786 goto error;
787 }
788 INIT_LIST_HEAD(&((*entry)->link));
789 (*entry)->rule = *rule;
790 (*entry)->cookie = IPA_COOKIE;
791 (*entry)->rt_tbl = rt_tbl;
792 (*entry)->tbl = tbl;
793 if (rule->rule_id) {
794 id = rule->rule_id;
795 } else {
796 id = ipa3_alloc_rule_id(&tbl->rule_ids);
797 if (id < 0) {
798 IPAERR("failed to allocate rule id\n");
799 WARN_ON(1);
800 goto rule_id_fail;
801 }
802 }
803 (*entry)->rule_id = id;
804
805 return 0;
806
807rule_id_fail:
808 kmem_cache_free(ipa3_ctx->flt_rule_cache, *entry);
809error:
810 return -EPERM;
811}
812
/*
 * Finalize the insertion of a filter entry that is already linked into the
 * table's rule list: bump the rule count, take a reference on the attached
 * routing table, and publish a lookup handle for the new rule.
 */
static int __ipa_finish_flt_rule_add(struct ipa3_flt_tbl *tbl,
		struct ipa3_flt_entry *entry, u32 *rule_hdl)
{
	int id;

	tbl->rule_cnt++;
	/* the rule pins its routing table while installed */
	if (entry->rt_tbl)
		entry->rt_tbl->ref_cnt++;
	id = ipa3_id_alloc(entry);
	if (id < 0) {
		IPAERR("failed to add to tree\n");
		WARN_ON(1);
	}
	/* NOTE(review): when ipa3_id_alloc() fails, the negative id is
	 * still published as the handle and 0 is returned — confirm
	 * callers tolerate this (the function always reports success)
	 */
	*rule_hdl = id;
	entry->id = id;
	IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt);

	return 0;
}
832
833static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip,
834 const struct ipa_flt_rule *rule, u8 add_rear,
835 u32 *rule_hdl)
836{
837 struct ipa3_flt_entry *entry;
838 struct ipa3_rt_tbl *rt_tbl = NULL;
839
840 if (__ipa_validate_flt_rule(rule, &rt_tbl, ip))
841 goto error;
842
843 if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl))
844 goto error;
845
846 if (add_rear) {
847 if (tbl->sticky_rear)
848 list_add_tail(&entry->link,
849 tbl->head_flt_rule_list.prev);
850 else
851 list_add_tail(&entry->link, &tbl->head_flt_rule_list);
852 } else {
853 list_add(&entry->link, &tbl->head_flt_rule_list);
854 }
855
856 __ipa_finish_flt_rule_add(tbl, entry, rule_hdl);
857
858 return 0;
859
860error:
861 return -EPERM;
862}
863
864static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl,
865 const struct ipa_flt_rule *rule,
866 u32 *rule_hdl,
867 enum ipa_ip_type ip,
868 struct ipa3_flt_entry **add_after_entry)
869{
870 struct ipa3_flt_entry *entry;
871 struct ipa3_rt_tbl *rt_tbl = NULL;
872
873 if (!*add_after_entry)
874 goto error;
875
876 if (rule == NULL || rule_hdl == NULL) {
877 IPAERR("bad parms rule=%p rule_hdl=%p\n", rule,
878 rule_hdl);
879 goto error;
880 }
881
882 if (__ipa_validate_flt_rule(rule, &rt_tbl, ip))
883 goto error;
884
885 if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl))
886 goto error;
887
888 list_add(&entry->link, &((*add_after_entry)->link));
889
890 __ipa_finish_flt_rule_add(tbl, entry, rule_hdl);
891
892 /*
893 * prepare for next insertion
894 */
895 *add_after_entry = entry;
896
897 return 0;
898
899error:
900 *add_after_entry = NULL;
901 return -EPERM;
902}
903
904static int __ipa_del_flt_rule(u32 rule_hdl)
905{
906 struct ipa3_flt_entry *entry;
907 int id;
908
909 entry = ipa3_id_find(rule_hdl);
910 if (entry == NULL) {
911 IPAERR("lookup failed\n");
912 return -EINVAL;
913 }
914
915 if (entry->cookie != IPA_COOKIE) {
916 IPAERR("bad params\n");
917 return -EINVAL;
918 }
919 id = entry->id;
920
921 list_del(&entry->link);
922 entry->tbl->rule_cnt--;
923 if (entry->rt_tbl)
924 entry->rt_tbl->ref_cnt--;
925 IPADBG("del flt rule rule_cnt=%d rule_id=%d\n",
926 entry->tbl->rule_cnt, entry->rule_id);
927 entry->cookie = 0;
928 /* if rule id was allocated from idr, remove it */
929 if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
930 idr_remove(&entry->tbl->rule_ids, entry->rule_id);
931
932 kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
933
934 /* remove the handle from the database */
935 ipa3_id_remove(id);
936
937 return 0;
938}
939
940static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule,
941 enum ipa_ip_type ip)
942{
943 struct ipa3_flt_entry *entry;
944 struct ipa3_rt_tbl *rt_tbl = NULL;
945
946 entry = ipa3_id_find(frule->rule_hdl);
947 if (entry == NULL) {
948 IPAERR("lookup failed\n");
949 goto error;
950 }
951
952 if (entry->cookie != IPA_COOKIE) {
953 IPAERR("bad params\n");
954 goto error;
955 }
956
957 if (entry->rt_tbl)
958 entry->rt_tbl->ref_cnt--;
959
960 if (frule->rule.action != IPA_PASS_TO_EXCEPTION) {
961 if (!frule->rule.eq_attrib_type) {
962 if (!frule->rule.rt_tbl_hdl) {
963 IPAERR("invalid RT tbl\n");
964 goto error;
965 }
966
967 rt_tbl = ipa3_id_find(frule->rule.rt_tbl_hdl);
968 if (rt_tbl == NULL) {
969 IPAERR("RT tbl not found\n");
970 goto error;
971 }
972
973 if (rt_tbl->cookie != IPA_COOKIE) {
974 IPAERR("RT table cookie is invalid\n");
975 goto error;
976 }
977 } else {
978 if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ?
979 IPA_MEM_PART(v4_modem_rt_index_hi) :
980 IPA_MEM_PART(v6_modem_rt_index_hi))) {
981 IPAERR("invalid RT tbl\n");
982 goto error;
983 }
984 }
985 }
986
987 entry->rule = frule->rule;
988 entry->rt_tbl = rt_tbl;
989 if (entry->rt_tbl)
990 entry->rt_tbl->ref_cnt++;
991 entry->hw_len = 0;
992 entry->prio = 0;
993
994 return 0;
995
996error:
997 return -EPERM;
998}
999
1000static int __ipa_add_flt_get_ep_idx(enum ipa_client_type ep, int *ipa_ep_idx)
1001{
1002 *ipa_ep_idx = ipa3_get_ep_mapping(ep);
Ghanim Fodi79ee8d82017-02-27 16:39:25 +02001003 if (*ipa_ep_idx < 0) {
Amir Levy9659e592016-10-27 18:08:27 +03001004 IPAERR("ep not valid ep=%d\n", ep);
1005 return -EINVAL;
1006 }
1007 if (ipa3_ctx->ep[*ipa_ep_idx].valid == 0)
1008 IPADBG("ep not connected ep_idx=%d\n", *ipa_ep_idx);
1009
1010 if (!ipa_is_ep_support_flt(*ipa_ep_idx)) {
1011 IPAERR("ep do not support filtering ep=%d\n", ep);
1012 return -EINVAL;
1013 }
1014
1015 return 0;
1016}
1017
1018static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
1019 const struct ipa_flt_rule *rule, u8 add_rear,
1020 u32 *rule_hdl)
1021{
1022 struct ipa3_flt_tbl *tbl;
1023 int ipa_ep_idx;
1024
1025 if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) {
1026 IPAERR("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
1027 rule_hdl, ep);
1028
1029 return -EINVAL;
1030 }
1031
1032 if (__ipa_add_flt_get_ep_idx(ep, &ipa_ep_idx))
1033 return -EINVAL;
1034
1035 tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][ip];
1036 IPADBG_LOW("add ep flt rule ip=%d ep=%d\n", ip, ep);
1037
1038 return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
1039}
1040
/**
 * ipa3_add_flt_rule() - Add the specified filtering rules to SW and optionally
 * commit to IPA HW
 * @rules: [inout] set of rules to add; per-rule status and handle are
 * written back into each element
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
{
	int i;
	int result;

	if (rules == NULL || rules->num_rules == 0 ||
			rules->ip >= IPA_IP_MAX) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	/* each rule carries its own status; a single failure does not
	 * abort the batch
	 */
	for (i = 0; i < rules->num_rules; i++) {
		if (!rules->global)
			result = __ipa_add_ep_flt_rule(rules->ip, rules->ep,
					&rules->rules[i].rule,
					rules->rules[i].at_rear,
					&rules->rules[i].flt_rule_hdl);
		else
			result = -1;

		if (result) {
			IPAERR("failed to add flt rule %d\n", i);
			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
		} else {
			rules->rules[i].status = 0;
		}
	}

	/* global filter rules are not supported on this HW generation */
	if (rules->global) {
		IPAERR("no support for global filter rules\n");
		result = -EPERM;
		goto bail;
	}

	if (rules->commit)
		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
			result = -EPERM;
			goto bail;
		}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}
1095
/**
 * ipa3_add_flt_rule_after() - Add the specified filtering rules to SW after
 * the rule which its handle is given and optionally commit to IPA HW
 * @rules: [inout] set of rules to add; per-rule status and handle are
 *  written back into the array
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules)
{
	int i;
	int result;
	struct ipa3_flt_tbl *tbl;
	int ipa_ep_idx;
	struct ipa3_flt_entry *entry;

	if (rules == NULL || rules->num_rules == 0 ||
			rules->ip >= IPA_IP_MAX) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	if (rules->ep >= IPA_CLIENT_MAX) {
		IPAERR("bad parms ep=%d\n", rules->ep);
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);

	if (__ipa_add_flt_get_ep_idx(rules->ep, &ipa_ep_idx)) {
		result = -EINVAL;
		goto bail;
	}

	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][rules->ip];

	/* the "add after" anchor rule must exist ... */
	entry = ipa3_id_find(rules->add_after_hdl);
	if (entry == NULL) {
		IPAERR("lookup failed\n");
		result = -EINVAL;
		goto bail;
	}

	/* ... and must belong to the same ep/ip table being modified */
	if (entry->tbl != tbl) {
		IPAERR("given entry does not match the table\n");
		result = -EINVAL;
		goto bail;
	}

	/*
	 * a sticky-rear table keeps its last rule (e.g. the default
	 * exception rule) fixed at the end, so inserting after the last
	 * rule is not allowed
	 */
	if (tbl->sticky_rear)
		if (&entry->link == tbl->head_flt_rule_list.prev) {
			IPAERR("cannot add rule at end of a sticky table");
			result = -EINVAL;
			goto bail;
		}

	IPADBG("add ep flt rule ip=%d ep=%d after hdl %d\n",
			rules->ip, rules->ep, rules->add_after_hdl);

	/*
	 * we add all rules one after the other, if one insertion fails, it cuts
	 * the chain (all following will receive fail status) following calls to
	 * __ipa_add_flt_rule_after will fail (entry == NULL)
	 */

	for (i = 0; i < rules->num_rules; i++) {
		/* entry is an insertion cursor advanced by the callee */
		result = __ipa_add_flt_rule_after(tbl,
				&rules->rules[i].rule,
				&rules->rules[i].flt_rule_hdl,
				rules->ip,
				&entry);

		if (result) {
			IPAERR("failed to add flt rule %d\n", i);
			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
		} else {
			rules->rules[i].status = 0;
		}
	}

	if (rules->commit)
		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
			IPAERR("failed to commit flt rules\n");
			result = -EPERM;
			goto bail;
		}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}
1188
1189/**
1190 * ipa3_del_flt_rule() - Remove the specified filtering rules from SW and
1191 * optionally commit to IPA HW
1192 *
1193 * Returns: 0 on success, negative on failure
1194 *
1195 * Note: Should not be called from atomic context
1196 */
1197int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
1198{
1199 int i;
1200 int result;
1201
1202 if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
1203 IPAERR("bad parm\n");
1204 return -EINVAL;
1205 }
1206
1207 mutex_lock(&ipa3_ctx->lock);
1208 for (i = 0; i < hdls->num_hdls; i++) {
1209 if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
1210 IPAERR("failed to del flt rule %i\n", i);
1211 hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
1212 } else {
1213 hdls->hdl[i].status = 0;
1214 }
1215 }
1216
1217 if (hdls->commit)
1218 if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) {
1219 result = -EPERM;
1220 goto bail;
1221 }
1222 result = 0;
1223bail:
1224 mutex_unlock(&ipa3_ctx->lock);
1225
1226 return result;
1227}
1228
1229/**
1230 * ipa3_mdfy_flt_rule() - Modify the specified filtering rules in SW and
1231 * optionally commit to IPA HW
1232 *
1233 * Returns: 0 on success, negative on failure
1234 *
1235 * Note: Should not be called from atomic context
1236 */
1237int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
1238{
1239 int i;
1240 int result;
1241
1242 if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
1243 IPAERR("bad parm\n");
1244 return -EINVAL;
1245 }
1246
1247 mutex_lock(&ipa3_ctx->lock);
1248 for (i = 0; i < hdls->num_rules; i++) {
1249 if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) {
1250 IPAERR("failed to mdfy flt rule %i\n", i);
1251 hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
1252 } else {
1253 hdls->rules[i].status = 0;
1254 }
1255 }
1256
1257 if (hdls->commit)
1258 if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) {
1259 result = -EPERM;
1260 goto bail;
1261 }
1262 result = 0;
1263bail:
1264 mutex_unlock(&ipa3_ctx->lock);
1265
1266 return result;
1267}
1268
1269
1270/**
1271 * ipa3_commit_flt() - Commit the current SW filtering table of specified type
1272 * to IPA HW
1273 * @ip: [in] the family of routing tables
1274 *
1275 * Returns: 0 on success, negative on failure
1276 *
1277 * Note: Should not be called from atomic context
1278 */
1279int ipa3_commit_flt(enum ipa_ip_type ip)
1280{
1281 int result;
1282
1283 if (ip >= IPA_IP_MAX) {
1284 IPAERR("bad parm\n");
1285 return -EINVAL;
1286 }
1287
1288 mutex_lock(&ipa3_ctx->lock);
1289
1290 if (ipa3_ctx->ctrl->ipa3_commit_flt(ip)) {
1291 result = -EPERM;
1292 goto bail;
1293 }
1294 result = 0;
1295
1296bail:
1297 mutex_unlock(&ipa3_ctx->lock);
1298
1299 return result;
1300}
1301
/**
 * ipa3_reset_flt() - Reset the current SW filtering table of specified type
 * (does not commit to HW)
 * @ip: [in] the family of routing tables
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_reset_flt(enum ipa_ip_type ip)
{
	struct ipa3_flt_tbl *tbl;
	struct ipa3_flt_entry *entry;
	struct ipa3_flt_entry *next;
	int i;
	int id;

	if (ip >= IPA_IP_MAX) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	/* walk every filtering-capable pipe and free all its SW rules */
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i))
			continue;

		tbl = &ipa3_ctx->flt_tbl[i][ip];
		list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list,
				link) {
			/*
			 * sanity: every listed rule must still be in the
			 * handle database; a miss means SW state corruption
			 */
			if (ipa3_id_find(entry->id) == NULL) {
				WARN_ON(1);
				mutex_unlock(&ipa3_ctx->lock);
				return -EFAULT;
			}
			list_del(&entry->link);
			entry->tbl->rule_cnt--;
			/* drop the routing table reference this rule held */
			if (entry->rt_tbl)
				entry->rt_tbl->ref_cnt--;
			/* if rule id was allocated from idr, remove it */
			if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
				idr_remove(&entry->tbl->rule_ids,
						entry->rule_id);
			/* invalidate before freeing to catch stale users */
			entry->cookie = 0;
			id = entry->id;
			kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);

			/* remove the handle from the database */
			ipa3_id_remove(id);
		}
	}
	mutex_unlock(&ipa3_ctx->lock);

	return 0;
}
1357
1358void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx)
1359{
1360 struct ipa3_flt_tbl *tbl;
1361 struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx];
1362 struct ipa_flt_rule rule;
1363
1364 if (!ipa_is_ep_support_flt(ipa_ep_idx)) {
1365 IPADBG("cannot add flt rules to non filtering pipe num %d\n",
1366 ipa_ep_idx);
1367 return;
1368 }
1369
1370 memset(&rule, 0, sizeof(rule));
1371
1372 mutex_lock(&ipa3_ctx->lock);
1373 tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
Amir Levy9659e592016-10-27 18:08:27 +03001374 rule.action = IPA_PASS_TO_EXCEPTION;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001375 __ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, true,
Amir Levy9659e592016-10-27 18:08:27 +03001376 &ep->dflt_flt4_rule_hdl);
1377 ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001378 tbl->sticky_rear = true;
Amir Levy9659e592016-10-27 18:08:27 +03001379
1380 tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
Amir Levy9659e592016-10-27 18:08:27 +03001381 rule.action = IPA_PASS_TO_EXCEPTION;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001382 __ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, true,
Amir Levy9659e592016-10-27 18:08:27 +03001383 &ep->dflt_flt6_rule_hdl);
1384 ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001385 tbl->sticky_rear = true;
Amir Levy9659e592016-10-27 18:08:27 +03001386 mutex_unlock(&ipa3_ctx->lock);
1387}
1388
1389void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx)
1390{
1391 struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx];
1392
1393 mutex_lock(&ipa3_ctx->lock);
1394 if (ep->dflt_flt4_rule_hdl) {
1395 __ipa_del_flt_rule(ep->dflt_flt4_rule_hdl);
1396 ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
1397 ep->dflt_flt4_rule_hdl = 0;
1398 }
1399 if (ep->dflt_flt6_rule_hdl) {
1400 __ipa_del_flt_rule(ep->dflt_flt6_rule_hdl);
1401 ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
1402 ep->dflt_flt6_rule_hdl = 0;
1403 }
1404 mutex_unlock(&ipa3_ctx->lock);
1405}
1406
1407/**
1408 * ipa3_set_flt_tuple_mask() - Sets the flt tuple masking for the given pipe
1409 * Pipe must be for AP EP (not modem) and support filtering
1410 * updates the the filtering masking values without changing the rt ones.
1411 *
1412 * @pipe_idx: filter pipe index to configure the tuple masking
1413 * @tuple: the tuple members masking
1414 * Returns: 0 on success, negative on failure
1415 *
1416 */
1417int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple)
1418{
1419 struct ipahal_reg_fltrt_hash_tuple fltrt_tuple;
1420
1421 if (!tuple) {
1422 IPAERR("bad tuple\n");
1423 return -EINVAL;
1424 }
1425
1426 if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
1427 IPAERR("bad pipe index!\n");
1428 return -EINVAL;
1429 }
1430
1431 if (!ipa_is_ep_support_flt(pipe_idx)) {
1432 IPAERR("pipe %d not filtering pipe\n", pipe_idx);
1433 return -EINVAL;
1434 }
1435
1436 if (ipa_is_modem_pipe(pipe_idx)) {
1437 IPAERR("modem pipe tuple is not configured by AP\n");
1438 return -EINVAL;
1439 }
1440
1441 ipahal_read_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
1442 pipe_idx, &fltrt_tuple);
1443 fltrt_tuple.flt = *tuple;
1444 ipahal_write_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
1445 pipe_idx, &fltrt_tuple);
1446
1447 return 0;
1448}
1449
1450/**
1451 * ipa3_flt_read_tbl_from_hw() -Read filtering table from IPA HW
1452 * @pipe_idx: IPA endpoint index
1453 * @ip_type: IPv4 or IPv6 table
1454 * @hashable: hashable or non-hashable table
1455 * @entry: array to fill the table entries
1456 * @num_entry: number of entries in entry array. set by the caller to indicate
1457 * entry array size. Then set by this function as an output parameter to
1458 * indicate the number of entries in the array
1459 *
1460 * This function reads the filtering table from IPA SRAM and prepares an array
1461 * of entries. This function is mainly used for debugging purposes.
1462 *
1463 * If empty table or Modem Apps table, zero entries will be returned.
1464 *
1465 * Returns: 0 on success, negative on failure
1466 */
1467int ipa3_flt_read_tbl_from_hw(u32 pipe_idx, enum ipa_ip_type ip_type,
1468 bool hashable, struct ipahal_flt_rule_entry entry[], int *num_entry)
1469{
1470 void *ipa_sram_mmio;
1471 u64 hdr_base_ofst;
1472 int tbl_entry_idx;
1473 int i;
1474 int res = 0;
1475 u64 tbl_addr;
1476 bool is_sys;
1477 u8 *rule_addr;
1478 struct ipa_mem_buffer *sys_tbl_mem;
1479 int rule_idx;
1480
1481 IPADBG("pipe_idx=%d ip=%d hashable=%d entry=0x%p num_entry=0x%p\n",
1482 pipe_idx, ip_type, hashable, entry, num_entry);
1483
1484 if (pipe_idx >= ipa3_ctx->ipa_num_pipes || ip_type >= IPA_IP_MAX ||
1485 !entry || !num_entry) {
1486 IPAERR("Invalid params\n");
1487 return -EFAULT;
1488 }
1489
1490 if (!ipa_is_ep_support_flt(pipe_idx)) {
1491 IPAERR("pipe %d does not support filtering\n", pipe_idx);
1492 return -EINVAL;
1493 }
1494
1495 /* map IPA SRAM */
1496 ipa_sram_mmio = ioremap(ipa3_ctx->ipa_wrapper_base +
1497 ipa3_ctx->ctrl->ipa_reg_base_ofst +
1498 ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
1499 ipa3_ctx->smem_restricted_bytes / 4),
1500 ipa3_ctx->smem_sz);
1501 if (!ipa_sram_mmio) {
1502 IPAERR("fail to ioremap IPA SRAM\n");
1503 return -ENOMEM;
1504 }
1505
1506 memset(entry, 0, sizeof(*entry) * (*num_entry));
1507 if (hashable) {
1508 if (ip_type == IPA_IP_v4)
1509 hdr_base_ofst =
1510 IPA_MEM_PART(v4_flt_hash_ofst);
1511 else
1512 hdr_base_ofst =
1513 IPA_MEM_PART(v6_flt_hash_ofst);
1514 } else {
1515 if (ip_type == IPA_IP_v4)
1516 hdr_base_ofst =
1517 IPA_MEM_PART(v4_flt_nhash_ofst);
1518 else
1519 hdr_base_ofst =
1520 IPA_MEM_PART(v6_flt_nhash_ofst);
1521 }
1522
1523 /* calculate the index of the tbl entry */
1524 tbl_entry_idx = 1; /* skip the bitmap */
1525 for (i = 0; i < pipe_idx; i++)
1526 if (ipa3_ctx->ep_flt_bitmap & (1 << i))
1527 tbl_entry_idx++;
1528
1529 IPADBG("hdr_base_ofst=0x%llx tbl_entry_idx=%d\n",
1530 hdr_base_ofst, tbl_entry_idx);
1531
1532 res = ipahal_fltrt_read_addr_from_hdr(ipa_sram_mmio + hdr_base_ofst,
1533 tbl_entry_idx, &tbl_addr, &is_sys);
1534 if (res) {
1535 IPAERR("failed to read table address from header structure\n");
1536 goto bail;
1537 }
1538 IPADBG("flt tbl ep=%d: tbl_addr=0x%llx is_sys=%d\n",
1539 pipe_idx, tbl_addr, is_sys);
1540 if (!tbl_addr) {
1541 IPAERR("invalid flt tbl addr\n");
1542 res = -EFAULT;
1543 goto bail;
1544 }
1545
1546 /* for tables resides in DDR access it from the virtual memory */
1547 if (is_sys) {
1548 sys_tbl_mem = &ipa3_ctx->flt_tbl[pipe_idx][ip_type].
1549 curr_mem[hashable ? IPA_RULE_HASHABLE :
1550 IPA_RULE_NON_HASHABLE];
1551 if (sys_tbl_mem->phys_base &&
1552 sys_tbl_mem->phys_base != tbl_addr) {
1553 IPAERR("mismatch addr: parsed=%llx sw=%pad\n",
1554 tbl_addr, &sys_tbl_mem->phys_base);
1555 }
1556 if (sys_tbl_mem->phys_base)
1557 rule_addr = sys_tbl_mem->base;
1558 else
1559 rule_addr = NULL;
1560 } else {
1561 rule_addr = ipa_sram_mmio + hdr_base_ofst + tbl_addr;
1562 }
1563
1564 IPADBG("First rule addr 0x%p\n", rule_addr);
1565
1566 if (!rule_addr) {
1567 /* Modem table in system memory or empty table */
1568 *num_entry = 0;
1569 goto bail;
1570 }
1571
1572 rule_idx = 0;
1573 while (rule_idx < *num_entry) {
1574 res = ipahal_flt_parse_hw_rule(rule_addr, &entry[rule_idx]);
1575 if (res) {
1576 IPAERR("failed parsing flt rule\n");
1577 goto bail;
1578 }
1579
1580 IPADBG("rule_size=%d\n", entry[rule_idx].rule_size);
1581 if (!entry[rule_idx].rule_size)
1582 break;
1583
1584 rule_addr += entry[rule_idx].rule_size;
1585 rule_idx++;
1586 }
1587 *num_entry = rule_idx;
1588bail:
1589 iounmap(ipa_sram_mmio);
1590 return 0;
1591}