/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "ipa_i.h"
#include "ipahal/ipahal.h"
#include "ipahal/ipahal_fltrt.h"

#define IPA_FLT_STATUS_OF_ADD_FAILED (-1)
#define IPA_FLT_STATUS_OF_DEL_FAILED (-1)
#define IPA_FLT_STATUS_OF_MDFY_FAILED (-1)

#define IPA_FLT_GET_RULE_TYPE(__entry) \
	( \
	((__entry)->rule.hashable) ? \
	(IPA_RULE_HASHABLE) : (IPA_RULE_NON_HASHABLE) \
	)

/**
 * ipa3_generate_flt_hw_rule() - generates the filtering hardware rule
 * @ip: the ip address family type
 * @entry: filtering entry
 * @buf: output buffer. buf == NULL means the caller wants to know the
 *	size of the rule as seen by HW, so no valid buffer was passed and
 *	a scratch buffer is used instead. With this scheme the rule is
 *	generated twice: once to learn its size using the scratch buffer,
 *	and a second time to write the rule into the caller-supplied
 *	buffer of the required size
 *
 * Returns: 0 on success, negative on failure
 *
 * Caller needs to hold any needed locks to ensure integrity
 */
static int ipa3_generate_flt_hw_rule(enum ipa_ip_type ip,
	struct ipa3_flt_entry *entry, u8 *buf)
{
	struct ipahal_flt_rule_gen_params gen_params;
	int res = 0;

	memset(&gen_params, 0, sizeof(gen_params));

	gen_params.ipt = ip;
	if (entry->rt_tbl)
		gen_params.rt_tbl_idx = entry->rt_tbl->idx;
	else
		gen_params.rt_tbl_idx = entry->rule.rt_tbl_idx;

	gen_params.priority = entry->prio;
	gen_params.id = entry->rule_id;
	gen_params.rule = (const struct ipa_flt_rule *)&entry->rule;

	res = ipahal_flt_generate_hw_rule(&gen_params, &entry->hw_len, buf);
	if (res)
		IPAERR("failed to generate flt h/w rule\n");

	/* propagate the HAL result; returning 0 here would hide failures */
	return res;
}
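
/*
 * Illustrative usage sketch (not part of the driver): callers make a
 * sizing pass with buf == NULL before generating into a real buffer:
 *
 *	(1) sizing pass, fills entry->hw_len:
 *		if (ipa3_generate_flt_hw_rule(ip, entry, NULL))
 *			return -EPERM;
 *	(2) fill pass into a buffer cur with room for entry->hw_len bytes:
 *		if (ipa3_generate_flt_hw_rule(ip, entry, cur))
 *			return -EPERM;
 *		cur += entry->hw_len;
 *
 * See ipa_prep_flt_tbl_for_cmt() (sizing) and
 * ipa_translate_flt_tbl_to_hw_fmt() (fill) below for the real call sites.
 */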

static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip, enum ipa_rule_type rlt)
{
	struct ipa3_flt_tbl *tbl;
	int i;

	IPADBG_LOW("reaping sys flt tbls ip=%d rlt=%d\n", ip, rlt);

	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i))
			continue;

		tbl = &ipa3_ctx->flt_tbl[i][ip];
		if (tbl->prev_mem[rlt].phys_base) {
			IPADBG_LOW("reaping flt tbl (prev) pipe=%d\n", i);
			ipahal_free_dma_mem(&tbl->prev_mem[rlt]);
		}

		if (list_empty(&tbl->head_flt_rule_list)) {
			if (tbl->curr_mem[rlt].phys_base) {
				IPADBG_LOW("reaping flt tbl (curr) pipe=%d\n",
					i);
				ipahal_free_dma_mem(&tbl->curr_mem[rlt]);
			}
		}
	}
}

/**
 * ipa_prep_flt_tbl_for_cmt() - prepare the flt table for commit:
 *  assign priorities to the rules, calculate their sizes and calculate
 *  the overall table size
 * @ip: the ip address family type
 * @tbl: the flt tbl to be prepared
 * @pipe_idx: the ep pipe appropriate for the given tbl
 *
 * Return: 0 on success, negative on failure
 */
static int ipa_prep_flt_tbl_for_cmt(enum ipa_ip_type ip,
	struct ipa3_flt_tbl *tbl, int pipe_idx)
{
	struct ipa3_flt_entry *entry;
	int prio_i;
	int max_prio;
	u32 hdr_width;

	tbl->sz[IPA_RULE_HASHABLE] = 0;
	tbl->sz[IPA_RULE_NON_HASHABLE] = 0;

	max_prio = ipahal_get_rule_max_priority();

	prio_i = max_prio;
	list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {

		if (entry->rule.max_prio) {
			entry->prio = max_prio;
		} else {
			if (ipahal_rule_decrease_priority(&prio_i)) {
				IPAERR("cannot decrease rule priority - %d\n",
					prio_i);
				return -EPERM;
			}
			entry->prio = prio_i;
		}

		if (ipa3_generate_flt_hw_rule(ip, entry, NULL)) {
			IPAERR("failed to calculate HW FLT rule size\n");
			return -EPERM;
		}
		IPADBG_LOW("pipe %d rule_id(handle) %u hw_len %d priority %u\n",
			pipe_idx, entry->rule_id, entry->hw_len, entry->prio);

		if (entry->rule.hashable)
			tbl->sz[IPA_RULE_HASHABLE] += entry->hw_len;
		else
			tbl->sz[IPA_RULE_NON_HASHABLE] += entry->hw_len;
	}

	if ((tbl->sz[IPA_RULE_HASHABLE] +
		tbl->sz[IPA_RULE_NON_HASHABLE]) == 0) {
		IPADBG_LOW("flt tbl pipe %d has zero total size\n",
			pipe_idx);
		return 0;
	}

	hdr_width = ipahal_get_hw_tbl_hdr_width();

	/* for the header word */
	if (tbl->sz[IPA_RULE_HASHABLE])
		tbl->sz[IPA_RULE_HASHABLE] += hdr_width;
	if (tbl->sz[IPA_RULE_NON_HASHABLE])
		tbl->sz[IPA_RULE_NON_HASHABLE] += hdr_width;

	IPADBG_LOW("FLT tbl pipe idx %d hash sz %u non-hash sz %u\n", pipe_idx,
		tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]);

	return 0;
}
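
/*
 * Worked example (illustrative numbers, assuming an 8B table-header
 * word): a table with two hashable rules of hw_len 24 and 16 and one
 * non-hashable rule of hw_len 20 ends up with
 * sz[IPA_RULE_HASHABLE] = 24 + 16 + 8 = 48 and
 * sz[IPA_RULE_NON_HASHABLE] = 20 + 8 = 28. A rule set of a single type
 * leaves the other size at zero, so no header word is added for it.
 */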

/**
 * ipa_translate_flt_tbl_to_hw_fmt() - translate the flt driver structures
 *  (rules and tables) to HW format and fill it in the given buffers
 * @ip: the ip address family type
 * @rlt: the type of the rules to translate (hashable or non-hashable)
 * @base: the rules body buffer to be filled
 * @hdr: the rules header (addresses/offsets) buffer to be filled
 * @body_ofst: the offset of the rules body from the rules header at
 *  ipa sram
 *
 * Returns: 0 on success, negative on failure
 *
 * Caller needs to hold any needed locks to ensure integrity
 */
static int ipa_translate_flt_tbl_to_hw_fmt(enum ipa_ip_type ip,
	enum ipa_rule_type rlt, u8 *base, u8 *hdr, u32 body_ofst)
{
	u64 offset;
	u8 *body_i;
	int res;
	struct ipa3_flt_entry *entry;
	u8 *tbl_mem_buf;
	struct ipa_mem_buffer tbl_mem;
	struct ipa3_flt_tbl *tbl;
	int i;
	int hdr_idx = 0;

	body_i = base;
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i))
			continue;
		tbl = &ipa3_ctx->flt_tbl[i][ip];
		if (tbl->sz[rlt] == 0) {
			hdr_idx++;
			continue;
		}
		if (tbl->in_sys[rlt]) {
			/* only body (no header) */
			tbl_mem.size = tbl->sz[rlt] -
				ipahal_get_hw_tbl_hdr_width();
			if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) {
				IPAERR("fail to alloc sys tbl of size %d\n",
					tbl_mem.size);
				goto err;
			}

			if (ipahal_fltrt_write_addr_to_hdr(tbl_mem.phys_base,
				hdr, hdr_idx, true)) {
				IPAERR("fail to wrt sys tbl addr to hdr\n");
				goto hdr_update_fail;
			}

			tbl_mem_buf = tbl_mem.base;

			/* generate the rule-set */
			list_for_each_entry(entry, &tbl->head_flt_rule_list,
				link) {
				if (IPA_FLT_GET_RULE_TYPE(entry) != rlt)
					continue;
				res = ipa3_generate_flt_hw_rule(
					ip, entry, tbl_mem_buf);
				if (res) {
					IPAERR("failed to gen HW FLT rule\n");
					goto hdr_update_fail;
				}
				tbl_mem_buf += entry->hw_len;
			}

			if (tbl->curr_mem[rlt].phys_base) {
				WARN_ON(tbl->prev_mem[rlt].phys_base);
				tbl->prev_mem[rlt] = tbl->curr_mem[rlt];
			}
			tbl->curr_mem[rlt] = tbl_mem;
		} else {
			offset = body_i - base + body_ofst;

			/* update the hdr at the right index */
			if (ipahal_fltrt_write_addr_to_hdr(offset, hdr,
				hdr_idx, true)) {
				IPAERR("fail to wrt lcl tbl ofst to hdr\n");
				goto hdr_update_fail;
			}

			/* generate the rule-set */
			list_for_each_entry(entry, &tbl->head_flt_rule_list,
				link) {
				if (IPA_FLT_GET_RULE_TYPE(entry) != rlt)
					continue;
				res = ipa3_generate_flt_hw_rule(
					ip, entry, body_i);
				if (res) {
					IPAERR("failed to gen HW FLT rule\n");
					goto err;
				}
				body_i += entry->hw_len;
			}

			/*
			 * advance body_i to the next table alignment as
			 * local tables are ordered back-to-back
			 */
			body_i += ipahal_get_lcl_tbl_addr_alignment();
			body_i = (u8 *)((long)body_i &
				~ipahal_get_lcl_tbl_addr_alignment());
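			/*
			 * Illustration (assuming the getter returns the
			 * alignment mask, i.e. alignment - 1): with 8B
			 * alignment it returns 0x7, so body_i at base + 0x1d
			 * moves to (0x1d + 0x7) & ~0x7 = 0x20 past base,
			 * i.e. body_i is rounded up to the next boundary.
			 */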
		}
		hdr_idx++;
	}

	return 0;

hdr_update_fail:
	ipahal_free_dma_mem(&tbl_mem);
err:
	return -EPERM;
}

/**
 * ipa_generate_flt_hw_tbl_img() - generates the flt hw tbls.
 *  Headers and bodies are created in DMA buffers that will later be
 *  copied into the local memory (sram)
 * @ip: the ip address family type
 * @alloc_params: in and out parameters for the allocations of the buffers
 *  4 buffers: hdr and bdy, each hashable and non-hashable
 *
 * Return: 0 on success, negative on failure
 */
static int ipa_generate_flt_hw_tbl_img(enum ipa_ip_type ip,
	struct ipahal_fltrt_alloc_imgs_params *alloc_params)
{
	u32 hash_bdy_start_ofst, nhash_bdy_start_ofst;
	int rc = 0;

	if (ip == IPA_IP_v4) {
		nhash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_nhash_ofst) -
			IPA_MEM_PART(v4_flt_nhash_ofst);
		hash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_hash_ofst) -
			IPA_MEM_PART(v4_flt_hash_ofst);
	} else {
		nhash_bdy_start_ofst = IPA_MEM_PART(apps_v6_flt_nhash_ofst) -
			IPA_MEM_PART(v6_flt_nhash_ofst);
		hash_bdy_start_ofst = IPA_MEM_PART(apps_v6_flt_hash_ofst) -
			IPA_MEM_PART(v6_flt_hash_ofst);
	}

	if (ipahal_fltrt_allocate_hw_tbl_imgs(alloc_params)) {
		IPAERR("fail to allocate FLT HW TBL images. IP %d\n", ip);
		rc = -ENOMEM;
		goto allocate_failed;
	}

	if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_HASHABLE,
		alloc_params->hash_bdy.base, alloc_params->hash_hdr.base,
		hash_bdy_start_ofst)) {
		IPAERR("fail to translate hashable flt tbls to hw format\n");
		rc = -EPERM;
		goto translate_fail;
	}
	if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_NON_HASHABLE,
		alloc_params->nhash_bdy.base, alloc_params->nhash_hdr.base,
		nhash_bdy_start_ofst)) {
		IPAERR("fail to translate non-hash flt tbls to hw format\n");
		rc = -EPERM;
		goto translate_fail;
	}

	return rc;

translate_fail:
	if (alloc_params->hash_hdr.size)
		ipahal_free_dma_mem(&alloc_params->hash_hdr);
	ipahal_free_dma_mem(&alloc_params->nhash_hdr);
	if (alloc_params->hash_bdy.size)
		ipahal_free_dma_mem(&alloc_params->hash_bdy);
	if (alloc_params->nhash_bdy.size)
		ipahal_free_dma_mem(&alloc_params->nhash_bdy);
allocate_failed:
	return rc;
}

/**
 * ipa_flt_valid_lcl_tbl_size() - validate that the space allocated for flt
 *  tbl bodies at the sram is enough for the commit
 * @ipt: the ip address family type
 * @rlt: the rule type (hashable or non-hashable)
 * @bdy: the table body buffer to validate
 *
 * Return: true if enough space is available, false otherwise
 */
static bool ipa_flt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
	enum ipa_rule_type rlt, struct ipa_mem_buffer *bdy)
{
	u16 avail;

	if (!bdy) {
		IPAERR("Bad parameters, bdy = NULL\n");
		return false;
	}

	if (ipt == IPA_IP_v4)
		avail = (rlt == IPA_RULE_HASHABLE) ?
			IPA_MEM_PART(apps_v4_flt_hash_size) :
			IPA_MEM_PART(apps_v4_flt_nhash_size);
	else
		avail = (rlt == IPA_RULE_HASHABLE) ?
			IPA_MEM_PART(apps_v6_flt_hash_size) :
			IPA_MEM_PART(apps_v6_flt_nhash_size);

	if (bdy->size <= avail)
		return true;

	IPAERR("tbl too big, needed %d avail %d ipt %d rlt %d\n",
		bdy->size, avail, ipt, rlt);
	return false;
}

/**
 * ipa_flt_alloc_cmd_buffers() - alloc descriptors and imm cmds
 *  payload pointers buffers for headers and bodies of flt structure
 *  as well as place for flush imm.
 * @ip: the ip address family type
 * @desc: [OUT] descriptor buffer
 * @cmd_pyld: [OUT] imm commands payload pointers buffer
 *
 * Return: 0 on success, negative on failure
 */
static int ipa_flt_alloc_cmd_buffers(enum ipa_ip_type ip,
	struct ipa3_desc **desc, struct ipahal_imm_cmd_pyld ***cmd_pyld)
{
	u16 entries;

	/* +3: 2 for bodies (hashable and non-hashable) and 1 for flushing */
	entries = (ipa3_ctx->ep_flt_num) * 2 + 3;
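	/*
	 * e.g. with ep_flt_num == 8 this yields 8 * 2 + 3 = 19 slots:
	 * two header writes per filtering pipe, plus the two body writes
	 * and the hash-flush register write (illustrative count only).
	 */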

	*desc = kcalloc(entries, sizeof(**desc), GFP_ATOMIC);
	if (*desc == NULL) {
		IPAERR("fail to alloc desc blob ip %d\n", ip);
		goto fail_desc_alloc;
	}

	*cmd_pyld = kcalloc(entries, sizeof(**cmd_pyld), GFP_ATOMIC);
	if (*cmd_pyld == NULL) {
		IPAERR("fail to alloc cmd pyld blob ip %d\n", ip);
		goto fail_cmd_alloc;
	}

	return 0;

fail_cmd_alloc:
	kfree(*desc);
fail_desc_alloc:
	return -ENOMEM;
}

/**
 * ipa_flt_skip_pipe_config() - skip ep flt configuration or not?
 *  will skip according to pre-configuration or modem pipes
 * @pipe: the EP pipe index
 *
 * Return: true if to skip, false otherwise
 */
static bool ipa_flt_skip_pipe_config(int pipe)
{
	if (ipa_is_modem_pipe(pipe)) {
		IPADBG_LOW("skip %d - modem owned pipe\n", pipe);
		return true;
	}

	if (ipa3_ctx->skip_ep_cfg_shadow[pipe]) {
		IPADBG_LOW("skip %d\n", pipe);
		return true;
	}

	if ((ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD) == pipe
		&& ipa3_ctx->modem_cfg_emb_pipe_flt)) {
		IPADBG_LOW("skip %d\n", pipe);
		return true;
	}

	return false;
}

/**
 * __ipa_commit_flt_v3() - commit flt tables to the hw
 *  commit the headers and, if they are local, the bodies, with internal
 *  cache flushing. The headers (and local bodies) are first created in
 *  dma buffers and then written to the SRAM via immediate commands
 * @ip: the ip address family type
 *
 * Return: 0 on success, negative on failure
 */
int __ipa_commit_flt_v3(enum ipa_ip_type ip)
{
	struct ipahal_fltrt_alloc_imgs_params alloc_params;
	int rc = 0;
	struct ipa3_desc *desc;
	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
	struct ipahal_imm_cmd_dma_shared_mem mem_cmd = {0};
	struct ipahal_imm_cmd_pyld **cmd_pyld;
	int num_cmd = 0;
	int i;
	int hdr_idx;
	u32 lcl_hash_hdr, lcl_nhash_hdr;
	u32 lcl_hash_bdy, lcl_nhash_bdy;
	bool lcl_hash, lcl_nhash;
	struct ipahal_reg_fltrt_hash_flush flush;
	struct ipahal_reg_valmask valmask;
	u32 tbl_hdr_width;
	struct ipa3_flt_tbl *tbl;

	tbl_hdr_width = ipahal_get_hw_tbl_hdr_width();
	memset(&alloc_params, 0, sizeof(alloc_params));
	alloc_params.ipt = ip;
	alloc_params.tbls_num = ipa3_ctx->ep_flt_num;

	if (ip == IPA_IP_v4) {
		lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v4_flt_hash_ofst) +
			tbl_hdr_width; /* to skip the bitmap */
		lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v4_flt_nhash_ofst) +
			tbl_hdr_width; /* to skip the bitmap */
		lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(apps_v4_flt_hash_ofst);
		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(apps_v4_flt_nhash_ofst);
		lcl_hash = ipa3_ctx->ip4_flt_tbl_hash_lcl;
		lcl_nhash = ipa3_ctx->ip4_flt_tbl_nhash_lcl;
	} else {
		lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v6_flt_hash_ofst) +
			tbl_hdr_width; /* to skip the bitmap */
		lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v6_flt_nhash_ofst) +
			tbl_hdr_width; /* to skip the bitmap */
		lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(apps_v6_flt_hash_ofst);
		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(apps_v6_flt_nhash_ofst);
		lcl_hash = ipa3_ctx->ip6_flt_tbl_hash_lcl;
		lcl_nhash = ipa3_ctx->ip6_flt_tbl_nhash_lcl;
	}

	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i))
			continue;
		tbl = &ipa3_ctx->flt_tbl[i][ip];
		if (ipa_prep_flt_tbl_for_cmt(ip, tbl, i)) {
			rc = -EPERM;
			goto prep_failed;
		}
		if (!tbl->in_sys[IPA_RULE_HASHABLE] &&
			tbl->sz[IPA_RULE_HASHABLE]) {
			alloc_params.num_lcl_hash_tbls++;
			alloc_params.total_sz_lcl_hash_tbls +=
				tbl->sz[IPA_RULE_HASHABLE];
			alloc_params.total_sz_lcl_hash_tbls -= tbl_hdr_width;
		}
		if (!tbl->in_sys[IPA_RULE_NON_HASHABLE] &&
			tbl->sz[IPA_RULE_NON_HASHABLE]) {
			alloc_params.num_lcl_nhash_tbls++;
			alloc_params.total_sz_lcl_nhash_tbls +=
				tbl->sz[IPA_RULE_NON_HASHABLE];
			alloc_params.total_sz_lcl_nhash_tbls -= tbl_hdr_width;
		}
	}

	if (ipa_generate_flt_hw_tbl_img(ip, &alloc_params)) {
		IPAERR("fail to generate FLT HW TBL image. IP %d\n", ip);
		rc = -EFAULT;
		goto prep_failed;
	}

	if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE,
		&alloc_params.hash_bdy)) {
		rc = -EFAULT;
		goto fail_size_valid;
	}
	if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_NON_HASHABLE,
		&alloc_params.nhash_bdy)) {
		rc = -EFAULT;
		goto fail_size_valid;
	}

	if (ipa_flt_alloc_cmd_buffers(ip, &desc, &cmd_pyld)) {
		rc = -ENOMEM;
		goto fail_size_valid;
	}

	/* flushing ipa internal hashable flt rules cache */
	memset(&flush, 0, sizeof(flush));
	if (ip == IPA_IP_v4)
		flush.v4_flt = true;
	else
		flush.v6_flt = true;
	ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
	reg_write_cmd.skip_pipeline_clear = false;
	reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
	reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
	reg_write_cmd.value = valmask.val;
	reg_write_cmd.value_mask = valmask.mask;
	cmd_pyld[0] = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_REGISTER_WRITE, &reg_write_cmd, false);
	if (!cmd_pyld[0]) {
		IPAERR("fail construct register_write imm cmd: IP %d\n", ip);
		rc = -EFAULT;
		goto fail_reg_write_construct;
	}
	desc[0].opcode = cmd_pyld[0]->opcode;
	desc[0].pyld = cmd_pyld[0]->data;
	desc[0].len = cmd_pyld[0]->len;
	desc[0].type = IPA_IMM_CMD_DESC;
	num_cmd++;

	hdr_idx = 0;
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i)) {
			IPADBG_LOW("skip %d - not filtering pipe\n", i);
			continue;
		}

		if (ipa_flt_skip_pipe_config(i)) {
			hdr_idx++;
			continue;
		}

		IPADBG_LOW("Prepare imm cmd for hdr at index %d for pipe %d\n",
			hdr_idx, i);

		mem_cmd.is_read = false;
		mem_cmd.skip_pipeline_clear = false;
		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
		mem_cmd.size = tbl_hdr_width;
		mem_cmd.system_addr = alloc_params.nhash_hdr.phys_base +
			hdr_idx * tbl_hdr_width;
		mem_cmd.local_addr = lcl_nhash_hdr +
			hdr_idx * tbl_hdr_width;
		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
		if (!cmd_pyld[num_cmd]) {
			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
				ip);
			rc = -ENOMEM;
			goto fail_imm_cmd_construct;
		}
		desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
		desc[num_cmd++].type = IPA_IMM_CMD_DESC;

		mem_cmd.is_read = false;
		mem_cmd.skip_pipeline_clear = false;
		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
		mem_cmd.size = tbl_hdr_width;
		mem_cmd.system_addr = alloc_params.hash_hdr.phys_base +
			hdr_idx * tbl_hdr_width;
		mem_cmd.local_addr = lcl_hash_hdr +
			hdr_idx * tbl_hdr_width;
		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
		if (!cmd_pyld[num_cmd]) {
			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
				ip);
			rc = -ENOMEM;
			goto fail_imm_cmd_construct;
		}
		desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
		desc[num_cmd++].type = IPA_IMM_CMD_DESC;

		hdr_idx++;
	}

	if (lcl_nhash) {
		mem_cmd.is_read = false;
		mem_cmd.skip_pipeline_clear = false;
		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
		mem_cmd.size = alloc_params.nhash_bdy.size;
		mem_cmd.system_addr = alloc_params.nhash_bdy.phys_base;
		mem_cmd.local_addr = lcl_nhash_bdy;
		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
		if (!cmd_pyld[num_cmd]) {
			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
				ip);
			rc = -ENOMEM;
			goto fail_imm_cmd_construct;
		}
		desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
		desc[num_cmd++].type = IPA_IMM_CMD_DESC;
	}
	if (lcl_hash) {
		mem_cmd.is_read = false;
		mem_cmd.skip_pipeline_clear = false;
		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
		mem_cmd.size = alloc_params.hash_bdy.size;
		mem_cmd.system_addr = alloc_params.hash_bdy.phys_base;
		mem_cmd.local_addr = lcl_hash_bdy;
		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
		if (!cmd_pyld[num_cmd]) {
			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
				ip);
			rc = -ENOMEM;
			goto fail_imm_cmd_construct;
		}
		desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
		desc[num_cmd++].type = IPA_IMM_CMD_DESC;
	}

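	/*
	 * At this point desc[] holds, in order: the hash-flush register
	 * write; per configured filtering pipe, one DMA write of the
	 * non-hashable header word and one of the hashable header word;
	 * then, for bodies residing in local memory, one DMA write per
	 * body image. All of them are sent together below.
	 */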
	if (ipa3_send_cmd(num_cmd, desc)) {
		IPAERR("fail to send immediate command\n");
		rc = -EFAULT;
		goto fail_imm_cmd_construct;
	}

	IPADBG_LOW("Hashable HEAD\n");
	IPA_DUMP_BUFF(alloc_params.hash_hdr.base,
		alloc_params.hash_hdr.phys_base, alloc_params.hash_hdr.size);

	IPADBG_LOW("Non-Hashable HEAD\n");
	IPA_DUMP_BUFF(alloc_params.nhash_hdr.base,
		alloc_params.nhash_hdr.phys_base, alloc_params.nhash_hdr.size);

	if (alloc_params.hash_bdy.size) {
		IPADBG_LOW("Hashable BODY\n");
		IPA_DUMP_BUFF(alloc_params.hash_bdy.base,
			alloc_params.hash_bdy.phys_base,
			alloc_params.hash_bdy.size);
	}

	if (alloc_params.nhash_bdy.size) {
		IPADBG_LOW("Non-Hashable BODY\n");
		IPA_DUMP_BUFF(alloc_params.nhash_bdy.base,
			alloc_params.nhash_bdy.phys_base,
			alloc_params.nhash_bdy.size);
	}

	__ipa_reap_sys_flt_tbls(ip, IPA_RULE_HASHABLE);
	__ipa_reap_sys_flt_tbls(ip, IPA_RULE_NON_HASHABLE);

	/*
	 * the success path falls through the labels below to release the
	 * transient buffers; rc is still 0 in that case
	 */
fail_imm_cmd_construct:
	for (i = 0; i < num_cmd; i++)
		ipahal_destroy_imm_cmd(cmd_pyld[i]);
fail_reg_write_construct:
	kfree(desc);
	kfree(cmd_pyld);
fail_size_valid:
	if (alloc_params.hash_hdr.size)
		ipahal_free_dma_mem(&alloc_params.hash_hdr);
	ipahal_free_dma_mem(&alloc_params.nhash_hdr);
	if (alloc_params.hash_bdy.size)
		ipahal_free_dma_mem(&alloc_params.hash_bdy);
	if (alloc_params.nhash_bdy.size)
		ipahal_free_dma_mem(&alloc_params.nhash_bdy);
prep_failed:
	return rc;
}

static int __ipa_validate_flt_rule(const struct ipa_flt_rule *rule,
	struct ipa3_rt_tbl **rt_tbl, enum ipa_ip_type ip)
{
	if (rule->action != IPA_PASS_TO_EXCEPTION) {
		if (!rule->eq_attrib_type) {
			if (!rule->rt_tbl_hdl) {
				IPAERR("invalid RT tbl\n");
				goto error;
			}

			*rt_tbl = ipa3_id_find(rule->rt_tbl_hdl);
			if (*rt_tbl == NULL) {
				IPAERR("RT tbl not found\n");
				goto error;
			}

			if ((*rt_tbl)->cookie != IPA_COOKIE) {
				IPAERR("RT table cookie is invalid\n");
				goto error;
			}
		} else {
			if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ?
				IPA_MEM_PART(v4_modem_rt_index_hi) :
				IPA_MEM_PART(v6_modem_rt_index_hi))) {
				IPAERR("invalid RT tbl\n");
				goto error;
			}
		}
	}

	if (rule->rule_id) {
		if (!(rule->rule_id & ipahal_get_rule_id_hi_bit())) {
			IPAERR("invalid rule_id provided 0x%x\n"
				"rule_id with bit 0x%x are auto generated\n",
				rule->rule_id, ipahal_get_rule_id_hi_bit());
			goto error;
		}
	}
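	/*
	 * Example (hypothetical bit value): if ipahal_get_rule_id_hi_bit()
	 * returned 0x200, a caller-supplied rule_id of 0x205 would pass
	 * the check above, while 0x005 would be rejected, since ids
	 * without the high bit are reserved for auto-generated rules.
	 */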

	return 0;

error:
	return -EPERM;
}

static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry,
	const struct ipa_flt_rule *rule, struct ipa3_rt_tbl *rt_tbl,
	struct ipa3_flt_tbl *tbl)
{
	int id;

	*entry = kmem_cache_zalloc(ipa3_ctx->flt_rule_cache, GFP_KERNEL);
	if (!*entry) {
		IPAERR("failed to alloc FLT rule object\n");
		goto error;
	}
	INIT_LIST_HEAD(&((*entry)->link));
	(*entry)->rule = *rule;
	(*entry)->cookie = IPA_COOKIE;
	(*entry)->rt_tbl = rt_tbl;
	(*entry)->tbl = tbl;
	if (rule->rule_id) {
		id = rule->rule_id;
	} else {
		id = ipa3_alloc_rule_id(&tbl->rule_ids);
		if (id < 0) {
			IPAERR("failed to allocate rule id\n");
			WARN_ON(1);
			goto rule_id_fail;
		}
	}
	(*entry)->rule_id = id;

	return 0;

rule_id_fail:
	kmem_cache_free(ipa3_ctx->flt_rule_cache, *entry);
error:
	return -EPERM;
}

static int __ipa_finish_flt_rule_add(struct ipa3_flt_tbl *tbl,
	struct ipa3_flt_entry *entry, u32 *rule_hdl)
{
	int id;

	tbl->rule_cnt++;
	if (entry->rt_tbl)
		entry->rt_tbl->ref_cnt++;
	id = ipa3_id_alloc(entry);
	if (id < 0) {
		IPAERR("failed to add to tree\n");
		WARN_ON(1);
	}
	*rule_hdl = id;
	entry->id = id;
	IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt);

	return 0;
}

static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip,
	const struct ipa_flt_rule *rule, u8 add_rear,
	u32 *rule_hdl)
{
	struct ipa3_flt_entry *entry;
	struct ipa3_rt_tbl *rt_tbl = NULL;

	if (__ipa_validate_flt_rule(rule, &rt_tbl, ip))
		goto error;

	if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl))
		goto error;

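	/*
	 * Example: with a rule list [A, B, D] where D is a sticky default
	 * rule at the rear, add_rear inserts C just before D, giving
	 * [A, B, C, D]; without sticky_rear it is appended after D, and
	 * without add_rear the new rule is pushed to the head of the list.
	 */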
	if (add_rear) {
		if (tbl->sticky_rear)
			list_add_tail(&entry->link,
				tbl->head_flt_rule_list.prev);
		else
			list_add_tail(&entry->link, &tbl->head_flt_rule_list);
	} else {
		list_add(&entry->link, &tbl->head_flt_rule_list);
	}

	__ipa_finish_flt_rule_add(tbl, entry, rule_hdl);

	return 0;

error:
	return -EPERM;
}

static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl,
	const struct ipa_flt_rule *rule,
	u32 *rule_hdl,
	enum ipa_ip_type ip,
	struct ipa3_flt_entry **add_after_entry)
{
	struct ipa3_flt_entry *entry;
	struct ipa3_rt_tbl *rt_tbl = NULL;

	if (!*add_after_entry)
		goto error;

	if (rule == NULL || rule_hdl == NULL) {
		IPAERR("bad parms rule=%p rule_hdl=%p\n", rule,
			rule_hdl);
		goto error;
	}

	if (__ipa_validate_flt_rule(rule, &rt_tbl, ip))
		goto error;

	if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl))
		goto error;

	list_add(&entry->link, &((*add_after_entry)->link));

	__ipa_finish_flt_rule_add(tbl, entry, rule_hdl);

	/*
	 * prepare for next insertion
	 */
	*add_after_entry = entry;

	return 0;

error:
	*add_after_entry = NULL;
	return -EPERM;
}

static int __ipa_del_flt_rule(u32 rule_hdl)
{
	struct ipa3_flt_entry *entry;
	int id;

	entry = ipa3_id_find(rule_hdl);
	if (entry == NULL) {
		IPAERR("lookup failed\n");
		return -EINVAL;
	}

	if (entry->cookie != IPA_COOKIE) {
		IPAERR("bad params\n");
		return -EINVAL;
	}
	id = entry->id;

	list_del(&entry->link);
	entry->tbl->rule_cnt--;
	if (entry->rt_tbl)
		entry->rt_tbl->ref_cnt--;
	IPADBG("del flt rule rule_cnt=%d rule_id=%d\n",
		entry->tbl->rule_cnt, entry->rule_id);
	entry->cookie = 0;
	/* if rule id was allocated from idr, remove it */
	if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
		idr_remove(&entry->tbl->rule_ids, entry->rule_id);

	kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);

	/* remove the handle from the database */
	ipa3_id_remove(id);

	return 0;
}

static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule,
	enum ipa_ip_type ip)
{
	struct ipa3_flt_entry *entry;
	struct ipa3_rt_tbl *rt_tbl = NULL;

	entry = ipa3_id_find(frule->rule_hdl);
	if (entry == NULL) {
		IPAERR("lookup failed\n");
		goto error;
	}

	if (entry->cookie != IPA_COOKIE) {
		IPAERR("bad params\n");
		goto error;
	}

	if (entry->rt_tbl)
		entry->rt_tbl->ref_cnt--;

	if (frule->rule.action != IPA_PASS_TO_EXCEPTION) {
		if (!frule->rule.eq_attrib_type) {
			if (!frule->rule.rt_tbl_hdl) {
				IPAERR("invalid RT tbl\n");
				goto error;
			}

			rt_tbl = ipa3_id_find(frule->rule.rt_tbl_hdl);
			if (rt_tbl == NULL) {
				IPAERR("RT tbl not found\n");
				goto error;
			}

			if (rt_tbl->cookie != IPA_COOKIE) {
				IPAERR("RT table cookie is invalid\n");
				goto error;
			}
		} else {
			if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ?
				IPA_MEM_PART(v4_modem_rt_index_hi) :
				IPA_MEM_PART(v6_modem_rt_index_hi))) {
				IPAERR("invalid RT tbl\n");
				goto error;
			}
		}
	}

	entry->rule = frule->rule;
	entry->rt_tbl = rt_tbl;
	if (entry->rt_tbl)
		entry->rt_tbl->ref_cnt++;
	entry->hw_len = 0;
	entry->prio = 0;

	return 0;

error:
	return -EPERM;
}

static int __ipa_add_flt_get_ep_idx(enum ipa_client_type ep, int *ipa_ep_idx)
{
	*ipa_ep_idx = ipa3_get_ep_mapping(ep);
	if (*ipa_ep_idx < 0) {
		IPAERR("ep not valid ep=%d\n", ep);
		return -EINVAL;
	}
	if (ipa3_ctx->ep[*ipa_ep_idx].valid == 0)
		IPADBG("ep not connected ep_idx=%d\n", *ipa_ep_idx);

	if (!ipa_is_ep_support_flt(*ipa_ep_idx)) {
		IPAERR("ep does not support filtering ep=%d\n", ep);
		return -EINVAL;
	}

	return 0;
}

static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
	const struct ipa_flt_rule *rule, u8 add_rear,
	u32 *rule_hdl)
{
	struct ipa3_flt_tbl *tbl;
	int ipa_ep_idx;

	if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) {
		IPAERR("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
			rule_hdl, ep);

		return -EINVAL;
	}

	if (__ipa_add_flt_get_ep_idx(ep, &ipa_ep_idx))
		return -EINVAL;

	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][ip];
	IPADBG_LOW("add ep flt rule ip=%d ep=%d\n", ip, ep);

	return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
}

/**
 * ipa3_add_flt_rule() - Add the specified filtering rules to SW and optionally
 * commit to IPA HW
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
{
	int i;
	int result;

	if (rules == NULL || rules->num_rules == 0 ||
		rules->ip >= IPA_IP_MAX) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < rules->num_rules; i++) {
		if (!rules->global)
			result = __ipa_add_ep_flt_rule(rules->ip, rules->ep,
				&rules->rules[i].rule,
				rules->rules[i].at_rear,
				&rules->rules[i].flt_rule_hdl);
		else
			result = -1;

		if (result) {
			IPAERR("failed to add flt rule %d\n", i);
			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
		} else {
			rules->rules[i].status = 0;
		}
	}

	if (rules->global) {
		IPAERR("no support for global filter rules\n");
		result = -EPERM;
		goto bail;
	}

	if (rules->commit)
		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
			result = -EPERM;
			goto bail;
		}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}

/**
 * ipa3_add_flt_rule_after() - Add the specified filtering rules to SW after
 * the rule whose handle is given and optionally commit to IPA HW
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules)
{
	int i;
	int result;
	struct ipa3_flt_tbl *tbl;
	int ipa_ep_idx;
	struct ipa3_flt_entry *entry;

	if (rules == NULL || rules->num_rules == 0 ||
		rules->ip >= IPA_IP_MAX) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	if (rules->ep >= IPA_CLIENT_MAX) {
		IPAERR("bad parms ep=%d\n", rules->ep);
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);

	if (__ipa_add_flt_get_ep_idx(rules->ep, &ipa_ep_idx)) {
		result = -EINVAL;
		goto bail;
	}

	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][rules->ip];

	entry = ipa3_id_find(rules->add_after_hdl);
	if (entry == NULL) {
		IPAERR("lookup failed\n");
		result = -EINVAL;
		goto bail;
	}

	if (entry->tbl != tbl) {
		IPAERR("given entry does not match the table\n");
		result = -EINVAL;
		goto bail;
	}

	if (tbl->sticky_rear)
		if (&entry->link == tbl->head_flt_rule_list.prev) {
			IPAERR("cannot add rule at end of a sticky table\n");
			result = -EINVAL;
			goto bail;
		}

	IPADBG("add ep flt rule ip=%d ep=%d after hdl %d\n",
		rules->ip, rules->ep, rules->add_after_hdl);

	/*
	 * We add all rules one after the other. If one insertion fails, it
	 * cuts the chain: all following rules receive fail status, since
	 * the subsequent calls to __ipa_add_flt_rule_after() fail with
	 * entry == NULL.
	 */
	for (i = 0; i < rules->num_rules; i++) {
		result = __ipa_add_flt_rule_after(tbl,
			&rules->rules[i].rule,
			&rules->rules[i].flt_rule_hdl,
			rules->ip,
			&entry);

		if (result) {
			IPAERR("failed to add flt rule %d\n", i);
			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
		} else {
			rules->rules[i].status = 0;
		}
	}

	if (rules->commit)
		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
			IPAERR("failed to commit flt rules\n");
			result = -EPERM;
			goto bail;
		}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}

/**
 * ipa3_del_flt_rule() - Remove the specified filtering rules from SW and
 * optionally commit to IPA HW
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
{
	int i;
	int result;

	if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < hdls->num_hdls; i++) {
		if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
			IPAERR("failed to del flt rule %i\n", i);
			hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
		} else {
			hdls->hdl[i].status = 0;
		}
	}

	if (hdls->commit)
		if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) {
			result = -EPERM;
			goto bail;
		}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}

/**
 * ipa3_mdfy_flt_rule() - Modify the specified filtering rules in SW and
 * optionally commit to IPA HW
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
{
	int i;
	int result;

	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < hdls->num_rules; i++) {
		if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) {
			IPAERR("failed to mdfy flt rule %i\n", i);
			hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
		} else {
			hdls->rules[i].status = 0;
		}
	}

	if (hdls->commit)
		if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) {
			result = -EPERM;
			goto bail;
		}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}

/**
 * ipa3_commit_flt() - Commit the current SW filtering table of specified type
 * to IPA HW
 * @ip: [in] the family of filtering tables
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_commit_flt(enum ipa_ip_type ip)
{
	int result;

	if (ip >= IPA_IP_MAX) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);

	if (ipa3_ctx->ctrl->ipa3_commit_flt(ip)) {
		result = -EPERM;
		goto bail;
	}
	result = 0;

bail:
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}

/**
 * ipa3_reset_flt() - Reset the current SW filtering table of specified type
 * (does not commit to HW)
 * @ip: [in] the family of filtering tables
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_reset_flt(enum ipa_ip_type ip)
{
	struct ipa3_flt_tbl *tbl;
	struct ipa3_flt_entry *entry;
	struct ipa3_flt_entry *next;
	int i;
	int id;

	if (ip >= IPA_IP_MAX) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i))
			continue;

		tbl = &ipa3_ctx->flt_tbl[i][ip];
		list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list,
			link) {
			if (ipa3_id_find(entry->id) == NULL) {
				WARN_ON(1);
				mutex_unlock(&ipa3_ctx->lock);
				return -EFAULT;
			}
			list_del(&entry->link);
			entry->tbl->rule_cnt--;
			if (entry->rt_tbl)
				entry->rt_tbl->ref_cnt--;
			/* if rule id was allocated from idr, remove it */
			if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
				idr_remove(&entry->tbl->rule_ids,
					entry->rule_id);
			entry->cookie = 0;
			id = entry->id;
			kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);

			/* remove the handle from the database */
			ipa3_id_remove(id);
		}
	}
	mutex_unlock(&ipa3_ctx->lock);

	return 0;
}

void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx)
{
	struct ipa3_flt_tbl *tbl;
	struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx];
	struct ipa_flt_rule rule;

	if (!ipa_is_ep_support_flt(ipa_ep_idx)) {
		IPADBG("cannot add flt rules to non filtering pipe num %d\n",
			ipa_ep_idx);
		return;
	}

	memset(&rule, 0, sizeof(rule));

	mutex_lock(&ipa3_ctx->lock);
	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
	rule.action = IPA_PASS_TO_EXCEPTION;
	__ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, true,
		&ep->dflt_flt4_rule_hdl);
	ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
	tbl->sticky_rear = true;

	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
	rule.action = IPA_PASS_TO_EXCEPTION;
	__ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, true,
		&ep->dflt_flt6_rule_hdl);
	ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
	tbl->sticky_rear = true;
	mutex_unlock(&ipa3_ctx->lock);
}

void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx)
{
	struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx];
	struct ipa3_flt_tbl *tbl;

	mutex_lock(&ipa3_ctx->lock);
	if (ep->dflt_flt4_rule_hdl) {
		tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
		__ipa_del_flt_rule(ep->dflt_flt4_rule_hdl);
		ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
		/* Reset the sticky flag. */
		tbl->sticky_rear = false;
		ep->dflt_flt4_rule_hdl = 0;
	}
	if (ep->dflt_flt6_rule_hdl) {
		tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
		__ipa_del_flt_rule(ep->dflt_flt6_rule_hdl);
		ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
		/* Reset the sticky flag. */
		tbl->sticky_rear = false;
		ep->dflt_flt6_rule_hdl = 0;
	}
	mutex_unlock(&ipa3_ctx->lock);
}

/**
 * ipa3_set_flt_tuple_mask() - Sets the flt tuple masking for the given pipe
 *  Pipe must be for AP EP (not modem) and support filtering;
 *  updates the filtering masking values without changing the rt ones.
 *
 * @pipe_idx: filter pipe index to configure the tuple masking
 * @tuple: the tuple members masking
 * Returns: 0 on success, negative on failure
 *
 */
int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple)
{
	struct ipahal_reg_fltrt_hash_tuple fltrt_tuple;

	if (!tuple) {
		IPAERR("bad tuple\n");
		return -EINVAL;
	}

	if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
		IPAERR("bad pipe index!\n");
		return -EINVAL;
	}

	if (!ipa_is_ep_support_flt(pipe_idx)) {
		IPAERR("pipe %d not filtering pipe\n", pipe_idx);
		return -EINVAL;
	}

	if (ipa_is_modem_pipe(pipe_idx)) {
		IPAERR("modem pipe tuple is not configured by AP\n");
		return -EINVAL;
	}

	/* read-modify-write so the rt tuple of the pipe is preserved */
	ipahal_read_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
		pipe_idx, &fltrt_tuple);
	fltrt_tuple.flt = *tuple;
	ipahal_write_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
		pipe_idx, &fltrt_tuple);

	return 0;
}
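
/*
 * Usage sketch (illustrative only; the members set on the tuple are
 * assumptions about struct ipahal_reg_hash_tuple, see the ipahal reg
 * header for the actual fields): hash pipe 4 on src/dst IP only:
 *
 *	struct ipahal_reg_hash_tuple t = { 0 };
 *
 *	t.src_ip_addr = true;
 *	t.dst_ip_addr = true;
 *	if (ipa3_set_flt_tuple_mask(4, &t))
 *		IPAERR("failed to set flt tuple mask\n");
 */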

/**
 * ipa3_flt_read_tbl_from_hw() - Read filtering table from IPA HW
 * @pipe_idx: IPA endpoint index
 * @ip_type: IPv4 or IPv6 table
 * @hashable: hashable or non-hashable table
 * @entry: array to fill the table entries
 * @num_entry: number of entries in entry array. set by the caller to indicate
 *  entry array size. Then set by this function as an output parameter to
 *  indicate the number of entries in the array
 *
 * This function reads the filtering table from IPA SRAM and prepares an array
 * of entries. This function is mainly used for debugging purposes.
 *
 * If empty table or Modem Apps table, zero entries will be returned.
 *
 * Returns: 0 on success, negative on failure
 */
int ipa3_flt_read_tbl_from_hw(u32 pipe_idx, enum ipa_ip_type ip_type,
	bool hashable, struct ipahal_flt_rule_entry entry[], int *num_entry)
{
	void *ipa_sram_mmio;
	u64 hdr_base_ofst;
	int tbl_entry_idx;
	int i;
	int res = 0;
	u64 tbl_addr;
	bool is_sys;
	u8 *rule_addr;
	struct ipa_mem_buffer *sys_tbl_mem;
	int rule_idx;

	IPADBG("pipe_idx=%d ip=%d hashable=%d entry=0x%p num_entry=0x%p\n",
		pipe_idx, ip_type, hashable, entry, num_entry);

	if (pipe_idx >= ipa3_ctx->ipa_num_pipes || ip_type >= IPA_IP_MAX ||
		!entry || !num_entry) {
		IPAERR("Invalid params\n");
		return -EFAULT;
	}

	if (!ipa_is_ep_support_flt(pipe_idx)) {
		IPAERR("pipe %d does not support filtering\n", pipe_idx);
		return -EINVAL;
	}

	/* map IPA SRAM */
	ipa_sram_mmio = ioremap(ipa3_ctx->ipa_wrapper_base +
		ipa3_ctx->ctrl->ipa_reg_base_ofst +
		ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
			ipa3_ctx->smem_restricted_bytes / 4),
		ipa3_ctx->smem_sz);
	if (!ipa_sram_mmio) {
		IPAERR("fail to ioremap IPA SRAM\n");
		return -ENOMEM;
	}

	memset(entry, 0, sizeof(*entry) * (*num_entry));
	if (hashable) {
		if (ip_type == IPA_IP_v4)
			hdr_base_ofst =
				IPA_MEM_PART(v4_flt_hash_ofst);
		else
			hdr_base_ofst =
				IPA_MEM_PART(v6_flt_hash_ofst);
	} else {
		if (ip_type == IPA_IP_v4)
			hdr_base_ofst =
				IPA_MEM_PART(v4_flt_nhash_ofst);
		else
			hdr_base_ofst =
				IPA_MEM_PART(v6_flt_nhash_ofst);
	}

	/* calculate the index of the tbl entry */
	tbl_entry_idx = 1; /* skip the bitmap */
	for (i = 0; i < pipe_idx; i++)
		if (ipa3_ctx->ep_flt_bitmap & (1 << i))
			tbl_entry_idx++;
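	/*
	 * Worked example (illustrative bitmap): with
	 * ep_flt_bitmap == 0x16 (pipes 1, 2 and 4 support filtering) and
	 * pipe_idx == 4, pipes 1 and 2 precede it, so tbl_entry_idx == 3
	 * (entry 0 is the bitmap word itself).
	 */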

	IPADBG("hdr_base_ofst=0x%llx tbl_entry_idx=%d\n",
		hdr_base_ofst, tbl_entry_idx);

	res = ipahal_fltrt_read_addr_from_hdr(ipa_sram_mmio + hdr_base_ofst,
		tbl_entry_idx, &tbl_addr, &is_sys);
	if (res) {
		IPAERR("failed to read table address from header structure\n");
		goto bail;
	}
	IPADBG("flt tbl ep=%d: tbl_addr=0x%llx is_sys=%d\n",
		pipe_idx, tbl_addr, is_sys);
	if (!tbl_addr) {
		IPAERR("invalid flt tbl addr\n");
		res = -EFAULT;
		goto bail;
	}

	/* for tables residing in DDR, access them via the virtual memory */
	if (is_sys) {
		sys_tbl_mem = &ipa3_ctx->flt_tbl[pipe_idx][ip_type].
			curr_mem[hashable ? IPA_RULE_HASHABLE :
			IPA_RULE_NON_HASHABLE];
		if (sys_tbl_mem->phys_base &&
			sys_tbl_mem->phys_base != tbl_addr) {
			IPAERR("mismatch addr: parsed=%llx sw=%pad\n",
				tbl_addr, &sys_tbl_mem->phys_base);
		}
		if (sys_tbl_mem->phys_base)
			rule_addr = sys_tbl_mem->base;
		else
			rule_addr = NULL;
	} else {
		rule_addr = ipa_sram_mmio + hdr_base_ofst + tbl_addr;
	}

	IPADBG("First rule addr 0x%p\n", rule_addr);

	if (!rule_addr) {
		/* Modem table in system memory or empty table */
		*num_entry = 0;
		goto bail;
	}

	rule_idx = 0;
	while (rule_idx < *num_entry) {
		res = ipahal_flt_parse_hw_rule(rule_addr, &entry[rule_idx]);
		if (res) {
			IPAERR("failed parsing flt rule\n");
			goto bail;
		}

		IPADBG("rule_size=%d\n", entry[rule_idx].rule_size);
		if (!entry[rule_idx].rule_size)
			break;

		rule_addr += entry[rule_idx].rule_size;
		rule_idx++;
	}
	*num_entry = rule_idx;
bail:
	iounmap(ipa_sram_mmio);
	/* return the read/parse result instead of unconditional success */
	return res;
}