/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "ipa_i.h"
#include "ipahal/ipahal.h"
#include "ipahal/ipahal_fltrt.h"

#define IPA_FLT_TABLE_INDEX_NOT_FOUND (-1)
#define IPA_FLT_STATUS_OF_ADD_FAILED (-1)
#define IPA_FLT_STATUS_OF_DEL_FAILED (-1)
#define IPA_FLT_STATUS_OF_MDFY_FAILED (-1)

#define IPA_FLT_GET_RULE_TYPE(__entry) \
	( \
	((__entry)->rule.hashable) ? \
	(IPA_RULE_HASHABLE):(IPA_RULE_NON_HASHABLE) \
	)

/**
 * ipa3_generate_flt_hw_rule() - generates the filtering hardware rule
 * @ip: the ip address family type
 * @entry: filtering entry
 * @buf: output buffer. buf == NULL means the caller only wants to know
 *	the size of the rule as seen by HW, so no valid buffer was
 *	passed and a scratch buffer is used instead. With this scheme
 *	the rule is generated twice: once to learn its size using the
 *	scratch buffer, and a second time to write the rule into the
 *	caller-supplied buffer, which is of the required size
 *
 * Returns: 0 on success, negative on failure
 *
 * The caller needs to hold any needed locks to ensure integrity
 */
static int ipa3_generate_flt_hw_rule(enum ipa_ip_type ip,
		struct ipa3_flt_entry *entry, u8 *buf)
{
	struct ipahal_flt_rule_gen_params gen_params;
	int res = 0;

	memset(&gen_params, 0, sizeof(gen_params));

	gen_params.ipt = ip;
	if (entry->rt_tbl)
		gen_params.rt_tbl_idx = entry->rt_tbl->idx;
	else
		gen_params.rt_tbl_idx = entry->rule.rt_tbl_idx;

	gen_params.priority = entry->prio;
	gen_params.id = entry->rule_id;
	gen_params.rule = (const struct ipa_flt_rule *)&entry->rule;

	res = ipahal_flt_generate_hw_rule(&gen_params, &entry->hw_len, buf);
	if (res)
		IPAERR("failed to generate flt h/w rule\n");

	/* propagate the HAL result so callers can detect the failure */
	return res;
}
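
/*
 * Illustrative sketch (not part of the driver): the calling pattern
 * implied by the @buf semantics above. A first call with buf == NULL
 * only fills entry->hw_len via the HAL scratch buffer; once a buffer
 * of at least hw_len bytes is available, a second call writes the
 * actual rule image into it.
 */
#if 0
static int example_two_pass_gen(enum ipa_ip_type ip,
		struct ipa3_flt_entry *entry, u8 *tbl_buf)
{
	/* pass 1: size only; entry->hw_len is filled as a side effect */
	if (ipa3_generate_flt_hw_rule(ip, entry, NULL))
		return -EPERM;

	/* pass 2: write the rule into the caller-supplied buffer */
	return ipa3_generate_flt_hw_rule(ip, entry, tbl_buf);
}
#endif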

static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip, enum ipa_rule_type rlt)
{
	struct ipa3_flt_tbl *tbl;
	int i;

	IPADBG_LOW("reaping sys flt tbls ip=%d rlt=%d\n", ip, rlt);

	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i))
			continue;

		tbl = &ipa3_ctx->flt_tbl[i][ip];
		if (tbl->prev_mem[rlt].phys_base) {
			IPADBG_LOW("reaping flt tbl (prev) pipe=%d\n", i);
			ipahal_free_dma_mem(&tbl->prev_mem[rlt]);
		}

		if (list_empty(&tbl->head_flt_rule_list)) {
			if (tbl->curr_mem[rlt].phys_base) {
				IPADBG_LOW("reaping flt tbl (curr) pipe=%d\n",
					i);
				ipahal_free_dma_mem(&tbl->curr_mem[rlt]);
			}
		}
	}
}

/**
 * ipa_prep_flt_tbl_for_cmt() - prepare the flt table for commit:
 * assign priorities to the rules, calculate their sizes and compute
 * the overall table size
 * @ip: the ip address family type
 * @tbl: the flt tbl to be prepared
 * @pipe_idx: the ep pipe appropriate for the given tbl
 *
 * Return: 0 on success, negative on failure
 */
static int ipa_prep_flt_tbl_for_cmt(enum ipa_ip_type ip,
	struct ipa3_flt_tbl *tbl, int pipe_idx)
{
	struct ipa3_flt_entry *entry;
	int prio_i;
	int max_prio;
	u32 hdr_width;

	tbl->sz[IPA_RULE_HASHABLE] = 0;
	tbl->sz[IPA_RULE_NON_HASHABLE] = 0;

	max_prio = ipahal_get_rule_max_priority();

	prio_i = max_prio;
	list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
		if (entry->rule.max_prio) {
			entry->prio = max_prio;
		} else {
			if (ipahal_rule_decrease_priority(&prio_i)) {
				IPAERR("cannot decrease rule priority - %d\n",
					prio_i);
				return -EPERM;
			}
			entry->prio = prio_i;
		}

		if (ipa3_generate_flt_hw_rule(ip, entry, NULL)) {
			IPAERR("failed to calculate HW FLT rule size\n");
			return -EPERM;
		}
		IPADBG_LOW("pipe %d rule_id(handle) %u hw_len %d priority %u\n",
			pipe_idx, entry->rule_id, entry->hw_len, entry->prio);

		if (entry->rule.hashable)
			tbl->sz[IPA_RULE_HASHABLE] += entry->hw_len;
		else
			tbl->sz[IPA_RULE_NON_HASHABLE] += entry->hw_len;
	}

	if ((tbl->sz[IPA_RULE_HASHABLE] +
		tbl->sz[IPA_RULE_NON_HASHABLE]) == 0) {
		IPADBG_LOW("flt tbl pipe %d has zero total size\n",
			pipe_idx);
		return 0;
	}

	hdr_width = ipahal_get_hw_tbl_hdr_width();

	/* for the header word */
	if (tbl->sz[IPA_RULE_HASHABLE])
		tbl->sz[IPA_RULE_HASHABLE] += hdr_width;
	if (tbl->sz[IPA_RULE_NON_HASHABLE])
		tbl->sz[IPA_RULE_NON_HASHABLE] += hdr_width;

	IPADBG_LOW("FLT tbl pipe idx %d hash sz %u non-hash sz %u\n", pipe_idx,
		tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]);

	return 0;
}
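
/*
 * Worked example for the priority assignment above (X is whatever
 * ipahal_get_rule_max_priority() returns; the concrete value is
 * HAL-defined and not assumed here). For a table with rules A, B, C
 * where only A has max_prio set:
 *
 *	A->prio = X
 *	B->prio = result of one ipahal_rule_decrease_priority() step
 *	C->prio = result of a second decrease step
 *
 * Each rule-type size is then the sum of the member rules' hw_len
 * values, plus one table-header word (hdr_width) if non-empty.
 */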

/**
 * ipa_translate_flt_tbl_to_hw_fmt() - translate the flt driver structures
 * (rules and tables) to HW format and fill the given buffers with the
 * result
 * @ip: the ip address family type
 * @rlt: the type of the rules to translate (hashable or non-hashable)
 * @base: the rules body buffer to be filled
 * @hdr: the rules header (addresses/offsets) buffer to be filled
 * @body_ofst: the offset of the rules body from the rules header at
 *	IPA SRAM
 *
 * Returns: 0 on success, negative on failure
 *
 * The caller needs to hold any needed locks to ensure integrity
 */
static int ipa_translate_flt_tbl_to_hw_fmt(enum ipa_ip_type ip,
	enum ipa_rule_type rlt, u8 *base, u8 *hdr, u32 body_ofst)
{
	u64 offset;
	u8 *body_i;
	int res;
	struct ipa3_flt_entry *entry;
	u8 *tbl_mem_buf;
	struct ipa_mem_buffer tbl_mem;
	struct ipa3_flt_tbl *tbl;
	int i;
	int hdr_idx = 0;

	body_i = base;
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i))
			continue;
		tbl = &ipa3_ctx->flt_tbl[i][ip];
		if (tbl->sz[rlt] == 0) {
			hdr_idx++;
			continue;
		}
		if (tbl->in_sys[rlt]) {
			/* only body (no header) */
			tbl_mem.size = tbl->sz[rlt] -
				ipahal_get_hw_tbl_hdr_width();
			if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) {
				IPAERR("fail to alloc sys tbl of size %d\n",
					tbl_mem.size);
				goto err;
			}

			if (ipahal_fltrt_write_addr_to_hdr(tbl_mem.phys_base,
				hdr, hdr_idx, true)) {
				IPAERR("fail to wrt sys tbl addr to hdr\n");
				goto hdr_update_fail;
			}

			tbl_mem_buf = tbl_mem.base;

			/* generate the rule-set */
			list_for_each_entry(entry, &tbl->head_flt_rule_list,
				link) {
				if (IPA_FLT_GET_RULE_TYPE(entry) != rlt)
					continue;
				res = ipa3_generate_flt_hw_rule(
					ip, entry, tbl_mem_buf);
				if (res) {
					IPAERR("failed to gen HW FLT rule\n");
					goto hdr_update_fail;
				}
				tbl_mem_buf += entry->hw_len;
			}

			if (tbl->curr_mem[rlt].phys_base) {
				WARN_ON(tbl->prev_mem[rlt].phys_base);
				tbl->prev_mem[rlt] = tbl->curr_mem[rlt];
			}
			tbl->curr_mem[rlt] = tbl_mem;
		} else {
			offset = body_i - base + body_ofst;

			/* update the hdr at the right index */
			if (ipahal_fltrt_write_addr_to_hdr(offset, hdr,
				hdr_idx, true)) {
				IPAERR("fail to wrt lcl tbl ofst to hdr\n");
				goto hdr_update_fail;
			}

			/* generate the rule-set */
			list_for_each_entry(entry, &tbl->head_flt_rule_list,
				link) {
				if (IPA_FLT_GET_RULE_TYPE(entry) != rlt)
					continue;
				res = ipa3_generate_flt_hw_rule(
					ip, entry, body_i);
				if (res) {
					IPAERR("failed to gen HW FLT rule\n");
					goto err;
				}
				body_i += entry->hw_len;
			}

			/*
			 * advance body_i to the next table alignment, as
			 * local tables are ordered back-to-back
			 */
			body_i += ipahal_get_lcl_tbl_addr_alignment();
			body_i = (u8 *)((long)body_i &
				~ipahal_get_lcl_tbl_addr_alignment());
		}
		hdr_idx++;
	}

	return 0;

hdr_update_fail:
	ipahal_free_dma_mem(&tbl_mem);
err:
	return -EPERM;
}
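
/*
 * A minimal sketch of the pointer round-up used above. It assumes,
 * as that code appears to, that ipahal_get_lcl_tbl_addr_alignment()
 * returns an alignment mask (alignment - 1): adding the mask and then
 * clearing its bits rounds the pointer up to the next boundary, while
 * leaving an already-aligned pointer unchanged.
 */
#if 0
static u8 *example_align_up(u8 *p, unsigned long mask)
{
	/* e.g. mask == 127 rounds p up to a 128-byte boundary */
	return (u8 *)(((unsigned long)p + mask) & ~mask);
}
#endif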

/**
 * ipa_generate_flt_hw_tbl_img() - generates the flt hw tbls.
 * Headers and bodies are created in buffers that will later be filled
 * into the local memory (SRAM)
 * @ip: the ip address family type
 * @alloc_params: in and out parameters for the allocations of the buffers
 *	4 buffers: hdr and bdy, each hashable and non-hashable
 *
 * Return: 0 on success, negative on failure
 */
static int ipa_generate_flt_hw_tbl_img(enum ipa_ip_type ip,
	struct ipahal_fltrt_alloc_imgs_params *alloc_params)
{
	u32 hash_bdy_start_ofst, nhash_bdy_start_ofst;
	int rc = 0;

	if (ip == IPA_IP_v4) {
		nhash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_nhash_ofst) -
			IPA_MEM_PART(v4_flt_nhash_ofst);
		hash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_hash_ofst) -
			IPA_MEM_PART(v4_flt_hash_ofst);
	} else {
		nhash_bdy_start_ofst = IPA_MEM_PART(apps_v6_flt_nhash_ofst) -
			IPA_MEM_PART(v6_flt_nhash_ofst);
		hash_bdy_start_ofst = IPA_MEM_PART(apps_v6_flt_hash_ofst) -
			IPA_MEM_PART(v6_flt_hash_ofst);
	}

	if (ipahal_fltrt_allocate_hw_tbl_imgs(alloc_params)) {
		IPAERR("fail to allocate FLT HW TBL images. IP %d\n", ip);
		rc = -ENOMEM;
		goto allocate_failed;
	}

	if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_HASHABLE,
		alloc_params->hash_bdy.base, alloc_params->hash_hdr.base,
		hash_bdy_start_ofst)) {
		IPAERR("fail to translate hashable flt tbls to hw format\n");
		rc = -EPERM;
		goto translate_fail;
	}
	if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_NON_HASHABLE,
		alloc_params->nhash_bdy.base, alloc_params->nhash_hdr.base,
		nhash_bdy_start_ofst)) {
		IPAERR("fail to translate non-hash flt tbls to hw format\n");
		rc = -EPERM;
		goto translate_fail;
	}

	return rc;

translate_fail:
	if (alloc_params->hash_hdr.size)
		ipahal_free_dma_mem(&alloc_params->hash_hdr);
	ipahal_free_dma_mem(&alloc_params->nhash_hdr);
	if (alloc_params->hash_bdy.size)
		ipahal_free_dma_mem(&alloc_params->hash_bdy);
	if (alloc_params->nhash_bdy.size)
		ipahal_free_dma_mem(&alloc_params->nhash_bdy);
allocate_failed:
	return rc;
}

/**
 * ipa_flt_valid_lcl_tbl_size() - validate that the space allocated for
 * flt tbl bodies in SRAM is enough for the commit
 * @ipt: the ip address family type
 * @rlt: the rule type (hashable or non-hashable)
 * @bdy: the buffer of the generated tbl bodies, whose size is checked
 *
 * Return: true if enough space is available, false otherwise
 */
static bool ipa_flt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
	enum ipa_rule_type rlt, struct ipa_mem_buffer *bdy)
{
	u16 avail;

	if (!bdy) {
		IPAERR("Bad parameters, bdy = NULL\n");
		return false;
	}

	if (ipt == IPA_IP_v4)
		avail = (rlt == IPA_RULE_HASHABLE) ?
			IPA_MEM_PART(apps_v4_flt_hash_size) :
			IPA_MEM_PART(apps_v4_flt_nhash_size);
	else
		avail = (rlt == IPA_RULE_HASHABLE) ?
			IPA_MEM_PART(apps_v6_flt_hash_size) :
			IPA_MEM_PART(apps_v6_flt_nhash_size);

	if (bdy->size <= avail)
		return true;

	IPAERR("tbl too big, needed %d avail %d ipt %d rlt %d\n",
		bdy->size, avail, ipt, rlt);
	return false;
}

/**
 * ipa_flt_alloc_cmd_buffers() - allocate descriptors and immediate
 * commands payload pointers buffers for the headers and bodies of the
 * flt structure, as well as a slot for the flush immediate command
 * @ip: the ip address family type
 * @desc: [OUT] descriptor buffer
 * @cmd_pyld: [OUT] immediate commands payload pointers buffer
 *
 * Return: 0 on success, negative on failure
 */
static int ipa_flt_alloc_cmd_buffers(enum ipa_ip_type ip,
	struct ipa3_desc **desc, struct ipahal_imm_cmd_pyld ***cmd_pyld)
{
	u16 entries;

	/* +3: 2 for bodies (hashable and non-hashable) and 1 for flushing */
	entries = (ipa3_ctx->ep_flt_num) * 2 + 3;

	*desc = kcalloc(entries, sizeof(**desc), GFP_ATOMIC);
	if (*desc == NULL) {
		IPAERR("fail to alloc desc blob ip %d\n", ip);
		goto fail_desc_alloc;
	}

	*cmd_pyld = kcalloc(entries, sizeof(**cmd_pyld), GFP_ATOMIC);
	if (*cmd_pyld == NULL) {
		IPAERR("fail to alloc cmd pyld blob ip %d\n", ip);
		goto fail_cmd_alloc;
	}

	return 0;

fail_cmd_alloc:
	kfree(*desc);
fail_desc_alloc:
	return -ENOMEM;
}
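
/*
 * Worked example for the sizing above: with ep_flt_num == N
 * filtering-capable pipes, the commit path issues at most
 *
 *	entries = 2 * N + 3
 *
 * commands: one hashable and one non-hashable header write per pipe,
 * plus one hashable body write, one non-hashable body write and one
 * hash-flush register write.
 */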

/**
 * ipa_flt_skip_pipe_config() - skip ep flt configuration or not?
 * Skips according to pre-configuration or modem pipe ownership
 * @pipe: the EP pipe index
 *
 * Return: true if to skip, false otherwise
 */
static bool ipa_flt_skip_pipe_config(int pipe)
{
	if (ipa_is_modem_pipe(pipe)) {
		IPADBG_LOW("skip %d - modem owned pipe\n", pipe);
		return true;
	}

	if (ipa3_ctx->skip_ep_cfg_shadow[pipe]) {
		IPADBG_LOW("skip %d\n", pipe);
		return true;
	}

	if ((ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD) == pipe
		&& ipa3_ctx->modem_cfg_emb_pipe_flt)) {
		IPADBG_LOW("skip %d\n", pipe);
		return true;
	}

	return false;
}

/**
 * __ipa_commit_flt_v3() - commit flt tables to the hw.
 * Commits the headers and, if they are local, the bodies, with internal
 * cache flushing. The headers (and local bodies) are first created in
 * DMA buffers and then written to SRAM via immediate commands
 * @ip: the ip address family type
 *
 * Return: 0 on success, negative on failure
 */
int __ipa_commit_flt_v3(enum ipa_ip_type ip)
{
	struct ipahal_fltrt_alloc_imgs_params alloc_params;
	int rc = 0;
	struct ipa3_desc *desc;
	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
	struct ipahal_imm_cmd_dma_shared_mem mem_cmd = {0};
	struct ipahal_imm_cmd_pyld **cmd_pyld;
	int num_cmd = 0;
	int i;
	int hdr_idx;
	u32 lcl_hash_hdr, lcl_nhash_hdr;
	u32 lcl_hash_bdy, lcl_nhash_bdy;
	bool lcl_hash, lcl_nhash;
	struct ipahal_reg_fltrt_hash_flush flush;
	struct ipahal_reg_valmask valmask;
	u32 tbl_hdr_width;
	struct ipa3_flt_tbl *tbl;

	tbl_hdr_width = ipahal_get_hw_tbl_hdr_width();
	memset(&alloc_params, 0, sizeof(alloc_params));
	alloc_params.ipt = ip;
	alloc_params.tbls_num = ipa3_ctx->ep_flt_num;

	if (ip == IPA_IP_v4) {
		lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v4_flt_hash_ofst) +
			tbl_hdr_width; /* to skip the bitmap */
		lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v4_flt_nhash_ofst) +
			tbl_hdr_width; /* to skip the bitmap */
		lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(apps_v4_flt_hash_ofst);
		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(apps_v4_flt_nhash_ofst);
		lcl_hash = ipa3_ctx->ip4_flt_tbl_hash_lcl;
		lcl_nhash = ipa3_ctx->ip4_flt_tbl_nhash_lcl;
	} else {
		lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v6_flt_hash_ofst) +
			tbl_hdr_width; /* to skip the bitmap */
		lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v6_flt_nhash_ofst) +
			tbl_hdr_width; /* to skip the bitmap */
		lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(apps_v6_flt_hash_ofst);
		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
			IPA_MEM_PART(apps_v6_flt_nhash_ofst);
		lcl_hash = ipa3_ctx->ip6_flt_tbl_hash_lcl;
		lcl_nhash = ipa3_ctx->ip6_flt_tbl_nhash_lcl;
	}

	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i))
			continue;
		tbl = &ipa3_ctx->flt_tbl[i][ip];
		if (ipa_prep_flt_tbl_for_cmt(ip, tbl, i)) {
			rc = -EPERM;
			goto prep_failed;
		}
		if (!tbl->in_sys[IPA_RULE_HASHABLE] &&
			tbl->sz[IPA_RULE_HASHABLE]) {
			alloc_params.num_lcl_hash_tbls++;
			alloc_params.total_sz_lcl_hash_tbls +=
				tbl->sz[IPA_RULE_HASHABLE];
			alloc_params.total_sz_lcl_hash_tbls -= tbl_hdr_width;
		}
		if (!tbl->in_sys[IPA_RULE_NON_HASHABLE] &&
			tbl->sz[IPA_RULE_NON_HASHABLE]) {
			alloc_params.num_lcl_nhash_tbls++;
			alloc_params.total_sz_lcl_nhash_tbls +=
				tbl->sz[IPA_RULE_NON_HASHABLE];
			alloc_params.total_sz_lcl_nhash_tbls -= tbl_hdr_width;
		}
	}

	if (ipa_generate_flt_hw_tbl_img(ip, &alloc_params)) {
		IPAERR("fail to generate FLT HW TBL image. IP %d\n", ip);
		rc = -EFAULT;
		goto prep_failed;
	}

	if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE,
		&alloc_params.hash_bdy)) {
		rc = -EFAULT;
		goto fail_size_valid;
	}
	if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_NON_HASHABLE,
		&alloc_params.nhash_bdy)) {
		rc = -EFAULT;
		goto fail_size_valid;
	}

	if (ipa_flt_alloc_cmd_buffers(ip, &desc, &cmd_pyld)) {
		rc = -ENOMEM;
		goto fail_size_valid;
	}

	/* flushing ipa internal hashable flt rules cache */
	memset(&flush, 0, sizeof(flush));
	if (ip == IPA_IP_v4)
		flush.v4_flt = true;
	else
		flush.v6_flt = true;
	ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
	reg_write_cmd.skip_pipeline_clear = false;
	reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
	reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
	reg_write_cmd.value = valmask.val;
	reg_write_cmd.value_mask = valmask.mask;
	cmd_pyld[0] = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_REGISTER_WRITE, &reg_write_cmd, false);
	if (!cmd_pyld[0]) {
		IPAERR("fail construct register_write imm cmd: IP %d\n", ip);
		rc = -EFAULT;
		goto fail_reg_write_construct;
	}
	desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
	desc[0].pyld = cmd_pyld[0]->data;
	desc[0].len = cmd_pyld[0]->len;
	desc[0].type = IPA_IMM_CMD_DESC;
	num_cmd++;

	hdr_idx = 0;
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i)) {
			IPADBG_LOW("skip %d - not filtering pipe\n", i);
			continue;
		}

		if (ipa_flt_skip_pipe_config(i)) {
			hdr_idx++;
			continue;
		}

		IPADBG_LOW("Prepare imm cmd for hdr at index %d for pipe %d\n",
			hdr_idx, i);

		mem_cmd.is_read = false;
		mem_cmd.skip_pipeline_clear = false;
		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
		mem_cmd.size = tbl_hdr_width;
		mem_cmd.system_addr = alloc_params.nhash_hdr.phys_base +
			hdr_idx * tbl_hdr_width;
		mem_cmd.local_addr = lcl_nhash_hdr +
			hdr_idx * tbl_hdr_width;
		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
		if (!cmd_pyld[num_cmd]) {
			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
				ip);
			goto fail_imm_cmd_construct;
		}
		desc[num_cmd].opcode =
			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
		desc[num_cmd++].type = IPA_IMM_CMD_DESC;

		mem_cmd.is_read = false;
		mem_cmd.skip_pipeline_clear = false;
		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
		mem_cmd.size = tbl_hdr_width;
		mem_cmd.system_addr = alloc_params.hash_hdr.phys_base +
			hdr_idx * tbl_hdr_width;
		mem_cmd.local_addr = lcl_hash_hdr +
			hdr_idx * tbl_hdr_width;
		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
		if (!cmd_pyld[num_cmd]) {
			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
				ip);
			goto fail_imm_cmd_construct;
		}
		desc[num_cmd].opcode =
			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
		desc[num_cmd++].type = IPA_IMM_CMD_DESC;

		hdr_idx++;
	}

	if (lcl_nhash) {
		mem_cmd.is_read = false;
		mem_cmd.skip_pipeline_clear = false;
		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
		mem_cmd.size = alloc_params.nhash_bdy.size;
		mem_cmd.system_addr = alloc_params.nhash_bdy.phys_base;
		mem_cmd.local_addr = lcl_nhash_bdy;
		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
		if (!cmd_pyld[num_cmd]) {
			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
				ip);
			goto fail_imm_cmd_construct;
		}
		desc[num_cmd].opcode =
			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
		desc[num_cmd++].type = IPA_IMM_CMD_DESC;
	}
	if (lcl_hash) {
		mem_cmd.is_read = false;
		mem_cmd.skip_pipeline_clear = false;
		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
		mem_cmd.size = alloc_params.hash_bdy.size;
		mem_cmd.system_addr = alloc_params.hash_bdy.phys_base;
		mem_cmd.local_addr = lcl_hash_bdy;
		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
		if (!cmd_pyld[num_cmd]) {
			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
				ip);
			goto fail_imm_cmd_construct;
		}
		desc[num_cmd].opcode =
			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
		desc[num_cmd++].type = IPA_IMM_CMD_DESC;
	}

	if (ipa3_send_cmd(num_cmd, desc)) {
		IPAERR("fail to send immediate command\n");
		rc = -EFAULT;
		goto fail_imm_cmd_construct;
	}

	IPADBG_LOW("Hashable HEAD\n");
	IPA_DUMP_BUFF(alloc_params.hash_hdr.base,
		alloc_params.hash_hdr.phys_base, alloc_params.hash_hdr.size);

	IPADBG_LOW("Non-Hashable HEAD\n");
	IPA_DUMP_BUFF(alloc_params.nhash_hdr.base,
		alloc_params.nhash_hdr.phys_base, alloc_params.nhash_hdr.size);

	if (alloc_params.hash_bdy.size) {
		IPADBG_LOW("Hashable BODY\n");
		IPA_DUMP_BUFF(alloc_params.hash_bdy.base,
			alloc_params.hash_bdy.phys_base,
			alloc_params.hash_bdy.size);
	}

	if (alloc_params.nhash_bdy.size) {
		IPADBG_LOW("Non-Hashable BODY\n");
		IPA_DUMP_BUFF(alloc_params.nhash_bdy.base,
			alloc_params.nhash_bdy.phys_base,
			alloc_params.nhash_bdy.size);
	}

	__ipa_reap_sys_flt_tbls(ip, IPA_RULE_HASHABLE);
	__ipa_reap_sys_flt_tbls(ip, IPA_RULE_NON_HASHABLE);

fail_imm_cmd_construct:
	for (i = 0 ; i < num_cmd ; i++)
		ipahal_destroy_imm_cmd(cmd_pyld[i]);
fail_reg_write_construct:
	kfree(desc);
	kfree(cmd_pyld);
fail_size_valid:
	if (alloc_params.hash_hdr.size)
		ipahal_free_dma_mem(&alloc_params.hash_hdr);
	ipahal_free_dma_mem(&alloc_params.nhash_hdr);
	if (alloc_params.hash_bdy.size)
		ipahal_free_dma_mem(&alloc_params.hash_bdy);
	if (alloc_params.nhash_bdy.size)
		ipahal_free_dma_mem(&alloc_params.nhash_bdy);
prep_failed:
	return rc;
}

static int __ipa_validate_flt_rule(const struct ipa_flt_rule *rule,
	struct ipa3_rt_tbl **rt_tbl, enum ipa_ip_type ip)
{
	if (rule->action != IPA_PASS_TO_EXCEPTION) {
		if (!rule->eq_attrib_type) {
			if (!rule->rt_tbl_hdl) {
				IPAERR("invalid RT tbl\n");
				goto error;
			}

			*rt_tbl = ipa3_id_find(rule->rt_tbl_hdl);
			if (*rt_tbl == NULL) {
				IPAERR("RT tbl not found\n");
				goto error;
			}

			if ((*rt_tbl)->cookie != IPA_COOKIE) {
				IPAERR("RT table cookie is invalid\n");
				goto error;
			}
		} else {
			if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ?
				IPA_MEM_PART(v4_modem_rt_index_hi) :
				IPA_MEM_PART(v6_modem_rt_index_hi))) {
				IPAERR("invalid RT tbl\n");
				goto error;
			}
		}
	}

	if (rule->rule_id) {
		if (!(rule->rule_id & ipahal_get_rule_id_hi_bit())) {
			IPAERR("invalid rule_id provided 0x%x\n"
				"rule_ids without bit 0x%x set are auto generated\n",
				rule->rule_id, ipahal_get_rule_id_hi_bit());
			goto error;
		}
	}

	return 0;

error:
	return -EPERM;
}
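
/*
 * Worked example for the rule_id check above (illustrative): ids
 * without ipahal_get_rule_id_hi_bit() set are reserved for the
 * driver's idr-based auto allocation (see __ipa_create_flt_entry()
 * and __ipa_del_flt_rule()), so a caller-chosen id must carry that
 * bit:
 *
 *	rule.rule_id = ipahal_get_rule_id_hi_bit() | 5;	// accepted
 *	rule.rule_id = 5;				// rejected
 */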

static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry,
	const struct ipa_flt_rule *rule, struct ipa3_rt_tbl *rt_tbl,
	struct ipa3_flt_tbl *tbl)
{
	int id;

	*entry = kmem_cache_zalloc(ipa3_ctx->flt_rule_cache, GFP_KERNEL);
	if (!*entry) {
		IPAERR("failed to alloc FLT rule object\n");
		goto error;
	}
	INIT_LIST_HEAD(&((*entry)->link));
	(*entry)->rule = *rule;
	(*entry)->cookie = IPA_COOKIE;
	(*entry)->rt_tbl = rt_tbl;
	(*entry)->tbl = tbl;
	if (rule->rule_id) {
		id = rule->rule_id;
	} else {
		id = ipa3_alloc_rule_id(&tbl->rule_ids);
		if (id < 0) {
			IPAERR("failed to allocate rule id\n");
			WARN_ON(1);
			goto rule_id_fail;
		}
	}
	(*entry)->rule_id = id;

	return 0;

rule_id_fail:
	kmem_cache_free(ipa3_ctx->flt_rule_cache, *entry);
error:
	return -EPERM;
}

static int __ipa_finish_flt_rule_add(struct ipa3_flt_tbl *tbl,
	struct ipa3_flt_entry *entry, u32 *rule_hdl)
{
	int id;

	tbl->rule_cnt++;
	if (entry->rt_tbl)
		entry->rt_tbl->ref_cnt++;
	id = ipa3_id_alloc(entry);
	if (id < 0) {
		IPAERR("failed to add to tree\n");
		WARN_ON(1);
	}
	*rule_hdl = id;
	entry->id = id;
	IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt);

	return 0;
}

static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip,
	const struct ipa_flt_rule *rule, u8 add_rear,
	u32 *rule_hdl)
{
	struct ipa3_flt_entry *entry;
	struct ipa3_rt_tbl *rt_tbl = NULL;

	if (__ipa_validate_flt_rule(rule, &rt_tbl, ip))
		goto error;

	if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl))
		goto error;

	if (add_rear) {
		if (tbl->sticky_rear)
			list_add_tail(&entry->link,
				tbl->head_flt_rule_list.prev);
		else
			list_add_tail(&entry->link, &tbl->head_flt_rule_list);
	} else {
		list_add(&entry->link, &tbl->head_flt_rule_list);
	}

	__ipa_finish_flt_rule_add(tbl, entry, rule_hdl);

	return 0;

error:
	return -EPERM;
}

static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl,
	const struct ipa_flt_rule *rule,
	u32 *rule_hdl,
	enum ipa_ip_type ip,
	struct ipa3_flt_entry **add_after_entry)
{
	struct ipa3_flt_entry *entry;
	struct ipa3_rt_tbl *rt_tbl = NULL;

	if (!*add_after_entry)
		goto error;

	if (rule == NULL || rule_hdl == NULL) {
		IPAERR("bad parms rule=%p rule_hdl=%p\n", rule,
			rule_hdl);
		goto error;
	}

	if (__ipa_validate_flt_rule(rule, &rt_tbl, ip))
		goto error;

	if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl))
		goto error;

	list_add(&entry->link, &((*add_after_entry)->link));

	__ipa_finish_flt_rule_add(tbl, entry, rule_hdl);

	/* prepare for next insertion */
	*add_after_entry = entry;

	return 0;

error:
	*add_after_entry = NULL;
	return -EPERM;
}

static int __ipa_del_flt_rule(u32 rule_hdl)
{
	struct ipa3_flt_entry *entry;
	int id;

	entry = ipa3_id_find(rule_hdl);
	if (entry == NULL) {
		IPAERR("lookup failed\n");
		return -EINVAL;
	}

	if (entry->cookie != IPA_COOKIE) {
		IPAERR("bad params\n");
		return -EINVAL;
	}
	id = entry->id;

	list_del(&entry->link);
	entry->tbl->rule_cnt--;
	if (entry->rt_tbl)
		entry->rt_tbl->ref_cnt--;
	IPADBG("del flt rule rule_cnt=%d rule_id=%d\n",
		entry->tbl->rule_cnt, entry->rule_id);
	entry->cookie = 0;
	/* if rule id was allocated from idr, remove it */
	if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
		idr_remove(&entry->tbl->rule_ids, entry->rule_id);

	kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);

	/* remove the handle from the database */
	ipa3_id_remove(id);

	return 0;
}

static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule,
	enum ipa_ip_type ip)
{
	struct ipa3_flt_entry *entry;
	struct ipa3_rt_tbl *rt_tbl = NULL;

	entry = ipa3_id_find(frule->rule_hdl);
	if (entry == NULL) {
		IPAERR("lookup failed\n");
		goto error;
	}

	if (entry->cookie != IPA_COOKIE) {
		IPAERR("bad params\n");
		goto error;
	}

	if (entry->rt_tbl)
		entry->rt_tbl->ref_cnt--;

	if (frule->rule.action != IPA_PASS_TO_EXCEPTION) {
		if (!frule->rule.eq_attrib_type) {
			if (!frule->rule.rt_tbl_hdl) {
				IPAERR("invalid RT tbl\n");
				goto error;
			}

			rt_tbl = ipa3_id_find(frule->rule.rt_tbl_hdl);
			if (rt_tbl == NULL) {
				IPAERR("RT tbl not found\n");
				goto error;
			}

			if (rt_tbl->cookie != IPA_COOKIE) {
				IPAERR("RT table cookie is invalid\n");
				goto error;
			}
		} else {
			if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ?
				IPA_MEM_PART(v4_modem_rt_index_hi) :
				IPA_MEM_PART(v6_modem_rt_index_hi))) {
				IPAERR("invalid RT tbl\n");
				goto error;
			}
		}
	}

	entry->rule = frule->rule;
	entry->rt_tbl = rt_tbl;
	if (entry->rt_tbl)
		entry->rt_tbl->ref_cnt++;
	entry->hw_len = 0;
	entry->prio = 0;

	return 0;

error:
	return -EPERM;
}

static int __ipa_add_flt_get_ep_idx(enum ipa_client_type ep, int *ipa_ep_idx)
{
	*ipa_ep_idx = ipa3_get_ep_mapping(ep);
	if (*ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND) {
		IPAERR("ep not valid ep=%d\n", ep);
		return -EINVAL;
	}
	if (ipa3_ctx->ep[*ipa_ep_idx].valid == 0)
		IPADBG("ep not connected ep_idx=%d\n", *ipa_ep_idx);

	if (!ipa_is_ep_support_flt(*ipa_ep_idx)) {
		IPAERR("ep does not support filtering ep=%d\n", ep);
		return -EINVAL;
	}

	return 0;
}

static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
	const struct ipa_flt_rule *rule, u8 add_rear,
	u32 *rule_hdl)
{
	struct ipa3_flt_tbl *tbl;
	int ipa_ep_idx;

	if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) {
		IPAERR("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
			rule_hdl, ep);
		return -EINVAL;
	}

	if (__ipa_add_flt_get_ep_idx(ep, &ipa_ep_idx))
		return -EINVAL;

	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][ip];
	IPADBG_LOW("add ep flt rule ip=%d ep=%d\n", ip, ep);

	return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
}

/**
 * ipa3_add_flt_rule() - Add the specified filtering rules to SW and
 * optionally commit to IPA HW
 * @rules: [inout] set of filtering rules to add
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
{
	int i;
	int result;

	if (rules == NULL || rules->num_rules == 0 ||
		rules->ip >= IPA_IP_MAX) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < rules->num_rules; i++) {
		if (!rules->global)
			result = __ipa_add_ep_flt_rule(rules->ip, rules->ep,
					&rules->rules[i].rule,
					rules->rules[i].at_rear,
					&rules->rules[i].flt_rule_hdl);
		else
			result = -1;

		if (result) {
			IPAERR("failed to add flt rule %d\n", i);
			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
		} else {
			rules->rules[i].status = 0;
		}
	}

	if (rules->global) {
		IPAERR("no support for global filter rules\n");
		result = -EPERM;
		goto bail;
	}

	if (rules->commit)
		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
			result = -EPERM;
			goto bail;
		}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}
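
/*
 * Usage sketch (illustrative only; the client choice and rule fields
 * are assumptions, not driver defaults): add one IPv4 rule on a
 * client pipe and commit it to HW in the same call.
 */
#if 0
static int example_add_one_flt_rule(void)
{
	struct ipa_ioc_add_flt_rule *req;
	int rc;

	/* one variable-length rule entry follows the fixed header */
	req = kzalloc(sizeof(*req) + sizeof(struct ipa_flt_rule_add),
		GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->commit = 1;
	req->ip = IPA_IP_v4;
	req->ep = IPA_CLIENT_USB_PROD;	/* assumed client */
	req->global = 0;
	req->num_rules = 1;
	req->rules[0].at_rear = 1;
	req->rules[0].rule.action = IPA_PASS_TO_EXCEPTION;
	req->rules[0].rule.hashable = 1;

	rc = ipa3_add_flt_rule(req);
	if (!rc)
		pr_debug("rule hdl 0x%x status %d\n",
			req->rules[0].flt_rule_hdl, req->rules[0].status);

	kfree(req);
	return rc;
}
#endif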

/**
 * ipa3_add_flt_rule_after() - Add the specified filtering rules to SW after
 * the rule whose handle is given, and optionally commit to IPA HW
 * @rules: [inout] set of filtering rules to add, plus the handle of the
 *	rule to add them after
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules)
{
	int i;
	int result;
	struct ipa3_flt_tbl *tbl;
	int ipa_ep_idx;
	struct ipa3_flt_entry *entry;

	if (rules == NULL || rules->num_rules == 0 ||
		rules->ip >= IPA_IP_MAX) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	if (rules->ep >= IPA_CLIENT_MAX) {
		IPAERR("bad parms ep=%d\n", rules->ep);
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);

	if (__ipa_add_flt_get_ep_idx(rules->ep, &ipa_ep_idx)) {
		result = -EINVAL;
		goto bail;
	}

	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][rules->ip];

	entry = ipa3_id_find(rules->add_after_hdl);
	if (entry == NULL) {
		IPAERR("lookup failed\n");
		result = -EINVAL;
		goto bail;
	}

	if (entry->tbl != tbl) {
		IPAERR("given entry does not match the table\n");
		result = -EINVAL;
		goto bail;
	}

	if (tbl->sticky_rear)
		if (&entry->link == tbl->head_flt_rule_list.prev) {
			IPAERR("cannot add rule at end of a sticky table\n");
			result = -EINVAL;
			goto bail;
		}

	IPADBG("add ep flt rule ip=%d ep=%d after hdl %d\n",
		rules->ip, rules->ep, rules->add_after_hdl);

	/*
	 * We add all rules one after the other. If one insertion fails,
	 * it cuts the chain: all following rules receive a fail status,
	 * since subsequent calls to __ipa_add_flt_rule_after() fail once
	 * entry == NULL.
	 */
	for (i = 0; i < rules->num_rules; i++) {
		result = __ipa_add_flt_rule_after(tbl,
				&rules->rules[i].rule,
				&rules->rules[i].flt_rule_hdl,
				rules->ip,
				&entry);
		if (result) {
			IPAERR("failed to add flt rule %d\n", i);
			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
		} else {
			rules->rules[i].status = 0;
		}
	}

	if (rules->commit)
		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
			IPAERR("failed to commit flt rules\n");
			result = -EPERM;
			goto bail;
		}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}

/**
 * ipa3_del_flt_rule() - Remove the specified filtering rules from SW and
 * optionally commit to IPA HW
 * @hdls: [inout] set of handles of the rules to delete
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
{
	int i;
	int result;

	if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < hdls->num_hdls; i++) {
		if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
			IPAERR("failed to del flt rule %i\n", i);
			hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
		} else {
			hdls->hdl[i].status = 0;
		}
	}

	if (hdls->commit)
		if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) {
			result = -EPERM;
			goto bail;
		}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}

/**
 * ipa3_mdfy_flt_rule() - Modify the specified filtering rules in SW and
 * optionally commit to IPA HW
 * @hdls: [inout] set of rules to modify
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
{
	int i;
	int result;

	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < hdls->num_rules; i++) {
		if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) {
			IPAERR("failed to mdfy flt rule %i\n", i);
			hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
		} else {
			hdls->rules[i].status = 0;
		}
	}

	if (hdls->commit)
		if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) {
			result = -EPERM;
			goto bail;
		}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}

/**
 * ipa3_commit_flt() - Commit the current SW filtering table of specified
 * type to IPA HW
 * @ip: [in] the family of filtering tables
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_commit_flt(enum ipa_ip_type ip)
{
	int result;

	if (ip >= IPA_IP_MAX) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);

	if (ipa3_ctx->ctrl->ipa3_commit_flt(ip)) {
		result = -EPERM;
		goto bail;
	}
	result = 0;

bail:
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}

/**
 * ipa3_reset_flt() - Reset the current SW filtering table of specified type
 * (does not commit to HW)
 * @ip: [in] the family of filtering tables
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_reset_flt(enum ipa_ip_type ip)
{
	struct ipa3_flt_tbl *tbl;
	struct ipa3_flt_entry *entry;
	struct ipa3_flt_entry *next;
	int i;
	int id;

	if (ip >= IPA_IP_MAX) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i))
			continue;

		tbl = &ipa3_ctx->flt_tbl[i][ip];
		list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list,
			link) {
			if (ipa3_id_find(entry->id) == NULL) {
				WARN_ON(1);
				mutex_unlock(&ipa3_ctx->lock);
				return -EFAULT;
			}
			list_del(&entry->link);
			entry->tbl->rule_cnt--;
			if (entry->rt_tbl)
				entry->rt_tbl->ref_cnt--;
			/* if rule id was allocated from idr, remove it */
			if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
				idr_remove(&entry->tbl->rule_ids,
					entry->rule_id);
			entry->cookie = 0;
			id = entry->id;
			kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);

			/* remove the handle from the database */
			ipa3_id_remove(id);
		}
	}
	mutex_unlock(&ipa3_ctx->lock);

	return 0;
}

void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx)
{
	struct ipa3_flt_tbl *tbl;
	struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx];
	struct ipa_flt_rule rule;

	if (!ipa_is_ep_support_flt(ipa_ep_idx)) {
		IPADBG("cannot add flt rules to non filtering pipe num %d\n",
			ipa_ep_idx);
		return;
	}

	memset(&rule, 0, sizeof(rule));

	mutex_lock(&ipa3_ctx->lock);
	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
	rule.action = IPA_PASS_TO_EXCEPTION;
	__ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, true,
		&ep->dflt_flt4_rule_hdl);
	ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
	tbl->sticky_rear = true;

	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
	rule.action = IPA_PASS_TO_EXCEPTION;
	__ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, true,
		&ep->dflt_flt6_rule_hdl);
	ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
	tbl->sticky_rear = true;
	mutex_unlock(&ipa3_ctx->lock);
}

void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx)
{
	struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx];

	mutex_lock(&ipa3_ctx->lock);
	if (ep->dflt_flt4_rule_hdl) {
		__ipa_del_flt_rule(ep->dflt_flt4_rule_hdl);
		ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
		ep->dflt_flt4_rule_hdl = 0;
	}
	if (ep->dflt_flt6_rule_hdl) {
		__ipa_del_flt_rule(ep->dflt_flt6_rule_hdl);
		ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
		ep->dflt_flt6_rule_hdl = 0;
	}
	mutex_unlock(&ipa3_ctx->lock);
}

/**
 * ipa3_set_flt_tuple_mask() - Sets the flt tuple masking for the given
 * pipe. The pipe must be for an AP EP (not modem) and support filtering.
 * Updates the filtering masking values without changing the rt ones.
 * @pipe_idx: filter pipe index to configure the tuple masking
 * @tuple: the tuple members masking
 *
 * Returns: 0 on success, negative on failure
 */
int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple)
{
	struct ipahal_reg_fltrt_hash_tuple fltrt_tuple;

	if (!tuple) {
		IPAERR("bad tuple\n");
		return -EINVAL;
	}

	if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
		IPAERR("bad pipe index!\n");
		return -EINVAL;
	}

	if (!ipa_is_ep_support_flt(pipe_idx)) {
		IPAERR("pipe %d not filtering pipe\n", pipe_idx);
		return -EINVAL;
	}

	if (ipa_is_modem_pipe(pipe_idx)) {
		IPAERR("modem pipe tuple is not configured by AP\n");
		return -EINVAL;
	}

	ipahal_read_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
		pipe_idx, &fltrt_tuple);
	fltrt_tuple.flt = *tuple;
	ipahal_write_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
		pipe_idx, &fltrt_tuple);

	return 0;
}
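
/*
 * Usage sketch (illustrative only): clear the filter half of the hash
 * tuple on an AP filtering pipe. The routing half is preserved by
 * ipa3_set_flt_tuple_mask() itself via the read-modify-write above;
 * what an all-zero tuple means to HW is register-defined and not
 * asserted here.
 */
#if 0
static int example_clear_flt_tuple(int pipe_idx)
{
	struct ipahal_reg_hash_tuple tuple;

	memset(&tuple, 0, sizeof(tuple));
	return ipa3_set_flt_tuple_mask(pipe_idx, &tuple);
}
#endif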

/**
 * ipa3_flt_read_tbl_from_hw() - Read filtering table from IPA HW
 * @pipe_idx: IPA endpoint index
 * @ip_type: IPv4 or IPv6 table
 * @hashable: hashable or non-hashable table
 * @entry: array to fill with the table entries
 * @num_entry: number of entries in the entry array. Set by the caller
 *	to indicate the array size, then set by this function as an
 *	output parameter to the number of entries actually filled
 *
 * This function reads the filtering table from IPA SRAM and prepares an array
 * of entries. It is mainly used for debugging purposes.
 *
 * If the table is empty or is a modem table held in Apps system memory,
 * zero entries are returned.
 *
 * Returns: 0 on success, negative on failure
 */
int ipa3_flt_read_tbl_from_hw(u32 pipe_idx, enum ipa_ip_type ip_type,
	bool hashable, struct ipahal_flt_rule_entry entry[], int *num_entry)
{
	void *ipa_sram_mmio;
	u64 hdr_base_ofst;
	int tbl_entry_idx;
	int i;
	int res = 0;
	u64 tbl_addr;
	bool is_sys;
	u8 *rule_addr;
	struct ipa_mem_buffer *sys_tbl_mem;
	int rule_idx;

	IPADBG("pipe_idx=%d ip=%d hashable=%d entry=0x%p num_entry=0x%p\n",
		pipe_idx, ip_type, hashable, entry, num_entry);

	if (pipe_idx >= ipa3_ctx->ipa_num_pipes || ip_type >= IPA_IP_MAX ||
		!entry || !num_entry) {
		IPAERR("Invalid params\n");
		return -EFAULT;
	}

	if (!ipa_is_ep_support_flt(pipe_idx)) {
		IPAERR("pipe %d does not support filtering\n", pipe_idx);
		return -EINVAL;
	}

	/* map IPA SRAM */
	ipa_sram_mmio = ioremap(ipa3_ctx->ipa_wrapper_base +
		ipa3_ctx->ctrl->ipa_reg_base_ofst +
		ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
			ipa3_ctx->smem_restricted_bytes / 4),
		ipa3_ctx->smem_sz);
	if (!ipa_sram_mmio) {
		IPAERR("fail to ioremap IPA SRAM\n");
		return -ENOMEM;
	}

	memset(entry, 0, sizeof(*entry) * (*num_entry));
	if (hashable) {
		if (ip_type == IPA_IP_v4)
			hdr_base_ofst =
				IPA_MEM_PART(v4_flt_hash_ofst);
		else
			hdr_base_ofst =
				IPA_MEM_PART(v6_flt_hash_ofst);
	} else {
		if (ip_type == IPA_IP_v4)
			hdr_base_ofst =
				IPA_MEM_PART(v4_flt_nhash_ofst);
		else
			hdr_base_ofst =
				IPA_MEM_PART(v6_flt_nhash_ofst);
	}

	/* calculate the index of the tbl entry */
	tbl_entry_idx = 1; /* skip the bitmap */
	for (i = 0; i < pipe_idx; i++)
		if (ipa3_ctx->ep_flt_bitmap & (1 << i))
			tbl_entry_idx++;

	IPADBG("hdr_base_ofst=0x%llx tbl_entry_idx=%d\n",
		hdr_base_ofst, tbl_entry_idx);

	res = ipahal_fltrt_read_addr_from_hdr(ipa_sram_mmio + hdr_base_ofst,
		tbl_entry_idx, &tbl_addr, &is_sys);
	if (res) {
		IPAERR("failed to read table address from header structure\n");
		goto bail;
	}
	IPADBG("flt tbl ep=%d: tbl_addr=0x%llx is_sys=%d\n",
		pipe_idx, tbl_addr, is_sys);
	if (!tbl_addr) {
		IPAERR("invalid flt tbl addr\n");
		res = -EFAULT;
		goto bail;
	}

	/* for tables residing in DDR, access them through virtual memory */
	if (is_sys) {
		sys_tbl_mem = &ipa3_ctx->flt_tbl[pipe_idx][ip_type].
			curr_mem[hashable ? IPA_RULE_HASHABLE :
			IPA_RULE_NON_HASHABLE];
		if (sys_tbl_mem->phys_base &&
			sys_tbl_mem->phys_base != tbl_addr) {
			IPAERR("mismatch addr: parsed=%llx sw=%pad\n",
				tbl_addr, &sys_tbl_mem->phys_base);
		}
		if (sys_tbl_mem->phys_base)
			rule_addr = sys_tbl_mem->base;
		else
			rule_addr = NULL;
	} else {
		rule_addr = ipa_sram_mmio + hdr_base_ofst + tbl_addr;
	}

	IPADBG("First rule addr 0x%p\n", rule_addr);

	if (!rule_addr) {
		/* Modem table in system memory or empty table */
		*num_entry = 0;
		goto bail;
	}

	rule_idx = 0;
	while (rule_idx < *num_entry) {
		res = ipahal_flt_parse_hw_rule(rule_addr, &entry[rule_idx]);
		if (res) {
			IPAERR("failed parsing flt rule\n");
			goto bail;
		}

		IPADBG("rule_size=%d\n", entry[rule_idx].rule_size);
		if (!entry[rule_idx].rule_size)
			break;

		rule_addr += entry[rule_idx].rule_size;
		rule_idx++;
	}
	*num_entry = rule_idx;
bail:
	iounmap(ipa_sram_mmio);
	/* propagate any error noted above rather than always returning 0 */
	return res;
}
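
/*
 * Usage sketch (illustrative, debug-only; the rule count of 8 is an
 * arbitrary choice): dump the HW sizes of the rules in a pipe's
 * non-hashable IPv4 table.
 */
#if 0
static void example_dump_flt_tbl(u32 pipe_idx)
{
	struct ipahal_flt_rule_entry *entries;
	int num = 8;	/* read at most 8 rules */
	int i;

	entries = kcalloc(num, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return;

	if (!ipa3_flt_read_tbl_from_hw(pipe_idx, IPA_IP_v4, false,
		entries, &num))
		for (i = 0; i < num; i++)
			pr_debug("rule %d: hw size %d\n", i,
				entries[i].rule_size);

	kfree(entries);
}
#endif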