blob: 50fe2a10c07328e86c15144c42c539fbdff1ec8d [file] [log] [blame]
Skylar Chang68c37d82018-04-07 16:42:36 -07001/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
Amir Levy9659e592016-10-27 18:08:27 +03002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include "ipa_i.h"
14
/* size in bytes of one word in the HW filter table; header entries and
 * rule-set terminators are each one word
 */
#define IPA_FLT_TABLE_WORD_SIZE (4)
/* mask used to test and force word (4 byte) alignment of offsets/sizes.
 * NOTE(review): name is misspelled ("ALLIGNMENT") but kept as-is since it
 * may be referenced from other files.
 */
#define IPA_FLT_ENTRY_MEMORY_ALLIGNMENT (0x3)
/* LSB flag in a header word: table body is at an offset from base (local)
 * rather than a system-memory physical address
 */
#define IPA_FLT_BIT_MASK (0x1)
#define IPA_FLT_TABLE_INDEX_NOT_FOUND (-1)
#define IPA_FLT_STATUS_OF_ADD_FAILED (-1)
#define IPA_FLT_STATUS_OF_DEL_FAILED (-1)
#define IPA_FLT_STATUS_OF_MDFY_FAILED (-1)
22
23static int ipa_generate_hw_rule_from_eq(
24 const struct ipa_ipfltri_rule_eq *attrib, u8 **buf)
25{
Mohammed Javidbf4c8022017-08-07 23:15:48 +053026 uint8_t num_offset_meq_32 = attrib->num_offset_meq_32;
27 uint8_t num_ihl_offset_range_16 = attrib->num_ihl_offset_range_16;
28 uint8_t num_ihl_offset_meq_32 = attrib->num_ihl_offset_meq_32;
29 uint8_t num_offset_meq_128 = attrib->num_offset_meq_128;
Amir Levy9659e592016-10-27 18:08:27 +030030 int i;
31
32 if (attrib->tos_eq_present) {
33 *buf = ipa_write_8(attrib->tos_eq, *buf);
34 *buf = ipa_pad_to_32(*buf);
35 }
36
37 if (attrib->protocol_eq_present) {
38 *buf = ipa_write_8(attrib->protocol_eq, *buf);
39 *buf = ipa_pad_to_32(*buf);
40 }
41
42 if (num_offset_meq_32) {
43 *buf = ipa_write_8(attrib->offset_meq_32[0].offset, *buf);
44 *buf = ipa_write_32(attrib->offset_meq_32[0].mask, *buf);
45 *buf = ipa_write_32(attrib->offset_meq_32[0].value, *buf);
46 *buf = ipa_pad_to_32(*buf);
47 num_offset_meq_32--;
48 }
49
50 if (num_offset_meq_32) {
51 *buf = ipa_write_8(attrib->offset_meq_32[1].offset, *buf);
52 *buf = ipa_write_32(attrib->offset_meq_32[1].mask, *buf);
53 *buf = ipa_write_32(attrib->offset_meq_32[1].value, *buf);
54 *buf = ipa_pad_to_32(*buf);
55 num_offset_meq_32--;
56 }
57
58 if (num_ihl_offset_range_16) {
59 *buf = ipa_write_8(attrib->ihl_offset_range_16[0].offset, *buf);
60 *buf = ipa_write_16(attrib->ihl_offset_range_16[0].range_high,
61 *buf);
62 *buf = ipa_write_16(attrib->ihl_offset_range_16[0].range_low,
63 *buf);
64 *buf = ipa_pad_to_32(*buf);
65 num_ihl_offset_range_16--;
66 }
67
68 if (num_ihl_offset_range_16) {
69 *buf = ipa_write_8(attrib->ihl_offset_range_16[1].offset, *buf);
70 *buf = ipa_write_16(attrib->ihl_offset_range_16[1].range_high,
71 *buf);
72 *buf = ipa_write_16(attrib->ihl_offset_range_16[1].range_low,
73 *buf);
74 *buf = ipa_pad_to_32(*buf);
75 num_ihl_offset_range_16--;
76 }
77
78 if (attrib->ihl_offset_eq_16_present) {
79 *buf = ipa_write_8(attrib->ihl_offset_eq_16.offset, *buf);
80 *buf = ipa_write_16(attrib->ihl_offset_eq_16.value, *buf);
81 *buf = ipa_pad_to_32(*buf);
82 }
83
84 if (attrib->ihl_offset_eq_32_present) {
85 *buf = ipa_write_8(attrib->ihl_offset_eq_32.offset, *buf);
86 *buf = ipa_write_32(attrib->ihl_offset_eq_32.value, *buf);
87 *buf = ipa_pad_to_32(*buf);
88 }
89
90 if (num_ihl_offset_meq_32) {
91 *buf = ipa_write_8(attrib->ihl_offset_meq_32[0].offset, *buf);
92 *buf = ipa_write_32(attrib->ihl_offset_meq_32[0].mask, *buf);
93 *buf = ipa_write_32(attrib->ihl_offset_meq_32[0].value, *buf);
94 *buf = ipa_pad_to_32(*buf);
95 num_ihl_offset_meq_32--;
96 }
97
98 /* TODO check layout of 16 byte mask and value */
99 if (num_offset_meq_128) {
100 *buf = ipa_write_8(attrib->offset_meq_128[0].offset, *buf);
101 for (i = 0; i < 16; i++)
102 *buf = ipa_write_8(attrib->offset_meq_128[0].mask[i],
103 *buf);
104 for (i = 0; i < 16; i++)
105 *buf = ipa_write_8(attrib->offset_meq_128[0].value[i],
106 *buf);
107 *buf = ipa_pad_to_32(*buf);
108 num_offset_meq_128--;
109 }
110
111 if (num_offset_meq_128) {
112 *buf = ipa_write_8(attrib->offset_meq_128[1].offset, *buf);
113 for (i = 0; i < 16; i++)
114 *buf = ipa_write_8(attrib->offset_meq_128[1].mask[i],
115 *buf);
116 for (i = 0; i < 16; i++)
117 *buf = ipa_write_8(attrib->offset_meq_128[1].value[i],
118 *buf);
119 *buf = ipa_pad_to_32(*buf);
120 num_offset_meq_128--;
121 }
122
123 if (attrib->tc_eq_present) {
124 *buf = ipa_write_8(attrib->tc_eq, *buf);
125 *buf = ipa_pad_to_32(*buf);
126 }
127
128 if (attrib->fl_eq_present) {
129 *buf = ipa_write_32(attrib->fl_eq, *buf);
130 *buf = ipa_pad_to_32(*buf);
131 }
132
133 if (num_ihl_offset_meq_32) {
134 *buf = ipa_write_8(attrib->ihl_offset_meq_32[1].offset, *buf);
135 *buf = ipa_write_32(attrib->ihl_offset_meq_32[1].mask, *buf);
136 *buf = ipa_write_32(attrib->ihl_offset_meq_32[1].value, *buf);
137 *buf = ipa_pad_to_32(*buf);
138 num_ihl_offset_meq_32--;
139 }
140
141 if (attrib->metadata_meq32_present) {
142 *buf = ipa_write_8(attrib->metadata_meq32.offset, *buf);
143 *buf = ipa_write_32(attrib->metadata_meq32.mask, *buf);
144 *buf = ipa_write_32(attrib->metadata_meq32.value, *buf);
145 *buf = ipa_pad_to_32(*buf);
146 }
147
148 if (attrib->ipv4_frag_eq_present)
149 *buf = ipa_pad_to_32(*buf);
150
151 return 0;
152}
153
154/**
155 * ipa_generate_flt_hw_rule() - generates the filtering hardware rule
156 * @ip: the ip address family type
157 * @entry: routing entry
158 * @buf: output buffer, buf == NULL means
159 * caller wants to know the size of the rule as seen
160 * by HW so they did not pass a valid buffer, we will use a
161 * scratch buffer instead.
162 * With this scheme we are going to
163 * generate the rule twice, once to know size using scratch
164 * buffer and second to write the rule to the actual caller
165 * supplied buffer which is of required size
166 *
167 * Returns: 0 on success, negative on failure
168 *
169 * caller needs to hold any needed locks to ensure integrity
170 *
171 */
172static int ipa_generate_flt_hw_rule(enum ipa_ip_type ip,
173 struct ipa_flt_entry *entry, u8 *buf)
174{
175 struct ipa_flt_rule_hw_hdr *hdr;
176 const struct ipa_flt_rule *rule =
177 (const struct ipa_flt_rule *)&entry->rule;
178 u16 en_rule = 0;
179 u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4];
180 u8 *start;
181
182 if (buf == NULL) {
183 memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
184 buf = (u8 *)tmp;
185 }
186
187 start = buf;
188 hdr = (struct ipa_flt_rule_hw_hdr *)buf;
189 hdr->u.hdr.action = entry->rule.action;
190 hdr->u.hdr.retain_hdr = entry->rule.retain_hdr;
191 hdr->u.hdr.to_uc = entry->rule.to_uc;
192 if (entry->rt_tbl)
193 hdr->u.hdr.rt_tbl_idx = entry->rt_tbl->idx;
194 else
195 hdr->u.hdr.rt_tbl_idx = entry->rule.rt_tbl_idx;
196 hdr->u.hdr.rsvd = 0;
197 buf += sizeof(struct ipa_flt_rule_hw_hdr);
198
199 if (rule->eq_attrib_type) {
200 if (ipa_generate_hw_rule_from_eq(&rule->eq_attrib, &buf)) {
201 IPAERR("fail to generate hw rule\n");
202 return -EPERM;
203 }
204 en_rule = rule->eq_attrib.rule_eq_bitmap;
205 } else {
206 if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
207 IPAERR("fail to generate hw rule\n");
208 return -EPERM;
209 }
210 }
211
Utkarsh Saxena41d57c52016-11-16 12:04:28 +0530212 IPADBG_LOW("en_rule 0x%x, action=%d, rt_idx=%d, uc=%d, retain_hdr=%d\n",
Amir Levy9659e592016-10-27 18:08:27 +0300213 en_rule,
214 hdr->u.hdr.action,
215 hdr->u.hdr.rt_tbl_idx,
216 hdr->u.hdr.to_uc,
217 hdr->u.hdr.retain_hdr);
218
219 hdr->u.hdr.en_rule = en_rule;
220 ipa_write_32(hdr->u.word, (u8 *)hdr);
221
222 if (entry->hw_len == 0) {
223 entry->hw_len = buf - start;
224 } else if (entry->hw_len != (buf - start)) {
225 IPAERR("hw_len differs b/w passes passed=%x calc=%td\n",
226 entry->hw_len, (buf - start));
227 return -EPERM;
228 }
229
230 return 0;
231}
232
233/**
234 * ipa_get_flt_hw_tbl_size() - returns the size of HW filtering table
235 * @ip: the ip address family type
236 * @hdr_sz: header size
237 *
Skylar Chang88610112016-10-19 13:30:44 -0700238 * Returns: size on success, negative on failure
Amir Levy9659e592016-10-27 18:08:27 +0300239 *
240 * caller needs to hold any needed locks to ensure integrity
241 *
242 */
243static int ipa_get_flt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz)
244{
245 struct ipa_flt_tbl *tbl;
246 struct ipa_flt_entry *entry;
247 u32 total_sz = 0;
248 u32 rule_set_sz;
249 int i;
250
251 *hdr_sz = 0;
252 tbl = &ipa_ctx->glob_flt_tbl[ip];
253 rule_set_sz = 0;
254 list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
255 if (ipa_generate_flt_hw_rule(ip, entry, NULL)) {
256 IPAERR("failed to find HW FLT rule size\n");
257 return -EPERM;
258 }
259 IPADBG("glob ip %d len %d\n", ip, entry->hw_len);
260 rule_set_sz += entry->hw_len;
261 }
262
263 if (rule_set_sz) {
264 tbl->sz = rule_set_sz + IPA_FLT_TABLE_WORD_SIZE;
265 /* this rule-set uses a word in header block */
266 *hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
267 if (!tbl->in_sys) {
268 /* add the terminator */
269 total_sz += (rule_set_sz + IPA_FLT_TABLE_WORD_SIZE);
270 total_sz = (total_sz +
271 IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) &
272 ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
273 }
274 }
275
276 for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
277 tbl = &ipa_ctx->flt_tbl[i][ip];
278 rule_set_sz = 0;
279 list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
280 if (ipa_generate_flt_hw_rule(ip, entry, NULL)) {
281 IPAERR("failed to find HW FLT rule size\n");
282 return -EPERM;
283 }
284 IPADBG("pipe %d len %d\n", i, entry->hw_len);
285 rule_set_sz += entry->hw_len;
286 }
287
288 if (rule_set_sz) {
289 tbl->sz = rule_set_sz + IPA_FLT_TABLE_WORD_SIZE;
290 /* this rule-set uses a word in header block */
291 *hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
292 if (!tbl->in_sys) {
293 /* add the terminator */
294 total_sz += (rule_set_sz +
295 IPA_FLT_TABLE_WORD_SIZE);
296 total_sz = (total_sz +
297 IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) &
298 ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
299 }
300 }
301 }
302
303 *hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
304 total_sz += *hdr_sz;
305 IPADBG("FLT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip);
306
307 return total_sz;
308}
309
/*
 * ipa_generate_flt_hw_tbl_common() - serialize the header word(s) and
 * rule-set bodies for every non-empty filter table (the global table plus
 * one table per pipe)
 * @ip: the ip address family type
 * @base: start of the body area where local rule-sets are serialized
 * @hdr: start of the (first) header block, one word per table
 * @body_start_offset: offset of the body area within the IPA local memory
 *	region, added to body offsets written into the header
 * @hdr2: start of the second header block for the split-header layout
 *	(v2, pipes 11..19); NULL selects the contiguous v1.1 layout where
 *	header words are written sequentially through @hdr
 * @hdr_top: [out] bitmap of present tables (bit 0 = global, bit i+1 = pipe i)
 *
 * Local tables get (offset | IPA_FLT_BIT_MASK) in their header word;
 * system-memory tables are serialized into freshly allocated DMA buffers
 * whose physical address goes into the header word, and ownership of the
 * buffer is handed to tbl->curr_mem (the old buffer moves to prev_mem for
 * later reaping).
 *
 * Returns: 0 on success, negative on failure
 *
 * caller needs to hold any needed locks to ensure integrity
 */
static int ipa_generate_flt_hw_tbl_common(enum ipa_ip_type ip, u8 *base,
		u8 *hdr, u32 body_start_offset, u8 *hdr2, u32 *hdr_top)
{
	struct ipa_flt_tbl *tbl;
	struct ipa_flt_entry *entry;
	int i;
	u32 offset;
	u8 *body;
	struct ipa_mem_buffer flt_tbl_mem;
	u8 *ftbl_membody;

	*hdr_top = 0;
	body = base;

/* write pipe "idx"'s header word in the split layout: words for pipes
 * 0..5 live in the first block (after the global word), pipes 11..19 in
 * the second block; pipes 6..10 are never expected here
 */
#define IPA_WRITE_FLT_HDR(idx, val) { \
	if (idx <= 5) { \
		*((u32 *)hdr + 1 + idx) = val; \
	} else if (idx >= 6 && idx <= 10) { \
		WARN_ON(1); \
	} else if (idx >= 11 && idx <= 19) { \
		*((u32 *)hdr2 + idx - 11) = val; \
	} else { \
		WARN_ON(1); \
	} \
}

	tbl = &ipa_ctx->glob_flt_tbl[ip];

	if (!list_empty(&tbl->head_flt_rule_list)) {
		*hdr_top |= IPA_FLT_BIT_MASK;

		if (!tbl->in_sys) {
			offset = body - base + body_start_offset;
			if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) {
				IPAERR("offset is not word multiple %d\n",
						offset);
				goto proc_err;
			}

			offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
			/* rule is at an offset from base */
			offset |= IPA_FLT_BIT_MASK;

			if (hdr2)
				*(u32 *)hdr = offset;
			else
				hdr = ipa_write_32(offset, hdr);

			/* generate the rule-set */
			list_for_each_entry(entry, &tbl->head_flt_rule_list,
					link) {
				if (ipa_generate_flt_hw_rule(ip, entry, body)) {
					IPAERR("failed to gen HW FLT rule\n");
					goto proc_err;
				}
				body += entry->hw_len;
			}

			/* write the rule-set terminator */
			body = ipa_write_32(0, body);
			if ((long)body & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT)
				/* advance body to next word boundary */
				body = body + (IPA_FLT_TABLE_WORD_SIZE -
						((long)body &
						IPA_FLT_ENTRY_MEMORY_ALLIGNMENT));
		} else {
			/* tbl->sz was computed by the earlier sizing pass */
			if (tbl->sz == 0) {
				IPAERR("tbl size is 0\n");
				WARN_ON(1);
				goto proc_err;
			}

			/* allocate memory for the flt tbl */
			flt_tbl_mem.size = tbl->sz;
			flt_tbl_mem.base =
			   dma_alloc_coherent(ipa_ctx->pdev, flt_tbl_mem.size,
					   &flt_tbl_mem.phys_base, GFP_KERNEL);
			if (!flt_tbl_mem.base) {
				IPAERR("fail to alloc DMA buff of size %d\n",
						flt_tbl_mem.size);
				WARN_ON(1);
				goto proc_err;
			}

			WARN_ON(flt_tbl_mem.phys_base &
					IPA_FLT_ENTRY_MEMORY_ALLIGNMENT);
			ftbl_membody = flt_tbl_mem.base;
			memset(flt_tbl_mem.base, 0, flt_tbl_mem.size);

			/* header word carries the physical address (LSB
			 * clear = system memory)
			 */
			if (hdr2)
				*(u32 *)hdr = flt_tbl_mem.phys_base;
			else
				hdr = ipa_write_32(flt_tbl_mem.phys_base, hdr);

			/* generate the rule-set */
			list_for_each_entry(entry, &tbl->head_flt_rule_list,
					link) {
				if (ipa_generate_flt_hw_rule(ip, entry,
							ftbl_membody)) {
					IPAERR("failed to gen HW FLT rule\n");
					WARN_ON(1);
				}
				ftbl_membody += entry->hw_len;
			}

			/* write the rule-set terminator */
			ftbl_membody = ipa_write_32(0, ftbl_membody);
			/* retire the previous buffer; it is freed later by
			 * __ipa_reap_sys_flt_tbls() once HW moved over
			 */
			if (tbl->curr_mem.phys_base) {
				WARN_ON(tbl->prev_mem.phys_base);
				tbl->prev_mem = tbl->curr_mem;
			}
			tbl->curr_mem = flt_tbl_mem;
		}
	}

	for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
		tbl = &ipa_ctx->flt_tbl[i][ip];
		if (!list_empty(&tbl->head_flt_rule_list)) {
			/* pipe "i" is at bit "i+1" */
			*hdr_top |= (1 << (i + 1));

			if (!tbl->in_sys) {
				offset = body - base + body_start_offset;
				if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) {
					IPAERR("ofst is not word multiple %d\n",
							offset);
					goto proc_err;
				}
				offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
				/* rule is at an offset from base */
				offset |= IPA_FLT_BIT_MASK;

				if (hdr2)
					IPA_WRITE_FLT_HDR(i, offset)
				else
					hdr = ipa_write_32(offset, hdr);

				/* generate the rule-set */
				list_for_each_entry(entry,
						&tbl->head_flt_rule_list,
						link) {
					if (ipa_generate_flt_hw_rule(ip, entry,
								body)) {
						IPAERR("fail gen FLT rule\n");
						goto proc_err;
					}
					body += entry->hw_len;
				}

				/* write the rule-set terminator */
				body = ipa_write_32(0, body);
				if ((long)body &
						IPA_FLT_ENTRY_MEMORY_ALLIGNMENT)
					/* advance body to next word boundary */
					body = body + (IPA_FLT_TABLE_WORD_SIZE -
							((long)body &
							IPA_FLT_ENTRY_MEMORY_ALLIGNMENT));
			} else {
				if (tbl->sz == 0) {
					IPAERR("tbl size is 0\n");
					WARN_ON(1);
					goto proc_err;
				}

				/* allocate memory for the flt tbl */
				flt_tbl_mem.size = tbl->sz;
				flt_tbl_mem.base =
				   dma_alloc_coherent(ipa_ctx->pdev,
						   flt_tbl_mem.size,
						   &flt_tbl_mem.phys_base,
						   GFP_KERNEL);
				if (!flt_tbl_mem.base) {
					IPAERR("fail alloc DMA buff size %d\n",
							flt_tbl_mem.size);
					WARN_ON(1);
					goto proc_err;
				}

				WARN_ON(flt_tbl_mem.phys_base &
					IPA_FLT_ENTRY_MEMORY_ALLIGNMENT);

				ftbl_membody = flt_tbl_mem.base;
				memset(flt_tbl_mem.base, 0, flt_tbl_mem.size);

				if (hdr2)
					IPA_WRITE_FLT_HDR(i,
						flt_tbl_mem.phys_base)
				else
					hdr = ipa_write_32(
						flt_tbl_mem.phys_base, hdr);

				/* generate the rule-set */
				list_for_each_entry(entry,
						&tbl->head_flt_rule_list,
						link) {
					if (ipa_generate_flt_hw_rule(ip, entry,
								ftbl_membody)) {
						IPAERR("fail gen FLT rule\n");
						WARN_ON(1);
					}
					ftbl_membody += entry->hw_len;
				}

				/* write the rule-set terminator */
				ftbl_membody =
					ipa_write_32(0, ftbl_membody);
				if (tbl->curr_mem.phys_base) {
					WARN_ON(tbl->prev_mem.phys_base);
					tbl->prev_mem = tbl->curr_mem;
				}
				tbl->curr_mem = flt_tbl_mem;
			}
		}
	}

	return 0;

proc_err:
	return -EPERM;
}
530
531
532/**
533 * ipa_generate_flt_hw_tbl() - generates the filtering hardware table
534 * @ip: [in] the ip address family type
535 * @mem: [out] buffer to put the filtering table
536 *
537 * Returns: 0 on success, negative on failure
538 */
539static int ipa_generate_flt_hw_tbl_v1_1(enum ipa_ip_type ip,
540 struct ipa_mem_buffer *mem)
541{
542 u32 hdr_top = 0;
543 u32 hdr_sz;
544 u8 *hdr;
545 u8 *body;
546 u8 *base;
Skylar Chang88610112016-10-19 13:30:44 -0700547 int res;
Amir Levy9659e592016-10-27 18:08:27 +0300548
Skylar Chang88610112016-10-19 13:30:44 -0700549 res = ipa_get_flt_hw_tbl_size(ip, &hdr_sz);
550 if (res < 0) {
551 IPAERR("ipa_get_flt_hw_tbl_size failed %d\n", res);
552 return res;
553 }
554
555 mem->size = res;
Amir Levy9659e592016-10-27 18:08:27 +0300556 mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size);
557
558 if (mem->size == 0) {
559 IPAERR("flt tbl empty ip=%d\n", ip);
560 goto error;
561 }
562 mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
563 &mem->phys_base, GFP_KERNEL);
564 if (!mem->base) {
565 IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
566 goto error;
567 }
568
569 memset(mem->base, 0, mem->size);
570
571 /* build the flt tbl in the DMA buffer to submit to IPA HW */
572 base = hdr = (u8 *)mem->base;
573 body = base + hdr_sz;
574
575 /* write a dummy header to move cursor */
576 hdr = ipa_write_32(hdr_top, hdr);
577
578 if (ipa_generate_flt_hw_tbl_common(ip, body, hdr, hdr_sz, 0,
579 &hdr_top)) {
580 IPAERR("fail to generate FLT HW table\n");
581 goto proc_err;
582 }
583
584 /* now write the hdr_top */
585 ipa_write_32(hdr_top, base);
586
587 IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
588
589 return 0;
590
591proc_err:
592 dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base);
593error:
594 return -EPERM;
595}
596
597static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip)
598{
599 struct ipa_flt_tbl *tbl;
600 int i;
601
602 tbl = &ipa_ctx->glob_flt_tbl[ip];
603 if (tbl->prev_mem.phys_base) {
Utkarsh Saxena41d57c52016-11-16 12:04:28 +0530604 IPADBG_LOW("reaping glob flt tbl (prev) ip=%d\n", ip);
Amir Levy9659e592016-10-27 18:08:27 +0300605 dma_free_coherent(ipa_ctx->pdev, tbl->prev_mem.size,
606 tbl->prev_mem.base, tbl->prev_mem.phys_base);
607 memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
608 }
609
610 if (list_empty(&tbl->head_flt_rule_list)) {
611 if (tbl->curr_mem.phys_base) {
Utkarsh Saxena41d57c52016-11-16 12:04:28 +0530612 IPADBG_LOW("reaping glob flt tbl (curr) ip=%d\n", ip);
Amir Levy9659e592016-10-27 18:08:27 +0300613 dma_free_coherent(ipa_ctx->pdev, tbl->curr_mem.size,
614 tbl->curr_mem.base,
615 tbl->curr_mem.phys_base);
616 memset(&tbl->curr_mem, 0, sizeof(tbl->curr_mem));
617 }
618 }
619
620 for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
621 tbl = &ipa_ctx->flt_tbl[i][ip];
622 if (tbl->prev_mem.phys_base) {
Utkarsh Saxena41d57c52016-11-16 12:04:28 +0530623 IPADBG_LOW("reaping flt tbl");
624 IPADBG_LOW("(prev) pipe=%d ip=%d\n", i, ip);
Amir Levy9659e592016-10-27 18:08:27 +0300625 dma_free_coherent(ipa_ctx->pdev, tbl->prev_mem.size,
626 tbl->prev_mem.base,
627 tbl->prev_mem.phys_base);
628 memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
629 }
630
631 if (list_empty(&tbl->head_flt_rule_list)) {
632 if (tbl->curr_mem.phys_base) {
Utkarsh Saxena41d57c52016-11-16 12:04:28 +0530633 IPADBG_LOW("reaping flt tbl");
634 IPADBG_LOW("(curr) pipe=%d ip=%d\n",
Amir Levy9659e592016-10-27 18:08:27 +0300635 i, ip);
636 dma_free_coherent(ipa_ctx->pdev,
637 tbl->curr_mem.size,
638 tbl->curr_mem.base,
639 tbl->curr_mem.phys_base);
640 memset(&tbl->curr_mem, 0,
641 sizeof(tbl->curr_mem));
642 }
643 }
644 }
645}
646
/*
 * __ipa_commit_flt_v1_1() - commit the filter table for one IP family to
 * IPA HW (v1.1) via an IP_V4/V6_FILTER_INIT immediate command
 * @ip: the ip address family type
 *
 * Generates the full table into a DMA buffer, sends the init command, then
 * reaps retired system-memory tables and frees the staging buffer.
 *
 * Returns: 0 on success, -EPERM on any failure
 *
 * caller needs to hold any needed locks to ensure integrity
 */
int __ipa_commit_flt_v1_1(enum ipa_ip_type ip)
{
	struct ipa_desc desc = { 0 };
	struct ipa_mem_buffer *mem;
	void *cmd;
	struct ipa_ip_v4_filter_init *v4;
	struct ipa_ip_v6_filter_init *v6;
	u16 avail;
	u16 size;
	/* some targets need the immediate command payload in ZONE_DMA */
	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);

	mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
	if (!mem) {
		IPAERR("failed to alloc memory object\n");
		goto fail_alloc_mem;
	}

	/* capacity limit differs for local-RAM vs DDR resident tables */
	if (ip == IPA_IP_v4) {
		avail = ipa_ctx->ip4_flt_tbl_lcl ? IPA_MEM_v1_RAM_V4_FLT_SIZE :
			IPA_MEM_PART(v4_flt_size_ddr);
		size = sizeof(struct ipa_ip_v4_filter_init);
	} else {
		avail = ipa_ctx->ip6_flt_tbl_lcl ? IPA_MEM_v1_RAM_V6_FLT_SIZE :
			IPA_MEM_PART(v6_flt_size_ddr);
		size = sizeof(struct ipa_ip_v6_filter_init);
	}
	cmd = kmalloc(size, flag);
	if (!cmd) {
		IPAERR("failed to alloc immediate command object\n");
		goto fail_alloc_cmd;
	}

	if (ipa_generate_flt_hw_tbl_v1_1(ip, mem)) {
		IPAERR("fail to generate FLT HW TBL ip %d\n", ip);
		goto fail_hw_tbl_gen;
	}

	if (mem->size > avail) {
		IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
		goto fail_send_cmd;
	}

	if (ip == IPA_IP_v4) {
		v4 = (struct ipa_ip_v4_filter_init *)cmd;
		desc.opcode = IPA_IP_V4_FILTER_INIT;
		v4->ipv4_rules_addr = mem->phys_base;
		v4->size_ipv4_rules = mem->size;
		v4->ipv4_addr = IPA_MEM_v1_RAM_V4_FLT_OFST;
	} else {
		v6 = (struct ipa_ip_v6_filter_init *)cmd;
		desc.opcode = IPA_IP_V6_FILTER_INIT;
		v6->ipv6_rules_addr = mem->phys_base;
		v6->size_ipv6_rules = mem->size;
		v6->ipv6_addr = IPA_MEM_v1_RAM_V6_FLT_OFST;
	}

	desc.pyld = cmd;
	desc.len = size;
	desc.type = IPA_IMM_CMD_DESC;
	IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);

	if (ipa_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		goto fail_send_cmd;
	}

	/* HW has consumed the new table; retire old sys-memory buffers */
	__ipa_reap_sys_flt_tbls(ip);
	dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base);
	kfree(cmd);
	kfree(mem);

	return 0;

fail_send_cmd:
	if (mem->phys_base)
		dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
				mem->phys_base);
fail_hw_tbl_gen:
	kfree(cmd);
fail_alloc_cmd:
	kfree(mem);
fail_alloc_mem:

	return -EPERM;
}
732
/*
 * ipa_generate_flt_hw_tbl_v2() - build the v2 filter table images: two
 * header blocks plus an optional body buffer
 * @ip: the ip address family type
 * @mem: [out] DMA buffer for the table body (size 0 when all tables are
 *	empty or system-memory resident)
 * @head1: [out] DMA buffer for header words 0..6 (global + pipes 0..5)
 * @head2: [out] DMA buffer for header words of pipes 11..19
 *
 * Header words are pre-seeded with the empty routing table address so
 * unused slots are still valid for HW. On success the caller owns all
 * three buffers.
 *
 * Returns: 0 on success, negative on failure
 */
static int ipa_generate_flt_hw_tbl_v2(enum ipa_ip_type ip,
		struct ipa_mem_buffer *mem, struct ipa_mem_buffer *head1,
		struct ipa_mem_buffer *head2)
{
	int i;
	u32 hdr_sz;
	int num_words;
	u32 *entr;
	u32 body_start_offset;
	u32 hdr_top;
	int res;

	/* body offsets are relative to the apps sub-region of the flt area */
	if (ip == IPA_IP_v4)
		body_start_offset = IPA_MEM_PART(apps_v4_flt_ofst) -
			IPA_MEM_PART(v4_flt_ofst);
	else
		body_start_offset = IPA_MEM_PART(apps_v6_flt_ofst) -
			IPA_MEM_PART(v6_flt_ofst);

	/* first header block: global table + pipes 0..5 */
	num_words = 7;
	head1->size = num_words * 4;
	head1->base = dma_alloc_coherent(ipa_ctx->pdev, head1->size,
			&head1->phys_base, GFP_KERNEL);
	if (!head1->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", head1->size);
		goto err;
	}
	entr = (u32 *)head1->base;
	for (i = 0; i < num_words; i++) {
		*entr = ipa_ctx->empty_rt_tbl_mem.phys_base;
		entr++;
	}

	/* second header block: pipes 11..19 */
	num_words = 9;
	head2->size = num_words * 4;
	head2->base = dma_alloc_coherent(ipa_ctx->pdev, head2->size,
			&head2->phys_base, GFP_KERNEL);
	if (!head2->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", head2->size);
		goto head_err;
	}
	entr = (u32 *)head2->base;
	for (i = 0; i < num_words; i++) {
		*entr = ipa_ctx->empty_rt_tbl_mem.phys_base;
		entr++;
	}

	res = ipa_get_flt_hw_tbl_size(ip, &hdr_sz);
	if (res < 0) {
		IPAERR("ipa_get_flt_hw_tbl_size failed %d\n", res);
		goto body_err;
	}

	/* body size = total minus the header block accounted above */
	mem->size = res;
	mem->size -= hdr_sz;
	mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size);

	if (mem->size) {
		mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
				&mem->phys_base, GFP_KERNEL);
		if (!mem->base) {
			IPAERR("fail to alloc DMA buff of size %d\n",
					mem->size);
			goto body_err;
		}
		memset(mem->base, 0, mem->size);
	}

	if (ipa_generate_flt_hw_tbl_common(ip, mem->base, head1->base,
				body_start_offset, head2->base, &hdr_top)) {
		IPAERR("fail to generate FLT HW table\n");
		goto proc_err;
	}

	IPADBG("HEAD1\n");
	IPA_DUMP_BUFF(head1->base, head1->phys_base, head1->size);
	IPADBG("HEAD2\n");
	IPA_DUMP_BUFF(head2->base, head2->phys_base, head2->size);
	if (mem->size) {
		IPADBG("BODY\n");
		IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
	}

	return 0;

proc_err:
	if (mem->size)
		dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
				mem->phys_base);
body_err:
	dma_free_coherent(ipa_ctx->pdev, head2->size, head2->base,
			head2->phys_base);
head_err:
	dma_free_coherent(ipa_ctx->pdev, head1->size, head1->base,
			head1->phys_base);
err:
	return -EPERM;
}
831
832int __ipa_commit_flt_v2(enum ipa_ip_type ip)
833{
834 struct ipa_desc *desc;
835 struct ipa_hw_imm_cmd_dma_shared_mem *cmd;
836 struct ipa_mem_buffer body;
837 struct ipa_mem_buffer head1;
838 struct ipa_mem_buffer head2;
839 int rc = 0;
840 u32 local_addrb;
841 u32 local_addrh;
842 bool lcl;
843 int num_desc = 0;
844 int i;
845 u16 avail;
Utkarsh Saxena67d59b62017-05-16 22:41:50 +0530846 gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
Amir Levy9659e592016-10-27 18:08:27 +0300847
848 desc = kzalloc(16 * sizeof(*desc), GFP_ATOMIC);
849 if (desc == NULL) {
850 IPAERR("fail to alloc desc blob ip %d\n", ip);
851 rc = -ENOMEM;
852 goto fail_desc;
853 }
854
Utkarsh Saxena67d59b62017-05-16 22:41:50 +0530855 cmd = kzalloc(16 * sizeof(*cmd), flag);
Amir Levy9659e592016-10-27 18:08:27 +0300856 if (cmd == NULL) {
857 IPAERR("fail to alloc cmd blob ip %d\n", ip);
858 rc = -ENOMEM;
859 goto fail_imm;
860 }
861
862 if (ip == IPA_IP_v4) {
863 avail = ipa_ctx->ip4_flt_tbl_lcl ?
864 IPA_MEM_PART(apps_v4_flt_size) :
865 IPA_MEM_PART(v4_flt_size_ddr);
866 local_addrh = ipa_ctx->smem_restricted_bytes +
867 IPA_MEM_PART(v4_flt_ofst) + 4;
868 local_addrb = ipa_ctx->smem_restricted_bytes +
869 IPA_MEM_PART(apps_v4_flt_ofst);
870 lcl = ipa_ctx->ip4_flt_tbl_lcl;
871 } else {
872 avail = ipa_ctx->ip6_flt_tbl_lcl ?
873 IPA_MEM_PART(apps_v6_flt_size) :
874 IPA_MEM_PART(v6_flt_size_ddr);
875 local_addrh = ipa_ctx->smem_restricted_bytes +
876 IPA_MEM_PART(v6_flt_ofst) + 4;
877 local_addrb = ipa_ctx->smem_restricted_bytes +
878 IPA_MEM_PART(apps_v6_flt_ofst);
879 lcl = ipa_ctx->ip6_flt_tbl_lcl;
880 }
881
882 if (ipa_generate_flt_hw_tbl_v2(ip, &body, &head1, &head2)) {
883 IPAERR("fail to generate FLT HW TBL ip %d\n", ip);
884 rc = -EFAULT;
885 goto fail_gen;
886 }
887
888 if (body.size > avail) {
889 IPAERR("tbl too big, needed %d avail %d\n", body.size, avail);
890 goto fail_send_cmd;
891 }
892
893 cmd[num_desc].size = 4;
894 cmd[num_desc].system_addr = head1.phys_base;
895 cmd[num_desc].local_addr = local_addrh;
896
897 desc[num_desc].opcode = IPA_DMA_SHARED_MEM;
898 desc[num_desc].pyld = &cmd[num_desc];
899 desc[num_desc].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
900 desc[num_desc++].type = IPA_IMM_CMD_DESC;
901
902 for (i = 0; i < 6; i++) {
903 if (ipa_ctx->skip_ep_cfg_shadow[i]) {
Utkarsh Saxena41d57c52016-11-16 12:04:28 +0530904 IPADBG_LOW("skip %d\n", i);
Amir Levy9659e592016-10-27 18:08:27 +0300905 continue;
906 }
907
908 if (ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS) == i ||
909 ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS) == i ||
910 ipa2_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD) == i ||
911 (ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD) == i
912 && ipa_ctx->modem_cfg_emb_pipe_flt)) {
Utkarsh Saxena41d57c52016-11-16 12:04:28 +0530913 IPADBG_LOW("skip %d\n", i);
Amir Levy9659e592016-10-27 18:08:27 +0300914 continue;
915 }
916
917 if (ip == IPA_IP_v4) {
918 local_addrh = ipa_ctx->smem_restricted_bytes +
919 IPA_MEM_PART(v4_flt_ofst) +
920 8 + i * 4;
921 } else {
922 local_addrh = ipa_ctx->smem_restricted_bytes +
923 IPA_MEM_PART(v6_flt_ofst) +
924 8 + i * 4;
925 }
926 cmd[num_desc].size = 4;
927 cmd[num_desc].system_addr = head1.phys_base + 4 + i * 4;
928 cmd[num_desc].local_addr = local_addrh;
929
930 desc[num_desc].opcode = IPA_DMA_SHARED_MEM;
931 desc[num_desc].pyld = &cmd[num_desc];
932 desc[num_desc].len =
933 sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
934 desc[num_desc++].type = IPA_IMM_CMD_DESC;
935 }
936
937 for (i = 11; i < ipa_ctx->ipa_num_pipes; i++) {
938 if (ipa_ctx->skip_ep_cfg_shadow[i]) {
Utkarsh Saxena41d57c52016-11-16 12:04:28 +0530939 IPADBG_LOW("skip %d\n", i);
Amir Levy9659e592016-10-27 18:08:27 +0300940 continue;
941 }
942 if (ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD) == i &&
943 ipa_ctx->modem_cfg_emb_pipe_flt) {
Utkarsh Saxena41d57c52016-11-16 12:04:28 +0530944 IPADBG_LOW("skip %d\n", i);
Amir Levy9659e592016-10-27 18:08:27 +0300945 continue;
946 }
947 if (ip == IPA_IP_v4) {
948 local_addrh = ipa_ctx->smem_restricted_bytes +
949 IPA_MEM_PART(v4_flt_ofst) +
950 13 * 4 + (i - 11) * 4;
951 } else {
952 local_addrh = ipa_ctx->smem_restricted_bytes +
953 IPA_MEM_PART(v6_flt_ofst) +
954 13 * 4 + (i - 11) * 4;
955 }
956 cmd[num_desc].size = 4;
957 cmd[num_desc].system_addr = head2.phys_base + (i - 11) * 4;
958 cmd[num_desc].local_addr = local_addrh;
959
960 desc[num_desc].opcode = IPA_DMA_SHARED_MEM;
961 desc[num_desc].pyld = &cmd[num_desc];
962 desc[num_desc].len =
963 sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
964 desc[num_desc++].type = IPA_IMM_CMD_DESC;
965 }
966
967 if (lcl) {
968 cmd[num_desc].size = body.size;
969 cmd[num_desc].system_addr = body.phys_base;
970 cmd[num_desc].local_addr = local_addrb;
971
972 desc[num_desc].opcode = IPA_DMA_SHARED_MEM;
973 desc[num_desc].pyld = &cmd[num_desc];
974 desc[num_desc].len =
975 sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
976 desc[num_desc++].type = IPA_IMM_CMD_DESC;
977
978 if (ipa_send_cmd(num_desc, desc)) {
979 IPAERR("fail to send immediate command\n");
980 rc = -EFAULT;
981 goto fail_send_cmd;
982 }
983 } else {
984 if (ipa_send_cmd(num_desc, desc)) {
985 IPAERR("fail to send immediate command\n");
986 rc = -EFAULT;
987 goto fail_send_cmd;
988 }
989 }
990
991 __ipa_reap_sys_flt_tbls(ip);
992
993fail_send_cmd:
994 if (body.size)
995 dma_free_coherent(ipa_ctx->pdev, body.size, body.base,
996 body.phys_base);
997 dma_free_coherent(ipa_ctx->pdev, head1.size, head1.base,
998 head1.phys_base);
999 dma_free_coherent(ipa_ctx->pdev, head2.size, head2.base,
1000 head2.phys_base);
1001fail_gen:
1002 kfree(cmd);
1003fail_imm:
1004 kfree(desc);
1005fail_desc:
1006 return rc;
1007}
1008
/*
 * __ipa_add_flt_rule() - validate a filter rule and insert it into a
 * filter table's rule list
 * @tbl: the filter table to insert into
 * @ip: the ip address family type (used to bound modem RT indices)
 * @rule: the rule to add; its routing destination is validated against the
 *	rule action
 * @add_rear: non-zero to append at the tail (respecting a sticky rear
 *	entry), zero to prepend at the head
 * @rule_hdl: [out] handle (id) allocated for the new rule
 * @user: true when installed on behalf of IPACM (userspace), recorded on
 *	the entry
 *
 * Takes a reference on the routing table the rule points at. The rule is
 * only marked in SW here; a subsequent commit pushes it to HW.
 *
 * Returns: 0 on success, -EPERM on failure
 *
 * caller needs to hold any needed locks to ensure integrity
 */
static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip,
			      const struct ipa_flt_rule *rule, u8 add_rear,
			      u32 *rule_hdl, bool user)
{
	struct ipa_flt_entry *entry;
	struct ipa_rt_tbl *rt_tbl = NULL;
	int id;

	if (rule->action != IPA_PASS_TO_EXCEPTION) {
		if (!rule->eq_attrib_type) {
			/* rule routes via a RT table handle: resolve and
			 * sanity-check it
			 */
			if (!rule->rt_tbl_hdl) {
				IPAERR_RL("invalid RT tbl\n");
				goto error;
			}

			rt_tbl = ipa_id_find(rule->rt_tbl_hdl);
			if (rt_tbl == NULL) {
				IPAERR_RL("RT tbl not found\n");
				goto error;
			}

			if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) {
				IPAERR_RL("RT table cookie is invalid\n");
				goto error;
			}
		} else {
			/* equation-type rule carries a raw RT index; it must
			 * stay within the modem-owned index range
			 */
			if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ?
				IPA_MEM_PART(v4_modem_rt_index_hi) :
				IPA_MEM_PART(v6_modem_rt_index_hi))) {
				IPAERR_RL("invalid RT tbl\n");
				goto error;
			}
		}
	} else {
		/* exception rules must not name a routing table */
		if (rule->rt_tbl_idx > 0) {
			IPAERR_RL("invalid RT tbl\n");
			goto error;
		}
	}

	entry = kmem_cache_zalloc(ipa_ctx->flt_rule_cache, GFP_KERNEL);
	if (!entry) {
		IPAERR("failed to alloc FLT rule object\n");
		goto error;
	}
	INIT_LIST_HEAD(&entry->link);
	entry->rule = *rule;
	entry->cookie = IPA_FLT_COOKIE;
	entry->rt_tbl = rt_tbl;
	entry->tbl = tbl;
	if (add_rear) {
		/* with a sticky rear entry, insert just before it */
		if (tbl->sticky_rear)
			list_add_tail(&entry->link,
					tbl->head_flt_rule_list.prev);
		else
			list_add_tail(&entry->link, &tbl->head_flt_rule_list);
	} else {
		list_add(&entry->link, &tbl->head_flt_rule_list);
	}
	tbl->rule_cnt++;
	if (entry->rt_tbl)
		entry->rt_tbl->ref_cnt++;
	id = ipa_id_alloc(entry);
	if (id < 0) {
		IPAERR("failed to add to tree\n");
		WARN_ON(1);
		goto ipa_insert_failed;
	}
	*rule_hdl = id;
	entry->id = id;
	entry->ipacm_installed = user;
	IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt);

	return 0;
ipa_insert_failed:
	/* roll back the insertion in reverse order */
	tbl->rule_cnt--;
	if (entry->rt_tbl)
		entry->rt_tbl->ref_cnt--;
	list_del(&entry->link);
	kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
error:
	return -EPERM;
}
1092
1093static int __ipa_del_flt_rule(u32 rule_hdl)
1094{
1095 struct ipa_flt_entry *entry;
1096 int id;
1097
1098 entry = ipa_id_find(rule_hdl);
1099 if (entry == NULL) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301100 IPAERR_RL("lookup failed\n");
Amir Levy9659e592016-10-27 18:08:27 +03001101 return -EINVAL;
1102 }
1103
Mohammed Javid93e94ce2017-06-15 15:39:04 +05301104 if (entry->cookie != IPA_FLT_COOKIE) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301105 IPAERR_RL("bad params\n");
Amir Levy9659e592016-10-27 18:08:27 +03001106 return -EINVAL;
1107 }
1108 id = entry->id;
1109
1110 list_del(&entry->link);
1111 entry->tbl->rule_cnt--;
1112 if (entry->rt_tbl)
1113 entry->rt_tbl->ref_cnt--;
Utkarsh Saxena41d57c52016-11-16 12:04:28 +05301114 IPADBG_LOW("del flt rule rule_cnt=%d\n", entry->tbl->rule_cnt);
Amir Levy9659e592016-10-27 18:08:27 +03001115 entry->cookie = 0;
1116 kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
1117
1118 /* remove the handle from the database */
1119 ipa_id_remove(id);
1120
1121 return 0;
1122}
1123
1124static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule,
1125 enum ipa_ip_type ip)
1126{
1127 struct ipa_flt_entry *entry;
1128 struct ipa_rt_tbl *rt_tbl = NULL;
1129
1130 entry = ipa_id_find(frule->rule_hdl);
1131 if (entry == NULL) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301132 IPAERR_RL("lookup failed\n");
Amir Levy9659e592016-10-27 18:08:27 +03001133 goto error;
1134 }
1135
Mohammed Javid93e94ce2017-06-15 15:39:04 +05301136 if (entry->cookie != IPA_FLT_COOKIE) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301137 IPAERR_RL("bad params\n");
Amir Levy9659e592016-10-27 18:08:27 +03001138 goto error;
1139 }
1140
1141 if (entry->rt_tbl)
1142 entry->rt_tbl->ref_cnt--;
1143
1144 if (frule->rule.action != IPA_PASS_TO_EXCEPTION) {
1145 if (!frule->rule.eq_attrib_type) {
1146 if (!frule->rule.rt_tbl_hdl) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301147 IPAERR_RL("invalid RT tbl\n");
Amir Levy9659e592016-10-27 18:08:27 +03001148 goto error;
1149 }
1150
1151 rt_tbl = ipa_id_find(frule->rule.rt_tbl_hdl);
1152 if (rt_tbl == NULL) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301153 IPAERR_RL("RT tbl not found\n");
Amir Levy9659e592016-10-27 18:08:27 +03001154 goto error;
1155 }
1156
Mohammed Javid93e94ce2017-06-15 15:39:04 +05301157 if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301158 IPAERR_RL("RT table cookie is invalid\n");
Amir Levy9659e592016-10-27 18:08:27 +03001159 goto error;
1160 }
1161 } else {
1162 if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ?
1163 IPA_MEM_PART(v4_modem_rt_index_hi) :
1164 IPA_MEM_PART(v6_modem_rt_index_hi))) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301165 IPAERR_RL("invalid RT tbl\n");
Amir Levy9659e592016-10-27 18:08:27 +03001166 goto error;
1167 }
1168 }
Mohammed Javid7bfd0142017-10-05 23:39:39 +05301169 } else {
1170 if (frule->rule.rt_tbl_idx > 0) {
1171 IPAERR_RL("invalid RT tbl\n");
1172 goto error;
1173 }
Amir Levy9659e592016-10-27 18:08:27 +03001174 }
1175
1176 entry->rule = frule->rule;
1177 entry->rt_tbl = rt_tbl;
1178 if (entry->rt_tbl)
1179 entry->rt_tbl->ref_cnt++;
1180 entry->hw_len = 0;
1181
1182 return 0;
1183
1184error:
1185 return -EPERM;
1186}
1187
1188static int __ipa_add_global_flt_rule(enum ipa_ip_type ip,
1189 const struct ipa_flt_rule *rule, u8 add_rear, u32 *rule_hdl)
1190{
1191 struct ipa_flt_tbl *tbl;
1192
1193 if (rule == NULL || rule_hdl == NULL) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301194 IPAERR_RL("bad parms rule=%p rule_hdl=%p\n", rule, rule_hdl);
Amir Levy9659e592016-10-27 18:08:27 +03001195
1196 return -EINVAL;
1197 }
1198
1199 tbl = &ipa_ctx->glob_flt_tbl[ip];
Utkarsh Saxena41d57c52016-11-16 12:04:28 +05301200 IPADBG_LOW("add global flt rule ip=%d\n", ip);
Amir Levy9659e592016-10-27 18:08:27 +03001201
Skylar Chang68c37d82018-04-07 16:42:36 -07001202 return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl, false);
Amir Levy9659e592016-10-27 18:08:27 +03001203}
1204
1205static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
1206 const struct ipa_flt_rule *rule, u8 add_rear,
Skylar Chang68c37d82018-04-07 16:42:36 -07001207 u32 *rule_hdl, bool user)
Amir Levy9659e592016-10-27 18:08:27 +03001208{
1209 struct ipa_flt_tbl *tbl;
1210 int ipa_ep_idx;
1211
1212 if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301213 IPAERR_RL("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
Amir Levy9659e592016-10-27 18:08:27 +03001214 rule_hdl, ep);
1215
1216 return -EINVAL;
1217 }
1218 ipa_ep_idx = ipa2_get_ep_mapping(ep);
1219 if (ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301220 IPAERR_RL("ep not valid ep=%d\n", ep);
Amir Levy9659e592016-10-27 18:08:27 +03001221 return -EINVAL;
1222 }
1223 if (ipa_ctx->ep[ipa_ep_idx].valid == 0)
1224 IPADBG("ep not connected ep_idx=%d\n", ipa_ep_idx);
1225
1226 tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][ip];
Utkarsh Saxena41d57c52016-11-16 12:04:28 +05301227 IPADBG_LOW("add ep flt rule ip=%d ep=%d\n", ip, ep);
Amir Levy9659e592016-10-27 18:08:27 +03001228
Skylar Chang68c37d82018-04-07 16:42:36 -07001229 return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl, user);
Amir Levy9659e592016-10-27 18:08:27 +03001230}
1231
/**
 * ipa2_add_flt_rule() - Add the specified filtering rules to SW and optionally
 * commit to IPA HW
 * @rules: [inout] set of filtering rules to add; per-rule status and handle
 *         are written back
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa2_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
{
	/* kernel-originated rules are not tagged as userspace-installed */
	return ipa2_add_flt_rule_usr(rules, false);
}
1245
1246/**
1247 * ipa2_add_flt_rule_usr() - Add the specified filtering rules
1248 * to SW and optionally commit to IPA HW
1249 * @rules: [inout] set of filtering rules to add
1250 * @user_only: [in] indicate rules installed by userspace
1251 *
1252 * Returns: 0 on success, negative on failure
1253 *
1254 * Note: Should not be called from atomic context
1255 */
1256int ipa2_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only)
1257{
Amir Levy9659e592016-10-27 18:08:27 +03001258 int i;
1259 int result;
1260
1261 if (rules == NULL || rules->num_rules == 0 ||
1262 rules->ip >= IPA_IP_MAX) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301263 IPAERR_RL("bad parm\n");
Amir Levy9659e592016-10-27 18:08:27 +03001264
1265 return -EINVAL;
1266 }
1267
1268 mutex_lock(&ipa_ctx->lock);
1269 for (i = 0; i < rules->num_rules; i++) {
1270 if (rules->global)
1271 result = __ipa_add_global_flt_rule(rules->ip,
1272 &rules->rules[i].rule,
1273 rules->rules[i].at_rear,
1274 &rules->rules[i].flt_rule_hdl);
1275 else
1276 result = __ipa_add_ep_flt_rule(rules->ip, rules->ep,
1277 &rules->rules[i].rule,
1278 rules->rules[i].at_rear,
Skylar Chang68c37d82018-04-07 16:42:36 -07001279 &rules->rules[i].flt_rule_hdl,
1280 user_only);
Amir Levy9659e592016-10-27 18:08:27 +03001281 if (result) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301282 IPAERR_RL("failed to add flt rule %d\n", i);
Amir Levy9659e592016-10-27 18:08:27 +03001283 rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
1284 } else {
1285 rules->rules[i].status = 0;
1286 }
1287 }
1288
1289 if (rules->commit)
1290 if (ipa_ctx->ctrl->ipa_commit_flt(rules->ip)) {
1291 result = -EPERM;
1292 goto bail;
1293 }
1294 result = 0;
1295bail:
1296 mutex_unlock(&ipa_ctx->lock);
1297
1298 return result;
1299}
1300
1301/**
1302 * ipa2_del_flt_rule() - Remove the specified filtering rules from SW and
1303 * optionally commit to IPA HW
1304 *
1305 * Returns: 0 on success, negative on failure
1306 *
1307 * Note: Should not be called from atomic context
1308 */
1309int ipa2_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
1310{
1311 int i;
1312 int result;
1313
1314 if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301315 IPAERR_RL("bad parm\n");
Amir Levy9659e592016-10-27 18:08:27 +03001316 return -EINVAL;
1317 }
1318
1319 mutex_lock(&ipa_ctx->lock);
1320 for (i = 0; i < hdls->num_hdls; i++) {
1321 if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301322 IPAERR_RL("failed to del rt rule %i\n", i);
Amir Levy9659e592016-10-27 18:08:27 +03001323 hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
1324 } else {
1325 hdls->hdl[i].status = 0;
1326 }
1327 }
1328
1329 if (hdls->commit)
1330 if (ipa_ctx->ctrl->ipa_commit_flt(hdls->ip)) {
1331 result = -EPERM;
1332 goto bail;
1333 }
1334 result = 0;
1335bail:
1336 mutex_unlock(&ipa_ctx->lock);
1337
1338 return result;
1339}
1340
1341/**
1342 * ipa2_mdfy_flt_rule() - Modify the specified filtering rules in SW and
1343 * optionally commit to IPA HW
1344 *
1345 * Returns: 0 on success, negative on failure
1346 *
1347 * Note: Should not be called from atomic context
1348 */
1349int ipa2_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
1350{
1351 int i;
1352 int result;
1353
1354 if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301355 IPAERR_RL("bad parm\n");
Amir Levy9659e592016-10-27 18:08:27 +03001356 return -EINVAL;
1357 }
1358
1359 mutex_lock(&ipa_ctx->lock);
1360 for (i = 0; i < hdls->num_rules; i++) {
1361 if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301362 IPAERR_RL("failed to mdfy rt rule %i\n", i);
Amir Levy9659e592016-10-27 18:08:27 +03001363 hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
1364 } else {
1365 hdls->rules[i].status = 0;
1366 }
1367 }
1368
1369 if (hdls->commit)
1370 if (ipa_ctx->ctrl->ipa_commit_flt(hdls->ip)) {
1371 result = -EPERM;
1372 goto bail;
1373 }
1374 result = 0;
1375bail:
1376 mutex_unlock(&ipa_ctx->lock);
1377
1378 return result;
1379}
1380
1381
1382/**
1383 * ipa2_commit_flt() - Commit the current SW filtering table of specified type
1384 * to IPA HW
1385 * @ip: [in] the family of routing tables
1386 *
1387 * Returns: 0 on success, negative on failure
1388 *
1389 * Note: Should not be called from atomic context
1390 */
1391int ipa2_commit_flt(enum ipa_ip_type ip)
1392{
1393 int result;
1394
1395 if (ip >= IPA_IP_MAX) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301396 IPAERR_RL("bad parm\n");
Amir Levy9659e592016-10-27 18:08:27 +03001397 return -EINVAL;
1398 }
1399
1400 mutex_lock(&ipa_ctx->lock);
1401
1402 if (ipa_ctx->ctrl->ipa_commit_flt(ip)) {
1403 result = -EPERM;
1404 goto bail;
1405 }
1406 result = 0;
1407
1408bail:
1409 mutex_unlock(&ipa_ctx->lock);
1410
1411 return result;
1412}
1413
/**
 * ipa2_reset_flt() - Reset the current SW filtering table of specified type
 * (does not commit the table content itself to HW, but commits the empty
 * tables at the end)
 * @ip: [in] the family of routing tables
 * @user_only: [in] when true, delete only rules installed by userspace
 *             (entry->ipacm_installed); kernel-installed rules survive
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa2_reset_flt(enum ipa_ip_type ip, bool user_only)
{
	struct ipa_flt_tbl *tbl;
	struct ipa_flt_entry *entry;
	struct ipa_flt_entry *next;
	int i;
	int id;

	if (ip >= IPA_IP_MAX) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	/* pass 1: the global filter table for this IP family */
	tbl = &ipa_ctx->glob_flt_tbl[ip];
	mutex_lock(&ipa_ctx->lock);
	IPADBG("reset flt ip=%d\n", ip);
	list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list, link) {
		/* every live entry must still be in the id database */
		if (ipa_id_find(entry->id) == NULL) {
			WARN_ON(1);
			mutex_unlock(&ipa_ctx->lock);
			return -EFAULT;
		}

		/*
		 * Skip the catch-all rules keyed on the invalid L4
		 * protocol sentinel; they are preserved across a reset.
		 */
		if ((ip == IPA_IP_v4 &&
			entry->rule.attrib.attrib_mask == IPA_FLT_PROTOCOL &&
			entry->rule.attrib.u.v4.protocol ==
			IPA_INVALID_L4_PROTOCOL) ||
			(ip == IPA_IP_v6 &&
			entry->rule.attrib.attrib_mask == IPA_FLT_NEXT_HDR &&
			entry->rule.attrib.u.v6.next_hdr ==
			IPA_INVALID_L4_PROTOCOL))
			continue;

		/* delete everything, or only userspace-installed rules */
		if (!user_only ||
				entry->ipacm_installed) {
			list_del(&entry->link);
			entry->tbl->rule_cnt--;
			if (entry->rt_tbl)
				entry->rt_tbl->ref_cnt--;
			entry->cookie = 0;
			id = entry->id;
			kmem_cache_free(ipa_ctx->flt_rule_cache, entry);

			/* remove the handle from the database */
			ipa_id_remove(id);
		}
	}

	/* pass 2: every per-pipe filter table for this IP family */
	for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
		tbl = &ipa_ctx->flt_tbl[i][ip];
		list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list,
				link) {
			if (ipa_id_find(entry->id) == NULL) {
				WARN_ON(1);
				mutex_unlock(&ipa_ctx->lock);
				return -EFAULT;
			}

			if (!user_only ||
					entry->ipacm_installed) {
				list_del(&entry->link);
				entry->tbl->rule_cnt--;
				if (entry->rt_tbl)
					entry->rt_tbl->ref_cnt--;
				entry->cookie = 0;
				id = entry->id;
				kmem_cache_free(ipa_ctx->flt_rule_cache,
					entry);

				/* remove the handle from the database */
				ipa_id_remove(id);
			}
		}
	}

	/* commit the change to IPA-HW */
	if (ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4) ||
		ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6)) {
		IPAERR_RL("fail to commit flt-rule\n");
		WARN_ON_RATELIMIT_IPA(1);
		mutex_unlock(&ipa_ctx->lock);
		return -EPERM;
	}
	mutex_unlock(&ipa_ctx->lock);
	return 0;
}
1510
1511void ipa_install_dflt_flt_rules(u32 ipa_ep_idx)
1512{
1513 struct ipa_flt_tbl *tbl;
1514 struct ipa_ep_context *ep = &ipa_ctx->ep[ipa_ep_idx];
1515 struct ipa_flt_rule rule;
1516
1517 memset(&rule, 0, sizeof(rule));
1518
1519 mutex_lock(&ipa_ctx->lock);
1520 tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
Amir Levy9659e592016-10-27 18:08:27 +03001521 rule.action = IPA_PASS_TO_EXCEPTION;
Sunil Paidimarri4dbe11c2016-11-09 17:29:06 -08001522 __ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, true,
Skylar Chang68c37d82018-04-07 16:42:36 -07001523 &ep->dflt_flt4_rule_hdl, false);
Amir Levy9659e592016-10-27 18:08:27 +03001524 ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4);
Sunil Paidimarri4dbe11c2016-11-09 17:29:06 -08001525 tbl->sticky_rear = true;
Amir Levy9659e592016-10-27 18:08:27 +03001526
1527 tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
Amir Levy9659e592016-10-27 18:08:27 +03001528 rule.action = IPA_PASS_TO_EXCEPTION;
Sunil Paidimarri4dbe11c2016-11-09 17:29:06 -08001529 __ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, true,
Skylar Chang68c37d82018-04-07 16:42:36 -07001530 &ep->dflt_flt6_rule_hdl, false);
Amir Levy9659e592016-10-27 18:08:27 +03001531 ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6);
Sunil Paidimarri4dbe11c2016-11-09 17:29:06 -08001532 tbl->sticky_rear = true;
Amir Levy9659e592016-10-27 18:08:27 +03001533 mutex_unlock(&ipa_ctx->lock);
1534}
1535
1536void ipa_delete_dflt_flt_rules(u32 ipa_ep_idx)
1537{
Utkarsh Saxenaf6cc7982017-03-28 22:51:46 +05301538 struct ipa_flt_tbl *tbl;
Amir Levy9659e592016-10-27 18:08:27 +03001539 struct ipa_ep_context *ep = &ipa_ctx->ep[ipa_ep_idx];
1540
1541 mutex_lock(&ipa_ctx->lock);
1542 if (ep->dflt_flt4_rule_hdl) {
Utkarsh Saxenaf6cc7982017-03-28 22:51:46 +05301543 tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
Amir Levy9659e592016-10-27 18:08:27 +03001544 __ipa_del_flt_rule(ep->dflt_flt4_rule_hdl);
1545 ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4);
Utkarsh Saxenaf6cc7982017-03-28 22:51:46 +05301546 /* Reset the sticky flag. */
1547 tbl->sticky_rear = false;
Amir Levy9659e592016-10-27 18:08:27 +03001548 ep->dflt_flt4_rule_hdl = 0;
1549 }
1550 if (ep->dflt_flt6_rule_hdl) {
Utkarsh Saxenaf6cc7982017-03-28 22:51:46 +05301551 tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
Amir Levy9659e592016-10-27 18:08:27 +03001552 __ipa_del_flt_rule(ep->dflt_flt6_rule_hdl);
1553 ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6);
Utkarsh Saxenaf6cc7982017-03-28 22:51:46 +05301554 /* Reset the sticky flag. */
1555 tbl->sticky_rear = false;
Amir Levy9659e592016-10-27 18:08:27 +03001556 ep->dflt_flt6_rule_hdl = 0;
1557 }
1558 mutex_unlock(&ipa_ctx->lock);
1559}