/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include "ipa_i.h"

#define IPA_RT_TABLE_INDEX_NOT_FOUND (-1)
#define IPA_RT_TABLE_WORD_SIZE (4)
#define IPA_RT_INDEX_BITMAP_SIZE (32)
#define IPA_RT_TABLE_MEMORY_ALLIGNMENT (127)
#define IPA_RT_ENTRY_MEMORY_ALLIGNMENT (3)
#define IPA_RT_BIT_MASK (0x1)
#define IPA_RT_STATUS_OF_ADD_FAILED (-1)
#define IPA_RT_STATUS_OF_DEL_FAILED (-1)
#define IPA_RT_STATUS_OF_MDFY_FAILED (-1)

/**
 * __ipa_generate_rt_hw_rule_v2() - generates the routing hardware rule
 * @ip: the ip address family type
 * @entry: routing entry
 * @buf: output buffer; buf == NULL means the caller only wants to know the
 *	size of the rule as seen by HW, so a scratch buffer is used instead.
 *	With this scheme the rule is generated twice: once into the scratch
 *	buffer to learn its size, and a second time into the caller-supplied
 *	buffer of the required size.
 *
 * Returns: 0 on success, negative on failure
 *
 * The caller needs to hold any needed locks to ensure integrity.
 */
int __ipa_generate_rt_hw_rule_v2(enum ipa_ip_type ip,
	struct ipa_rt_entry *entry, u8 *buf)
{
	struct ipa_rt_rule_hw_hdr *rule_hdr;
	const struct ipa_rt_rule *rule =
		(const struct ipa_rt_rule *)&entry->rule;
	u16 en_rule = 0;
	u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4];
	u8 *start;
	int pipe_idx;
	struct ipa_hdr_entry *hdr_entry;

	if (buf == NULL) {
		memset(tmp, 0, (IPA_RT_FLT_HW_RULE_BUF_SIZE/4));
		buf = (u8 *)tmp;
	}

	start = buf;
	rule_hdr = (struct ipa_rt_rule_hw_hdr *)buf;
	pipe_idx = ipa2_get_ep_mapping(entry->rule.dst);
	if (pipe_idx == -1) {
		IPAERR("Wrong destination pipe specified in RT rule\n");
		WARN_ON(1);
		return -EPERM;
	}
	if (!IPA_CLIENT_IS_CONS(entry->rule.dst)) {
		IPAERR("No RT rule on IPA_client_producer pipe.\n");
		IPAERR("pipe_idx: %d dst_pipe: %d\n",
				pipe_idx, entry->rule.dst);
		WARN_ON(1);
		return -EPERM;
	}
	rule_hdr->u.hdr.pipe_dest_idx = pipe_idx;
	rule_hdr->u.hdr.system = !ipa_ctx->hdr_tbl_lcl;

	/* check that the header entry is still present in the header table */
	if (entry->hdr) {
		hdr_entry = ipa_id_find(entry->rule.hdr_hdl);
		if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) {
			IPAERR_RL("Header entry already deleted\n");
			return -EPERM;
		}
	}
	if (entry->hdr) {
		if (entry->hdr->cookie == IPA_HDR_COOKIE) {
			rule_hdr->u.hdr.hdr_offset =
				entry->hdr->offset_entry->offset >> 2;
		} else {
			IPAERR("Entry hdr deleted by user = %d cookie = %u\n",
				entry->hdr->user_deleted, entry->hdr->cookie);
			WARN_ON(1);
			rule_hdr->u.hdr.hdr_offset = 0;
		}
	} else {
		rule_hdr->u.hdr.hdr_offset = 0;
	}
	buf += sizeof(struct ipa_rt_rule_hw_hdr);

	if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
		IPAERR("fail to generate hw rule\n");
		return -EPERM;
	}

	IPADBG_LOW("en_rule 0x%x\n", en_rule);

	rule_hdr->u.hdr.en_rule = en_rule;
	ipa_write_32(rule_hdr->u.word, (u8 *)rule_hdr);

	if (entry->hw_len == 0) {
		entry->hw_len = buf - start;
	} else if (entry->hw_len != (buf - start)) {
		IPAERR(
		"hw_len differs b/w passes passed=0x%x calc=0x%zx\n",
		entry->hw_len,
		(buf - start));
		return -EPERM;
	}

	return 0;
}
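
/*
 * Illustrative two-pass usage (hypothetical caller sketch, not code that
 * exists elsewhere in this file):
 *
 *	entry->hw_len = 0;
 *	rc = __ipa_generate_rt_hw_rule_v2(ip, entry, NULL);  - sizing pass
 *	...obtain a buffer of at least entry->hw_len bytes...
 *	rc = __ipa_generate_rt_hw_rule_v2(ip, entry, body);  - emit pass
 *
 * The first pass fills in entry->hw_len; the second pass verifies that the
 * emitted length matches it.
 */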

/**
 * __ipa_generate_rt_hw_rule_v2_5() - generates the routing hardware rule
 * @ip: the ip address family type
 * @entry: routing entry
 * @buf: output buffer; buf == NULL means the caller only wants to know the
 *	size of the rule as seen by HW, so a scratch buffer is used instead.
 *	With this scheme the rule is generated twice: once into the scratch
 *	buffer to learn its size, and a second time into the caller-supplied
 *	buffer of the required size.
 *
 * Returns: 0 on success, negative on failure
 *
 * The caller needs to hold any needed locks to ensure integrity.
 */
int __ipa_generate_rt_hw_rule_v2_5(enum ipa_ip_type ip,
	struct ipa_rt_entry *entry, u8 *buf)
{
	struct ipa_rt_rule_hw_hdr *rule_hdr;
	const struct ipa_rt_rule *rule =
		(const struct ipa_rt_rule *)&entry->rule;
	u16 en_rule = 0;
	u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4];
	u8 *start;
	int pipe_idx;
	struct ipa_hdr_entry *hdr_entry;
	struct ipa_hdr_proc_ctx_entry *hdr_proc_entry;

	if (buf == NULL) {
		memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
		buf = (u8 *)tmp;
	}

	start = buf;
	rule_hdr = (struct ipa_rt_rule_hw_hdr *)buf;
	pipe_idx = ipa2_get_ep_mapping(entry->rule.dst);
	if (pipe_idx == -1) {
		IPAERR("Wrong destination pipe specified in RT rule\n");
		WARN_ON(1);
		return -EPERM;
	}
	if (!IPA_CLIENT_IS_CONS(entry->rule.dst)) {
		IPAERR("No RT rule on IPA_client_producer pipe.\n");
		IPAERR("pipe_idx: %d dst_pipe: %d\n",
				pipe_idx, entry->rule.dst);
		WARN_ON(1);
		return -EPERM;
	}
	rule_hdr->u.hdr_v2_5.pipe_dest_idx = pipe_idx;
	/*
	 * check that the header / proc ctx entry is still present in its
	 * table
	 */
	if (entry->hdr) {
		hdr_entry = ipa_id_find(entry->rule.hdr_hdl);
		if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) {
			IPAERR_RL("Header entry already deleted\n");
			return -EPERM;
		}
	} else if (entry->proc_ctx) {
		hdr_proc_entry = ipa_id_find(entry->rule.hdr_proc_ctx_hdl);
		if (!hdr_proc_entry ||
			hdr_proc_entry->cookie != IPA_PROC_HDR_COOKIE) {
			IPAERR_RL("Proc header entry already deleted\n");
			return -EPERM;
		}
	}
	if (entry->proc_ctx || (entry->hdr && entry->hdr->is_hdr_proc_ctx)) {
		struct ipa_hdr_proc_ctx_entry *proc_ctx;

		proc_ctx = (entry->proc_ctx) ? : entry->hdr->proc_ctx;
		rule_hdr->u.hdr_v2_5.system = !ipa_ctx->hdr_proc_ctx_tbl_lcl;
		BUG_ON(proc_ctx->offset_entry->offset & 31);
		rule_hdr->u.hdr_v2_5.proc_ctx = 1;
		rule_hdr->u.hdr_v2_5.hdr_offset =
			(proc_ctx->offset_entry->offset +
			ipa_ctx->hdr_proc_ctx_tbl.start_offset) >> 5;
	} else if (entry->hdr) {
		rule_hdr->u.hdr_v2_5.system = !ipa_ctx->hdr_tbl_lcl;
		BUG_ON(entry->hdr->offset_entry->offset & 3);
		rule_hdr->u.hdr_v2_5.proc_ctx = 0;
		rule_hdr->u.hdr_v2_5.hdr_offset =
			entry->hdr->offset_entry->offset >> 2;
	} else {
		rule_hdr->u.hdr_v2_5.proc_ctx = 0;
		rule_hdr->u.hdr_v2_5.hdr_offset = 0;
	}
	buf += sizeof(struct ipa_rt_rule_hw_hdr);

	if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
		IPAERR("fail to generate hw rule\n");
		return -EPERM;
	}

	IPADBG("en_rule 0x%x\n", en_rule);

	rule_hdr->u.hdr_v2_5.en_rule = en_rule;
	ipa_write_32(rule_hdr->u.word, (u8 *)rule_hdr);

	if (entry->hw_len == 0) {
		entry->hw_len = buf - start;
	} else if (entry->hw_len != (buf - start)) {
		IPAERR("hw_len differs b/w passes passed=0x%x calc=0x%zx\n",
			entry->hw_len, (buf - start));
		return -EPERM;
	}

	return 0;
}

/**
 * __ipa_generate_rt_hw_rule_v2_6L() - generates the routing hardware rule
 * @ip: the ip address family type
 * @entry: routing entry
 * @buf: output buffer; buf == NULL means the caller only wants to know the
 *	size of the rule as seen by HW, so a scratch buffer is used instead.
 *	With this scheme the rule is generated twice: once into the scratch
 *	buffer to learn its size, and a second time into the caller-supplied
 *	buffer of the required size.
 *
 * Returns: 0 on success, negative on failure
 *
 * The caller needs to hold any needed locks to ensure integrity.
 */
int __ipa_generate_rt_hw_rule_v2_6L(enum ipa_ip_type ip,
	struct ipa_rt_entry *entry, u8 *buf)
{
	/* Same implementation as IPAv2 */
	return __ipa_generate_rt_hw_rule_v2(ip, entry, buf);
}

/**
 * ipa_get_rt_hw_tbl_size() - returns the size of HW routing table
 * @ip: the ip address family type
 * @hdr_sz: header size
 * @max_rt_idx: maximal index
 *
 * Returns: size on success, negative on failure
 *
 * The caller needs to hold any needed locks to ensure integrity.
 *
 * The highest bit set in rt_idx_bitmap determines the size of the routing
 * table header.
 */
static int ipa_get_rt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz,
		int *max_rt_idx)
{
	struct ipa_rt_tbl_set *set;
	struct ipa_rt_tbl *tbl;
	struct ipa_rt_entry *entry;
	u32 total_sz = 0;
	u32 tbl_sz;
	u32 bitmap = ipa_ctx->rt_idx_bitmap[ip];
	int highest_bit_set = IPA_RT_TABLE_INDEX_NOT_FOUND;
	int i;
	int res;

	*hdr_sz = 0;
	set = &ipa_ctx->rt_tbl_set[ip];

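	/*
	 * The HW table starts with a header of one 4-byte entry per table
	 * index, so its size is set by the highest index allocated in the
	 * routing index bitmap.
	 */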
	for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
		if (bitmap & IPA_RT_BIT_MASK)
			highest_bit_set = i;
		bitmap >>= 1;
	}

	*max_rt_idx = highest_bit_set;
	if (highest_bit_set == IPA_RT_TABLE_INDEX_NOT_FOUND) {
		IPAERR("no rt tbls present\n");
		total_sz = IPA_RT_TABLE_WORD_SIZE;
		*hdr_sz = IPA_RT_TABLE_WORD_SIZE;
		return total_sz;
	}

	*hdr_sz = (highest_bit_set + 1) * IPA_RT_TABLE_WORD_SIZE;
	total_sz += *hdr_sz;
	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
		tbl_sz = 0;
		list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
			res = ipa_ctx->ctrl->ipa_generate_rt_hw_rule(
				ip,
				entry,
				NULL);
			if (res) {
				IPAERR("failed to find HW RT rule size\n");
				return -EPERM;
			}
			tbl_sz += entry->hw_len;
		}

		if (tbl_sz)
			tbl->sz = tbl_sz + IPA_RT_TABLE_WORD_SIZE;

		if (tbl->in_sys)
			continue;

		if (tbl_sz) {
			/* add the terminator */
			total_sz += (tbl_sz + IPA_RT_TABLE_WORD_SIZE);
			/* every rule-set should start at word boundary */
			total_sz = (total_sz + IPA_RT_ENTRY_MEMORY_ALLIGNMENT) &
				~IPA_RT_ENTRY_MEMORY_ALLIGNMENT;
		}
	}

	IPADBG("RT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip);

	return total_sz;
}

static int ipa_generate_rt_hw_tbl_common(enum ipa_ip_type ip, u8 *base, u8 *hdr,
		u32 body_ofst, u32 apps_start_idx)
{
	struct ipa_rt_tbl *tbl;
	struct ipa_rt_entry *entry;
	struct ipa_rt_tbl_set *set;
	u32 offset;
	u8 *body;
	struct ipa_mem_buffer rt_tbl_mem;
	u8 *rt_tbl_mem_body;
	int res;

	/* build the rt tbl in the DMA buffer to submit to IPA HW */
	body = base;

	set = &ipa_ctx->rt_tbl_set[ip];
	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
		if (!tbl->in_sys) {
			offset = body - base + body_ofst;
			if (offset & IPA_RT_ENTRY_MEMORY_ALLIGNMENT) {
				IPAERR("offset is not word multiple %d\n",
						offset);
				goto proc_err;
			}

			/* convert offset to words from bytes */
			offset &= ~IPA_RT_ENTRY_MEMORY_ALLIGNMENT;
			/* rule is at an offset from base */
			offset |= IPA_RT_BIT_MASK;
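			/*
			 * Local (SRAM-resident) tables are referenced in the
			 * header by an offset with the LSB set; system (DDR)
			 * tables below are referenced by their DMA address
			 * instead.
			 */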
			/* update the hdr at the right index */
			ipa_write_32(offset, hdr +
					((tbl->idx - apps_start_idx) *
					IPA_RT_TABLE_WORD_SIZE));

			/* generate the rule-set */
			list_for_each_entry(entry, &tbl->head_rt_rule_list,
					link) {
				res = ipa_ctx->ctrl->ipa_generate_rt_hw_rule(
					ip,
					entry,
					body);
				if (res) {
					IPAERR("failed to gen HW RT rule\n");
					goto proc_err;
				}
				body += entry->hw_len;
			}

			/* write the rule-set terminator */
			body = ipa_write_32(0, body);
			if ((long)body & IPA_RT_ENTRY_MEMORY_ALLIGNMENT)
				/* advance body to next word boundary */
				body = body + (IPA_RT_TABLE_WORD_SIZE -
						((long)body &
						IPA_RT_ENTRY_MEMORY_ALLIGNMENT));
		} else {
			if (tbl->sz == 0) {
				IPAERR("cannot generate 0 size table\n");
				goto proc_err;
			}

			/* allocate memory for the RT tbl */
			rt_tbl_mem.size = tbl->sz;
			rt_tbl_mem.base =
			   dma_alloc_coherent(ipa_ctx->pdev, rt_tbl_mem.size,
					   &rt_tbl_mem.phys_base, GFP_KERNEL);
			if (!rt_tbl_mem.base) {
				IPAERR("fail to alloc DMA buff of size %d\n",
						rt_tbl_mem.size);
				WARN_ON(1);
				goto proc_err;
			}

			WARN_ON(rt_tbl_mem.phys_base &
					IPA_RT_ENTRY_MEMORY_ALLIGNMENT);
			rt_tbl_mem_body = rt_tbl_mem.base;
			memset(rt_tbl_mem.base, 0, rt_tbl_mem.size);
			/* update the hdr at the right index */
			ipa_write_32(rt_tbl_mem.phys_base,
					hdr + ((tbl->idx - apps_start_idx) *
					IPA_RT_TABLE_WORD_SIZE));
			/* generate the rule-set */
			list_for_each_entry(entry, &tbl->head_rt_rule_list,
					link) {
				res = ipa_ctx->ctrl->ipa_generate_rt_hw_rule(
					ip,
					entry,
					rt_tbl_mem_body);
				if (res) {
					IPAERR("failed to gen HW RT rule\n");
					WARN_ON(1);
					goto rt_table_mem_alloc_failed;
				}
				rt_tbl_mem_body += entry->hw_len;
			}

			/* write the rule-set terminator */
			rt_tbl_mem_body = ipa_write_32(0, rt_tbl_mem_body);

			if (tbl->curr_mem.phys_base) {
				WARN_ON(tbl->prev_mem.phys_base);
				tbl->prev_mem = tbl->curr_mem;
			}
			tbl->curr_mem = rt_tbl_mem;
		}
	}

	return 0;

rt_table_mem_alloc_failed:
	dma_free_coherent(ipa_ctx->pdev, rt_tbl_mem.size,
			rt_tbl_mem.base, rt_tbl_mem.phys_base);
proc_err:
	return -EPERM;
}

/**
 * ipa_generate_rt_hw_tbl_v1_1() - generates the routing hardware table
 * @ip: [in] the ip address family type
 * @mem: [out] buffer to put the routing table
 *
 * Returns: 0 on success, negative on failure
 */
static int ipa_generate_rt_hw_tbl_v1_1(enum ipa_ip_type ip,
		struct ipa_mem_buffer *mem)
{
	u32 hdr_sz;
	u8 *hdr;
	u8 *body;
	u8 *base;
	int max_rt_idx;
	int i;
	int res;

	res = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx);
	if (res < 0) {
		IPAERR("ipa_get_rt_hw_tbl_size failed %d\n", res);
		goto error;
	}

	mem->size = res;
	mem->size = (mem->size + IPA_RT_TABLE_MEMORY_ALLIGNMENT) &
			~IPA_RT_TABLE_MEMORY_ALLIGNMENT;

	if (mem->size == 0) {
		IPAERR("rt tbl empty ip=%d\n", ip);
		goto error;
	}
	mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
			&mem->phys_base, GFP_KERNEL);
	if (!mem->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
		goto error;
	}

	memset(mem->base, 0, mem->size);

	/* build the rt tbl in the DMA buffer to submit to IPA HW */
	base = hdr = (u8 *)mem->base;
	body = base + hdr_sz;

	/* setup all indices to point to the empty sys rt tbl */
	for (i = 0; i <= max_rt_idx; i++)
		ipa_write_32(ipa_ctx->empty_rt_tbl_mem.phys_base,
				hdr + (i * IPA_RT_TABLE_WORD_SIZE));

	if (ipa_generate_rt_hw_tbl_common(ip, base, hdr, 0, 0)) {
		IPAERR("fail to generate RT tbl\n");
		goto proc_err;
	}

	return 0;

proc_err:
	dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base);
	mem->base = NULL;
error:
	return -EPERM;
}

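/*
 * __ipa_reap_sys_rt_tbls() - free stale system (DDR-resident) routing table
 * buffers: the previous DMA buffer of every live table that was re-generated,
 * and the current buffer of deleted tables parked on the reap list.
 */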
static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
{
	struct ipa_rt_tbl *tbl;
	struct ipa_rt_tbl *next;
	struct ipa_rt_tbl_set *set;

	set = &ipa_ctx->rt_tbl_set[ip];
	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
		if (tbl->prev_mem.phys_base) {
			IPADBG_LOW("reaping rt");
			IPADBG_LOW("tbl name=%s ip=%d\n",
				tbl->name, ip);
			dma_free_coherent(ipa_ctx->pdev, tbl->prev_mem.size,
					tbl->prev_mem.base,
					tbl->prev_mem.phys_base);
			memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
		}
	}

	set = &ipa_ctx->reap_rt_tbl_set[ip];
	list_for_each_entry_safe(tbl, next, &set->head_rt_tbl_list, link) {
		list_del(&tbl->link);
		WARN_ON(tbl->prev_mem.phys_base != 0);
		if (tbl->curr_mem.phys_base) {
			IPADBG_LOW("reaping sys");
			IPADBG_LOW("rt tbl name=%s ip=%d\n",
				tbl->name, ip);
			dma_free_coherent(ipa_ctx->pdev, tbl->curr_mem.size,
					tbl->curr_mem.base,
					tbl->curr_mem.phys_base);
			kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl);
		}
	}
}

int __ipa_commit_rt_v1_1(enum ipa_ip_type ip)
{
	struct ipa_desc desc = { 0 };
	struct ipa_mem_buffer *mem;
	void *cmd;
	struct ipa_ip_v4_routing_init *v4;
	struct ipa_ip_v6_routing_init *v6;
	u16 avail;
	u16 size;
	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);

	mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
	if (!mem) {
		IPAERR("failed to alloc memory object\n");
		goto fail_alloc_mem;
	}

	if (ip == IPA_IP_v4) {
		avail = ipa_ctx->ip4_rt_tbl_lcl ? IPA_MEM_v1_RAM_V4_RT_SIZE :
			IPA_MEM_PART(v4_rt_size_ddr);
		size = sizeof(struct ipa_ip_v4_routing_init);
	} else {
		avail = ipa_ctx->ip6_rt_tbl_lcl ? IPA_MEM_v1_RAM_V6_RT_SIZE :
			IPA_MEM_PART(v6_rt_size_ddr);
		size = sizeof(struct ipa_ip_v6_routing_init);
	}
	cmd = kmalloc(size, flag);
	if (!cmd) {
		IPAERR("failed to alloc immediate command object\n");
		goto fail_alloc_cmd;
	}

	if (ipa_generate_rt_hw_tbl_v1_1(ip, mem)) {
		IPAERR("fail to generate RT HW TBL ip %d\n", ip);
		goto fail_hw_tbl_gen;
	}

	if (mem->size > avail) {
		IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
		goto fail_send_cmd;
	}

	if (ip == IPA_IP_v4) {
		v4 = (struct ipa_ip_v4_routing_init *)cmd;
		desc.opcode = IPA_IP_V4_ROUTING_INIT;
		v4->ipv4_rules_addr = mem->phys_base;
		v4->size_ipv4_rules = mem->size;
		v4->ipv4_addr = IPA_MEM_v1_RAM_V4_RT_OFST;
		IPADBG("putting Routing IPv4 rules to phys 0x%x",
				v4->ipv4_addr);
	} else {
		v6 = (struct ipa_ip_v6_routing_init *)cmd;
		desc.opcode = IPA_IP_V6_ROUTING_INIT;
		v6->ipv6_rules_addr = mem->phys_base;
		v6->size_ipv6_rules = mem->size;
		v6->ipv6_addr = IPA_MEM_v1_RAM_V6_RT_OFST;
		IPADBG("putting Routing IPv6 rules to phys 0x%x",
				v6->ipv6_addr);
	}

	desc.pyld = cmd;
	desc.len = size;
	desc.type = IPA_IMM_CMD_DESC;
	IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);

	if (ipa_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		goto fail_send_cmd;
	}

	__ipa_reap_sys_rt_tbls(ip);
	dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base);
	kfree(cmd);
	kfree(mem);

	return 0;

fail_send_cmd:
	if (mem->base)
		dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
				mem->phys_base);
fail_hw_tbl_gen:
	kfree(cmd);
fail_alloc_cmd:
	kfree(mem);
fail_alloc_mem:
	return -EPERM;
}

static int ipa_generate_rt_hw_tbl_v2(enum ipa_ip_type ip,
	struct ipa_mem_buffer *mem, struct ipa_mem_buffer *head)
{
	u32 hdr_sz;
	u8 *hdr;
	u8 *body;
	u8 *base;
	int max_rt_idx;
	int i;
	u32 *entr;
	int num_index;
	u32 body_start_offset;
	u32 apps_start_idx;
	int res;

	if (ip == IPA_IP_v4) {
		num_index = IPA_MEM_PART(v4_apps_rt_index_hi) -
			IPA_MEM_PART(v4_apps_rt_index_lo) + 1;
		body_start_offset = IPA_MEM_PART(apps_v4_rt_ofst) -
			IPA_MEM_PART(v4_rt_ofst);
		apps_start_idx = IPA_MEM_PART(v4_apps_rt_index_lo);
	} else {
		num_index = IPA_MEM_PART(v6_apps_rt_index_hi) -
			IPA_MEM_PART(v6_apps_rt_index_lo) + 1;
		body_start_offset = IPA_MEM_PART(apps_v6_rt_ofst) -
			IPA_MEM_PART(v6_rt_ofst);
		apps_start_idx = IPA_MEM_PART(v6_apps_rt_index_lo);
	}

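	/*
	 * "head" is the table-index array: one 4-byte slot per apps routing
	 * index, each initialized to point at the shared empty system table
	 * and later patched by ipa_generate_rt_hw_tbl_common() for tables
	 * that actually hold rules.
	 */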
	head->size = num_index * 4;
	head->base = dma_alloc_coherent(ipa_ctx->pdev, head->size,
			&head->phys_base, GFP_KERNEL);
	if (!head->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", head->size);
		goto err;
	}
	entr = (u32 *)head->base;
	hdr = (u8 *)head->base;
	for (i = 1; i <= num_index; i++) {
		*entr = ipa_ctx->empty_rt_tbl_mem.phys_base;
		entr++;
	}

	res = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx);
	if (res < 0) {
		IPAERR("ipa_get_rt_hw_tbl_size failed %d\n", res);
		goto base_err;
	}

	mem->size = res;
	mem->size -= hdr_sz;
	mem->size = (mem->size + IPA_RT_TABLE_MEMORY_ALLIGNMENT) &
			~IPA_RT_TABLE_MEMORY_ALLIGNMENT;

	if (mem->size > 0) {
		mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
				&mem->phys_base, GFP_KERNEL);
		if (!mem->base) {
			IPAERR("fail to alloc DMA buff of size %d\n",
					mem->size);
			goto base_err;
		}
		memset(mem->base, 0, mem->size);
	}

	/* build the rt tbl in the DMA buffer to submit to IPA HW */
	body = base = (u8 *)mem->base;

	if (ipa_generate_rt_hw_tbl_common(ip, base, hdr, body_start_offset,
				apps_start_idx)) {
		IPAERR("fail to generate RT tbl\n");
		goto proc_err;
	}

	return 0;

proc_err:
	if (mem->size)
		dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
				mem->phys_base);
base_err:
	dma_free_coherent(ipa_ctx->pdev, head->size, head->base,
			head->phys_base);
err:
	return -EPERM;
}

int __ipa_commit_rt_v2(enum ipa_ip_type ip)
{
	struct ipa_desc desc[2];
	struct ipa_mem_buffer body;
	struct ipa_mem_buffer head;
	struct ipa_hw_imm_cmd_dma_shared_mem *cmd1 = NULL;
	struct ipa_hw_imm_cmd_dma_shared_mem *cmd2 = NULL;
	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
	u16 avail;
	u32 num_modem_rt_index;
	int rc = 0;
	u32 local_addr1;
	u32 local_addr2;
	bool lcl;

	memset(desc, 0, 2 * sizeof(struct ipa_desc));

	if (ip == IPA_IP_v4) {
		avail = ipa_ctx->ip4_rt_tbl_lcl ?
			IPA_MEM_PART(apps_v4_rt_size) :
			IPA_MEM_PART(v4_rt_size_ddr);
		num_modem_rt_index =
			IPA_MEM_PART(v4_modem_rt_index_hi) -
			IPA_MEM_PART(v4_modem_rt_index_lo) + 1;
		local_addr1 = ipa_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v4_rt_ofst) +
			num_modem_rt_index * 4;
		local_addr2 = ipa_ctx->smem_restricted_bytes +
			IPA_MEM_PART(apps_v4_rt_ofst);
		lcl = ipa_ctx->ip4_rt_tbl_lcl;
	} else {
		avail = ipa_ctx->ip6_rt_tbl_lcl ?
			IPA_MEM_PART(apps_v6_rt_size) :
			IPA_MEM_PART(v6_rt_size_ddr);
		num_modem_rt_index =
			IPA_MEM_PART(v6_modem_rt_index_hi) -
			IPA_MEM_PART(v6_modem_rt_index_lo) + 1;
		local_addr1 = ipa_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v6_rt_ofst) +
			num_modem_rt_index * 4;
		local_addr2 = ipa_ctx->smem_restricted_bytes +
			IPA_MEM_PART(apps_v6_rt_ofst);
		lcl = ipa_ctx->ip6_rt_tbl_lcl;
	}

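	/*
	 * Two DMA_SHARED_MEM immediate commands are used: the first copies
	 * the apps index header into IPA local memory; the second, issued
	 * only when the routing tables themselves are local (lcl), copies
	 * the rule bodies as well.
	 */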
	if (ipa_generate_rt_hw_tbl_v2(ip, &body, &head)) {
		IPAERR("fail to generate RT HW TBL ip %d\n", ip);
		rc = -EFAULT;
		goto fail_gen;
	}

	if (body.size > avail) {
		IPAERR("tbl too big, needed %d avail %d\n", body.size, avail);
		rc = -EFAULT;
		goto fail_send_cmd;
	}

	cmd1 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
		flag);
	if (cmd1 == NULL) {
		IPAERR("Failed to alloc immediate command object\n");
		rc = -ENOMEM;
		goto fail_send_cmd;
	}

	cmd1->size = head.size;
	cmd1->system_addr = head.phys_base;
	cmd1->local_addr = local_addr1;
	desc[0].opcode = IPA_DMA_SHARED_MEM;
	desc[0].pyld = (void *)cmd1;
	desc[0].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
	desc[0].type = IPA_IMM_CMD_DESC;

	if (lcl) {
		cmd2 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
			flag);
		if (cmd2 == NULL) {
			IPAERR("Failed to alloc immediate command object\n");
			rc = -ENOMEM;
			goto fail_send_cmd1;
		}

		cmd2->size = body.size;
		cmd2->system_addr = body.phys_base;
		cmd2->local_addr = local_addr2;

		desc[1].opcode = IPA_DMA_SHARED_MEM;
		desc[1].pyld = (void *)cmd2;
		desc[1].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
		desc[1].type = IPA_IMM_CMD_DESC;

		if (ipa_send_cmd(2, desc)) {
			IPAERR("fail to send immediate command\n");
			rc = -EFAULT;
			goto fail_send_cmd2;
		}
	} else {
		if (ipa_send_cmd(1, desc)) {
			IPAERR("fail to send immediate command\n");
			rc = -EFAULT;
			goto fail_send_cmd1;
		}
	}

	IPADBG("HEAD\n");
	IPA_DUMP_BUFF(head.base, head.phys_base, head.size);
	if (body.size) {
		IPADBG("BODY\n");
		IPA_DUMP_BUFF(body.base, body.phys_base, body.size);
	}
	__ipa_reap_sys_rt_tbls(ip);

fail_send_cmd2:
	kfree(cmd2);
fail_send_cmd1:
	kfree(cmd1);
fail_send_cmd:
	dma_free_coherent(ipa_ctx->pdev, head.size, head.base, head.phys_base);
	if (body.size)
		dma_free_coherent(ipa_ctx->pdev, body.size, body.base,
				body.phys_base);
fail_gen:
	return rc;
}

/**
 * __ipa_find_rt_tbl() - find the routing table
 * whose name is given as a parameter
 * @ip: [in] the ip address family type of the wanted routing table
 * @name: [in] the name of the wanted routing table
 *
 * Returns: the routing table whose name is given as a parameter, or NULL if
 * it doesn't exist
 */
struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name)
{
	struct ipa_rt_tbl *entry;
	struct ipa_rt_tbl_set *set;

	set = &ipa_ctx->rt_tbl_set[ip];
	list_for_each_entry(entry, &set->head_rt_tbl_list, link) {
		if (!strcmp(name, entry->name))
			return entry;
	}

	return NULL;
}

/**
 * ipa2_query_rt_index() - find the routing table index
 * whose name and ip type are given as parameters
 * @in: [inout] the name/ip of the wanted routing table and, on success,
 * its index
 *
 * Returns: 0 on success, negative on failure
 */
int ipa2_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in)
{
	struct ipa_rt_tbl *entry;

	if (in->ip >= IPA_IP_MAX) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa_ctx->lock);
	/* check if this table exists */
	in->name[IPA_RESOURCE_NAME_MAX-1] = '\0';
	entry = __ipa_find_rt_tbl(in->ip, in->name);
	if (!entry) {
		mutex_unlock(&ipa_ctx->lock);
		return -EFAULT;
	}

	in->idx = entry->idx;
	mutex_unlock(&ipa_ctx->lock);
	return 0;
}

static struct ipa_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
		const char *name)
{
	struct ipa_rt_tbl *entry;
	struct ipa_rt_tbl_set *set;
	int i;
	int id;

	if (ip >= IPA_IP_MAX || name == NULL) {
		IPAERR("bad parm\n");
		goto error;
	}

	set = &ipa_ctx->rt_tbl_set[ip];
	/* check if this table exists */
	entry = __ipa_find_rt_tbl(ip, name);
	if (!entry) {
		entry = kmem_cache_zalloc(ipa_ctx->rt_tbl_cache, GFP_KERNEL);
		if (!entry) {
			IPAERR("failed to alloc RT tbl object\n");
			goto error;
		}
		/* find a routing tbl index */
		for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
			if (!test_bit(i, &ipa_ctx->rt_idx_bitmap[ip])) {
				entry->idx = i;
				set_bit(i, &ipa_ctx->rt_idx_bitmap[ip]);
				break;
			}
		}
		if (i == IPA_RT_INDEX_BITMAP_SIZE) {
			IPAERR("no free RT tbl indices left\n");
			goto fail_rt_idx_alloc;
		}

		INIT_LIST_HEAD(&entry->head_rt_rule_list);
		INIT_LIST_HEAD(&entry->link);
		strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
		entry->set = set;
		entry->cookie = IPA_RT_TBL_COOKIE;
		entry->in_sys = (ip == IPA_IP_v4) ?
			!ipa_ctx->ip4_rt_tbl_lcl : !ipa_ctx->ip6_rt_tbl_lcl;
		set->tbl_cnt++;
		list_add(&entry->link, &set->head_rt_tbl_list);

		IPADBG("add rt tbl idx=%d tbl_cnt=%d ip=%d\n", entry->idx,
				set->tbl_cnt, ip);

		id = ipa_id_alloc(entry);
		if (id < 0) {
			IPAERR("failed to add to tree\n");
			WARN_ON(1);
			goto ipa_insert_failed;
		}
		entry->id = id;
	}

	return entry;

ipa_insert_failed:
	set->tbl_cnt--;
	list_del(&entry->link);
fail_rt_idx_alloc:
	entry->cookie = 0;
	kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
error:
	return NULL;
}

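/*
 * __ipa_del_rt_tbl() - remove a routing table: a local (SRAM) table is freed
 * immediately, while a system (DDR) table is moved to the reap list so its
 * DMA buffer can be released after the next commit.
 */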
static int __ipa_del_rt_tbl(struct ipa_rt_tbl *entry)
{
	enum ipa_ip_type ip = IPA_IP_MAX;
	u32 id;

	if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE)) {
		IPAERR_RL("bad parms\n");
		return -EINVAL;
	}
	id = entry->id;
	if (ipa_id_find(id) == NULL) {
		IPAERR_RL("lookup failed\n");
		return -EPERM;
	}

	if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4])
		ip = IPA_IP_v4;
	else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
		ip = IPA_IP_v6;
	else {
		WARN_ON(1);
		return -EPERM;
	}

	if (!entry->in_sys) {
		list_del(&entry->link);
		clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]);
		entry->set->tbl_cnt--;
		IPADBG_LOW("del rt tbl_idx=%d tbl_cnt=%d\n", entry->idx,
			entry->set->tbl_cnt);
		kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
	} else {
		list_move(&entry->link,
			&ipa_ctx->reap_rt_tbl_set[ip].head_rt_tbl_list);
		clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]);
		entry->set->tbl_cnt--;
		IPADBG_LOW("del sys rt tbl_idx=%d tbl_cnt=%d\n", entry->idx,
			entry->set->tbl_cnt);
	}

	/* remove the handle from the database */
	ipa_id_remove(id);
	return 0;
}

static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
		const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl,
		bool user)
{
	struct ipa_rt_tbl *tbl;
	struct ipa_rt_entry *entry;
	struct ipa_hdr_entry *hdr = NULL;
	struct ipa_hdr_proc_ctx_entry *proc_ctx = NULL;
	int id;

	if (rule->hdr_hdl && rule->hdr_proc_ctx_hdl) {
		IPAERR("rule contains both hdr_hdl and hdr_proc_ctx_hdl\n");
		goto error;
	}

	if (rule->hdr_hdl) {
		hdr = ipa_id_find(rule->hdr_hdl);
		if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) {
			IPAERR("rt rule does not point to valid hdr\n");
			goto error;
		}
	} else if (rule->hdr_proc_ctx_hdl) {
		proc_ctx = ipa_id_find(rule->hdr_proc_ctx_hdl);
		if ((proc_ctx == NULL) ||
			(proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) {
			IPAERR("rt rule does not point to valid proc ctx\n");
			goto error;
		}
	}

	tbl = __ipa_add_rt_tbl(ip, name);
	if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {
		IPAERR("bad params\n");
		goto error;
	}
	/*
	 * do not allow any rules to be added at end of the "default" routing
	 * tables
	 */
	if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
	    (tbl->rule_cnt > 0) && (at_rear != 0)) {
		IPAERR("cannot add rule at end of tbl rule_cnt=%d at_rear=%d\n",
		       tbl->rule_cnt, at_rear);
		goto error;
	}

	entry = kmem_cache_zalloc(ipa_ctx->rt_rule_cache, GFP_KERNEL);
	if (!entry) {
		IPAERR("failed to alloc RT rule object\n");
		goto error;
	}
	INIT_LIST_HEAD(&entry->link);
	entry->cookie = IPA_RT_RULE_COOKIE;
	entry->rule = *rule;
	entry->tbl = tbl;
	entry->hdr = hdr;
	entry->proc_ctx = proc_ctx;
	if (at_rear)
		list_add_tail(&entry->link, &tbl->head_rt_rule_list);
	else
		list_add(&entry->link, &tbl->head_rt_rule_list);
	tbl->rule_cnt++;
	if (entry->hdr)
		entry->hdr->ref_cnt++;
	else if (entry->proc_ctx)
		entry->proc_ctx->ref_cnt++;
	id = ipa_id_alloc(entry);
	if (id < 0) {
		IPAERR("failed to add to tree\n");
		WARN_ON(1);
		goto ipa_insert_failed;
	}
	IPADBG_LOW("add rt rule tbl_idx=%d", tbl->idx);
	IPADBG_LOW("rule_cnt=%d\n", tbl->rule_cnt);
	*rule_hdl = id;
	entry->id = id;
	entry->ipacm_installed = user;

	return 0;

ipa_insert_failed:
	if (entry->hdr)
		entry->hdr->ref_cnt--;
	else if (entry->proc_ctx)
		entry->proc_ctx->ref_cnt--;
	list_del(&entry->link);
	kmem_cache_free(ipa_ctx->rt_rule_cache, entry);
error:
	return -EPERM;
}

/**
 * ipa2_add_rt_rule() - Add the specified routing rules to SW and optionally
 * commit to IPA HW
 * @rules: [inout] set of routing rules to add
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa2_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
{
	return ipa2_add_rt_rule_usr(rules, false);
}

/**
 * ipa2_add_rt_rule_usr() - Add the specified routing rules to SW and
 * optionally commit to IPA HW
 * @rules: [inout] set of routing rules to add
 * @user_only: [in] indicate installed by userspace module
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa2_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only)
{
	int i;
	int ret;

	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa_ctx->lock);
	for (i = 0; i < rules->num_rules; i++) {
		rules->rt_tbl_name[IPA_RESOURCE_NAME_MAX-1] = '\0';
		if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
					&rules->rules[i].rule,
					rules->rules[i].at_rear,
					&rules->rules[i].rt_rule_hdl,
					user_only)) {
			IPAERR_RL("failed to add rt rule %d\n", i);
			rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
		} else {
			rules->rules[i].status = 0;
		}
	}

	if (rules->commit)
		if (ipa_ctx->ctrl->ipa_commit_rt(rules->ip)) {
			ret = -EPERM;
			goto bail;
		}

	ret = 0;
bail:
	mutex_unlock(&ipa_ctx->lock);
	return ret;
}

int __ipa_del_rt_rule(u32 rule_hdl)
{
	struct ipa_rt_entry *entry;
	int id;
	struct ipa_hdr_entry *hdr_entry;
	struct ipa_hdr_proc_ctx_entry *hdr_proc_entry;

	entry = ipa_id_find(rule_hdl);

	if (entry == NULL) {
		IPAERR_RL("lookup failed\n");
		return -EINVAL;
	}

	if (entry->cookie != IPA_RT_RULE_COOKIE) {
		IPAERR_RL("bad params\n");
		return -EINVAL;
	}

	if (!strcmp(entry->tbl->name, IPA_DFLT_RT_TBL_NAME)) {
		IPADBG("Deleting rule from default rt table idx=%u\n",
			entry->tbl->idx);
		if (entry->tbl->rule_cnt == 1) {
			IPAERR_RL("Default tbl last rule cannot be deleted\n");
			return -EINVAL;
		}
	}

	/*
	 * check that the header / proc ctx entry is still present in its
	 * table
	 */
	if (entry->hdr) {
		hdr_entry = ipa_id_find(entry->rule.hdr_hdl);
		if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) {
			IPAERR_RL("Header entry already deleted\n");
			return -EINVAL;
		}
	} else if (entry->proc_ctx) {
		hdr_proc_entry = ipa_id_find(entry->rule.hdr_proc_ctx_hdl);
		if (!hdr_proc_entry ||
			hdr_proc_entry->cookie != IPA_PROC_HDR_COOKIE) {
			IPAERR_RL("Proc header entry already deleted\n");
			return -EINVAL;
		}
	}

	if (entry->hdr)
		__ipa_release_hdr(entry->hdr->id);
	else if (entry->proc_ctx)
		__ipa_release_hdr_proc_ctx(entry->proc_ctx->id);
	list_del(&entry->link);
	entry->tbl->rule_cnt--;
	IPADBG_LOW("del rt rule tbl_idx=%d rule_cnt=%d\n", entry->tbl->idx,
		entry->tbl->rule_cnt);
	if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
		if (__ipa_del_rt_tbl(entry->tbl))
			IPAERR_RL("fail to del RT tbl\n");
	}
	entry->cookie = 0;
	id = entry->id;
	kmem_cache_free(ipa_ctx->rt_rule_cache, entry);

	/* remove the handle from the database */
	ipa_id_remove(id);

	return 0;
}

/**
 * ipa2_del_rt_rule() - Remove the specified routing rules from SW and
 * optionally commit to IPA HW
 * @hdls: [inout] set of routing rules to delete
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa2_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
{
	int i;
	int ret;

	if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa_ctx->lock);
	for (i = 0; i < hdls->num_hdls; i++) {
		if (__ipa_del_rt_rule(hdls->hdl[i].hdl)) {
			IPAERR_RL("failed to del rt rule %i\n", i);
			hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
		} else {
			hdls->hdl[i].status = 0;
		}
	}

	if (hdls->commit)
		if (ipa_ctx->ctrl->ipa_commit_rt(hdls->ip)) {
			ret = -EPERM;
			goto bail;
		}

	ret = 0;
bail:
	mutex_unlock(&ipa_ctx->lock);
	return ret;
}

/**
 * ipa2_commit_rt() - Commit the current SW routing table of specified type
 * to IPA HW
 * @ip: The family of routing tables
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa2_commit_rt(enum ipa_ip_type ip)
{
	int ret;

	if (ip >= IPA_IP_MAX) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	/*
	 * issue a commit on the filtering module of same IP type since
	 * filtering rules point to routing tables
	 */
	if (ipa2_commit_flt(ip))
		return -EPERM;

	mutex_lock(&ipa_ctx->lock);
	if (ipa_ctx->ctrl->ipa_commit_rt(ip)) {
		ret = -EPERM;
		goto bail;
	}

	ret = 0;
bail:
	mutex_unlock(&ipa_ctx->lock);
	return ret;
}

/**
 * ipa2_reset_rt() - reset the current SW routing table of specified type
 * (does not commit to HW)
 * @ip: [in] The family of routing tables
 * @user_only: [in] indicate delete rules installed by userspace
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa2_reset_rt(enum ipa_ip_type ip, bool user_only)
{
	struct ipa_rt_tbl *tbl;
	struct ipa_rt_tbl *tbl_next;
	struct ipa_rt_tbl_set *set;
	struct ipa_rt_entry *rule;
	struct ipa_rt_entry *rule_next;
	struct ipa_rt_tbl_set *rset;
	u32 apps_start_idx;
	int id;
	bool tbl_user = false;

	if (ip >= IPA_IP_MAX) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) {
		if (ip == IPA_IP_v4)
			apps_start_idx = IPA_MEM_PART(v4_apps_rt_index_lo);
		else
			apps_start_idx = IPA_MEM_PART(v6_apps_rt_index_lo);
	} else {
		apps_start_idx = 0;
	}

	/*
	 * issue a reset on the filtering module of same IP type since
	 * filtering rules point to routing tables
	 */
	if (ipa2_reset_flt(ip, user_only))
		IPAERR_RL("fail to reset flt ip=%d\n", ip);

	set = &ipa_ctx->rt_tbl_set[ip];
	rset = &ipa_ctx->reap_rt_tbl_set[ip];
	mutex_lock(&ipa_ctx->lock);
	IPADBG("reset rt ip=%d\n", ip);
	list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) {
		tbl_user = false;
		list_for_each_entry_safe(rule, rule_next,
					 &tbl->head_rt_rule_list, link) {
			if (ipa_id_find(rule->id) == NULL) {
				WARN_ON(1);
				mutex_unlock(&ipa_ctx->lock);
				return -EFAULT;
			}

			/* indicate if tbl used for user-specified rules */
			if (rule->ipacm_installed) {
				IPADBG("tbl_user %d, tbl-index %d\n",
					tbl_user, tbl->id);
				tbl_user = true;
			}
			/*
			 * for the "default" routing tbl, remove all but the
			 * last rule
			 */
			if (tbl->idx == apps_start_idx && tbl->rule_cnt == 1)
				continue;
			if (!user_only ||
				rule->ipacm_installed) {
				list_del(&rule->link);
				tbl->rule_cnt--;
				if (rule->hdr)
					__ipa_release_hdr(rule->hdr->id);
				else if (rule->proc_ctx)
					__ipa_release_hdr_proc_ctx(
						rule->proc_ctx->id);
				rule->cookie = 0;
				id = rule->id;
				kmem_cache_free(ipa_ctx->rt_rule_cache, rule);

				/* remove the handle from the database */
				ipa_id_remove(id);
			}
		}

		if (ipa_id_find(tbl->id) == NULL) {
			WARN_ON(1);
			mutex_unlock(&ipa_ctx->lock);
			return -EFAULT;
		}
		id = tbl->id;

		/* do not remove the "default" routing tbl which has index 0 */
		if (tbl->idx != apps_start_idx) {
			if (!user_only || tbl_user) {
				if (!tbl->in_sys) {
					list_del(&tbl->link);
					set->tbl_cnt--;
					clear_bit(tbl->idx,
						&ipa_ctx->rt_idx_bitmap[ip]);
					IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n",
						tbl->idx, set->tbl_cnt);
					kmem_cache_free(ipa_ctx->rt_tbl_cache,
						tbl);
				} else {
					list_move(&tbl->link,
						&rset->head_rt_tbl_list);
					clear_bit(tbl->idx,
						&ipa_ctx->rt_idx_bitmap[ip]);
					set->tbl_cnt--;
					IPADBG("rst tbl_idx=%d cnt=%d\n",
						tbl->idx, set->tbl_cnt);
				}
				/* remove the handle from the database */
				ipa_id_remove(id);
			}
		}
	}

	/* commit the change to IPA-HW */
	if (ipa_ctx->ctrl->ipa_commit_rt(IPA_IP_v4) ||
		ipa_ctx->ctrl->ipa_commit_rt(IPA_IP_v6)) {
		IPAERR("fail to commit rt-rule\n");
		WARN_ON_RATELIMIT_IPA(1);
		mutex_unlock(&ipa_ctx->lock);
		return -EPERM;
	}
	mutex_unlock(&ipa_ctx->lock);

	return 0;
}

/**
 * ipa2_get_rt_tbl() - lookup the specified routing table and return handle if
 * it exists, if lookup succeeds the routing table ref cnt is increased
 * @lookup: [inout] routing table to lookup and its handle
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 * Caller should call ipa_put_rt_tbl later if this function succeeds
 */
int ipa2_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
{
	struct ipa_rt_tbl *entry;
	int result = -EFAULT;

	if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}
	mutex_lock(&ipa_ctx->lock);
	lookup->name[IPA_RESOURCE_NAME_MAX-1] = '\0';
	entry = __ipa_find_rt_tbl(lookup->ip, lookup->name);
	if (entry && entry->cookie == IPA_RT_TBL_COOKIE) {
		if (entry->ref_cnt == U32_MAX) {
			IPAERR("fail: ref count crossed limit\n");
			goto ret;
		}
		entry->ref_cnt++;
		lookup->hdl = entry->id;

		/* commit for get */
		if (ipa_ctx->ctrl->ipa_commit_rt(lookup->ip))
			IPAERR_RL("fail to commit RT tbl\n");

		result = 0;
	}

ret:
	mutex_unlock(&ipa_ctx->lock);

	return result;
}
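
/*
 * Typical get/put pairing (hypothetical caller sketch, table name made up):
 *
 *	struct ipa_ioc_get_rt_tbl lookup = { .ip = IPA_IP_v4 };
 *
 *	strlcpy(lookup.name, "example_tbl", IPA_RESOURCE_NAME_MAX);
 *	if (!ipa2_get_rt_tbl(&lookup)) {
 *		...use lookup.hdl while holding the reference...
 *		ipa2_put_rt_tbl(lookup.hdl);
 *	}
 */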

/**
 * ipa2_put_rt_tbl() - Release the specified routing table handle
 * @rt_tbl_hdl: [in] the routing table handle to release
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa2_put_rt_tbl(u32 rt_tbl_hdl)
{
	struct ipa_rt_tbl *entry;
	enum ipa_ip_type ip = IPA_IP_MAX;
	int result = 0;

	mutex_lock(&ipa_ctx->lock);
	entry = ipa_id_find(rt_tbl_hdl);
	if (entry == NULL) {
		IPAERR_RL("lookup failed\n");
		result = -EINVAL;
		goto ret;
	}

	if ((entry->cookie != IPA_RT_TBL_COOKIE) || entry->ref_cnt == 0) {
		IPAERR_RL("bad parms\n");
		result = -EINVAL;
		goto ret;
	}

	if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4])
		ip = IPA_IP_v4;
	else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
		ip = IPA_IP_v6;
	else {
		WARN_ON(1);
		result = -EINVAL;
		goto ret;
	}

	entry->ref_cnt--;
	if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
		if (__ipa_del_rt_tbl(entry))
			IPAERR_RL("fail to del RT tbl\n");
		/* commit for put */
		if (ipa_ctx->ctrl->ipa_commit_rt(ip))
			IPAERR_RL("fail to commit RT tbl\n");
	}

	result = 0;

ret:
	mutex_unlock(&ipa_ctx->lock);

	return result;
}


static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule)
{
	struct ipa_rt_entry *entry;
	struct ipa_hdr_entry *hdr = NULL;
	struct ipa_hdr_entry *hdr_entry;

	if (rtrule->rule.hdr_hdl) {
		hdr = ipa_id_find(rtrule->rule.hdr_hdl);
		if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) {
			IPAERR_RL("rt rule does not point to valid hdr\n");
			goto error;
		}
	}

	entry = ipa_id_find(rtrule->rt_rule_hdl);
	if (entry == NULL) {
		IPAERR_RL("lookup failed\n");
		goto error;
	}

	if (entry->cookie != IPA_RT_RULE_COOKIE) {
		IPAERR_RL("bad params\n");
		goto error;
	}

	/* check that the header entry is still present in the header table */
	if (entry->hdr) {
		hdr_entry = ipa_id_find(entry->rule.hdr_hdl);
		if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) {
			IPAERR_RL("Header entry already deleted\n");
			return -EPERM;
		}
	}
	if (entry->hdr)
		entry->hdr->ref_cnt--;

	entry->rule = rtrule->rule;
	entry->hdr = hdr;

	if (entry->hdr)
		entry->hdr->ref_cnt++;

	return 0;

error:
	return -EPERM;
}

/**
 * ipa2_mdfy_rt_rule() - Modify the specified routing rules in SW and
 * optionally commit to IPA HW
 * @hdls: [inout] set of routing rules to modify
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa2_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls)
{
	int i;
	int result;

	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa_ctx->lock);
	for (i = 0; i < hdls->num_rules; i++) {
		if (__ipa_mdfy_rt_rule(&hdls->rules[i])) {
			IPAERR_RL("failed to mdfy rt rule %i\n", i);
			hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED;
		} else {
			hdls->rules[i].status = 0;
		}
	}

	if (hdls->commit)
		if (ipa_ctx->ctrl->ipa_commit_rt(hdls->ip)) {
			result = -EPERM;
			goto bail;
		}
	result = 0;
bail:
	mutex_unlock(&ipa_ctx->lock);

	return result;
}