blob: 2c71143e0d0efa87464163124a679e8815fd7be7 [file] [log] [blame]
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05301/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Amir Levy9659e592016-10-27 18:08:27 +03002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/bitops.h>
14#include "ipa_i.h"
15
16#define IPA_RT_TABLE_INDEX_NOT_FOUND (-1)
17#define IPA_RT_TABLE_WORD_SIZE (4)
18#define IPA_RT_INDEX_BITMAP_SIZE (32)
19#define IPA_RT_TABLE_MEMORY_ALLIGNMENT (127)
20#define IPA_RT_ENTRY_MEMORY_ALLIGNMENT (3)
21#define IPA_RT_BIT_MASK (0x1)
22#define IPA_RT_STATUS_OF_ADD_FAILED (-1)
23#define IPA_RT_STATUS_OF_DEL_FAILED (-1)
24#define IPA_RT_STATUS_OF_MDFY_FAILED (-1)
25
26/**
27 * __ipa_generate_rt_hw_rule_v2() - generates the routing hardware rule
28 * @ip: the ip address family type
29 * @entry: routing entry
30 * @buf: output buffer, buf == NULL means
31 * caller wants to know the size of the rule as seen
32 * by HW so they did not pass a valid buffer, we will use a
33 * scratch buffer instead.
34 * With this scheme we are going to
35 * generate the rule twice, once to know size using scratch
36 * buffer and second to write the rule to the actual caller
37 * supplied buffer which is of required size
38 *
39 * Returns: 0 on success, negative on failure
40 *
41 * caller needs to hold any needed locks to ensure integrity
42 *
43 */
44int __ipa_generate_rt_hw_rule_v2(enum ipa_ip_type ip,
45 struct ipa_rt_entry *entry, u8 *buf)
46{
47 struct ipa_rt_rule_hw_hdr *rule_hdr;
48 const struct ipa_rt_rule *rule =
49 (const struct ipa_rt_rule *)&entry->rule;
50 u16 en_rule = 0;
51 u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4];
52 u8 *start;
53 int pipe_idx;
54
55 if (buf == NULL) {
56 memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
57 buf = (u8 *)tmp;
58 }
59
60 start = buf;
61 rule_hdr = (struct ipa_rt_rule_hw_hdr *)buf;
62 pipe_idx = ipa2_get_ep_mapping(entry->rule.dst);
63 if (pipe_idx == -1) {
64 IPAERR("Wrong destination pipe specified in RT rule\n");
65 WARN_ON(1);
66 return -EPERM;
67 }
68 if (!IPA_CLIENT_IS_CONS(entry->rule.dst)) {
69 IPAERR("No RT rule on IPA_client_producer pipe.\n");
70 IPAERR("pipe_idx: %d dst_pipe: %d\n",
71 pipe_idx, entry->rule.dst);
72 WARN_ON(1);
73 return -EPERM;
74 }
75 rule_hdr->u.hdr.pipe_dest_idx = pipe_idx;
76 rule_hdr->u.hdr.system = !ipa_ctx->hdr_tbl_lcl;
77 if (entry->hdr) {
78 rule_hdr->u.hdr.hdr_offset =
79 entry->hdr->offset_entry->offset >> 2;
80 } else {
81 rule_hdr->u.hdr.hdr_offset = 0;
82 }
83 buf += sizeof(struct ipa_rt_rule_hw_hdr);
84
85 if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
86 IPAERR("fail to generate hw rule\n");
87 return -EPERM;
88 }
89
90 IPADBG("en_rule 0x%x\n", en_rule);
91
92 rule_hdr->u.hdr.en_rule = en_rule;
93 ipa_write_32(rule_hdr->u.word, (u8 *)rule_hdr);
94
95 if (entry->hw_len == 0) {
96 entry->hw_len = buf - start;
97 } else if (entry->hw_len != (buf - start)) {
98 IPAERR(
99 "hw_len differs b/w passes passed=0x%x calc=0x%xtd\n",
100 entry->hw_len,
101 (buf - start));
102 return -EPERM;
103 }
104
105 return 0;
106}
107
108/**
109 * __ipa_generate_rt_hw_rule_v2_5() - generates the routing hardware rule
110 * @ip: the ip address family type
111 * @entry: routing entry
112 * @buf: output buffer, buf == NULL means
113 * caller wants to know the size of the rule as seen
114 * by HW so they did not pass a valid buffer, we will use a
115 * scratch buffer instead.
116 * With this scheme we are going to
117 * generate the rule twice, once to know size using scratch
118 * buffer and second to write the rule to the actual caller
119 * supplied buffer which is of required size
120 *
121 * Returns: 0 on success, negative on failure
122 *
123 * caller needs to hold any needed locks to ensure integrity
124 *
125 */
126int __ipa_generate_rt_hw_rule_v2_5(enum ipa_ip_type ip,
127 struct ipa_rt_entry *entry, u8 *buf)
128{
129 struct ipa_rt_rule_hw_hdr *rule_hdr;
130 const struct ipa_rt_rule *rule =
131 (const struct ipa_rt_rule *)&entry->rule;
132 u16 en_rule = 0;
133 u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4];
134 u8 *start;
135 int pipe_idx;
136
137 if (buf == NULL) {
138 memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
139 buf = (u8 *)tmp;
140 }
141
142 start = buf;
143 rule_hdr = (struct ipa_rt_rule_hw_hdr *)buf;
144 pipe_idx = ipa2_get_ep_mapping(entry->rule.dst);
145 if (pipe_idx == -1) {
146 IPAERR("Wrong destination pipe specified in RT rule\n");
147 WARN_ON(1);
148 return -EPERM;
149 }
150 if (!IPA_CLIENT_IS_CONS(entry->rule.dst)) {
151 IPAERR("No RT rule on IPA_client_producer pipe.\n");
152 IPAERR("pipe_idx: %d dst_pipe: %d\n",
153 pipe_idx, entry->rule.dst);
154 WARN_ON(1);
155 return -EPERM;
156 }
157 rule_hdr->u.hdr_v2_5.pipe_dest_idx = pipe_idx;
158 if (entry->proc_ctx || (entry->hdr && entry->hdr->is_hdr_proc_ctx)) {
159 struct ipa_hdr_proc_ctx_entry *proc_ctx;
160
161 proc_ctx = (entry->proc_ctx) ? : entry->hdr->proc_ctx;
162 rule_hdr->u.hdr_v2_5.system = !ipa_ctx->hdr_proc_ctx_tbl_lcl;
163 BUG_ON(proc_ctx->offset_entry->offset & 31);
164 rule_hdr->u.hdr_v2_5.proc_ctx = 1;
165 rule_hdr->u.hdr_v2_5.hdr_offset =
166 (proc_ctx->offset_entry->offset +
167 ipa_ctx->hdr_proc_ctx_tbl.start_offset) >> 5;
168 } else if (entry->hdr) {
169 rule_hdr->u.hdr_v2_5.system = !ipa_ctx->hdr_tbl_lcl;
170 BUG_ON(entry->hdr->offset_entry->offset & 3);
171 rule_hdr->u.hdr_v2_5.proc_ctx = 0;
172 rule_hdr->u.hdr_v2_5.hdr_offset =
173 entry->hdr->offset_entry->offset >> 2;
174 } else {
175 rule_hdr->u.hdr_v2_5.proc_ctx = 0;
176 rule_hdr->u.hdr_v2_5.hdr_offset = 0;
177 }
178 buf += sizeof(struct ipa_rt_rule_hw_hdr);
179
180 if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
181 IPAERR("fail to generate hw rule\n");
182 return -EPERM;
183 }
184
185 IPADBG("en_rule 0x%x\n", en_rule);
186
187 rule_hdr->u.hdr_v2_5.en_rule = en_rule;
188 ipa_write_32(rule_hdr->u.word, (u8 *)rule_hdr);
189
190 if (entry->hw_len == 0) {
191 entry->hw_len = buf - start;
192 } else if (entry->hw_len != (buf - start)) {
193 IPAERR("hw_len differs b/w passes passed=0x%x calc=0x%xtd\n",
194 entry->hw_len, (buf - start));
195 return -EPERM;
196 }
197
198 return 0;
199}
200
/**
 * __ipa_generate_rt_hw_rule_v2_6L() - generates the routing hardware rule
 * @ip: the ip address family type
 * @entry: routing entry
 * @buf: output buffer, buf == NULL means that the caller wants to know the size
 *	 of the rule as seen by HW so they did not pass a valid buffer, we will
 *	 use a scratch buffer instead.
 *	 With this scheme we are going to generate the rule twice, once to know
 *	 size using scratch buffer and second to write the rule to the actual
 *	 caller supplied buffer which is of required size.
 *
 * Returns: 0 on success, negative on failure
 *
 * caller needs to hold any needed locks to ensure integrity
 *
 * IPA v2.6L uses the same routing rule layout as IPA v2, so this simply
 * delegates to the v2 generator.
 */
int __ipa_generate_rt_hw_rule_v2_6L(enum ipa_ip_type ip,
		struct ipa_rt_entry *entry, u8 *buf)
{
	/* Same implementation as IPAv2 */
	return __ipa_generate_rt_hw_rule_v2(ip, entry, buf);
}
223
/**
 * ipa_get_rt_hw_tbl_size() - returns the size of HW routing table
 * @ip: the ip address family type
 * @hdr_sz: [out] size in bytes of the table-index header area
 * @max_rt_idx: [out] highest routing table index currently allocated
 *
 * Returns: size on success, negative on failure
 *
 * caller needs to hold any needed locks to ensure integrity
 *
 * the MSB set in rt_idx_bitmap indicates the size of hdr of routing tbl
 */
static int ipa_get_rt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz,
		int *max_rt_idx)
{
	struct ipa_rt_tbl_set *set;
	struct ipa_rt_tbl *tbl;
	struct ipa_rt_entry *entry;
	u32 total_sz = 0;
	u32 tbl_sz;
	u32 bitmap = ipa_ctx->rt_idx_bitmap[ip];
	int highest_bit_set = IPA_RT_TABLE_INDEX_NOT_FOUND;
	int i;
	int res;

	*hdr_sz = 0;
	set = &ipa_ctx->rt_tbl_set[ip];

	/* highest index in use determines how many header words we need */
	for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
		if (bitmap & IPA_RT_BIT_MASK)
			highest_bit_set = i;
		bitmap >>= 1;
	}

	*max_rt_idx = highest_bit_set;
	if (highest_bit_set == IPA_RT_TABLE_INDEX_NOT_FOUND) {
		/* no tables allocated: report a single-word header only */
		IPAERR("no rt tbls present\n");
		total_sz = IPA_RT_TABLE_WORD_SIZE;
		*hdr_sz = IPA_RT_TABLE_WORD_SIZE;
		return total_sz;
	}

	/* one header word per index, up to and including the highest */
	*hdr_sz = (highest_bit_set + 1) * IPA_RT_TABLE_WORD_SIZE;
	total_sz += *hdr_sz;
	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
		tbl_sz = 0;
		list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
			/* NULL buf = size-probe pass; fills entry->hw_len */
			res = ipa_ctx->ctrl->ipa_generate_rt_hw_rule(
				ip,
				entry,
				NULL);
			if (res) {
				IPAERR("failed to find HW RT rule size\n");
				return -EPERM;
			}
			tbl_sz += entry->hw_len;
		}

		if (tbl_sz)
			tbl->sz = tbl_sz + IPA_RT_TABLE_WORD_SIZE;

		/* system (DDR-resident) tables don't consume local space */
		if (tbl->in_sys)
			continue;

		if (tbl_sz) {
			/* add the terminator */
			total_sz += (tbl_sz + IPA_RT_TABLE_WORD_SIZE);
			/* every rule-set should start at word boundary */
			total_sz = (total_sz + IPA_RT_ENTRY_MEMORY_ALLIGNMENT) &
					~IPA_RT_ENTRY_MEMORY_ALLIGNMENT;
		}
	}

	IPADBG("RT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip);

	return total_sz;
}
301
/*
 * ipa_generate_rt_hw_tbl_common() - generate HW routing tables for @ip.
 * Local (non-system) tables are written sequentially into @base; each
 * system table gets its own DMA-coherent buffer. @hdr is the table-index
 * header area to patch, @body_ofst is the offset of @base within shared
 * memory, and @apps_start_idx is the first apps-owned table index.
 *
 * Returns 0 on success, -EPERM on failure. Caller holds needed locks;
 * assumes sizes were pre-computed (tbl->sz / entry->hw_len are valid).
 */
static int ipa_generate_rt_hw_tbl_common(enum ipa_ip_type ip, u8 *base, u8 *hdr,
		u32 body_ofst, u32 apps_start_idx)
{
	struct ipa_rt_tbl *tbl;
	struct ipa_rt_entry *entry;
	struct ipa_rt_tbl_set *set;
	u32 offset;
	u8 *body;
	struct ipa_mem_buffer rt_tbl_mem;
	u8 *rt_tbl_mem_body;
	int res;

	/* build the rt tbl in the DMA buffer to submit to IPA HW */
	body = base;

	set = &ipa_ctx->rt_tbl_set[ip];
	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
		if (!tbl->in_sys) {
			offset = body - base + body_ofst;
			if (offset & IPA_RT_ENTRY_MEMORY_ALLIGNMENT) {
				IPAERR("offset is not word multiple %d\n",
						offset);
				goto proc_err;
			}

			/* convert offset to words from bytes */
			offset &= ~IPA_RT_ENTRY_MEMORY_ALLIGNMENT;
			/* rule is at an offset from base */
			offset |= IPA_RT_BIT_MASK;

			/* update the hdr at the right index */
			ipa_write_32(offset, hdr +
					((tbl->idx - apps_start_idx) *
					IPA_RT_TABLE_WORD_SIZE));

			/* generate the rule-set */
			list_for_each_entry(entry, &tbl->head_rt_rule_list,
					link) {
				res = ipa_ctx->ctrl->ipa_generate_rt_hw_rule(
					ip,
					entry,
					body);
				if (res) {
					IPAERR("failed to gen HW RT rule\n");
					goto proc_err;
				}
				body += entry->hw_len;
			}

			/* write the rule-set terminator */
			body = ipa_write_32(0, body);
			if ((long)body & IPA_RT_ENTRY_MEMORY_ALLIGNMENT)
				/* advance body to next word boundary */
				body = body + (IPA_RT_TABLE_WORD_SIZE -
						((long)body &
						IPA_RT_ENTRY_MEMORY_ALLIGNMENT));
		} else {
			if (tbl->sz == 0) {
				IPAERR("cannot generate 0 size table\n");
				goto proc_err;
			}

			/* allocate memory for the RT tbl */
			rt_tbl_mem.size = tbl->sz;
			rt_tbl_mem.base =
				dma_alloc_coherent(ipa_ctx->pdev, rt_tbl_mem.size,
					&rt_tbl_mem.phys_base, GFP_KERNEL);
			if (!rt_tbl_mem.base) {
				IPAERR("fail to alloc DMA buff of size %d\n",
						rt_tbl_mem.size);
				WARN_ON(1);
				goto proc_err;
			}

			/* HW expects word-aligned table addresses */
			WARN_ON(rt_tbl_mem.phys_base &
					IPA_RT_ENTRY_MEMORY_ALLIGNMENT);
			rt_tbl_mem_body = rt_tbl_mem.base;
			memset(rt_tbl_mem.base, 0, rt_tbl_mem.size);
			/* update the hdr at the right index */
			ipa_write_32(rt_tbl_mem.phys_base,
					hdr + ((tbl->idx - apps_start_idx) *
					IPA_RT_TABLE_WORD_SIZE));
			/* generate the rule-set */
			list_for_each_entry(entry, &tbl->head_rt_rule_list,
					link) {
				res = ipa_ctx->ctrl->ipa_generate_rt_hw_rule(
					ip,
					entry,
					rt_tbl_mem_body);
				if (res) {
					IPAERR("failed to gen HW RT rule\n");
					WARN_ON(1);
					goto rt_table_mem_alloc_failed;
				}
				rt_tbl_mem_body += entry->hw_len;
			}

			/* write the rule-set terminator */
			rt_tbl_mem_body = ipa_write_32(0, rt_tbl_mem_body);

			/* keep the old buffer for reaping after commit */
			if (tbl->curr_mem.phys_base) {
				WARN_ON(tbl->prev_mem.phys_base);
				tbl->prev_mem = tbl->curr_mem;
			}
			tbl->curr_mem = rt_tbl_mem;
		}
	}

	return 0;

rt_table_mem_alloc_failed:
	dma_free_coherent(ipa_ctx->pdev, rt_tbl_mem.size,
			rt_tbl_mem.base, rt_tbl_mem.phys_base);
proc_err:
	return -EPERM;
}
418
419
/**
 * ipa_generate_rt_hw_tbl_v1_1() - generates the routing hardware table
 * @ip: [in] the ip address family type
 * @mem: [out] buffer to put the routing table
 *
 * Sizes the table, allocates one DMA buffer that holds both the index
 * header and the table bodies, points unused indices at the shared empty
 * table, then fills it via ipa_generate_rt_hw_tbl_common().
 *
 * Returns: 0 on success, negative on failure
 */
static int ipa_generate_rt_hw_tbl_v1_1(enum ipa_ip_type ip,
		struct ipa_mem_buffer *mem)
{
	u32 hdr_sz;
	u8 *hdr;
	u8 *body;
	u8 *base;
	int max_rt_idx;
	int i;
	int res;

	res = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx);
	if (res < 0) {
		IPAERR("ipa_get_rt_hw_tbl_size failed %d\n", res);
		goto error;
	}

	mem->size = res;
	/* round up to the HW table alignment (ALLIGNMENT is 2^n - 1) */
	mem->size = (mem->size + IPA_RT_TABLE_MEMORY_ALLIGNMENT) &
			~IPA_RT_TABLE_MEMORY_ALLIGNMENT;

	if (mem->size == 0) {
		IPAERR("rt tbl empty ip=%d\n", ip);
		goto error;
	}
	mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
			&mem->phys_base, GFP_KERNEL);
	if (!mem->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
		goto error;
	}

	memset(mem->base, 0, mem->size);

	/* build the rt tbl in the DMA buffer to submit to IPA HW */
	base = hdr = (u8 *)mem->base;
	body = base + hdr_sz;

	/* setup all indices to point to the empty sys rt tbl */
	for (i = 0; i <= max_rt_idx; i++)
		ipa_write_32(ipa_ctx->empty_rt_tbl_mem.phys_base,
				hdr + (i * IPA_RT_TABLE_WORD_SIZE));

	if (ipa_generate_rt_hw_tbl_common(ip, base, hdr, 0, 0)) {
		IPAERR("fail to generate RT tbl\n");
		goto proc_err;
	}

	return 0;

proc_err:
	dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base);
	mem->base = NULL;
error:
	return -EPERM;
}
483
484static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
485{
486 struct ipa_rt_tbl *tbl;
487 struct ipa_rt_tbl *next;
488 struct ipa_rt_tbl_set *set;
489
490 set = &ipa_ctx->rt_tbl_set[ip];
491 list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
492 if (tbl->prev_mem.phys_base) {
493 IPADBG("reaping rt tbl name=%s ip=%d\n", tbl->name, ip);
494 dma_free_coherent(ipa_ctx->pdev, tbl->prev_mem.size,
495 tbl->prev_mem.base,
496 tbl->prev_mem.phys_base);
497 memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
498 }
499 }
500
501 set = &ipa_ctx->reap_rt_tbl_set[ip];
502 list_for_each_entry_safe(tbl, next, &set->head_rt_tbl_list, link) {
503 list_del(&tbl->link);
504 WARN_ON(tbl->prev_mem.phys_base != 0);
505 if (tbl->curr_mem.phys_base) {
506 IPADBG("reaping sys rt tbl name=%s ip=%d\n", tbl->name,
507 ip);
508 dma_free_coherent(ipa_ctx->pdev, tbl->curr_mem.size,
509 tbl->curr_mem.base,
510 tbl->curr_mem.phys_base);
511 kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl);
512 }
513 }
514}
515
/**
 * __ipa_commit_rt_v1_1() - commit the routing table to IPA HW (v1.1)
 * @ip: the ip address family type
 *
 * Generates the routing table into DMA memory, installs it with an
 * IPA_IP_V{4,6}_ROUTING_INIT immediate command, then reaps tables that
 * are no longer referenced.
 *
 * Returns: 0 on success, -EPERM on failure
 *
 * caller needs to hold any needed locks to ensure integrity
 */
int __ipa_commit_rt_v1_1(enum ipa_ip_type ip)
{
	struct ipa_desc desc = { 0 };
	struct ipa_mem_buffer *mem;
	void *cmd;
	struct ipa_ip_v4_routing_init *v4;
	struct ipa_ip_v6_routing_init *v6;
	u16 avail;
	u16 size;
	/* immediate commands may need the DMA zone on some targets */
	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);

	mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
	if (!mem) {
		IPAERR("failed to alloc memory object\n");
		goto fail_alloc_mem;
	}

	/* pick capacity limit and command size per family/location */
	if (ip == IPA_IP_v4) {
		avail = ipa_ctx->ip4_rt_tbl_lcl ? IPA_MEM_v1_RAM_V4_RT_SIZE :
			IPA_MEM_PART(v4_rt_size_ddr);
		size = sizeof(struct ipa_ip_v4_routing_init);
	} else {
		avail = ipa_ctx->ip6_rt_tbl_lcl ? IPA_MEM_v1_RAM_V6_RT_SIZE :
			IPA_MEM_PART(v6_rt_size_ddr);
		size = sizeof(struct ipa_ip_v6_routing_init);
	}
	cmd = kmalloc(size, flag);
	if (!cmd) {
		IPAERR("failed to alloc immediate command object\n");
		goto fail_alloc_cmd;
	}

	if (ipa_generate_rt_hw_tbl_v1_1(ip, mem)) {
		IPAERR("fail to generate RT HW TBL ip %d\n", ip);
		goto fail_hw_tbl_gen;
	}

	if (mem->size > avail) {
		IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
		goto fail_send_cmd;
	}

	if (ip == IPA_IP_v4) {
		v4 = (struct ipa_ip_v4_routing_init *)cmd;
		desc.opcode = IPA_IP_V4_ROUTING_INIT;
		v4->ipv4_rules_addr = mem->phys_base;
		v4->size_ipv4_rules = mem->size;
		v4->ipv4_addr = IPA_MEM_v1_RAM_V4_RT_OFST;
		IPADBG("putting Routing IPv4 rules to phys 0x%x",
				v4->ipv4_addr);
	} else {
		v6 = (struct ipa_ip_v6_routing_init *)cmd;
		desc.opcode = IPA_IP_V6_ROUTING_INIT;
		v6->ipv6_rules_addr = mem->phys_base;
		v6->size_ipv6_rules = mem->size;
		v6->ipv6_addr = IPA_MEM_v1_RAM_V6_RT_OFST;
		IPADBG("putting Routing IPv6 rules to phys 0x%x",
				v6->ipv6_addr);
	}

	desc.pyld = cmd;
	desc.len = size;
	desc.type = IPA_IMM_CMD_DESC;
	IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);

	if (ipa_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		goto fail_send_cmd;
	}

	/* HW now owns a copy; release staging memory */
	__ipa_reap_sys_rt_tbls(ip);
	dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base);
	kfree(cmd);
	kfree(mem);

	return 0;

fail_send_cmd:
	if (mem->base)
		dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
				mem->phys_base);
fail_hw_tbl_gen:
	kfree(cmd);
fail_alloc_cmd:
	kfree(mem);
fail_alloc_mem:
	return -EPERM;
}
604
/*
 * ipa_generate_rt_hw_tbl_v2() - generate the v2 routing tables for @ip.
 * @head receives a DMA buffer holding the apps table-index header (one
 * 32-bit word per apps index, defaulted to the empty system table);
 * @mem receives a DMA buffer with the non-system table bodies (may be
 * empty if all tables live in system memory).
 *
 * Returns 0 on success, -EPERM on failure. On failure nothing is left
 * allocated.
 */
static int ipa_generate_rt_hw_tbl_v2(enum ipa_ip_type ip,
		struct ipa_mem_buffer *mem, struct ipa_mem_buffer *head)
{
	u32 hdr_sz;
	u8 *hdr;
	u8 *body;
	u8 *base;
	int max_rt_idx;
	int i;
	u32 *entr;
	int num_index;
	u32 body_start_offset;
	u32 apps_start_idx;
	int res;

	/* select the apps-owned index window and body offset per family */
	if (ip == IPA_IP_v4) {
		num_index = IPA_MEM_PART(v4_apps_rt_index_hi) -
			IPA_MEM_PART(v4_apps_rt_index_lo) + 1;
		body_start_offset = IPA_MEM_PART(apps_v4_rt_ofst) -
			IPA_MEM_PART(v4_rt_ofst);
		apps_start_idx = IPA_MEM_PART(v4_apps_rt_index_lo);
	} else {
		num_index = IPA_MEM_PART(v6_apps_rt_index_hi) -
			IPA_MEM_PART(v6_apps_rt_index_lo) + 1;
		body_start_offset = IPA_MEM_PART(apps_v6_rt_ofst) -
			IPA_MEM_PART(v6_rt_ofst);
		apps_start_idx = IPA_MEM_PART(v6_apps_rt_index_lo);
	}

	head->size = num_index * 4;
	head->base = dma_alloc_coherent(ipa_ctx->pdev, head->size,
			&head->phys_base, GFP_KERNEL);
	if (!head->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", head->size);
		goto err;
	}
	entr = (u32 *)head->base;
	hdr = (u8 *)head->base;
	/* default all apps indices to the shared empty system table */
	for (i = 1; i <= num_index; i++) {
		*entr = ipa_ctx->empty_rt_tbl_mem.phys_base;
		entr++;
	}

	res = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx);
	if (res < 0) {
		IPAERR("ipa_get_rt_hw_tbl_size failed %d\n", res);
		goto base_err;
	}

	mem->size = res;
	/* body size excludes the header; align (ALLIGNMENT is 2^n - 1) */
	mem->size -= hdr_sz;
	mem->size = (mem->size + IPA_RT_TABLE_MEMORY_ALLIGNMENT) &
			~IPA_RT_TABLE_MEMORY_ALLIGNMENT;

	if (mem->size > 0) {
		mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
				&mem->phys_base, GFP_KERNEL);
		if (!mem->base) {
			IPAERR("fail to alloc DMA buff of size %d\n",
					mem->size);
			goto base_err;
		}
		memset(mem->base, 0, mem->size);
	}

	/* build the rt tbl in the DMA buffer to submit to IPA HW */
	body = base = (u8 *)mem->base;

	if (ipa_generate_rt_hw_tbl_common(ip, base, hdr, body_start_offset,
				apps_start_idx)) {
		IPAERR("fail to generate RT tbl\n");
		goto proc_err;
	}

	return 0;

proc_err:
	if (mem->size)
		dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
				mem->phys_base);
base_err:
	dma_free_coherent(ipa_ctx->pdev, head->size, head->base,
			head->phys_base);
err:
	return -EPERM;
}
691
/**
 * __ipa_commit_rt_v2() - commit the routing tables to IPA HW (v2)
 * @ip: the ip address family type
 *
 * Generates the table-index header and (optionally) the local table
 * body, installs them via one or two IPA_DMA_SHARED_MEM immediate
 * commands (the second only when the table body is local/SRAM), then
 * reaps tables that are no longer referenced.
 *
 * Returns: 0 on success, negative on failure
 *
 * caller needs to hold any needed locks to ensure integrity
 */
int __ipa_commit_rt_v2(enum ipa_ip_type ip)
{
	struct ipa_desc desc[2];
	struct ipa_mem_buffer body;
	struct ipa_mem_buffer head;
	struct ipa_hw_imm_cmd_dma_shared_mem *cmd1 = NULL;
	struct ipa_hw_imm_cmd_dma_shared_mem *cmd2 = NULL;
	/* immediate commands may need the DMA zone on some targets */
	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
	u16 avail;
	u32 num_modem_rt_index;
	int rc = 0;
	u32 local_addr1;
	u32 local_addr2;
	bool lcl;

	memset(desc, 0, 2 * sizeof(struct ipa_desc));

	if (ip == IPA_IP_v4) {
		avail = ipa_ctx->ip4_rt_tbl_lcl ?
			IPA_MEM_PART(apps_v4_rt_size) :
			IPA_MEM_PART(v4_rt_size_ddr);
		num_modem_rt_index =
			IPA_MEM_PART(v4_modem_rt_index_hi) -
			IPA_MEM_PART(v4_modem_rt_index_lo) + 1;
		/* apps index header starts right after the modem indices */
		local_addr1 = ipa_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v4_rt_ofst) +
			num_modem_rt_index * 4;
		local_addr2 = ipa_ctx->smem_restricted_bytes +
			IPA_MEM_PART(apps_v4_rt_ofst);
		lcl = ipa_ctx->ip4_rt_tbl_lcl;
	} else {
		avail = ipa_ctx->ip6_rt_tbl_lcl ?
			IPA_MEM_PART(apps_v6_rt_size) :
			IPA_MEM_PART(v6_rt_size_ddr);
		num_modem_rt_index =
			IPA_MEM_PART(v6_modem_rt_index_hi) -
			IPA_MEM_PART(v6_modem_rt_index_lo) + 1;
		local_addr1 = ipa_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v6_rt_ofst) +
			num_modem_rt_index * 4;
		local_addr2 = ipa_ctx->smem_restricted_bytes +
			IPA_MEM_PART(apps_v6_rt_ofst);
		lcl = ipa_ctx->ip6_rt_tbl_lcl;
	}

	if (ipa_generate_rt_hw_tbl_v2(ip, &body, &head)) {
		IPAERR("fail to generate RT HW TBL ip %d\n", ip);
		rc = -EFAULT;
		goto fail_gen;
	}

	if (body.size > avail) {
		IPAERR("tbl too big, needed %d avail %d\n", body.size, avail);
		rc = -EFAULT;
		goto fail_send_cmd;
	}

	cmd1 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
		flag);
	if (cmd1 == NULL) {
		IPAERR("Failed to alloc immediate command object\n");
		rc = -ENOMEM;
		goto fail_send_cmd;
	}

	/* first command: copy the index header into local memory */
	cmd1->size = head.size;
	cmd1->system_addr = head.phys_base;
	cmd1->local_addr = local_addr1;
	desc[0].opcode = IPA_DMA_SHARED_MEM;
	desc[0].pyld = (void *)cmd1;
	desc[0].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
	desc[0].type = IPA_IMM_CMD_DESC;

	if (lcl) {
		/* second command: copy the table body into local memory */
		cmd2 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
			flag);
		if (cmd2 == NULL) {
			IPAERR("Failed to alloc immediate command object\n");
			rc = -ENOMEM;
			goto fail_send_cmd1;
		}

		cmd2->size = body.size;
		cmd2->system_addr = body.phys_base;
		cmd2->local_addr = local_addr2;

		desc[1].opcode = IPA_DMA_SHARED_MEM;
		desc[1].pyld = (void *)cmd2;
		desc[1].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
		desc[1].type = IPA_IMM_CMD_DESC;

		if (ipa_send_cmd(2, desc)) {
			IPAERR("fail to send immediate command\n");
			rc = -EFAULT;
			goto fail_send_cmd2;
		}
	} else {
		if (ipa_send_cmd(1, desc)) {
			IPAERR("fail to send immediate command\n");
			rc = -EFAULT;
			goto fail_send_cmd1;
		}
	}

	IPADBG("HEAD\n");
	IPA_DUMP_BUFF(head.base, head.phys_base, head.size);
	if (body.size) {
		IPADBG("BODY\n");
		IPA_DUMP_BUFF(body.base, body.phys_base, body.size);
	}
	__ipa_reap_sys_rt_tbls(ip);

	/* success and failure share this cleanup; rc tells them apart */
fail_send_cmd2:
	kfree(cmd2);
fail_send_cmd1:
	kfree(cmd1);
fail_send_cmd:
	dma_free_coherent(ipa_ctx->pdev, head.size, head.base, head.phys_base);
	if (body.size)
		dma_free_coherent(ipa_ctx->pdev, body.size, body.base,
				body.phys_base);
fail_gen:
	return rc;
}
816
817/**
818 * __ipa_find_rt_tbl() - find the routing table
819 * which name is given as parameter
820 * @ip: [in] the ip address family type of the wanted routing table
821 * @name: [in] the name of the wanted routing table
822 *
823 * Returns: the routing table which name is given as parameter, or NULL if it
824 * doesn't exist
825 */
826struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name)
827{
828 struct ipa_rt_tbl *entry;
829 struct ipa_rt_tbl_set *set;
830
831 set = &ipa_ctx->rt_tbl_set[ip];
832 list_for_each_entry(entry, &set->head_rt_tbl_list, link) {
833 if (!strcmp(name, entry->name))
834 return entry;
835 }
836
837 return NULL;
838}
839
840/**
841 * ipa2_query_rt_index() - find the routing table index
842 * which name and ip type are given as parameters
843 * @in: [out] the index of the wanted routing table
844 *
845 * Returns: the routing table which name is given as parameter, or NULL if it
846 * doesn't exist
847 */
848int ipa2_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in)
849{
850 struct ipa_rt_tbl *entry;
851
852 if (in->ip >= IPA_IP_MAX) {
853 IPAERR("bad parm\n");
854 return -EINVAL;
855 }
856
857 /* check if this table exists */
858 entry = __ipa_find_rt_tbl(in->ip, in->name);
859 if (!entry)
860 return -EFAULT;
861
862 in->idx = entry->idx;
863 return 0;
864}
865
866static struct ipa_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
867 const char *name)
868{
869 struct ipa_rt_tbl *entry;
870 struct ipa_rt_tbl_set *set;
871 int i;
872 int id;
873
874 if (ip >= IPA_IP_MAX || name == NULL) {
875 IPAERR("bad parm\n");
876 goto error;
877 }
878
879 set = &ipa_ctx->rt_tbl_set[ip];
880 /* check if this table exists */
881 entry = __ipa_find_rt_tbl(ip, name);
882 if (!entry) {
883 entry = kmem_cache_zalloc(ipa_ctx->rt_tbl_cache, GFP_KERNEL);
884 if (!entry) {
885 IPAERR("failed to alloc RT tbl object\n");
886 goto error;
887 }
888 /* find a routing tbl index */
889 for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
890 if (!test_bit(i, &ipa_ctx->rt_idx_bitmap[ip])) {
891 entry->idx = i;
892 set_bit(i, &ipa_ctx->rt_idx_bitmap[ip]);
893 break;
894 }
895 }
896 if (i == IPA_RT_INDEX_BITMAP_SIZE) {
897 IPAERR("not free RT tbl indices left\n");
898 goto fail_rt_idx_alloc;
899 }
900
901 INIT_LIST_HEAD(&entry->head_rt_rule_list);
902 INIT_LIST_HEAD(&entry->link);
903 strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
904 entry->set = set;
Mohammed Javid93e94ce2017-06-15 15:39:04 +0530905 entry->cookie = IPA_RT_TBL_COOKIE;
Amir Levy9659e592016-10-27 18:08:27 +0300906 entry->in_sys = (ip == IPA_IP_v4) ?
907 !ipa_ctx->ip4_rt_tbl_lcl : !ipa_ctx->ip6_rt_tbl_lcl;
908 set->tbl_cnt++;
909 list_add(&entry->link, &set->head_rt_tbl_list);
910
911 IPADBG("add rt tbl idx=%d tbl_cnt=%d ip=%d\n", entry->idx,
912 set->tbl_cnt, ip);
913
914 id = ipa_id_alloc(entry);
915 if (id < 0) {
916 IPAERR("failed to add to tree\n");
917 WARN_ON(1);
Mohammed Javid93e94ce2017-06-15 15:39:04 +0530918 goto ipa_insert_failed;
Amir Levy9659e592016-10-27 18:08:27 +0300919 }
920 entry->id = id;
921 }
922
923 return entry;
924
Mohammed Javid93e94ce2017-06-15 15:39:04 +0530925ipa_insert_failed:
926 set->tbl_cnt--;
927 list_del(&entry->link);
Amir Levy9659e592016-10-27 18:08:27 +0300928fail_rt_idx_alloc:
929 entry->cookie = 0;
930 kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
931error:
932 return NULL;
933}
934
/*
 * __ipa_del_rt_tbl() - remove routing table @entry from its set.
 * Non-system tables are freed immediately; system tables are moved to
 * the reap list so their DMA memory can be released after the next
 * commit. In both cases the index bit and the ID handle are released.
 *
 * Returns 0 on success, negative on failure.
 * Caller must hold any needed locks to ensure integrity.
 */
static int __ipa_del_rt_tbl(struct ipa_rt_tbl *entry)
{
	enum ipa_ip_type ip = IPA_IP_MAX;
	u32 id;

	if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE)) {
		IPAERR("bad parms\n");
		return -EINVAL;
	}
	id = entry->id;
	if (ipa_id_find(id) == NULL) {
		IPAERR("lookup failed\n");
		return -EPERM;
	}

	/* derive the family from the set the table belongs to */
	if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4])
		ip = IPA_IP_v4;
	else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
		ip = IPA_IP_v6;
	else {
		WARN_ON(1);
		return -EPERM;
	}


	if (!entry->in_sys) {
		list_del(&entry->link);
		clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]);
		entry->set->tbl_cnt--;
		IPADBG("del rt tbl_idx=%d tbl_cnt=%d\n", entry->idx,
				entry->set->tbl_cnt);
		kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
	} else {
		/* defer freeing until __ipa_reap_sys_rt_tbls() runs */
		list_move(&entry->link,
				&ipa_ctx->reap_rt_tbl_set[ip].head_rt_tbl_list);
		clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]);
		entry->set->tbl_cnt--;
		IPADBG("del sys rt tbl_idx=%d tbl_cnt=%d\n", entry->idx,
				entry->set->tbl_cnt);
	}

	/* remove the handle from the database */
	ipa_id_remove(id);
	return 0;
}
980
/*
 * __ipa_add_rt_rule() - add one routing rule to table @name of family
 * @ip, creating the table if needed. @at_rear selects tail vs head
 * insertion (tail insertion is rejected for the default table). The
 * rule's ID handle is returned through @rule_hdl.
 *
 * Returns 0 on success, -EPERM on failure.
 * Caller must hold any needed locks to ensure integrity.
 */
static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
		const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl)
{
	struct ipa_rt_tbl *tbl;
	struct ipa_rt_entry *entry;
	struct ipa_hdr_entry *hdr = NULL;
	struct ipa_hdr_proc_ctx_entry *proc_ctx = NULL;
	int id;

	/* a rule may reference a header or a proc ctx, never both */
	if (rule->hdr_hdl && rule->hdr_proc_ctx_hdl) {
		IPAERR("rule contains both hdr_hdl and hdr_proc_ctx_hdl\n");
		goto error;
	}

	if (rule->hdr_hdl) {
		hdr = ipa_id_find(rule->hdr_hdl);
		if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) {
			IPAERR("rt rule does not point to valid hdr\n");
			goto error;
		}
	} else if (rule->hdr_proc_ctx_hdl) {
		proc_ctx = ipa_id_find(rule->hdr_proc_ctx_hdl);
		if ((proc_ctx == NULL) ||
			(proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) {
			IPAERR("rt rule does not point to valid proc ctx\n");
			goto error;
		}
	}


	tbl = __ipa_add_rt_tbl(ip, name);
	if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {
		IPAERR("bad params\n");
		goto error;
	}
	/*
	 * do not allow any rules to be added at end of the "default" routing
	 * tables
	 */
	if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
	    (tbl->rule_cnt > 0) && (at_rear != 0)) {
		IPAERR("cannot add rule at end of tbl rule_cnt=%d at_rear=%d\n",
				tbl->rule_cnt, at_rear);
		goto error;
	}

	entry = kmem_cache_zalloc(ipa_ctx->rt_rule_cache, GFP_KERNEL);
	if (!entry) {
		IPAERR("failed to alloc RT rule object\n");
		goto error;
	}
	INIT_LIST_HEAD(&entry->link);
	entry->cookie = IPA_RT_RULE_COOKIE;
	entry->rule = *rule;
	entry->tbl = tbl;
	entry->hdr = hdr;
	entry->proc_ctx = proc_ctx;
	if (at_rear)
		list_add_tail(&entry->link, &tbl->head_rt_rule_list);
	else
		list_add(&entry->link, &tbl->head_rt_rule_list);
	tbl->rule_cnt++;
	/* the rule pins its header / proc ctx via refcount */
	if (entry->hdr)
		entry->hdr->ref_cnt++;
	else if (entry->proc_ctx)
		entry->proc_ctx->ref_cnt++;
	id = ipa_id_alloc(entry);
	if (id < 0) {
		IPAERR("failed to add to tree\n");
		WARN_ON(1);
		goto ipa_insert_failed;
	}
	IPADBG("add rt rule tbl_idx=%d rule_cnt=%d\n", tbl->idx, tbl->rule_cnt);
	*rule_hdl = id;
	entry->id = id;

	return 0;

ipa_insert_failed:
	if (entry->hdr)
		entry->hdr->ref_cnt--;
	else if (entry->proc_ctx)
		entry->proc_ctx->ref_cnt--;
	list_del(&entry->link);
	kmem_cache_free(ipa_ctx->rt_rule_cache, entry);
error:
	return -EPERM;
}
1069
1070/**
1071 * ipa2_add_rt_rule() - Add the specified routing rules to SW and optionally
1072 * commit to IPA HW
1073 * @rules: [inout] set of routing rules to add
1074 *
1075 * Returns: 0 on success, negative on failure
1076 *
1077 * Note: Should not be called from atomic context
1078 */
1079int ipa2_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
1080{
1081 int i;
1082 int ret;
1083
1084 if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
1085 IPAERR("bad parm\n");
1086 return -EINVAL;
1087 }
1088
1089 mutex_lock(&ipa_ctx->lock);
1090 for (i = 0; i < rules->num_rules; i++) {
1091 if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
1092 &rules->rules[i].rule,
1093 rules->rules[i].at_rear,
1094 &rules->rules[i].rt_rule_hdl)) {
1095 IPAERR("failed to add rt rule %d\n", i);
1096 rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
1097 } else {
1098 rules->rules[i].status = 0;
1099 }
1100 }
1101
1102 if (rules->commit)
1103 if (ipa_ctx->ctrl->ipa_commit_rt(rules->ip)) {
1104 ret = -EPERM;
1105 goto bail;
1106 }
1107
1108 ret = 0;
1109bail:
1110 mutex_unlock(&ipa_ctx->lock);
1111 return ret;
1112}
1113
/**
 * __ipa_del_rt_rule() - delete a routing rule from its SW routing table
 * @rule_hdl: handle (ipa_id) of the rule to delete
 *
 * Releases the header or processing context the rule references, unlinks
 * the rule from its table and, once the table holds no rules and no
 * external references, deletes the table itself.
 *
 * Returns: 0 on success, -EINVAL on failure
 *
 * Note: called with ipa_ctx->lock held (see ipa2_del_rt_rule())
 */
int __ipa_del_rt_rule(u32 rule_hdl)
{
	struct ipa_rt_entry *entry;
	int id;

	entry = ipa_id_find(rule_hdl);

	if (entry == NULL) {
		IPAERR("lookup failed\n");
		return -EINVAL;
	}

	/* cookie guards against stale or foreign handles */
	if (entry->cookie != IPA_RT_RULE_COOKIE) {
		IPAERR("bad params\n");
		return -EINVAL;
	}

	/* drop the reference the rule held on its hdr (or proc ctx) */
	if (entry->hdr)
		__ipa_release_hdr(entry->hdr->id);
	else if (entry->proc_ctx)
		__ipa_release_hdr_proc_ctx(entry->proc_ctx->id);
	list_del(&entry->link);
	entry->tbl->rule_cnt--;
	IPADBG("del rt rule tbl_idx=%d rule_cnt=%d\n", entry->tbl->idx,
		entry->tbl->rule_cnt);
	/* reap the table once it is both empty and unreferenced */
	if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
		if (__ipa_del_rt_tbl(entry->tbl))
			IPAERR("fail to del RT tbl\n");
	}
	/* invalidate the cookie so reuse of the freed handle is caught */
	entry->cookie = 0;
	id = entry->id;
	kmem_cache_free(ipa_ctx->rt_rule_cache, entry);

	/* remove the handle from the database */
	ipa_id_remove(id);

	return 0;
}
1152
1153/**
1154 * ipa2_del_rt_rule() - Remove the specified routing rules to SW and optionally
1155 * commit to IPA HW
1156 * @hdls: [inout] set of routing rules to delete
1157 *
1158 * Returns: 0 on success, negative on failure
1159 *
1160 * Note: Should not be called from atomic context
1161 */
1162int ipa2_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
1163{
1164 int i;
1165 int ret;
1166
1167 if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
1168 IPAERR("bad parm\n");
1169 return -EINVAL;
1170 }
1171
1172 mutex_lock(&ipa_ctx->lock);
1173 for (i = 0; i < hdls->num_hdls; i++) {
1174 if (__ipa_del_rt_rule(hdls->hdl[i].hdl)) {
1175 IPAERR("failed to del rt rule %i\n", i);
1176 hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
1177 } else {
1178 hdls->hdl[i].status = 0;
1179 }
1180 }
1181
1182 if (hdls->commit)
1183 if (ipa_ctx->ctrl->ipa_commit_rt(hdls->ip)) {
1184 ret = -EPERM;
1185 goto bail;
1186 }
1187
1188 ret = 0;
1189bail:
1190 mutex_unlock(&ipa_ctx->lock);
1191 return ret;
1192}
1193
1194/**
1195 * ipa2_commit_rt_rule() - Commit the current SW routing table of specified type
1196 * to IPA HW
1197 * @ip: The family of routing tables
1198 *
1199 * Returns: 0 on success, negative on failure
1200 *
1201 * Note: Should not be called from atomic context
1202 */
1203int ipa2_commit_rt(enum ipa_ip_type ip)
1204{
1205 int ret;
1206
1207 if (ip >= IPA_IP_MAX) {
1208 IPAERR("bad parm\n");
1209 return -EINVAL;
1210 }
1211
1212 /*
1213 * issue a commit on the filtering module of same IP type since
1214 * filtering rules point to routing tables
1215 */
1216 if (ipa2_commit_flt(ip))
1217 return -EPERM;
1218
1219 mutex_lock(&ipa_ctx->lock);
1220 if (ipa_ctx->ctrl->ipa_commit_rt(ip)) {
1221 ret = -EPERM;
1222 goto bail;
1223 }
1224
1225 ret = 0;
1226bail:
1227 mutex_unlock(&ipa_ctx->lock);
1228 return ret;
1229}
1230
1231/**
1232 * ipa2_reset_rt() - reset the current SW routing table of specified type
1233 * (does not commit to HW)
1234 * @ip: The family of routing tables
1235 *
1236 * Returns: 0 on success, negative on failure
1237 *
1238 * Note: Should not be called from atomic context
1239 */
1240int ipa2_reset_rt(enum ipa_ip_type ip)
1241{
1242 struct ipa_rt_tbl *tbl;
1243 struct ipa_rt_tbl *tbl_next;
1244 struct ipa_rt_tbl_set *set;
1245 struct ipa_rt_entry *rule;
1246 struct ipa_rt_entry *rule_next;
1247 struct ipa_rt_tbl_set *rset;
1248 u32 apps_start_idx;
1249 int id;
1250
1251 if (ip >= IPA_IP_MAX) {
1252 IPAERR("bad parm\n");
1253 return -EINVAL;
1254 }
1255
1256 if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) {
1257 if (ip == IPA_IP_v4)
1258 apps_start_idx = IPA_MEM_PART(v4_apps_rt_index_lo);
1259 else
1260 apps_start_idx = IPA_MEM_PART(v6_apps_rt_index_lo);
1261 } else {
1262 apps_start_idx = 0;
1263 }
1264
1265 /*
1266 * issue a reset on the filtering module of same IP type since
1267 * filtering rules point to routing tables
1268 */
1269 if (ipa2_reset_flt(ip))
1270 IPAERR("fail to reset flt ip=%d\n", ip);
1271
1272 set = &ipa_ctx->rt_tbl_set[ip];
1273 rset = &ipa_ctx->reap_rt_tbl_set[ip];
1274 mutex_lock(&ipa_ctx->lock);
1275 IPADBG("reset rt ip=%d\n", ip);
1276 list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) {
1277 list_for_each_entry_safe(rule, rule_next,
1278 &tbl->head_rt_rule_list, link) {
1279 if (ipa_id_find(rule->id) == NULL) {
1280 WARN_ON(1);
1281 mutex_unlock(&ipa_ctx->lock);
1282 return -EFAULT;
1283 }
1284
1285 /*
1286 * for the "default" routing tbl, remove all but the
1287 * last rule
1288 */
1289 if (tbl->idx == apps_start_idx && tbl->rule_cnt == 1)
1290 continue;
1291
1292 list_del(&rule->link);
1293 tbl->rule_cnt--;
1294 if (rule->hdr)
1295 __ipa_release_hdr(rule->hdr->id);
1296 else if (rule->proc_ctx)
1297 __ipa_release_hdr_proc_ctx(rule->proc_ctx->id);
1298 rule->cookie = 0;
1299 id = rule->id;
1300 kmem_cache_free(ipa_ctx->rt_rule_cache, rule);
1301
1302 /* remove the handle from the database */
1303 ipa_id_remove(id);
1304 }
1305
1306 if (ipa_id_find(tbl->id) == NULL) {
1307 WARN_ON(1);
1308 mutex_unlock(&ipa_ctx->lock);
1309 return -EFAULT;
1310 }
1311 id = tbl->id;
1312
1313 /* do not remove the "default" routing tbl which has index 0 */
1314 if (tbl->idx != apps_start_idx) {
1315 if (!tbl->in_sys) {
1316 list_del(&tbl->link);
1317 set->tbl_cnt--;
1318 clear_bit(tbl->idx,
1319 &ipa_ctx->rt_idx_bitmap[ip]);
1320 IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n",
1321 tbl->idx, set->tbl_cnt);
1322 kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl);
1323 } else {
1324 list_move(&tbl->link, &rset->head_rt_tbl_list);
1325 clear_bit(tbl->idx,
1326 &ipa_ctx->rt_idx_bitmap[ip]);
1327 set->tbl_cnt--;
1328 IPADBG("rst sys rt tbl_idx=%d tbl_cnt=%d\n",
1329 tbl->idx, set->tbl_cnt);
1330 }
1331 /* remove the handle from the database */
1332 ipa_id_remove(id);
1333 }
1334 }
1335 mutex_unlock(&ipa_ctx->lock);
1336
1337 return 0;
1338}
1339
1340/**
1341 * ipa2_get_rt_tbl() - lookup the specified routing table and return handle if
1342 * it exists, if lookup succeeds the routing table ref cnt is increased
1343 * @lookup: [inout] routing table to lookup and its handle
1344 *
1345 * Returns: 0 on success, negative on failure
1346 *
1347 * Note: Should not be called from atomic context
1348 * Caller should call ipa_put_rt_tbl later if this function succeeds
1349 */
1350int ipa2_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
1351{
1352 struct ipa_rt_tbl *entry;
1353 int result = -EFAULT;
1354
1355 if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
1356 IPAERR("bad parm\n");
1357 return -EINVAL;
1358 }
1359 mutex_lock(&ipa_ctx->lock);
1360 entry = __ipa_find_rt_tbl(lookup->ip, lookup->name);
Mohammed Javid93e94ce2017-06-15 15:39:04 +05301361 if (entry && entry->cookie == IPA_RT_TBL_COOKIE) {
Amir Levy9659e592016-10-27 18:08:27 +03001362 entry->ref_cnt++;
1363 lookup->hdl = entry->id;
1364
1365 /* commit for get */
1366 if (ipa_ctx->ctrl->ipa_commit_rt(lookup->ip))
1367 IPAERR("fail to commit RT tbl\n");
1368
1369 result = 0;
1370 }
1371 mutex_unlock(&ipa_ctx->lock);
1372
1373 return result;
1374}
1375
1376/**
1377 * ipa2_put_rt_tbl() - Release the specified routing table handle
1378 * @rt_tbl_hdl: [in] the routing table handle to release
1379 *
1380 * Returns: 0 on success, negative on failure
1381 *
1382 * Note: Should not be called from atomic context
1383 */
1384int ipa2_put_rt_tbl(u32 rt_tbl_hdl)
1385{
1386 struct ipa_rt_tbl *entry;
1387 enum ipa_ip_type ip = IPA_IP_MAX;
1388 int result;
1389
1390 mutex_lock(&ipa_ctx->lock);
1391 entry = ipa_id_find(rt_tbl_hdl);
1392 if (entry == NULL) {
1393 IPAERR("lookup failed\n");
1394 result = -EINVAL;
1395 goto ret;
1396 }
1397
Mohammed Javid93e94ce2017-06-15 15:39:04 +05301398 if ((entry->cookie != IPA_RT_TBL_COOKIE) || entry->ref_cnt == 0) {
Amir Levy9659e592016-10-27 18:08:27 +03001399 IPAERR("bad parms\n");
1400 result = -EINVAL;
1401 goto ret;
1402 }
1403
1404 if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4])
1405 ip = IPA_IP_v4;
1406 else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
1407 ip = IPA_IP_v6;
Mohammed Javid93e94ce2017-06-15 15:39:04 +05301408 else {
Amir Levy9659e592016-10-27 18:08:27 +03001409 WARN_ON(1);
Mohammed Javid93e94ce2017-06-15 15:39:04 +05301410 result = -EINVAL;
1411 goto ret;
1412 }
Amir Levy9659e592016-10-27 18:08:27 +03001413
1414 entry->ref_cnt--;
1415 if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
1416 if (__ipa_del_rt_tbl(entry))
1417 IPAERR("fail to del RT tbl\n");
1418 /* commit for put */
1419 if (ipa_ctx->ctrl->ipa_commit_rt(ip))
1420 IPAERR("fail to commit RT tbl\n");
1421 }
1422
1423 result = 0;
1424
1425ret:
1426 mutex_unlock(&ipa_ctx->lock);
1427
1428 return result;
1429}
1430
1431
1432static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule)
1433{
1434 struct ipa_rt_entry *entry;
1435 struct ipa_hdr_entry *hdr = NULL;
1436
1437 if (rtrule->rule.hdr_hdl) {
1438 hdr = ipa_id_find(rtrule->rule.hdr_hdl);
Mohammed Javid93e94ce2017-06-15 15:39:04 +05301439 if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) {
Amir Levy9659e592016-10-27 18:08:27 +03001440 IPAERR("rt rule does not point to valid hdr\n");
1441 goto error;
1442 }
1443 }
1444
1445 entry = ipa_id_find(rtrule->rt_rule_hdl);
1446 if (entry == NULL) {
1447 IPAERR("lookup failed\n");
1448 goto error;
1449 }
1450
Mohammed Javid93e94ce2017-06-15 15:39:04 +05301451 if (entry->cookie != IPA_RT_RULE_COOKIE) {
Amir Levy9659e592016-10-27 18:08:27 +03001452 IPAERR("bad params\n");
1453 goto error;
1454 }
1455
1456 if (entry->hdr)
1457 entry->hdr->ref_cnt--;
1458
1459 entry->rule = rtrule->rule;
1460 entry->hdr = hdr;
1461
1462 if (entry->hdr)
1463 entry->hdr->ref_cnt++;
1464
1465 return 0;
1466
1467error:
1468 return -EPERM;
1469}
1470
1471/**
1472 * ipa2_mdfy_rt_rule() - Modify the specified routing rules in SW and optionally
1473 * commit to IPA HW
1474 *
1475 * Returns: 0 on success, negative on failure
1476 *
1477 * Note: Should not be called from atomic context
1478 */
1479int ipa2_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls)
1480{
1481 int i;
1482 int result;
1483
1484 if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
1485 IPAERR("bad parm\n");
1486 return -EINVAL;
1487 }
1488
1489 mutex_lock(&ipa_ctx->lock);
1490 for (i = 0; i < hdls->num_rules; i++) {
1491 if (__ipa_mdfy_rt_rule(&hdls->rules[i])) {
1492 IPAERR("failed to mdfy rt rule %i\n", i);
1493 hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED;
1494 } else {
1495 hdls->rules[i].status = 0;
1496 }
1497 }
1498
1499 if (hdls->commit)
1500 if (ipa_ctx->ctrl->ipa_commit_rt(hdls->ip)) {
1501 result = -EPERM;
1502 goto bail;
1503 }
1504 result = 0;
1505bail:
1506 mutex_unlock(&ipa_ctx->lock);
1507
1508 return result;
1509}