blob: 21fdec0e780a62b3857e32be524d02f5be680936 [file] [log] [blame]
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05301/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Amir Levy9659e592016-10-27 18:08:27 +03002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/bitops.h>
14#include "ipa_i.h"
15
/* sentinel returned by the bitmap scan when no routing table index is set */
#define IPA_RT_TABLE_INDEX_NOT_FOUND (-1)
/* each routing table header slot is one 4-byte word */
#define IPA_RT_TABLE_WORD_SIZE (4)
/* rt_idx_bitmap is treated as a 32-bit bitmap of allocated indices */
#define IPA_RT_INDEX_BITMAP_SIZE (32)
/* alignment masks: table images to 128 bytes, rule entries to 4 bytes */
#define IPA_RT_TABLE_MEMORY_ALLIGNMENT (127)
#define IPA_RT_ENTRY_MEMORY_ALLIGNMENT (3)
#define IPA_RT_BIT_MASK (0x1)
/* per-rule status codes reported back through the ioctl structures */
#define IPA_RT_STATUS_OF_ADD_FAILED (-1)
#define IPA_RT_STATUS_OF_DEL_FAILED (-1)
#define IPA_RT_STATUS_OF_MDFY_FAILED (-1)
25
26/**
27 * __ipa_generate_rt_hw_rule_v2() - generates the routing hardware rule
28 * @ip: the ip address family type
29 * @entry: routing entry
30 * @buf: output buffer, buf == NULL means
31 * caller wants to know the size of the rule as seen
32 * by HW so they did not pass a valid buffer, we will use a
33 * scratch buffer instead.
34 * With this scheme we are going to
35 * generate the rule twice, once to know size using scratch
36 * buffer and second to write the rule to the actual caller
37 * supplied buffer which is of required size
38 *
39 * Returns: 0 on success, negative on failure
40 *
41 * caller needs to hold any needed locks to ensure integrity
42 *
43 */
44int __ipa_generate_rt_hw_rule_v2(enum ipa_ip_type ip,
45 struct ipa_rt_entry *entry, u8 *buf)
46{
47 struct ipa_rt_rule_hw_hdr *rule_hdr;
48 const struct ipa_rt_rule *rule =
49 (const struct ipa_rt_rule *)&entry->rule;
50 u16 en_rule = 0;
51 u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4];
52 u8 *start;
53 int pipe_idx;
54
55 if (buf == NULL) {
56 memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
57 buf = (u8 *)tmp;
58 }
59
60 start = buf;
61 rule_hdr = (struct ipa_rt_rule_hw_hdr *)buf;
62 pipe_idx = ipa2_get_ep_mapping(entry->rule.dst);
63 if (pipe_idx == -1) {
64 IPAERR("Wrong destination pipe specified in RT rule\n");
65 WARN_ON(1);
66 return -EPERM;
67 }
68 if (!IPA_CLIENT_IS_CONS(entry->rule.dst)) {
69 IPAERR("No RT rule on IPA_client_producer pipe.\n");
70 IPAERR("pipe_idx: %d dst_pipe: %d\n",
71 pipe_idx, entry->rule.dst);
72 WARN_ON(1);
73 return -EPERM;
74 }
75 rule_hdr->u.hdr.pipe_dest_idx = pipe_idx;
76 rule_hdr->u.hdr.system = !ipa_ctx->hdr_tbl_lcl;
77 if (entry->hdr) {
78 rule_hdr->u.hdr.hdr_offset =
79 entry->hdr->offset_entry->offset >> 2;
80 } else {
81 rule_hdr->u.hdr.hdr_offset = 0;
82 }
83 buf += sizeof(struct ipa_rt_rule_hw_hdr);
84
85 if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
86 IPAERR("fail to generate hw rule\n");
87 return -EPERM;
88 }
89
90 IPADBG("en_rule 0x%x\n", en_rule);
91
92 rule_hdr->u.hdr.en_rule = en_rule;
93 ipa_write_32(rule_hdr->u.word, (u8 *)rule_hdr);
94
95 if (entry->hw_len == 0) {
96 entry->hw_len = buf - start;
97 } else if (entry->hw_len != (buf - start)) {
98 IPAERR(
99 "hw_len differs b/w passes passed=0x%x calc=0x%xtd\n",
100 entry->hw_len,
101 (buf - start));
102 return -EPERM;
103 }
104
105 return 0;
106}
107
108/**
109 * __ipa_generate_rt_hw_rule_v2_5() - generates the routing hardware rule
110 * @ip: the ip address family type
111 * @entry: routing entry
112 * @buf: output buffer, buf == NULL means
113 * caller wants to know the size of the rule as seen
114 * by HW so they did not pass a valid buffer, we will use a
115 * scratch buffer instead.
116 * With this scheme we are going to
117 * generate the rule twice, once to know size using scratch
118 * buffer and second to write the rule to the actual caller
119 * supplied buffer which is of required size
120 *
121 * Returns: 0 on success, negative on failure
122 *
123 * caller needs to hold any needed locks to ensure integrity
124 *
125 */
126int __ipa_generate_rt_hw_rule_v2_5(enum ipa_ip_type ip,
127 struct ipa_rt_entry *entry, u8 *buf)
128{
129 struct ipa_rt_rule_hw_hdr *rule_hdr;
130 const struct ipa_rt_rule *rule =
131 (const struct ipa_rt_rule *)&entry->rule;
132 u16 en_rule = 0;
133 u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4];
134 u8 *start;
135 int pipe_idx;
136
137 if (buf == NULL) {
138 memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
139 buf = (u8 *)tmp;
140 }
141
142 start = buf;
143 rule_hdr = (struct ipa_rt_rule_hw_hdr *)buf;
144 pipe_idx = ipa2_get_ep_mapping(entry->rule.dst);
145 if (pipe_idx == -1) {
146 IPAERR("Wrong destination pipe specified in RT rule\n");
147 WARN_ON(1);
148 return -EPERM;
149 }
150 if (!IPA_CLIENT_IS_CONS(entry->rule.dst)) {
151 IPAERR("No RT rule on IPA_client_producer pipe.\n");
152 IPAERR("pipe_idx: %d dst_pipe: %d\n",
153 pipe_idx, entry->rule.dst);
154 WARN_ON(1);
155 return -EPERM;
156 }
157 rule_hdr->u.hdr_v2_5.pipe_dest_idx = pipe_idx;
158 if (entry->proc_ctx || (entry->hdr && entry->hdr->is_hdr_proc_ctx)) {
159 struct ipa_hdr_proc_ctx_entry *proc_ctx;
160
161 proc_ctx = (entry->proc_ctx) ? : entry->hdr->proc_ctx;
162 rule_hdr->u.hdr_v2_5.system = !ipa_ctx->hdr_proc_ctx_tbl_lcl;
163 BUG_ON(proc_ctx->offset_entry->offset & 31);
164 rule_hdr->u.hdr_v2_5.proc_ctx = 1;
165 rule_hdr->u.hdr_v2_5.hdr_offset =
166 (proc_ctx->offset_entry->offset +
167 ipa_ctx->hdr_proc_ctx_tbl.start_offset) >> 5;
168 } else if (entry->hdr) {
169 rule_hdr->u.hdr_v2_5.system = !ipa_ctx->hdr_tbl_lcl;
170 BUG_ON(entry->hdr->offset_entry->offset & 3);
171 rule_hdr->u.hdr_v2_5.proc_ctx = 0;
172 rule_hdr->u.hdr_v2_5.hdr_offset =
173 entry->hdr->offset_entry->offset >> 2;
174 } else {
175 rule_hdr->u.hdr_v2_5.proc_ctx = 0;
176 rule_hdr->u.hdr_v2_5.hdr_offset = 0;
177 }
178 buf += sizeof(struct ipa_rt_rule_hw_hdr);
179
180 if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
181 IPAERR("fail to generate hw rule\n");
182 return -EPERM;
183 }
184
185 IPADBG("en_rule 0x%x\n", en_rule);
186
187 rule_hdr->u.hdr_v2_5.en_rule = en_rule;
188 ipa_write_32(rule_hdr->u.word, (u8 *)rule_hdr);
189
190 if (entry->hw_len == 0) {
191 entry->hw_len = buf - start;
192 } else if (entry->hw_len != (buf - start)) {
193 IPAERR("hw_len differs b/w passes passed=0x%x calc=0x%xtd\n",
194 entry->hw_len, (buf - start));
195 return -EPERM;
196 }
197
198 return 0;
199}
200
/**
 * __ipa_generate_rt_hw_rule_v2_6L() - generates the routing hardware rule
 * @ip: the ip address family type
 * @entry: routing entry
 * @buf: output buffer, buf == NULL means that the caller wants to know the size
 *	of the rule as seen by HW so they did not pass a valid buffer, we will
 *	use a scratch buffer instead.
 *	With this scheme we are going to generate the rule twice, once to know
 *	size using scratch buffer and second to write the rule to the actual
 *	caller supplied buffer which is of required size.
 *
 * Returns: 0 on success, negative on failure
 *
 * caller needs to hold any needed locks to ensure integrity
 *
 */
int __ipa_generate_rt_hw_rule_v2_6L(enum ipa_ip_type ip,
		struct ipa_rt_entry *entry, u8 *buf)
{
	/* Same implementation as IPAv2: v2.6L uses the v2 rule layout */
	return __ipa_generate_rt_hw_rule_v2(ip, entry, buf);
}
223
224/**
225 * ipa_get_rt_hw_tbl_size() - returns the size of HW routing table
226 * @ip: the ip address family type
227 * @hdr_sz: header size
228 * @max_rt_idx: maximal index
229 *
Skylar Chang88610112016-10-19 13:30:44 -0700230 * Returns: size on success, negative on failure
Amir Levy9659e592016-10-27 18:08:27 +0300231 *
232 * caller needs to hold any needed locks to ensure integrity
233 *
234 * the MSB set in rt_idx_bitmap indicates the size of hdr of routing tbl
235 */
236static int ipa_get_rt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz,
237 int *max_rt_idx)
238{
239 struct ipa_rt_tbl_set *set;
240 struct ipa_rt_tbl *tbl;
241 struct ipa_rt_entry *entry;
242 u32 total_sz = 0;
243 u32 tbl_sz;
244 u32 bitmap = ipa_ctx->rt_idx_bitmap[ip];
245 int highest_bit_set = IPA_RT_TABLE_INDEX_NOT_FOUND;
246 int i;
247 int res;
248
249 *hdr_sz = 0;
250 set = &ipa_ctx->rt_tbl_set[ip];
251
252 for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
253 if (bitmap & IPA_RT_BIT_MASK)
254 highest_bit_set = i;
255 bitmap >>= 1;
256 }
257
258 *max_rt_idx = highest_bit_set;
259 if (highest_bit_set == IPA_RT_TABLE_INDEX_NOT_FOUND) {
260 IPAERR("no rt tbls present\n");
261 total_sz = IPA_RT_TABLE_WORD_SIZE;
262 *hdr_sz = IPA_RT_TABLE_WORD_SIZE;
263 return total_sz;
264 }
265
266 *hdr_sz = (highest_bit_set + 1) * IPA_RT_TABLE_WORD_SIZE;
267 total_sz += *hdr_sz;
268 list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
269 tbl_sz = 0;
270 list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
271 res = ipa_ctx->ctrl->ipa_generate_rt_hw_rule(
272 ip,
273 entry,
274 NULL);
275 if (res) {
276 IPAERR("failed to find HW RT rule size\n");
277 return -EPERM;
278 }
279 tbl_sz += entry->hw_len;
280 }
281
282 if (tbl_sz)
283 tbl->sz = tbl_sz + IPA_RT_TABLE_WORD_SIZE;
284
285 if (tbl->in_sys)
286 continue;
287
288 if (tbl_sz) {
289 /* add the terminator */
290 total_sz += (tbl_sz + IPA_RT_TABLE_WORD_SIZE);
291 /* every rule-set should start at word boundary */
292 total_sz = (total_sz + IPA_RT_ENTRY_MEMORY_ALLIGNMENT) &
293 ~IPA_RT_ENTRY_MEMORY_ALLIGNMENT;
294 }
295 }
296
297 IPADBG("RT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip);
298
299 return total_sz;
300}
301
/**
 * ipa_generate_rt_hw_tbl_common() - build the routing header and body images
 * @ip: the ip address family type
 * @base: output buffer receiving the bodies of local (non-system) tables
 * @hdr: output buffer for the header area, one word per routing index
 * @body_ofst: offset of @base within the HW memory region; added to local
 *	table offsets written into @hdr so they match the HW view
 * @apps_start_idx: first routing index owned by apps; header slots are
 *	addressed relative to it
 *
 * Local tables are written into @base and referenced from @hdr by offset
 * (with IPA_RT_BIT_MASK marking "offset, not address"); system tables get
 * a freshly allocated DMA buffer referenced from @hdr by physical address.
 *
 * Returns: 0 on success, -EPERM on failure
 */
static int ipa_generate_rt_hw_tbl_common(enum ipa_ip_type ip, u8 *base, u8 *hdr,
		u32 body_ofst, u32 apps_start_idx)
{
	struct ipa_rt_tbl *tbl;
	struct ipa_rt_entry *entry;
	struct ipa_rt_tbl_set *set;
	u32 offset;
	u8 *body;
	struct ipa_mem_buffer rt_tbl_mem;
	u8 *rt_tbl_mem_body;
	int res;

	/* build the rt tbl in the DMA buffer to submit to IPA HW */
	body = base;

	set = &ipa_ctx->rt_tbl_set[ip];
	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
		if (!tbl->in_sys) {
			offset = body - base + body_ofst;
			if (offset & IPA_RT_ENTRY_MEMORY_ALLIGNMENT) {
				IPAERR("offset is not word multiple %d\n",
						offset);
				goto proc_err;
			}

			/* convert offset to words from bytes */
			offset &= ~IPA_RT_ENTRY_MEMORY_ALLIGNMENT;
			/* rule is at an offset from base */
			offset |= IPA_RT_BIT_MASK;

			/* update the hdr at the right index */
			ipa_write_32(offset, hdr +
					((tbl->idx - apps_start_idx) *
					IPA_RT_TABLE_WORD_SIZE));

			/* generate the rule-set */
			list_for_each_entry(entry, &tbl->head_rt_rule_list,
					link) {
				res = ipa_ctx->ctrl->ipa_generate_rt_hw_rule(
					ip,
					entry,
					body);
				if (res) {
					IPAERR("failed to gen HW RT rule\n");
					goto proc_err;
				}
				body += entry->hw_len;
			}

			/* write the rule-set terminator */
			body = ipa_write_32(0, body);
			if ((long)body & IPA_RT_ENTRY_MEMORY_ALLIGNMENT)
				/* advance body to next word boundary */
				body = body + (IPA_RT_TABLE_WORD_SIZE -
						((long)body &
						IPA_RT_ENTRY_MEMORY_ALLIGNMENT));
		} else {
			if (tbl->sz == 0) {
				IPAERR("cannot generate 0 size table\n");
				goto proc_err;
			}

			/* allocate memory for the RT tbl */
			rt_tbl_mem.size = tbl->sz;
			rt_tbl_mem.base =
				dma_alloc_coherent(ipa_ctx->pdev, rt_tbl_mem.size,
					&rt_tbl_mem.phys_base, GFP_KERNEL);
			if (!rt_tbl_mem.base) {
				IPAERR("fail to alloc DMA buff of size %d\n",
						rt_tbl_mem.size);
				WARN_ON(1);
				goto proc_err;
			}

			/* HW requires word-aligned table start addresses */
			WARN_ON(rt_tbl_mem.phys_base &
					IPA_RT_ENTRY_MEMORY_ALLIGNMENT);
			rt_tbl_mem_body = rt_tbl_mem.base;
			memset(rt_tbl_mem.base, 0, rt_tbl_mem.size);
			/* update the hdr at the right index */
			ipa_write_32(rt_tbl_mem.phys_base,
					hdr + ((tbl->idx - apps_start_idx) *
					IPA_RT_TABLE_WORD_SIZE));
			/* generate the rule-set */
			list_for_each_entry(entry, &tbl->head_rt_rule_list,
					link) {
				res = ipa_ctx->ctrl->ipa_generate_rt_hw_rule(
					ip,
					entry,
					rt_tbl_mem_body);
				if (res) {
					IPAERR("failed to gen HW RT rule\n");
					WARN_ON(1);
					goto rt_table_mem_alloc_failed;
				}
				rt_tbl_mem_body += entry->hw_len;
			}

			/* write the rule-set terminator */
			rt_tbl_mem_body = ipa_write_32(0, rt_tbl_mem_body);

			/*
			 * keep the previous buffer around; it is freed by
			 * __ipa_reap_sys_rt_tbls() after the next commit
			 */
			if (tbl->curr_mem.phys_base) {
				WARN_ON(tbl->prev_mem.phys_base);
				tbl->prev_mem = tbl->curr_mem;
			}
			tbl->curr_mem = rt_tbl_mem;
		}
	}

	return 0;

rt_table_mem_alloc_failed:
	dma_free_coherent(ipa_ctx->pdev, rt_tbl_mem.size,
			rt_tbl_mem.base, rt_tbl_mem.phys_base);
proc_err:
	return -EPERM;
}
418
419
420/**
421 * ipa_generate_rt_hw_tbl() - generates the routing hardware table
422 * @ip: [in] the ip address family type
423 * @mem: [out] buffer to put the filtering table
424 *
425 * Returns: 0 on success, negative on failure
426 */
427static int ipa_generate_rt_hw_tbl_v1_1(enum ipa_ip_type ip,
428 struct ipa_mem_buffer *mem)
429{
430 u32 hdr_sz;
431 u8 *hdr;
432 u8 *body;
433 u8 *base;
434 int max_rt_idx;
435 int i;
Skylar Chang88610112016-10-19 13:30:44 -0700436 int res;
Amir Levy9659e592016-10-27 18:08:27 +0300437
Skylar Chang88610112016-10-19 13:30:44 -0700438 res = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx);
439 if (res < 0) {
440 IPAERR("ipa_get_rt_hw_tbl_size failed %d\n", res);
441 goto error;
442 }
443
444 mem->size = res;
Amir Levy9659e592016-10-27 18:08:27 +0300445 mem->size = (mem->size + IPA_RT_TABLE_MEMORY_ALLIGNMENT) &
446 ~IPA_RT_TABLE_MEMORY_ALLIGNMENT;
447
448 if (mem->size == 0) {
449 IPAERR("rt tbl empty ip=%d\n", ip);
450 goto error;
451 }
452 mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
453 &mem->phys_base, GFP_KERNEL);
454 if (!mem->base) {
455 IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
456 goto error;
457 }
458
459 memset(mem->base, 0, mem->size);
460
461 /* build the rt tbl in the DMA buffer to submit to IPA HW */
462 base = hdr = (u8 *)mem->base;
463 body = base + hdr_sz;
464
465 /* setup all indices to point to the empty sys rt tbl */
466 for (i = 0; i <= max_rt_idx; i++)
467 ipa_write_32(ipa_ctx->empty_rt_tbl_mem.phys_base,
468 hdr + (i * IPA_RT_TABLE_WORD_SIZE));
469
470 if (ipa_generate_rt_hw_tbl_common(ip, base, hdr, 0, 0)) {
471 IPAERR("fail to generate RT tbl\n");
472 goto proc_err;
473 }
474
475 return 0;
476
477proc_err:
478 dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base);
479 mem->base = NULL;
480error:
481 return -EPERM;
482}
483
/**
 * __ipa_reap_sys_rt_tbls() - free stale system routing table buffers
 * @ip: the ip address family type
 *
 * Frees the previous-generation DMA buffer of every live system table
 * (set aside by ipa_generate_rt_hw_tbl_common() on the last commit), then
 * releases tables parked on the reap list by __ipa_del_rt_tbl().
 *
 * caller needs to hold any needed locks to ensure integrity
 */
static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
{
	struct ipa_rt_tbl *tbl;
	struct ipa_rt_tbl *next;
	struct ipa_rt_tbl_set *set;

	/* free the superseded buffer of still-live tables */
	set = &ipa_ctx->rt_tbl_set[ip];
	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
		if (tbl->prev_mem.phys_base) {
			IPADBG("reaping rt tbl name=%s ip=%d\n", tbl->name, ip);
			dma_free_coherent(ipa_ctx->pdev, tbl->prev_mem.size,
					tbl->prev_mem.base,
					tbl->prev_mem.phys_base);
			memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
		}
	}

	/* release deleted tables that were moved to the reap list */
	set = &ipa_ctx->reap_rt_tbl_set[ip];
	list_for_each_entry_safe(tbl, next, &set->head_rt_tbl_list, link) {
		list_del(&tbl->link);
		WARN_ON(tbl->prev_mem.phys_base != 0);
		if (tbl->curr_mem.phys_base) {
			IPADBG("reaping sys rt tbl name=%s ip=%d\n", tbl->name,
					ip);
			dma_free_coherent(ipa_ctx->pdev, tbl->curr_mem.size,
					tbl->curr_mem.base,
					tbl->curr_mem.phys_base);
			/*
			 * NOTE(review): the tbl object is freed only when
			 * curr_mem was allocated; confirm a reap-listed tbl
			 * always has curr_mem, otherwise it leaks here
			 */
			kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl);
		}
	}
}
515
/**
 * __ipa_commit_rt_v1_1() - commit the routing tables to IPA v1.1 HW
 * @ip: the ip address family type
 *
 * Generates the routing table image in a DMA buffer, sends the matching
 * ROUTING_INIT immediate command to HW, then reaps stale system tables.
 *
 * Returns: 0 on success, -EPERM on failure
 */
int __ipa_commit_rt_v1_1(enum ipa_ip_type ip)
{
	struct ipa_desc desc = { 0 };
	struct ipa_mem_buffer *mem;
	void *cmd;
	struct ipa_ip_v4_routing_init *v4;
	struct ipa_ip_v6_routing_init *v6;
	u16 avail;	/* room available in the target memory region */
	u16 size;	/* size of the immediate command payload */

	mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
	if (!mem) {
		IPAERR("failed to alloc memory object\n");
		goto fail_alloc_mem;
	}

	/* pick region capacity and command layout per address family */
	if (ip == IPA_IP_v4) {
		avail = ipa_ctx->ip4_rt_tbl_lcl ? IPA_MEM_v1_RAM_V4_RT_SIZE :
			IPA_MEM_PART(v4_rt_size_ddr);
		size = sizeof(struct ipa_ip_v4_routing_init);
	} else {
		avail = ipa_ctx->ip6_rt_tbl_lcl ? IPA_MEM_v1_RAM_V6_RT_SIZE :
			IPA_MEM_PART(v6_rt_size_ddr);
		size = sizeof(struct ipa_ip_v6_routing_init);
	}
	cmd = kmalloc(size, GFP_KERNEL);
	if (!cmd) {
		IPAERR("failed to alloc immediate command object\n");
		goto fail_alloc_cmd;
	}

	if (ipa_generate_rt_hw_tbl_v1_1(ip, mem)) {
		IPAERR("fail to generate RT HW TBL ip %d\n", ip);
		goto fail_hw_tbl_gen;
	}

	if (mem->size > avail) {
		IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
		goto fail_send_cmd;
	}

	if (ip == IPA_IP_v4) {
		v4 = (struct ipa_ip_v4_routing_init *)cmd;
		desc.opcode = IPA_IP_V4_ROUTING_INIT;
		v4->ipv4_rules_addr = mem->phys_base;
		v4->size_ipv4_rules = mem->size;
		v4->ipv4_addr = IPA_MEM_v1_RAM_V4_RT_OFST;
		IPADBG("putting Routing IPv4 rules to phys 0x%x",
				v4->ipv4_addr);
	} else {
		v6 = (struct ipa_ip_v6_routing_init *)cmd;
		desc.opcode = IPA_IP_V6_ROUTING_INIT;
		v6->ipv6_rules_addr = mem->phys_base;
		v6->size_ipv6_rules = mem->size;
		v6->ipv6_addr = IPA_MEM_v1_RAM_V6_RT_OFST;
		IPADBG("putting Routing IPv6 rules to phys 0x%x",
				v6->ipv6_addr);
	}

	desc.pyld = cmd;
	desc.len = size;
	desc.type = IPA_IMM_CMD_DESC;
	IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);

	if (ipa_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		goto fail_send_cmd;
	}

	/* HW has consumed the image; reap stale tables, free staging mem */
	__ipa_reap_sys_rt_tbls(ip);
	dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base);
	kfree(cmd);
	kfree(mem);

	return 0;

fail_send_cmd:
	/* generation succeeded on this path, so mem->base is valid */
	if (mem->base)
		dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
				mem->phys_base);
fail_hw_tbl_gen:
	kfree(cmd);
fail_alloc_cmd:
	kfree(mem);
fail_alloc_mem:
	return -EPERM;
}
603
/**
 * ipa_generate_rt_hw_tbl_v2() - generate routing header and body images
 * @ip: the ip address family type
 * @mem: [out] DMA buffer holding the apps routing table bodies
 * @head: [out] DMA buffer holding the header, one word per apps index
 *
 * Returns: 0 on success, -EPERM on failure; on success the caller owns
 * both DMA buffers
 */
static int ipa_generate_rt_hw_tbl_v2(enum ipa_ip_type ip,
		struct ipa_mem_buffer *mem, struct ipa_mem_buffer *head)
{
	u32 hdr_sz;
	u8 *hdr;
	u8 *body;
	u8 *base;
	int max_rt_idx;
	int i;
	u32 *entr;
	int num_index;	/* number of apps-owned routing indices */
	u32 body_start_offset;
	u32 apps_start_idx;
	int res;

	if (ip == IPA_IP_v4) {
		num_index = IPA_MEM_PART(v4_apps_rt_index_hi) -
			IPA_MEM_PART(v4_apps_rt_index_lo) + 1;
		body_start_offset = IPA_MEM_PART(apps_v4_rt_ofst) -
			IPA_MEM_PART(v4_rt_ofst);
		apps_start_idx = IPA_MEM_PART(v4_apps_rt_index_lo);
	} else {
		num_index = IPA_MEM_PART(v6_apps_rt_index_hi) -
			IPA_MEM_PART(v6_apps_rt_index_lo) + 1;
		body_start_offset = IPA_MEM_PART(apps_v6_rt_ofst) -
			IPA_MEM_PART(v6_rt_ofst);
		apps_start_idx = IPA_MEM_PART(v6_apps_rt_index_lo);
	}

	/* one header word per apps routing index */
	head->size = num_index * 4;
	head->base = dma_alloc_coherent(ipa_ctx->pdev, head->size,
			&head->phys_base, GFP_KERNEL);
	if (!head->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", head->size);
		goto err;
	}
	entr = (u32 *)head->base;
	hdr = (u8 *)head->base;
	/* default every index to the shared empty system table */
	for (i = 1; i <= num_index; i++) {
		*entr = ipa_ctx->empty_rt_tbl_mem.phys_base;
		entr++;
	}

	res = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx);
	if (res < 0) {
		IPAERR("ipa_get_rt_hw_tbl_size failed %d\n", res);
		goto base_err;
	}

	/* body image excludes the header; round up to HW alignment */
	mem->size = res;
	mem->size -= hdr_sz;
	mem->size = (mem->size + IPA_RT_TABLE_MEMORY_ALLIGNMENT) &
			~IPA_RT_TABLE_MEMORY_ALLIGNMENT;

	if (mem->size > 0) {
		mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
				&mem->phys_base, GFP_KERNEL);
		if (!mem->base) {
			IPAERR("fail to alloc DMA buff of size %d\n",
					mem->size);
			goto base_err;
		}
		memset(mem->base, 0, mem->size);
	}

	/* build the rt tbl in the DMA buffer to submit to IPA HW */
	body = base = (u8 *)mem->base;

	if (ipa_generate_rt_hw_tbl_common(ip, base, hdr, body_start_offset,
				apps_start_idx)) {
		IPAERR("fail to generate RT tbl\n");
		goto proc_err;
	}

	return 0;

proc_err:
	if (mem->size)
		dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
				mem->phys_base);
base_err:
	dma_free_coherent(ipa_ctx->pdev, head->size, head->base,
			head->phys_base);
err:
	return -EPERM;
}
690
/**
 * __ipa_commit_rt_v2() - commit the routing tables to IPA v2 HW
 * @ip: the ip address family type
 *
 * Generates the routing header and body images and copies them into the
 * shared memory region via DMA_SHARED_MEM immediate commands (the body
 * only when the tables reside in local memory), then reaps stale system
 * tables.
 *
 * Returns: 0 on success, negative on failure
 */
int __ipa_commit_rt_v2(enum ipa_ip_type ip)
{
	struct ipa_desc desc[2];
	struct ipa_mem_buffer body;
	struct ipa_mem_buffer head;
	struct ipa_hw_imm_cmd_dma_shared_mem *cmd1 = NULL;
	struct ipa_hw_imm_cmd_dma_shared_mem *cmd2 = NULL;
	/* some targets need immediate command payloads in the DMA zone */
	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
	u16 avail;	/* room available in the target memory region */
	u32 num_modem_rt_index;
	int rc = 0;
	u32 local_addr1;	/* HW-local address of the apps header area */
	u32 local_addr2;	/* HW-local address of the apps body area */
	bool lcl;	/* true when the table body resides in local memory */

	memset(desc, 0, 2 * sizeof(struct ipa_desc));

	if (ip == IPA_IP_v4) {
		avail = ipa_ctx->ip4_rt_tbl_lcl ?
			IPA_MEM_PART(apps_v4_rt_size) :
			IPA_MEM_PART(v4_rt_size_ddr);
		num_modem_rt_index =
			IPA_MEM_PART(v4_modem_rt_index_hi) -
			IPA_MEM_PART(v4_modem_rt_index_lo) + 1;
		/* apps header words follow the modem-owned indices */
		local_addr1 = ipa_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v4_rt_ofst) +
			num_modem_rt_index * 4;
		local_addr2 = ipa_ctx->smem_restricted_bytes +
			IPA_MEM_PART(apps_v4_rt_ofst);
		lcl = ipa_ctx->ip4_rt_tbl_lcl;
	} else {
		avail = ipa_ctx->ip6_rt_tbl_lcl ?
			IPA_MEM_PART(apps_v6_rt_size) :
			IPA_MEM_PART(v6_rt_size_ddr);
		num_modem_rt_index =
			IPA_MEM_PART(v6_modem_rt_index_hi) -
			IPA_MEM_PART(v6_modem_rt_index_lo) + 1;
		local_addr1 = ipa_ctx->smem_restricted_bytes +
			IPA_MEM_PART(v6_rt_ofst) +
			num_modem_rt_index * 4;
		local_addr2 = ipa_ctx->smem_restricted_bytes +
			IPA_MEM_PART(apps_v6_rt_ofst);
		lcl = ipa_ctx->ip6_rt_tbl_lcl;
	}

	if (ipa_generate_rt_hw_tbl_v2(ip, &body, &head)) {
		IPAERR("fail to generate RT HW TBL ip %d\n", ip);
		rc = -EFAULT;
		goto fail_gen;
	}

	if (body.size > avail) {
		IPAERR("tbl too big, needed %d avail %d\n", body.size, avail);
		rc = -EFAULT;
		goto fail_send_cmd;
	}

	cmd1 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
			flag);
	if (cmd1 == NULL) {
		IPAERR("Failed to alloc immediate command object\n");
		rc = -ENOMEM;
		goto fail_send_cmd;
	}

	/* first command: copy the header image into local memory */
	cmd1->size = head.size;
	cmd1->system_addr = head.phys_base;
	cmd1->local_addr = local_addr1;
	desc[0].opcode = IPA_DMA_SHARED_MEM;
	desc[0].pyld = (void *)cmd1;
	desc[0].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
	desc[0].type = IPA_IMM_CMD_DESC;

	if (lcl) {
		/* second command: copy the body image into local memory */
		cmd2 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
				flag);
		if (cmd2 == NULL) {
			IPAERR("Failed to alloc immediate command object\n");
			rc = -ENOMEM;
			goto fail_send_cmd1;
		}

		cmd2->size = body.size;
		cmd2->system_addr = body.phys_base;
		cmd2->local_addr = local_addr2;

		desc[1].opcode = IPA_DMA_SHARED_MEM;
		desc[1].pyld = (void *)cmd2;
		desc[1].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
		desc[1].type = IPA_IMM_CMD_DESC;

		if (ipa_send_cmd(2, desc)) {
			IPAERR("fail to send immediate command\n");
			rc = -EFAULT;
			goto fail_send_cmd2;
		}
	} else {
		if (ipa_send_cmd(1, desc)) {
			IPAERR("fail to send immediate command\n");
			rc = -EFAULT;
			goto fail_send_cmd1;
		}
	}

	IPADBG("HEAD\n");
	IPA_DUMP_BUFF(head.base, head.phys_base, head.size);
	if (body.size) {
		IPADBG("BODY\n");
		IPA_DUMP_BUFF(body.base, body.phys_base, body.size);
	}
	__ipa_reap_sys_rt_tbls(ip);

	/*
	 * success falls through the labels below on purpose: command
	 * objects and staging DMA buffers are freed on every path while
	 * rc keeps its value (0 on success)
	 */
fail_send_cmd2:
	kfree(cmd2);
fail_send_cmd1:
	kfree(cmd1);
fail_send_cmd:
	dma_free_coherent(ipa_ctx->pdev, head.size, head.base, head.phys_base);
	if (body.size)
		dma_free_coherent(ipa_ctx->pdev, body.size, body.base,
				body.phys_base);
fail_gen:
	return rc;
}
815
816/**
817 * __ipa_find_rt_tbl() - find the routing table
818 * which name is given as parameter
819 * @ip: [in] the ip address family type of the wanted routing table
820 * @name: [in] the name of the wanted routing table
821 *
822 * Returns: the routing table which name is given as parameter, or NULL if it
823 * doesn't exist
824 */
825struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name)
826{
827 struct ipa_rt_tbl *entry;
828 struct ipa_rt_tbl_set *set;
829
830 set = &ipa_ctx->rt_tbl_set[ip];
831 list_for_each_entry(entry, &set->head_rt_tbl_list, link) {
832 if (!strcmp(name, entry->name))
833 return entry;
834 }
835
836 return NULL;
837}
838
839/**
840 * ipa2_query_rt_index() - find the routing table index
841 * which name and ip type are given as parameters
842 * @in: [out] the index of the wanted routing table
843 *
844 * Returns: the routing table which name is given as parameter, or NULL if it
845 * doesn't exist
846 */
847int ipa2_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in)
848{
849 struct ipa_rt_tbl *entry;
850
851 if (in->ip >= IPA_IP_MAX) {
852 IPAERR("bad parm\n");
853 return -EINVAL;
854 }
855
856 /* check if this table exists */
857 entry = __ipa_find_rt_tbl(in->ip, in->name);
858 if (!entry)
859 return -EFAULT;
860
861 in->idx = entry->idx;
862 return 0;
863}
864
/**
 * __ipa_add_rt_tbl() - find or create a routing table by name
 * @ip: the ip address family type
 * @name: routing table name
 *
 * Looks the table up in the per-family set and, when absent, allocates
 * it, assigns the first free routing index from rt_idx_bitmap and
 * registers it in the id database.
 *
 * Returns: the table on success, NULL on failure
 *
 * caller needs to hold any needed locks to ensure integrity
 */
static struct ipa_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
		const char *name)
{
	struct ipa_rt_tbl *entry;
	struct ipa_rt_tbl_set *set;
	int i;
	int id;

	if (ip >= IPA_IP_MAX || name == NULL) {
		IPAERR("bad parm\n");
		goto error;
	}

	set = &ipa_ctx->rt_tbl_set[ip];
	/* check if this table exists */
	entry = __ipa_find_rt_tbl(ip, name);
	if (!entry) {
		entry = kmem_cache_zalloc(ipa_ctx->rt_tbl_cache, GFP_KERNEL);
		if (!entry) {
			IPAERR("failed to alloc RT tbl object\n");
			goto error;
		}
		/* find a routing tbl index */
		for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
			if (!test_bit(i, &ipa_ctx->rt_idx_bitmap[ip])) {
				entry->idx = i;
				set_bit(i, &ipa_ctx->rt_idx_bitmap[ip]);
				break;
			}
		}
		if (i == IPA_RT_INDEX_BITMAP_SIZE) {
			IPAERR("not free RT tbl indices left\n");
			goto fail_rt_idx_alloc;
		}

		INIT_LIST_HEAD(&entry->head_rt_rule_list);
		INIT_LIST_HEAD(&entry->link);
		strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
		entry->set = set;
		entry->cookie = IPA_COOKIE;
		/* table lives in system memory when the local copy is off */
		entry->in_sys = (ip == IPA_IP_v4) ?
			!ipa_ctx->ip4_rt_tbl_lcl : !ipa_ctx->ip6_rt_tbl_lcl;
		set->tbl_cnt++;
		list_add(&entry->link, &set->head_rt_tbl_list);

		IPADBG("add rt tbl idx=%d tbl_cnt=%d ip=%d\n", entry->idx,
				set->tbl_cnt, ip);

		id = ipa_id_alloc(entry);
		if (id < 0) {
			/*
			 * NOTE(review): on id-alloc failure the table is
			 * still returned with a negative id — confirm
			 * callers tolerate this
			 */
			IPAERR("failed to add to tree\n");
			WARN_ON(1);
		}
		entry->id = id;
	}

	return entry;

fail_rt_idx_alloc:
	entry->cookie = 0;
	kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
error:
	return NULL;
}
929
930static int __ipa_del_rt_tbl(struct ipa_rt_tbl *entry)
931{
932 enum ipa_ip_type ip = IPA_IP_MAX;
933 u32 id;
934
935 if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
936 IPAERR("bad parms\n");
937 return -EINVAL;
938 }
939 id = entry->id;
940 if (ipa_id_find(id) == NULL) {
941 IPAERR("lookup failed\n");
942 return -EPERM;
943 }
944
945 if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4])
946 ip = IPA_IP_v4;
947 else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
948 ip = IPA_IP_v6;
949 else
950 WARN_ON(1);
951
952 if (!entry->in_sys) {
953 list_del(&entry->link);
954 clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]);
955 entry->set->tbl_cnt--;
956 IPADBG("del rt tbl_idx=%d tbl_cnt=%d\n", entry->idx,
957 entry->set->tbl_cnt);
958 kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
959 } else {
960 list_move(&entry->link,
961 &ipa_ctx->reap_rt_tbl_set[ip].head_rt_tbl_list);
962 clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]);
963 entry->set->tbl_cnt--;
964 IPADBG("del sys rt tbl_idx=%d tbl_cnt=%d\n", entry->idx,
965 entry->set->tbl_cnt);
966 }
967
968 /* remove the handle from the database */
969 ipa_id_remove(id);
970 return 0;
971}
972
/**
 * __ipa_add_rt_rule() - add a routing rule to the named table
 * @ip: the ip address family type
 * @name: routing table name (table is created on first use)
 * @rule: rule payload to copy into the new entry
 * @at_rear: nonzero to append, zero to prepend
 * @rule_hdl: [out] handle (id) of the newly added rule
 *
 * Returns: 0 on success, -EPERM on failure
 *
 * caller needs to hold any needed locks to ensure integrity
 */
static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
		const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl)
{
	struct ipa_rt_tbl *tbl;
	struct ipa_rt_entry *entry;
	struct ipa_hdr_entry *hdr = NULL;
	struct ipa_hdr_proc_ctx_entry *proc_ctx = NULL;
	int id;

	/* a rule may reference a header or a proc ctx, never both */
	if (rule->hdr_hdl && rule->hdr_proc_ctx_hdl) {
		IPAERR("rule contains both hdr_hdl and hdr_proc_ctx_hdl\n");
		goto error;
	}

	if (rule->hdr_hdl) {
		hdr = ipa_id_find(rule->hdr_hdl);
		if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) {
			IPAERR("rt rule does not point to valid hdr\n");
			goto error;
		}
	} else if (rule->hdr_proc_ctx_hdl) {
		proc_ctx = ipa_id_find(rule->hdr_proc_ctx_hdl);
		if ((proc_ctx == NULL) || (proc_ctx->cookie != IPA_COOKIE)) {
			IPAERR("rt rule does not point to valid proc ctx\n");
			goto error;
		}
	}


	/* finds the table, creating it on first use */
	tbl = __ipa_add_rt_tbl(ip, name);
	if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
		IPAERR("bad params\n");
		goto error;
	}
	/*
	 * do not allow any rules to be added at end of the "default" routing
	 * tables
	 */
	if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
			(tbl->rule_cnt > 0) && (at_rear != 0)) {
		IPAERR("cannot add rule at end of tbl rule_cnt=%d at_rear=%d\n",
				tbl->rule_cnt, at_rear);
		goto error;
	}

	entry = kmem_cache_zalloc(ipa_ctx->rt_rule_cache, GFP_KERNEL);
	if (!entry) {
		/*
		 * NOTE(review): a table freshly created by __ipa_add_rt_tbl
		 * above is not rolled back on the error paths below —
		 * confirm whether that is intended
		 */
		IPAERR("failed to alloc RT rule object\n");
		goto error;
	}
	INIT_LIST_HEAD(&entry->link);
	entry->cookie = IPA_COOKIE;
	entry->rule = *rule;
	entry->tbl = tbl;
	entry->hdr = hdr;
	entry->proc_ctx = proc_ctx;
	if (at_rear)
		list_add_tail(&entry->link, &tbl->head_rt_rule_list);
	else
		list_add(&entry->link, &tbl->head_rt_rule_list);
	tbl->rule_cnt++;
	/* the rule pins its header / proc ctx until the rule is deleted */
	if (entry->hdr)
		entry->hdr->ref_cnt++;
	else if (entry->proc_ctx)
		entry->proc_ctx->ref_cnt++;
	id = ipa_id_alloc(entry);
	if (id < 0) {
		IPAERR("failed to add to tree\n");
		WARN_ON(1);
		goto ipa_insert_failed;
	}
	IPADBG("add rt rule tbl_idx=%d rule_cnt=%d\n", tbl->idx, tbl->rule_cnt);
	*rule_hdl = id;
	entry->id = id;

	return 0;

ipa_insert_failed:
	/* undo the ref counts and unlink the partially added rule */
	if (entry->hdr)
		entry->hdr->ref_cnt--;
	else if (entry->proc_ctx)
		entry->proc_ctx->ref_cnt--;
	list_del(&entry->link);
	kmem_cache_free(ipa_ctx->rt_rule_cache, entry);
error:
	return -EPERM;
}
1060
1061/**
1062 * ipa2_add_rt_rule() - Add the specified routing rules to SW and optionally
1063 * commit to IPA HW
1064 * @rules: [inout] set of routing rules to add
1065 *
1066 * Returns: 0 on success, negative on failure
1067 *
1068 * Note: Should not be called from atomic context
1069 */
1070int ipa2_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
1071{
1072 int i;
1073 int ret;
1074
1075 if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
1076 IPAERR("bad parm\n");
1077 return -EINVAL;
1078 }
1079
1080 mutex_lock(&ipa_ctx->lock);
1081 for (i = 0; i < rules->num_rules; i++) {
1082 if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
1083 &rules->rules[i].rule,
1084 rules->rules[i].at_rear,
1085 &rules->rules[i].rt_rule_hdl)) {
1086 IPAERR("failed to add rt rule %d\n", i);
1087 rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
1088 } else {
1089 rules->rules[i].status = 0;
1090 }
1091 }
1092
1093 if (rules->commit)
1094 if (ipa_ctx->ctrl->ipa_commit_rt(rules->ip)) {
1095 ret = -EPERM;
1096 goto bail;
1097 }
1098
1099 ret = 0;
1100bail:
1101 mutex_unlock(&ipa_ctx->lock);
1102 return ret;
1103}
1104
1105int __ipa_del_rt_rule(u32 rule_hdl)
1106{
1107 struct ipa_rt_entry *entry;
1108 int id;
1109
1110 entry = ipa_id_find(rule_hdl);
1111
1112 if (entry == NULL) {
1113 IPAERR("lookup failed\n");
1114 return -EINVAL;
1115 }
1116
1117 if (entry->cookie != IPA_COOKIE) {
1118 IPAERR("bad params\n");
1119 return -EINVAL;
1120 }
1121
1122 if (entry->hdr)
1123 __ipa_release_hdr(entry->hdr->id);
1124 else if (entry->proc_ctx)
1125 __ipa_release_hdr_proc_ctx(entry->proc_ctx->id);
1126 list_del(&entry->link);
1127 entry->tbl->rule_cnt--;
1128 IPADBG("del rt rule tbl_idx=%d rule_cnt=%d\n", entry->tbl->idx,
1129 entry->tbl->rule_cnt);
1130 if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
1131 if (__ipa_del_rt_tbl(entry->tbl))
1132 IPAERR("fail to del RT tbl\n");
1133 }
1134 entry->cookie = 0;
1135 id = entry->id;
1136 kmem_cache_free(ipa_ctx->rt_rule_cache, entry);
1137
1138 /* remove the handle from the database */
1139 ipa_id_remove(id);
1140
1141 return 0;
1142}
1143
1144/**
1145 * ipa2_del_rt_rule() - Remove the specified routing rules to SW and optionally
1146 * commit to IPA HW
1147 * @hdls: [inout] set of routing rules to delete
1148 *
1149 * Returns: 0 on success, negative on failure
1150 *
1151 * Note: Should not be called from atomic context
1152 */
1153int ipa2_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
1154{
1155 int i;
1156 int ret;
1157
1158 if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
1159 IPAERR("bad parm\n");
1160 return -EINVAL;
1161 }
1162
1163 mutex_lock(&ipa_ctx->lock);
1164 for (i = 0; i < hdls->num_hdls; i++) {
1165 if (__ipa_del_rt_rule(hdls->hdl[i].hdl)) {
1166 IPAERR("failed to del rt rule %i\n", i);
1167 hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
1168 } else {
1169 hdls->hdl[i].status = 0;
1170 }
1171 }
1172
1173 if (hdls->commit)
1174 if (ipa_ctx->ctrl->ipa_commit_rt(hdls->ip)) {
1175 ret = -EPERM;
1176 goto bail;
1177 }
1178
1179 ret = 0;
1180bail:
1181 mutex_unlock(&ipa_ctx->lock);
1182 return ret;
1183}
1184
1185/**
1186 * ipa2_commit_rt_rule() - Commit the current SW routing table of specified type
1187 * to IPA HW
1188 * @ip: The family of routing tables
1189 *
1190 * Returns: 0 on success, negative on failure
1191 *
1192 * Note: Should not be called from atomic context
1193 */
1194int ipa2_commit_rt(enum ipa_ip_type ip)
1195{
1196 int ret;
1197
1198 if (ip >= IPA_IP_MAX) {
1199 IPAERR("bad parm\n");
1200 return -EINVAL;
1201 }
1202
1203 /*
1204 * issue a commit on the filtering module of same IP type since
1205 * filtering rules point to routing tables
1206 */
1207 if (ipa2_commit_flt(ip))
1208 return -EPERM;
1209
1210 mutex_lock(&ipa_ctx->lock);
1211 if (ipa_ctx->ctrl->ipa_commit_rt(ip)) {
1212 ret = -EPERM;
1213 goto bail;
1214 }
1215
1216 ret = 0;
1217bail:
1218 mutex_unlock(&ipa_ctx->lock);
1219 return ret;
1220}
1221
1222/**
1223 * ipa2_reset_rt() - reset the current SW routing table of specified type
1224 * (does not commit to HW)
1225 * @ip: The family of routing tables
1226 *
1227 * Returns: 0 on success, negative on failure
1228 *
1229 * Note: Should not be called from atomic context
1230 */
1231int ipa2_reset_rt(enum ipa_ip_type ip)
1232{
1233 struct ipa_rt_tbl *tbl;
1234 struct ipa_rt_tbl *tbl_next;
1235 struct ipa_rt_tbl_set *set;
1236 struct ipa_rt_entry *rule;
1237 struct ipa_rt_entry *rule_next;
1238 struct ipa_rt_tbl_set *rset;
1239 u32 apps_start_idx;
1240 int id;
1241
1242 if (ip >= IPA_IP_MAX) {
1243 IPAERR("bad parm\n");
1244 return -EINVAL;
1245 }
1246
1247 if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) {
1248 if (ip == IPA_IP_v4)
1249 apps_start_idx = IPA_MEM_PART(v4_apps_rt_index_lo);
1250 else
1251 apps_start_idx = IPA_MEM_PART(v6_apps_rt_index_lo);
1252 } else {
1253 apps_start_idx = 0;
1254 }
1255
1256 /*
1257 * issue a reset on the filtering module of same IP type since
1258 * filtering rules point to routing tables
1259 */
1260 if (ipa2_reset_flt(ip))
1261 IPAERR("fail to reset flt ip=%d\n", ip);
1262
1263 set = &ipa_ctx->rt_tbl_set[ip];
1264 rset = &ipa_ctx->reap_rt_tbl_set[ip];
1265 mutex_lock(&ipa_ctx->lock);
1266 IPADBG("reset rt ip=%d\n", ip);
1267 list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) {
1268 list_for_each_entry_safe(rule, rule_next,
1269 &tbl->head_rt_rule_list, link) {
1270 if (ipa_id_find(rule->id) == NULL) {
1271 WARN_ON(1);
1272 mutex_unlock(&ipa_ctx->lock);
1273 return -EFAULT;
1274 }
1275
1276 /*
1277 * for the "default" routing tbl, remove all but the
1278 * last rule
1279 */
1280 if (tbl->idx == apps_start_idx && tbl->rule_cnt == 1)
1281 continue;
1282
1283 list_del(&rule->link);
1284 tbl->rule_cnt--;
1285 if (rule->hdr)
1286 __ipa_release_hdr(rule->hdr->id);
1287 else if (rule->proc_ctx)
1288 __ipa_release_hdr_proc_ctx(rule->proc_ctx->id);
1289 rule->cookie = 0;
1290 id = rule->id;
1291 kmem_cache_free(ipa_ctx->rt_rule_cache, rule);
1292
1293 /* remove the handle from the database */
1294 ipa_id_remove(id);
1295 }
1296
1297 if (ipa_id_find(tbl->id) == NULL) {
1298 WARN_ON(1);
1299 mutex_unlock(&ipa_ctx->lock);
1300 return -EFAULT;
1301 }
1302 id = tbl->id;
1303
1304 /* do not remove the "default" routing tbl which has index 0 */
1305 if (tbl->idx != apps_start_idx) {
1306 if (!tbl->in_sys) {
1307 list_del(&tbl->link);
1308 set->tbl_cnt--;
1309 clear_bit(tbl->idx,
1310 &ipa_ctx->rt_idx_bitmap[ip]);
1311 IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n",
1312 tbl->idx, set->tbl_cnt);
1313 kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl);
1314 } else {
1315 list_move(&tbl->link, &rset->head_rt_tbl_list);
1316 clear_bit(tbl->idx,
1317 &ipa_ctx->rt_idx_bitmap[ip]);
1318 set->tbl_cnt--;
1319 IPADBG("rst sys rt tbl_idx=%d tbl_cnt=%d\n",
1320 tbl->idx, set->tbl_cnt);
1321 }
1322 /* remove the handle from the database */
1323 ipa_id_remove(id);
1324 }
1325 }
1326 mutex_unlock(&ipa_ctx->lock);
1327
1328 return 0;
1329}
1330
1331/**
1332 * ipa2_get_rt_tbl() - lookup the specified routing table and return handle if
1333 * it exists, if lookup succeeds the routing table ref cnt is increased
1334 * @lookup: [inout] routing table to lookup and its handle
1335 *
1336 * Returns: 0 on success, negative on failure
1337 *
1338 * Note: Should not be called from atomic context
1339 * Caller should call ipa_put_rt_tbl later if this function succeeds
1340 */
1341int ipa2_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
1342{
1343 struct ipa_rt_tbl *entry;
1344 int result = -EFAULT;
1345
1346 if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
1347 IPAERR("bad parm\n");
1348 return -EINVAL;
1349 }
1350 mutex_lock(&ipa_ctx->lock);
1351 entry = __ipa_find_rt_tbl(lookup->ip, lookup->name);
1352 if (entry && entry->cookie == IPA_COOKIE) {
1353 entry->ref_cnt++;
1354 lookup->hdl = entry->id;
1355
1356 /* commit for get */
1357 if (ipa_ctx->ctrl->ipa_commit_rt(lookup->ip))
1358 IPAERR("fail to commit RT tbl\n");
1359
1360 result = 0;
1361 }
1362 mutex_unlock(&ipa_ctx->lock);
1363
1364 return result;
1365}
1366
1367/**
1368 * ipa2_put_rt_tbl() - Release the specified routing table handle
1369 * @rt_tbl_hdl: [in] the routing table handle to release
1370 *
1371 * Returns: 0 on success, negative on failure
1372 *
1373 * Note: Should not be called from atomic context
1374 */
1375int ipa2_put_rt_tbl(u32 rt_tbl_hdl)
1376{
1377 struct ipa_rt_tbl *entry;
1378 enum ipa_ip_type ip = IPA_IP_MAX;
1379 int result;
1380
1381 mutex_lock(&ipa_ctx->lock);
1382 entry = ipa_id_find(rt_tbl_hdl);
1383 if (entry == NULL) {
1384 IPAERR("lookup failed\n");
1385 result = -EINVAL;
1386 goto ret;
1387 }
1388
1389 if ((entry->cookie != IPA_COOKIE) || entry->ref_cnt == 0) {
1390 IPAERR("bad parms\n");
1391 result = -EINVAL;
1392 goto ret;
1393 }
1394
1395 if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4])
1396 ip = IPA_IP_v4;
1397 else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
1398 ip = IPA_IP_v6;
1399 else
1400 WARN_ON(1);
1401
1402 entry->ref_cnt--;
1403 if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
1404 if (__ipa_del_rt_tbl(entry))
1405 IPAERR("fail to del RT tbl\n");
1406 /* commit for put */
1407 if (ipa_ctx->ctrl->ipa_commit_rt(ip))
1408 IPAERR("fail to commit RT tbl\n");
1409 }
1410
1411 result = 0;
1412
1413ret:
1414 mutex_unlock(&ipa_ctx->lock);
1415
1416 return result;
1417}
1418
1419
1420static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule)
1421{
1422 struct ipa_rt_entry *entry;
1423 struct ipa_hdr_entry *hdr = NULL;
1424
1425 if (rtrule->rule.hdr_hdl) {
1426 hdr = ipa_id_find(rtrule->rule.hdr_hdl);
1427 if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) {
1428 IPAERR("rt rule does not point to valid hdr\n");
1429 goto error;
1430 }
1431 }
1432
1433 entry = ipa_id_find(rtrule->rt_rule_hdl);
1434 if (entry == NULL) {
1435 IPAERR("lookup failed\n");
1436 goto error;
1437 }
1438
1439 if (entry->cookie != IPA_COOKIE) {
1440 IPAERR("bad params\n");
1441 goto error;
1442 }
1443
1444 if (entry->hdr)
1445 entry->hdr->ref_cnt--;
1446
1447 entry->rule = rtrule->rule;
1448 entry->hdr = hdr;
1449
1450 if (entry->hdr)
1451 entry->hdr->ref_cnt++;
1452
1453 return 0;
1454
1455error:
1456 return -EPERM;
1457}
1458
1459/**
1460 * ipa2_mdfy_rt_rule() - Modify the specified routing rules in SW and optionally
1461 * commit to IPA HW
1462 *
1463 * Returns: 0 on success, negative on failure
1464 *
1465 * Note: Should not be called from atomic context
1466 */
1467int ipa2_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls)
1468{
1469 int i;
1470 int result;
1471
1472 if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
1473 IPAERR("bad parm\n");
1474 return -EINVAL;
1475 }
1476
1477 mutex_lock(&ipa_ctx->lock);
1478 for (i = 0; i < hdls->num_rules; i++) {
1479 if (__ipa_mdfy_rt_rule(&hdls->rules[i])) {
1480 IPAERR("failed to mdfy rt rule %i\n", i);
1481 hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED;
1482 } else {
1483 hdls->rules[i].status = 0;
1484 }
1485 }
1486
1487 if (hdls->commit)
1488 if (ipa_ctx->ctrl->ipa_commit_rt(hdls->ip)) {
1489 result = -EPERM;
1490 goto bail;
1491 }
1492 result = 0;
1493bail:
1494 mutex_unlock(&ipa_ctx->lock);
1495
1496 return result;
1497}