/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "ipa_i.h"
#include "ipahal/ipahal.h"

static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 60};
static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64};

#define HDR_TYPE_IS_VALID(type) \
	((type) >= 0 && (type) < IPA_HDR_L2_MAX)

#define HDR_PROC_TYPE_IS_VALID(type) \
	((type) >= 0 && (type) < IPA_HDR_PROC_MAX)
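
/*
 * Bin sizing, illustrated (values taken from the tables above): a
 * 14 byte Ethernet II header lands in the 16 byte bin (IPA_HDR_BIN1),
 * a 40 byte header in the 60 byte bin (IPA_HDR_BIN4), and any header
 * longer than 60 bytes is rejected by __ipa_add_hdr() below as an
 * unexpected header length.
 */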

/**
 * ipa3_generate_hdr_hw_tbl() - generates the headers table
 * @mem: [out] buffer to put the header table
 *
 * Returns: 0 on success, negative on failure
 */
static int ipa3_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
{
	struct ipa3_hdr_entry *entry;

	mem->size = ipa3_ctx->hdr_tbl.end;

	if (mem->size == 0) {
		IPAERR("hdr tbl empty\n");
		return -EPERM;
	}
	IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_tbl.end);

	mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
		&mem->phys_base, GFP_KERNEL);
	if (!mem->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
		return -ENOMEM;
	}

	memset(mem->base, 0, mem->size);
	list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
			link) {
		if (entry->is_hdr_proc_ctx)
			continue;
		IPADBG_LOW("hdr of len %d ofst=%d\n", entry->hdr_len,
			entry->offset_entry->offset);
		ipahal_cp_hdr_to_hw_buff(mem->base, entry->offset_entry->offset,
			entry->hdr, entry->hdr_len);
	}

	return 0;
}

static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
	u32 hdr_base_addr)
{
	struct ipa3_hdr_proc_ctx_entry *entry;
	int ret;

	list_for_each_entry(entry,
			&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
			link) {
		IPADBG_LOW("processing type %d ofst=%d\n",
			entry->type, entry->offset_entry->offset);
		ret = ipahal_cp_proc_ctx_to_hw_buff(entry->type, mem->base,
				entry->offset_entry->offset,
				entry->hdr->hdr_len,
				entry->hdr->is_hdr_proc_ctx,
				entry->hdr->phys_base,
				hdr_base_addr,
				entry->hdr->offset_entry);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * ipa3_generate_hdr_proc_ctx_hw_tbl() -
 * generates the headers processing context table.
 * @hdr_sys_addr: [in] physical address of the headers table when it
 * resides in system (DDR) memory
 * @mem: [out] buffer to put the processing context table
 * @aligned_mem: [out] actual processing context table (with alignment).
 * Processing context table needs to be 8 Bytes aligned.
 *
 * Returns: 0 on success, negative on failure
 */
static int ipa3_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr,
	struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem)
{
	u32 hdr_base_addr;

	mem->size = (ipa3_ctx->hdr_proc_ctx_tbl.end) ? : 4;

	/* make sure table is aligned */
	mem->size += IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;

	IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_proc_ctx_tbl.end);

	mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
		&mem->phys_base, GFP_KERNEL);
	if (!mem->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
		return -ENOMEM;
	}

	aligned_mem->phys_base =
		IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(mem->phys_base);
	aligned_mem->base = mem->base +
		(aligned_mem->phys_base - mem->phys_base);
	aligned_mem->size = mem->size - IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
	memset(aligned_mem->base, 0, aligned_mem->size);
	hdr_base_addr = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_ofst) :
		hdr_sys_addr;
	return ipa3_hdr_proc_ctx_to_hw_format(aligned_mem, hdr_base_addr);
}
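
/*
 * A worked example of the alignment math above, with assumed values:
 * if dma_alloc_coherent() hands back phys_base 0x1004 and the macro is
 * taken to round up to the next 8 byte boundary, aligned_mem->phys_base
 * becomes 0x1008; aligned_mem->base is advanced by the same 4 bytes so
 * the virtual and physical views stay in step, and aligned_mem->size
 * gives back the IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE padding that
 * was reserved above.
 */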

/**
 * __ipa_commit_hdr_v3_0() - Commits the header table from memory to HW
 *
 * Two immediate command descriptors are sent: desc[0] loads the headers
 * table (DMA to SRAM, or HDR_INIT_SYSTEM when the table lives in DDR)
 * and desc[1] publishes the processing context table (DMA to SRAM, or a
 * register write of its DDR base address).
 *
 * Returns: 0 on success, negative on failure
 */
int __ipa_commit_hdr_v3_0(void)
{
	struct ipa3_desc desc[2];
	struct ipa_mem_buffer hdr_mem;
	struct ipa_mem_buffer ctx_mem;
	struct ipa_mem_buffer aligned_ctx_mem;
	struct ipahal_imm_cmd_dma_shared_mem dma_cmd_hdr = {0};
	struct ipahal_imm_cmd_dma_shared_mem dma_cmd_ctx = {0};
	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
	struct ipahal_imm_cmd_hdr_init_system hdr_init_cmd = {0};
	struct ipahal_imm_cmd_pyld *hdr_cmd_pyld = NULL;
	struct ipahal_imm_cmd_pyld *ctx_cmd_pyld = NULL;
	int rc = -EFAULT;
	u32 proc_ctx_size;
	u32 proc_ctx_ofst;
	u32 proc_ctx_size_ddr;

	memset(desc, 0, 2 * sizeof(struct ipa3_desc));

	if (ipa3_generate_hdr_hw_tbl(&hdr_mem)) {
		IPAERR("fail to generate HDR HW TBL\n");
		goto end;
	}

	if (ipa3_generate_hdr_proc_ctx_hw_tbl(hdr_mem.phys_base, &ctx_mem,
			&aligned_ctx_mem)) {
		IPAERR("fail to generate HDR PROC CTX HW TBL\n");
		goto end;
	}

	if (ipa3_ctx->hdr_tbl_lcl) {
		if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) {
			IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
				IPA_MEM_PART(apps_hdr_size));
			goto end;
		} else {
			dma_cmd_hdr.is_read = false; /* write operation */
			dma_cmd_hdr.skip_pipeline_clear = false;
			dma_cmd_hdr.pipeline_clear_options = IPAHAL_HPS_CLEAR;
			dma_cmd_hdr.system_addr = hdr_mem.phys_base;
			dma_cmd_hdr.size = hdr_mem.size;
			dma_cmd_hdr.local_addr =
				ipa3_ctx->smem_restricted_bytes +
				IPA_MEM_PART(apps_hdr_ofst);
			hdr_cmd_pyld = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_DMA_SHARED_MEM,
				&dma_cmd_hdr, false);
			if (!hdr_cmd_pyld) {
				IPAERR("fail construct dma_shared_mem cmd\n");
				goto end;
			}
			desc[0].opcode = ipahal_imm_cmd_get_opcode(
				IPA_IMM_CMD_DMA_SHARED_MEM);
			desc[0].pyld = hdr_cmd_pyld->data;
			desc[0].len = hdr_cmd_pyld->len;
		}
	} else {
		if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
			IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
				IPA_MEM_PART(apps_hdr_size_ddr));
			goto end;
		} else {
			hdr_init_cmd.hdr_table_addr = hdr_mem.phys_base;
			hdr_cmd_pyld = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_HDR_INIT_SYSTEM,
				&hdr_init_cmd, false);
			if (!hdr_cmd_pyld) {
				IPAERR("fail construct hdr_init_system cmd\n");
				goto end;
			}
			desc[0].opcode = ipahal_imm_cmd_get_opcode(
				IPA_IMM_CMD_HDR_INIT_SYSTEM);
			desc[0].pyld = hdr_cmd_pyld->data;
			desc[0].len = hdr_cmd_pyld->len;
		}
	}
	desc[0].type = IPA_IMM_CMD_DESC;
	IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);

	proc_ctx_size = IPA_MEM_PART(apps_hdr_proc_ctx_size);
	proc_ctx_ofst = IPA_MEM_PART(apps_hdr_proc_ctx_ofst);
	if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
		if (aligned_ctx_mem.size > proc_ctx_size) {
			IPAERR("tbl too big needed %d avail %d\n",
				aligned_ctx_mem.size,
				proc_ctx_size);
			goto end;
		} else {
			dma_cmd_ctx.is_read = false; /* write operation */
			dma_cmd_ctx.skip_pipeline_clear = false;
			dma_cmd_ctx.pipeline_clear_options = IPAHAL_HPS_CLEAR;
			dma_cmd_ctx.system_addr = aligned_ctx_mem.phys_base;
			dma_cmd_ctx.size = aligned_ctx_mem.size;
			dma_cmd_ctx.local_addr =
				ipa3_ctx->smem_restricted_bytes +
				proc_ctx_ofst;
			ctx_cmd_pyld = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_DMA_SHARED_MEM,
				&dma_cmd_ctx, false);
			if (!ctx_cmd_pyld) {
				IPAERR("fail construct dma_shared_mem cmd\n");
				goto end;
			}
			desc[1].opcode = ipahal_imm_cmd_get_opcode(
				IPA_IMM_CMD_DMA_SHARED_MEM);
			desc[1].pyld = ctx_cmd_pyld->data;
			desc[1].len = ctx_cmd_pyld->len;
		}
	} else {
		proc_ctx_size_ddr = IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
		if (aligned_ctx_mem.size > proc_ctx_size_ddr) {
			IPAERR("tbl too big needed %d avail %d\n",
				aligned_ctx_mem.size,
				proc_ctx_size_ddr);
			goto end;
		} else {
			reg_write_cmd.skip_pipeline_clear = false;
			reg_write_cmd.pipeline_clear_options =
				IPAHAL_HPS_CLEAR;
			reg_write_cmd.offset =
				ipahal_get_reg_ofst(
					IPA_SYS_PKT_PROC_CNTXT_BASE);
			reg_write_cmd.value = aligned_ctx_mem.phys_base;
			reg_write_cmd.value_mask =
				~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1);
			ctx_cmd_pyld = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_REGISTER_WRITE,
				&reg_write_cmd, false);
			if (!ctx_cmd_pyld) {
				IPAERR("fail construct register_write cmd\n");
				goto end;
			}
			desc[1].opcode = ipahal_imm_cmd_get_opcode(
				IPA_IMM_CMD_REGISTER_WRITE);
			desc[1].pyld = ctx_cmd_pyld->data;
			desc[1].len = ctx_cmd_pyld->len;
		}
	}
	desc[1].type = IPA_IMM_CMD_DESC;
	IPA_DUMP_BUFF(ctx_mem.base, ctx_mem.phys_base, ctx_mem.size);

	if (ipa3_send_cmd(2, desc))
		IPAERR("fail to send immediate command\n");
	else
		rc = 0;

	if (ipa3_ctx->hdr_tbl_lcl) {
		dma_free_coherent(ipa3_ctx->pdev, hdr_mem.size, hdr_mem.base,
			hdr_mem.phys_base);
	} else {
		if (!rc) {
			if (ipa3_ctx->hdr_mem.phys_base)
				dma_free_coherent(ipa3_ctx->pdev,
					ipa3_ctx->hdr_mem.size,
					ipa3_ctx->hdr_mem.base,
					ipa3_ctx->hdr_mem.phys_base);
			ipa3_ctx->hdr_mem = hdr_mem;
		}
	}

	if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
		dma_free_coherent(ipa3_ctx->pdev, ctx_mem.size, ctx_mem.base,
			ctx_mem.phys_base);
	} else {
		if (!rc) {
			if (ipa3_ctx->hdr_proc_ctx_mem.phys_base)
				dma_free_coherent(ipa3_ctx->pdev,
					ipa3_ctx->hdr_proc_ctx_mem.size,
					ipa3_ctx->hdr_proc_ctx_mem.base,
					ipa3_ctx->hdr_proc_ctx_mem.phys_base);
			ipa3_ctx->hdr_proc_ctx_mem = ctx_mem;
		}
	}

end:
	if (ctx_cmd_pyld)
		ipahal_destroy_imm_cmd(ctx_cmd_pyld);

	if (hdr_cmd_pyld)
		ipahal_destroy_imm_cmd(hdr_cmd_pyld);

	return rc;
}

static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
	bool add_ref_hdr)
{
	struct ipa3_hdr_entry *hdr_entry;
	struct ipa3_hdr_proc_ctx_entry *entry;
	struct ipa3_hdr_proc_ctx_offset_entry *offset;
	u32 bin;
	struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;
	int id;
	int needed_len;
	int mem_size;

	IPADBG_LOW("processing type %d hdr_hdl %d\n",
		proc_ctx->type, proc_ctx->hdr_hdl);

	if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
		IPAERR("invalid processing type %d\n", proc_ctx->type);
		return -EINVAL;
	}

	hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl);
	if (!hdr_entry || (hdr_entry->cookie != IPA_COOKIE)) {
		IPAERR("hdr_hdl is invalid\n");
		return -EINVAL;
	}

	entry = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_cache, GFP_KERNEL);
	if (!entry) {
		IPAERR("failed to alloc proc_ctx object\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&entry->link);

	entry->type = proc_ctx->type;
	entry->hdr = hdr_entry;
	if (add_ref_hdr)
		hdr_entry->ref_cnt++;
	entry->cookie = IPA_COOKIE;

	needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type);

	if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0]) {
		bin = IPA_HDR_PROC_CTX_BIN0;
	} else if (needed_len <=
			ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
		bin = IPA_HDR_PROC_CTX_BIN1;
	} else {
		IPAERR("unexpected needed len %d\n", needed_len);
		WARN_ON(1);
		goto bad_len;
	}

	mem_size = (ipa3_ctx->hdr_proc_ctx_tbl_lcl) ?
		IPA_MEM_PART(apps_hdr_proc_ctx_size) :
		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
	if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
		IPAERR("hdr proc ctx table overflow\n");
		goto bad_len;
	}

	if (list_empty(&htbl->head_free_offset_list[bin])) {
		offset = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_offset_cache,
			GFP_KERNEL);
		if (!offset) {
			IPAERR("failed to alloc offset object\n");
			goto bad_len;
		}
		INIT_LIST_HEAD(&offset->link);
		/*
		 * for a first item grow, set the bin and offset which are set
		 * in stone
		 */
		offset->offset = htbl->end;
		offset->bin = bin;
		htbl->end += ipa_hdr_proc_ctx_bin_sz[bin];
		list_add(&offset->link,
			&htbl->head_offset_list[bin]);
	} else {
		/* get the first free slot */
		offset =
			list_first_entry(&htbl->head_free_offset_list[bin],
			struct ipa3_hdr_proc_ctx_offset_entry, link);
		list_move(&offset->link, &htbl->head_offset_list[bin]);
	}

	entry->offset_entry = offset;
	list_add(&entry->link, &htbl->head_proc_ctx_entry_list);
	htbl->proc_ctx_cnt++;
	IPADBG_LOW("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
		htbl->proc_ctx_cnt, offset->offset);

	id = ipa3_id_alloc(entry);
	if (id < 0) {
		IPAERR("failed to alloc id\n");
		WARN_ON(1);
	}
	entry->id = id;
	proc_ctx->proc_ctx_hdl = id;
	entry->ref_cnt++;

	return 0;

bad_len:
	if (add_ref_hdr)
		hdr_entry->ref_cnt--;
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);
	return -EPERM;
}

static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
{
	struct ipa3_hdr_entry *entry;
	struct ipa_hdr_offset_entry *offset;
	u32 bin;
	struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
	int id;
	int mem_size;

	if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
		IPAERR("bad parm\n");
		goto error;
	}

	if (!HDR_TYPE_IS_VALID(hdr->type)) {
		IPAERR("invalid hdr type %d\n", hdr->type);
		goto error;
	}

	entry = kmem_cache_zalloc(ipa3_ctx->hdr_cache, GFP_KERNEL);
	if (!entry) {
		IPAERR("failed to alloc hdr object\n");
		goto error;
	}

	INIT_LIST_HEAD(&entry->link);

	memcpy(entry->hdr, hdr->hdr, hdr->hdr_len);
	entry->hdr_len = hdr->hdr_len;
	strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX);
	entry->is_partial = hdr->is_partial;
	entry->type = hdr->type;
	entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
	entry->eth2_ofst = hdr->eth2_ofst;
	entry->cookie = IPA_COOKIE;

	if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
		bin = IPA_HDR_BIN0;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1])
		bin = IPA_HDR_BIN1;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2])
		bin = IPA_HDR_BIN2;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3])
		bin = IPA_HDR_BIN3;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
		bin = IPA_HDR_BIN4;
	else {
		IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
		goto bad_hdr_len;
	}

	mem_size = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_size) :
		IPA_MEM_PART(apps_hdr_size_ddr);

	/* if header does not fit to table, place it in DDR */
	if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) {
		entry->is_hdr_proc_ctx = true;
		entry->phys_base = dma_map_single(ipa3_ctx->pdev,
			entry->hdr,
			entry->hdr_len,
			DMA_TO_DEVICE);
	} else {
		entry->is_hdr_proc_ctx = false;
		if (list_empty(&htbl->head_free_offset_list[bin])) {
			offset = kmem_cache_zalloc(ipa3_ctx->hdr_offset_cache,
				GFP_KERNEL);
			if (!offset) {
				IPAERR("failed to alloc hdr offset object\n");
				goto bad_hdr_len;
			}
			INIT_LIST_HEAD(&offset->link);
			/*
			 * for a first item grow, set the bin and offset which
			 * are set in stone
			 */
			offset->offset = htbl->end;
			offset->bin = bin;
			htbl->end += ipa_hdr_bin_sz[bin];
			list_add(&offset->link,
				&htbl->head_offset_list[bin]);
		} else {
			/* get the first free slot */
			offset =
				list_first_entry(&htbl->head_free_offset_list[bin],
				struct ipa_hdr_offset_entry, link);
			list_move(&offset->link, &htbl->head_offset_list[bin]);
		}

		entry->offset_entry = offset;
	}

	list_add(&entry->link, &htbl->head_hdr_entry_list);
	htbl->hdr_cnt++;
	if (entry->is_hdr_proc_ctx)
		IPADBG_LOW("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
			hdr->hdr_len,
			htbl->hdr_cnt,
			&entry->phys_base);
	else
		IPADBG_LOW("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
			hdr->hdr_len,
			htbl->hdr_cnt,
			entry->offset_entry->offset);

	id = ipa3_id_alloc(entry);
	if (id < 0) {
		IPAERR("failed to alloc id\n");
		WARN_ON(1);
	}
	entry->id = id;
	hdr->hdr_hdl = id;
	entry->ref_cnt++;

	if (entry->is_hdr_proc_ctx) {
		struct ipa_hdr_proc_ctx_add proc_ctx;

		IPADBG("adding processing context for header %s\n", hdr->name);
		proc_ctx.type = IPA_HDR_PROC_NONE;
		proc_ctx.hdr_hdl = id;
		if (__ipa_add_hdr_proc_ctx(&proc_ctx, false)) {
			IPAERR("failed to add hdr proc ctx\n");
			goto fail_add_proc_ctx;
		}
		entry->proc_ctx = ipa3_id_find(proc_ctx.proc_ctx_hdl);
	}

	return 0;

fail_add_proc_ctx:
	entry->ref_cnt--;
	hdr->hdr_hdl = 0;
	ipa3_id_remove(id);
	htbl->hdr_cnt--;
	list_del(&entry->link);
	dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
		entry->hdr_len, DMA_TO_DEVICE);
bad_hdr_len:
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_cache, entry);
error:
	return -EPERM;
}

static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl, bool release_hdr)
{
	struct ipa3_hdr_proc_ctx_entry *entry;
	struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;

	entry = ipa3_id_find(proc_ctx_hdl);
	if (!entry || (entry->cookie != IPA_COOKIE)) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	IPADBG("del ctx proc cnt=%d ofst=%d\n",
		htbl->proc_ctx_cnt, entry->offset_entry->offset);

	if (--entry->ref_cnt) {
		IPADBG("proc_ctx_hdl %x ref_cnt %d\n",
			proc_ctx_hdl, entry->ref_cnt);
		return 0;
	}

	if (release_hdr)
		__ipa3_del_hdr(entry->hdr->id);

	/* move the offset entry to appropriate free list */
	list_move(&entry->offset_entry->link,
		&htbl->head_free_offset_list[entry->offset_entry->bin]);
	list_del(&entry->link);
	htbl->proc_ctx_cnt--;
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);

	/* remove the handle from the database */
	ipa3_id_remove(proc_ctx_hdl);

	return 0;
}

int __ipa3_del_hdr(u32 hdr_hdl)
{
	struct ipa3_hdr_entry *entry;
	struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;

	entry = ipa3_id_find(hdr_hdl);
	if (entry == NULL) {
		IPAERR("lookup failed\n");
		return -EINVAL;
	}

	if (entry->cookie != IPA_COOKIE) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	if (entry->is_hdr_proc_ctx)
		IPADBG("del hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
			entry->hdr_len, htbl->hdr_cnt, &entry->phys_base);
	else
		IPADBG("del hdr of sz=%d hdr_cnt=%d ofst=%d\n", entry->hdr_len,
			htbl->hdr_cnt, entry->offset_entry->offset);

	if (--entry->ref_cnt) {
		IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt);
		return 0;
	}

	if (entry->is_hdr_proc_ctx) {
		dma_unmap_single(ipa3_ctx->pdev,
			entry->phys_base,
			entry->hdr_len,
			DMA_TO_DEVICE);
		__ipa3_del_hdr_proc_ctx(entry->proc_ctx->id, false);
	} else {
		/* move the offset entry to appropriate free list */
		list_move(&entry->offset_entry->link,
			&htbl->head_free_offset_list[entry->offset_entry->bin]);
	}
	list_del(&entry->link);
	htbl->hdr_cnt--;
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_cache, entry);

	/* remove the handle from the database */
	ipa3_id_remove(hdr_hdl);

	return 0;
}

/**
 * ipa3_add_hdr() - add the specified headers to SW and optionally commit them
 * to IPA HW
 * @hdrs: [inout] set of headers to add
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs)
{
	int i;
	int result = -EFAULT;

	if (hdrs == NULL || hdrs->num_hdrs == 0) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	IPADBG("adding %d headers to IPA driver internal data struct\n",
		hdrs->num_hdrs);
	for (i = 0; i < hdrs->num_hdrs; i++) {
		if (__ipa_add_hdr(&hdrs->hdr[i])) {
			IPAERR("failed to add hdr %d\n", i);
			hdrs->hdr[i].status = -1;
		} else {
			hdrs->hdr[i].status = 0;
		}
	}

	if (hdrs->commit) {
		IPADBG("committing all headers to IPA core\n");
		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
			result = -EPERM;
			goto bail;
		}
	}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}
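
/*
 * Usage sketch for ipa3_add_hdr() (hypothetical client code;
 * eth_hdr_template and saved_hdl are illustrative, not part of this
 * driver):
 *
 *	struct ipa_ioc_add_hdr *req;
 *
 *	req = kzalloc(sizeof(*req) + sizeof(struct ipa_hdr_add),
 *		GFP_KERNEL);
 *	req->commit = 1;
 *	req->num_hdrs = 1;
 *	req->hdr[0].type = IPA_HDR_L2_ETHERNET_II;
 *	req->hdr[0].hdr_len = ETH_HLEN;
 *	memcpy(req->hdr[0].hdr, eth_hdr_template, ETH_HLEN);
 *	strlcpy(req->hdr[0].name, "cli_eth_hdr", IPA_RESOURCE_NAME_MAX);
 *	if (!ipa3_add_hdr(req) && !req->hdr[0].status)
 *		saved_hdl = req->hdr[0].hdr_hdl;
 *	kfree(req);
 *
 * Per-entry failures are reported through hdr[i].status, so a zero
 * return alone does not mean every header was added.
 */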

/**
 * ipa3_del_hdr() - Remove the specified headers from SW and optionally commit
 * them to IPA HW
 * @hdls: [inout] set of headers to delete
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls)
{
	int i;
	int result = -EFAULT;

	if (hdls == NULL || hdls->num_hdls == 0) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < hdls->num_hdls; i++) {
		if (__ipa3_del_hdr(hdls->hdl[i].hdl)) {
			IPAERR("failed to del hdr %i\n", i);
			hdls->hdl[i].status = -1;
		} else {
			hdls->hdl[i].status = 0;
		}
	}

	if (hdls->commit) {
		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
			result = -EPERM;
			goto bail;
		}
	}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}

/**
 * ipa3_add_hdr_proc_ctx() - add the specified headers to SW
 * and optionally commit them to IPA HW
 * @proc_ctxs: [inout] set of processing context headers to add
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
{
	int i;
	int result = -EFAULT;

	if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	IPADBG("adding %d header processing contexts to IPA driver\n",
		proc_ctxs->num_proc_ctxs);
	for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
		if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
			IPAERR("failed to add hdr proc ctx %d\n", i);
			proc_ctxs->proc_ctx[i].status = -1;
		} else {
			proc_ctxs->proc_ctx[i].status = 0;
		}
	}

	if (proc_ctxs->commit) {
		IPADBG("committing all headers to IPA core\n");
		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
			result = -EPERM;
			goto bail;
		}
	}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}
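
/*
 * Usage sketch for ipa3_add_hdr_proc_ctx() (hypothetical; eth_hdr_hdl
 * is a handle obtained from a prior ipa3_add_hdr() call and
 * saved_proc_hdl is illustrative):
 *
 *	struct ipa_ioc_add_hdr_proc_ctx *req;
 *
 *	req = kzalloc(sizeof(*req) + sizeof(struct ipa_hdr_proc_ctx_add),
 *		GFP_KERNEL);
 *	req->commit = 1;
 *	req->num_proc_ctxs = 1;
 *	req->proc_ctx[0].type = IPA_HDR_PROC_ETHII_TO_ETHII;
 *	req->proc_ctx[0].hdr_hdl = eth_hdr_hdl;
 *	if (!ipa3_add_hdr_proc_ctx(req) && !req->proc_ctx[0].status)
 *		saved_proc_hdl = req->proc_ctx[0].proc_ctx_hdl;
 *	kfree(req);
 */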

/**
 * ipa3_del_hdr_proc_ctx() -
 * Remove the specified processing context headers from SW and
 * optionally commit them to IPA HW.
 * @hdls: [inout] set of processing context headers to delete
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
{
	int i;
	int result;

	if (hdls == NULL || hdls->num_hdls == 0) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < hdls->num_hdls; i++) {
		if (__ipa3_del_hdr_proc_ctx(hdls->hdl[i].hdl, true)) {
			IPAERR("failed to del hdr proc ctx %i\n", i);
			hdls->hdl[i].status = -1;
		} else {
			hdls->hdl[i].status = 0;
		}
	}

818
819 if (hdls->commit) {
820 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
821 result = -EPERM;
822 goto bail;
823 }
824 }
825 result = 0;
826bail:
827 mutex_unlock(&ipa3_ctx->lock);
828 return result;
829}

/**
 * ipa3_commit_hdr() - commit to IPA HW the current header table in SW
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_commit_hdr(void)
{
	int result = -EFAULT;

	/*
	 * issue a commit on the routing module since routing rules point to
	 * header table entries
	 */
	if (ipa3_commit_rt(IPA_IP_v4))
		return -EPERM;
	if (ipa3_commit_rt(IPA_IP_v6))
		return -EPERM;

	mutex_lock(&ipa3_ctx->lock);
	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
		result = -EPERM;
		goto bail;
	}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}

/**
 * ipa3_reset_hdr() - reset the current header table in SW (does not commit to
 * HW)
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_reset_hdr(void)
{
	struct ipa3_hdr_entry *entry;
	struct ipa3_hdr_entry *next;
	struct ipa3_hdr_proc_ctx_entry *ctx_entry;
	struct ipa3_hdr_proc_ctx_entry *ctx_next;
	struct ipa_hdr_offset_entry *off_entry;
	struct ipa_hdr_offset_entry *off_next;
	struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_entry;
	struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_next;
	int i;

	/*
	 * issue a reset on the routing module since routing rules point to
	 * header table entries
	 */
	if (ipa3_reset_rt(IPA_IP_v4))
		IPAERR("fail to reset v4 rt\n");
	if (ipa3_reset_rt(IPA_IP_v6))
		IPAERR("fail to reset v6 rt\n");

	mutex_lock(&ipa3_ctx->lock);
	IPADBG("reset hdr\n");
	list_for_each_entry_safe(entry, next,
		&ipa3_ctx->hdr_tbl.head_hdr_entry_list, link) {

		/* do not remove the default header */
		if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
			if (entry->is_hdr_proc_ctx) {
				IPAERR("default header is proc ctx\n");
				mutex_unlock(&ipa3_ctx->lock);
				WARN_ON(1);
				return -EFAULT;
			}
			continue;
		}

		if (ipa3_id_find(entry->id) == NULL) {
			mutex_unlock(&ipa3_ctx->lock);
			WARN_ON(1);
			return -EFAULT;
		}
		if (entry->is_hdr_proc_ctx) {
			dma_unmap_single(ipa3_ctx->pdev,
				entry->phys_base,
				entry->hdr_len,
				DMA_TO_DEVICE);
			entry->proc_ctx = NULL;
		}
		list_del(&entry->link);
		entry->ref_cnt = 0;
		entry->cookie = 0;

		/* remove the handle from the database */
		ipa3_id_remove(entry->id);
		kmem_cache_free(ipa3_ctx->hdr_cache, entry);
	}
	for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
		list_for_each_entry_safe(off_entry, off_next,
			&ipa3_ctx->hdr_tbl.head_offset_list[i],
			link) {

			/*
			 * do not remove the default exception header which is
			 * at offset 0
			 */
			if (off_entry->offset == 0)
				continue;

			list_del(&off_entry->link);
			kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry);
		}
		list_for_each_entry_safe(off_entry, off_next,
			&ipa3_ctx->hdr_tbl.head_free_offset_list[i],
			link) {
			list_del(&off_entry->link);
			kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry);
		}
	}
	/* there is one header of size 8 */
	ipa3_ctx->hdr_tbl.end = 8;
	ipa3_ctx->hdr_tbl.hdr_cnt = 1;

	IPADBG("reset hdr proc ctx\n");
	list_for_each_entry_safe(
		ctx_entry,
		ctx_next,
		&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
		link) {

		if (ipa3_id_find(ctx_entry->id) == NULL) {
			mutex_unlock(&ipa3_ctx->lock);
			WARN_ON(1);
			return -EFAULT;
		}
		list_del(&ctx_entry->link);
		ctx_entry->ref_cnt = 0;
		ctx_entry->cookie = 0;

		/* remove the handle from the database */
		ipa3_id_remove(ctx_entry->id);
		kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, ctx_entry);
	}
	for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
		list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
			&ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i],
			link) {

			list_del(&ctx_off_entry->link);
			kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache,
				ctx_off_entry);
		}
		list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
			&ipa3_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i],
			link) {
			list_del(&ctx_off_entry->link);
			kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache,
				ctx_off_entry);
		}
	}
	ipa3_ctx->hdr_proc_ctx_tbl.end = 0;
	ipa3_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0;
	mutex_unlock(&ipa3_ctx->lock);

	return 0;
}

static struct ipa3_hdr_entry *__ipa_find_hdr(const char *name)
{
	struct ipa3_hdr_entry *entry;

	if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
		IPAERR("Header name too long: %s\n", name);
		return NULL;
	}

	list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
			link) {
		if (!strcmp(name, entry->name))
			return entry;
	}

	return NULL;
}

/**
 * ipa3_get_hdr() - Lookup the specified header resource
 * @lookup: [inout] header to lookup and its handle
 *
 * lookup the specified header resource and return handle if it exists
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 * Caller should call ipa3_put_hdr later if this function succeeds
 */
int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup)
{
	struct ipa3_hdr_entry *entry;
	int result = -1;

	if (lookup == NULL) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}
	mutex_lock(&ipa3_ctx->lock);
	entry = __ipa_find_hdr(lookup->name);
	if (entry) {
		lookup->hdl = entry->id;
		result = 0;
	}
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}
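
/*
 * Lookup sketch (hypothetical; "cli_eth_hdr" matches the add example
 * above, and use_hdr_hdl() stands in for an assumed consumer of the
 * handle). Per the kernel-doc, the caller pairs a successful get with
 * ipa3_put_hdr():
 *
 *	struct ipa_ioc_get_hdr lookup;
 *
 *	memset(&lookup, 0, sizeof(lookup));
 *	strlcpy(lookup.name, "cli_eth_hdr", IPA_RESOURCE_NAME_MAX);
 *	if (!ipa3_get_hdr(&lookup)) {
 *		use_hdr_hdl(lookup.hdl);
 *		ipa3_put_hdr(lookup.hdl);
 *	}
 */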

/**
 * __ipa3_release_hdr() - drop reference to header and cause
 * deletion if reference count permits
 * @hdr_hdl: [in] handle of header to be released
 *
 * Returns: 0 on success, negative on failure
 */
int __ipa3_release_hdr(u32 hdr_hdl)
{
	int result = 0;

	if (__ipa3_del_hdr(hdr_hdl)) {
		IPADBG("fail to del hdr %x\n", hdr_hdl);
		result = -EFAULT;
		goto bail;
	}

	/* commit for put */
	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
		IPAERR("fail to commit hdr\n");
		result = -EFAULT;
		goto bail;
	}

bail:
	return result;
}

/**
 * __ipa3_release_hdr_proc_ctx() - drop reference to processing context
 * and cause deletion if reference count permits
 * @proc_ctx_hdl: [in] handle of processing context to be released
 *
 * Returns: 0 on success, negative on failure
 */
int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl)
{
	int result = 0;

	if (__ipa3_del_hdr_proc_ctx(proc_ctx_hdl, true)) {
		IPADBG("fail to del hdr proc ctx %x\n", proc_ctx_hdl);
		result = -EFAULT;
		goto bail;
	}

	/* commit for put */
	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
		IPAERR("fail to commit hdr\n");
		result = -EFAULT;
		goto bail;
	}

bail:
	return result;
}

/**
 * ipa3_put_hdr() - Release the specified header handle
 * @hdr_hdl: [in] the header handle to release
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_put_hdr(u32 hdr_hdl)
{
	struct ipa3_hdr_entry *entry;
	int result = -EFAULT;

	mutex_lock(&ipa3_ctx->lock);

	entry = ipa3_id_find(hdr_hdl);
	if (entry == NULL) {
		IPAERR("lookup failed\n");
		result = -EINVAL;
		goto bail;
	}

	if (entry->cookie != IPA_COOKIE) {
		IPAERR("invalid header entry\n");
		result = -EINVAL;
		goto bail;
	}

	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}

/**
 * ipa3_copy_hdr() - Lookup the specified header resource and return a copy of
 * it
 * @copy: [inout] header to lookup and its copy
 *
 * lookup the specified header resource and return a copy of it (along with its
 * attributes) if it exists, this would be called for partial headers
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy)
{
	struct ipa3_hdr_entry *entry;
	int result = -EFAULT;

	if (copy == NULL) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}
	mutex_lock(&ipa3_ctx->lock);
	entry = __ipa_find_hdr(copy->name);
	if (entry) {
		memcpy(copy->hdr, entry->hdr, entry->hdr_len);
		copy->hdr_len = entry->hdr_len;
		copy->type = entry->type;
		copy->is_partial = entry->is_partial;
		copy->is_eth2_ofst_valid = entry->is_eth2_ofst_valid;
		copy->eth2_ofst = entry->eth2_ofst;
		result = 0;
	}
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}
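
/*
 * Copy sketch (hypothetical, for a partial header; dst_mac is an
 * assumed six byte buffer, and the Ethernet destination is taken to
 * sit at offset 0 of the template, which is an assumption rather than
 * something this driver guarantees):
 *
 *	struct ipa_ioc_copy_hdr copy;
 *
 *	memset(&copy, 0, sizeof(copy));
 *	strlcpy(copy.name, "cli_eth_hdr", IPA_RESOURCE_NAME_MAX);
 *	if (!ipa3_copy_hdr(&copy) && copy.is_partial)
 *		memcpy(copy.hdr, dst_mac, ETH_ALEN);
 *
 * The patched buffer would then be handed back through ipa3_add_hdr()
 * as a new, fully resolved header.
 */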