blob: 6e51472c5d1e052e577ef2de4ca2646812f58523 [file] [log] [blame]
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Amir Levy9659e592016-10-27 18:08:27 +03002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include "ipa_i.h"
14#include "ipahal/ipahal.h"
15
/* Bin sizes (bytes) for header-table allocation; a header occupies the
 * smallest bin that fits it.
 */
static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 60};
/* Bin sizes (bytes) for processing-context table allocation */
static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64};

/* true if type is a valid L2 header type enum value */
#define HDR_TYPE_IS_VALID(type) \
	((type) >= 0 && (type) < IPA_HDR_L2_MAX)

/* true if type is a valid header processing type enum value */
#define HDR_PROC_TYPE_IS_VALID(type) \
	((type) >= 0 && (type) < IPA_HDR_PROC_MAX)
24
25/**
26 * ipa3_generate_hdr_hw_tbl() - generates the headers table
27 * @mem: [out] buffer to put the header table
28 *
29 * Returns: 0 on success, negative on failure
30 */
31static int ipa3_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
32{
33 struct ipa3_hdr_entry *entry;
34
35 mem->size = ipa3_ctx->hdr_tbl.end;
36
37 if (mem->size == 0) {
38 IPAERR("hdr tbl empty\n");
39 return -EPERM;
40 }
41 IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_tbl.end);
42
43 mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
44 &mem->phys_base, GFP_KERNEL);
45 if (!mem->base) {
46 IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
47 return -ENOMEM;
48 }
49
50 memset(mem->base, 0, mem->size);
51 list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
52 link) {
53 if (entry->is_hdr_proc_ctx)
54 continue;
55 IPADBG_LOW("hdr of len %d ofst=%d\n", entry->hdr_len,
56 entry->offset_entry->offset);
57 ipahal_cp_hdr_to_hw_buff(mem->base, entry->offset_entry->offset,
58 entry->hdr, entry->hdr_len);
59 }
60
61 return 0;
62}
63
64static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
65 u32 hdr_base_addr)
66{
67 struct ipa3_hdr_proc_ctx_entry *entry;
68 int ret;
69
70 list_for_each_entry(entry,
71 &ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
72 link) {
73 IPADBG_LOW("processing type %d ofst=%d\n",
74 entry->type, entry->offset_entry->offset);
75 ret = ipahal_cp_proc_ctx_to_hw_buff(entry->type, mem->base,
76 entry->offset_entry->offset,
77 entry->hdr->hdr_len,
78 entry->hdr->is_hdr_proc_ctx,
79 entry->hdr->phys_base,
80 hdr_base_addr,
81 entry->hdr->offset_entry);
82 if (ret)
83 return ret;
84 }
85
86 return 0;
87}
88
/**
 * ipa3_generate_hdr_proc_ctx_hw_tbl() -
 * generates the headers processing context table.
 * @hdr_sys_addr: physical base of the system-memory header table (used
 *	when the header table is not SRAM-resident)
 * @mem: [out] buffer to put the processing context table
 * @aligned_mem: [out] actual processing context table (with alignment).
 *	Processing context table needs to be 8 Bytes aligned.
 *
 * Returns: 0 on success, negative on failure
 */
static int ipa3_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr,
	struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem)
{
	u32 hdr_base_addr;

	/* an empty table still needs a non-zero allocation */
	mem->size = (ipa3_ctx->hdr_proc_ctx_tbl.end) ? : 4;

	/* make sure table is aligned */
	mem->size += IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;

	IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_proc_ctx_tbl.end);

	mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
		&mem->phys_base, GFP_KERNEL);
	if (!mem->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
		return -ENOMEM;
	}

	/* carve the aligned table out of the over-sized allocation */
	aligned_mem->phys_base =
		IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(mem->phys_base);
	aligned_mem->base = mem->base +
		(aligned_mem->phys_base - mem->phys_base);
	aligned_mem->size = mem->size - IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
	memset(aligned_mem->base, 0, aligned_mem->size);
	/* headers are referenced either in local SRAM or in system memory */
	hdr_base_addr = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_ofst) :
		hdr_sys_addr;
	return ipa3_hdr_proc_ctx_to_hw_format(aligned_mem, hdr_base_addr);
}
127
128/**
129 * __ipa_commit_hdr_v3_0() - Commits the header table from memory to HW
130 *
131 * Returns: 0 on success, negative on failure
132 */
133int __ipa_commit_hdr_v3_0(void)
134{
135 struct ipa3_desc desc[2];
136 struct ipa_mem_buffer hdr_mem;
137 struct ipa_mem_buffer ctx_mem;
138 struct ipa_mem_buffer aligned_ctx_mem;
139 struct ipahal_imm_cmd_dma_shared_mem dma_cmd_hdr = {0};
140 struct ipahal_imm_cmd_dma_shared_mem dma_cmd_ctx = {0};
141 struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
142 struct ipahal_imm_cmd_hdr_init_system hdr_init_cmd = {0};
143 struct ipahal_imm_cmd_pyld *hdr_cmd_pyld = NULL;
144 struct ipahal_imm_cmd_pyld *ctx_cmd_pyld = NULL;
145 int rc = -EFAULT;
146 u32 proc_ctx_size;
147 u32 proc_ctx_ofst;
148 u32 proc_ctx_size_ddr;
149
150 memset(desc, 0, 2 * sizeof(struct ipa3_desc));
151
152 if (ipa3_generate_hdr_hw_tbl(&hdr_mem)) {
153 IPAERR("fail to generate HDR HW TBL\n");
154 goto end;
155 }
156
157 if (ipa3_generate_hdr_proc_ctx_hw_tbl(hdr_mem.phys_base, &ctx_mem,
158 &aligned_ctx_mem)) {
159 IPAERR("fail to generate HDR PROC CTX HW TBL\n");
160 goto end;
161 }
162
163 if (ipa3_ctx->hdr_tbl_lcl) {
164 if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) {
165 IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
166 IPA_MEM_PART(apps_hdr_size));
167 goto end;
168 } else {
169 dma_cmd_hdr.is_read = false; /* write operation */
170 dma_cmd_hdr.skip_pipeline_clear = false;
171 dma_cmd_hdr.pipeline_clear_options = IPAHAL_HPS_CLEAR;
172 dma_cmd_hdr.system_addr = hdr_mem.phys_base;
173 dma_cmd_hdr.size = hdr_mem.size;
174 dma_cmd_hdr.local_addr =
175 ipa3_ctx->smem_restricted_bytes +
176 IPA_MEM_PART(apps_hdr_ofst);
177 hdr_cmd_pyld = ipahal_construct_imm_cmd(
178 IPA_IMM_CMD_DMA_SHARED_MEM,
179 &dma_cmd_hdr, false);
180 if (!hdr_cmd_pyld) {
181 IPAERR("fail construct dma_shared_mem cmd\n");
182 goto end;
183 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -0700184 desc[0].opcode = hdr_cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +0300185 desc[0].pyld = hdr_cmd_pyld->data;
186 desc[0].len = hdr_cmd_pyld->len;
187 }
188 } else {
189 if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
190 IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
191 IPA_MEM_PART(apps_hdr_size_ddr));
192 goto end;
193 } else {
194 hdr_init_cmd.hdr_table_addr = hdr_mem.phys_base;
195 hdr_cmd_pyld = ipahal_construct_imm_cmd(
196 IPA_IMM_CMD_HDR_INIT_SYSTEM,
197 &hdr_init_cmd, false);
198 if (!hdr_cmd_pyld) {
199 IPAERR("fail construct hdr_init_system cmd\n");
200 goto end;
201 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -0700202 desc[0].opcode = hdr_cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +0300203 desc[0].pyld = hdr_cmd_pyld->data;
204 desc[0].len = hdr_cmd_pyld->len;
205 }
206 }
207 desc[0].type = IPA_IMM_CMD_DESC;
208 IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);
209
210 proc_ctx_size = IPA_MEM_PART(apps_hdr_proc_ctx_size);
211 proc_ctx_ofst = IPA_MEM_PART(apps_hdr_proc_ctx_ofst);
212 if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
213 if (aligned_ctx_mem.size > proc_ctx_size) {
214 IPAERR("tbl too big needed %d avail %d\n",
215 aligned_ctx_mem.size,
216 proc_ctx_size);
217 goto end;
218 } else {
219 dma_cmd_ctx.is_read = false; /* Write operation */
220 dma_cmd_ctx.skip_pipeline_clear = false;
221 dma_cmd_ctx.pipeline_clear_options = IPAHAL_HPS_CLEAR;
222 dma_cmd_ctx.system_addr = aligned_ctx_mem.phys_base;
223 dma_cmd_ctx.size = aligned_ctx_mem.size;
224 dma_cmd_ctx.local_addr =
225 ipa3_ctx->smem_restricted_bytes +
226 proc_ctx_ofst;
227 ctx_cmd_pyld = ipahal_construct_imm_cmd(
228 IPA_IMM_CMD_DMA_SHARED_MEM,
229 &dma_cmd_ctx, false);
230 if (!ctx_cmd_pyld) {
231 IPAERR("fail construct dma_shared_mem cmd\n");
232 goto end;
233 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -0700234 desc[1].opcode = ctx_cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +0300235 desc[1].pyld = ctx_cmd_pyld->data;
236 desc[1].len = ctx_cmd_pyld->len;
237 }
238 } else {
239 proc_ctx_size_ddr = IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
240 if (aligned_ctx_mem.size > proc_ctx_size_ddr) {
241 IPAERR("tbl too big, needed %d avail %d\n",
242 aligned_ctx_mem.size,
243 proc_ctx_size_ddr);
244 goto end;
245 } else {
246 reg_write_cmd.skip_pipeline_clear = false;
247 reg_write_cmd.pipeline_clear_options =
248 IPAHAL_HPS_CLEAR;
249 reg_write_cmd.offset =
250 ipahal_get_reg_ofst(
251 IPA_SYS_PKT_PROC_CNTXT_BASE);
252 reg_write_cmd.value = aligned_ctx_mem.phys_base;
253 reg_write_cmd.value_mask =
254 ~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1);
255 ctx_cmd_pyld = ipahal_construct_imm_cmd(
256 IPA_IMM_CMD_REGISTER_WRITE,
257 &reg_write_cmd, false);
258 if (!ctx_cmd_pyld) {
259 IPAERR("fail construct register_write cmd\n");
260 goto end;
261 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -0700262 desc[1].opcode = ctx_cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +0300263 desc[1].pyld = ctx_cmd_pyld->data;
264 desc[1].len = ctx_cmd_pyld->len;
265 }
266 }
267 desc[1].type = IPA_IMM_CMD_DESC;
268 IPA_DUMP_BUFF(ctx_mem.base, ctx_mem.phys_base, ctx_mem.size);
269
270 if (ipa3_send_cmd(2, desc))
271 IPAERR("fail to send immediate command\n");
272 else
273 rc = 0;
274
275 if (ipa3_ctx->hdr_tbl_lcl) {
276 dma_free_coherent(ipa3_ctx->pdev, hdr_mem.size, hdr_mem.base,
277 hdr_mem.phys_base);
278 } else {
279 if (!rc) {
280 if (ipa3_ctx->hdr_mem.phys_base)
281 dma_free_coherent(ipa3_ctx->pdev,
282 ipa3_ctx->hdr_mem.size,
283 ipa3_ctx->hdr_mem.base,
284 ipa3_ctx->hdr_mem.phys_base);
285 ipa3_ctx->hdr_mem = hdr_mem;
286 }
287 }
288
289 if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
290 dma_free_coherent(ipa3_ctx->pdev, ctx_mem.size, ctx_mem.base,
291 ctx_mem.phys_base);
292 } else {
293 if (!rc) {
294 if (ipa3_ctx->hdr_proc_ctx_mem.phys_base)
295 dma_free_coherent(ipa3_ctx->pdev,
296 ipa3_ctx->hdr_proc_ctx_mem.size,
297 ipa3_ctx->hdr_proc_ctx_mem.base,
298 ipa3_ctx->hdr_proc_ctx_mem.phys_base);
299 ipa3_ctx->hdr_proc_ctx_mem = ctx_mem;
300 }
301 }
302
303end:
304 if (ctx_cmd_pyld)
305 ipahal_destroy_imm_cmd(ctx_cmd_pyld);
306
307 if (hdr_cmd_pyld)
308 ipahal_destroy_imm_cmd(hdr_cmd_pyld);
309
310 return rc;
311}
312
313static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
314 bool add_ref_hdr)
315{
316 struct ipa3_hdr_entry *hdr_entry;
317 struct ipa3_hdr_proc_ctx_entry *entry;
318 struct ipa3_hdr_proc_ctx_offset_entry *offset;
319 u32 bin;
320 struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;
321 int id;
322 int needed_len;
323 int mem_size;
324
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200325 IPADBG_LOW("Add processing type %d hdr_hdl %d\n",
Amir Levy9659e592016-10-27 18:08:27 +0300326 proc_ctx->type, proc_ctx->hdr_hdl);
327
328 if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
329 IPAERR("invalid processing type %d\n", proc_ctx->type);
330 return -EINVAL;
331 }
332
333 hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl);
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200334 if (!hdr_entry) {
Amir Levy9659e592016-10-27 18:08:27 +0300335 IPAERR("hdr_hdl is invalid\n");
336 return -EINVAL;
337 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200338 if (hdr_entry->cookie != IPA_COOKIE) {
339 IPAERR("Invalid header cookie %u\n", hdr_entry->cookie);
340 WARN_ON(1);
341 return -EINVAL;
342 }
343 IPADBG("Associated header is name=%s is_hdr_proc_ctx=%d\n",
344 hdr_entry->name, hdr_entry->is_hdr_proc_ctx);
Amir Levy9659e592016-10-27 18:08:27 +0300345
346 entry = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_cache, GFP_KERNEL);
347 if (!entry) {
348 IPAERR("failed to alloc proc_ctx object\n");
349 return -ENOMEM;
350 }
351
352 INIT_LIST_HEAD(&entry->link);
353
354 entry->type = proc_ctx->type;
355 entry->hdr = hdr_entry;
356 if (add_ref_hdr)
357 hdr_entry->ref_cnt++;
358 entry->cookie = IPA_COOKIE;
359
360 needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type);
361
362 if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0]) {
363 bin = IPA_HDR_PROC_CTX_BIN0;
364 } else if (needed_len <=
365 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
366 bin = IPA_HDR_PROC_CTX_BIN1;
367 } else {
368 IPAERR("unexpected needed len %d\n", needed_len);
369 WARN_ON(1);
370 goto bad_len;
371 }
372
373 mem_size = (ipa3_ctx->hdr_proc_ctx_tbl_lcl) ?
374 IPA_MEM_PART(apps_hdr_proc_ctx_size) :
375 IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
Amir Levy9659e592016-10-27 18:08:27 +0300376 if (list_empty(&htbl->head_free_offset_list[bin])) {
Skylar Changd8b80fe2017-06-08 15:47:22 -0700377 if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
378 IPAERR("hdr proc ctx table overflow\n");
379 goto bad_len;
380 }
381
Amir Levy9659e592016-10-27 18:08:27 +0300382 offset = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_offset_cache,
383 GFP_KERNEL);
384 if (!offset) {
385 IPAERR("failed to alloc offset object\n");
386 goto bad_len;
387 }
388 INIT_LIST_HEAD(&offset->link);
389 /*
390 * for a first item grow, set the bin and offset which are set
391 * in stone
392 */
393 offset->offset = htbl->end;
394 offset->bin = bin;
395 htbl->end += ipa_hdr_proc_ctx_bin_sz[bin];
396 list_add(&offset->link,
397 &htbl->head_offset_list[bin]);
398 } else {
399 /* get the first free slot */
400 offset =
401 list_first_entry(&htbl->head_free_offset_list[bin],
402 struct ipa3_hdr_proc_ctx_offset_entry, link);
403 list_move(&offset->link, &htbl->head_offset_list[bin]);
404 }
405
406 entry->offset_entry = offset;
407 list_add(&entry->link, &htbl->head_proc_ctx_entry_list);
408 htbl->proc_ctx_cnt++;
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200409 IPADBG("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
Amir Levy9659e592016-10-27 18:08:27 +0300410 htbl->proc_ctx_cnt, offset->offset);
411
412 id = ipa3_id_alloc(entry);
413 if (id < 0) {
414 IPAERR("failed to alloc id\n");
415 WARN_ON(1);
416 }
417 entry->id = id;
418 proc_ctx->proc_ctx_hdl = id;
419 entry->ref_cnt++;
420
421 return 0;
422
423bad_len:
424 if (add_ref_hdr)
425 hdr_entry->ref_cnt--;
426 entry->cookie = 0;
427 kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);
428 return -EPERM;
429}
430
431
432static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
433{
434 struct ipa3_hdr_entry *entry;
435 struct ipa_hdr_offset_entry *offset;
436 u32 bin;
437 struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
438 int id;
439 int mem_size;
440
441 if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
442 IPAERR("bad parm\n");
443 goto error;
444 }
445
446 if (!HDR_TYPE_IS_VALID(hdr->type)) {
447 IPAERR("invalid hdr type %d\n", hdr->type);
448 goto error;
449 }
450
451 entry = kmem_cache_zalloc(ipa3_ctx->hdr_cache, GFP_KERNEL);
452 if (!entry) {
453 IPAERR("failed to alloc hdr object\n");
454 goto error;
455 }
456
457 INIT_LIST_HEAD(&entry->link);
458
459 memcpy(entry->hdr, hdr->hdr, hdr->hdr_len);
460 entry->hdr_len = hdr->hdr_len;
461 strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX);
462 entry->is_partial = hdr->is_partial;
463 entry->type = hdr->type;
464 entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
465 entry->eth2_ofst = hdr->eth2_ofst;
466 entry->cookie = IPA_COOKIE;
467
468 if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
469 bin = IPA_HDR_BIN0;
470 else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1])
471 bin = IPA_HDR_BIN1;
472 else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2])
473 bin = IPA_HDR_BIN2;
474 else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3])
475 bin = IPA_HDR_BIN3;
476 else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
477 bin = IPA_HDR_BIN4;
478 else {
479 IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
480 goto bad_hdr_len;
481 }
482
483 mem_size = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_size) :
484 IPA_MEM_PART(apps_hdr_size_ddr);
485
Skylar Changd8b80fe2017-06-08 15:47:22 -0700486 if (list_empty(&htbl->head_free_offset_list[bin])) {
487 /* if header does not fit to table, place it in DDR */
488 if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) {
489 entry->is_hdr_proc_ctx = true;
490 entry->phys_base = dma_map_single(ipa3_ctx->pdev,
491 entry->hdr,
492 entry->hdr_len,
493 DMA_TO_DEVICE);
494 if (dma_mapping_error(ipa3_ctx->pdev,
495 entry->phys_base)) {
496 IPAERR("dma_map_single failure for entry\n");
497 goto fail_dma_mapping;
498 }
499 } else {
500 entry->is_hdr_proc_ctx = false;
Amir Levy9659e592016-10-27 18:08:27 +0300501 offset = kmem_cache_zalloc(ipa3_ctx->hdr_offset_cache,
502 GFP_KERNEL);
503 if (!offset) {
504 IPAERR("failed to alloc hdr offset object\n");
505 goto bad_hdr_len;
506 }
507 INIT_LIST_HEAD(&offset->link);
508 /*
509 * for a first item grow, set the bin and offset which
510 * are set in stone
511 */
512 offset->offset = htbl->end;
513 offset->bin = bin;
514 htbl->end += ipa_hdr_bin_sz[bin];
515 list_add(&offset->link,
516 &htbl->head_offset_list[bin]);
Skylar Changd8b80fe2017-06-08 15:47:22 -0700517 entry->offset_entry = offset;
Amir Levy9659e592016-10-27 18:08:27 +0300518 }
Skylar Changd8b80fe2017-06-08 15:47:22 -0700519 } else {
520 entry->is_hdr_proc_ctx = false;
521 /* get the first free slot */
522 offset = list_first_entry(&htbl->head_free_offset_list[bin],
523 struct ipa_hdr_offset_entry, link);
524 list_move(&offset->link, &htbl->head_offset_list[bin]);
Amir Levy9659e592016-10-27 18:08:27 +0300525 entry->offset_entry = offset;
526 }
527
528 list_add(&entry->link, &htbl->head_hdr_entry_list);
529 htbl->hdr_cnt++;
530 if (entry->is_hdr_proc_ctx)
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200531 IPADBG("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
Amir Levy9659e592016-10-27 18:08:27 +0300532 hdr->hdr_len,
533 htbl->hdr_cnt,
534 &entry->phys_base);
535 else
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200536 IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
Amir Levy9659e592016-10-27 18:08:27 +0300537 hdr->hdr_len,
538 htbl->hdr_cnt,
539 entry->offset_entry->offset);
540
541 id = ipa3_id_alloc(entry);
542 if (id < 0) {
543 IPAERR("failed to alloc id\n");
544 WARN_ON(1);
545 }
546 entry->id = id;
547 hdr->hdr_hdl = id;
548 entry->ref_cnt++;
549
550 if (entry->is_hdr_proc_ctx) {
551 struct ipa_hdr_proc_ctx_add proc_ctx;
552
553 IPADBG("adding processing context for header %s\n", hdr->name);
554 proc_ctx.type = IPA_HDR_PROC_NONE;
555 proc_ctx.hdr_hdl = id;
556 if (__ipa_add_hdr_proc_ctx(&proc_ctx, false)) {
557 IPAERR("failed to add hdr proc ctx\n");
558 goto fail_add_proc_ctx;
559 }
560 entry->proc_ctx = ipa3_id_find(proc_ctx.proc_ctx_hdl);
561 }
562
563 return 0;
564
565fail_add_proc_ctx:
566 entry->ref_cnt--;
567 hdr->hdr_hdl = 0;
568 ipa3_id_remove(id);
569 htbl->hdr_cnt--;
570 list_del(&entry->link);
571 dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
572 entry->hdr_len, DMA_TO_DEVICE);
Utkarsh Saxenae4166a72017-05-22 13:21:55 +0530573fail_dma_mapping:
574 entry->is_hdr_proc_ctx = false;
575
Amir Levy9659e592016-10-27 18:08:27 +0300576bad_hdr_len:
577 entry->cookie = 0;
578 kmem_cache_free(ipa3_ctx->hdr_cache, entry);
579error:
580 return -EPERM;
581}
582
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200583static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl,
584 bool release_hdr, bool by_user)
Amir Levy9659e592016-10-27 18:08:27 +0300585{
586 struct ipa3_hdr_proc_ctx_entry *entry;
587 struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;
588
589 entry = ipa3_id_find(proc_ctx_hdl);
590 if (!entry || (entry->cookie != IPA_COOKIE)) {
591 IPAERR("bad parm\n");
592 return -EINVAL;
593 }
594
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200595 IPADBG("del proc ctx cnt=%d ofst=%d\n",
Amir Levy9659e592016-10-27 18:08:27 +0300596 htbl->proc_ctx_cnt, entry->offset_entry->offset);
597
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200598 if (by_user && entry->user_deleted) {
599 IPAERR("proc_ctx already deleted by user\n");
600 return -EINVAL;
601 }
602
603 if (by_user)
604 entry->user_deleted = true;
605
Amir Levy9659e592016-10-27 18:08:27 +0300606 if (--entry->ref_cnt) {
607 IPADBG("proc_ctx_hdl %x ref_cnt %d\n",
608 proc_ctx_hdl, entry->ref_cnt);
609 return 0;
610 }
611
612 if (release_hdr)
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200613 __ipa3_del_hdr(entry->hdr->id, false);
Amir Levy9659e592016-10-27 18:08:27 +0300614
615 /* move the offset entry to appropriate free list */
616 list_move(&entry->offset_entry->link,
617 &htbl->head_free_offset_list[entry->offset_entry->bin]);
618 list_del(&entry->link);
619 htbl->proc_ctx_cnt--;
620 entry->cookie = 0;
621 kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);
622
623 /* remove the handle from the database */
624 ipa3_id_remove(proc_ctx_hdl);
625
626 return 0;
627}
628
629
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200630int __ipa3_del_hdr(u32 hdr_hdl, bool by_user)
Amir Levy9659e592016-10-27 18:08:27 +0300631{
632 struct ipa3_hdr_entry *entry;
633 struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
634
635 entry = ipa3_id_find(hdr_hdl);
636 if (entry == NULL) {
637 IPAERR("lookup failed\n");
638 return -EINVAL;
639 }
640
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200641 if (entry->cookie != IPA_COOKIE) {
Amir Levy9659e592016-10-27 18:08:27 +0300642 IPAERR("bad parm\n");
643 return -EINVAL;
644 }
645
646 if (entry->is_hdr_proc_ctx)
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200647 IPADBG("del hdr of len=%d hdr_cnt=%d phys_base=%pa\n",
Amir Levy9659e592016-10-27 18:08:27 +0300648 entry->hdr_len, htbl->hdr_cnt, &entry->phys_base);
649 else
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200650 IPADBG("del hdr of len=%d hdr_cnt=%d ofst=%d\n",
651 entry->hdr_len, htbl->hdr_cnt,
652 entry->offset_entry->offset);
Amir Levy9659e592016-10-27 18:08:27 +0300653
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200654 if (by_user && entry->user_deleted) {
655 IPAERR("proc_ctx already deleted by user\n");
656 return -EINVAL;
657 }
658
659 if (by_user)
660 entry->user_deleted = true;
661
Amir Levy9659e592016-10-27 18:08:27 +0300662 if (--entry->ref_cnt) {
663 IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt);
664 return 0;
665 }
666
667 if (entry->is_hdr_proc_ctx) {
668 dma_unmap_single(ipa3_ctx->pdev,
669 entry->phys_base,
670 entry->hdr_len,
671 DMA_TO_DEVICE);
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200672 __ipa3_del_hdr_proc_ctx(entry->proc_ctx->id, false, false);
Amir Levy9659e592016-10-27 18:08:27 +0300673 } else {
674 /* move the offset entry to appropriate free list */
675 list_move(&entry->offset_entry->link,
676 &htbl->head_free_offset_list[entry->offset_entry->bin]);
677 }
678 list_del(&entry->link);
679 htbl->hdr_cnt--;
680 entry->cookie = 0;
681 kmem_cache_free(ipa3_ctx->hdr_cache, entry);
682
683 /* remove the handle from the database */
684 ipa3_id_remove(hdr_hdl);
685
686 return 0;
687}
688
689/**
690 * ipa3_add_hdr() - add the specified headers to SW and optionally commit them
691 * to IPA HW
692 * @hdrs: [inout] set of headers to add
693 *
694 * Returns: 0 on success, negative on failure
695 *
696 * Note: Should not be called from atomic context
697 */
698int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs)
699{
700 int i;
701 int result = -EFAULT;
702
703 if (hdrs == NULL || hdrs->num_hdrs == 0) {
704 IPAERR("bad parm\n");
705 return -EINVAL;
706 }
707
708 mutex_lock(&ipa3_ctx->lock);
709 IPADBG("adding %d headers to IPA driver internal data struct\n",
710 hdrs->num_hdrs);
711 for (i = 0; i < hdrs->num_hdrs; i++) {
712 if (__ipa_add_hdr(&hdrs->hdr[i])) {
713 IPAERR("failed to add hdr %d\n", i);
714 hdrs->hdr[i].status = -1;
715 } else {
716 hdrs->hdr[i].status = 0;
717 }
718 }
719
720 if (hdrs->commit) {
721 IPADBG("committing all headers to IPA core");
722 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
723 result = -EPERM;
724 goto bail;
725 }
726 }
727 result = 0;
728bail:
729 mutex_unlock(&ipa3_ctx->lock);
730 return result;
731}
732
733/**
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200734 * ipa3_del_hdr_by_user() - Remove the specified headers
735 * from SW and optionally commit them to IPA HW
Amir Levy9659e592016-10-27 18:08:27 +0300736 * @hdls: [inout] set of headers to delete
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200737 * @by_user: Operation requested by user?
Amir Levy9659e592016-10-27 18:08:27 +0300738 *
739 * Returns: 0 on success, negative on failure
740 *
741 * Note: Should not be called from atomic context
742 */
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200743int ipa3_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user)
Amir Levy9659e592016-10-27 18:08:27 +0300744{
745 int i;
746 int result = -EFAULT;
747
748 if (hdls == NULL || hdls->num_hdls == 0) {
749 IPAERR("bad parm\n");
750 return -EINVAL;
751 }
752
753 mutex_lock(&ipa3_ctx->lock);
754 for (i = 0; i < hdls->num_hdls; i++) {
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200755 if (__ipa3_del_hdr(hdls->hdl[i].hdl, by_user)) {
Amir Levy9659e592016-10-27 18:08:27 +0300756 IPAERR("failed to del hdr %i\n", i);
757 hdls->hdl[i].status = -1;
758 } else {
759 hdls->hdl[i].status = 0;
760 }
761 }
762
763 if (hdls->commit) {
764 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
765 result = -EPERM;
766 goto bail;
767 }
768 }
769 result = 0;
770bail:
771 mutex_unlock(&ipa3_ctx->lock);
772 return result;
773}
774
/**
 * ipa3_del_hdr() - Remove the specified headers from SW
 * and optionally commit them to IPA HW
 * @hdls: [inout] set of headers to delete
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls)
{
	/* kernel-internal deletions are not "by user" */
	return ipa3_del_hdr_by_user(hdls, false);
}
788
789/**
Amir Levy9659e592016-10-27 18:08:27 +0300790 * ipa3_add_hdr_proc_ctx() - add the specified headers to SW
791 * and optionally commit them to IPA HW
792 * @proc_ctxs: [inout] set of processing context headers to add
793 *
794 * Returns: 0 on success, negative on failure
795 *
796 * Note: Should not be called from atomic context
797 */
798int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
799{
800 int i;
801 int result = -EFAULT;
802
803 if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
804 IPAERR("bad parm\n");
805 return -EINVAL;
806 }
807
808 mutex_lock(&ipa3_ctx->lock);
809 IPADBG("adding %d header processing contextes to IPA driver\n",
810 proc_ctxs->num_proc_ctxs);
811 for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
812 if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
813 IPAERR("failed to add hdr pric ctx %d\n", i);
814 proc_ctxs->proc_ctx[i].status = -1;
815 } else {
816 proc_ctxs->proc_ctx[i].status = 0;
817 }
818 }
819
820 if (proc_ctxs->commit) {
821 IPADBG("committing all headers to IPA core");
822 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
823 result = -EPERM;
824 goto bail;
825 }
826 }
827 result = 0;
828bail:
829 mutex_unlock(&ipa3_ctx->lock);
830 return result;
831}
832
833/**
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200834 * ipa3_del_hdr_proc_ctx_by_user() -
Amir Levy9659e592016-10-27 18:08:27 +0300835 * Remove the specified processing context headers from SW and
836 * optionally commit them to IPA HW.
837 * @hdls: [inout] set of processing context headers to delete
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200838 * @by_user: Operation requested by user?
Amir Levy9659e592016-10-27 18:08:27 +0300839 *
840 * Returns: 0 on success, negative on failure
841 *
842 * Note: Should not be called from atomic context
843 */
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200844int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls,
845 bool by_user)
Amir Levy9659e592016-10-27 18:08:27 +0300846{
847 int i;
848 int result;
849
850 if (hdls == NULL || hdls->num_hdls == 0) {
851 IPAERR("bad parm\n");
852 return -EINVAL;
853 }
854
855 mutex_lock(&ipa3_ctx->lock);
856 for (i = 0; i < hdls->num_hdls; i++) {
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200857 if (__ipa3_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) {
Amir Levy9659e592016-10-27 18:08:27 +0300858 IPAERR("failed to del hdr %i\n", i);
859 hdls->hdl[i].status = -1;
860 } else {
861 hdls->hdl[i].status = 0;
862 }
863 }
864
865 if (hdls->commit) {
866 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
867 result = -EPERM;
868 goto bail;
869 }
870 }
871 result = 0;
872bail:
873 mutex_unlock(&ipa3_ctx->lock);
874 return result;
875}
876
/**
 * ipa3_del_hdr_proc_ctx() -
 * Remove the specified processing context headers from SW and
 * optionally commit them to IPA HW.
 * @hdls: [inout] set of processing context headers to delete
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
{
	/* kernel-internal deletions are not "by user" */
	return ipa3_del_hdr_proc_ctx_by_user(hdls, false);
}
891
892/**
Amir Levy9659e592016-10-27 18:08:27 +0300893 * ipa3_commit_hdr() - commit to IPA HW the current header table in SW
894 *
895 * Returns: 0 on success, negative on failure
896 *
897 * Note: Should not be called from atomic context
898 */
899int ipa3_commit_hdr(void)
900{
901 int result = -EFAULT;
902
903 /*
904 * issue a commit on the routing module since routing rules point to
905 * header table entries
906 */
907 if (ipa3_commit_rt(IPA_IP_v4))
908 return -EPERM;
909 if (ipa3_commit_rt(IPA_IP_v6))
910 return -EPERM;
911
912 mutex_lock(&ipa3_ctx->lock);
913 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
914 result = -EPERM;
915 goto bail;
916 }
917 result = 0;
918bail:
919 mutex_unlock(&ipa3_ctx->lock);
920 return result;
921}
922
923/**
924 * ipa3_reset_hdr() - reset the current header table in SW (does not commit to
925 * HW)
926 *
927 * Returns: 0 on success, negative on failure
928 *
929 * Note: Should not be called from atomic context
930 */
931int ipa3_reset_hdr(void)
932{
933 struct ipa3_hdr_entry *entry;
934 struct ipa3_hdr_entry *next;
935 struct ipa3_hdr_proc_ctx_entry *ctx_entry;
936 struct ipa3_hdr_proc_ctx_entry *ctx_next;
937 struct ipa_hdr_offset_entry *off_entry;
938 struct ipa_hdr_offset_entry *off_next;
939 struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_entry;
940 struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_next;
941 int i;
942
943 /*
944 * issue a reset on the routing module since routing rules point to
945 * header table entries
946 */
947 if (ipa3_reset_rt(IPA_IP_v4))
948 IPAERR("fail to reset v4 rt\n");
949 if (ipa3_reset_rt(IPA_IP_v6))
950 IPAERR("fail to reset v4 rt\n");
951
952 mutex_lock(&ipa3_ctx->lock);
953 IPADBG("reset hdr\n");
954 list_for_each_entry_safe(entry, next,
955 &ipa3_ctx->hdr_tbl.head_hdr_entry_list, link) {
956
957 /* do not remove the default header */
958 if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
959 if (entry->is_hdr_proc_ctx) {
960 IPAERR("default header is proc ctx\n");
961 mutex_unlock(&ipa3_ctx->lock);
962 WARN_ON(1);
963 return -EFAULT;
964 }
965 continue;
966 }
967
968 if (ipa3_id_find(entry->id) == NULL) {
969 mutex_unlock(&ipa3_ctx->lock);
970 WARN_ON(1);
971 return -EFAULT;
972 }
973 if (entry->is_hdr_proc_ctx) {
974 dma_unmap_single(ipa3_ctx->pdev,
975 entry->phys_base,
976 entry->hdr_len,
977 DMA_TO_DEVICE);
978 entry->proc_ctx = NULL;
979 }
980 list_del(&entry->link);
981 entry->ref_cnt = 0;
982 entry->cookie = 0;
983
984 /* remove the handle from the database */
985 ipa3_id_remove(entry->id);
986 kmem_cache_free(ipa3_ctx->hdr_cache, entry);
987
988 }
989 for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
990 list_for_each_entry_safe(off_entry, off_next,
991 &ipa3_ctx->hdr_tbl.head_offset_list[i],
992 link) {
993
994 /*
995 * do not remove the default exception header which is
996 * at offset 0
997 */
998 if (off_entry->offset == 0)
999 continue;
1000
1001 list_del(&off_entry->link);
1002 kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry);
1003 }
1004 list_for_each_entry_safe(off_entry, off_next,
1005 &ipa3_ctx->hdr_tbl.head_free_offset_list[i],
1006 link) {
1007 list_del(&off_entry->link);
1008 kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry);
1009 }
1010 }
1011 /* there is one header of size 8 */
1012 ipa3_ctx->hdr_tbl.end = 8;
1013 ipa3_ctx->hdr_tbl.hdr_cnt = 1;
1014
1015 IPADBG("reset hdr proc ctx\n");
1016 list_for_each_entry_safe(
1017 ctx_entry,
1018 ctx_next,
1019 &ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
1020 link) {
1021
1022 if (ipa3_id_find(ctx_entry->id) == NULL) {
1023 mutex_unlock(&ipa3_ctx->lock);
1024 WARN_ON(1);
1025 return -EFAULT;
1026 }
1027 list_del(&ctx_entry->link);
1028 ctx_entry->ref_cnt = 0;
1029 ctx_entry->cookie = 0;
1030
1031 /* remove the handle from the database */
1032 ipa3_id_remove(ctx_entry->id);
1033 kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, ctx_entry);
1034
1035 }
1036 for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
1037 list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
1038 &ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i],
1039 link) {
1040
1041 list_del(&ctx_off_entry->link);
1042 kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache,
1043 ctx_off_entry);
1044 }
1045 list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
1046 &ipa3_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i],
1047 link) {
1048 list_del(&ctx_off_entry->link);
1049 kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache,
1050 ctx_off_entry);
1051 }
1052 }
1053 ipa3_ctx->hdr_proc_ctx_tbl.end = 0;
1054 ipa3_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0;
1055 mutex_unlock(&ipa3_ctx->lock);
1056
1057 return 0;
1058}
1059
1060static struct ipa3_hdr_entry *__ipa_find_hdr(const char *name)
1061{
1062 struct ipa3_hdr_entry *entry;
1063
1064 if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
1065 IPAERR("Header name too long: %s\n", name);
1066 return NULL;
1067 }
1068
1069 list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
1070 link) {
1071 if (!strcmp(name, entry->name))
1072 return entry;
1073 }
1074
1075 return NULL;
1076}
1077
1078/**
1079 * ipa3_get_hdr() - Lookup the specified header resource
1080 * @lookup: [inout] header to lookup and its handle
1081 *
1082 * lookup the specified header resource and return handle if it exists
1083 *
1084 * Returns: 0 on success, negative on failure
1085 *
1086 * Note: Should not be called from atomic context
1087 * Caller should call ipa3_put_hdr later if this function succeeds
1088 */
1089int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup)
1090{
1091 struct ipa3_hdr_entry *entry;
1092 int result = -1;
1093
1094 if (lookup == NULL) {
1095 IPAERR("bad parm\n");
1096 return -EINVAL;
1097 }
1098 mutex_lock(&ipa3_ctx->lock);
1099 entry = __ipa_find_hdr(lookup->name);
1100 if (entry) {
1101 lookup->hdl = entry->id;
1102 result = 0;
1103 }
1104 mutex_unlock(&ipa3_ctx->lock);
1105
1106 return result;
1107}
1108
1109/**
1110 * __ipa3_release_hdr() - drop reference to header and cause
1111 * deletion if reference count permits
1112 * @hdr_hdl: [in] handle of header to be released
1113 *
1114 * Returns: 0 on success, negative on failure
1115 */
1116int __ipa3_release_hdr(u32 hdr_hdl)
1117{
1118 int result = 0;
1119
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001120 if (__ipa3_del_hdr(hdr_hdl, false)) {
Amir Levy9659e592016-10-27 18:08:27 +03001121 IPADBG("fail to del hdr %x\n", hdr_hdl);
1122 result = -EFAULT;
1123 goto bail;
1124 }
1125
1126 /* commit for put */
1127 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
1128 IPAERR("fail to commit hdr\n");
1129 result = -EFAULT;
1130 goto bail;
1131 }
1132
1133bail:
1134 return result;
1135}
1136
1137/**
1138 * __ipa3_release_hdr_proc_ctx() - drop reference to processing context
1139 * and cause deletion if reference count permits
1140 * @proc_ctx_hdl: [in] handle of processing context to be released
1141 *
1142 * Returns: 0 on success, negative on failure
1143 */
1144int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl)
1145{
1146 int result = 0;
1147
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001148 if (__ipa3_del_hdr_proc_ctx(proc_ctx_hdl, true, false)) {
Amir Levy9659e592016-10-27 18:08:27 +03001149 IPADBG("fail to del hdr %x\n", proc_ctx_hdl);
1150 result = -EFAULT;
1151 goto bail;
1152 }
1153
1154 /* commit for put */
1155 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
1156 IPAERR("fail to commit hdr\n");
1157 result = -EFAULT;
1158 goto bail;
1159 }
1160
1161bail:
1162 return result;
1163}
1164
1165/**
1166 * ipa3_put_hdr() - Release the specified header handle
1167 * @hdr_hdl: [in] the header handle to release
1168 *
1169 * Returns: 0 on success, negative on failure
1170 *
1171 * Note: Should not be called from atomic context
1172 */
1173int ipa3_put_hdr(u32 hdr_hdl)
1174{
1175 struct ipa3_hdr_entry *entry;
1176 int result = -EFAULT;
1177
1178 mutex_lock(&ipa3_ctx->lock);
1179
1180 entry = ipa3_id_find(hdr_hdl);
1181 if (entry == NULL) {
1182 IPAERR("lookup failed\n");
1183 result = -EINVAL;
1184 goto bail;
1185 }
1186
1187 if (entry->cookie != IPA_COOKIE) {
1188 IPAERR("invalid header entry\n");
1189 result = -EINVAL;
1190 goto bail;
1191 }
1192
1193 result = 0;
1194bail:
1195 mutex_unlock(&ipa3_ctx->lock);
1196 return result;
1197}
1198
1199/**
1200 * ipa3_copy_hdr() - Lookup the specified header resource and return a copy of
1201 * it
1202 * @copy: [inout] header to lookup and its copy
1203 *
1204 * lookup the specified header resource and return a copy of it (along with its
1205 * attributes) if it exists, this would be called for partial headers
1206 *
1207 * Returns: 0 on success, negative on failure
1208 *
1209 * Note: Should not be called from atomic context
1210 */
1211int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy)
1212{
1213 struct ipa3_hdr_entry *entry;
1214 int result = -EFAULT;
1215
1216 if (copy == NULL) {
1217 IPAERR("bad parm\n");
1218 return -EINVAL;
1219 }
1220 mutex_lock(&ipa3_ctx->lock);
1221 entry = __ipa_find_hdr(copy->name);
1222 if (entry) {
1223 memcpy(copy->hdr, entry->hdr, entry->hdr_len);
1224 copy->hdr_len = entry->hdr_len;
1225 copy->type = entry->type;
1226 copy->is_partial = entry->is_partial;
1227 copy->is_eth2_ofst_valid = entry->is_eth2_ofst_valid;
1228 copy->eth2_ofst = entry->eth2_ofst;
1229 result = 0;
1230 }
1231 mutex_unlock(&ipa3_ctx->lock);
1232
1233 return result;
1234}