blob: 69db99ad3ab903623e604373b29b3a4fad2e3fd0 [file] [log] [blame]
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Amir Levy9659e592016-10-27 18:08:27 +03002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include "ipa_i.h"
14#include "ipahal/ipahal.h"
15
/* Per-bin slot sizes (bytes) for headers stored in the header table */
static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 60};
/* Per-bin slot sizes (bytes) for processing-context table entries */
static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64};

/* true iff @type is a valid L2 header type enum value */
#define HDR_TYPE_IS_VALID(type) \
	((type) >= 0 && (type) < IPA_HDR_L2_MAX)

/* true iff @type is a valid header-processing type enum value */
#define HDR_PROC_TYPE_IS_VALID(type) \
	((type) >= 0 && (type) < IPA_HDR_PROC_MAX)
24
25/**
26 * ipa3_generate_hdr_hw_tbl() - generates the headers table
27 * @mem: [out] buffer to put the header table
28 *
29 * Returns: 0 on success, negative on failure
30 */
31static int ipa3_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
32{
33 struct ipa3_hdr_entry *entry;
34
35 mem->size = ipa3_ctx->hdr_tbl.end;
36
37 if (mem->size == 0) {
38 IPAERR("hdr tbl empty\n");
39 return -EPERM;
40 }
41 IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_tbl.end);
42
43 mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
44 &mem->phys_base, GFP_KERNEL);
45 if (!mem->base) {
46 IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
47 return -ENOMEM;
48 }
49
50 memset(mem->base, 0, mem->size);
51 list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
52 link) {
53 if (entry->is_hdr_proc_ctx)
54 continue;
55 IPADBG_LOW("hdr of len %d ofst=%d\n", entry->hdr_len,
56 entry->offset_entry->offset);
57 ipahal_cp_hdr_to_hw_buff(mem->base, entry->offset_entry->offset,
58 entry->hdr, entry->hdr_len);
59 }
60
61 return 0;
62}
63
/*
 * ipa3_hdr_proc_ctx_to_hw_format() - serialize all processing-context
 * entries into the (already allocated and zeroed) buffer @mem.
 * @mem: destination buffer holding the proc-ctx table image
 * @hdr_base_addr: base address of the header table the contexts point at
 *
 * Each entry is written at its pre-assigned offset via the HAL, which
 * resolves the referenced header either by its DMA address (when the
 * header itself lives in system memory) or by table offset.
 *
 * Returns: 0 on success, negative on the first HAL copy failure
 */
static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
	u32 hdr_base_addr)
{
	struct ipa3_hdr_proc_ctx_entry *entry;
	int ret;

	list_for_each_entry(entry,
		&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
		link) {
		IPADBG_LOW("processing type %d ofst=%d\n",
			entry->type, entry->offset_entry->offset);
		ret = ipahal_cp_proc_ctx_to_hw_buff(entry->type, mem->base,
			entry->offset_entry->offset,
			entry->hdr->hdr_len,
			entry->hdr->is_hdr_proc_ctx,
			entry->hdr->phys_base,
			hdr_base_addr,
			entry->hdr->offset_entry);
		if (ret)
			return ret;
	}

	return 0;
}
88
/**
 * ipa3_generate_hdr_proc_ctx_hw_tbl() -
 * generates the headers processing context table.
 * @hdr_sys_addr: [in] DMA address of the system-memory header table; used
 * as the header base address when the header table is not SRAM-resident
 * @mem: [out] buffer to put the processing context table
 * @aligned_mem: [out] actual processing context table (with alignment).
 * Processing context table needs to be 8 Bytes aligned.
 *
 * Returns: 0 on success, negative on failure
 */
static int ipa3_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr,
	struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem)
{
	u32 hdr_base_addr;

	/* an empty table still gets a minimal 4-byte allocation */
	mem->size = (ipa3_ctx->hdr_proc_ctx_tbl.end) ? : 4;

	/* make sure table is aligned */
	mem->size += IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;

	IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_proc_ctx_tbl.end);

	mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
		&mem->phys_base, GFP_KERNEL);
	if (!mem->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
		return -ENOMEM;
	}

	/* carve the aligned table out of the over-sized allocation */
	aligned_mem->phys_base =
		IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(mem->phys_base);
	aligned_mem->base = mem->base +
		(aligned_mem->phys_base - mem->phys_base);
	aligned_mem->size = mem->size - IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
	memset(aligned_mem->base, 0, aligned_mem->size);
	hdr_base_addr = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_ofst) :
		hdr_sys_addr;
	return ipa3_hdr_proc_ctx_to_hw_format(aligned_mem, hdr_base_addr);
}
127
128/**
129 * __ipa_commit_hdr_v3_0() - Commits the header table from memory to HW
130 *
131 * Returns: 0 on success, negative on failure
132 */
133int __ipa_commit_hdr_v3_0(void)
134{
135 struct ipa3_desc desc[2];
136 struct ipa_mem_buffer hdr_mem;
137 struct ipa_mem_buffer ctx_mem;
138 struct ipa_mem_buffer aligned_ctx_mem;
139 struct ipahal_imm_cmd_dma_shared_mem dma_cmd_hdr = {0};
140 struct ipahal_imm_cmd_dma_shared_mem dma_cmd_ctx = {0};
141 struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
142 struct ipahal_imm_cmd_hdr_init_system hdr_init_cmd = {0};
143 struct ipahal_imm_cmd_pyld *hdr_cmd_pyld = NULL;
144 struct ipahal_imm_cmd_pyld *ctx_cmd_pyld = NULL;
145 int rc = -EFAULT;
146 u32 proc_ctx_size;
147 u32 proc_ctx_ofst;
148 u32 proc_ctx_size_ddr;
149
150 memset(desc, 0, 2 * sizeof(struct ipa3_desc));
151
152 if (ipa3_generate_hdr_hw_tbl(&hdr_mem)) {
153 IPAERR("fail to generate HDR HW TBL\n");
154 goto end;
155 }
156
157 if (ipa3_generate_hdr_proc_ctx_hw_tbl(hdr_mem.phys_base, &ctx_mem,
158 &aligned_ctx_mem)) {
159 IPAERR("fail to generate HDR PROC CTX HW TBL\n");
160 goto end;
161 }
162
163 if (ipa3_ctx->hdr_tbl_lcl) {
164 if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) {
165 IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
166 IPA_MEM_PART(apps_hdr_size));
167 goto end;
168 } else {
169 dma_cmd_hdr.is_read = false; /* write operation */
170 dma_cmd_hdr.skip_pipeline_clear = false;
171 dma_cmd_hdr.pipeline_clear_options = IPAHAL_HPS_CLEAR;
172 dma_cmd_hdr.system_addr = hdr_mem.phys_base;
173 dma_cmd_hdr.size = hdr_mem.size;
174 dma_cmd_hdr.local_addr =
175 ipa3_ctx->smem_restricted_bytes +
176 IPA_MEM_PART(apps_hdr_ofst);
177 hdr_cmd_pyld = ipahal_construct_imm_cmd(
178 IPA_IMM_CMD_DMA_SHARED_MEM,
179 &dma_cmd_hdr, false);
180 if (!hdr_cmd_pyld) {
181 IPAERR("fail construct dma_shared_mem cmd\n");
182 goto end;
183 }
184 desc[0].opcode = ipahal_imm_cmd_get_opcode(
185 IPA_IMM_CMD_DMA_SHARED_MEM);
186 desc[0].pyld = hdr_cmd_pyld->data;
187 desc[0].len = hdr_cmd_pyld->len;
188 }
189 } else {
190 if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
191 IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
192 IPA_MEM_PART(apps_hdr_size_ddr));
193 goto end;
194 } else {
195 hdr_init_cmd.hdr_table_addr = hdr_mem.phys_base;
196 hdr_cmd_pyld = ipahal_construct_imm_cmd(
197 IPA_IMM_CMD_HDR_INIT_SYSTEM,
198 &hdr_init_cmd, false);
199 if (!hdr_cmd_pyld) {
200 IPAERR("fail construct hdr_init_system cmd\n");
201 goto end;
202 }
203 desc[0].opcode = ipahal_imm_cmd_get_opcode(
204 IPA_IMM_CMD_HDR_INIT_SYSTEM);
205 desc[0].pyld = hdr_cmd_pyld->data;
206 desc[0].len = hdr_cmd_pyld->len;
207 }
208 }
209 desc[0].type = IPA_IMM_CMD_DESC;
210 IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);
211
212 proc_ctx_size = IPA_MEM_PART(apps_hdr_proc_ctx_size);
213 proc_ctx_ofst = IPA_MEM_PART(apps_hdr_proc_ctx_ofst);
214 if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
215 if (aligned_ctx_mem.size > proc_ctx_size) {
216 IPAERR("tbl too big needed %d avail %d\n",
217 aligned_ctx_mem.size,
218 proc_ctx_size);
219 goto end;
220 } else {
221 dma_cmd_ctx.is_read = false; /* Write operation */
222 dma_cmd_ctx.skip_pipeline_clear = false;
223 dma_cmd_ctx.pipeline_clear_options = IPAHAL_HPS_CLEAR;
224 dma_cmd_ctx.system_addr = aligned_ctx_mem.phys_base;
225 dma_cmd_ctx.size = aligned_ctx_mem.size;
226 dma_cmd_ctx.local_addr =
227 ipa3_ctx->smem_restricted_bytes +
228 proc_ctx_ofst;
229 ctx_cmd_pyld = ipahal_construct_imm_cmd(
230 IPA_IMM_CMD_DMA_SHARED_MEM,
231 &dma_cmd_ctx, false);
232 if (!ctx_cmd_pyld) {
233 IPAERR("fail construct dma_shared_mem cmd\n");
234 goto end;
235 }
236 desc[1].opcode = ipahal_imm_cmd_get_opcode(
237 IPA_IMM_CMD_DMA_SHARED_MEM);
238 desc[1].pyld = ctx_cmd_pyld->data;
239 desc[1].len = ctx_cmd_pyld->len;
240 }
241 } else {
242 proc_ctx_size_ddr = IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
243 if (aligned_ctx_mem.size > proc_ctx_size_ddr) {
244 IPAERR("tbl too big, needed %d avail %d\n",
245 aligned_ctx_mem.size,
246 proc_ctx_size_ddr);
247 goto end;
248 } else {
249 reg_write_cmd.skip_pipeline_clear = false;
250 reg_write_cmd.pipeline_clear_options =
251 IPAHAL_HPS_CLEAR;
252 reg_write_cmd.offset =
253 ipahal_get_reg_ofst(
254 IPA_SYS_PKT_PROC_CNTXT_BASE);
255 reg_write_cmd.value = aligned_ctx_mem.phys_base;
256 reg_write_cmd.value_mask =
257 ~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1);
258 ctx_cmd_pyld = ipahal_construct_imm_cmd(
259 IPA_IMM_CMD_REGISTER_WRITE,
260 &reg_write_cmd, false);
261 if (!ctx_cmd_pyld) {
262 IPAERR("fail construct register_write cmd\n");
263 goto end;
264 }
265 desc[1].opcode = ipahal_imm_cmd_get_opcode(
266 IPA_IMM_CMD_REGISTER_WRITE);
267 desc[1].pyld = ctx_cmd_pyld->data;
268 desc[1].len = ctx_cmd_pyld->len;
269 }
270 }
271 desc[1].type = IPA_IMM_CMD_DESC;
272 IPA_DUMP_BUFF(ctx_mem.base, ctx_mem.phys_base, ctx_mem.size);
273
274 if (ipa3_send_cmd(2, desc))
275 IPAERR("fail to send immediate command\n");
276 else
277 rc = 0;
278
279 if (ipa3_ctx->hdr_tbl_lcl) {
280 dma_free_coherent(ipa3_ctx->pdev, hdr_mem.size, hdr_mem.base,
281 hdr_mem.phys_base);
282 } else {
283 if (!rc) {
284 if (ipa3_ctx->hdr_mem.phys_base)
285 dma_free_coherent(ipa3_ctx->pdev,
286 ipa3_ctx->hdr_mem.size,
287 ipa3_ctx->hdr_mem.base,
288 ipa3_ctx->hdr_mem.phys_base);
289 ipa3_ctx->hdr_mem = hdr_mem;
290 }
291 }
292
293 if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
294 dma_free_coherent(ipa3_ctx->pdev, ctx_mem.size, ctx_mem.base,
295 ctx_mem.phys_base);
296 } else {
297 if (!rc) {
298 if (ipa3_ctx->hdr_proc_ctx_mem.phys_base)
299 dma_free_coherent(ipa3_ctx->pdev,
300 ipa3_ctx->hdr_proc_ctx_mem.size,
301 ipa3_ctx->hdr_proc_ctx_mem.base,
302 ipa3_ctx->hdr_proc_ctx_mem.phys_base);
303 ipa3_ctx->hdr_proc_ctx_mem = ctx_mem;
304 }
305 }
306
307end:
308 if (ctx_cmd_pyld)
309 ipahal_destroy_imm_cmd(ctx_cmd_pyld);
310
311 if (hdr_cmd_pyld)
312 ipahal_destroy_imm_cmd(hdr_cmd_pyld);
313
314 return rc;
315}
316
/*
 * __ipa_add_hdr_proc_ctx() - allocate and register one header
 * processing-context entry that references an existing header.
 * @proc_ctx: [inout] request; proc_ctx_hdl is written back on success
 * @add_ref_hdr: take a reference on the referenced header entry
 *
 * Caller must hold ipa3_ctx->lock (all callers in this file do).
 * Returns: 0 on success, negative on failure
 */
static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
	bool add_ref_hdr)
{
	struct ipa3_hdr_entry *hdr_entry;
	struct ipa3_hdr_proc_ctx_entry *entry;
	struct ipa3_hdr_proc_ctx_offset_entry *offset;
	u32 bin;
	struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;
	int id;
	int needed_len;
	int mem_size;

	IPADBG_LOW("Add processing type %d hdr_hdl %d\n",
		proc_ctx->type, proc_ctx->hdr_hdl);

	if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
		IPAERR("invalid processing type %d\n", proc_ctx->type);
		return -EINVAL;
	}

	/* resolve and sanity-check the referenced header handle */
	hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl);
	if (!hdr_entry) {
		IPAERR("hdr_hdl is invalid\n");
		return -EINVAL;
	}
	if (hdr_entry->cookie != IPA_COOKIE) {
		IPAERR("Invalid header cookie %u\n", hdr_entry->cookie);
		WARN_ON(1);
		return -EINVAL;
	}
	IPADBG("Associated header is name=%s is_hdr_proc_ctx=%d\n",
		hdr_entry->name, hdr_entry->is_hdr_proc_ctx);

	entry = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_cache, GFP_KERNEL);
	if (!entry) {
		IPAERR("failed to alloc proc_ctx object\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&entry->link);

	entry->type = proc_ctx->type;
	entry->hdr = hdr_entry;
	if (add_ref_hdr)
		hdr_entry->ref_cnt++;
	entry->cookie = IPA_COOKIE;

	/* pick the smallest bin whose slot size fits this context type */
	needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type);

	if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0]) {
		bin = IPA_HDR_PROC_CTX_BIN0;
	} else if (needed_len <=
			ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
		bin = IPA_HDR_PROC_CTX_BIN1;
	} else {
		IPAERR("unexpected needed len %d\n", needed_len);
		WARN_ON(1);
		goto bad_len;
	}

	/* capacity depends on whether the table lives in SRAM or DDR */
	mem_size = (ipa3_ctx->hdr_proc_ctx_tbl_lcl) ?
		IPA_MEM_PART(apps_hdr_proc_ctx_size) :
		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
	if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
		IPAERR("hdr proc ctx table overflow\n");
		goto bad_len;
	}

	if (list_empty(&htbl->head_free_offset_list[bin])) {
		/* no recycled slot: grow the table by one bin-sized slot */
		offset = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_offset_cache,
			GFP_KERNEL);
		if (!offset) {
			IPAERR("failed to alloc offset object\n");
			goto bad_len;
		}
		INIT_LIST_HEAD(&offset->link);
		/*
		 * for a first item grow, set the bin and offset which are set
		 * in stone
		 */
		offset->offset = htbl->end;
		offset->bin = bin;
		htbl->end += ipa_hdr_proc_ctx_bin_sz[bin];
		list_add(&offset->link,
			&htbl->head_offset_list[bin]);
	} else {
		/* get the first free slot */
		offset =
			list_first_entry(&htbl->head_free_offset_list[bin],
			struct ipa3_hdr_proc_ctx_offset_entry, link);
		list_move(&offset->link, &htbl->head_offset_list[bin]);
	}

	entry->offset_entry = offset;
	list_add(&entry->link, &htbl->head_proc_ctx_entry_list);
	htbl->proc_ctx_cnt++;
	IPADBG("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
		htbl->proc_ctx_cnt, offset->offset);

	/*
	 * NOTE(review): if id allocation fails the negative id is still
	 * stored and returned as the handle after the WARN - confirm this
	 * is the intended best-effort behavior
	 */
	id = ipa3_id_alloc(entry);
	if (id < 0) {
		IPAERR("failed to alloc id\n");
		WARN_ON(1);
	}
	entry->id = id;
	proc_ctx->proc_ctx_hdl = id;
	entry->ref_cnt++;

	return 0;

bad_len:
	/* undo the header reference and discard the half-built entry */
	if (add_ref_hdr)
		hdr_entry->ref_cnt--;
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);
	return -EPERM;
}
434
435
436static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
437{
438 struct ipa3_hdr_entry *entry;
439 struct ipa_hdr_offset_entry *offset;
440 u32 bin;
441 struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
442 int id;
443 int mem_size;
444
445 if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
446 IPAERR("bad parm\n");
447 goto error;
448 }
449
450 if (!HDR_TYPE_IS_VALID(hdr->type)) {
451 IPAERR("invalid hdr type %d\n", hdr->type);
452 goto error;
453 }
454
455 entry = kmem_cache_zalloc(ipa3_ctx->hdr_cache, GFP_KERNEL);
456 if (!entry) {
457 IPAERR("failed to alloc hdr object\n");
458 goto error;
459 }
460
461 INIT_LIST_HEAD(&entry->link);
462
463 memcpy(entry->hdr, hdr->hdr, hdr->hdr_len);
464 entry->hdr_len = hdr->hdr_len;
465 strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX);
466 entry->is_partial = hdr->is_partial;
467 entry->type = hdr->type;
468 entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
469 entry->eth2_ofst = hdr->eth2_ofst;
470 entry->cookie = IPA_COOKIE;
471
472 if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
473 bin = IPA_HDR_BIN0;
474 else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1])
475 bin = IPA_HDR_BIN1;
476 else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2])
477 bin = IPA_HDR_BIN2;
478 else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3])
479 bin = IPA_HDR_BIN3;
480 else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
481 bin = IPA_HDR_BIN4;
482 else {
483 IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
484 goto bad_hdr_len;
485 }
486
487 mem_size = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_size) :
488 IPA_MEM_PART(apps_hdr_size_ddr);
489
490 /* if header does not fit to table, place it in DDR */
491 if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) {
492 entry->is_hdr_proc_ctx = true;
493 entry->phys_base = dma_map_single(ipa3_ctx->pdev,
494 entry->hdr,
495 entry->hdr_len,
496 DMA_TO_DEVICE);
497 } else {
498 entry->is_hdr_proc_ctx = false;
499 if (list_empty(&htbl->head_free_offset_list[bin])) {
500 offset = kmem_cache_zalloc(ipa3_ctx->hdr_offset_cache,
501 GFP_KERNEL);
502 if (!offset) {
503 IPAERR("failed to alloc hdr offset object\n");
504 goto bad_hdr_len;
505 }
506 INIT_LIST_HEAD(&offset->link);
507 /*
508 * for a first item grow, set the bin and offset which
509 * are set in stone
510 */
511 offset->offset = htbl->end;
512 offset->bin = bin;
513 htbl->end += ipa_hdr_bin_sz[bin];
514 list_add(&offset->link,
515 &htbl->head_offset_list[bin]);
516 } else {
517 /* get the first free slot */
518 offset =
519 list_first_entry(&htbl->head_free_offset_list[bin],
520 struct ipa_hdr_offset_entry, link);
521 list_move(&offset->link, &htbl->head_offset_list[bin]);
522 }
523
524 entry->offset_entry = offset;
525 }
526
527 list_add(&entry->link, &htbl->head_hdr_entry_list);
528 htbl->hdr_cnt++;
529 if (entry->is_hdr_proc_ctx)
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200530 IPADBG("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
Amir Levy9659e592016-10-27 18:08:27 +0300531 hdr->hdr_len,
532 htbl->hdr_cnt,
533 &entry->phys_base);
534 else
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200535 IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
Amir Levy9659e592016-10-27 18:08:27 +0300536 hdr->hdr_len,
537 htbl->hdr_cnt,
538 entry->offset_entry->offset);
539
540 id = ipa3_id_alloc(entry);
541 if (id < 0) {
542 IPAERR("failed to alloc id\n");
543 WARN_ON(1);
544 }
545 entry->id = id;
546 hdr->hdr_hdl = id;
547 entry->ref_cnt++;
548
549 if (entry->is_hdr_proc_ctx) {
550 struct ipa_hdr_proc_ctx_add proc_ctx;
551
552 IPADBG("adding processing context for header %s\n", hdr->name);
553 proc_ctx.type = IPA_HDR_PROC_NONE;
554 proc_ctx.hdr_hdl = id;
555 if (__ipa_add_hdr_proc_ctx(&proc_ctx, false)) {
556 IPAERR("failed to add hdr proc ctx\n");
557 goto fail_add_proc_ctx;
558 }
559 entry->proc_ctx = ipa3_id_find(proc_ctx.proc_ctx_hdl);
560 }
561
562 return 0;
563
564fail_add_proc_ctx:
565 entry->ref_cnt--;
566 hdr->hdr_hdl = 0;
567 ipa3_id_remove(id);
568 htbl->hdr_cnt--;
569 list_del(&entry->link);
570 dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
571 entry->hdr_len, DMA_TO_DEVICE);
572bad_hdr_len:
573 entry->cookie = 0;
574 kmem_cache_free(ipa3_ctx->hdr_cache, entry);
575error:
576 return -EPERM;
577}
578
/*
 * __ipa3_del_hdr_proc_ctx() - drop a reference to a processing-context
 * entry and destroy it when the count reaches zero.
 * @proc_ctx_hdl: handle of the processing context
 * @release_hdr: also drop the reference this context holds on its header
 * @by_user: deletion requested by user (tracked to block double delete)
 *
 * Caller must hold ipa3_ctx->lock.
 * Returns: 0 on success, negative on failure
 */
static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl,
	bool release_hdr, bool by_user)
{
	struct ipa3_hdr_proc_ctx_entry *entry;
	struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;

	entry = ipa3_id_find(proc_ctx_hdl);
	if (!entry || (entry->cookie != IPA_COOKIE)) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	IPADBG("del proc ctx cnt=%d ofst=%d\n",
		htbl->proc_ctx_cnt, entry->offset_entry->offset);

	/* reject a second user-initiated delete of the same entry */
	if (by_user && entry->user_deleted) {
		IPAERR("proc_ctx already deleted by user\n");
		return -EINVAL;
	}

	if (by_user)
		entry->user_deleted = true;

	/* other references remain - keep the entry alive */
	if (--entry->ref_cnt) {
		IPADBG("proc_ctx_hdl %x ref_cnt %d\n",
			proc_ctx_hdl, entry->ref_cnt);
		return 0;
	}

	if (release_hdr)
		__ipa3_del_hdr(entry->hdr->id, false);

	/* move the offset entry to appropriate free list */
	list_move(&entry->offset_entry->link,
		&htbl->head_free_offset_list[entry->offset_entry->bin]);
	list_del(&entry->link);
	htbl->proc_ctx_cnt--;
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);

	/* remove the handle from the database */
	ipa3_id_remove(proc_ctx_hdl);

	return 0;
}
624
625
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200626int __ipa3_del_hdr(u32 hdr_hdl, bool by_user)
Amir Levy9659e592016-10-27 18:08:27 +0300627{
628 struct ipa3_hdr_entry *entry;
629 struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
630
631 entry = ipa3_id_find(hdr_hdl);
632 if (entry == NULL) {
633 IPAERR("lookup failed\n");
634 return -EINVAL;
635 }
636
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200637 if (entry->cookie != IPA_COOKIE) {
Amir Levy9659e592016-10-27 18:08:27 +0300638 IPAERR("bad parm\n");
639 return -EINVAL;
640 }
641
642 if (entry->is_hdr_proc_ctx)
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200643 IPADBG("del hdr of len=%d hdr_cnt=%d phys_base=%pa\n",
Amir Levy9659e592016-10-27 18:08:27 +0300644 entry->hdr_len, htbl->hdr_cnt, &entry->phys_base);
645 else
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200646 IPADBG("del hdr of len=%d hdr_cnt=%d ofst=%d\n",
647 entry->hdr_len, htbl->hdr_cnt,
648 entry->offset_entry->offset);
Amir Levy9659e592016-10-27 18:08:27 +0300649
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200650 if (by_user && entry->user_deleted) {
651 IPAERR("proc_ctx already deleted by user\n");
652 return -EINVAL;
653 }
654
655 if (by_user)
656 entry->user_deleted = true;
657
Amir Levy9659e592016-10-27 18:08:27 +0300658 if (--entry->ref_cnt) {
659 IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt);
660 return 0;
661 }
662
663 if (entry->is_hdr_proc_ctx) {
664 dma_unmap_single(ipa3_ctx->pdev,
665 entry->phys_base,
666 entry->hdr_len,
667 DMA_TO_DEVICE);
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200668 __ipa3_del_hdr_proc_ctx(entry->proc_ctx->id, false, false);
Amir Levy9659e592016-10-27 18:08:27 +0300669 } else {
670 /* move the offset entry to appropriate free list */
671 list_move(&entry->offset_entry->link,
672 &htbl->head_free_offset_list[entry->offset_entry->bin]);
673 }
674 list_del(&entry->link);
675 htbl->hdr_cnt--;
676 entry->cookie = 0;
677 kmem_cache_free(ipa3_ctx->hdr_cache, entry);
678
679 /* remove the handle from the database */
680 ipa3_id_remove(hdr_hdl);
681
682 return 0;
683}
684
685/**
686 * ipa3_add_hdr() - add the specified headers to SW and optionally commit them
687 * to IPA HW
688 * @hdrs: [inout] set of headers to add
689 *
690 * Returns: 0 on success, negative on failure
691 *
692 * Note: Should not be called from atomic context
693 */
694int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs)
695{
696 int i;
697 int result = -EFAULT;
698
699 if (hdrs == NULL || hdrs->num_hdrs == 0) {
700 IPAERR("bad parm\n");
701 return -EINVAL;
702 }
703
704 mutex_lock(&ipa3_ctx->lock);
705 IPADBG("adding %d headers to IPA driver internal data struct\n",
706 hdrs->num_hdrs);
707 for (i = 0; i < hdrs->num_hdrs; i++) {
708 if (__ipa_add_hdr(&hdrs->hdr[i])) {
709 IPAERR("failed to add hdr %d\n", i);
710 hdrs->hdr[i].status = -1;
711 } else {
712 hdrs->hdr[i].status = 0;
713 }
714 }
715
716 if (hdrs->commit) {
717 IPADBG("committing all headers to IPA core");
718 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
719 result = -EPERM;
720 goto bail;
721 }
722 }
723 result = 0;
724bail:
725 mutex_unlock(&ipa3_ctx->lock);
726 return result;
727}
728
729/**
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200730 * ipa3_del_hdr_by_user() - Remove the specified headers
731 * from SW and optionally commit them to IPA HW
Amir Levy9659e592016-10-27 18:08:27 +0300732 * @hdls: [inout] set of headers to delete
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200733 * @by_user: Operation requested by user?
Amir Levy9659e592016-10-27 18:08:27 +0300734 *
735 * Returns: 0 on success, negative on failure
736 *
737 * Note: Should not be called from atomic context
738 */
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200739int ipa3_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user)
Amir Levy9659e592016-10-27 18:08:27 +0300740{
741 int i;
742 int result = -EFAULT;
743
744 if (hdls == NULL || hdls->num_hdls == 0) {
745 IPAERR("bad parm\n");
746 return -EINVAL;
747 }
748
749 mutex_lock(&ipa3_ctx->lock);
750 for (i = 0; i < hdls->num_hdls; i++) {
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200751 if (__ipa3_del_hdr(hdls->hdl[i].hdl, by_user)) {
Amir Levy9659e592016-10-27 18:08:27 +0300752 IPAERR("failed to del hdr %i\n", i);
753 hdls->hdl[i].status = -1;
754 } else {
755 hdls->hdl[i].status = 0;
756 }
757 }
758
759 if (hdls->commit) {
760 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
761 result = -EPERM;
762 goto bail;
763 }
764 }
765 result = 0;
766bail:
767 mutex_unlock(&ipa3_ctx->lock);
768 return result;
769}
770
771/**
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200772 * ipa3_del_hdr() - Remove the specified headers from SW
773 * and optionally commit them to IPA HW
774 * @hdls: [inout] set of headers to delete
775 *
776 * Returns: 0 on success, negative on failure
777 *
778 * Note: Should not be called from atomic context
779 */
780int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls)
781{
782 return ipa3_del_hdr_by_user(hdls, false);
783}
784
785/**
Amir Levy9659e592016-10-27 18:08:27 +0300786 * ipa3_add_hdr_proc_ctx() - add the specified headers to SW
787 * and optionally commit them to IPA HW
788 * @proc_ctxs: [inout] set of processing context headers to add
789 *
790 * Returns: 0 on success, negative on failure
791 *
792 * Note: Should not be called from atomic context
793 */
794int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
795{
796 int i;
797 int result = -EFAULT;
798
799 if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
800 IPAERR("bad parm\n");
801 return -EINVAL;
802 }
803
804 mutex_lock(&ipa3_ctx->lock);
805 IPADBG("adding %d header processing contextes to IPA driver\n",
806 proc_ctxs->num_proc_ctxs);
807 for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
808 if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
809 IPAERR("failed to add hdr pric ctx %d\n", i);
810 proc_ctxs->proc_ctx[i].status = -1;
811 } else {
812 proc_ctxs->proc_ctx[i].status = 0;
813 }
814 }
815
816 if (proc_ctxs->commit) {
817 IPADBG("committing all headers to IPA core");
818 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
819 result = -EPERM;
820 goto bail;
821 }
822 }
823 result = 0;
824bail:
825 mutex_unlock(&ipa3_ctx->lock);
826 return result;
827}
828
829/**
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200830 * ipa3_del_hdr_proc_ctx_by_user() -
Amir Levy9659e592016-10-27 18:08:27 +0300831 * Remove the specified processing context headers from SW and
832 * optionally commit them to IPA HW.
833 * @hdls: [inout] set of processing context headers to delete
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200834 * @by_user: Operation requested by user?
Amir Levy9659e592016-10-27 18:08:27 +0300835 *
836 * Returns: 0 on success, negative on failure
837 *
838 * Note: Should not be called from atomic context
839 */
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200840int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls,
841 bool by_user)
Amir Levy9659e592016-10-27 18:08:27 +0300842{
843 int i;
844 int result;
845
846 if (hdls == NULL || hdls->num_hdls == 0) {
847 IPAERR("bad parm\n");
848 return -EINVAL;
849 }
850
851 mutex_lock(&ipa3_ctx->lock);
852 for (i = 0; i < hdls->num_hdls; i++) {
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200853 if (__ipa3_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) {
Amir Levy9659e592016-10-27 18:08:27 +0300854 IPAERR("failed to del hdr %i\n", i);
855 hdls->hdl[i].status = -1;
856 } else {
857 hdls->hdl[i].status = 0;
858 }
859 }
860
861 if (hdls->commit) {
862 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
863 result = -EPERM;
864 goto bail;
865 }
866 }
867 result = 0;
868bail:
869 mutex_unlock(&ipa3_ctx->lock);
870 return result;
871}
872
873/**
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200874 * ipa3_del_hdr_proc_ctx() -
875 * Remove the specified processing context headers from SW and
876 * optionally commit them to IPA HW.
877 * @hdls: [inout] set of processing context headers to delete
878 *
879 * Returns: 0 on success, negative on failure
880 *
881 * Note: Should not be called from atomic context
882 */
883int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
884{
885 return ipa3_del_hdr_proc_ctx_by_user(hdls, false);
886}
887
888/**
Amir Levy9659e592016-10-27 18:08:27 +0300889 * ipa3_commit_hdr() - commit to IPA HW the current header table in SW
890 *
891 * Returns: 0 on success, negative on failure
892 *
893 * Note: Should not be called from atomic context
894 */
895int ipa3_commit_hdr(void)
896{
897 int result = -EFAULT;
898
899 /*
900 * issue a commit on the routing module since routing rules point to
901 * header table entries
902 */
903 if (ipa3_commit_rt(IPA_IP_v4))
904 return -EPERM;
905 if (ipa3_commit_rt(IPA_IP_v6))
906 return -EPERM;
907
908 mutex_lock(&ipa3_ctx->lock);
909 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
910 result = -EPERM;
911 goto bail;
912 }
913 result = 0;
914bail:
915 mutex_unlock(&ipa3_ctx->lock);
916 return result;
917}
918
919/**
920 * ipa3_reset_hdr() - reset the current header table in SW (does not commit to
921 * HW)
922 *
923 * Returns: 0 on success, negative on failure
924 *
925 * Note: Should not be called from atomic context
926 */
927int ipa3_reset_hdr(void)
928{
929 struct ipa3_hdr_entry *entry;
930 struct ipa3_hdr_entry *next;
931 struct ipa3_hdr_proc_ctx_entry *ctx_entry;
932 struct ipa3_hdr_proc_ctx_entry *ctx_next;
933 struct ipa_hdr_offset_entry *off_entry;
934 struct ipa_hdr_offset_entry *off_next;
935 struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_entry;
936 struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_next;
937 int i;
938
939 /*
940 * issue a reset on the routing module since routing rules point to
941 * header table entries
942 */
943 if (ipa3_reset_rt(IPA_IP_v4))
944 IPAERR("fail to reset v4 rt\n");
945 if (ipa3_reset_rt(IPA_IP_v6))
946 IPAERR("fail to reset v4 rt\n");
947
948 mutex_lock(&ipa3_ctx->lock);
949 IPADBG("reset hdr\n");
950 list_for_each_entry_safe(entry, next,
951 &ipa3_ctx->hdr_tbl.head_hdr_entry_list, link) {
952
953 /* do not remove the default header */
954 if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
955 if (entry->is_hdr_proc_ctx) {
956 IPAERR("default header is proc ctx\n");
957 mutex_unlock(&ipa3_ctx->lock);
958 WARN_ON(1);
959 return -EFAULT;
960 }
961 continue;
962 }
963
964 if (ipa3_id_find(entry->id) == NULL) {
965 mutex_unlock(&ipa3_ctx->lock);
966 WARN_ON(1);
967 return -EFAULT;
968 }
969 if (entry->is_hdr_proc_ctx) {
970 dma_unmap_single(ipa3_ctx->pdev,
971 entry->phys_base,
972 entry->hdr_len,
973 DMA_TO_DEVICE);
974 entry->proc_ctx = NULL;
975 }
976 list_del(&entry->link);
977 entry->ref_cnt = 0;
978 entry->cookie = 0;
979
980 /* remove the handle from the database */
981 ipa3_id_remove(entry->id);
982 kmem_cache_free(ipa3_ctx->hdr_cache, entry);
983
984 }
985 for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
986 list_for_each_entry_safe(off_entry, off_next,
987 &ipa3_ctx->hdr_tbl.head_offset_list[i],
988 link) {
989
990 /*
991 * do not remove the default exception header which is
992 * at offset 0
993 */
994 if (off_entry->offset == 0)
995 continue;
996
997 list_del(&off_entry->link);
998 kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry);
999 }
1000 list_for_each_entry_safe(off_entry, off_next,
1001 &ipa3_ctx->hdr_tbl.head_free_offset_list[i],
1002 link) {
1003 list_del(&off_entry->link);
1004 kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry);
1005 }
1006 }
1007 /* there is one header of size 8 */
1008 ipa3_ctx->hdr_tbl.end = 8;
1009 ipa3_ctx->hdr_tbl.hdr_cnt = 1;
1010
1011 IPADBG("reset hdr proc ctx\n");
1012 list_for_each_entry_safe(
1013 ctx_entry,
1014 ctx_next,
1015 &ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
1016 link) {
1017
1018 if (ipa3_id_find(ctx_entry->id) == NULL) {
1019 mutex_unlock(&ipa3_ctx->lock);
1020 WARN_ON(1);
1021 return -EFAULT;
1022 }
1023 list_del(&ctx_entry->link);
1024 ctx_entry->ref_cnt = 0;
1025 ctx_entry->cookie = 0;
1026
1027 /* remove the handle from the database */
1028 ipa3_id_remove(ctx_entry->id);
1029 kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, ctx_entry);
1030
1031 }
1032 for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
1033 list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
1034 &ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i],
1035 link) {
1036
1037 list_del(&ctx_off_entry->link);
1038 kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache,
1039 ctx_off_entry);
1040 }
1041 list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
1042 &ipa3_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i],
1043 link) {
1044 list_del(&ctx_off_entry->link);
1045 kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache,
1046 ctx_off_entry);
1047 }
1048 }
1049 ipa3_ctx->hdr_proc_ctx_tbl.end = 0;
1050 ipa3_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0;
1051 mutex_unlock(&ipa3_ctx->lock);
1052
1053 return 0;
1054}
1055
1056static struct ipa3_hdr_entry *__ipa_find_hdr(const char *name)
1057{
1058 struct ipa3_hdr_entry *entry;
1059
1060 if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
1061 IPAERR("Header name too long: %s\n", name);
1062 return NULL;
1063 }
1064
1065 list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
1066 link) {
1067 if (!strcmp(name, entry->name))
1068 return entry;
1069 }
1070
1071 return NULL;
1072}
1073
1074/**
1075 * ipa3_get_hdr() - Lookup the specified header resource
1076 * @lookup: [inout] header to lookup and its handle
1077 *
1078 * lookup the specified header resource and return handle if it exists
1079 *
1080 * Returns: 0 on success, negative on failure
1081 *
1082 * Note: Should not be called from atomic context
1083 * Caller should call ipa3_put_hdr later if this function succeeds
1084 */
1085int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup)
1086{
1087 struct ipa3_hdr_entry *entry;
1088 int result = -1;
1089
1090 if (lookup == NULL) {
1091 IPAERR("bad parm\n");
1092 return -EINVAL;
1093 }
1094 mutex_lock(&ipa3_ctx->lock);
1095 entry = __ipa_find_hdr(lookup->name);
1096 if (entry) {
1097 lookup->hdl = entry->id;
1098 result = 0;
1099 }
1100 mutex_unlock(&ipa3_ctx->lock);
1101
1102 return result;
1103}
1104
1105/**
1106 * __ipa3_release_hdr() - drop reference to header and cause
1107 * deletion if reference count permits
1108 * @hdr_hdl: [in] handle of header to be released
1109 *
1110 * Returns: 0 on success, negative on failure
1111 */
1112int __ipa3_release_hdr(u32 hdr_hdl)
1113{
1114 int result = 0;
1115
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001116 if (__ipa3_del_hdr(hdr_hdl, false)) {
Amir Levy9659e592016-10-27 18:08:27 +03001117 IPADBG("fail to del hdr %x\n", hdr_hdl);
1118 result = -EFAULT;
1119 goto bail;
1120 }
1121
1122 /* commit for put */
1123 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
1124 IPAERR("fail to commit hdr\n");
1125 result = -EFAULT;
1126 goto bail;
1127 }
1128
1129bail:
1130 return result;
1131}
1132
1133/**
1134 * __ipa3_release_hdr_proc_ctx() - drop reference to processing context
1135 * and cause deletion if reference count permits
1136 * @proc_ctx_hdl: [in] handle of processing context to be released
1137 *
1138 * Returns: 0 on success, negative on failure
1139 */
1140int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl)
1141{
1142 int result = 0;
1143
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001144 if (__ipa3_del_hdr_proc_ctx(proc_ctx_hdl, true, false)) {
Amir Levy9659e592016-10-27 18:08:27 +03001145 IPADBG("fail to del hdr %x\n", proc_ctx_hdl);
1146 result = -EFAULT;
1147 goto bail;
1148 }
1149
1150 /* commit for put */
1151 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
1152 IPAERR("fail to commit hdr\n");
1153 result = -EFAULT;
1154 goto bail;
1155 }
1156
1157bail:
1158 return result;
1159}
1160
1161/**
1162 * ipa3_put_hdr() - Release the specified header handle
1163 * @hdr_hdl: [in] the header handle to release
1164 *
1165 * Returns: 0 on success, negative on failure
1166 *
1167 * Note: Should not be called from atomic context
1168 */
1169int ipa3_put_hdr(u32 hdr_hdl)
1170{
1171 struct ipa3_hdr_entry *entry;
1172 int result = -EFAULT;
1173
1174 mutex_lock(&ipa3_ctx->lock);
1175
1176 entry = ipa3_id_find(hdr_hdl);
1177 if (entry == NULL) {
1178 IPAERR("lookup failed\n");
1179 result = -EINVAL;
1180 goto bail;
1181 }
1182
1183 if (entry->cookie != IPA_COOKIE) {
1184 IPAERR("invalid header entry\n");
1185 result = -EINVAL;
1186 goto bail;
1187 }
1188
1189 result = 0;
1190bail:
1191 mutex_unlock(&ipa3_ctx->lock);
1192 return result;
1193}
1194
1195/**
1196 * ipa3_copy_hdr() - Lookup the specified header resource and return a copy of
1197 * it
1198 * @copy: [inout] header to lookup and its copy
1199 *
1200 * lookup the specified header resource and return a copy of it (along with its
1201 * attributes) if it exists, this would be called for partial headers
1202 *
1203 * Returns: 0 on success, negative on failure
1204 *
1205 * Note: Should not be called from atomic context
1206 */
1207int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy)
1208{
1209 struct ipa3_hdr_entry *entry;
1210 int result = -EFAULT;
1211
1212 if (copy == NULL) {
1213 IPAERR("bad parm\n");
1214 return -EINVAL;
1215 }
1216 mutex_lock(&ipa3_ctx->lock);
1217 entry = __ipa_find_hdr(copy->name);
1218 if (entry) {
1219 memcpy(copy->hdr, entry->hdr, entry->hdr_len);
1220 copy->hdr_len = entry->hdr_len;
1221 copy->type = entry->type;
1222 copy->is_partial = entry->is_partial;
1223 copy->is_eth2_ofst_valid = entry->is_eth2_ofst_valid;
1224 copy->eth2_ofst = entry->eth2_ofst;
1225 result = 0;
1226 }
1227 mutex_unlock(&ipa3_ctx->lock);
1228
1229 return result;
1230}