blob: 0c1832c0b4ac3eb863f8840d05a7cc1f07b594c4 [file] [log] [blame]
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Amir Levy9659e592016-10-27 18:08:27 +03002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include "ipa_i.h"
14#include "ipahal/ipahal.h"
15
/* Header sizes (bytes) supported by each header-table bin; a header is
 * placed in the first (smallest) bin whose size fits it.
 */
static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 64};
/* Processing-context sizes (bytes) supported by each proc-ctx table bin */
static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64};

/* Validate an L2 header type value received from callers/user space */
#define HDR_TYPE_IS_VALID(type) \
	((type) >= 0 && (type) < IPA_HDR_L2_MAX)

/* Validate a header processing-context type value */
#define HDR_PROC_TYPE_IS_VALID(type) \
	((type) >= 0 && (type) < IPA_HDR_PROC_MAX)
24
25/**
26 * ipa3_generate_hdr_hw_tbl() - generates the headers table
27 * @mem: [out] buffer to put the header table
28 *
29 * Returns: 0 on success, negative on failure
30 */
31static int ipa3_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
32{
33 struct ipa3_hdr_entry *entry;
34
35 mem->size = ipa3_ctx->hdr_tbl.end;
36
37 if (mem->size == 0) {
38 IPAERR("hdr tbl empty\n");
39 return -EPERM;
40 }
41 IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_tbl.end);
42
43 mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
44 &mem->phys_base, GFP_KERNEL);
45 if (!mem->base) {
46 IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
47 return -ENOMEM;
48 }
49
50 memset(mem->base, 0, mem->size);
51 list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
52 link) {
53 if (entry->is_hdr_proc_ctx)
54 continue;
55 IPADBG_LOW("hdr of len %d ofst=%d\n", entry->hdr_len,
56 entry->offset_entry->offset);
57 ipahal_cp_hdr_to_hw_buff(mem->base, entry->offset_entry->offset,
58 entry->hdr, entry->hdr_len);
59 }
60
61 return 0;
62}
63
64static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
65 u32 hdr_base_addr)
66{
67 struct ipa3_hdr_proc_ctx_entry *entry;
68 int ret;
69
70 list_for_each_entry(entry,
71 &ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
72 link) {
73 IPADBG_LOW("processing type %d ofst=%d\n",
74 entry->type, entry->offset_entry->offset);
75 ret = ipahal_cp_proc_ctx_to_hw_buff(entry->type, mem->base,
76 entry->offset_entry->offset,
77 entry->hdr->hdr_len,
78 entry->hdr->is_hdr_proc_ctx,
79 entry->hdr->phys_base,
80 hdr_base_addr,
Skylar Chang7fa22712017-04-03 18:29:21 -070081 entry->hdr->offset_entry,
82 entry->l2tp_params);
Amir Levy9659e592016-10-27 18:08:27 +030083 if (ret)
84 return ret;
85 }
86
87 return 0;
88}
89
90/**
91 * ipa3_generate_hdr_proc_ctx_hw_tbl() -
92 * generates the headers processing context table.
93 * @mem: [out] buffer to put the processing context table
94 * @aligned_mem: [out] actual processing context table (with alignment).
95 * Processing context table needs to be 8 Bytes aligned.
96 *
97 * Returns: 0 on success, negative on failure
98 */
99static int ipa3_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr,
100 struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem)
101{
102 u32 hdr_base_addr;
103
104 mem->size = (ipa3_ctx->hdr_proc_ctx_tbl.end) ? : 4;
105
106 /* make sure table is aligned */
107 mem->size += IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
108
109 IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_proc_ctx_tbl.end);
110
111 mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
112 &mem->phys_base, GFP_KERNEL);
113 if (!mem->base) {
114 IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
115 return -ENOMEM;
116 }
117
118 aligned_mem->phys_base =
119 IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(mem->phys_base);
120 aligned_mem->base = mem->base +
121 (aligned_mem->phys_base - mem->phys_base);
122 aligned_mem->size = mem->size - IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
123 memset(aligned_mem->base, 0, aligned_mem->size);
124 hdr_base_addr = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_ofst) :
125 hdr_sys_addr;
126 return ipa3_hdr_proc_ctx_to_hw_format(aligned_mem, hdr_base_addr);
127}
128
129/**
130 * __ipa_commit_hdr_v3_0() - Commits the header table from memory to HW
131 *
132 * Returns: 0 on success, negative on failure
133 */
134int __ipa_commit_hdr_v3_0(void)
135{
136 struct ipa3_desc desc[2];
137 struct ipa_mem_buffer hdr_mem;
138 struct ipa_mem_buffer ctx_mem;
139 struct ipa_mem_buffer aligned_ctx_mem;
140 struct ipahal_imm_cmd_dma_shared_mem dma_cmd_hdr = {0};
141 struct ipahal_imm_cmd_dma_shared_mem dma_cmd_ctx = {0};
142 struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
143 struct ipahal_imm_cmd_hdr_init_system hdr_init_cmd = {0};
144 struct ipahal_imm_cmd_pyld *hdr_cmd_pyld = NULL;
145 struct ipahal_imm_cmd_pyld *ctx_cmd_pyld = NULL;
146 int rc = -EFAULT;
147 u32 proc_ctx_size;
148 u32 proc_ctx_ofst;
149 u32 proc_ctx_size_ddr;
150
151 memset(desc, 0, 2 * sizeof(struct ipa3_desc));
152
153 if (ipa3_generate_hdr_hw_tbl(&hdr_mem)) {
154 IPAERR("fail to generate HDR HW TBL\n");
155 goto end;
156 }
157
158 if (ipa3_generate_hdr_proc_ctx_hw_tbl(hdr_mem.phys_base, &ctx_mem,
159 &aligned_ctx_mem)) {
160 IPAERR("fail to generate HDR PROC CTX HW TBL\n");
161 goto end;
162 }
163
164 if (ipa3_ctx->hdr_tbl_lcl) {
165 if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) {
166 IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
167 IPA_MEM_PART(apps_hdr_size));
168 goto end;
169 } else {
170 dma_cmd_hdr.is_read = false; /* write operation */
171 dma_cmd_hdr.skip_pipeline_clear = false;
172 dma_cmd_hdr.pipeline_clear_options = IPAHAL_HPS_CLEAR;
173 dma_cmd_hdr.system_addr = hdr_mem.phys_base;
174 dma_cmd_hdr.size = hdr_mem.size;
175 dma_cmd_hdr.local_addr =
176 ipa3_ctx->smem_restricted_bytes +
177 IPA_MEM_PART(apps_hdr_ofst);
178 hdr_cmd_pyld = ipahal_construct_imm_cmd(
179 IPA_IMM_CMD_DMA_SHARED_MEM,
180 &dma_cmd_hdr, false);
181 if (!hdr_cmd_pyld) {
182 IPAERR("fail construct dma_shared_mem cmd\n");
183 goto end;
184 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -0700185 desc[0].opcode = hdr_cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +0300186 desc[0].pyld = hdr_cmd_pyld->data;
187 desc[0].len = hdr_cmd_pyld->len;
188 }
189 } else {
190 if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
191 IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
192 IPA_MEM_PART(apps_hdr_size_ddr));
193 goto end;
194 } else {
195 hdr_init_cmd.hdr_table_addr = hdr_mem.phys_base;
196 hdr_cmd_pyld = ipahal_construct_imm_cmd(
197 IPA_IMM_CMD_HDR_INIT_SYSTEM,
198 &hdr_init_cmd, false);
199 if (!hdr_cmd_pyld) {
200 IPAERR("fail construct hdr_init_system cmd\n");
201 goto end;
202 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -0700203 desc[0].opcode = hdr_cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +0300204 desc[0].pyld = hdr_cmd_pyld->data;
205 desc[0].len = hdr_cmd_pyld->len;
206 }
207 }
208 desc[0].type = IPA_IMM_CMD_DESC;
209 IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);
210
211 proc_ctx_size = IPA_MEM_PART(apps_hdr_proc_ctx_size);
212 proc_ctx_ofst = IPA_MEM_PART(apps_hdr_proc_ctx_ofst);
213 if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
214 if (aligned_ctx_mem.size > proc_ctx_size) {
215 IPAERR("tbl too big needed %d avail %d\n",
216 aligned_ctx_mem.size,
217 proc_ctx_size);
218 goto end;
219 } else {
220 dma_cmd_ctx.is_read = false; /* Write operation */
221 dma_cmd_ctx.skip_pipeline_clear = false;
222 dma_cmd_ctx.pipeline_clear_options = IPAHAL_HPS_CLEAR;
223 dma_cmd_ctx.system_addr = aligned_ctx_mem.phys_base;
224 dma_cmd_ctx.size = aligned_ctx_mem.size;
225 dma_cmd_ctx.local_addr =
226 ipa3_ctx->smem_restricted_bytes +
227 proc_ctx_ofst;
228 ctx_cmd_pyld = ipahal_construct_imm_cmd(
229 IPA_IMM_CMD_DMA_SHARED_MEM,
230 &dma_cmd_ctx, false);
231 if (!ctx_cmd_pyld) {
232 IPAERR("fail construct dma_shared_mem cmd\n");
233 goto end;
234 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -0700235 desc[1].opcode = ctx_cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +0300236 desc[1].pyld = ctx_cmd_pyld->data;
237 desc[1].len = ctx_cmd_pyld->len;
238 }
239 } else {
240 proc_ctx_size_ddr = IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
241 if (aligned_ctx_mem.size > proc_ctx_size_ddr) {
242 IPAERR("tbl too big, needed %d avail %d\n",
243 aligned_ctx_mem.size,
244 proc_ctx_size_ddr);
245 goto end;
246 } else {
247 reg_write_cmd.skip_pipeline_clear = false;
248 reg_write_cmd.pipeline_clear_options =
249 IPAHAL_HPS_CLEAR;
250 reg_write_cmd.offset =
251 ipahal_get_reg_ofst(
252 IPA_SYS_PKT_PROC_CNTXT_BASE);
253 reg_write_cmd.value = aligned_ctx_mem.phys_base;
254 reg_write_cmd.value_mask =
255 ~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1);
256 ctx_cmd_pyld = ipahal_construct_imm_cmd(
257 IPA_IMM_CMD_REGISTER_WRITE,
258 &reg_write_cmd, false);
259 if (!ctx_cmd_pyld) {
260 IPAERR("fail construct register_write cmd\n");
261 goto end;
262 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -0700263 desc[1].opcode = ctx_cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +0300264 desc[1].pyld = ctx_cmd_pyld->data;
265 desc[1].len = ctx_cmd_pyld->len;
266 }
267 }
268 desc[1].type = IPA_IMM_CMD_DESC;
269 IPA_DUMP_BUFF(ctx_mem.base, ctx_mem.phys_base, ctx_mem.size);
270
271 if (ipa3_send_cmd(2, desc))
272 IPAERR("fail to send immediate command\n");
273 else
274 rc = 0;
275
276 if (ipa3_ctx->hdr_tbl_lcl) {
277 dma_free_coherent(ipa3_ctx->pdev, hdr_mem.size, hdr_mem.base,
278 hdr_mem.phys_base);
279 } else {
280 if (!rc) {
281 if (ipa3_ctx->hdr_mem.phys_base)
282 dma_free_coherent(ipa3_ctx->pdev,
283 ipa3_ctx->hdr_mem.size,
284 ipa3_ctx->hdr_mem.base,
285 ipa3_ctx->hdr_mem.phys_base);
286 ipa3_ctx->hdr_mem = hdr_mem;
287 }
288 }
289
290 if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
291 dma_free_coherent(ipa3_ctx->pdev, ctx_mem.size, ctx_mem.base,
292 ctx_mem.phys_base);
293 } else {
294 if (!rc) {
295 if (ipa3_ctx->hdr_proc_ctx_mem.phys_base)
296 dma_free_coherent(ipa3_ctx->pdev,
297 ipa3_ctx->hdr_proc_ctx_mem.size,
298 ipa3_ctx->hdr_proc_ctx_mem.base,
299 ipa3_ctx->hdr_proc_ctx_mem.phys_base);
300 ipa3_ctx->hdr_proc_ctx_mem = ctx_mem;
301 }
302 }
303
304end:
305 if (ctx_cmd_pyld)
306 ipahal_destroy_imm_cmd(ctx_cmd_pyld);
307
308 if (hdr_cmd_pyld)
309 ipahal_destroy_imm_cmd(hdr_cmd_pyld);
310
311 return rc;
312}
313
/**
 * __ipa_add_hdr_proc_ctx() - add a header processing context entry to SW
 * @proc_ctx: [inout] processing context to add; proc_ctx->proc_ctx_hdl is
 *            set to the allocated handle on success
 * @add_ref_hdr: take a reference count on the associated header entry
 *
 * Caller must hold ipa3_ctx->lock (presumably - all callers in this file
 * do; TODO confirm). Returns 0 on success, negative on failure.
 */
static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
	bool add_ref_hdr)
{
	struct ipa3_hdr_entry *hdr_entry;
	struct ipa3_hdr_proc_ctx_entry *entry;
	struct ipa3_hdr_proc_ctx_offset_entry *offset;
	u32 bin;
	struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;
	int id;
	int needed_len;
	int mem_size;

	IPADBG_LOW("Add processing type %d hdr_hdl %d\n",
		proc_ctx->type, proc_ctx->hdr_hdl);

	if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
		IPAERR("invalid processing type %d\n", proc_ctx->type);
		return -EINVAL;
	}

	/* validate the referenced header handle and its cookie */
	hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl);
	if (!hdr_entry) {
		IPAERR("hdr_hdl is invalid\n");
		return -EINVAL;
	}
	if (hdr_entry->cookie != IPA_HDR_COOKIE) {
		IPAERR("Invalid header cookie %u\n", hdr_entry->cookie);
		WARN_ON(1);
		return -EINVAL;
	}
	IPADBG("Associated header is name=%s is_hdr_proc_ctx=%d\n",
		hdr_entry->name, hdr_entry->is_hdr_proc_ctx);

	entry = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_cache, GFP_KERNEL);
	if (!entry) {
		IPAERR("failed to alloc proc_ctx object\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&entry->link);

	entry->type = proc_ctx->type;
	entry->hdr = hdr_entry;
	entry->l2tp_params = proc_ctx->l2tp_params;
	if (add_ref_hdr)
		hdr_entry->ref_cnt++;
	entry->cookie = IPA_PROC_HDR_COOKIE;

	/* pick the smallest bin that fits the serialized proc-ctx */
	needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type);

	if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0]) {
		bin = IPA_HDR_PROC_CTX_BIN0;
	} else if (needed_len <=
			ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
		bin = IPA_HDR_PROC_CTX_BIN1;
	} else {
		IPAERR("unexpected needed len %d\n", needed_len);
		WARN_ON(1);
		goto bad_len;
	}

	/* capacity depends on whether the table lives in SRAM or DDR */
	mem_size = (ipa3_ctx->hdr_proc_ctx_tbl_lcl) ?
		IPA_MEM_PART(apps_hdr_proc_ctx_size) :
		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
	if (list_empty(&htbl->head_free_offset_list[bin])) {
		/* no recycled slot: grow the table end */
		if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
			IPAERR("hdr proc ctx table overflow\n");
			goto bad_len;
		}

		offset = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_offset_cache,
			GFP_KERNEL);
		if (!offset) {
			IPAERR("failed to alloc offset object\n");
			goto bad_len;
		}
		INIT_LIST_HEAD(&offset->link);
		/*
		 * for a first item grow, set the bin and offset which are set
		 * in stone
		 */
		offset->offset = htbl->end;
		offset->bin = bin;
		htbl->end += ipa_hdr_proc_ctx_bin_sz[bin];
		list_add(&offset->link,
			&htbl->head_offset_list[bin]);
	} else {
		/* get the first free slot */
		offset =
			list_first_entry(&htbl->head_free_offset_list[bin],
			struct ipa3_hdr_proc_ctx_offset_entry, link);
		list_move(&offset->link, &htbl->head_offset_list[bin]);
	}

	entry->offset_entry = offset;
	list_add(&entry->link, &htbl->head_proc_ctx_entry_list);
	htbl->proc_ctx_cnt++;
	IPADBG("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
		htbl->proc_ctx_cnt, offset->offset);

	id = ipa3_id_alloc(entry);
	if (id < 0) {
		IPAERR("failed to alloc id\n");
		WARN_ON(1);
		goto ipa_insert_failed;
	}
	entry->id = id;
	proc_ctx->proc_ctx_hdl = id;
	entry->ref_cnt++;

	return 0;

ipa_insert_failed:
	/* undo table insertion: return the slot to the free list */
	if (offset)
		list_move(&offset->link,
			&htbl->head_free_offset_list[offset->bin]);
	entry->offset_entry = NULL;
	list_del(&entry->link);
	htbl->proc_ctx_cnt--;

bad_len:
	/* drop the reference taken above and release the entry */
	if (add_ref_hdr)
		hdr_entry->ref_cnt--;
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);
	return -EPERM;
}
441
442
/**
 * __ipa_add_hdr() - add a header entry to SW
 * @hdr: [inout] header to add; hdr->hdr_hdl is set to the allocated
 *       handle on success
 *
 * If the header does not fit in the header table, it is DMA-mapped in
 * system memory instead and paired with an implicit IPA_HDR_PROC_NONE
 * processing context. Caller must hold ipa3_ctx->lock (presumably - all
 * callers in this file do; TODO confirm).
 *
 * Returns: 0 on success, -EPERM on failure
 */
static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
{
	struct ipa3_hdr_entry *entry;
	struct ipa_hdr_offset_entry *offset = NULL;
	u32 bin;
	struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
	int id;
	int mem_size;

	if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
		IPAERR("bad parm\n");
		goto error;
	}

	if (!HDR_TYPE_IS_VALID(hdr->type)) {
		IPAERR("invalid hdr type %d\n", hdr->type);
		goto error;
	}

	entry = kmem_cache_zalloc(ipa3_ctx->hdr_cache, GFP_KERNEL);
	if (!entry) {
		IPAERR("failed to alloc hdr object\n");
		goto error;
	}

	INIT_LIST_HEAD(&entry->link);

	/* copy the caller's header bytes and attributes into the entry */
	memcpy(entry->hdr, hdr->hdr, hdr->hdr_len);
	entry->hdr_len = hdr->hdr_len;
	strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX);
	entry->is_partial = hdr->is_partial;
	entry->type = hdr->type;
	entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
	entry->eth2_ofst = hdr->eth2_ofst;
	entry->cookie = IPA_HDR_COOKIE;

	/* pick the smallest bin that fits the header */
	if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
		bin = IPA_HDR_BIN0;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1])
		bin = IPA_HDR_BIN1;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2])
		bin = IPA_HDR_BIN2;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3])
		bin = IPA_HDR_BIN3;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
		bin = IPA_HDR_BIN4;
	else {
		IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
		goto bad_hdr_len;
	}

	mem_size = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_size) :
		IPA_MEM_PART(apps_hdr_size_ddr);

	if (list_empty(&htbl->head_free_offset_list[bin])) {
		/* if header does not fit to table, place it in DDR */
		if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) {
			entry->is_hdr_proc_ctx = true;
			entry->phys_base = dma_map_single(ipa3_ctx->pdev,
				entry->hdr,
				entry->hdr_len,
				DMA_TO_DEVICE);
			if (dma_mapping_error(ipa3_ctx->pdev,
				entry->phys_base)) {
				IPAERR("dma_map_single failure for entry\n");
				goto fail_dma_mapping;
			}
		} else {
			entry->is_hdr_proc_ctx = false;
			offset = kmem_cache_zalloc(ipa3_ctx->hdr_offset_cache,
				GFP_KERNEL);
			if (!offset) {
				IPAERR("failed to alloc hdr offset object\n");
				goto bad_hdr_len;
			}
			INIT_LIST_HEAD(&offset->link);
			/*
			 * for a first item grow, set the bin and offset which
			 * are set in stone
			 */
			offset->offset = htbl->end;
			offset->bin = bin;
			htbl->end += ipa_hdr_bin_sz[bin];
			list_add(&offset->link,
				&htbl->head_offset_list[bin]);
			entry->offset_entry = offset;
		}
	} else {
		entry->is_hdr_proc_ctx = false;
		/* get the first free slot */
		offset = list_first_entry(&htbl->head_free_offset_list[bin],
			struct ipa_hdr_offset_entry, link);
		list_move(&offset->link, &htbl->head_offset_list[bin]);
		entry->offset_entry = offset;
	}

	list_add(&entry->link, &htbl->head_hdr_entry_list);
	htbl->hdr_cnt++;
	if (entry->is_hdr_proc_ctx)
		IPADBG("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
			hdr->hdr_len,
			htbl->hdr_cnt,
			&entry->phys_base);
	else
		IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
			hdr->hdr_len,
			htbl->hdr_cnt,
			entry->offset_entry->offset);

	id = ipa3_id_alloc(entry);
	if (id < 0) {
		IPAERR("failed to alloc id\n");
		WARN_ON(1);
		goto ipa_insert_failed;
	}
	entry->id = id;
	hdr->hdr_hdl = id;
	entry->ref_cnt++;

	/* DDR-resident headers need an implicit processing context */
	if (entry->is_hdr_proc_ctx) {
		struct ipa_hdr_proc_ctx_add proc_ctx;

		IPADBG("adding processing context for header %s\n", hdr->name);
		proc_ctx.type = IPA_HDR_PROC_NONE;
		proc_ctx.hdr_hdl = id;
		if (__ipa_add_hdr_proc_ctx(&proc_ctx, false)) {
			IPAERR("failed to add hdr proc ctx\n");
			goto fail_add_proc_ctx;
		}
		entry->proc_ctx = ipa3_id_find(proc_ctx.proc_ctx_hdl);
	}

	return 0;

	/* error unwind: labels below intentionally fall through so each
	 * failure point undoes exactly the steps completed before it
	 */
fail_add_proc_ctx:
	entry->ref_cnt--;
	hdr->hdr_hdl = 0;
	ipa3_id_remove(id);
ipa_insert_failed:
	if (entry->is_hdr_proc_ctx) {
		dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
			entry->hdr_len, DMA_TO_DEVICE);
	} else {
		if (offset)
			list_move(&offset->link,
				&htbl->head_free_offset_list[offset->bin]);
		entry->offset_entry = NULL;
	}
	htbl->hdr_cnt--;
	list_del(&entry->link);

fail_dma_mapping:
	entry->is_hdr_proc_ctx = false;

bad_hdr_len:
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_cache, entry);
error:
	return -EPERM;
}
603
/**
 * __ipa3_del_hdr_proc_ctx() - drop a reference on a processing context
 * entry and release it when the count hits zero
 * @proc_ctx_hdl: handle of the processing context to delete
 * @release_hdr: also drop the reference this entry holds on its header
 * @by_user: delete was requested by user space (tracked to reject a
 *           second user-space delete of the same handle)
 *
 * Returns: 0 on success, negative on failure
 */
static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl,
	bool release_hdr, bool by_user)
{
	struct ipa3_hdr_proc_ctx_entry *entry;
	struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;

	entry = ipa3_id_find(proc_ctx_hdl);
	if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	IPADBG("del proc ctx cnt=%d ofst=%d\n",
		htbl->proc_ctx_cnt, entry->offset_entry->offset);

	/* guard against double-delete from user space */
	if (by_user && entry->user_deleted) {
		IPAERR("proc_ctx already deleted by user\n");
		return -EINVAL;
	}

	if (by_user)
		entry->user_deleted = true;

	/* entry stays alive while other holders still reference it */
	if (--entry->ref_cnt) {
		IPADBG("proc_ctx_hdl %x ref_cnt %d\n",
			proc_ctx_hdl, entry->ref_cnt);
		return 0;
	}

	if (release_hdr)
		__ipa3_del_hdr(entry->hdr->id, false);

	/* move the offset entry to appropriate free list */
	list_move(&entry->offset_entry->link,
		&htbl->head_free_offset_list[entry->offset_entry->bin]);
	list_del(&entry->link);
	htbl->proc_ctx_cnt--;
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);

	/* remove the handle from the database */
	ipa3_id_remove(proc_ctx_hdl);

	return 0;
}
649
650
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200651int __ipa3_del_hdr(u32 hdr_hdl, bool by_user)
Amir Levy9659e592016-10-27 18:08:27 +0300652{
653 struct ipa3_hdr_entry *entry;
654 struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
655
656 entry = ipa3_id_find(hdr_hdl);
657 if (entry == NULL) {
658 IPAERR("lookup failed\n");
659 return -EINVAL;
660 }
661
Mohammed Javid93e94ce2017-06-15 15:39:04 +0530662 if (entry->cookie != IPA_HDR_COOKIE) {
Amir Levy9659e592016-10-27 18:08:27 +0300663 IPAERR("bad parm\n");
664 return -EINVAL;
665 }
666
667 if (entry->is_hdr_proc_ctx)
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200668 IPADBG("del hdr of len=%d hdr_cnt=%d phys_base=%pa\n",
Amir Levy9659e592016-10-27 18:08:27 +0300669 entry->hdr_len, htbl->hdr_cnt, &entry->phys_base);
670 else
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200671 IPADBG("del hdr of len=%d hdr_cnt=%d ofst=%d\n",
672 entry->hdr_len, htbl->hdr_cnt,
673 entry->offset_entry->offset);
Amir Levy9659e592016-10-27 18:08:27 +0300674
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200675 if (by_user && entry->user_deleted) {
676 IPAERR("proc_ctx already deleted by user\n");
677 return -EINVAL;
678 }
679
680 if (by_user)
681 entry->user_deleted = true;
682
Amir Levy9659e592016-10-27 18:08:27 +0300683 if (--entry->ref_cnt) {
684 IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt);
685 return 0;
686 }
687
688 if (entry->is_hdr_proc_ctx) {
689 dma_unmap_single(ipa3_ctx->pdev,
690 entry->phys_base,
691 entry->hdr_len,
692 DMA_TO_DEVICE);
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200693 __ipa3_del_hdr_proc_ctx(entry->proc_ctx->id, false, false);
Amir Levy9659e592016-10-27 18:08:27 +0300694 } else {
695 /* move the offset entry to appropriate free list */
696 list_move(&entry->offset_entry->link,
697 &htbl->head_free_offset_list[entry->offset_entry->bin]);
698 }
699 list_del(&entry->link);
700 htbl->hdr_cnt--;
701 entry->cookie = 0;
702 kmem_cache_free(ipa3_ctx->hdr_cache, entry);
703
704 /* remove the handle from the database */
705 ipa3_id_remove(hdr_hdl);
706
707 return 0;
708}
709
710/**
711 * ipa3_add_hdr() - add the specified headers to SW and optionally commit them
712 * to IPA HW
713 * @hdrs: [inout] set of headers to add
714 *
715 * Returns: 0 on success, negative on failure
716 *
717 * Note: Should not be called from atomic context
718 */
719int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs)
720{
721 int i;
722 int result = -EFAULT;
723
724 if (hdrs == NULL || hdrs->num_hdrs == 0) {
725 IPAERR("bad parm\n");
726 return -EINVAL;
727 }
728
729 mutex_lock(&ipa3_ctx->lock);
730 IPADBG("adding %d headers to IPA driver internal data struct\n",
731 hdrs->num_hdrs);
732 for (i = 0; i < hdrs->num_hdrs; i++) {
733 if (__ipa_add_hdr(&hdrs->hdr[i])) {
734 IPAERR("failed to add hdr %d\n", i);
735 hdrs->hdr[i].status = -1;
736 } else {
737 hdrs->hdr[i].status = 0;
738 }
739 }
740
741 if (hdrs->commit) {
742 IPADBG("committing all headers to IPA core");
743 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
744 result = -EPERM;
745 goto bail;
746 }
747 }
748 result = 0;
749bail:
750 mutex_unlock(&ipa3_ctx->lock);
751 return result;
752}
753
754/**
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200755 * ipa3_del_hdr_by_user() - Remove the specified headers
756 * from SW and optionally commit them to IPA HW
Amir Levy9659e592016-10-27 18:08:27 +0300757 * @hdls: [inout] set of headers to delete
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200758 * @by_user: Operation requested by user?
Amir Levy9659e592016-10-27 18:08:27 +0300759 *
760 * Returns: 0 on success, negative on failure
761 *
762 * Note: Should not be called from atomic context
763 */
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200764int ipa3_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user)
Amir Levy9659e592016-10-27 18:08:27 +0300765{
766 int i;
767 int result = -EFAULT;
768
769 if (hdls == NULL || hdls->num_hdls == 0) {
770 IPAERR("bad parm\n");
771 return -EINVAL;
772 }
773
774 mutex_lock(&ipa3_ctx->lock);
775 for (i = 0; i < hdls->num_hdls; i++) {
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200776 if (__ipa3_del_hdr(hdls->hdl[i].hdl, by_user)) {
Amir Levy9659e592016-10-27 18:08:27 +0300777 IPAERR("failed to del hdr %i\n", i);
778 hdls->hdl[i].status = -1;
779 } else {
780 hdls->hdl[i].status = 0;
781 }
782 }
783
784 if (hdls->commit) {
785 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
786 result = -EPERM;
787 goto bail;
788 }
789 }
790 result = 0;
791bail:
792 mutex_unlock(&ipa3_ctx->lock);
793 return result;
794}
795
/**
 * ipa3_del_hdr() - Remove the specified headers from SW
 * and optionally commit them to IPA HW
 * @hdls: [inout] set of headers to delete
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls)
{
	/* thin wrapper: a driver-initiated (non user-space) delete */
	return ipa3_del_hdr_by_user(hdls, false);
}
809
810/**
Amir Levy9659e592016-10-27 18:08:27 +0300811 * ipa3_add_hdr_proc_ctx() - add the specified headers to SW
812 * and optionally commit them to IPA HW
813 * @proc_ctxs: [inout] set of processing context headers to add
814 *
815 * Returns: 0 on success, negative on failure
816 *
817 * Note: Should not be called from atomic context
818 */
819int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
820{
821 int i;
822 int result = -EFAULT;
823
824 if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
825 IPAERR("bad parm\n");
826 return -EINVAL;
827 }
828
829 mutex_lock(&ipa3_ctx->lock);
830 IPADBG("adding %d header processing contextes to IPA driver\n",
831 proc_ctxs->num_proc_ctxs);
832 for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
833 if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
834 IPAERR("failed to add hdr pric ctx %d\n", i);
835 proc_ctxs->proc_ctx[i].status = -1;
836 } else {
837 proc_ctxs->proc_ctx[i].status = 0;
838 }
839 }
840
841 if (proc_ctxs->commit) {
842 IPADBG("committing all headers to IPA core");
843 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
844 result = -EPERM;
845 goto bail;
846 }
847 }
848 result = 0;
849bail:
850 mutex_unlock(&ipa3_ctx->lock);
851 return result;
852}
853
854/**
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200855 * ipa3_del_hdr_proc_ctx_by_user() -
Amir Levy9659e592016-10-27 18:08:27 +0300856 * Remove the specified processing context headers from SW and
857 * optionally commit them to IPA HW.
858 * @hdls: [inout] set of processing context headers to delete
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200859 * @by_user: Operation requested by user?
Amir Levy9659e592016-10-27 18:08:27 +0300860 *
861 * Returns: 0 on success, negative on failure
862 *
863 * Note: Should not be called from atomic context
864 */
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200865int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls,
866 bool by_user)
Amir Levy9659e592016-10-27 18:08:27 +0300867{
868 int i;
869 int result;
870
871 if (hdls == NULL || hdls->num_hdls == 0) {
872 IPAERR("bad parm\n");
873 return -EINVAL;
874 }
875
876 mutex_lock(&ipa3_ctx->lock);
877 for (i = 0; i < hdls->num_hdls; i++) {
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200878 if (__ipa3_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) {
Amir Levy9659e592016-10-27 18:08:27 +0300879 IPAERR("failed to del hdr %i\n", i);
880 hdls->hdl[i].status = -1;
881 } else {
882 hdls->hdl[i].status = 0;
883 }
884 }
885
886 if (hdls->commit) {
887 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
888 result = -EPERM;
889 goto bail;
890 }
891 }
892 result = 0;
893bail:
894 mutex_unlock(&ipa3_ctx->lock);
895 return result;
896}
897
/**
 * ipa3_del_hdr_proc_ctx() -
 * Remove the specified processing context headers from SW and
 * optionally commit them to IPA HW.
 * @hdls: [inout] set of processing context headers to delete
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
{
	/* thin wrapper: a driver-initiated (non user-space) delete */
	return ipa3_del_hdr_proc_ctx_by_user(hdls, false);
}
912
913/**
Amir Levy9659e592016-10-27 18:08:27 +0300914 * ipa3_commit_hdr() - commit to IPA HW the current header table in SW
915 *
916 * Returns: 0 on success, negative on failure
917 *
918 * Note: Should not be called from atomic context
919 */
920int ipa3_commit_hdr(void)
921{
922 int result = -EFAULT;
923
924 /*
925 * issue a commit on the routing module since routing rules point to
926 * header table entries
927 */
928 if (ipa3_commit_rt(IPA_IP_v4))
929 return -EPERM;
930 if (ipa3_commit_rt(IPA_IP_v6))
931 return -EPERM;
932
933 mutex_lock(&ipa3_ctx->lock);
934 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
935 result = -EPERM;
936 goto bail;
937 }
938 result = 0;
939bail:
940 mutex_unlock(&ipa3_ctx->lock);
941 return result;
942}
943
944/**
945 * ipa3_reset_hdr() - reset the current header table in SW (does not commit to
946 * HW)
947 *
948 * Returns: 0 on success, negative on failure
949 *
950 * Note: Should not be called from atomic context
951 */
952int ipa3_reset_hdr(void)
953{
954 struct ipa3_hdr_entry *entry;
955 struct ipa3_hdr_entry *next;
956 struct ipa3_hdr_proc_ctx_entry *ctx_entry;
957 struct ipa3_hdr_proc_ctx_entry *ctx_next;
958 struct ipa_hdr_offset_entry *off_entry;
959 struct ipa_hdr_offset_entry *off_next;
960 struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_entry;
961 struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_next;
962 int i;
963
964 /*
965 * issue a reset on the routing module since routing rules point to
966 * header table entries
967 */
968 if (ipa3_reset_rt(IPA_IP_v4))
969 IPAERR("fail to reset v4 rt\n");
970 if (ipa3_reset_rt(IPA_IP_v6))
971 IPAERR("fail to reset v4 rt\n");
972
973 mutex_lock(&ipa3_ctx->lock);
974 IPADBG("reset hdr\n");
975 list_for_each_entry_safe(entry, next,
976 &ipa3_ctx->hdr_tbl.head_hdr_entry_list, link) {
977
978 /* do not remove the default header */
979 if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
980 if (entry->is_hdr_proc_ctx) {
981 IPAERR("default header is proc ctx\n");
982 mutex_unlock(&ipa3_ctx->lock);
983 WARN_ON(1);
984 return -EFAULT;
985 }
986 continue;
987 }
988
989 if (ipa3_id_find(entry->id) == NULL) {
990 mutex_unlock(&ipa3_ctx->lock);
991 WARN_ON(1);
992 return -EFAULT;
993 }
994 if (entry->is_hdr_proc_ctx) {
995 dma_unmap_single(ipa3_ctx->pdev,
996 entry->phys_base,
997 entry->hdr_len,
998 DMA_TO_DEVICE);
999 entry->proc_ctx = NULL;
1000 }
1001 list_del(&entry->link);
1002 entry->ref_cnt = 0;
1003 entry->cookie = 0;
1004
1005 /* remove the handle from the database */
1006 ipa3_id_remove(entry->id);
1007 kmem_cache_free(ipa3_ctx->hdr_cache, entry);
1008
1009 }
1010 for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
1011 list_for_each_entry_safe(off_entry, off_next,
1012 &ipa3_ctx->hdr_tbl.head_offset_list[i],
1013 link) {
1014
1015 /*
1016 * do not remove the default exception header which is
1017 * at offset 0
1018 */
1019 if (off_entry->offset == 0)
1020 continue;
1021
1022 list_del(&off_entry->link);
1023 kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry);
1024 }
1025 list_for_each_entry_safe(off_entry, off_next,
1026 &ipa3_ctx->hdr_tbl.head_free_offset_list[i],
1027 link) {
1028 list_del(&off_entry->link);
1029 kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry);
1030 }
1031 }
1032 /* there is one header of size 8 */
1033 ipa3_ctx->hdr_tbl.end = 8;
1034 ipa3_ctx->hdr_tbl.hdr_cnt = 1;
1035
1036 IPADBG("reset hdr proc ctx\n");
1037 list_for_each_entry_safe(
1038 ctx_entry,
1039 ctx_next,
1040 &ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
1041 link) {
1042
1043 if (ipa3_id_find(ctx_entry->id) == NULL) {
1044 mutex_unlock(&ipa3_ctx->lock);
1045 WARN_ON(1);
1046 return -EFAULT;
1047 }
1048 list_del(&ctx_entry->link);
1049 ctx_entry->ref_cnt = 0;
1050 ctx_entry->cookie = 0;
1051
1052 /* remove the handle from the database */
1053 ipa3_id_remove(ctx_entry->id);
1054 kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, ctx_entry);
1055
1056 }
1057 for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
1058 list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
1059 &ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i],
1060 link) {
1061
1062 list_del(&ctx_off_entry->link);
1063 kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache,
1064 ctx_off_entry);
1065 }
1066 list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
1067 &ipa3_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i],
1068 link) {
1069 list_del(&ctx_off_entry->link);
1070 kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache,
1071 ctx_off_entry);
1072 }
1073 }
1074 ipa3_ctx->hdr_proc_ctx_tbl.end = 0;
1075 ipa3_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0;
1076 mutex_unlock(&ipa3_ctx->lock);
1077
1078 return 0;
1079}
1080
1081static struct ipa3_hdr_entry *__ipa_find_hdr(const char *name)
1082{
1083 struct ipa3_hdr_entry *entry;
1084
1085 if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
1086 IPAERR("Header name too long: %s\n", name);
1087 return NULL;
1088 }
1089
1090 list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
1091 link) {
1092 if (!strcmp(name, entry->name))
1093 return entry;
1094 }
1095
1096 return NULL;
1097}
1098
1099/**
1100 * ipa3_get_hdr() - Lookup the specified header resource
1101 * @lookup: [inout] header to lookup and its handle
1102 *
1103 * lookup the specified header resource and return handle if it exists
1104 *
1105 * Returns: 0 on success, negative on failure
1106 *
1107 * Note: Should not be called from atomic context
1108 * Caller should call ipa3_put_hdr later if this function succeeds
1109 */
1110int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup)
1111{
1112 struct ipa3_hdr_entry *entry;
1113 int result = -1;
1114
1115 if (lookup == NULL) {
1116 IPAERR("bad parm\n");
1117 return -EINVAL;
1118 }
1119 mutex_lock(&ipa3_ctx->lock);
1120 entry = __ipa_find_hdr(lookup->name);
1121 if (entry) {
1122 lookup->hdl = entry->id;
1123 result = 0;
1124 }
1125 mutex_unlock(&ipa3_ctx->lock);
1126
1127 return result;
1128}
1129
1130/**
1131 * __ipa3_release_hdr() - drop reference to header and cause
1132 * deletion if reference count permits
1133 * @hdr_hdl: [in] handle of header to be released
1134 *
1135 * Returns: 0 on success, negative on failure
1136 */
1137int __ipa3_release_hdr(u32 hdr_hdl)
1138{
1139 int result = 0;
1140
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001141 if (__ipa3_del_hdr(hdr_hdl, false)) {
Amir Levy9659e592016-10-27 18:08:27 +03001142 IPADBG("fail to del hdr %x\n", hdr_hdl);
1143 result = -EFAULT;
1144 goto bail;
1145 }
1146
1147 /* commit for put */
1148 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
1149 IPAERR("fail to commit hdr\n");
1150 result = -EFAULT;
1151 goto bail;
1152 }
1153
1154bail:
1155 return result;
1156}
1157
1158/**
1159 * __ipa3_release_hdr_proc_ctx() - drop reference to processing context
1160 * and cause deletion if reference count permits
1161 * @proc_ctx_hdl: [in] handle of processing context to be released
1162 *
1163 * Returns: 0 on success, negative on failure
1164 */
1165int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl)
1166{
1167 int result = 0;
1168
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001169 if (__ipa3_del_hdr_proc_ctx(proc_ctx_hdl, true, false)) {
Amir Levy9659e592016-10-27 18:08:27 +03001170 IPADBG("fail to del hdr %x\n", proc_ctx_hdl);
1171 result = -EFAULT;
1172 goto bail;
1173 }
1174
1175 /* commit for put */
1176 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
1177 IPAERR("fail to commit hdr\n");
1178 result = -EFAULT;
1179 goto bail;
1180 }
1181
1182bail:
1183 return result;
1184}
1185
1186/**
1187 * ipa3_put_hdr() - Release the specified header handle
1188 * @hdr_hdl: [in] the header handle to release
1189 *
1190 * Returns: 0 on success, negative on failure
1191 *
1192 * Note: Should not be called from atomic context
1193 */
1194int ipa3_put_hdr(u32 hdr_hdl)
1195{
1196 struct ipa3_hdr_entry *entry;
1197 int result = -EFAULT;
1198
1199 mutex_lock(&ipa3_ctx->lock);
1200
1201 entry = ipa3_id_find(hdr_hdl);
1202 if (entry == NULL) {
1203 IPAERR("lookup failed\n");
1204 result = -EINVAL;
1205 goto bail;
1206 }
1207
Mohammed Javid93e94ce2017-06-15 15:39:04 +05301208 if (entry->cookie != IPA_HDR_COOKIE) {
Amir Levy9659e592016-10-27 18:08:27 +03001209 IPAERR("invalid header entry\n");
1210 result = -EINVAL;
1211 goto bail;
1212 }
1213
1214 result = 0;
1215bail:
1216 mutex_unlock(&ipa3_ctx->lock);
1217 return result;
1218}
1219
1220/**
1221 * ipa3_copy_hdr() - Lookup the specified header resource and return a copy of
1222 * it
1223 * @copy: [inout] header to lookup and its copy
1224 *
1225 * lookup the specified header resource and return a copy of it (along with its
1226 * attributes) if it exists, this would be called for partial headers
1227 *
1228 * Returns: 0 on success, negative on failure
1229 *
1230 * Note: Should not be called from atomic context
1231 */
1232int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy)
1233{
1234 struct ipa3_hdr_entry *entry;
1235 int result = -EFAULT;
1236
1237 if (copy == NULL) {
1238 IPAERR("bad parm\n");
1239 return -EINVAL;
1240 }
1241 mutex_lock(&ipa3_ctx->lock);
1242 entry = __ipa_find_hdr(copy->name);
1243 if (entry) {
1244 memcpy(copy->hdr, entry->hdr, entry->hdr_len);
1245 copy->hdr_len = entry->hdr_len;
1246 copy->type = entry->type;
1247 copy->is_partial = entry->is_partial;
1248 copy->is_eth2_ofst_valid = entry->is_eth2_ofst_valid;
1249 copy->eth2_ofst = entry->eth2_ofst;
1250 result = 0;
1251 }
1252 mutex_unlock(&ipa3_ctx->lock);
1253
1254 return result;
1255}