/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "ipa_i.h"
#include "ipahal/ipahal.h"

static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 64 };
static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64 };

#define HDR_TYPE_IS_VALID(type) \
	((type) >= 0 && (type) < IPA_HDR_L2_MAX)

#define HDR_PROC_TYPE_IS_VALID(type) \
	((type) >= 0 && (type) < IPA_HDR_PROC_MAX)

/**
 * ipa3_generate_hdr_hw_tbl() - generates the headers table
 * @mem: [out] buffer to put the header table
 *
 * Returns: 0 on success, negative on failure
 */
static int ipa3_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
{
	struct ipa3_hdr_entry *entry;

	mem->size = ipa3_ctx->hdr_tbl.end;

	if (mem->size == 0) {
		IPAERR("hdr tbl empty\n");
		return -EPERM;
	}
	IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_tbl.end);

	mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
		&mem->phys_base, GFP_KERNEL);
	if (!mem->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
		return -ENOMEM;
	}

	memset(mem->base, 0, mem->size);
	list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
			link) {
		if (entry->is_hdr_proc_ctx)
			continue;
		IPADBG_LOW("hdr of len %d ofst=%d\n", entry->hdr_len,
			entry->offset_entry->offset);
		ipahal_cp_hdr_to_hw_buff(mem->base, entry->offset_entry->offset,
			entry->hdr, entry->hdr_len);
	}

	return 0;
}

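/**
 * ipa3_hdr_proc_ctx_to_hw_format() - copy the SW processing context entries
 * into the HW format buffer
 * @mem: [in] buffer holding the processing context table
 * @hdr_base_addr: [in] base address of the header table the contexts refer to
 *
 * Returns: 0 on success, negative on failure
 */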
static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
	u32 hdr_base_addr)
{
	struct ipa3_hdr_proc_ctx_entry *entry;
	int ret;

	list_for_each_entry(entry,
			&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
			link) {
		IPADBG_LOW("processing type %d ofst=%d\n",
			entry->type, entry->offset_entry->offset);
		ret = ipahal_cp_proc_ctx_to_hw_buff(entry->type, mem->base,
				entry->offset_entry->offset,
				entry->hdr->hdr_len,
				entry->hdr->is_hdr_proc_ctx,
				entry->hdr->phys_base,
				hdr_base_addr,
				entry->hdr->offset_entry,
				entry->l2tp_params);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * ipa3_generate_hdr_proc_ctx_hw_tbl() -
 * generates the headers processing context table.
 * @hdr_sys_addr: [in] system memory address of the header table
 * @mem: [out] buffer to put the processing context table
 * @aligned_mem: [out] actual processing context table (with alignment).
 * Processing context table needs to be 8 Bytes aligned.
 *
 * Returns: 0 on success, negative on failure
 */
static int ipa3_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr,
	struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem)
{
	u32 hdr_base_addr;

	mem->size = (ipa3_ctx->hdr_proc_ctx_tbl.end) ? : 4;

	/* make sure table is aligned */
	mem->size += IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;

	IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_proc_ctx_tbl.end);

	mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
		&mem->phys_base, GFP_KERNEL);
	if (!mem->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
		return -ENOMEM;
	}

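	/*
	 * The allocation was padded by IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE
	 * above, so the physical base can be rounded up to the required
	 * alignment while staying inside the buffer; the aligned view's base
	 * and size are adjusted by the same delta.
	 */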
	aligned_mem->phys_base =
		IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(mem->phys_base);
	aligned_mem->base = mem->base +
		(aligned_mem->phys_base - mem->phys_base);
	aligned_mem->size = mem->size - IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
	memset(aligned_mem->base, 0, aligned_mem->size);
	hdr_base_addr = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_ofst) :
		hdr_sys_addr;
	return ipa3_hdr_proc_ctx_to_hw_format(aligned_mem, hdr_base_addr);
}

/**
 * __ipa_commit_hdr_v3_0() - Commits the header table from memory to HW
 *
 * Returns: 0 on success, negative on failure
 */
int __ipa_commit_hdr_v3_0(void)
{
	struct ipa3_desc desc[2];
	struct ipa_mem_buffer hdr_mem;
	struct ipa_mem_buffer ctx_mem;
	struct ipa_mem_buffer aligned_ctx_mem;
	struct ipahal_imm_cmd_dma_shared_mem dma_cmd_hdr = {0};
	struct ipahal_imm_cmd_dma_shared_mem dma_cmd_ctx = {0};
	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
	struct ipahal_imm_cmd_hdr_init_system hdr_init_cmd = {0};
	struct ipahal_imm_cmd_pyld *hdr_cmd_pyld = NULL;
	struct ipahal_imm_cmd_pyld *ctx_cmd_pyld = NULL;
	int rc = -EFAULT;
	u32 proc_ctx_size;
	u32 proc_ctx_ofst;
	u32 proc_ctx_size_ddr;

	memset(desc, 0, 2 * sizeof(struct ipa3_desc));

	if (ipa3_generate_hdr_hw_tbl(&hdr_mem)) {
		IPAERR("fail to generate HDR HW TBL\n");
		goto end;
	}

	if (ipa3_generate_hdr_proc_ctx_hw_tbl(hdr_mem.phys_base, &ctx_mem,
			&aligned_ctx_mem)) {
		IPAERR("fail to generate HDR PROC CTX HW TBL\n");
		goto end;
	}

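	/*
	 * Header table: when it lives in local IPA SRAM, a DMA_SHARED_MEM
	 * immediate command copies the generated table into SRAM; otherwise a
	 * HDR_INIT_SYSTEM command points the HW at the table in system memory.
	 */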
	if (ipa3_ctx->hdr_tbl_lcl) {
		if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) {
			IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
				IPA_MEM_PART(apps_hdr_size));
			goto end;
		} else {
			dma_cmd_hdr.is_read = false; /* write operation */
			dma_cmd_hdr.skip_pipeline_clear = false;
			dma_cmd_hdr.pipeline_clear_options = IPAHAL_HPS_CLEAR;
			dma_cmd_hdr.system_addr = hdr_mem.phys_base;
			dma_cmd_hdr.size = hdr_mem.size;
			dma_cmd_hdr.local_addr =
				ipa3_ctx->smem_restricted_bytes +
				IPA_MEM_PART(apps_hdr_ofst);
			hdr_cmd_pyld = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_DMA_SHARED_MEM,
				&dma_cmd_hdr, false);
			if (!hdr_cmd_pyld) {
				IPAERR("fail construct dma_shared_mem cmd\n");
				goto end;
			}
			desc[0].opcode = hdr_cmd_pyld->opcode;
			desc[0].pyld = hdr_cmd_pyld->data;
			desc[0].len = hdr_cmd_pyld->len;
		}
	} else {
		if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
			IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
				IPA_MEM_PART(apps_hdr_size_ddr));
			goto end;
		} else {
			hdr_init_cmd.hdr_table_addr = hdr_mem.phys_base;
			hdr_cmd_pyld = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_HDR_INIT_SYSTEM,
				&hdr_init_cmd, false);
			if (!hdr_cmd_pyld) {
				IPAERR("fail construct hdr_init_system cmd\n");
				goto end;
			}
			desc[0].opcode = hdr_cmd_pyld->opcode;
			desc[0].pyld = hdr_cmd_pyld->data;
			desc[0].len = hdr_cmd_pyld->len;
		}
	}
	desc[0].type = IPA_IMM_CMD_DESC;
	IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);

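	/*
	 * Processing context table: same local vs. system memory split. When
	 * local, DMA the table into SRAM; otherwise write its system address
	 * into the IPA_SYS_PKT_PROC_CNTXT_BASE register.
	 */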
	proc_ctx_size = IPA_MEM_PART(apps_hdr_proc_ctx_size);
	proc_ctx_ofst = IPA_MEM_PART(apps_hdr_proc_ctx_ofst);
	if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
		if (aligned_ctx_mem.size > proc_ctx_size) {
			IPAERR("tbl too big needed %d avail %d\n",
				aligned_ctx_mem.size,
				proc_ctx_size);
			goto end;
		} else {
			dma_cmd_ctx.is_read = false; /* Write operation */
			dma_cmd_ctx.skip_pipeline_clear = false;
			dma_cmd_ctx.pipeline_clear_options = IPAHAL_HPS_CLEAR;
			dma_cmd_ctx.system_addr = aligned_ctx_mem.phys_base;
			dma_cmd_ctx.size = aligned_ctx_mem.size;
			dma_cmd_ctx.local_addr =
				ipa3_ctx->smem_restricted_bytes +
				proc_ctx_ofst;
			ctx_cmd_pyld = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_DMA_SHARED_MEM,
				&dma_cmd_ctx, false);
			if (!ctx_cmd_pyld) {
				IPAERR("fail construct dma_shared_mem cmd\n");
				goto end;
			}
			desc[1].opcode = ctx_cmd_pyld->opcode;
			desc[1].pyld = ctx_cmd_pyld->data;
			desc[1].len = ctx_cmd_pyld->len;
		}
	} else {
		proc_ctx_size_ddr = IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
		if (aligned_ctx_mem.size > proc_ctx_size_ddr) {
			IPAERR("tbl too big, needed %d avail %d\n",
				aligned_ctx_mem.size,
				proc_ctx_size_ddr);
			goto end;
		} else {
			reg_write_cmd.skip_pipeline_clear = false;
			reg_write_cmd.pipeline_clear_options =
				IPAHAL_HPS_CLEAR;
			reg_write_cmd.offset =
				ipahal_get_reg_ofst(
					IPA_SYS_PKT_PROC_CNTXT_BASE);
			reg_write_cmd.value = aligned_ctx_mem.phys_base;
			reg_write_cmd.value_mask =
				~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1);
			ctx_cmd_pyld = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_REGISTER_WRITE,
				&reg_write_cmd, false);
			if (!ctx_cmd_pyld) {
				IPAERR("fail construct register_write cmd\n");
				goto end;
			}
			desc[1].opcode = ctx_cmd_pyld->opcode;
			desc[1].pyld = ctx_cmd_pyld->data;
			desc[1].len = ctx_cmd_pyld->len;
		}
	}
	desc[1].type = IPA_IMM_CMD_DESC;
	IPA_DUMP_BUFF(ctx_mem.base, ctx_mem.phys_base, ctx_mem.size);

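	/* send both immediate commands (header and proc ctx tables) to HW */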
	if (ipa3_send_cmd(2, desc))
		IPAERR("fail to send immediate command\n");
	else
		rc = 0;

	if (ipa3_ctx->hdr_tbl_lcl) {
		dma_free_coherent(ipa3_ctx->pdev, hdr_mem.size, hdr_mem.base,
			hdr_mem.phys_base);
	} else {
		if (!rc) {
			if (ipa3_ctx->hdr_mem.phys_base)
				dma_free_coherent(ipa3_ctx->pdev,
					ipa3_ctx->hdr_mem.size,
					ipa3_ctx->hdr_mem.base,
					ipa3_ctx->hdr_mem.phys_base);
			ipa3_ctx->hdr_mem = hdr_mem;
		}
	}

	if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
		dma_free_coherent(ipa3_ctx->pdev, ctx_mem.size, ctx_mem.base,
			ctx_mem.phys_base);
	} else {
		if (!rc) {
			if (ipa3_ctx->hdr_proc_ctx_mem.phys_base)
				dma_free_coherent(ipa3_ctx->pdev,
					ipa3_ctx->hdr_proc_ctx_mem.size,
					ipa3_ctx->hdr_proc_ctx_mem.base,
					ipa3_ctx->hdr_proc_ctx_mem.phys_base);
			ipa3_ctx->hdr_proc_ctx_mem = ctx_mem;
		}
	}

end:
	if (ctx_cmd_pyld)
		ipahal_destroy_imm_cmd(ctx_cmd_pyld);

	if (hdr_cmd_pyld)
		ipahal_destroy_imm_cmd(hdr_cmd_pyld);

	return rc;
}

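/**
 * __ipa_add_hdr_proc_ctx() - add a single processing context entry to the SW
 * processing context table
 * @proc_ctx: [inout] processing context to add; proc_ctx_hdl is set on success
 * @add_ref_hdr: [in] whether to take a reference on the associated header
 *
 * Returns: 0 on success, negative on failure
 */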
static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
	bool add_ref_hdr)
{
	struct ipa3_hdr_entry *hdr_entry;
	struct ipa3_hdr_proc_ctx_entry *entry;
	struct ipa3_hdr_proc_ctx_offset_entry *offset;
	u32 bin;
	struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;
	int id;
	int needed_len;
	int mem_size;

	IPADBG_LOW("Add processing type %d hdr_hdl %d\n",
		proc_ctx->type, proc_ctx->hdr_hdl);

	if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
		IPAERR_RL("invalid processing type %d\n", proc_ctx->type);
		return -EINVAL;
	}

	hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl);
	if (!hdr_entry) {
		IPAERR_RL("hdr_hdl is invalid\n");
		return -EINVAL;
	}
	if (hdr_entry->cookie != IPA_HDR_COOKIE) {
		IPAERR_RL("Invalid header cookie %u\n", hdr_entry->cookie);
		WARN_ON(1);
		return -EINVAL;
	}
	IPADBG("Associated header name=%s is_hdr_proc_ctx=%d\n",
		hdr_entry->name, hdr_entry->is_hdr_proc_ctx);

	entry = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_cache, GFP_KERNEL);
	if (!entry) {
		IPAERR("failed to alloc proc_ctx object\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&entry->link);

	entry->type = proc_ctx->type;
	entry->hdr = hdr_entry;
	entry->l2tp_params = proc_ctx->l2tp_params;
	if (add_ref_hdr)
		hdr_entry->ref_cnt++;
	entry->cookie = IPA_PROC_HDR_COOKIE;

	needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type);

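	/*
	 * Pick the smallest processing context bin that fits the needed
	 * length; entries of the same bin share a free-offset list so table
	 * slots can be reused after deletion.
	 */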
	if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0]) {
		bin = IPA_HDR_PROC_CTX_BIN0;
	} else if (needed_len <=
		ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
		bin = IPA_HDR_PROC_CTX_BIN1;
	} else {
		IPAERR_RL("unexpected needed len %d\n", needed_len);
		WARN_ON(1);
		goto bad_len;
	}

	mem_size = (ipa3_ctx->hdr_proc_ctx_tbl_lcl) ?
		IPA_MEM_PART(apps_hdr_proc_ctx_size) :
		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
	if (list_empty(&htbl->head_free_offset_list[bin])) {
		if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
			IPAERR_RL("hdr proc ctx table overflow\n");
			goto bad_len;
		}

		offset = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_offset_cache,
			GFP_KERNEL);
		if (!offset) {
			IPAERR("failed to alloc offset object\n");
			goto bad_len;
		}
		INIT_LIST_HEAD(&offset->link);
		/*
		 * for a first item grow, set the bin and offset which are set
		 * in stone
		 */
		offset->offset = htbl->end;
		offset->bin = bin;
		htbl->end += ipa_hdr_proc_ctx_bin_sz[bin];
		list_add(&offset->link,
			&htbl->head_offset_list[bin]);
	} else {
		/* get the first free slot */
		offset =
			list_first_entry(&htbl->head_free_offset_list[bin],
			struct ipa3_hdr_proc_ctx_offset_entry, link);
		list_move(&offset->link, &htbl->head_offset_list[bin]);
	}

	entry->offset_entry = offset;
	list_add(&entry->link, &htbl->head_proc_ctx_entry_list);
	htbl->proc_ctx_cnt++;
	IPADBG("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
		htbl->proc_ctx_cnt, offset->offset);

	id = ipa3_id_alloc(entry);
	if (id < 0) {
		IPAERR("failed to alloc id\n");
		WARN_ON(1);
		goto ipa_insert_failed;
	}
	entry->id = id;
	proc_ctx->proc_ctx_hdl = id;
	entry->ref_cnt++;

	return 0;

ipa_insert_failed:
	list_move(&offset->link,
		&htbl->head_free_offset_list[offset->bin]);
	entry->offset_entry = NULL;
	list_del(&entry->link);
	htbl->proc_ctx_cnt--;

bad_len:
	if (add_ref_hdr)
		hdr_entry->ref_cnt--;
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);
	return -EPERM;
}


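/**
 * __ipa_add_hdr() - add a single header entry to the SW header table
 * @hdr: [inout] header to add; hdr_hdl is set on success
 *
 * Headers that no longer fit in the table are DMA-mapped individually and get
 * a processing context instead of a table offset.
 *
 * Returns: 0 on success, negative on failure
 */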
static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
{
	struct ipa3_hdr_entry *entry;
	struct ipa_hdr_offset_entry *offset = NULL;
	u32 bin;
	struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
	int id;
	int mem_size;

	if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
		IPAERR_RL("bad parm\n");
		goto error;
	}

	if (!HDR_TYPE_IS_VALID(hdr->type)) {
		IPAERR_RL("invalid hdr type %d\n", hdr->type);
		goto error;
	}

	entry = kmem_cache_zalloc(ipa3_ctx->hdr_cache, GFP_KERNEL);
	if (!entry) {
		IPAERR("failed to alloc hdr object\n");
		goto error;
	}

	INIT_LIST_HEAD(&entry->link);

	memcpy(entry->hdr, hdr->hdr, hdr->hdr_len);
	entry->hdr_len = hdr->hdr_len;
	strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX);
	entry->is_partial = hdr->is_partial;
	entry->type = hdr->type;
	entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
	entry->eth2_ofst = hdr->eth2_ofst;
	entry->cookie = IPA_HDR_COOKIE;

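	/* pick the smallest header bin that can hold hdr_len */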
	if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
		bin = IPA_HDR_BIN0;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1])
		bin = IPA_HDR_BIN1;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2])
		bin = IPA_HDR_BIN2;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3])
		bin = IPA_HDR_BIN3;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
		bin = IPA_HDR_BIN4;
	else {
		IPAERR_RL("unexpected hdr len %d\n", hdr->hdr_len);
		goto bad_hdr_len;
	}

	mem_size = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_size) :
		IPA_MEM_PART(apps_hdr_size_ddr);

	if (list_empty(&htbl->head_free_offset_list[bin])) {
		/* if header does not fit to table, place it in DDR */
		if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) {
			entry->is_hdr_proc_ctx = true;
			entry->phys_base = dma_map_single(ipa3_ctx->pdev,
				entry->hdr,
				entry->hdr_len,
				DMA_TO_DEVICE);
			if (dma_mapping_error(ipa3_ctx->pdev,
				entry->phys_base)) {
				IPAERR("dma_map_single failure for entry\n");
				goto fail_dma_mapping;
			}
		} else {
			entry->is_hdr_proc_ctx = false;
			offset = kmem_cache_zalloc(ipa3_ctx->hdr_offset_cache,
				GFP_KERNEL);
			if (!offset) {
				IPAERR("failed to alloc hdr offset object\n");
				goto bad_hdr_len;
			}
			INIT_LIST_HEAD(&offset->link);
			/*
			 * for a first item grow, set the bin and offset which
			 * are set in stone
			 */
			offset->offset = htbl->end;
			offset->bin = bin;
			htbl->end += ipa_hdr_bin_sz[bin];
			list_add(&offset->link,
				&htbl->head_offset_list[bin]);
			entry->offset_entry = offset;
		}
	} else {
		entry->is_hdr_proc_ctx = false;
		/* get the first free slot */
		offset = list_first_entry(&htbl->head_free_offset_list[bin],
			struct ipa_hdr_offset_entry, link);
		list_move(&offset->link, &htbl->head_offset_list[bin]);
		entry->offset_entry = offset;
	}

	list_add(&entry->link, &htbl->head_hdr_entry_list);
	htbl->hdr_cnt++;
	if (entry->is_hdr_proc_ctx)
		IPADBG("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
			hdr->hdr_len,
			htbl->hdr_cnt,
			&entry->phys_base);
	else
		IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
			hdr->hdr_len,
			htbl->hdr_cnt,
			entry->offset_entry->offset);

	id = ipa3_id_alloc(entry);
	if (id < 0) {
		IPAERR("failed to alloc id\n");
		WARN_ON(1);
		goto ipa_insert_failed;
	}
	entry->id = id;
	hdr->hdr_hdl = id;
	entry->ref_cnt++;

	if (entry->is_hdr_proc_ctx) {
		struct ipa_hdr_proc_ctx_add proc_ctx;

		IPADBG("adding processing context for header %s\n", hdr->name);
		proc_ctx.type = IPA_HDR_PROC_NONE;
		proc_ctx.hdr_hdl = id;
		if (__ipa_add_hdr_proc_ctx(&proc_ctx, false)) {
			IPAERR("failed to add hdr proc ctx\n");
			goto fail_add_proc_ctx;
		}
		entry->proc_ctx = ipa3_id_find(proc_ctx.proc_ctx_hdl);
	}

	return 0;

fail_add_proc_ctx:
	entry->ref_cnt--;
	hdr->hdr_hdl = 0;
	ipa3_id_remove(id);
ipa_insert_failed:
	if (entry->is_hdr_proc_ctx) {
		dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
			entry->hdr_len, DMA_TO_DEVICE);
	} else {
		if (offset)
			list_move(&offset->link,
				&htbl->head_free_offset_list[offset->bin]);
		entry->offset_entry = NULL;
	}
	htbl->hdr_cnt--;
	list_del(&entry->link);

fail_dma_mapping:
	entry->is_hdr_proc_ctx = false;

bad_hdr_len:
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_cache, entry);
error:
	return -EPERM;
}

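/**
 * __ipa3_del_hdr_proc_ctx() - drop a reference to a processing context entry
 * and delete it once the reference count reaches zero
 * @proc_ctx_hdl: [in] handle of the processing context
 * @release_hdr: [in] also drop the reference on the associated header
 * @by_user: [in] operation requested by user?
 *
 * Returns: 0 on success, negative on failure
 */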
static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl,
	bool release_hdr, bool by_user)
{
	struct ipa3_hdr_proc_ctx_entry *entry;
	struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;

	entry = ipa3_id_find(proc_ctx_hdl);
	if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	IPADBG("del proc ctx cnt=%d ofst=%d\n",
		htbl->proc_ctx_cnt, entry->offset_entry->offset);

	if (by_user && entry->user_deleted) {
		IPAERR_RL("proc_ctx already deleted by user\n");
		return -EINVAL;
	}

	if (by_user)
		entry->user_deleted = true;

	if (--entry->ref_cnt) {
		IPADBG("proc_ctx_hdl %x ref_cnt %d\n",
			proc_ctx_hdl, entry->ref_cnt);
		return 0;
	}

	if (release_hdr)
		__ipa3_del_hdr(entry->hdr->id, false);

	/* move the offset entry to appropriate free list */
	list_move(&entry->offset_entry->link,
		&htbl->head_free_offset_list[entry->offset_entry->bin]);
	list_del(&entry->link);
	htbl->proc_ctx_cnt--;
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);

	/* remove the handle from the database */
	ipa3_id_remove(proc_ctx_hdl);

	return 0;
}


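/**
 * __ipa3_del_hdr() - drop a reference to a header entry and delete it once
 * the reference count reaches zero
 * @hdr_hdl: [in] handle of the header
 * @by_user: [in] operation requested by user?
 *
 * Returns: 0 on success, negative on failure
 */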
int __ipa3_del_hdr(u32 hdr_hdl, bool by_user)
{
	struct ipa3_hdr_entry *entry;
	struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;

	entry = ipa3_id_find(hdr_hdl);
	if (entry == NULL) {
		IPAERR_RL("lookup failed\n");
		return -EINVAL;
	}

	if (entry->cookie != IPA_HDR_COOKIE) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	if (entry->is_hdr_proc_ctx)
		IPADBG("del hdr of len=%d hdr_cnt=%d phys_base=%pa\n",
			entry->hdr_len, htbl->hdr_cnt, &entry->phys_base);
	else
		IPADBG("del hdr of len=%d hdr_cnt=%d ofst=%d\n",
			entry->hdr_len, htbl->hdr_cnt,
			entry->offset_entry->offset);

	if (by_user && entry->user_deleted) {
		IPAERR_RL("hdr already deleted by user\n");
		return -EINVAL;
	}

	if (by_user)
		entry->user_deleted = true;

	if (--entry->ref_cnt) {
		IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt);
		return 0;
	}

	if (entry->is_hdr_proc_ctx) {
		dma_unmap_single(ipa3_ctx->pdev,
			entry->phys_base,
			entry->hdr_len,
			DMA_TO_DEVICE);
		__ipa3_del_hdr_proc_ctx(entry->proc_ctx->id, false, false);
	} else {
		/* move the offset entry to appropriate free list */
		list_move(&entry->offset_entry->link,
			&htbl->head_free_offset_list[entry->offset_entry->bin]);
	}
	list_del(&entry->link);
	htbl->hdr_cnt--;
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_cache, entry);

	/* remove the handle from the database */
	ipa3_id_remove(hdr_hdl);

	return 0;
}

/**
 * ipa3_add_hdr() - add the specified headers to SW and optionally commit them
 * to IPA HW
 * @hdrs: [inout] set of headers to add
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs)
{
	int i;
	int result = -EFAULT;

	if (hdrs == NULL || hdrs->num_hdrs == 0) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	IPADBG("adding %d headers to IPA driver internal data struct\n",
		hdrs->num_hdrs);
	for (i = 0; i < hdrs->num_hdrs; i++) {
		if (__ipa_add_hdr(&hdrs->hdr[i])) {
			IPAERR_RL("failed to add hdr %d\n", i);
			hdrs->hdr[i].status = -1;
		} else {
			hdrs->hdr[i].status = 0;
		}
	}

	if (hdrs->commit) {
		IPADBG("committing all headers to IPA core\n");
		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
			result = -EPERM;
			goto bail;
		}
	}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}

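/*
 * Illustrative usage sketch (not part of this driver): a caller would
 * typically allocate an ipa_ioc_add_hdr with room for one ipa_hdr_add, fill
 * in name, hdr, hdr_len and commit, call ipa3_add_hdr(), and read back the
 * handle from hdr_hdl. The buffer, header bytes and handle variable below
 * are hypothetical:
 *
 *	struct ipa_ioc_add_hdr *req;
 *
 *	req = kzalloc(sizeof(*req) + sizeof(struct ipa_hdr_add), GFP_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *	req->num_hdrs = 1;
 *	req->commit = 1;
 *	strlcpy(req->hdr[0].name, "my_hdr", IPA_RESOURCE_NAME_MAX);
 *	memcpy(req->hdr[0].hdr, my_hdr_bytes, my_hdr_len);
 *	req->hdr[0].hdr_len = my_hdr_len;
 *	if (!ipa3_add_hdr(req) && !req->hdr[0].status)
 *		my_hdr_hdl = req->hdr[0].hdr_hdl;
 *	kfree(req);
 */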
/**
 * ipa3_del_hdr_by_user() - Remove the specified headers
 * from SW and optionally commit them to IPA HW
 * @hdls: [inout] set of headers to delete
 * @by_user: Operation requested by user?
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user)
{
	int i;
	int result = -EFAULT;

	if (hdls == NULL || hdls->num_hdls == 0) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < hdls->num_hdls; i++) {
		if (__ipa3_del_hdr(hdls->hdl[i].hdl, by_user)) {
			IPAERR_RL("failed to del hdr %i\n", i);
			hdls->hdl[i].status = -1;
		} else {
			hdls->hdl[i].status = 0;
		}
	}

	if (hdls->commit) {
		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
			result = -EPERM;
			goto bail;
		}
	}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}

/**
 * ipa3_del_hdr() - Remove the specified headers from SW
 * and optionally commit them to IPA HW
 * @hdls: [inout] set of headers to delete
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls)
{
	return ipa3_del_hdr_by_user(hdls, false);
}

/**
 * ipa3_add_hdr_proc_ctx() - add the specified headers to SW
 * and optionally commit them to IPA HW
 * @proc_ctxs: [inout] set of processing context headers to add
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
{
	int i;
	int result = -EFAULT;

	if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	IPADBG("adding %d header processing contexts to IPA driver\n",
		proc_ctxs->num_proc_ctxs);
	for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
		if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
			IPAERR_RL("failed to add hdr proc ctx %d\n", i);
			proc_ctxs->proc_ctx[i].status = -1;
		} else {
			proc_ctxs->proc_ctx[i].status = 0;
		}
	}

	if (proc_ctxs->commit) {
		IPADBG("committing all headers to IPA core\n");
		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
			result = -EPERM;
			goto bail;
		}
	}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}

/**
 * ipa3_del_hdr_proc_ctx_by_user() -
 * Remove the specified processing context headers from SW and
 * optionally commit them to IPA HW.
 * @hdls: [inout] set of processing context headers to delete
 * @by_user: Operation requested by user?
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls,
	bool by_user)
{
	int i;
	int result;

	if (hdls == NULL || hdls->num_hdls == 0) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < hdls->num_hdls; i++) {
		if (__ipa3_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) {
			IPAERR_RL("failed to del hdr proc ctx %i\n", i);
			hdls->hdl[i].status = -1;
		} else {
			hdls->hdl[i].status = 0;
		}
	}

	if (hdls->commit) {
		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
			result = -EPERM;
			goto bail;
		}
	}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}

/**
 * ipa3_del_hdr_proc_ctx() -
 * Remove the specified processing context headers from SW and
 * optionally commit them to IPA HW.
 * @hdls: [inout] set of processing context headers to delete
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
{
	return ipa3_del_hdr_proc_ctx_by_user(hdls, false);
}

/**
 * ipa3_commit_hdr() - commit to IPA HW the current header table in SW
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_commit_hdr(void)
{
	int result = -EFAULT;

	/*
	 * issue a commit on the routing module since routing rules point to
	 * header table entries
	 */
	if (ipa3_commit_rt(IPA_IP_v4))
		return -EPERM;
	if (ipa3_commit_rt(IPA_IP_v6))
		return -EPERM;

	mutex_lock(&ipa3_ctx->lock);
	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
		result = -EPERM;
		goto bail;
	}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}

/**
 * ipa3_reset_hdr() - reset the current header table in SW (does not commit to
 * HW)
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_reset_hdr(void)
{
	struct ipa3_hdr_entry *entry;
	struct ipa3_hdr_entry *next;
	struct ipa3_hdr_proc_ctx_entry *ctx_entry;
	struct ipa3_hdr_proc_ctx_entry *ctx_next;
	struct ipa_hdr_offset_entry *off_entry;
	struct ipa_hdr_offset_entry *off_next;
	struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_entry;
	struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_next;
	int i;

	/*
	 * issue a reset on the routing module since routing rules point to
	 * header table entries
	 */
	if (ipa3_reset_rt(IPA_IP_v4))
		IPAERR("fail to reset v4 rt\n");
	if (ipa3_reset_rt(IPA_IP_v6))
		IPAERR("fail to reset v6 rt\n");

	mutex_lock(&ipa3_ctx->lock);
	IPADBG("reset hdr\n");
	list_for_each_entry_safe(entry, next,
		&ipa3_ctx->hdr_tbl.head_hdr_entry_list, link) {

		/* do not remove the default header */
		if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
			if (entry->is_hdr_proc_ctx) {
				IPAERR("default header is proc ctx\n");
				mutex_unlock(&ipa3_ctx->lock);
				WARN_ON(1);
				return -EFAULT;
			}
			continue;
		}

		if (ipa3_id_find(entry->id) == NULL) {
			mutex_unlock(&ipa3_ctx->lock);
			WARN_ON(1);
			return -EFAULT;
		}
		if (entry->is_hdr_proc_ctx) {
			dma_unmap_single(ipa3_ctx->pdev,
				entry->phys_base,
				entry->hdr_len,
				DMA_TO_DEVICE);
			entry->proc_ctx = NULL;
		}
		list_del(&entry->link);
		entry->ref_cnt = 0;
		entry->cookie = 0;

		/* remove the handle from the database */
		ipa3_id_remove(entry->id);
		kmem_cache_free(ipa3_ctx->hdr_cache, entry);

	}
	for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
		list_for_each_entry_safe(off_entry, off_next,
			&ipa3_ctx->hdr_tbl.head_offset_list[i],
			link) {

			/*
			 * do not remove the default exception header which is
			 * at offset 0
			 */
			if (off_entry->offset == 0)
				continue;

			list_del(&off_entry->link);
			kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry);
		}
		list_for_each_entry_safe(off_entry, off_next,
			&ipa3_ctx->hdr_tbl.head_free_offset_list[i],
			link) {
			list_del(&off_entry->link);
			kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry);
		}
	}
	/* there is one header of size 8 */
	ipa3_ctx->hdr_tbl.end = 8;
	ipa3_ctx->hdr_tbl.hdr_cnt = 1;

	IPADBG("reset hdr proc ctx\n");
	list_for_each_entry_safe(
		ctx_entry,
		ctx_next,
		&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
		link) {

		if (ipa3_id_find(ctx_entry->id) == NULL) {
			mutex_unlock(&ipa3_ctx->lock);
			WARN_ON(1);
			return -EFAULT;
		}
		list_del(&ctx_entry->link);
		ctx_entry->ref_cnt = 0;
		ctx_entry->cookie = 0;

		/* remove the handle from the database */
		ipa3_id_remove(ctx_entry->id);
		kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, ctx_entry);

	}
	for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
		list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
			&ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i],
			link) {

			list_del(&ctx_off_entry->link);
			kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache,
				ctx_off_entry);
		}
		list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
			&ipa3_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i],
			link) {
			list_del(&ctx_off_entry->link);
			kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache,
				ctx_off_entry);
		}
	}
	ipa3_ctx->hdr_proc_ctx_tbl.end = 0;
	ipa3_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0;
	mutex_unlock(&ipa3_ctx->lock);

	return 0;
}

static struct ipa3_hdr_entry *__ipa_find_hdr(const char *name)
{
	struct ipa3_hdr_entry *entry;

	if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
		IPAERR_RL("Header name too long: %s\n", name);
		return NULL;
	}

	list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
			link) {
		if (!strcmp(name, entry->name))
			return entry;
	}

	return NULL;
}

/**
 * ipa3_get_hdr() - Lookup the specified header resource
 * @lookup: [inout] header to lookup and its handle
 *
 * lookup the specified header resource and return handle if it exists
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 * Caller should call ipa3_put_hdr later if this function succeeds
 */
int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup)
{
	struct ipa3_hdr_entry *entry;
	int result = -1;

	if (lookup == NULL) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}
	mutex_lock(&ipa3_ctx->lock);
	entry = __ipa_find_hdr(lookup->name);
	if (entry) {
		lookup->hdl = entry->id;
		result = 0;
	}
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}

/**
 * __ipa3_release_hdr() - drop reference to header and cause
 * deletion if reference count permits
 * @hdr_hdl: [in] handle of header to be released
 *
 * Returns: 0 on success, negative on failure
 */
int __ipa3_release_hdr(u32 hdr_hdl)
{
	int result = 0;

	if (__ipa3_del_hdr(hdr_hdl, false)) {
		IPADBG("fail to del hdr %x\n", hdr_hdl);
		result = -EFAULT;
		goto bail;
	}

	/* commit for put */
	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
		IPAERR("fail to commit hdr\n");
		result = -EFAULT;
		goto bail;
	}

bail:
	return result;
}

/**
 * __ipa3_release_hdr_proc_ctx() - drop reference to processing context
 * and cause deletion if reference count permits
 * @proc_ctx_hdl: [in] handle of processing context to be released
 *
 * Returns: 0 on success, negative on failure
 */
int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl)
{
	int result = 0;

	if (__ipa3_del_hdr_proc_ctx(proc_ctx_hdl, true, false)) {
		IPADBG("fail to del hdr proc ctx %x\n", proc_ctx_hdl);
		result = -EFAULT;
		goto bail;
	}

	/* commit for put */
	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
		IPAERR("fail to commit hdr\n");
		result = -EFAULT;
		goto bail;
	}

bail:
	return result;
}

/**
 * ipa3_put_hdr() - Release the specified header handle
 * @hdr_hdl: [in] the header handle to release
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_put_hdr(u32 hdr_hdl)
{
	struct ipa3_hdr_entry *entry;
	int result = -EFAULT;

	mutex_lock(&ipa3_ctx->lock);

	entry = ipa3_id_find(hdr_hdl);
	if (entry == NULL) {
		IPAERR_RL("lookup failed\n");
		result = -EINVAL;
		goto bail;
	}

	if (entry->cookie != IPA_HDR_COOKIE) {
		IPAERR_RL("invalid header entry\n");
		result = -EINVAL;
		goto bail;
	}

	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}

/**
 * ipa3_copy_hdr() - Lookup the specified header resource and return a copy of
 * it
 * @copy: [inout] header to lookup and its copy
 *
 * lookup the specified header resource and return a copy of it (along with its
 * attributes) if it exists, this would be called for partial headers
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy)
{
	struct ipa3_hdr_entry *entry;
	int result = -EFAULT;

	if (copy == NULL) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}
	mutex_lock(&ipa3_ctx->lock);
	entry = __ipa_find_hdr(copy->name);
	if (entry) {
		memcpy(copy->hdr, entry->hdr, entry->hdr_len);
		copy->hdr_len = entry->hdr_len;
		copy->type = entry->type;
		copy->is_partial = entry->is_partial;
		copy->is_eth2_ofst_valid = entry->is_eth2_ofst_valid;
		copy->eth2_ofst = entry->eth2_ofst;
		result = 0;
	}
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}