/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "ipa_i.h"
#include "ipahal/ipahal.h"

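/*
 * Headers and processing contexts are packed into fixed-size bins; each new
 * entry is placed in the smallest bin that can hold its needed length.
 */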
static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 64};
static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64};

#define HDR_TYPE_IS_VALID(type) \
	((type) >= 0 && (type) < IPA_HDR_L2_MAX)

#define HDR_PROC_TYPE_IS_VALID(type) \
	((type) >= 0 && (type) < IPA_HDR_PROC_MAX)

/**
 * ipa3_generate_hdr_hw_tbl() - generates the headers table
 * @mem: [out] buffer to put the header table
 *
 * Returns: 0 on success, negative on failure
 */
static int ipa3_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
{
	struct ipa3_hdr_entry *entry;

	mem->size = ipa3_ctx->hdr_tbl.end;

	if (mem->size == 0) {
		IPAERR("hdr tbl empty\n");
		return -EPERM;
	}
	IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_tbl.end);

	mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
			&mem->phys_base, GFP_KERNEL);
	if (!mem->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
		return -ENOMEM;
	}

	memset(mem->base, 0, mem->size);
	list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
			link) {
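		/*
		 * Headers marked is_hdr_proc_ctx live in system memory and are
		 * referenced through a processing context, so they are not
		 * copied into the local header table.
		 */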
		if (entry->is_hdr_proc_ctx)
			continue;
		IPADBG_LOW("hdr of len %d ofst=%d\n", entry->hdr_len,
				entry->offset_entry->offset);
		ipahal_cp_hdr_to_hw_buff(mem->base, entry->offset_entry->offset,
				entry->hdr, entry->hdr_len);
	}

	return 0;
}

static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
	u32 hdr_base_addr)
{
	struct ipa3_hdr_proc_ctx_entry *entry;
	int ret;
	int ep;

	list_for_each_entry(entry,
			&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
			link) {
		IPADBG_LOW("processing type %d ofst=%d\n",
			entry->type, entry->offset_entry->offset);

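		/*
		 * For L2TP contexts with a valid destination pipe, pull the
		 * packet-size offset and endianness settings from that pipe's
		 * EP header configuration before building the HW context.
		 */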
		if (entry->l2tp_params.is_dst_pipe_valid) {
			ep = ipa3_get_ep_mapping(entry->l2tp_params.dst_pipe);
			if (ep >= 0) {
				entry->l2tp_params.hdr_remove_param.
					hdr_ofst_pkt_size_valid = ipa3_ctx->
					ep[ep].cfg.hdr.hdr_ofst_pkt_size_valid;
				entry->l2tp_params.hdr_remove_param.
					hdr_ofst_pkt_size = ipa3_ctx->ep[ep].
					cfg.hdr.hdr_ofst_pkt_size;
				entry->l2tp_params.hdr_remove_param.
					hdr_endianness = ipa3_ctx->ep[ep].
					cfg.hdr_ext.hdr_little_endian ? 0 : 1;
			}
		}

		ret = ipahal_cp_proc_ctx_to_hw_buff(entry->type, mem->base,
				entry->offset_entry->offset,
				entry->hdr->hdr_len,
				entry->hdr->is_hdr_proc_ctx,
				entry->hdr->phys_base,
				hdr_base_addr,
				entry->hdr->offset_entry,
				entry->l2tp_params);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * ipa3_generate_hdr_proc_ctx_hw_tbl() -
 * generates the headers processing context table.
 * @hdr_sys_addr: [in] system memory address of the header table
 * @mem: [out] buffer to put the processing context table
 * @aligned_mem: [out] actual processing context table (with alignment).
 *	Processing context table needs to be 8 Bytes aligned.
 *
 * Returns: 0 on success, negative on failure
 */
static int ipa3_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr,
	struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem)
{
	u32 hdr_base_addr;

	mem->size = (ipa3_ctx->hdr_proc_ctx_tbl.end) ? : 4;

	/* make sure table is aligned */
	mem->size += IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;

	IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_proc_ctx_tbl.end);

	mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
			&mem->phys_base, GFP_KERNEL);
	if (!mem->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
		return -ENOMEM;
	}

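	/* carve the 8-byte aligned window out of the over-allocated buffer */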
	aligned_mem->phys_base =
		IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(mem->phys_base);
	aligned_mem->base = mem->base +
		(aligned_mem->phys_base - mem->phys_base);
	aligned_mem->size = mem->size - IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
	memset(aligned_mem->base, 0, aligned_mem->size);
	hdr_base_addr = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_ofst) :
		hdr_sys_addr;
	return ipa3_hdr_proc_ctx_to_hw_format(aligned_mem, hdr_base_addr);
}

/**
 * __ipa_commit_hdr_v3_0() - Commits the header table from memory to HW
 *
 * Returns: 0 on success, negative on failure
 */
int __ipa_commit_hdr_v3_0(void)
{
	struct ipa3_desc desc[2];
	struct ipa_mem_buffer hdr_mem;
	struct ipa_mem_buffer ctx_mem;
	struct ipa_mem_buffer aligned_ctx_mem;
	struct ipahal_imm_cmd_dma_shared_mem dma_cmd_hdr = {0};
	struct ipahal_imm_cmd_dma_shared_mem dma_cmd_ctx = {0};
	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
	struct ipahal_imm_cmd_hdr_init_system hdr_init_cmd = {0};
	struct ipahal_imm_cmd_pyld *hdr_cmd_pyld = NULL;
	struct ipahal_imm_cmd_pyld *ctx_cmd_pyld = NULL;
	int rc = -EFAULT;
	u32 proc_ctx_size;
	u32 proc_ctx_ofst;
	u32 proc_ctx_size_ddr;

	memset(desc, 0, 2 * sizeof(struct ipa3_desc));

	if (ipa3_generate_hdr_hw_tbl(&hdr_mem)) {
		IPAERR("fail to generate HDR HW TBL\n");
		goto end;
	}

	if (ipa3_generate_hdr_proc_ctx_hw_tbl(hdr_mem.phys_base, &ctx_mem,
		&aligned_ctx_mem)) {
		IPAERR("fail to generate HDR PROC CTX HW TBL\n");
		goto end;
	}

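	/*
	 * If the header table resides in local IPA memory (SRAM), copy it
	 * there with a DMA_SHARED_MEM immediate command; otherwise point the
	 * HW at the system-memory table with HDR_INIT_SYSTEM.
	 */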
	if (ipa3_ctx->hdr_tbl_lcl) {
		if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) {
			IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
				IPA_MEM_PART(apps_hdr_size));
			goto end;
		} else {
			dma_cmd_hdr.is_read = false; /* write operation */
			dma_cmd_hdr.skip_pipeline_clear = false;
			dma_cmd_hdr.pipeline_clear_options = IPAHAL_HPS_CLEAR;
			dma_cmd_hdr.system_addr = hdr_mem.phys_base;
			dma_cmd_hdr.size = hdr_mem.size;
			dma_cmd_hdr.local_addr =
				ipa3_ctx->smem_restricted_bytes +
				IPA_MEM_PART(apps_hdr_ofst);
			hdr_cmd_pyld = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_DMA_SHARED_MEM,
				&dma_cmd_hdr, false);
			if (!hdr_cmd_pyld) {
				IPAERR("fail construct dma_shared_mem cmd\n");
				goto end;
			}
			desc[0].opcode = hdr_cmd_pyld->opcode;
			desc[0].pyld = hdr_cmd_pyld->data;
			desc[0].len = hdr_cmd_pyld->len;
		}
	} else {
		if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
			IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
				IPA_MEM_PART(apps_hdr_size_ddr));
			goto end;
		} else {
			hdr_init_cmd.hdr_table_addr = hdr_mem.phys_base;
			hdr_cmd_pyld = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_HDR_INIT_SYSTEM,
				&hdr_init_cmd, false);
			if (!hdr_cmd_pyld) {
				IPAERR("fail construct hdr_init_system cmd\n");
				goto end;
			}
			desc[0].opcode = hdr_cmd_pyld->opcode;
			desc[0].pyld = hdr_cmd_pyld->data;
			desc[0].len = hdr_cmd_pyld->len;
		}
	}
	desc[0].type = IPA_IMM_CMD_DESC;
	IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);

	proc_ctx_size = IPA_MEM_PART(apps_hdr_proc_ctx_size);
	proc_ctx_ofst = IPA_MEM_PART(apps_hdr_proc_ctx_ofst);
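	/*
	 * Same scheme for the processing-context table: DMA it into local
	 * memory, or program IPA_SYS_PKT_PROC_CNTXT_BASE with the aligned
	 * system-memory address via a REGISTER_WRITE immediate command.
	 */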
	if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
		if (aligned_ctx_mem.size > proc_ctx_size) {
			IPAERR("tbl too big needed %d avail %d\n",
				aligned_ctx_mem.size,
				proc_ctx_size);
			goto end;
		} else {
			dma_cmd_ctx.is_read = false; /* Write operation */
			dma_cmd_ctx.skip_pipeline_clear = false;
			dma_cmd_ctx.pipeline_clear_options = IPAHAL_HPS_CLEAR;
			dma_cmd_ctx.system_addr = aligned_ctx_mem.phys_base;
			dma_cmd_ctx.size = aligned_ctx_mem.size;
			dma_cmd_ctx.local_addr =
				ipa3_ctx->smem_restricted_bytes +
				proc_ctx_ofst;
			ctx_cmd_pyld = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_DMA_SHARED_MEM,
				&dma_cmd_ctx, false);
			if (!ctx_cmd_pyld) {
				IPAERR("fail construct dma_shared_mem cmd\n");
				goto end;
			}
			desc[1].opcode = ctx_cmd_pyld->opcode;
			desc[1].pyld = ctx_cmd_pyld->data;
			desc[1].len = ctx_cmd_pyld->len;
		}
	} else {
		proc_ctx_size_ddr = IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
		if (aligned_ctx_mem.size > proc_ctx_size_ddr) {
			IPAERR("tbl too big, needed %d avail %d\n",
				aligned_ctx_mem.size,
				proc_ctx_size_ddr);
			goto end;
		} else {
			reg_write_cmd.skip_pipeline_clear = false;
			reg_write_cmd.pipeline_clear_options =
				IPAHAL_HPS_CLEAR;
			reg_write_cmd.offset =
				ipahal_get_reg_ofst(
					IPA_SYS_PKT_PROC_CNTXT_BASE);
			reg_write_cmd.value = aligned_ctx_mem.phys_base;
			reg_write_cmd.value_mask =
				~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1);
			ctx_cmd_pyld = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_REGISTER_WRITE,
				&reg_write_cmd, false);
			if (!ctx_cmd_pyld) {
				IPAERR("fail construct register_write cmd\n");
				goto end;
			}
			desc[1].opcode = ctx_cmd_pyld->opcode;
			desc[1].pyld = ctx_cmd_pyld->data;
			desc[1].len = ctx_cmd_pyld->len;
		}
	}
	desc[1].type = IPA_IMM_CMD_DESC;
	IPA_DUMP_BUFF(ctx_mem.base, ctx_mem.phys_base, ctx_mem.size);

	if (ipa3_send_cmd(2, desc))
		IPAERR("fail to send immediate command\n");
	else
		rc = 0;

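	/*
	 * Local tables were fully copied into SRAM, so the staging buffers can
	 * be freed now. System-memory tables must stay allocated while HW uses
	 * them: on success keep the new buffer and release the previous one.
	 */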
	if (ipa3_ctx->hdr_tbl_lcl) {
		dma_free_coherent(ipa3_ctx->pdev, hdr_mem.size, hdr_mem.base,
			hdr_mem.phys_base);
	} else {
		if (!rc) {
			if (ipa3_ctx->hdr_mem.phys_base)
				dma_free_coherent(ipa3_ctx->pdev,
					ipa3_ctx->hdr_mem.size,
					ipa3_ctx->hdr_mem.base,
					ipa3_ctx->hdr_mem.phys_base);
			ipa3_ctx->hdr_mem = hdr_mem;
		}
	}

	if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
		dma_free_coherent(ipa3_ctx->pdev, ctx_mem.size, ctx_mem.base,
			ctx_mem.phys_base);
	} else {
		if (!rc) {
			if (ipa3_ctx->hdr_proc_ctx_mem.phys_base)
				dma_free_coherent(ipa3_ctx->pdev,
					ipa3_ctx->hdr_proc_ctx_mem.size,
					ipa3_ctx->hdr_proc_ctx_mem.base,
					ipa3_ctx->hdr_proc_ctx_mem.phys_base);
			ipa3_ctx->hdr_proc_ctx_mem = ctx_mem;
		}
	}

end:
	if (ctx_cmd_pyld)
		ipahal_destroy_imm_cmd(ctx_cmd_pyld);

	if (hdr_cmd_pyld)
		ipahal_destroy_imm_cmd(hdr_cmd_pyld);

	return rc;
}

static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
	bool add_ref_hdr)
{
	struct ipa3_hdr_entry *hdr_entry;
	struct ipa3_hdr_proc_ctx_entry *entry;
	struct ipa3_hdr_proc_ctx_offset_entry *offset;
	u32 bin;
	struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;
	int id;
	int needed_len;
	int mem_size;

	IPADBG_LOW("Add processing type %d hdr_hdl %d\n",
		proc_ctx->type, proc_ctx->hdr_hdl);

	if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
		IPAERR_RL("invalid processing type %d\n", proc_ctx->type);
		return -EINVAL;
	}

	hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl);
	if (!hdr_entry) {
		IPAERR_RL("hdr_hdl is invalid\n");
		return -EINVAL;
	}
	if (hdr_entry->cookie != IPA_HDR_COOKIE) {
		IPAERR_RL("Invalid header cookie %u\n", hdr_entry->cookie);
		WARN_ON(1);
		return -EINVAL;
	}
	IPADBG("Associated header is name=%s is_hdr_proc_ctx=%d\n",
		hdr_entry->name, hdr_entry->is_hdr_proc_ctx);

	entry = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_cache, GFP_KERNEL);
	if (!entry) {
		IPAERR("failed to alloc proc_ctx object\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&entry->link);

	entry->type = proc_ctx->type;
	entry->hdr = hdr_entry;
	entry->l2tp_params = proc_ctx->l2tp_params;
	if (add_ref_hdr)
		hdr_entry->ref_cnt++;
	entry->cookie = IPA_PROC_HDR_COOKIE;

	needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type);

	if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0]) {
		bin = IPA_HDR_PROC_CTX_BIN0;
	} else if (needed_len <=
		ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
		bin = IPA_HDR_PROC_CTX_BIN1;
	} else {
		IPAERR_RL("unexpected needed len %d\n", needed_len);
		WARN_ON(1);
		goto bad_len;
	}

	mem_size = (ipa3_ctx->hdr_proc_ctx_tbl_lcl) ?
		IPA_MEM_PART(apps_hdr_proc_ctx_size) :
		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
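	/*
	 * Reuse a previously freed slot of this bin if one exists; otherwise
	 * grow the table, provided the new entry still fits the table size.
	 */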
	if (list_empty(&htbl->head_free_offset_list[bin])) {
		if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
			IPAERR_RL("hdr proc ctx table overflow\n");
			goto bad_len;
		}

		offset = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_offset_cache,
			GFP_KERNEL);
		if (!offset) {
			IPAERR("failed to alloc offset object\n");
			goto bad_len;
		}
		INIT_LIST_HEAD(&offset->link);
		/*
		 * for a first item grow, set the bin and offset which are set
		 * in stone
		 */
		offset->offset = htbl->end;
		offset->bin = bin;
		htbl->end += ipa_hdr_proc_ctx_bin_sz[bin];
		list_add(&offset->link,
			&htbl->head_offset_list[bin]);
	} else {
		/* get the first free slot */
		offset =
			list_first_entry(&htbl->head_free_offset_list[bin],
			struct ipa3_hdr_proc_ctx_offset_entry, link);
		list_move(&offset->link, &htbl->head_offset_list[bin]);
	}

	entry->offset_entry = offset;
	list_add(&entry->link, &htbl->head_proc_ctx_entry_list);
	htbl->proc_ctx_cnt++;
	IPADBG("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
		htbl->proc_ctx_cnt, offset->offset);

	id = ipa3_id_alloc(entry);
	if (id < 0) {
		IPAERR("failed to alloc id\n");
		WARN_ON(1);
		goto ipa_insert_failed;
	}
	entry->id = id;
	proc_ctx->proc_ctx_hdl = id;
	entry->ref_cnt++;

	return 0;

ipa_insert_failed:
	list_move(&offset->link,
		&htbl->head_free_offset_list[offset->bin]);
	entry->offset_entry = NULL;
	list_del(&entry->link);
	htbl->proc_ctx_cnt--;

bad_len:
	if (add_ref_hdr)
		hdr_entry->ref_cnt--;
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);
	return -EPERM;
}


static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
{
	struct ipa3_hdr_entry *entry;
	struct ipa_hdr_offset_entry *offset = NULL;
	u32 bin;
	struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
	int id;
	int mem_size;

	if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
		IPAERR_RL("bad parm\n");
		goto error;
	}

	if (!HDR_TYPE_IS_VALID(hdr->type)) {
		IPAERR_RL("invalid hdr type %d\n", hdr->type);
		goto error;
	}

	entry = kmem_cache_zalloc(ipa3_ctx->hdr_cache, GFP_KERNEL);
	if (!entry) {
		IPAERR("failed to alloc hdr object\n");
		goto error;
	}

	INIT_LIST_HEAD(&entry->link);

	memcpy(entry->hdr, hdr->hdr, hdr->hdr_len);
	entry->hdr_len = hdr->hdr_len;
	strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX);
	entry->is_partial = hdr->is_partial;
	entry->type = hdr->type;
	entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
	entry->eth2_ofst = hdr->eth2_ofst;
	entry->cookie = IPA_HDR_COOKIE;

	if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
		bin = IPA_HDR_BIN0;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1])
		bin = IPA_HDR_BIN1;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2])
		bin = IPA_HDR_BIN2;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3])
		bin = IPA_HDR_BIN3;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
		bin = IPA_HDR_BIN4;
	else {
		IPAERR_RL("unexpected hdr len %d\n", hdr->hdr_len);
		goto bad_hdr_len;
	}

	mem_size = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_size) :
		IPA_MEM_PART(apps_hdr_size_ddr);

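	/*
	 * Prefer a recycled slot from the free list. If none exists and the
	 * header does not fit in the table, keep it in system memory
	 * (DMA-mapped) and reach it through a processing context added below.
	 */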
	if (list_empty(&htbl->head_free_offset_list[bin])) {
		/* if header does not fit to table, place it in DDR */
		if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) {
			entry->is_hdr_proc_ctx = true;
			entry->phys_base = dma_map_single(ipa3_ctx->pdev,
				entry->hdr,
				entry->hdr_len,
				DMA_TO_DEVICE);
			if (dma_mapping_error(ipa3_ctx->pdev,
				entry->phys_base)) {
				IPAERR("dma_map_single failure for entry\n");
				goto fail_dma_mapping;
			}
		} else {
			entry->is_hdr_proc_ctx = false;
			offset = kmem_cache_zalloc(ipa3_ctx->hdr_offset_cache,
				GFP_KERNEL);
			if (!offset) {
				IPAERR("failed to alloc hdr offset object\n");
				goto bad_hdr_len;
			}
			INIT_LIST_HEAD(&offset->link);
			/*
			 * for a first item grow, set the bin and offset which
			 * are set in stone
			 */
			offset->offset = htbl->end;
			offset->bin = bin;
			htbl->end += ipa_hdr_bin_sz[bin];
			list_add(&offset->link,
				&htbl->head_offset_list[bin]);
			entry->offset_entry = offset;
		}
	} else {
		entry->is_hdr_proc_ctx = false;
		/* get the first free slot */
		offset = list_first_entry(&htbl->head_free_offset_list[bin],
			struct ipa_hdr_offset_entry, link);
		list_move(&offset->link, &htbl->head_offset_list[bin]);
		entry->offset_entry = offset;
	}

	list_add(&entry->link, &htbl->head_hdr_entry_list);
	htbl->hdr_cnt++;
	if (entry->is_hdr_proc_ctx)
		IPADBG("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
			hdr->hdr_len,
			htbl->hdr_cnt,
			&entry->phys_base);
	else
		IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
			hdr->hdr_len,
			htbl->hdr_cnt,
			entry->offset_entry->offset);

	id = ipa3_id_alloc(entry);
	if (id < 0) {
		IPAERR("failed to alloc id\n");
		WARN_ON(1);
		goto ipa_insert_failed;
	}
	entry->id = id;
	hdr->hdr_hdl = id;
	entry->ref_cnt++;

	if (entry->is_hdr_proc_ctx) {
		struct ipa_hdr_proc_ctx_add proc_ctx;

		IPADBG("adding processing context for header %s\n", hdr->name);
		proc_ctx.type = IPA_HDR_PROC_NONE;
		proc_ctx.hdr_hdl = id;
		if (__ipa_add_hdr_proc_ctx(&proc_ctx, false)) {
			IPAERR("failed to add hdr proc ctx\n");
			goto fail_add_proc_ctx;
		}
		entry->proc_ctx = ipa3_id_find(proc_ctx.proc_ctx_hdl);
	}

	return 0;

fail_add_proc_ctx:
	entry->ref_cnt--;
	hdr->hdr_hdl = 0;
	ipa3_id_remove(id);
ipa_insert_failed:
	if (entry->is_hdr_proc_ctx) {
		dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
			entry->hdr_len, DMA_TO_DEVICE);
	} else {
		if (offset)
			list_move(&offset->link,
				&htbl->head_free_offset_list[offset->bin]);
		entry->offset_entry = NULL;
	}
	htbl->hdr_cnt--;
	list_del(&entry->link);

fail_dma_mapping:
	entry->is_hdr_proc_ctx = false;

bad_hdr_len:
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_cache, entry);
error:
	return -EPERM;
}

static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl,
	bool release_hdr, bool by_user)
{
	struct ipa3_hdr_proc_ctx_entry *entry;
	struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;

	entry = ipa3_id_find(proc_ctx_hdl);
	if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	IPADBG("del proc ctx cnt=%d ofst=%d\n",
		htbl->proc_ctx_cnt, entry->offset_entry->offset);

	if (by_user && entry->user_deleted) {
		IPAERR_RL("proc_ctx already deleted by user\n");
		return -EINVAL;
	}

	if (by_user)
		entry->user_deleted = true;

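	/* the entry is torn down only when the last reference is dropped */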
	if (--entry->ref_cnt) {
		IPADBG("proc_ctx_hdl %x ref_cnt %d\n",
			proc_ctx_hdl, entry->ref_cnt);
		return 0;
	}

	if (release_hdr)
		__ipa3_del_hdr(entry->hdr->id, false);

	/* move the offset entry to appropriate free list */
	list_move(&entry->offset_entry->link,
		&htbl->head_free_offset_list[entry->offset_entry->bin]);
	list_del(&entry->link);
	htbl->proc_ctx_cnt--;
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);

	/* remove the handle from the database */
	ipa3_id_remove(proc_ctx_hdl);

	return 0;
}


int __ipa3_del_hdr(u32 hdr_hdl, bool by_user)
{
	struct ipa3_hdr_entry *entry;
	struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;

	entry = ipa3_id_find(hdr_hdl);
	if (entry == NULL) {
		IPAERR_RL("lookup failed\n");
		return -EINVAL;
	}

	if (entry->cookie != IPA_HDR_COOKIE) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	if (entry->is_hdr_proc_ctx)
		IPADBG("del hdr of len=%d hdr_cnt=%d phys_base=%pa\n",
			entry->hdr_len, htbl->hdr_cnt, &entry->phys_base);
	else
		IPADBG("del hdr of len=%d hdr_cnt=%d ofst=%d\n",
			entry->hdr_len, htbl->hdr_cnt,
			entry->offset_entry->offset);

	if (by_user && entry->user_deleted) {
		IPAERR_RL("hdr already deleted by user\n");
		return -EINVAL;
	}

	if (by_user)
		entry->user_deleted = true;

	if (--entry->ref_cnt) {
		IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt);
		return 0;
	}

	if (entry->is_hdr_proc_ctx) {
		dma_unmap_single(ipa3_ctx->pdev,
			entry->phys_base,
			entry->hdr_len,
			DMA_TO_DEVICE);
		__ipa3_del_hdr_proc_ctx(entry->proc_ctx->id, false, false);
	} else {
		/* move the offset entry to appropriate free list */
		list_move(&entry->offset_entry->link,
			&htbl->head_free_offset_list[entry->offset_entry->bin]);
	}
	list_del(&entry->link);
	htbl->hdr_cnt--;
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_cache, entry);

	/* remove the handle from the database */
	ipa3_id_remove(hdr_hdl);

	return 0;
}

/**
 * ipa3_add_hdr() - add the specified headers to SW and optionally commit them
 * to IPA HW
 * @hdrs: [inout] set of headers to add
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs)
{
	int i;
	int result = -EFAULT;

	if (hdrs == NULL || hdrs->num_hdrs == 0) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	IPADBG("adding %d headers to IPA driver internal data struct\n",
			hdrs->num_hdrs);
	for (i = 0; i < hdrs->num_hdrs; i++) {
		if (__ipa_add_hdr(&hdrs->hdr[i])) {
			IPAERR_RL("failed to add hdr %d\n", i);
			hdrs->hdr[i].status = -1;
		} else {
			hdrs->hdr[i].status = 0;
		}
	}

	if (hdrs->commit) {
		IPADBG("committing all headers to IPA core");
		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
			result = -EPERM;
			goto bail;
		}
	}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}
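
/*
 * Illustrative usage sketch (not part of the driver): a caller allocates an
 * ipa_ioc_add_hdr with num_hdrs = 1, fills hdr[0].name, hdr[0].hdr and
 * hdr[0].hdr_len, sets commit, and invokes ipa3_add_hdr(); on success
 * hdr[0].status is 0 and hdr[0].hdr_hdl holds the handle to pass to
 * ipa3_del_hdr() later.
 */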

/**
 * ipa3_del_hdr_by_user() - Remove the specified headers
 * from SW and optionally commit them to IPA HW
 * @hdls: [inout] set of headers to delete
 * @by_user: Operation requested by user?
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user)
{
	int i;
	int result = -EFAULT;

	if (hdls == NULL || hdls->num_hdls == 0) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < hdls->num_hdls; i++) {
		if (__ipa3_del_hdr(hdls->hdl[i].hdl, by_user)) {
			IPAERR_RL("failed to del hdr %i\n", i);
			hdls->hdl[i].status = -1;
		} else {
			hdls->hdl[i].status = 0;
		}
	}

	if (hdls->commit) {
		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
			result = -EPERM;
			goto bail;
		}
	}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}

/**
 * ipa3_del_hdr() - Remove the specified headers from SW
 * and optionally commit them to IPA HW
 * @hdls: [inout] set of headers to delete
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls)
{
	return ipa3_del_hdr_by_user(hdls, false);
}

/**
 * ipa3_add_hdr_proc_ctx() - add the specified processing context headers
 * to SW and optionally commit them to IPA HW
 * @proc_ctxs: [inout] set of processing context headers to add
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
{
	int i;
	int result = -EFAULT;

	if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	IPADBG("adding %d header processing contexts to IPA driver\n",
			proc_ctxs->num_proc_ctxs);
	for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
		if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
			IPAERR_RL("failed to add hdr proc ctx %d\n", i);
			proc_ctxs->proc_ctx[i].status = -1;
		} else {
			proc_ctxs->proc_ctx[i].status = 0;
		}
	}

	if (proc_ctxs->commit) {
		IPADBG("committing all headers to IPA core");
		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
			result = -EPERM;
			goto bail;
		}
	}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}

/**
 * ipa3_del_hdr_proc_ctx_by_user() -
 * Remove the specified processing context headers from SW and
 * optionally commit them to IPA HW.
 * @hdls: [inout] set of processing context headers to delete
 * @by_user: Operation requested by user?
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls,
	bool by_user)
{
	int i;
	int result;

	if (hdls == NULL || hdls->num_hdls == 0) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	mutex_lock(&ipa3_ctx->lock);
	for (i = 0; i < hdls->num_hdls; i++) {
		if (__ipa3_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) {
			IPAERR_RL("failed to del hdr %i\n", i);
			hdls->hdl[i].status = -1;
		} else {
			hdls->hdl[i].status = 0;
		}
	}

	if (hdls->commit) {
		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
			result = -EPERM;
			goto bail;
		}
	}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}

/**
 * ipa3_del_hdr_proc_ctx() -
 * Remove the specified processing context headers from SW and
 * optionally commit them to IPA HW.
 * @hdls: [inout] set of processing context headers to delete
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
{
	return ipa3_del_hdr_proc_ctx_by_user(hdls, false);
}

/**
 * ipa3_commit_hdr() - commit to IPA HW the current header table in SW
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_commit_hdr(void)
{
	int result = -EFAULT;

	/*
	 * issue a commit on the routing module since routing rules point to
	 * header table entries
	 */
	if (ipa3_commit_rt(IPA_IP_v4))
		return -EPERM;
	if (ipa3_commit_rt(IPA_IP_v6))
		return -EPERM;

	mutex_lock(&ipa3_ctx->lock);
	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
		result = -EPERM;
		goto bail;
	}
	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}

/**
 * ipa3_reset_hdr() - reset the current header table in SW (does not commit to
 * HW)
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_reset_hdr(void)
{
	struct ipa3_hdr_entry *entry;
	struct ipa3_hdr_entry *next;
	struct ipa3_hdr_proc_ctx_entry *ctx_entry;
	struct ipa3_hdr_proc_ctx_entry *ctx_next;
	struct ipa_hdr_offset_entry *off_entry;
	struct ipa_hdr_offset_entry *off_next;
	struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_entry;
	struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_next;
	int i;

	/*
	 * issue a reset on the routing module since routing rules point to
	 * header table entries
	 */
	if (ipa3_reset_rt(IPA_IP_v4))
		IPAERR("fail to reset v4 rt\n");
	if (ipa3_reset_rt(IPA_IP_v6))
		IPAERR("fail to reset v6 rt\n");

	mutex_lock(&ipa3_ctx->lock);
	IPADBG("reset hdr\n");
	list_for_each_entry_safe(entry, next,
		&ipa3_ctx->hdr_tbl.head_hdr_entry_list, link) {

		/* do not remove the default header */
		if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
			if (entry->is_hdr_proc_ctx) {
				IPAERR("default header is proc ctx\n");
				mutex_unlock(&ipa3_ctx->lock);
				WARN_ON(1);
				return -EFAULT;
			}
			continue;
		}

		if (ipa3_id_find(entry->id) == NULL) {
			mutex_unlock(&ipa3_ctx->lock);
			WARN_ON(1);
			return -EFAULT;
		}
		if (entry->is_hdr_proc_ctx) {
			dma_unmap_single(ipa3_ctx->pdev,
				entry->phys_base,
				entry->hdr_len,
				DMA_TO_DEVICE);
			entry->proc_ctx = NULL;
		}
		list_del(&entry->link);
		entry->ref_cnt = 0;
		entry->cookie = 0;

		/* remove the handle from the database */
		ipa3_id_remove(entry->id);
		kmem_cache_free(ipa3_ctx->hdr_cache, entry);

	}
	for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
		list_for_each_entry_safe(off_entry, off_next,
			&ipa3_ctx->hdr_tbl.head_offset_list[i],
			link) {

			/*
			 * do not remove the default exception header which is
			 * at offset 0
			 */
			if (off_entry->offset == 0)
				continue;

			list_del(&off_entry->link);
			kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry);
		}
		list_for_each_entry_safe(off_entry, off_next,
			&ipa3_ctx->hdr_tbl.head_free_offset_list[i],
			link) {
			list_del(&off_entry->link);
			kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry);
		}
	}
	/* there is one header of size 8 */
	ipa3_ctx->hdr_tbl.end = 8;
	ipa3_ctx->hdr_tbl.hdr_cnt = 1;

	IPADBG("reset hdr proc ctx\n");
	list_for_each_entry_safe(
		ctx_entry,
		ctx_next,
		&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
		link) {

		if (ipa3_id_find(ctx_entry->id) == NULL) {
			mutex_unlock(&ipa3_ctx->lock);
			WARN_ON(1);
			return -EFAULT;
		}
		list_del(&ctx_entry->link);
		ctx_entry->ref_cnt = 0;
		ctx_entry->cookie = 0;

		/* remove the handle from the database */
		ipa3_id_remove(ctx_entry->id);
		kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, ctx_entry);

	}
	for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
		list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
			&ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i],
			link) {

			list_del(&ctx_off_entry->link);
			kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache,
				ctx_off_entry);
		}
		list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
			&ipa3_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i],
			link) {
			list_del(&ctx_off_entry->link);
			kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache,
				ctx_off_entry);
		}
	}
	ipa3_ctx->hdr_proc_ctx_tbl.end = 0;
	ipa3_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0;
	mutex_unlock(&ipa3_ctx->lock);

	return 0;
}

static struct ipa3_hdr_entry *__ipa_find_hdr(const char *name)
{
	struct ipa3_hdr_entry *entry;

	if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
		IPAERR_RL("Header name too long: %s\n", name);
		return NULL;
	}

	list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
			link) {
		if (!strcmp(name, entry->name))
			return entry;
	}

	return NULL;
}

/**
 * ipa3_get_hdr() - Lookup the specified header resource
 * @lookup: [inout] header to lookup and its handle
 *
 * lookup the specified header resource and return handle if it exists
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 *	Caller should call ipa3_put_hdr later if this function succeeds
 */
int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup)
{
	struct ipa3_hdr_entry *entry;
	int result = -1;

	if (lookup == NULL) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}
	mutex_lock(&ipa3_ctx->lock);
	lookup->name[IPA_RESOURCE_NAME_MAX-1] = '\0';
	entry = __ipa_find_hdr(lookup->name);
	if (entry) {
		lookup->hdl = entry->id;
		result = 0;
	}
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}

/**
 * __ipa3_release_hdr() - drop reference to header and cause
 * deletion if reference count permits
 * @hdr_hdl: [in] handle of header to be released
 *
 * Returns: 0 on success, negative on failure
 */
int __ipa3_release_hdr(u32 hdr_hdl)
{
	int result = 0;

	if (__ipa3_del_hdr(hdr_hdl, false)) {
		IPADBG("fail to del hdr %x\n", hdr_hdl);
		result = -EFAULT;
		goto bail;
	}

	/* commit for put */
	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
		IPAERR("fail to commit hdr\n");
		result = -EFAULT;
		goto bail;
	}

bail:
	return result;
}

/**
 * __ipa3_release_hdr_proc_ctx() - drop reference to processing context
 * and cause deletion if reference count permits
 * @proc_ctx_hdl: [in] handle of processing context to be released
 *
 * Returns: 0 on success, negative on failure
 */
int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl)
{
	int result = 0;

	if (__ipa3_del_hdr_proc_ctx(proc_ctx_hdl, true, false)) {
		IPADBG("fail to del hdr %x\n", proc_ctx_hdl);
		result = -EFAULT;
		goto bail;
	}

	/* commit for put */
	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
		IPAERR("fail to commit hdr\n");
		result = -EFAULT;
		goto bail;
	}

bail:
	return result;
}

/**
 * ipa3_put_hdr() - Release the specified header handle
 * @hdr_hdl: [in] the header handle to release
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_put_hdr(u32 hdr_hdl)
{
	struct ipa3_hdr_entry *entry;
	int result = -EFAULT;

	mutex_lock(&ipa3_ctx->lock);

	entry = ipa3_id_find(hdr_hdl);
	if (entry == NULL) {
		IPAERR_RL("lookup failed\n");
		result = -EINVAL;
		goto bail;
	}

	if (entry->cookie != IPA_HDR_COOKIE) {
		IPAERR_RL("invalid header entry\n");
		result = -EINVAL;
		goto bail;
	}

	result = 0;
bail:
	mutex_unlock(&ipa3_ctx->lock);
	return result;
}

/**
 * ipa3_copy_hdr() - Lookup the specified header resource and return a copy of
 * it
 * @copy: [inout] header to lookup and its copy
 *
 * lookup the specified header resource and return a copy of it (along with its
 * attributes) if it exists, this would be called for partial headers
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy)
{
	struct ipa3_hdr_entry *entry;
	int result = -EFAULT;

	if (copy == NULL) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}
	mutex_lock(&ipa3_ctx->lock);
	copy->name[IPA_RESOURCE_NAME_MAX-1] = '\0';
	entry = __ipa_find_hdr(copy->name);
	if (entry) {
		memcpy(copy->hdr, entry->hdr, entry->hdr_len);
		copy->hdr_len = entry->hdr_len;
		copy->type = entry->type;
		copy->is_partial = entry->is_partial;
		copy->is_eth2_ofst_valid = entry->is_eth2_ofst_valid;
		copy->eth2_ofst = entry->eth2_ofst;
		result = 0;
	}
	mutex_unlock(&ipa3_ctx->lock);

	return result;
}