blob: 1b4a3e30074d74d843f45b319bec223d15393bd4 [file] [log] [blame]
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Amir Levy9659e592016-10-27 18:08:27 +03002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include "ipa_i.h"
14#include "ipahal/ipahal.h"
15
/* Bin sizes (bytes) for header table slots; a header occupies the
 * smallest bin that can hold its length.
 */
static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 64};
/* Bin sizes (bytes) for processing-context table slots */
static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64};

/* Range-check an L2 header type value received from callers */
#define HDR_TYPE_IS_VALID(type) \
	((type) >= 0 && (type) < IPA_HDR_L2_MAX)

/* Range-check a header processing type value received from callers */
#define HDR_PROC_TYPE_IS_VALID(type) \
	((type) >= 0 && (type) < IPA_HDR_PROC_MAX)
24
25/**
26 * ipa3_generate_hdr_hw_tbl() - generates the headers table
27 * @mem: [out] buffer to put the header table
28 *
29 * Returns: 0 on success, negative on failure
30 */
31static int ipa3_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
32{
33 struct ipa3_hdr_entry *entry;
34
35 mem->size = ipa3_ctx->hdr_tbl.end;
36
37 if (mem->size == 0) {
38 IPAERR("hdr tbl empty\n");
39 return -EPERM;
40 }
41 IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_tbl.end);
42
43 mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
44 &mem->phys_base, GFP_KERNEL);
45 if (!mem->base) {
46 IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
47 return -ENOMEM;
48 }
49
50 memset(mem->base, 0, mem->size);
51 list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
52 link) {
53 if (entry->is_hdr_proc_ctx)
54 continue;
55 IPADBG_LOW("hdr of len %d ofst=%d\n", entry->hdr_len,
56 entry->offset_entry->offset);
57 ipahal_cp_hdr_to_hw_buff(mem->base, entry->offset_entry->offset,
58 entry->hdr, entry->hdr_len);
59 }
60
61 return 0;
62}
63
/**
 * ipa3_hdr_proc_ctx_to_hw_format() - serialize proc-ctx entries to HW format
 * @mem: [in/out] pre-allocated, aligned buffer receiving the table
 * @hdr_base_addr: base address of the header table the contexts reference
 *
 * Walks the SW processing-context list and writes each entry into @mem
 * via ipahal. For L2TP entries with a valid destination pipe, the
 * header-remove parameters are refreshed from that EP's current header
 * configuration just before serialization.
 *
 * Returns: 0 on success, negative on failure
 */
static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
	u32 hdr_base_addr)
{
	struct ipa3_hdr_proc_ctx_entry *entry;
	int ret;
	int ep;

	list_for_each_entry(entry,
		&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
		link) {
		IPADBG_LOW("processing type %d ofst=%d\n",
			entry->type, entry->offset_entry->offset);

		/* refresh pkt-size offset/endianness from the dst EP cfg */
		if (entry->l2tp_params.is_dst_pipe_valid) {
			ep = ipa3_get_ep_mapping(entry->l2tp_params.dst_pipe);
			if (ep >= 0) {
				entry->l2tp_params.hdr_remove_param.
					hdr_ofst_pkt_size_valid = ipa3_ctx->
					ep[ep].cfg.hdr.hdr_ofst_pkt_size_valid;
				entry->l2tp_params.hdr_remove_param.
					hdr_ofst_pkt_size = ipa3_ctx->ep[ep].
					cfg.hdr.hdr_ofst_pkt_size;
				entry->l2tp_params.hdr_remove_param.
					hdr_endianness = ipa3_ctx->ep[ep].
					cfg.hdr_ext.hdr_little_endian ? 0 : 1;
			}
		}

		ret = ipahal_cp_proc_ctx_to_hw_buff(entry->type, mem->base,
			entry->offset_entry->offset,
			entry->hdr->hdr_len,
			entry->hdr->is_hdr_proc_ctx,
			entry->hdr->phys_base,
			hdr_base_addr,
			entry->hdr->offset_entry,
			entry->l2tp_params);
		if (ret)
			return ret;
	}

	return 0;
}
106
/**
 * ipa3_generate_hdr_proc_ctx_hw_tbl() -
 * generates the headers processing context table.
 * @hdr_sys_addr: system-memory address of the header table (used when the
 *  header table is not in local SRAM)
 * @mem: [out] buffer to put the processing context table
 * @aligned_mem: [out] actual processing context table (with alignment).
 *  Processing context table needs to be 8 Bytes aligned.
 *
 * Returns: 0 on success, negative on failure
 */
static int ipa3_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr,
	struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem)
{
	u32 hdr_base_addr;

	/* an empty table still gets a minimal 4-byte allocation */
	mem->size = (ipa3_ctx->hdr_proc_ctx_tbl.end) ? : 4;

	/* make sure table is aligned */
	mem->size += IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;

	IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_proc_ctx_tbl.end);

	mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
		&mem->phys_base, GFP_KERNEL);
	if (!mem->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
		return -ENOMEM;
	}

	/* aligned_mem is a rounded-up view into the over-allocated mem */
	aligned_mem->phys_base =
		IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(mem->phys_base);
	aligned_mem->base = mem->base +
		(aligned_mem->phys_base - mem->phys_base);
	aligned_mem->size = mem->size - IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
	memset(aligned_mem->base, 0, aligned_mem->size);
	/* contexts point at headers in local SRAM or in system memory */
	hdr_base_addr = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_ofst) :
		hdr_sys_addr;
	return ipa3_hdr_proc_ctx_to_hw_format(aligned_mem, hdr_base_addr);
}
145
146/**
147 * __ipa_commit_hdr_v3_0() - Commits the header table from memory to HW
148 *
149 * Returns: 0 on success, negative on failure
150 */
151int __ipa_commit_hdr_v3_0(void)
152{
153 struct ipa3_desc desc[2];
154 struct ipa_mem_buffer hdr_mem;
155 struct ipa_mem_buffer ctx_mem;
156 struct ipa_mem_buffer aligned_ctx_mem;
157 struct ipahal_imm_cmd_dma_shared_mem dma_cmd_hdr = {0};
158 struct ipahal_imm_cmd_dma_shared_mem dma_cmd_ctx = {0};
159 struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
160 struct ipahal_imm_cmd_hdr_init_system hdr_init_cmd = {0};
161 struct ipahal_imm_cmd_pyld *hdr_cmd_pyld = NULL;
162 struct ipahal_imm_cmd_pyld *ctx_cmd_pyld = NULL;
163 int rc = -EFAULT;
164 u32 proc_ctx_size;
165 u32 proc_ctx_ofst;
166 u32 proc_ctx_size_ddr;
167
168 memset(desc, 0, 2 * sizeof(struct ipa3_desc));
169
170 if (ipa3_generate_hdr_hw_tbl(&hdr_mem)) {
171 IPAERR("fail to generate HDR HW TBL\n");
172 goto end;
173 }
174
175 if (ipa3_generate_hdr_proc_ctx_hw_tbl(hdr_mem.phys_base, &ctx_mem,
176 &aligned_ctx_mem)) {
177 IPAERR("fail to generate HDR PROC CTX HW TBL\n");
178 goto end;
179 }
180
181 if (ipa3_ctx->hdr_tbl_lcl) {
182 if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) {
183 IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
184 IPA_MEM_PART(apps_hdr_size));
185 goto end;
186 } else {
187 dma_cmd_hdr.is_read = false; /* write operation */
188 dma_cmd_hdr.skip_pipeline_clear = false;
189 dma_cmd_hdr.pipeline_clear_options = IPAHAL_HPS_CLEAR;
190 dma_cmd_hdr.system_addr = hdr_mem.phys_base;
191 dma_cmd_hdr.size = hdr_mem.size;
192 dma_cmd_hdr.local_addr =
193 ipa3_ctx->smem_restricted_bytes +
194 IPA_MEM_PART(apps_hdr_ofst);
195 hdr_cmd_pyld = ipahal_construct_imm_cmd(
196 IPA_IMM_CMD_DMA_SHARED_MEM,
197 &dma_cmd_hdr, false);
198 if (!hdr_cmd_pyld) {
199 IPAERR("fail construct dma_shared_mem cmd\n");
200 goto end;
201 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -0700202 desc[0].opcode = hdr_cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +0300203 desc[0].pyld = hdr_cmd_pyld->data;
204 desc[0].len = hdr_cmd_pyld->len;
205 }
206 } else {
207 if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
208 IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
209 IPA_MEM_PART(apps_hdr_size_ddr));
210 goto end;
211 } else {
212 hdr_init_cmd.hdr_table_addr = hdr_mem.phys_base;
213 hdr_cmd_pyld = ipahal_construct_imm_cmd(
214 IPA_IMM_CMD_HDR_INIT_SYSTEM,
215 &hdr_init_cmd, false);
216 if (!hdr_cmd_pyld) {
217 IPAERR("fail construct hdr_init_system cmd\n");
218 goto end;
219 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -0700220 desc[0].opcode = hdr_cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +0300221 desc[0].pyld = hdr_cmd_pyld->data;
222 desc[0].len = hdr_cmd_pyld->len;
223 }
224 }
225 desc[0].type = IPA_IMM_CMD_DESC;
226 IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);
227
228 proc_ctx_size = IPA_MEM_PART(apps_hdr_proc_ctx_size);
229 proc_ctx_ofst = IPA_MEM_PART(apps_hdr_proc_ctx_ofst);
230 if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
231 if (aligned_ctx_mem.size > proc_ctx_size) {
232 IPAERR("tbl too big needed %d avail %d\n",
233 aligned_ctx_mem.size,
234 proc_ctx_size);
235 goto end;
236 } else {
237 dma_cmd_ctx.is_read = false; /* Write operation */
238 dma_cmd_ctx.skip_pipeline_clear = false;
239 dma_cmd_ctx.pipeline_clear_options = IPAHAL_HPS_CLEAR;
240 dma_cmd_ctx.system_addr = aligned_ctx_mem.phys_base;
241 dma_cmd_ctx.size = aligned_ctx_mem.size;
242 dma_cmd_ctx.local_addr =
243 ipa3_ctx->smem_restricted_bytes +
244 proc_ctx_ofst;
245 ctx_cmd_pyld = ipahal_construct_imm_cmd(
246 IPA_IMM_CMD_DMA_SHARED_MEM,
247 &dma_cmd_ctx, false);
248 if (!ctx_cmd_pyld) {
249 IPAERR("fail construct dma_shared_mem cmd\n");
250 goto end;
251 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -0700252 desc[1].opcode = ctx_cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +0300253 desc[1].pyld = ctx_cmd_pyld->data;
254 desc[1].len = ctx_cmd_pyld->len;
255 }
256 } else {
257 proc_ctx_size_ddr = IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
258 if (aligned_ctx_mem.size > proc_ctx_size_ddr) {
259 IPAERR("tbl too big, needed %d avail %d\n",
260 aligned_ctx_mem.size,
261 proc_ctx_size_ddr);
262 goto end;
263 } else {
264 reg_write_cmd.skip_pipeline_clear = false;
265 reg_write_cmd.pipeline_clear_options =
266 IPAHAL_HPS_CLEAR;
267 reg_write_cmd.offset =
268 ipahal_get_reg_ofst(
269 IPA_SYS_PKT_PROC_CNTXT_BASE);
270 reg_write_cmd.value = aligned_ctx_mem.phys_base;
271 reg_write_cmd.value_mask =
272 ~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1);
273 ctx_cmd_pyld = ipahal_construct_imm_cmd(
274 IPA_IMM_CMD_REGISTER_WRITE,
275 &reg_write_cmd, false);
276 if (!ctx_cmd_pyld) {
277 IPAERR("fail construct register_write cmd\n");
278 goto end;
279 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -0700280 desc[1].opcode = ctx_cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +0300281 desc[1].pyld = ctx_cmd_pyld->data;
282 desc[1].len = ctx_cmd_pyld->len;
283 }
284 }
285 desc[1].type = IPA_IMM_CMD_DESC;
286 IPA_DUMP_BUFF(ctx_mem.base, ctx_mem.phys_base, ctx_mem.size);
287
288 if (ipa3_send_cmd(2, desc))
289 IPAERR("fail to send immediate command\n");
290 else
291 rc = 0;
292
293 if (ipa3_ctx->hdr_tbl_lcl) {
294 dma_free_coherent(ipa3_ctx->pdev, hdr_mem.size, hdr_mem.base,
295 hdr_mem.phys_base);
296 } else {
297 if (!rc) {
298 if (ipa3_ctx->hdr_mem.phys_base)
299 dma_free_coherent(ipa3_ctx->pdev,
300 ipa3_ctx->hdr_mem.size,
301 ipa3_ctx->hdr_mem.base,
302 ipa3_ctx->hdr_mem.phys_base);
303 ipa3_ctx->hdr_mem = hdr_mem;
304 }
305 }
306
307 if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
308 dma_free_coherent(ipa3_ctx->pdev, ctx_mem.size, ctx_mem.base,
309 ctx_mem.phys_base);
310 } else {
311 if (!rc) {
312 if (ipa3_ctx->hdr_proc_ctx_mem.phys_base)
313 dma_free_coherent(ipa3_ctx->pdev,
314 ipa3_ctx->hdr_proc_ctx_mem.size,
315 ipa3_ctx->hdr_proc_ctx_mem.base,
316 ipa3_ctx->hdr_proc_ctx_mem.phys_base);
317 ipa3_ctx->hdr_proc_ctx_mem = ctx_mem;
318 }
319 }
320
321end:
322 if (ctx_cmd_pyld)
323 ipahal_destroy_imm_cmd(ctx_cmd_pyld);
324
325 if (hdr_cmd_pyld)
326 ipahal_destroy_imm_cmd(hdr_cmd_pyld);
327
328 return rc;
329}
330
/**
 * __ipa_add_hdr_proc_ctx() - add a processing-context entry to the SW table
 * @proc_ctx: [in/out] parameters of the context to add; proc_ctx_hdl is
 *  written back with the allocated handle on success
 * @add_ref_hdr: take a reference on the associated header entry?
 *
 * Validates the type and the referenced header, allocates an entry,
 * places it in the smallest bin that fits its HW length (reusing a free
 * slot when available, otherwise growing the table end), and registers
 * the entry in the handle database.
 *
 * Caller must hold ipa3_ctx->lock.
 * Returns: 0 on success, negative on failure
 */
static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
	bool add_ref_hdr)
{
	struct ipa3_hdr_entry *hdr_entry;
	struct ipa3_hdr_proc_ctx_entry *entry;
	struct ipa3_hdr_proc_ctx_offset_entry *offset;
	u32 bin;
	struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;
	int id;
	int needed_len;
	int mem_size;

	IPADBG_LOW("Add processing type %d hdr_hdl %d\n",
		proc_ctx->type, proc_ctx->hdr_hdl);

	if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
		IPAERR_RL("invalid processing type %d\n", proc_ctx->type);
		return -EINVAL;
	}

	/* the context must reference a live, valid header entry */
	hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl);
	if (!hdr_entry) {
		IPAERR_RL("hdr_hdl is invalid\n");
		return -EINVAL;
	}
	if (hdr_entry->cookie != IPA_HDR_COOKIE) {
		IPAERR_RL("Invalid header cookie %u\n", hdr_entry->cookie);
		WARN_ON(1);
		return -EINVAL;
	}
	IPADBG("Associated header is name=%s is_hdr_proc_ctx=%d\n",
		hdr_entry->name, hdr_entry->is_hdr_proc_ctx);

	entry = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_cache, GFP_KERNEL);
	if (!entry) {
		IPAERR("failed to alloc proc_ctx object\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&entry->link);

	entry->type = proc_ctx->type;
	entry->hdr = hdr_entry;
	entry->l2tp_params = proc_ctx->l2tp_params;
	if (add_ref_hdr)
		hdr_entry->ref_cnt++;
	entry->cookie = IPA_PROC_HDR_COOKIE;

	/* choose the smallest bin that fits the HW serialized length */
	needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type);

	if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0]) {
		bin = IPA_HDR_PROC_CTX_BIN0;
	} else if (needed_len <=
			ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
		bin = IPA_HDR_PROC_CTX_BIN1;
	} else {
		IPAERR_RL("unexpected needed len %d\n", needed_len);
		WARN_ON(1);
		goto bad_len;
	}

	mem_size = (ipa3_ctx->hdr_proc_ctx_tbl_lcl) ?
		IPA_MEM_PART(apps_hdr_proc_ctx_size) :
		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
	if (list_empty(&htbl->head_free_offset_list[bin])) {
		/* no recycled slot: grow the table end, capacity permitting */
		if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
			IPAERR_RL("hdr proc ctx table overflow\n");
			goto bad_len;
		}

		offset = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_offset_cache,
			GFP_KERNEL);
		if (!offset) {
			IPAERR("failed to alloc offset object\n");
			goto bad_len;
		}
		INIT_LIST_HEAD(&offset->link);
		/*
		 * for a first item grow, set the bin and offset which are set
		 * in stone
		 */
		offset->offset = htbl->end;
		offset->bin = bin;
		htbl->end += ipa_hdr_proc_ctx_bin_sz[bin];
		list_add(&offset->link,
			&htbl->head_offset_list[bin]);
	} else {
		/* get the first free slot */
		offset =
			list_first_entry(&htbl->head_free_offset_list[bin],
			struct ipa3_hdr_proc_ctx_offset_entry, link);
		list_move(&offset->link, &htbl->head_offset_list[bin]);
	}

	entry->offset_entry = offset;
	list_add(&entry->link, &htbl->head_proc_ctx_entry_list);
	htbl->proc_ctx_cnt++;
	IPADBG("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
		htbl->proc_ctx_cnt, offset->offset);

	id = ipa3_id_alloc(entry);
	if (id < 0) {
		IPAERR("failed to alloc id\n");
		WARN_ON(1);
		goto ipa_insert_failed;
	}
	entry->id = id;
	proc_ctx->proc_ctx_hdl = id;
	entry->ref_cnt++;

	return 0;

ipa_insert_failed:
	/* undo the table insertion; return the slot to the free list */
	if (offset)
		list_move(&offset->link,
		&htbl->head_free_offset_list[offset->bin]);
	entry->offset_entry = NULL;
	list_del(&entry->link);
	htbl->proc_ctx_cnt--;

bad_len:
	/* drop the header reference taken above, if any */
	if (add_ref_hdr)
		hdr_entry->ref_cnt--;
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);
	return -EPERM;
}
458
459
/**
 * __ipa_add_hdr() - add a header entry to the SW header table
 * @hdr: [in/out] header to add; hdr_hdl is written back with the
 *  allocated handle on success
 *
 * Places the header in the smallest bin that fits it. If no free slot
 * exists and the table is full, the header is kept in system memory
 * (DMA-mapped) and a processing context of type IPA_HDR_PROC_NONE is
 * created to reference it.
 *
 * Caller must hold ipa3_ctx->lock.
 * Returns: 0 on success, negative on failure
 */
static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
{
	struct ipa3_hdr_entry *entry;
	struct ipa_hdr_offset_entry *offset = NULL;
	u32 bin;
	struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
	int id;
	int mem_size;

	if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
		IPAERR_RL("bad parm\n");
		goto error;
	}

	if (!HDR_TYPE_IS_VALID(hdr->type)) {
		IPAERR_RL("invalid hdr type %d\n", hdr->type);
		goto error;
	}

	entry = kmem_cache_zalloc(ipa3_ctx->hdr_cache, GFP_KERNEL);
	if (!entry) {
		IPAERR("failed to alloc hdr object\n");
		goto error;
	}

	INIT_LIST_HEAD(&entry->link);

	/* copy the caller's header bytes and metadata into the entry */
	memcpy(entry->hdr, hdr->hdr, hdr->hdr_len);
	entry->hdr_len = hdr->hdr_len;
	strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX);
	entry->is_partial = hdr->is_partial;
	entry->type = hdr->type;
	entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
	entry->eth2_ofst = hdr->eth2_ofst;
	entry->cookie = IPA_HDR_COOKIE;

	/* choose the smallest bin that fits the header length */
	if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
		bin = IPA_HDR_BIN0;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1])
		bin = IPA_HDR_BIN1;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2])
		bin = IPA_HDR_BIN2;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3])
		bin = IPA_HDR_BIN3;
	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
		bin = IPA_HDR_BIN4;
	else {
		IPAERR_RL("unexpected hdr len %d\n", hdr->hdr_len);
		goto bad_hdr_len;
	}

	mem_size = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_size) :
		IPA_MEM_PART(apps_hdr_size_ddr);

	if (list_empty(&htbl->head_free_offset_list[bin])) {
		/* if header does not fit to table, place it in DDR */
		if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) {
			entry->is_hdr_proc_ctx = true;
			entry->phys_base = dma_map_single(ipa3_ctx->pdev,
				entry->hdr,
				entry->hdr_len,
				DMA_TO_DEVICE);
			if (dma_mapping_error(ipa3_ctx->pdev,
				entry->phys_base)) {
				IPAERR("dma_map_single failure for entry\n");
				goto fail_dma_mapping;
			}
		} else {
			entry->is_hdr_proc_ctx = false;
			offset = kmem_cache_zalloc(ipa3_ctx->hdr_offset_cache,
				GFP_KERNEL);
			if (!offset) {
				IPAERR("failed to alloc hdr offset object\n");
				goto bad_hdr_len;
			}
			INIT_LIST_HEAD(&offset->link);
			/*
			 * for a first item grow, set the bin and offset which
			 * are set in stone
			 */
			offset->offset = htbl->end;
			offset->bin = bin;
			htbl->end += ipa_hdr_bin_sz[bin];
			list_add(&offset->link,
				&htbl->head_offset_list[bin]);
			entry->offset_entry = offset;
		}
	} else {
		entry->is_hdr_proc_ctx = false;
		/* get the first free slot */
		offset = list_first_entry(&htbl->head_free_offset_list[bin],
			struct ipa_hdr_offset_entry, link);
		list_move(&offset->link, &htbl->head_offset_list[bin]);
		entry->offset_entry = offset;
	}

	list_add(&entry->link, &htbl->head_hdr_entry_list);
	htbl->hdr_cnt++;
	if (entry->is_hdr_proc_ctx)
		IPADBG("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
			hdr->hdr_len,
			htbl->hdr_cnt,
			&entry->phys_base);
	else
		IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
			hdr->hdr_len,
			htbl->hdr_cnt,
			entry->offset_entry->offset);

	id = ipa3_id_alloc(entry);
	if (id < 0) {
		IPAERR("failed to alloc id\n");
		WARN_ON(1);
		goto ipa_insert_failed;
	}
	entry->id = id;
	hdr->hdr_hdl = id;
	entry->ref_cnt++;

	/* DDR-resident headers need a pass-through processing context */
	if (entry->is_hdr_proc_ctx) {
		struct ipa_hdr_proc_ctx_add proc_ctx;

		IPADBG("adding processing context for header %s\n", hdr->name);
		proc_ctx.type = IPA_HDR_PROC_NONE;
		proc_ctx.hdr_hdl = id;
		if (__ipa_add_hdr_proc_ctx(&proc_ctx, false)) {
			IPAERR("failed to add hdr proc ctx\n");
			goto fail_add_proc_ctx;
		}
		entry->proc_ctx = ipa3_id_find(proc_ctx.proc_ctx_hdl);
	}

	return 0;

fail_add_proc_ctx:
	entry->ref_cnt--;
	hdr->hdr_hdl = 0;
	ipa3_id_remove(id);
ipa_insert_failed:
	/* undo the placement: unmap DDR copy or recycle the table slot */
	if (entry->is_hdr_proc_ctx) {
		dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
			entry->hdr_len, DMA_TO_DEVICE);
	} else {
		if (offset)
			list_move(&offset->link,
				&htbl->head_free_offset_list[offset->bin]);
		entry->offset_entry = NULL;
	}
	htbl->hdr_cnt--;
	list_del(&entry->link);

fail_dma_mapping:
	entry->is_hdr_proc_ctx = false;

bad_hdr_len:
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_cache, entry);
error:
	return -EPERM;
}
620
/**
 * __ipa3_del_hdr_proc_ctx() - drop a reference on a processing context
 * @proc_ctx_hdl: handle of the processing context to release
 * @release_hdr: also drop the reference on the associated header?
 * @by_user: deletion requested by user space?
 *
 * Decrements the entry's refcount; when it reaches zero the entry is
 * removed from the table, its slot is recycled, and the handle is freed.
 *
 * Caller must hold ipa3_ctx->lock.
 * Returns: 0 on success, negative on failure
 */
static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl,
	bool release_hdr, bool by_user)
{
	struct ipa3_hdr_proc_ctx_entry *entry;
	struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;

	entry = ipa3_id_find(proc_ctx_hdl);
	if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) {
		IPAERR_RL("bad parm\n");
		return -EINVAL;
	}

	IPADBG("del proc ctx cnt=%d ofst=%d\n",
		htbl->proc_ctx_cnt, entry->offset_entry->offset);

	/* reject a repeated user-space delete of the same handle */
	if (by_user && entry->user_deleted) {
		IPAERR_RL("proc_ctx already deleted by user\n");
		return -EINVAL;
	}

	if (by_user)
		entry->user_deleted = true;

	/* other holders remain; just drop this reference */
	if (--entry->ref_cnt) {
		IPADBG("proc_ctx_hdl %x ref_cnt %d\n",
			proc_ctx_hdl, entry->ref_cnt);
		return 0;
	}

	if (release_hdr)
		__ipa3_del_hdr(entry->hdr->id, false);

	/* move the offset entry to appropriate free list */
	list_move(&entry->offset_entry->link,
		&htbl->head_free_offset_list[entry->offset_entry->bin]);
	list_del(&entry->link);
	htbl->proc_ctx_cnt--;
	entry->cookie = 0;
	kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);

	/* remove the handle from the database */
	ipa3_id_remove(proc_ctx_hdl);

	return 0;
}
666
667
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200668int __ipa3_del_hdr(u32 hdr_hdl, bool by_user)
Amir Levy9659e592016-10-27 18:08:27 +0300669{
670 struct ipa3_hdr_entry *entry;
671 struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
672
673 entry = ipa3_id_find(hdr_hdl);
674 if (entry == NULL) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +0530675 IPAERR_RL("lookup failed\n");
Amir Levy9659e592016-10-27 18:08:27 +0300676 return -EINVAL;
677 }
678
Mohammed Javid93e94ce2017-06-15 15:39:04 +0530679 if (entry->cookie != IPA_HDR_COOKIE) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +0530680 IPAERR_RL("bad parm\n");
Amir Levy9659e592016-10-27 18:08:27 +0300681 return -EINVAL;
682 }
683
684 if (entry->is_hdr_proc_ctx)
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200685 IPADBG("del hdr of len=%d hdr_cnt=%d phys_base=%pa\n",
Amir Levy9659e592016-10-27 18:08:27 +0300686 entry->hdr_len, htbl->hdr_cnt, &entry->phys_base);
687 else
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200688 IPADBG("del hdr of len=%d hdr_cnt=%d ofst=%d\n",
689 entry->hdr_len, htbl->hdr_cnt,
690 entry->offset_entry->offset);
Amir Levy9659e592016-10-27 18:08:27 +0300691
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200692 if (by_user && entry->user_deleted) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +0530693 IPAERR_RL("proc_ctx already deleted by user\n");
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200694 return -EINVAL;
695 }
696
697 if (by_user)
698 entry->user_deleted = true;
699
Amir Levy9659e592016-10-27 18:08:27 +0300700 if (--entry->ref_cnt) {
701 IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt);
702 return 0;
703 }
704
705 if (entry->is_hdr_proc_ctx) {
706 dma_unmap_single(ipa3_ctx->pdev,
707 entry->phys_base,
708 entry->hdr_len,
709 DMA_TO_DEVICE);
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200710 __ipa3_del_hdr_proc_ctx(entry->proc_ctx->id, false, false);
Amir Levy9659e592016-10-27 18:08:27 +0300711 } else {
712 /* move the offset entry to appropriate free list */
713 list_move(&entry->offset_entry->link,
714 &htbl->head_free_offset_list[entry->offset_entry->bin]);
715 }
716 list_del(&entry->link);
717 htbl->hdr_cnt--;
718 entry->cookie = 0;
719 kmem_cache_free(ipa3_ctx->hdr_cache, entry);
720
721 /* remove the handle from the database */
722 ipa3_id_remove(hdr_hdl);
723
724 return 0;
725}
726
727/**
728 * ipa3_add_hdr() - add the specified headers to SW and optionally commit them
729 * to IPA HW
730 * @hdrs: [inout] set of headers to add
731 *
732 * Returns: 0 on success, negative on failure
733 *
734 * Note: Should not be called from atomic context
735 */
736int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs)
737{
738 int i;
739 int result = -EFAULT;
740
741 if (hdrs == NULL || hdrs->num_hdrs == 0) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +0530742 IPAERR_RL("bad parm\n");
Amir Levy9659e592016-10-27 18:08:27 +0300743 return -EINVAL;
744 }
745
746 mutex_lock(&ipa3_ctx->lock);
747 IPADBG("adding %d headers to IPA driver internal data struct\n",
748 hdrs->num_hdrs);
749 for (i = 0; i < hdrs->num_hdrs; i++) {
750 if (__ipa_add_hdr(&hdrs->hdr[i])) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +0530751 IPAERR_RL("failed to add hdr %d\n", i);
Amir Levy9659e592016-10-27 18:08:27 +0300752 hdrs->hdr[i].status = -1;
753 } else {
754 hdrs->hdr[i].status = 0;
755 }
756 }
757
758 if (hdrs->commit) {
759 IPADBG("committing all headers to IPA core");
760 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
761 result = -EPERM;
762 goto bail;
763 }
764 }
765 result = 0;
766bail:
767 mutex_unlock(&ipa3_ctx->lock);
768 return result;
769}
770
771/**
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200772 * ipa3_del_hdr_by_user() - Remove the specified headers
773 * from SW and optionally commit them to IPA HW
Amir Levy9659e592016-10-27 18:08:27 +0300774 * @hdls: [inout] set of headers to delete
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200775 * @by_user: Operation requested by user?
Amir Levy9659e592016-10-27 18:08:27 +0300776 *
777 * Returns: 0 on success, negative on failure
778 *
779 * Note: Should not be called from atomic context
780 */
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200781int ipa3_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user)
Amir Levy9659e592016-10-27 18:08:27 +0300782{
783 int i;
784 int result = -EFAULT;
785
786 if (hdls == NULL || hdls->num_hdls == 0) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +0530787 IPAERR_RL("bad parm\n");
Amir Levy9659e592016-10-27 18:08:27 +0300788 return -EINVAL;
789 }
790
791 mutex_lock(&ipa3_ctx->lock);
792 for (i = 0; i < hdls->num_hdls; i++) {
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200793 if (__ipa3_del_hdr(hdls->hdl[i].hdl, by_user)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +0530794 IPAERR_RL("failed to del hdr %i\n", i);
Amir Levy9659e592016-10-27 18:08:27 +0300795 hdls->hdl[i].status = -1;
796 } else {
797 hdls->hdl[i].status = 0;
798 }
799 }
800
801 if (hdls->commit) {
802 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
803 result = -EPERM;
804 goto bail;
805 }
806 }
807 result = 0;
808bail:
809 mutex_unlock(&ipa3_ctx->lock);
810 return result;
811}
812
813/**
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200814 * ipa3_del_hdr() - Remove the specified headers from SW
815 * and optionally commit them to IPA HW
816 * @hdls: [inout] set of headers to delete
817 *
818 * Returns: 0 on success, negative on failure
819 *
820 * Note: Should not be called from atomic context
821 */
822int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls)
823{
824 return ipa3_del_hdr_by_user(hdls, false);
825}
826
827/**
Amir Levy9659e592016-10-27 18:08:27 +0300828 * ipa3_add_hdr_proc_ctx() - add the specified headers to SW
829 * and optionally commit them to IPA HW
830 * @proc_ctxs: [inout] set of processing context headers to add
831 *
832 * Returns: 0 on success, negative on failure
833 *
834 * Note: Should not be called from atomic context
835 */
836int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
837{
838 int i;
839 int result = -EFAULT;
840
841 if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +0530842 IPAERR_RL("bad parm\n");
Amir Levy9659e592016-10-27 18:08:27 +0300843 return -EINVAL;
844 }
845
846 mutex_lock(&ipa3_ctx->lock);
847 IPADBG("adding %d header processing contextes to IPA driver\n",
848 proc_ctxs->num_proc_ctxs);
849 for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
850 if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +0530851 IPAERR_RL("failed to add hdr pric ctx %d\n", i);
Amir Levy9659e592016-10-27 18:08:27 +0300852 proc_ctxs->proc_ctx[i].status = -1;
853 } else {
854 proc_ctxs->proc_ctx[i].status = 0;
855 }
856 }
857
858 if (proc_ctxs->commit) {
859 IPADBG("committing all headers to IPA core");
860 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
861 result = -EPERM;
862 goto bail;
863 }
864 }
865 result = 0;
866bail:
867 mutex_unlock(&ipa3_ctx->lock);
868 return result;
869}
870
871/**
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200872 * ipa3_del_hdr_proc_ctx_by_user() -
Amir Levy9659e592016-10-27 18:08:27 +0300873 * Remove the specified processing context headers from SW and
874 * optionally commit them to IPA HW.
875 * @hdls: [inout] set of processing context headers to delete
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200876 * @by_user: Operation requested by user?
Amir Levy9659e592016-10-27 18:08:27 +0300877 *
878 * Returns: 0 on success, negative on failure
879 *
880 * Note: Should not be called from atomic context
881 */
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200882int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls,
883 bool by_user)
Amir Levy9659e592016-10-27 18:08:27 +0300884{
885 int i;
886 int result;
887
888 if (hdls == NULL || hdls->num_hdls == 0) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +0530889 IPAERR_RL("bad parm\n");
Amir Levy9659e592016-10-27 18:08:27 +0300890 return -EINVAL;
891 }
892
893 mutex_lock(&ipa3_ctx->lock);
894 for (i = 0; i < hdls->num_hdls; i++) {
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200895 if (__ipa3_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +0530896 IPAERR_RL("failed to del hdr %i\n", i);
Amir Levy9659e592016-10-27 18:08:27 +0300897 hdls->hdl[i].status = -1;
898 } else {
899 hdls->hdl[i].status = 0;
900 }
901 }
902
903 if (hdls->commit) {
904 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
905 result = -EPERM;
906 goto bail;
907 }
908 }
909 result = 0;
910bail:
911 mutex_unlock(&ipa3_ctx->lock);
912 return result;
913}
914
/**
 * ipa3_del_hdr_proc_ctx() -
 * Remove the specified processing context headers from SW and
 * optionally commit them to IPA HW.
 * @hdls: [inout] set of processing context headers to delete
 *
 * Convenience wrapper around ipa3_del_hdr_proc_ctx_by_user() for
 * deletions not initiated by user space (by_user == false).
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
{
	return ipa3_del_hdr_proc_ctx_by_user(hdls, false);
}
929
930/**
Amir Levy9659e592016-10-27 18:08:27 +0300931 * ipa3_commit_hdr() - commit to IPA HW the current header table in SW
932 *
933 * Returns: 0 on success, negative on failure
934 *
935 * Note: Should not be called from atomic context
936 */
937int ipa3_commit_hdr(void)
938{
939 int result = -EFAULT;
940
941 /*
942 * issue a commit on the routing module since routing rules point to
943 * header table entries
944 */
945 if (ipa3_commit_rt(IPA_IP_v4))
946 return -EPERM;
947 if (ipa3_commit_rt(IPA_IP_v6))
948 return -EPERM;
949
950 mutex_lock(&ipa3_ctx->lock);
951 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
952 result = -EPERM;
953 goto bail;
954 }
955 result = 0;
956bail:
957 mutex_unlock(&ipa3_ctx->lock);
958 return result;
959}
960
961/**
962 * ipa3_reset_hdr() - reset the current header table in SW (does not commit to
963 * HW)
964 *
965 * Returns: 0 on success, negative on failure
966 *
967 * Note: Should not be called from atomic context
968 */
969int ipa3_reset_hdr(void)
970{
971 struct ipa3_hdr_entry *entry;
972 struct ipa3_hdr_entry *next;
973 struct ipa3_hdr_proc_ctx_entry *ctx_entry;
974 struct ipa3_hdr_proc_ctx_entry *ctx_next;
975 struct ipa_hdr_offset_entry *off_entry;
976 struct ipa_hdr_offset_entry *off_next;
977 struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_entry;
978 struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_next;
979 int i;
980
981 /*
982 * issue a reset on the routing module since routing rules point to
983 * header table entries
984 */
985 if (ipa3_reset_rt(IPA_IP_v4))
986 IPAERR("fail to reset v4 rt\n");
987 if (ipa3_reset_rt(IPA_IP_v6))
988 IPAERR("fail to reset v4 rt\n");
989
990 mutex_lock(&ipa3_ctx->lock);
991 IPADBG("reset hdr\n");
992 list_for_each_entry_safe(entry, next,
993 &ipa3_ctx->hdr_tbl.head_hdr_entry_list, link) {
994
995 /* do not remove the default header */
996 if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
997 if (entry->is_hdr_proc_ctx) {
998 IPAERR("default header is proc ctx\n");
999 mutex_unlock(&ipa3_ctx->lock);
1000 WARN_ON(1);
1001 return -EFAULT;
1002 }
1003 continue;
1004 }
1005
1006 if (ipa3_id_find(entry->id) == NULL) {
1007 mutex_unlock(&ipa3_ctx->lock);
1008 WARN_ON(1);
1009 return -EFAULT;
1010 }
1011 if (entry->is_hdr_proc_ctx) {
1012 dma_unmap_single(ipa3_ctx->pdev,
1013 entry->phys_base,
1014 entry->hdr_len,
1015 DMA_TO_DEVICE);
1016 entry->proc_ctx = NULL;
1017 }
1018 list_del(&entry->link);
1019 entry->ref_cnt = 0;
1020 entry->cookie = 0;
1021
1022 /* remove the handle from the database */
1023 ipa3_id_remove(entry->id);
1024 kmem_cache_free(ipa3_ctx->hdr_cache, entry);
1025
1026 }
1027 for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
1028 list_for_each_entry_safe(off_entry, off_next,
1029 &ipa3_ctx->hdr_tbl.head_offset_list[i],
1030 link) {
1031
1032 /*
1033 * do not remove the default exception header which is
1034 * at offset 0
1035 */
1036 if (off_entry->offset == 0)
1037 continue;
1038
1039 list_del(&off_entry->link);
1040 kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry);
1041 }
1042 list_for_each_entry_safe(off_entry, off_next,
1043 &ipa3_ctx->hdr_tbl.head_free_offset_list[i],
1044 link) {
1045 list_del(&off_entry->link);
1046 kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry);
1047 }
1048 }
1049 /* there is one header of size 8 */
1050 ipa3_ctx->hdr_tbl.end = 8;
1051 ipa3_ctx->hdr_tbl.hdr_cnt = 1;
1052
1053 IPADBG("reset hdr proc ctx\n");
1054 list_for_each_entry_safe(
1055 ctx_entry,
1056 ctx_next,
1057 &ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
1058 link) {
1059
1060 if (ipa3_id_find(ctx_entry->id) == NULL) {
1061 mutex_unlock(&ipa3_ctx->lock);
1062 WARN_ON(1);
1063 return -EFAULT;
1064 }
1065 list_del(&ctx_entry->link);
1066 ctx_entry->ref_cnt = 0;
1067 ctx_entry->cookie = 0;
1068
1069 /* remove the handle from the database */
1070 ipa3_id_remove(ctx_entry->id);
1071 kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, ctx_entry);
1072
1073 }
1074 for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
1075 list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
1076 &ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i],
1077 link) {
1078
1079 list_del(&ctx_off_entry->link);
1080 kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache,
1081 ctx_off_entry);
1082 }
1083 list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
1084 &ipa3_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i],
1085 link) {
1086 list_del(&ctx_off_entry->link);
1087 kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache,
1088 ctx_off_entry);
1089 }
1090 }
1091 ipa3_ctx->hdr_proc_ctx_tbl.end = 0;
1092 ipa3_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0;
1093 mutex_unlock(&ipa3_ctx->lock);
1094
1095 return 0;
1096}
1097
1098static struct ipa3_hdr_entry *__ipa_find_hdr(const char *name)
1099{
1100 struct ipa3_hdr_entry *entry;
1101
1102 if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301103 IPAERR_RL("Header name too long: %s\n", name);
Amir Levy9659e592016-10-27 18:08:27 +03001104 return NULL;
1105 }
1106
1107 list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
1108 link) {
1109 if (!strcmp(name, entry->name))
1110 return entry;
1111 }
1112
1113 return NULL;
1114}
1115
1116/**
1117 * ipa3_get_hdr() - Lookup the specified header resource
1118 * @lookup: [inout] header to lookup and its handle
1119 *
1120 * lookup the specified header resource and return handle if it exists
1121 *
1122 * Returns: 0 on success, negative on failure
1123 *
1124 * Note: Should not be called from atomic context
1125 * Caller should call ipa3_put_hdr later if this function succeeds
1126 */
1127int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup)
1128{
1129 struct ipa3_hdr_entry *entry;
1130 int result = -1;
1131
1132 if (lookup == NULL) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301133 IPAERR_RL("bad parm\n");
Amir Levy9659e592016-10-27 18:08:27 +03001134 return -EINVAL;
1135 }
1136 mutex_lock(&ipa3_ctx->lock);
1137 entry = __ipa_find_hdr(lookup->name);
1138 if (entry) {
1139 lookup->hdl = entry->id;
1140 result = 0;
1141 }
1142 mutex_unlock(&ipa3_ctx->lock);
1143
1144 return result;
1145}
1146
1147/**
1148 * __ipa3_release_hdr() - drop reference to header and cause
1149 * deletion if reference count permits
1150 * @hdr_hdl: [in] handle of header to be released
1151 *
1152 * Returns: 0 on success, negative on failure
1153 */
1154int __ipa3_release_hdr(u32 hdr_hdl)
1155{
1156 int result = 0;
1157
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001158 if (__ipa3_del_hdr(hdr_hdl, false)) {
Amir Levy9659e592016-10-27 18:08:27 +03001159 IPADBG("fail to del hdr %x\n", hdr_hdl);
1160 result = -EFAULT;
1161 goto bail;
1162 }
1163
1164 /* commit for put */
1165 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
1166 IPAERR("fail to commit hdr\n");
1167 result = -EFAULT;
1168 goto bail;
1169 }
1170
1171bail:
1172 return result;
1173}
1174
1175/**
1176 * __ipa3_release_hdr_proc_ctx() - drop reference to processing context
1177 * and cause deletion if reference count permits
1178 * @proc_ctx_hdl: [in] handle of processing context to be released
1179 *
1180 * Returns: 0 on success, negative on failure
1181 */
1182int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl)
1183{
1184 int result = 0;
1185
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001186 if (__ipa3_del_hdr_proc_ctx(proc_ctx_hdl, true, false)) {
Amir Levy9659e592016-10-27 18:08:27 +03001187 IPADBG("fail to del hdr %x\n", proc_ctx_hdl);
1188 result = -EFAULT;
1189 goto bail;
1190 }
1191
1192 /* commit for put */
1193 if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
1194 IPAERR("fail to commit hdr\n");
1195 result = -EFAULT;
1196 goto bail;
1197 }
1198
1199bail:
1200 return result;
1201}
1202
1203/**
1204 * ipa3_put_hdr() - Release the specified header handle
1205 * @hdr_hdl: [in] the header handle to release
1206 *
1207 * Returns: 0 on success, negative on failure
1208 *
1209 * Note: Should not be called from atomic context
1210 */
1211int ipa3_put_hdr(u32 hdr_hdl)
1212{
1213 struct ipa3_hdr_entry *entry;
1214 int result = -EFAULT;
1215
1216 mutex_lock(&ipa3_ctx->lock);
1217
1218 entry = ipa3_id_find(hdr_hdl);
1219 if (entry == NULL) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301220 IPAERR_RL("lookup failed\n");
Amir Levy9659e592016-10-27 18:08:27 +03001221 result = -EINVAL;
1222 goto bail;
1223 }
1224
Mohammed Javid93e94ce2017-06-15 15:39:04 +05301225 if (entry->cookie != IPA_HDR_COOKIE) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301226 IPAERR_RL("invalid header entry\n");
Amir Levy9659e592016-10-27 18:08:27 +03001227 result = -EINVAL;
1228 goto bail;
1229 }
1230
1231 result = 0;
1232bail:
1233 mutex_unlock(&ipa3_ctx->lock);
1234 return result;
1235}
1236
1237/**
1238 * ipa3_copy_hdr() - Lookup the specified header resource and return a copy of
1239 * it
1240 * @copy: [inout] header to lookup and its copy
1241 *
1242 * lookup the specified header resource and return a copy of it (along with its
1243 * attributes) if it exists, this would be called for partial headers
1244 *
1245 * Returns: 0 on success, negative on failure
1246 *
1247 * Note: Should not be called from atomic context
1248 */
1249int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy)
1250{
1251 struct ipa3_hdr_entry *entry;
1252 int result = -EFAULT;
1253
1254 if (copy == NULL) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301255 IPAERR_RL("bad parm\n");
Amir Levy9659e592016-10-27 18:08:27 +03001256 return -EINVAL;
1257 }
1258 mutex_lock(&ipa3_ctx->lock);
1259 entry = __ipa_find_hdr(copy->name);
1260 if (entry) {
1261 memcpy(copy->hdr, entry->hdr, entry->hdr_len);
1262 copy->hdr_len = entry->hdr_len;
1263 copy->type = entry->type;
1264 copy->is_partial = entry->is_partial;
1265 copy->is_eth2_ofst_valid = entry->is_eth2_ofst_valid;
1266 copy->eth2_ofst = entry->eth2_ofst;
1267 result = 0;
1268 }
1269 mutex_unlock(&ipa3_ctx->lock);
1270
1271 return result;
1272}