/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/debugfs.h>
#include "ipahal.h"
#include "ipahal_i.h"
#include "ipahal_reg_i.h"
#include "ipahal_fltrt_i.h"

struct ipahal_context *ipahal_ctx;

static const char *ipahal_imm_cmd_name_to_str[IPA_IMM_CMD_MAX] = {
	__stringify(IPA_IMM_CMD_IP_V4_FILTER_INIT),
	__stringify(IPA_IMM_CMD_IP_V6_FILTER_INIT),
	__stringify(IPA_IMM_CMD_IP_V4_NAT_INIT),
	__stringify(IPA_IMM_CMD_IP_V4_ROUTING_INIT),
	__stringify(IPA_IMM_CMD_IP_V6_ROUTING_INIT),
	__stringify(IPA_IMM_CMD_HDR_INIT_LOCAL),
	__stringify(IPA_IMM_CMD_HDR_INIT_SYSTEM),
	__stringify(IPA_IMM_CMD_REGISTER_WRITE),
	__stringify(IPA_IMM_CMD_NAT_DMA),
	__stringify(IPA_IMM_CMD_IP_PACKET_INIT),
	__stringify(IPA_IMM_CMD_DMA_SHARED_MEM),
	__stringify(IPA_IMM_CMD_IP_PACKET_TAG_STATUS),
	__stringify(IPA_IMM_CMD_DMA_TASK_32B_ADDR),
};

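/*
 * Note: this array is indexed by enum ipahal_pkt_status_exception and must
 * be kept in sync with the exception mapping in ipa_pkt_status_parse() below.
 */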
static const char *ipahal_pkt_status_exception_to_str
	[IPAHAL_PKT_STATUS_EXCEPTION_MAX] = {
	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_NONE),
	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR),
	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE),
	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH),
	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_PACKET_THRESHOLD),
	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS),
	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT),
	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_NAT),
};

#define IPAHAL_MEM_ALLOC(__size, __is_atomic_ctx) \
	(kzalloc((__size), ((__is_atomic_ctx) ? GFP_ATOMIC : GFP_KERNEL)))


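/*
 * All of the ipa_imm_cmd_construct_*() helpers below follow the same pattern:
 * allocate the payload descriptor together with the H/W command structure in
 * a single (possibly atomic) allocation, validate that caller-supplied fields
 * fit their H/W bit widths, and then pack the abstracted parameters into the
 * H/W layout.
 */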
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_task_32b_addr(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	struct ipahal_imm_cmd_pyld *pyld;
	struct ipa_imm_cmd_hw_dma_task_32b_addr *data;
	struct ipahal_imm_cmd_dma_task_32b_addr *dma_params =
		(struct ipahal_imm_cmd_dma_task_32b_addr *)params;

	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*data);
	data = (struct ipa_imm_cmd_hw_dma_task_32b_addr *)pyld->data;

	if (unlikely(dma_params->size1 & ~0xFFFF)) {
		IPAHAL_ERR("Size1 is bigger than 16bit width 0x%x\n",
			dma_params->size1);
		WARN_ON(1);
	}
	if (unlikely(dma_params->packet_size & ~0xFFFF)) {
		IPAHAL_ERR("Pkt size is bigger than 16bit width 0x%x\n",
			dma_params->packet_size);
		WARN_ON(1);
	}
	data->cmplt = dma_params->cmplt ? 1 : 0;
	data->eof = dma_params->eof ? 1 : 0;
	data->flsh = dma_params->flsh ? 1 : 0;
	data->lock = dma_params->lock ? 1 : 0;
	data->unlock = dma_params->unlock ? 1 : 0;
	data->size1 = dma_params->size1;
	data->addr1 = dma_params->addr1;
	data->packet_size = dma_params->packet_size;

	return pyld;
}

static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_tag_status(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	struct ipahal_imm_cmd_pyld *pyld;
	struct ipa_imm_cmd_hw_ip_packet_tag_status *data;
	struct ipahal_imm_cmd_ip_packet_tag_status *tag_params =
		(struct ipahal_imm_cmd_ip_packet_tag_status *)params;

	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*data);
	data = (struct ipa_imm_cmd_hw_ip_packet_tag_status *)pyld->data;

	if (unlikely(tag_params->tag & ~0xFFFFFFFFFFFF)) {
		IPAHAL_ERR("tag is bigger than 48bit width 0x%llx\n",
			tag_params->tag);
		WARN_ON(1);
	}
	data->tag = tag_params->tag;

	return pyld;
}

static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_shared_mem(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	struct ipahal_imm_cmd_pyld *pyld;
	struct ipa_imm_cmd_hw_dma_shared_mem *data;
	struct ipahal_imm_cmd_dma_shared_mem *mem_params =
		(struct ipahal_imm_cmd_dma_shared_mem *)params;

	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*data);
	data = (struct ipa_imm_cmd_hw_dma_shared_mem *)pyld->data;

	if (unlikely(mem_params->size & ~0xFFFF)) {
		IPAHAL_ERR("Size is bigger than 16bit width 0x%x\n",
			mem_params->size);
		WARN_ON(1);
	}
	if (unlikely(mem_params->local_addr & ~0xFFFF)) {
		IPAHAL_ERR("Local addr is bigger than 16bit width 0x%x\n",
			mem_params->local_addr);
		WARN_ON(1);
	}
	data->direction = mem_params->is_read ? 1 : 0;
	data->size = mem_params->size;
	data->local_addr = mem_params->local_addr;
	data->system_addr = mem_params->system_addr;
	data->skip_pipeline_clear = mem_params->skip_pipeline_clear ? 1 : 0;
	switch (mem_params->pipeline_clear_options) {
	case IPAHAL_HPS_CLEAR:
		data->pipeline_clear_options = 0;
		break;
	case IPAHAL_SRC_GRP_CLEAR:
		data->pipeline_clear_options = 1;
		break;
	case IPAHAL_FULL_PIPELINE_CLEAR:
		data->pipeline_clear_options = 2;
		break;
	default:
		IPAHAL_ERR("unsupported pipeline clear option %d\n",
			mem_params->pipeline_clear_options);
		WARN_ON(1);
	}

	return pyld;
}

static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_register_write(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	struct ipahal_imm_cmd_pyld *pyld;
	struct ipa_imm_cmd_hw_register_write *data;
	struct ipahal_imm_cmd_register_write *regwrt_params =
		(struct ipahal_imm_cmd_register_write *)params;

	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*data);
	data = (struct ipa_imm_cmd_hw_register_write *)pyld->data;

	if (unlikely(regwrt_params->offset & ~0xFFFF)) {
		IPAHAL_ERR("Offset is bigger than 16bit width 0x%x\n",
			regwrt_params->offset);
		WARN_ON(1);
	}
	data->offset = regwrt_params->offset;
	data->value = regwrt_params->value;
	data->value_mask = regwrt_params->value_mask;

	data->skip_pipeline_clear = regwrt_params->skip_pipeline_clear ? 1 : 0;
	switch (regwrt_params->pipeline_clear_options) {
	case IPAHAL_HPS_CLEAR:
		data->pipeline_clear_options = 0;
		break;
	case IPAHAL_SRC_GRP_CLEAR:
		data->pipeline_clear_options = 1;
		break;
	case IPAHAL_FULL_PIPELINE_CLEAR:
		data->pipeline_clear_options = 2;
		break;
	default:
		IPAHAL_ERR("unsupported pipeline clear option %d\n",
			regwrt_params->pipeline_clear_options);
		WARN_ON(1);
	}

	return pyld;
}

static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_init(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	struct ipahal_imm_cmd_pyld *pyld;
	struct ipa_imm_cmd_hw_ip_packet_init *data;
	struct ipahal_imm_cmd_ip_packet_init *pktinit_params =
		(struct ipahal_imm_cmd_ip_packet_init *)params;

	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*data);
	data = (struct ipa_imm_cmd_hw_ip_packet_init *)pyld->data;

	if (unlikely(pktinit_params->destination_pipe_index & ~0x1F)) {
		IPAHAL_ERR("Dst pipe idx is bigger than 5bit width 0x%x\n",
			pktinit_params->destination_pipe_index);
		WARN_ON(1);
	}
	data->destination_pipe_index = pktinit_params->destination_pipe_index;

	return pyld;
}

static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_nat_dma(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	struct ipahal_imm_cmd_pyld *pyld;
	struct ipa_imm_cmd_hw_nat_dma *data;
	struct ipahal_imm_cmd_nat_dma *nat_params =
		(struct ipahal_imm_cmd_nat_dma *)params;

	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*data);
	data = (struct ipa_imm_cmd_hw_nat_dma *)pyld->data;

	data->table_index = nat_params->table_index;
	data->base_addr = nat_params->base_addr;
	data->offset = nat_params->offset;
	data->data = nat_params->data;

	return pyld;
}

static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_system(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	struct ipahal_imm_cmd_pyld *pyld;
	struct ipa_imm_cmd_hw_hdr_init_system *data;
	struct ipahal_imm_cmd_hdr_init_system *syshdr_params =
		(struct ipahal_imm_cmd_hdr_init_system *)params;

	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*data);
	data = (struct ipa_imm_cmd_hw_hdr_init_system *)pyld->data;

	data->hdr_table_addr = syshdr_params->hdr_table_addr;

	return pyld;
}

static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_local(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	struct ipahal_imm_cmd_pyld *pyld;
	struct ipa_imm_cmd_hw_hdr_init_local *data;
	struct ipahal_imm_cmd_hdr_init_local *lclhdr_params =
		(struct ipahal_imm_cmd_hdr_init_local *)params;

	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*data);
	data = (struct ipa_imm_cmd_hw_hdr_init_local *)pyld->data;

	if (unlikely(lclhdr_params->size_hdr_table & ~0xFFF)) {
		IPAHAL_ERR("Hdr table size is bigger than 12bit width 0x%x\n",
			lclhdr_params->size_hdr_table);
		WARN_ON(1);
	}
	data->hdr_table_addr = lclhdr_params->hdr_table_addr;
	data->size_hdr_table = lclhdr_params->size_hdr_table;
	data->hdr_addr = lclhdr_params->hdr_addr;

	return pyld;
}

static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_routing_init(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	struct ipahal_imm_cmd_pyld *pyld;
	struct ipa_imm_cmd_hw_ip_v6_routing_init *data;
	struct ipahal_imm_cmd_ip_v6_routing_init *rt6_params =
		(struct ipahal_imm_cmd_ip_v6_routing_init *)params;

	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*data);
	data = (struct ipa_imm_cmd_hw_ip_v6_routing_init *)pyld->data;

	data->hash_rules_addr = rt6_params->hash_rules_addr;
	data->hash_rules_size = rt6_params->hash_rules_size;
	data->hash_local_addr = rt6_params->hash_local_addr;
	data->nhash_rules_addr = rt6_params->nhash_rules_addr;
	data->nhash_rules_size = rt6_params->nhash_rules_size;
	data->nhash_local_addr = rt6_params->nhash_local_addr;

	return pyld;
}

static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_routing_init(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	struct ipahal_imm_cmd_pyld *pyld;
	struct ipa_imm_cmd_hw_ip_v4_routing_init *data;
	struct ipahal_imm_cmd_ip_v4_routing_init *rt4_params =
		(struct ipahal_imm_cmd_ip_v4_routing_init *)params;

	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*data);
	data = (struct ipa_imm_cmd_hw_ip_v4_routing_init *)pyld->data;

	data->hash_rules_addr = rt4_params->hash_rules_addr;
	data->hash_rules_size = rt4_params->hash_rules_size;
	data->hash_local_addr = rt4_params->hash_local_addr;
	data->nhash_rules_addr = rt4_params->nhash_rules_addr;
	data->nhash_rules_size = rt4_params->nhash_rules_size;
	data->nhash_local_addr = rt4_params->nhash_local_addr;

	return pyld;
}

static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_nat_init(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	struct ipahal_imm_cmd_pyld *pyld;
	struct ipa_imm_cmd_hw_ip_v4_nat_init *data;
	struct ipahal_imm_cmd_ip_v4_nat_init *nat4_params =
		(struct ipahal_imm_cmd_ip_v4_nat_init *)params;

	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*data);
	data = (struct ipa_imm_cmd_hw_ip_v4_nat_init *)pyld->data;

	data->ipv4_rules_addr = nat4_params->ipv4_rules_addr;
	data->ipv4_expansion_rules_addr =
		nat4_params->ipv4_expansion_rules_addr;
	data->index_table_addr = nat4_params->index_table_addr;
	data->index_table_expansion_addr =
		nat4_params->index_table_expansion_addr;
	data->table_index = nat4_params->table_index;
	data->ipv4_rules_addr_type =
		nat4_params->ipv4_rules_addr_shared ? 1 : 0;
	data->ipv4_expansion_rules_addr_type =
		nat4_params->ipv4_expansion_rules_addr_shared ? 1 : 0;
	data->index_table_addr_type =
		nat4_params->index_table_addr_shared ? 1 : 0;
	data->index_table_expansion_addr_type =
		nat4_params->index_table_expansion_addr_shared ? 1 : 0;
	data->size_base_tables = nat4_params->size_base_tables;
	data->size_expansion_tables = nat4_params->size_expansion_tables;
	data->public_ip_addr = nat4_params->public_ip_addr;

	return pyld;
}

static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_filter_init(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	struct ipahal_imm_cmd_pyld *pyld;
	struct ipa_imm_cmd_hw_ip_v6_filter_init *data;
	struct ipahal_imm_cmd_ip_v6_filter_init *flt6_params =
		(struct ipahal_imm_cmd_ip_v6_filter_init *)params;

	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*data);
	data = (struct ipa_imm_cmd_hw_ip_v6_filter_init *)pyld->data;

	data->hash_rules_addr = flt6_params->hash_rules_addr;
	data->hash_rules_size = flt6_params->hash_rules_size;
	data->hash_local_addr = flt6_params->hash_local_addr;
	data->nhash_rules_addr = flt6_params->nhash_rules_addr;
	data->nhash_rules_size = flt6_params->nhash_rules_size;
	data->nhash_local_addr = flt6_params->nhash_local_addr;

	return pyld;
}

static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_filter_init(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	struct ipahal_imm_cmd_pyld *pyld;
	struct ipa_imm_cmd_hw_ip_v4_filter_init *data;
	struct ipahal_imm_cmd_ip_v4_filter_init *flt4_params =
		(struct ipahal_imm_cmd_ip_v4_filter_init *)params;

	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*data);
	data = (struct ipa_imm_cmd_hw_ip_v4_filter_init *)pyld->data;

	data->hash_rules_addr = flt4_params->hash_rules_addr;
	data->hash_rules_size = flt4_params->hash_rules_size;
	data->hash_local_addr = flt4_params->hash_local_addr;
	data->nhash_rules_addr = flt4_params->nhash_rules_addr;
	data->nhash_rules_size = flt4_params->nhash_rules_size;
	data->nhash_local_addr = flt4_params->nhash_local_addr;

	return pyld;
}

/*
 * struct ipahal_imm_cmd_obj - immediate command H/W information for
 * specific IPA version
 * @construct - CB to construct imm command payload from abstracted structure
 * @opcode - Immediate command OpCode
 * @dyn_op - Does this command support a dynamic opcode?
 * Some commands have a dynamic opcode where part of the opcode is
 * supplied as a parameter. This flag indicates whether the specific command
 * supports that.
 */
struct ipahal_imm_cmd_obj {
	struct ipahal_imm_cmd_pyld *(*construct)(enum ipahal_imm_cmd_name cmd,
		const void *params, bool is_atomic_ctx);
	u16 opcode;
	bool dyn_op;
};

/*
 * This table contains the info regarding each immediate command for IPAv3
 * and later.
 * Information like: opcode and construct functions.
 * All the information on the IMM commands on IPAv3 is statically defined
 * below. If information is missing regarding some IMM command on some IPA
 * version, the init function will fill it with the information from the
 * previous IPA version.
 * Information is considered missing if all of the fields are 0.
 * If opcode is -1, the IMM command is removed on the specific version.
 */
static struct ipahal_imm_cmd_obj
	ipahal_imm_cmd_objs[IPA_HW_MAX][IPA_IMM_CMD_MAX] = {
	/* IPAv3 */
	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_FILTER_INIT] = {
		ipa_imm_cmd_construct_ip_v4_filter_init,
		3, false},
	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_FILTER_INIT] = {
		ipa_imm_cmd_construct_ip_v6_filter_init,
		4, false},
	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_NAT_INIT] = {
		ipa_imm_cmd_construct_ip_v4_nat_init,
		5, false},
	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_ROUTING_INIT] = {
		ipa_imm_cmd_construct_ip_v4_routing_init,
		7, false},
	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_ROUTING_INIT] = {
		ipa_imm_cmd_construct_ip_v6_routing_init,
		8, false},
	[IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_LOCAL] = {
		ipa_imm_cmd_construct_hdr_init_local,
		9, false},
	[IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_SYSTEM] = {
		ipa_imm_cmd_construct_hdr_init_system,
		10, false},
	[IPA_HW_v3_0][IPA_IMM_CMD_REGISTER_WRITE] = {
		ipa_imm_cmd_construct_register_write,
		12, false},
	[IPA_HW_v3_0][IPA_IMM_CMD_NAT_DMA] = {
		ipa_imm_cmd_construct_nat_dma,
		14, false},
	[IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_INIT] = {
		ipa_imm_cmd_construct_ip_packet_init,
		16, false},
	[IPA_HW_v3_0][IPA_IMM_CMD_DMA_TASK_32B_ADDR] = {
		ipa_imm_cmd_construct_dma_task_32b_addr,
		17, true},
	[IPA_HW_v3_0][IPA_IMM_CMD_DMA_SHARED_MEM] = {
		ipa_imm_cmd_construct_dma_shared_mem,
		19, false},
	[IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_TAG_STATUS] = {
		ipa_imm_cmd_construct_ip_packet_tag_status,
		20, false},
};
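
/*
 * Illustrative only (not part of the current table): a later IPA version that
 * changes an opcode would add an explicit override entry, for example
 *
 *	[<some later IPA_HW_vX_Y>][IPA_IMM_CMD_REGISTER_WRITE] = {
 *		ipa_imm_cmd_construct_register_write,
 *		<new opcode>, false},
 *
 * and every command left untouched for that version is inherited from the
 * previous version by ipahal_imm_cmd_init().
 */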

/*
 * ipahal_imm_cmd_init() - Build the Immediate command information table
 * See ipahal_imm_cmd_objs[][] comments
 */
static int ipahal_imm_cmd_init(enum ipa_hw_type ipa_hw_type)
{
	int i;
	int j;
	struct ipahal_imm_cmd_obj zero_obj;

	IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);

	if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
		return -EINVAL;
	}

	memset(&zero_obj, 0, sizeof(zero_obj));
	for (i = IPA_HW_v3_0; i < ipa_hw_type; i++) {
		for (j = 0; j < IPA_IMM_CMD_MAX; j++) {
			if (!memcmp(&ipahal_imm_cmd_objs[i+1][j], &zero_obj,
				sizeof(struct ipahal_imm_cmd_obj))) {
				memcpy(&ipahal_imm_cmd_objs[i+1][j],
					&ipahal_imm_cmd_objs[i][j],
					sizeof(struct ipahal_imm_cmd_obj));
			} else {
				/*
				 * explicitly overridden immediate command.
				 * Check validity
				 */
				if (!ipahal_imm_cmd_objs[i+1][j].opcode) {
					IPAHAL_ERR(
					"imm_cmd=%s with zero opcode ipa_ver=%d\n",
					ipahal_imm_cmd_name_str(j), i+1);
					WARN_ON(1);
				}
				if (!ipahal_imm_cmd_objs[i+1][j].construct) {
					IPAHAL_ERR(
					"imm_cmd=%s with NULL construct func ipa_ver=%d\n",
					ipahal_imm_cmd_name_str(j), i+1);
					WARN_ON(1);
				}
			}
		}
	}

	return 0;
}

/*
 * ipahal_imm_cmd_name_str() - returns a string that represents the imm cmd
 * @cmd_name: [in] Immediate command name
 */
const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name)
{
	if (cmd_name < 0 || cmd_name >= IPA_IMM_CMD_MAX) {
		IPAHAL_ERR("requested name of invalid imm_cmd=%d\n", cmd_name);
		return "Invalid IMM_CMD";
	}

	return ipahal_imm_cmd_name_to_str[cmd_name];
}

/*
 * ipahal_imm_cmd_get_opcode() - Get the fixed opcode of the immediate command
 */
u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd)
{
	u32 opcode;

	if (cmd >= IPA_IMM_CMD_MAX) {
		IPAHAL_ERR("Invalid immediate command imm_cmd=%u\n", cmd);
		ipa_assert();
		return -EFAULT;
	}

	IPAHAL_DBG_LOW("Get opcode of IMM_CMD=%s\n",
		ipahal_imm_cmd_name_str(cmd));
	opcode = ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].opcode;
	if (opcode == -1) {
		IPAHAL_ERR("Try to get opcode of obsolete IMM_CMD=%s\n",
			ipahal_imm_cmd_name_str(cmd));
		ipa_assert();
		return -EFAULT;
	}

	return opcode;
}

/*
 * ipahal_imm_cmd_get_opcode_param() - Get the opcode of an immediate command
 * that supports a dynamic opcode
 * Some command opcodes are not totally fixed; part of the opcode is
 * a supplied parameter. E.g. the low byte is fixed and the high byte
 * is a given parameter.
 * This API returns the composed opcode of the command given
 * the parameter.
 * Note: Use this API only for immediate commands that support a dynamic opcode
 */
u16 ipahal_imm_cmd_get_opcode_param(enum ipahal_imm_cmd_name cmd, int param)
{
	u32 opcode;

	if (cmd >= IPA_IMM_CMD_MAX) {
		IPAHAL_ERR("Invalid immediate command IMM_CMD=%u\n", cmd);
		ipa_assert();
		return -EFAULT;
	}

	IPAHAL_DBG_LOW("Get opcode of IMM_CMD=%s\n",
		ipahal_imm_cmd_name_str(cmd));

	if (!ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].dyn_op) {
		IPAHAL_ERR("IMM_CMD=%s does not support dynamic opcode\n",
			ipahal_imm_cmd_name_str(cmd));
		ipa_assert();
		return -EFAULT;
	}

	/* Currently, dynamic opcode commands use a param to be set
	 * on the opcode hi-byte (lo-byte is fixed).
	 * If this is to be changed in the future, make the opcode calculation
	 * a CB per command
	 */
	if (param & ~0xFFFF) {
		IPAHAL_ERR("IMM_CMD=%s opcode param is invalid\n",
			ipahal_imm_cmd_name_str(cmd));
		ipa_assert();
		return -EFAULT;
	}
	opcode = ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].opcode;
	if (opcode == -1) {
		IPAHAL_ERR("Try to get opcode of obsolete IMM_CMD=%s\n",
			ipahal_imm_cmd_name_str(cmd));
		ipa_assert();
		return -EFAULT;
	}
	if (opcode & ~0xFFFF) {
		IPAHAL_ERR("IMM_CMD=%s opcode will be overridden\n",
			ipahal_imm_cmd_name_str(cmd));
		ipa_assert();
		return -EFAULT;
	}
	return (opcode + (param << 8));
}
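
/*
 * Example (illustrative only): on IPAv3, IPA_IMM_CMD_DMA_TASK_32B_ADDR has
 * the fixed opcode 17 (0x11) and supports a dynamic opcode, so
 * ipahal_imm_cmd_get_opcode_param(IPA_IMM_CMD_DMA_TASK_32B_ADDR, 1)
 * composes 0x11 + (1 << 8) = 0x0111.
 */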

/*
 * ipahal_construct_imm_cmd() - Construct an immediate command
 * This function builds an imm cmd payload that can be sent to IPA.
 * The command will be allocated dynamically.
 * After done using it, call ipahal_destroy_imm_cmd() to release it
 */
struct ipahal_imm_cmd_pyld *ipahal_construct_imm_cmd(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	if (!params) {
		IPAHAL_ERR("Input error: params=%p\n", params);
		ipa_assert();
		return NULL;
	}

	if (cmd >= IPA_IMM_CMD_MAX) {
		IPAHAL_ERR("Invalid immediate command %u\n", cmd);
		ipa_assert();
		return NULL;
	}

	IPAHAL_DBG_LOW("construct IMM_CMD:%s\n", ipahal_imm_cmd_name_str(cmd));
	return ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].construct(
		cmd, params, is_atomic_ctx);
}

/*
 * ipahal_construct_nop_imm_cmd() - Construct an immediate command for a NO-Op
 * The core driver may want to inject NOP commands to IPA,
 * e.g. to ensure a pipeline clear before some other operation.
 * The functionality given by this function can be reached via
 * ipahal_construct_imm_cmd(). This function is a helper for the core driver
 * to reach this NOP functionality easily.
 * @skip_pipline_clear: whether to skip pipeline clear waiting (don't wait)
 * @pipline_clr_opt: options for pipeline clear waiting
 * @is_atomic_ctx: is called in atomic context or can sleep?
 */
struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd(
	bool skip_pipline_clear,
	enum ipahal_pipeline_clear_option pipline_clr_opt,
	bool is_atomic_ctx)
{
	struct ipahal_imm_cmd_register_write cmd;
	struct ipahal_imm_cmd_pyld *cmd_pyld;

	memset(&cmd, 0, sizeof(cmd));
	cmd.skip_pipeline_clear = skip_pipline_clear;
	cmd.pipeline_clear_options = pipline_clr_opt;
	cmd.value_mask = 0x0;

	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
		&cmd, is_atomic_ctx);

	if (!cmd_pyld)
		IPAHAL_ERR("failed to construct register_write imm cmd\n");

	return cmd_pyld;
}
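
/*
 * Usage sketch (illustrative only; the sending path is up to the caller):
 *
 *	struct ipahal_imm_cmd_pyld *pyld;
 *
 *	pyld = ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
 *	if (pyld) {
 *		... hand pyld->data / pyld->len to the IPA transport ...
 *		ipahal_destroy_imm_cmd(pyld);
 *	}
 */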


/* IPA Packet Status Logic */

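/*
 * IPA_PKT_STATUS_SET_MSK() copies a single bit of the H/W status_mask
 * (selected by __hw_bit_msk) into bit __shft of the abstracted
 * status->status_mask. It relies on the local variables "status" and
 * "hw_status" of ipa_pkt_status_parse() below.
 */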
#define IPA_PKT_STATUS_SET_MSK(__hw_bit_msk, __shft) \
	(status->status_mask |= \
		((hw_status->status_mask & (__hw_bit_msk) ? 1 : 0) << (__shft)))

static void ipa_pkt_status_parse(
	const void *unparsed_status, struct ipahal_pkt_status *status)
{
	enum ipahal_pkt_status_opcode opcode = 0;
	enum ipahal_pkt_status_exception exception_type = 0;

	struct ipa_pkt_status_hw *hw_status =
		(struct ipa_pkt_status_hw *)unparsed_status;

	status->pkt_len = hw_status->pkt_len;
	status->endp_src_idx = hw_status->endp_src_idx;
	status->endp_dest_idx = hw_status->endp_dest_idx;
	status->metadata = hw_status->metadata;
	status->flt_local = hw_status->flt_local;
	status->flt_hash = hw_status->flt_hash;
	status->flt_global = hw_status->flt_hash;
	status->flt_ret_hdr = hw_status->flt_ret_hdr;
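	/*
	 * In the H/W status, a filter/route rule ID of all 1s indicates that
	 * no rule was hit; flt_miss/rt_miss are derived from that below.
	 */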
	status->flt_miss = ~(hw_status->flt_rule_id) ? false : true;
	status->flt_rule_id = hw_status->flt_rule_id;
	status->rt_local = hw_status->rt_local;
	status->rt_hash = hw_status->rt_hash;
	status->ucp = hw_status->ucp;
	status->rt_tbl_idx = hw_status->rt_tbl_idx;
	status->rt_miss = ~(hw_status->rt_rule_id) ? false : true;
	status->rt_rule_id = hw_status->rt_rule_id;
	status->nat_hit = hw_status->nat_hit;
	status->nat_entry_idx = hw_status->nat_entry_idx;
	status->tag_info = hw_status->tag_info;
	status->seq_num = hw_status->seq_num;
	status->time_of_day_ctr = hw_status->time_of_day_ctr;
	status->hdr_local = hw_status->hdr_local;
	status->hdr_offset = hw_status->hdr_offset;
	status->frag_hit = hw_status->frag_hit;
	status->frag_rule = hw_status->frag_rule;

	switch (hw_status->status_opcode) {
	case 0x1:
		opcode = IPAHAL_PKT_STATUS_OPCODE_PACKET;
		break;
	case 0x2:
		opcode = IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE;
		break;
	case 0x4:
		opcode = IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET;
		break;
	case 0x8:
		opcode = IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET;
		break;
	case 0x10:
		opcode = IPAHAL_PKT_STATUS_OPCODE_LOG;
		break;
	case 0x20:
		opcode = IPAHAL_PKT_STATUS_OPCODE_DCMP;
		break;
	case 0x40:
		opcode = IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS;
		break;
	default:
		IPAHAL_ERR("unsupported Status Opcode 0x%x\n",
			hw_status->status_opcode);
		WARN_ON(1);
	}
	status->status_opcode = opcode;

	switch (hw_status->nat_type) {
	case 0:
		status->nat_type = IPAHAL_PKT_STATUS_NAT_NONE;
		break;
	case 1:
		status->nat_type = IPAHAL_PKT_STATUS_NAT_SRC;
		break;
	case 2:
		status->nat_type = IPAHAL_PKT_STATUS_NAT_DST;
		break;
	default:
		IPAHAL_ERR("unsupported Status NAT type 0x%x\n",
			hw_status->nat_type);
		WARN_ON(1);
	}

	switch (hw_status->exception) {
	case 0:
		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NONE;
		break;
	case 1:
		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR;
		break;
	case 4:
		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE;
		break;
	case 8:
		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH;
		break;
	case 16:
		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS;
		break;
	case 32:
		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT;
		break;
	case 64:
		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NAT;
		break;
	default:
		IPAHAL_ERR("unsupported Status Exception type 0x%x\n",
			hw_status->exception);
		WARN_ON(1);
	}
	status->exception = exception_type;

	IPA_PKT_STATUS_SET_MSK(0x1, IPAHAL_PKT_STATUS_MASK_FRAG_PROCESS_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x2, IPAHAL_PKT_STATUS_MASK_FILT_PROCESS_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x4, IPAHAL_PKT_STATUS_MASK_NAT_PROCESS_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x8, IPAHAL_PKT_STATUS_MASK_ROUTE_PROCESS_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x10, IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x20, IPAHAL_PKT_STATUS_MASK_FRAGMENT_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x40,
		IPAHAL_PKT_STATUS_MASK_FIRST_FRAGMENT_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x80, IPAHAL_PKT_STATUS_MASK_V4_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x100,
		IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x200, IPAHAL_PKT_STATUS_MASK_AGGR_PROCESS_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x400, IPAHAL_PKT_STATUS_MASK_DEST_EOT_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x800,
		IPAHAL_PKT_STATUS_MASK_DEAGGR_PROCESS_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x1000, IPAHAL_PKT_STATUS_MASK_DEAGG_FIRST_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x2000, IPAHAL_PKT_STATUS_MASK_SRC_EOT_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x4000, IPAHAL_PKT_STATUS_MASK_PREV_EOT_SHFT);
	IPA_PKT_STATUS_SET_MSK(0x8000, IPAHAL_PKT_STATUS_MASK_BYTE_LIMIT_SHFT);
	status->status_mask &= 0xFFFF;
}

/*
 * struct ipahal_pkt_status_obj - Packet Status H/W information for
 * specific IPA version
 * @size: H/W size of the status packet
 * @parse: CB that parses the H/W packet status into the abstracted structure
 */
struct ipahal_pkt_status_obj {
	u32 size;
	void (*parse)(const void *unparsed_status,
		struct ipahal_pkt_status *status);
};

/*
 * This table contains the info regarding the packet status for IPAv3 and later
 * Information like: size of the packet status and parsing function
 * All the information on the pkt status on IPAv3 is statically defined below.
 * If information is missing regarding some IPA version, the init function
 * will fill it with the information from the previous IPA version.
 * Information is considered missing if all of the fields are 0
 */
static struct ipahal_pkt_status_obj ipahal_pkt_status_objs[IPA_HW_MAX] = {
	/* IPAv3 */
	[IPA_HW_v3_0] = {
		IPA3_0_PKT_STATUS_SIZE,
		ipa_pkt_status_parse,
	},
};

/*
 * ipahal_pkt_status_init() - Build the packet status information array
 * for the different IPA versions
 * See ipahal_pkt_status_objs[] comments
 */
static int ipahal_pkt_status_init(enum ipa_hw_type ipa_hw_type)
{
	int i;
	struct ipahal_pkt_status_obj zero_obj;

	IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);

	if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
		return -EINVAL;
	}

	/*
	 * Since structure alignment is implementation dependent,
	 * add a test to avoid different and incompatible data layouts.
	 *
	 * In case new H/W has a different size or structure of status packet,
	 * add a compile time validity check for it like below (as well as
	 * the new defines and/or the new structure in the internal header).
	 */
	BUILD_BUG_ON(sizeof(struct ipa_pkt_status_hw) !=
		IPA3_0_PKT_STATUS_SIZE);

	memset(&zero_obj, 0, sizeof(zero_obj));
	for (i = IPA_HW_v3_0; i < ipa_hw_type; i++) {
		if (!memcmp(&ipahal_pkt_status_objs[i+1], &zero_obj,
			sizeof(struct ipahal_pkt_status_obj))) {
			memcpy(&ipahal_pkt_status_objs[i+1],
				&ipahal_pkt_status_objs[i],
				sizeof(struct ipahal_pkt_status_obj));
		} else {
			/*
			 * explicitly overridden Packet Status info.
			 * Check validity
			 */
			if (!ipahal_pkt_status_objs[i+1].size) {
				IPAHAL_ERR(
				"Packet Status with zero size ipa_ver=%d\n",
				i+1);
				WARN_ON(1);
			}
			if (!ipahal_pkt_status_objs[i+1].parse) {
				IPAHAL_ERR(
				"Packet Status without Parse func ipa_ver=%d\n",
				i+1);
				WARN_ON(1);
			}
		}
	}

	return 0;
}

/*
 * ipahal_pkt_status_get_size() - Get H/W size of packet status
 */
u32 ipahal_pkt_status_get_size(void)
{
	return ipahal_pkt_status_objs[ipahal_ctx->hw_type].size;
}

/*
 * ipahal_pkt_status_parse() - Parse Packet Status payload to abstracted form
 * @unparsed_status: Pointer to H/W format of the packet status as read from H/W
 * @status: Pointer to pre-allocated buffer where the parsed info will be stored
 */
void ipahal_pkt_status_parse(const void *unparsed_status,
	struct ipahal_pkt_status *status)
{
	if (!unparsed_status || !status) {
		IPAHAL_ERR("Input Error: unparsed_status=%p status=%p\n",
			unparsed_status, status);
		return;
	}

	IPAHAL_DBG_LOW("Parse Status Packet\n");
	memset(status, 0, sizeof(*status));
	ipahal_pkt_status_objs[ipahal_ctx->hw_type].parse(unparsed_status,
		status);
}

/*
 * ipahal_pkt_status_exception_str() - returns a string that represents the
 * exception type
 * @exception: [in] The exception type
 */
const char *ipahal_pkt_status_exception_str(
	enum ipahal_pkt_status_exception exception)
{
	if (exception < 0 || exception >= IPAHAL_PKT_STATUS_EXCEPTION_MAX) {
		IPAHAL_ERR(
			"requested string of invalid pkt_status exception=%d\n",
			exception);
		return "Invalid PKT_STATUS_EXCEPTION";
	}

	return ipahal_pkt_status_exception_to_str[exception];
}

#ifdef CONFIG_DEBUG_FS
static void ipahal_debugfs_init(void)
{
	ipahal_ctx->dent = debugfs_create_dir("ipahal", 0);
	if (!ipahal_ctx->dent || IS_ERR(ipahal_ctx->dent)) {
		IPAHAL_ERR("fail to create ipahal debugfs folder\n");
		goto fail;
	}

	return;
fail:
	debugfs_remove_recursive(ipahal_ctx->dent);
	ipahal_ctx->dent = NULL;
}

static void ipahal_debugfs_remove(void)
{
	if (!ipahal_ctx)
		return;

	if (IS_ERR(ipahal_ctx->dent)) {
		IPAHAL_ERR("ipahal debugfs folder was not created\n");
		return;
	}

	debugfs_remove_recursive(ipahal_ctx->dent);
}
#else /* CONFIG_DEBUG_FS */
static void ipahal_debugfs_init(void) {}
static void ipahal_debugfs_remove(void) {}
#endif /* CONFIG_DEBUG_FS */

/*
 * ipahal_cp_hdr_to_hw_buff_v3() - copy header to hardware buffer according to
 * base address and offset given.
 * @base: dma base address
 * @offset: offset from base address where the data will be copied
 * @hdr: the header to be copied
 * @hdr_len: the length of the header
 */
static void ipahal_cp_hdr_to_hw_buff_v3(void *const base, u32 offset,
		u8 *const hdr, u32 hdr_len)
{
	memcpy(base + offset, hdr, hdr_len);
}

/*
 * ipahal_cp_proc_ctx_to_hw_buff_v3() - copy processing context to
 * base address and offset given.
 * @type: header processing context type (no processing context,
 * IPA_HDR_PROC_ETHII_TO_ETHII etc.)
 * @base: dma base address
 * @offset: offset from base address where the data will be copied
 * @hdr_len: the length of the header
 * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
 * @phys_base: memory location in DDR
 * @hdr_base_addr: base address in table
 * @offset_entry: offset from hdr_base_addr in table
 */
static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
		void *const base, u32 offset,
		u32 hdr_len, bool is_hdr_proc_ctx,
		dma_addr_t phys_base, u32 hdr_base_addr,
		struct ipa_hdr_offset_entry *offset_entry)
{
	if (type == IPA_HDR_PROC_NONE) {
		struct ipa_hw_hdr_proc_ctx_add_hdr_seq *ctx;

		ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_seq *)
			(base + offset);
		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
		ctx->hdr_add.tlv.length = 1;
		ctx->hdr_add.tlv.value = hdr_len;
		ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
			hdr_base_addr + offset_entry->offset;
		IPAHAL_DBG("header address 0x%x\n",
			ctx->hdr_add.hdr_addr);
		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
		ctx->end.length = 0;
		ctx->end.value = 0;
	} else {
		struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *ctx;

		ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *)
			(base + offset);
		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
		ctx->hdr_add.tlv.length = 1;
		ctx->hdr_add.tlv.value = hdr_len;
		ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
			hdr_base_addr + offset_entry->offset;
		IPAHAL_DBG("header address 0x%x\n",
			ctx->hdr_add.hdr_addr);
		ctx->cmd.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
		ctx->cmd.length = 0;
		switch (type) {
		case IPA_HDR_PROC_ETHII_TO_ETHII:
			ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_ETHII;
			break;
		case IPA_HDR_PROC_ETHII_TO_802_3:
			ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_802_3;
			break;
		case IPA_HDR_PROC_802_3_TO_ETHII:
			ctx->cmd.value = IPA_HDR_UCP_802_3_TO_ETHII;
			break;
		case IPA_HDR_PROC_802_3_TO_802_3:
			ctx->cmd.value = IPA_HDR_UCP_802_3_TO_802_3;
			break;
		default:
			IPAHAL_ERR("unknown ipa_hdr_proc_type %d", type);
			WARN_ON(1);
			return -EINVAL;
		}
		IPAHAL_DBG("command id %d\n", ctx->cmd.value);
		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
		ctx->end.length = 0;
		ctx->end.value = 0;
	}

	return 0;
}

/*
 * ipahal_get_proc_ctx_needed_len_v3() - calculates the needed length for
 * addition of header processing context according to the type of processing
 * context.
 * @type: header processing context type (no processing context,
 * IPA_HDR_PROC_ETHII_TO_ETHII etc.)
 */
static int ipahal_get_proc_ctx_needed_len_v3(enum ipa_hdr_proc_type type)
{
	return (type == IPA_HDR_PROC_NONE) ?
		sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_seq) :
		sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq);
}

/*
 * struct ipahal_hdr_funcs - headers handling functions for specific IPA
 * version
 * @ipahal_cp_hdr_to_hw_buff - copy function for regular headers
 * @ipahal_cp_proc_ctx_to_hw_buff - copy function for header processing context
 * @ipahal_get_proc_ctx_needed_len - needed length for a processing context
 */
struct ipahal_hdr_funcs {
	void (*ipahal_cp_hdr_to_hw_buff)(void *const base, u32 offset,
		u8 *const hdr, u32 hdr_len);

	int (*ipahal_cp_proc_ctx_to_hw_buff)(enum ipa_hdr_proc_type type,
		void *const base, u32 offset, u32 hdr_len,
		bool is_hdr_proc_ctx, dma_addr_t phys_base,
		u32 hdr_base_addr,
		struct ipa_hdr_offset_entry *offset_entry);

	int (*ipahal_get_proc_ctx_needed_len)(enum ipa_hdr_proc_type type);
};

static struct ipahal_hdr_funcs hdr_funcs;

static void ipahal_hdr_init(enum ipa_hw_type ipa_hw_type)
{

	IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type);

	/*
	 * Once there are changes in H/W that require different handling,
	 * insert a new case for the new h/w. Keep the default for the latest
	 * H/W and make sure all previously supported versions have their own
	 * cases.
	 */
	switch (ipa_hw_type) {
	case IPA_HW_v3_0:
	default:
		hdr_funcs.ipahal_cp_hdr_to_hw_buff =
			ipahal_cp_hdr_to_hw_buff_v3;
		hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff =
			ipahal_cp_proc_ctx_to_hw_buff_v3;
		hdr_funcs.ipahal_get_proc_ctx_needed_len =
			ipahal_get_proc_ctx_needed_len_v3;
	}
	IPAHAL_DBG("Exit\n");
}

/*
 * ipahal_cp_hdr_to_hw_buff() - copy header to hardware buffer according to
 * base address and offset given.
 * @base: dma base address
 * @offset: offset from base address where the data will be copied
 * @hdr: the header to be copied
 * @hdr_len: the length of the header
 */
void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *const hdr,
	u32 hdr_len)
{
	IPAHAL_DBG_LOW("Entry\n");
	IPAHAL_DBG("base %p, offset %d, hdr %p, hdr_len %d\n", base,
		offset, hdr, hdr_len);
	if (!base || !hdr_len || !hdr) {
		IPAHAL_ERR("failed on validating params\n");
		return;
	}

	hdr_funcs.ipahal_cp_hdr_to_hw_buff(base, offset, hdr, hdr_len);

	IPAHAL_DBG_LOW("Exit\n");
}

/*
 * ipahal_cp_proc_ctx_to_hw_buff() - copy processing context to
 * base address and offset given.
 * @type: type of header processing context
 * @base: dma base address
 * @offset: offset from base address where the data will be copied
 * @hdr_len: the length of the header
 * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
 * @phys_base: memory location in DDR
 * @hdr_base_addr: base address in table
 * @offset_entry: offset from hdr_base_addr in table
 */
int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
	void *const base, u32 offset, u32 hdr_len,
	bool is_hdr_proc_ctx, dma_addr_t phys_base,
	u32 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry)
{
	IPAHAL_DBG(
		"type %d, base %p, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %d, offset_entry %p\n"
		, type, base, offset, hdr_len, is_hdr_proc_ctx,
		hdr_base_addr, offset_entry);

	if (!base ||
		!hdr_len ||
		(is_hdr_proc_ctx && !phys_base) ||
		(!is_hdr_proc_ctx && !offset_entry) ||
		(!is_hdr_proc_ctx && !hdr_base_addr)) {
		IPAHAL_ERR(
			"invalid input: hdr_len:%u phys_base:%pad hdr_base_addr:%u is_hdr_proc_ctx:%d offset_entry:%pK\n"
			, hdr_len, &phys_base, hdr_base_addr
			, is_hdr_proc_ctx, offset_entry);
		return -EINVAL;
	}

	return hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff(type, base, offset,
			hdr_len, is_hdr_proc_ctx, phys_base,
			hdr_base_addr, offset_entry);
}

/*
 * ipahal_get_proc_ctx_needed_len() - calculates the needed length for
 * addition of header processing context according to the type of processing
 * context
 * @type: header processing context type (no processing context,
 * IPA_HDR_PROC_ETHII_TO_ETHII etc.)
 */
int ipahal_get_proc_ctx_needed_len(enum ipa_hdr_proc_type type)
{
	int res;

	IPAHAL_DBG("entry\n");

	res = hdr_funcs.ipahal_get_proc_ctx_needed_len(type);

	IPAHAL_DBG("Exit\n");

	return res;
}


int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base,
	struct device *ipa_pdev)
{
	int result;

	IPAHAL_DBG("Entry - IPA HW TYPE=%d base=%p ipa_pdev=%p\n",
		ipa_hw_type, base, ipa_pdev);

	ipahal_ctx = kzalloc(sizeof(*ipahal_ctx), GFP_KERNEL);
	if (!ipahal_ctx) {
		IPAHAL_ERR("kzalloc err for ipahal_ctx\n");
		result = -ENOMEM;
		goto bail_err_exit;
	}

	if (ipa_hw_type < IPA_HW_v3_0) {
		IPAHAL_ERR("ipahal supported on IPAv3 and later only\n");
		result = -EINVAL;
		goto bail_free_ctx;
	}

	if (ipa_hw_type >= IPA_HW_MAX) {
		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
		result = -EINVAL;
		goto bail_free_ctx;
	}

	if (!base) {
		IPAHAL_ERR("invalid memory io mapping addr\n");
		result = -EINVAL;
		goto bail_free_ctx;
	}

	if (!ipa_pdev) {
		IPAHAL_ERR("invalid IPA platform device\n");
		result = -EINVAL;
		goto bail_free_ctx;
	}

	ipahal_ctx->hw_type = ipa_hw_type;
	ipahal_ctx->base = base;
	ipahal_ctx->ipa_pdev = ipa_pdev;

	if (ipahal_reg_init(ipa_hw_type)) {
		IPAHAL_ERR("failed to init ipahal reg\n");
		result = -EFAULT;
		goto bail_free_ctx;
	}

	if (ipahal_imm_cmd_init(ipa_hw_type)) {
		IPAHAL_ERR("failed to init ipahal imm cmd\n");
		result = -EFAULT;
		goto bail_free_ctx;
	}

	if (ipahal_pkt_status_init(ipa_hw_type)) {
		IPAHAL_ERR("failed to init ipahal pkt status\n");
		result = -EFAULT;
		goto bail_free_ctx;
	}

	ipahal_hdr_init(ipa_hw_type);

	if (ipahal_fltrt_init(ipa_hw_type)) {
		IPAHAL_ERR("failed to init ipahal flt rt\n");
		result = -EFAULT;
		goto bail_free_ctx;
	}

	ipahal_debugfs_init();

	return 0;

bail_free_ctx:
	kfree(ipahal_ctx);
	ipahal_ctx = NULL;
bail_err_exit:
	return result;
}

void ipahal_destroy(void)
{
	IPAHAL_DBG("Entry\n");
	ipahal_fltrt_destroy();
	ipahal_debugfs_remove();
	kfree(ipahal_ctx);
	ipahal_ctx = NULL;
}

void ipahal_free_dma_mem(struct ipa_mem_buffer *mem)
{
	if (likely(mem)) {
		dma_free_coherent(ipahal_ctx->ipa_pdev, mem->size, mem->base,
			mem->phys_base);
		mem->size = 0;
		mem->base = NULL;
		mem->phys_base = 0;
	}
1359}