blob: ffca1f589f7e84c6713065f30f1f7138ccf50b83 [file] [log] [blame]
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05301/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Amir Levy9659e592016-10-27 18:08:27 +03002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <net/ip.h>
14#include <linux/genalloc.h> /* gen_pool_alloc() */
15#include <linux/io.h>
16#include <linux/ratelimit.h>
17#include <linux/msm-bus.h>
18#include <linux/msm-bus-board.h>
19#include "ipa_i.h"
20#include "../ipa_rm_i.h"
21
/* IPA core clock rates in Hz, per HW generation.
 * NOTE(review): IPA_V1_CLK_RATE is a floating-point expression (92.31 MHz);
 * consumers presumably convert to an integer rate - confirm at call sites.
 */
#define IPA_V1_CLK_RATE (92.31 * 1000 * 1000UL)
#define IPA_V1_1_CLK_RATE (100 * 1000 * 1000UL)
#define IPA_V2_0_CLK_RATE_SVS (75 * 1000 * 1000UL)
#define IPA_V2_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL)
#define IPA_V2_0_CLK_RATE_TURBO (200 * 1000 * 1000UL)
/* Max head-of-line-blocking timer register values, per HW generation */
#define IPA_V1_MAX_HOLB_TMR_VAL (512 - 1)
#define IPA_V2_0_MAX_HOLB_TMR_VAL (65536 - 1)
#define IPA_V2_5_MAX_HOLB_TMR_VAL (4294967296 - 1)
#define IPA_V2_6L_MAX_HOLB_TMR_VAL IPA_V2_5_MAX_HOLB_TMR_VAL

/* Aggregate bandwidth (Mbps) thresholds used to select the clock vote */
#define IPA_V2_0_BW_THRESHOLD_TURBO_MBPS (1000)
#define IPA_V2_0_BW_THRESHOLD_NOMINAL_MBPS (600)

/* Max pipes + ICs for TAG process */
#define IPA_TAG_MAX_DESC (IPA_MAX_NUM_PIPES + 6)

/* usleep_range() window used while waiting for TAG completion */
#define IPA_TAG_SLEEP_MIN_USEC (1000)
#define IPA_TAG_SLEEP_MAX_USEC (2000)
#define IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT (10 * HZ)
/* Backward-compatibility register value written on IPA >= v2.5 */
#define IPA_BCR_REG_VAL (0x001FFF7F)
/* Valid ranges for aggregation / EOT-coalescing granularity settings */
#define IPA_AGGR_GRAN_MIN (1)
#define IPA_AGGR_GRAN_MAX (32)
#define IPA_EOT_COAL_GRAN_MIN (1)
#define IPA_EOT_COAL_GRAN_MAX (16)
#define MSEC 1000
/* Rx polling tuning parameters (times in msec, cutoffs in packets) */
#define MIN_RX_POLL_TIME 1
#define MAX_RX_POLL_TIME 5
#define UPPER_CUTOFF 50
#define LOWER_CUTOFF 10

#define IPA_DEFAULT_SYS_YELLOW_WM 32

/* Maximum aggregation byte/packet limits derived from the register
 * field masks.
 * NOTE(review): the BYTE_LIMIT macros use an upper-case "N" while the
 * PKT_LIMIT macros use lower-case "n" - this mirrors the register macro
 * names as defined elsewhere; confirm against ipa_i.h.
 */
#define IPA_AGGR_BYTE_LIMIT (\
		IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK >> \
		IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT)
#define IPA_AGGR_PKT_LIMIT (\
		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >> \
		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT)

/* -1 terminated lists of the HW equation slots available for each
 * filter equation type (two slots per type on this HW).
 */
static const int ipa_ofst_meq32[] = { IPA_OFFSET_MEQ32_0,
	IPA_OFFSET_MEQ32_1, -1 };
static const int ipa_ofst_meq128[] = { IPA_OFFSET_MEQ128_0,
	IPA_OFFSET_MEQ128_1, -1 };
static const int ipa_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0,
	IPA_IHL_OFFSET_RANGE16_1, -1 };
static const int ipa_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0,
	IPA_IHL_OFFSET_MEQ32_1, -1 };
/* First index of ep_mapping[][]: the HW generation */
#define IPA_1_1 (0)
#define IPA_2_0 (1)
#define IPA_2_6L (2)

/* Returned by ipa2_get_ep_mapping() when a client has no pipe */
#define INVALID_EP_MAPPING_INDEX (-1)
/* Maps one IPA client to a physical pipe for one HW generation.
 * NOTE(review): "confing" looks like a typo for "config"; kept as-is
 * because the name is referenced by the table below.
 */
struct ipa_ep_confing {
	bool valid;	/* true when this client exists on the HW generation */
	int pipe_num;	/* physical endpoint (pipe) index */
};

/* Client -> pipe lookup table, indexed by [HW generation][client type].
 * Entries not listed are zero-initialized, i.e. .valid == false, which
 * ipa2_get_ep_mapping() reports as INVALID_EP_MAPPING_INDEX.
 */
static const struct ipa_ep_confing ep_mapping[3][IPA_CLIENT_MAX] = {
	[IPA_1_1][IPA_CLIENT_HSIC1_PROD]         = {true, 19},
	[IPA_1_1][IPA_CLIENT_HSIC2_PROD]         = {true, 12},
	[IPA_1_1][IPA_CLIENT_USB2_PROD]          = {true, 12},
	[IPA_1_1][IPA_CLIENT_HSIC3_PROD]         = {true, 13},
	[IPA_1_1][IPA_CLIENT_USB3_PROD]          = {true, 13},
	[IPA_1_1][IPA_CLIENT_HSIC4_PROD]         = {true,  0},
	[IPA_1_1][IPA_CLIENT_USB4_PROD]          = {true,  0},
	[IPA_1_1][IPA_CLIENT_USB_PROD]           = {true, 11},
	[IPA_1_1][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = {true, 15},
	[IPA_1_1][IPA_CLIENT_A2_EMBEDDED_PROD]   = {true,  8},
	[IPA_1_1][IPA_CLIENT_A2_TETHERED_PROD]   = {true,  6},
	[IPA_1_1][IPA_CLIENT_APPS_LAN_WAN_PROD]  = {true,  2},
	[IPA_1_1][IPA_CLIENT_APPS_CMD_PROD]      = {true,  1},
	[IPA_1_1][IPA_CLIENT_Q6_LAN_PROD]        = {true,  5},

	[IPA_1_1][IPA_CLIENT_HSIC1_CONS]         = {true, 14},
	[IPA_1_1][IPA_CLIENT_HSIC2_CONS]         = {true, 16},
	[IPA_1_1][IPA_CLIENT_USB2_CONS]          = {true, 16},
	[IPA_1_1][IPA_CLIENT_HSIC3_CONS]         = {true, 17},
	[IPA_1_1][IPA_CLIENT_USB3_CONS]          = {true, 17},
	[IPA_1_1][IPA_CLIENT_HSIC4_CONS]         = {true, 18},
	[IPA_1_1][IPA_CLIENT_USB4_CONS]          = {true, 18},
	[IPA_1_1][IPA_CLIENT_USB_CONS]           = {true, 10},
	[IPA_1_1][IPA_CLIENT_A2_EMBEDDED_CONS]   = {true,  9},
	[IPA_1_1][IPA_CLIENT_A2_TETHERED_CONS]   = {true,  7},
	[IPA_1_1][IPA_CLIENT_A5_LAN_WAN_CONS]    = {true,  3},
	[IPA_1_1][IPA_CLIENT_Q6_LAN_CONS]        = {true,  4},


	[IPA_2_0][IPA_CLIENT_HSIC1_PROD]         = {true, 12},
	[IPA_2_0][IPA_CLIENT_WLAN1_PROD]         = {true, 18},
	[IPA_2_0][IPA_CLIENT_USB2_PROD]          = {true, 12},
	[IPA_2_0][IPA_CLIENT_USB3_PROD]          = {true, 13},
	[IPA_2_0][IPA_CLIENT_USB4_PROD]          = {true,  0},
	[IPA_2_0][IPA_CLIENT_USB_PROD]           = {true, 11},
	[IPA_2_0][IPA_CLIENT_APPS_LAN_WAN_PROD]  = {true,  4},
	[IPA_2_0][IPA_CLIENT_APPS_CMD_PROD]      = {true,  3},
	[IPA_2_0][IPA_CLIENT_ODU_PROD]           = {true, 12},
	[IPA_2_0][IPA_CLIENT_MHI_PROD]           = {true, 18},
	[IPA_2_0][IPA_CLIENT_Q6_LAN_PROD]        = {true,  6},
	[IPA_2_0][IPA_CLIENT_Q6_CMD_PROD]        = {true,  7},
	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD]
						 = {true, 12},
	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]
						 = {true, 19},
	[IPA_2_0][IPA_CLIENT_ETHERNET_PROD]      = {true, 12},
	/* Only for test purpose */
	[IPA_2_0][IPA_CLIENT_TEST_PROD]          = {true, 19},
	[IPA_2_0][IPA_CLIENT_TEST1_PROD]         = {true, 19},
	[IPA_2_0][IPA_CLIENT_TEST2_PROD]         = {true, 12},
	[IPA_2_0][IPA_CLIENT_TEST3_PROD]         = {true, 11},
	[IPA_2_0][IPA_CLIENT_TEST4_PROD]         = {true,  0},

	[IPA_2_0][IPA_CLIENT_HSIC1_CONS]         = {true, 13},
	[IPA_2_0][IPA_CLIENT_WLAN1_CONS]         = {true, 17},
	[IPA_2_0][IPA_CLIENT_WLAN2_CONS]         = {true, 16},
	[IPA_2_0][IPA_CLIENT_WLAN3_CONS]         = {true, 14},
	[IPA_2_0][IPA_CLIENT_WLAN4_CONS]         = {true, 19},
	[IPA_2_0][IPA_CLIENT_USB_CONS]           = {true, 15},
	[IPA_2_0][IPA_CLIENT_USB_DPL_CONS]       = {true,  0},
	[IPA_2_0][IPA_CLIENT_APPS_LAN_CONS]      = {true,  2},
	[IPA_2_0][IPA_CLIENT_APPS_WAN_CONS]      = {true,  5},
	[IPA_2_0][IPA_CLIENT_ODU_EMB_CONS]       = {true, 13},
	[IPA_2_0][IPA_CLIENT_ODU_TETH_CONS]      = {true,  1},
	[IPA_2_0][IPA_CLIENT_MHI_CONS]           = {true, 17},
	[IPA_2_0][IPA_CLIENT_Q6_LAN_CONS]        = {true,  8},
	[IPA_2_0][IPA_CLIENT_Q6_WAN_CONS]        = {true,  9},
	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS]
						 = {true, 13},
	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS]
						 = {true, 16},
	[IPA_2_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]
						 = {true, 10},
	[IPA_2_0][IPA_CLIENT_ETHERNET_CONS]      = {true,  1},

	/* Only for test purpose */
	[IPA_2_0][IPA_CLIENT_TEST_CONS]          = {true,  1},
	[IPA_2_0][IPA_CLIENT_TEST1_CONS]         = {true,  1},
	[IPA_2_0][IPA_CLIENT_TEST2_CONS]         = {true, 16},
	[IPA_2_0][IPA_CLIENT_TEST3_CONS]         = {true, 13},
	[IPA_2_0][IPA_CLIENT_TEST4_CONS]         = {true, 15},


	[IPA_2_6L][IPA_CLIENT_APPS_LAN_WAN_PROD] = {true,  4},
	[IPA_2_6L][IPA_CLIENT_APPS_CMD_PROD]     = {true,  3},
	[IPA_2_6L][IPA_CLIENT_Q6_LAN_PROD]       = {true,  6},
	[IPA_2_6L][IPA_CLIENT_Q6_CMD_PROD]       = {true,  7},
	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP_PROD]    = {true, 11},
	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP2_PROD]   = {true, 13},

	/* Only for test purpose */
	[IPA_2_6L][IPA_CLIENT_TEST_PROD]         = {true, 11},
	[IPA_2_6L][IPA_CLIENT_TEST1_PROD]        = {true, 11},
	[IPA_2_6L][IPA_CLIENT_TEST2_PROD]        = {true, 12},
	[IPA_2_6L][IPA_CLIENT_TEST3_PROD]        = {true, 13},
	[IPA_2_6L][IPA_CLIENT_TEST4_PROD]        = {true, 14},

	[IPA_2_6L][IPA_CLIENT_USB_CONS]          = {true,  0},
	[IPA_2_6L][IPA_CLIENT_USB_DPL_CONS]      = {true, 10},
	[IPA_2_6L][IPA_CLIENT_APPS_LAN_CONS]     = {true,  2},
	[IPA_2_6L][IPA_CLIENT_APPS_WAN_CONS]     = {true,  5},
	[IPA_2_6L][IPA_CLIENT_Q6_LAN_CONS]       = {true,  8},
	[IPA_2_6L][IPA_CLIENT_Q6_WAN_CONS]       = {true,  9},
	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP_CONS]    = {true, 12},
	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP2_CONS]   = {true, 14},

	/* Only for test purpose */
	[IPA_2_6L][IPA_CLIENT_TEST_CONS]         = {true, 15},
	[IPA_2_6L][IPA_CLIENT_TEST1_CONS]        = {true, 15},
	[IPA_2_6L][IPA_CLIENT_TEST2_CONS]        = {true,  0},
	[IPA_2_6L][IPA_CLIENT_TEST3_CONS]        = {true,  1},
	[IPA_2_6L][IPA_CLIENT_TEST4_CONS]        = {true, 10},
};
194
/* msm-bus vote tables. "init" vectors request zero bandwidth (idle vote);
 * the "max_perf"/"nominal_perf" vectors are the active-use vote.
 * .ab = average bandwidth, .ib = instantaneous bandwidth (bytes/sec).
 */
static struct msm_bus_vectors ipa_init_vectors_v1_1[]  = {
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	},
	{
		.src = MSM_BUS_MASTER_BAM_DMA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	},
	{
		.src = MSM_BUS_MASTER_BAM_DMA,
		.dst = MSM_BUS_SLAVE_OCIMEM,
		.ab = 0,
		.ib = 0,
	},
};

static struct msm_bus_vectors ipa_init_vectors_v2_0[]  = {
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	},
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_OCIMEM,
		.ab = 0,
		.ib = 0,
	},
};

static struct msm_bus_vectors ipa_max_perf_vectors_v1_1[]  = {
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 50000000,
		.ib = 960000000,
	},
	{
		.src = MSM_BUS_MASTER_BAM_DMA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 50000000,
		.ib = 960000000,
	},
	{
		.src = MSM_BUS_MASTER_BAM_DMA,
		.dst = MSM_BUS_SLAVE_OCIMEM,
		.ab = 50000000,
		.ib = 960000000,
	},
};

static struct msm_bus_vectors ipa_nominal_perf_vectors_v2_0[]  = {
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 100000000,
		.ib = 1300000000,
	},
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_OCIMEM,
		.ab = 100000000,
		.ib = 1300000000,
	},
};

/* Use case 0 = idle vote, use case 1 = performance vote */
static struct msm_bus_paths ipa_usecases_v1_1[]  = {
	{
		ARRAY_SIZE(ipa_init_vectors_v1_1),
		ipa_init_vectors_v1_1,
	},
	{
		ARRAY_SIZE(ipa_max_perf_vectors_v1_1),
		ipa_max_perf_vectors_v1_1,
	},
};

static struct msm_bus_paths ipa_usecases_v2_0[]  = {
	{
		ARRAY_SIZE(ipa_init_vectors_v2_0),
		ipa_init_vectors_v2_0,
	},
	{
		ARRAY_SIZE(ipa_nominal_perf_vectors_v2_0),
		ipa_nominal_perf_vectors_v2_0,
	},
};

/* Bus-scaling client registration data, per HW generation */
static struct msm_bus_scale_pdata ipa_bus_client_pdata_v1_1 = {
	ipa_usecases_v1_1,
	ARRAY_SIZE(ipa_usecases_v1_1),
	.name = "ipa",
};

static struct msm_bus_scale_pdata ipa_bus_client_pdata_v2_0 = {
	ipa_usecases_v2_0,
	ARRAY_SIZE(ipa_usecases_v2_0),
	.name = "ipa",
};
300
/* ipa_active_clients_lock() - take the active-clients mutex and publish
 * the "mutex is held" flag under the spinlock so that
 * ipa_active_clients_trylock() callers back off instead of racing.
 * Lock order (mutex first, then spinlock) must not change.
 */
void ipa_active_clients_lock(void)
{
	unsigned long flags;

	mutex_lock(&ipa_ctx->ipa_active_clients.mutex);
	spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, flags);
	ipa_ctx->ipa_active_clients.mutex_locked = true;
	spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, flags);
}
310
311int ipa_active_clients_trylock(unsigned long *flags)
312{
313 spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, *flags);
314 if (ipa_ctx->ipa_active_clients.mutex_locked) {
315 spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock,
316 *flags);
317 return 0;
318 }
319
320 return 1;
321}
322
/* ipa_active_clients_trylock_unlock() - release the spinlock taken by a
 * successful ipa_active_clients_trylock(); @flags must be the same
 * variable passed to that call.
 */
void ipa_active_clients_trylock_unlock(unsigned long *flags)
{
	spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, *flags);
}
327
/* ipa_active_clients_unlock() - clear the "mutex is held" flag under the
 * spinlock, then drop the mutex. Mirror of ipa_active_clients_lock();
 * the flag must be cleared before the mutex is released.
 */
void ipa_active_clients_unlock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, flags);
	ipa_ctx->ipa_active_clients.mutex_locked = false;
	spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, flags);
	mutex_unlock(&ipa_ctx->ipa_active_clients.mutex);
}
337
/**
 * ipa_get_clients_from_rm_resource() - get IPA clients which are related to an
 * IPA_RM resource
 *
 * @resource: [IN] IPA Resource Manager resource
 * @clients: [OUT] Empty array which will contain the list of clients. The
 *           caller must initialize this array.
 *
 * An RM resource may fan out to several clients (e.g. WLAN_CONS maps to
 * four WLAN consumer pipes). Unknown resources yield an empty list
 * (length 0), which is still reported as success.
 *
 * Return codes: 0 on success, negative on failure.
 */
int ipa_get_clients_from_rm_resource(
	enum ipa_rm_resource_name resource,
	struct ipa_client_names *clients)
{
	int i = 0;

	if (resource < 0 ||
	    resource >= IPA_RM_RESOURCE_MAX ||
	    !clients) {
		IPAERR("Bad parameters\n");
		return -EINVAL;
	}

	switch (resource) {
	case IPA_RM_RESOURCE_USB_CONS:
		clients->names[i++] = IPA_CLIENT_USB_CONS;
		break;
	case IPA_RM_RESOURCE_HSIC_CONS:
		clients->names[i++] = IPA_CLIENT_HSIC1_CONS;
		break;
	case IPA_RM_RESOURCE_WLAN_CONS:
		clients->names[i++] = IPA_CLIENT_WLAN1_CONS;
		clients->names[i++] = IPA_CLIENT_WLAN2_CONS;
		clients->names[i++] = IPA_CLIENT_WLAN3_CONS;
		clients->names[i++] = IPA_CLIENT_WLAN4_CONS;
		break;
	case IPA_RM_RESOURCE_MHI_CONS:
		clients->names[i++] = IPA_CLIENT_MHI_CONS;
		break;
	case IPA_RM_RESOURCE_ODU_ADAPT_CONS:
		clients->names[i++] = IPA_CLIENT_ODU_EMB_CONS;
		clients->names[i++] = IPA_CLIENT_ODU_TETH_CONS;
		break;
	case IPA_RM_RESOURCE_ETHERNET_CONS:
		clients->names[i++] = IPA_CLIENT_ETHERNET_CONS;
		break;
	case IPA_RM_RESOURCE_USB_PROD:
		clients->names[i++] = IPA_CLIENT_USB_PROD;
		break;
	case IPA_RM_RESOURCE_HSIC_PROD:
		clients->names[i++] = IPA_CLIENT_HSIC1_PROD;
		break;
	case IPA_RM_RESOURCE_MHI_PROD:
		clients->names[i++] = IPA_CLIENT_MHI_PROD;
		break;
	case IPA_RM_RESOURCE_ODU_ADAPT_PROD:
		clients->names[i++] = IPA_CLIENT_ODU_PROD;
		break;
	case IPA_RM_RESOURCE_ETHERNET_PROD:
		clients->names[i++] = IPA_CLIENT_ETHERNET_PROD;
		break;
	default:
		break;
	}
	clients->length = i;

	return 0;
}
406
407/**
408 * ipa_should_pipe_be_suspended() - returns true when the client's pipe should
409 * be suspended during a power save scenario. False otherwise.
410 *
411 * @client: [IN] IPA client
412 */
413bool ipa_should_pipe_be_suspended(enum ipa_client_type client)
414{
415 struct ipa_ep_context *ep;
416 int ipa_ep_idx;
417
418 ipa_ep_idx = ipa2_get_ep_mapping(client);
419 if (ipa_ep_idx == -1) {
420 IPAERR("Invalid client.\n");
421 WARN_ON(1);
422 return false;
423 }
424
425 ep = &ipa_ctx->ep[ipa_ep_idx];
426
427 if (ep->keep_ipa_awake)
428 return false;
429
Skylar Chang79699ec2016-11-18 10:21:33 -0800430 if (client == IPA_CLIENT_USB_CONS ||
431 client == IPA_CLIENT_MHI_CONS ||
432 client == IPA_CLIENT_HSIC1_CONS ||
433 client == IPA_CLIENT_WLAN1_CONS ||
434 client == IPA_CLIENT_WLAN2_CONS ||
435 client == IPA_CLIENT_WLAN3_CONS ||
436 client == IPA_CLIENT_WLAN4_CONS ||
437 client == IPA_CLIENT_ODU_EMB_CONS ||
Sunil Paidimarri5139aa22017-02-13 11:07:32 -0800438 client == IPA_CLIENT_ODU_TETH_CONS ||
439 client == IPA_CLIENT_ETHERNET_CONS)
Amir Levy9659e592016-10-27 18:08:27 +0300440 return true;
441
442 return false;
443}
444
/**
 * ipa2_suspend_resource_sync() - suspend client endpoints related to the IPA_RM
 * resource and decrement active clients counter, which may result in clock
 * gating of IPA clocks.
 *
 * @resource: [IN] IPA Resource Manager resource
 *
 * May sleep (~1 msec after suspending pipes); use
 * ipa2_suspend_resource_no_block() from atomic context.
 *
 * Return codes: 0 on success, negative on failure.
 */
int ipa2_suspend_resource_sync(enum ipa_rm_resource_name resource)
{
	struct ipa_client_names clients;
	int res;
	int index;
	struct ipa_ep_cfg_ctrl suspend;
	enum ipa_client_type client;
	int ipa_ep_idx;
	bool pipe_suspended = false;

	memset(&clients, 0, sizeof(clients));
	res = ipa_get_clients_from_rm_resource(resource, &clients);
	if (res) {
		IPAERR("Bad params.\n");
		return res;
	}

	for (index = 0; index < clients.length; index++) {
		client = clients.names[index];
		ipa_ep_idx = ipa2_get_ep_mapping(client);
		if (ipa_ep_idx == -1) {
			/* skip unmapped clients but keep processing the rest */
			IPAERR("Invalid client.\n");
			res = -EINVAL;
			continue;
		}
		ipa_ctx->resume_on_connect[client] = false;
		if (ipa_ctx->ep[ipa_ep_idx].client == client &&
		    ipa_should_pipe_be_suspended(client)) {
			if (ipa_ctx->ep[ipa_ep_idx].valid) {
				/* suspend endpoint */
				memset(&suspend, 0, sizeof(suspend));
				suspend.ipa_ep_suspend = true;
				ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend);
				pipe_suspended = true;
			}
		}
	}
	/* Sleep ~1 msec */
	if (pipe_suspended)
		usleep_range(1000, 2000);

	/* before gating IPA clocks do TAG process */
	ipa_ctx->tag_process_before_gating = true;
	IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(resource));

	return 0;
}
501
/**
 * ipa2_suspend_resource_no_block() - suspend client endpoints related to the
 * IPA_RM resource and decrement active clients counter. This function is
 * guaranteed to avoid sleeping.
 *
 * @resource: [IN] IPA Resource Manager resource
 *
 * Fails with -EPERM if the active-clients lock cannot be taken without
 * blocking, or if this would be the last active client (gating the
 * clocks requires the sleeping TAG process, which the sync variant does).
 *
 * Return codes: 0 on success, negative on failure.
 */
int ipa2_suspend_resource_no_block(enum ipa_rm_resource_name resource)
{
	int res;
	struct ipa_client_names clients;
	int index;
	enum ipa_client_type client;
	struct ipa_ep_cfg_ctrl suspend;
	int ipa_ep_idx;
	unsigned long flags;
	struct ipa_active_client_logging_info log_info;

	if (ipa_active_clients_trylock(&flags) == 0)
		return -EPERM;
	if (ipa_ctx->ipa_active_clients.cnt == 1) {
		res = -EPERM;
		goto bail;
	}

	memset(&clients, 0, sizeof(clients));
	res = ipa_get_clients_from_rm_resource(resource, &clients);
	if (res) {
		IPAERR("ipa_get_clients_from_rm_resource() failed, name = %d.\n"
			, resource);
		goto bail;
	}

	for (index = 0; index < clients.length; index++) {
		client = clients.names[index];
		ipa_ep_idx = ipa2_get_ep_mapping(client);
		if (ipa_ep_idx == -1) {
			/* record the error but try the remaining clients */
			IPAERR("Invalid client.\n");
			res = -EINVAL;
			continue;
		}
		ipa_ctx->resume_on_connect[client] = false;
		if (ipa_ctx->ep[ipa_ep_idx].client == client &&
		    ipa_should_pipe_be_suspended(client)) {
			if (ipa_ctx->ep[ipa_ep_idx].valid) {
				/* suspend endpoint */
				memset(&suspend, 0, sizeof(suspend));
				suspend.ipa_ep_suspend = true;
				ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend);
			}
		}
	}

	/* only drop the active-clients count if every pipe was handled */
	if (res == 0) {
		IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
				ipa_rm_resource_str(resource));
		ipa2_active_clients_log_dec(&log_info, true);
		ipa_ctx->ipa_active_clients.cnt--;
		IPADBG("active clients = %d\n",
		       ipa_ctx->ipa_active_clients.cnt);
	}
bail:
	ipa_active_clients_trylock_unlock(&flags);

	return res;
}
570
/**
 * ipa2_resume_resource() - resume client endpoints related to the IPA_RM
 * resource.
 *
 * @resource: [IN] IPA Resource Manager resource
 *
 * Marks every related client for resume-on-connect and un-suspends the
 * endpoints that are currently connected (skipping any endpoint with a
 * disconnect in progress, under disconnect_lock).
 *
 * Return codes: 0 on success, negative on failure.
 */
int ipa2_resume_resource(enum ipa_rm_resource_name resource)
{

	struct ipa_client_names clients;
	int res;
	int index;
	struct ipa_ep_cfg_ctrl suspend;
	enum ipa_client_type client;
	int ipa_ep_idx;

	memset(&clients, 0, sizeof(clients));
	res = ipa_get_clients_from_rm_resource(resource, &clients);
	if (res) {
		IPAERR("ipa_get_clients_from_rm_resource() failed.\n");
		return res;
	}

	for (index = 0; index < clients.length; index++) {
		client = clients.names[index];
		ipa_ep_idx = ipa2_get_ep_mapping(client);
		if (ipa_ep_idx == -1) {
			IPAERR("Invalid client.\n");
			res = -EINVAL;
			continue;
		}
		/*
		 * The related ep, will be resumed on connect
		 * while its resource is granted
		 */
		ipa_ctx->resume_on_connect[client] = true;
		IPADBG("%d will be resumed on connect.\n", client);
		if (ipa_ctx->ep[ipa_ep_idx].client == client &&
		    ipa_should_pipe_be_suspended(client)) {
			spin_lock(&ipa_ctx->disconnect_lock);
			if (ipa_ctx->ep[ipa_ep_idx].valid &&
			    !ipa_ctx->ep[ipa_ep_idx].disconnect_in_progress) {
				memset(&suspend, 0, sizeof(suspend));
				suspend.ipa_ep_suspend = false;
				ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend);
			}
			spin_unlock(&ipa_ctx->disconnect_lock);
		}
	}

	return res;
}
625
/* read how much SRAM is available for SW use
 * In case of IPAv2.0 this will also supply an offset from
 * which we can start write
 */
void _ipa_sram_settings_read_v1_1(void)
{
	/* v1.1 has no SW-restricted SRAM region */
	ipa_ctx->smem_restricted_bytes = 0;
	ipa_ctx->smem_sz = ipa_read_reg(ipa_ctx->mmio,
			IPA_SHARED_MEM_SIZE_OFST_v1_1);
	ipa_ctx->smem_reqd_sz = IPA_MEM_v1_RAM_END_OFST;
	/* *_lcl == 1: table lives in local SRAM; 0: in system memory */
	ipa_ctx->hdr_tbl_lcl = 1;
	ipa_ctx->ip4_rt_tbl_lcl = 0;
	ipa_ctx->ip6_rt_tbl_lcl = 0;
	ipa_ctx->ip4_flt_tbl_lcl = 1;
	ipa_ctx->ip6_flt_tbl_lcl = 1;
}
642
/* Read the v2.0 shared-memory size register: both the SW-usable size and
 * the restricted base offset come from IPA_SHARED_MEM_SIZE. On v2.0 all
 * header/route/filter tables are placed in system memory (*_lcl == 0).
 */
void _ipa_sram_settings_read_v2_0(void)
{
	ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio,
			IPA_SHARED_MEM_SIZE_OFST_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0);
	ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio,
			IPA_SHARED_MEM_SIZE_OFST_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0);
	ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst);
	ipa_ctx->hdr_tbl_lcl = 0;
	ipa_ctx->ip4_rt_tbl_lcl = 0;
	ipa_ctx->ip6_rt_tbl_lcl = 0;
	ipa_ctx->ip4_flt_tbl_lcl = 0;
	ipa_ctx->ip6_flt_tbl_lcl = 0;
}
660
/* v2.5 variant: same shared-memory register layout as v2.0, but the
 * header processing-context table is kept in local SRAM.
 */
void _ipa_sram_settings_read_v2_5(void)
{
	ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio,
			IPA_SHARED_MEM_SIZE_OFST_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0);
	ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio,
			IPA_SHARED_MEM_SIZE_OFST_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0);
	ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst);
	ipa_ctx->hdr_tbl_lcl = 0;
	ipa_ctx->hdr_proc_ctx_tbl_lcl = 1;

	/*
	 * when proc ctx table is located in internal memory,
	 * modem entries resides first.
	 * NOTE(review): this condition is always true here since the flag
	 * was just set to 1 above - presumably kept in case the flag
	 * becomes configurable.
	 */
	if (ipa_ctx->hdr_proc_ctx_tbl_lcl) {
		ipa_ctx->hdr_proc_ctx_tbl.start_offset =
			IPA_MEM_PART(modem_hdr_proc_ctx_size);
	}
	ipa_ctx->ip4_rt_tbl_lcl = 0;
	ipa_ctx->ip6_rt_tbl_lcl = 0;
	ipa_ctx->ip4_flt_tbl_lcl = 0;
	ipa_ctx->ip6_flt_tbl_lcl = 0;
}
688
/* v2.6L variant: same register layout as v2.0; all tables are placed in
 * system memory.
 */
void _ipa_sram_settings_read_v2_6L(void)
{
	ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio,
			IPA_SHARED_MEM_SIZE_OFST_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0);
	ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio,
			IPA_SHARED_MEM_SIZE_OFST_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0);
	ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst);
	ipa_ctx->hdr_tbl_lcl = 0;
	ipa_ctx->ip4_rt_tbl_lcl = 0;
	ipa_ctx->ip6_rt_tbl_lcl = 0;
	ipa_ctx->ip4_flt_tbl_lcl = 0;
	ipa_ctx->ip6_flt_tbl_lcl = 0;
}
706
/* Program the v1.1 IPA_ROUTE register from @route: default-route disable
 * bit, default pipe, and default header table/offset.
 */
void _ipa_cfg_route_v1_1(struct ipa_route *route)
{
	u32 reg_val = 0;

	IPA_SETFIELD_IN_REG(reg_val, route->route_dis,
			IPA_ROUTE_ROUTE_DIS_SHFT,
			IPA_ROUTE_ROUTE_DIS_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, route->route_def_pipe,
			IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
			IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_table,
			IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
			IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_ofst,
			IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
			IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK);

	ipa_write_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1, reg_val);
}
729
/* Program the IPA_ROUTE register for v2.0 HW: same fields as v1.1 plus
 * the default pipe for fragmented packets.
 * NOTE(review): writes IPA_ROUTE_OFST_v1_1 - presumably the register
 * offset is unchanged on v2.0; confirm against the register map.
 */
void _ipa_cfg_route_v2_0(struct ipa_route *route)
{
	u32 reg_val = 0;

	IPA_SETFIELD_IN_REG(reg_val, route->route_dis,
			IPA_ROUTE_ROUTE_DIS_SHFT,
			IPA_ROUTE_ROUTE_DIS_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, route->route_def_pipe,
			IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
			IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_table,
			IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
			IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_ofst,
			IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
			IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, route->route_frag_def_pipe,
			IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT,
			IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK);

	ipa_write_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1, reg_val);
}
756
/**
 * ipa_cfg_route() - configure IPA route
 * @route: IPA route
 *
 * Dispatches to the HW-specific handler via ipa_ctx->ctrl while holding
 * an active-clients vote so the clocks stay on during the register write.
 *
 * Return codes:
 * 0: success
 */
int ipa_cfg_route(struct ipa_route *route)
{

	IPADBG("disable_route_block=%d, default_pipe=%d, default_hdr_tbl=%d\n",
		route->route_dis,
		route->route_def_pipe,
		route->route_def_hdr_table);
	IPADBG("default_hdr_ofst=%d, default_frag_pipe=%d\n",
		route->route_def_hdr_ofst,
		route->route_frag_def_pipe);

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	ipa_ctx->ctrl->ipa_cfg_route(route);

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

	return 0;
}
783
784/**
785 * ipa_cfg_filter() - configure filter
786 * @disable: disable value
787 *
788 * Return codes:
789 * 0: success
790 */
791int ipa_cfg_filter(u32 disable)
792{
793 u32 ipa_filter_ofst = IPA_FILTER_OFST_v1_1;
794
795 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
796 ipa_write_reg(ipa_ctx->mmio, ipa_filter_ofst,
797 IPA_SETFIELD(!disable,
798 IPA_FILTER_FILTER_EN_SHFT,
799 IPA_FILTER_FILTER_EN_BMSK));
800 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
801
802 return 0;
803}
804
/**
 * ipa_init_hw() - initialize HW
 *
 * Soft-resets and enables the IPA block, then reads the version
 * register as a sanity check that register access works. On v2.5+ also
 * programs the backward-compatibility (BCR) register.
 *
 * Return codes:
 * 0: success
 * -EFAULT: version register read back as 0 (no register access)
 */
int ipa_init_hw(void)
{
	u32 ipa_version = 0;

	/* do soft reset of IPA */
	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 1);
	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 0);

	/* enable IPA */
	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_CFG_OFST, 1);

	/* Read IPA version and make sure we have access to the registers */
	ipa_version = ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST);
	if (ipa_version == 0)
		return -EFAULT;

	if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) {
		/* set ipa_bcr to 0xFFFFFFFF for using new IPA behavior */
		ipa_write_reg(ipa_ctx->mmio, IPA_BCR_OFST, IPA_BCR_REG_VAL);
	}
	return 0;
}
833
/**
 * ipa2_get_ep_mapping() - provide endpoint mapping
 * @client: client type
 *
 * Looks up the physical pipe number for @client in ep_mapping[], keyed
 * by the running HW generation (v2.5 shares the v2.0 table; unknown HW
 * falls back to the v1.1 table).
 *
 * Return value: endpoint mapping, or INVALID_EP_MAPPING_INDEX when the
 * driver is not initialized, @client is out of range, or the client has
 * no pipe on this HW.
 */
int ipa2_get_ep_mapping(enum ipa_client_type client)
{
	u8 hw_type_index = IPA_1_1;

	if (unlikely(!ipa_ctx)) {
		IPAERR("IPA driver was not initialized\n");
		return INVALID_EP_MAPPING_INDEX;
	}

	if (client >= IPA_CLIENT_MAX || client < 0) {
		IPAERR_RL("Bad client number! client =%d\n", client);
		return INVALID_EP_MAPPING_INDEX;
	}

	switch (ipa_ctx->ipa_hw_type) {
	case IPA_HW_v2_0:
	case IPA_HW_v2_5:
		hw_type_index = IPA_2_0;
		break;
	case IPA_HW_v2_6L:
		hw_type_index = IPA_2_6L;
		break;
	default:
		hw_type_index = IPA_1_1;
		break;
	}

	if (!ep_mapping[hw_type_index][client].valid)
		return INVALID_EP_MAPPING_INDEX;

	return ep_mapping[hw_type_index][client].pipe_num;
}
872
873/* ipa2_set_client() - provide client mapping
874 * @client: client type
875 *
876 * Return value: none
877 */
878
879void ipa2_set_client(int index, enum ipacm_client_enum client, bool uplink)
880{
Skylar Chang09e0e252017-03-20 14:51:29 -0700881 if (client > IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) {
Amir Levy9659e592016-10-27 18:08:27 +0300882 IPAERR("Bad client number! client =%d\n", client);
883 } else if (index >= IPA_MAX_NUM_PIPES || index < 0) {
884 IPAERR("Bad pipe index! index =%d\n", index);
885 } else {
886 ipa_ctx->ipacm_client[index].client_enum = client;
887 ipa_ctx->ipacm_client[index].uplink = uplink;
888 }
889}
890
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +0530891/* ipa2_get_wlan_stats() - get ipa wifi stats
892 *
893 * Return value: success or failure
894 */
895int ipa2_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats)
896{
897 if (ipa_ctx->uc_wdi_ctx.stats_notify) {
898 ipa_ctx->uc_wdi_ctx.stats_notify(IPA_GET_WDI_SAP_STATS,
899 wdi_sap_stats);
900 } else {
901 IPAERR("uc_wdi_ctx.stats_notify not registered\n");
902 return -EFAULT;
903 }
904 return 0;
905}
906
907int ipa2_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota)
908{
909 if (ipa_ctx->uc_wdi_ctx.stats_notify) {
910 ipa_ctx->uc_wdi_ctx.stats_notify(IPA_SET_WIFI_QUOTA,
911 wdi_quota);
912 } else {
913 IPAERR("uc_wdi_ctx.stats_notify not registered\n");
914 return -EFAULT;
915 }
916 return 0;
917}
918
Amir Levy9659e592016-10-27 18:08:27 +0300919/**
920 * ipa2_get_client() - provide client mapping
921 * @client: client type
922 *
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +0530923 * Return value: client mapping enum
Amir Levy9659e592016-10-27 18:08:27 +0300924 */
925enum ipacm_client_enum ipa2_get_client(int pipe_idx)
926{
927 if (pipe_idx >= IPA_MAX_NUM_PIPES || pipe_idx < 0) {
928 IPAERR("Bad pipe index! pipe_idx =%d\n", pipe_idx);
929 return IPACM_CLIENT_MAX;
930 } else {
931 return ipa_ctx->ipacm_client[pipe_idx].client_enum;
932 }
933}
934
/**
 * ipa2_get_client_uplink() - provide client mapping
 * @client: client type
 *
 * Return value: true when the pipe at @pipe_idx was registered as
 * uplink via ipa2_set_client(); false for a bad index.
 */
bool ipa2_get_client_uplink(int pipe_idx)
{
	if (pipe_idx < 0 || pipe_idx >= IPA_MAX_NUM_PIPES) {
		IPAERR("invalid pipe idx %d\n", pipe_idx);
		return false;
	}

	return ipa_ctx->ipacm_client[pipe_idx].uplink;
}
950
/**
 * ipa2_get_rm_resource_from_ep() - get the IPA_RM resource which is related to
 * the supplied pipe index.
 *
 * @pipe_idx:
 *
 * Scans every RM resource and returns the first whose client list
 * contains the client currently bound to @pipe_idx.
 *
 * Return value: IPA_RM resource related to the pipe, -1 if a resource was not
 * found.
 */
enum ipa_rm_resource_name ipa2_get_rm_resource_from_ep(int pipe_idx)
{
	int i;
	int j;
	enum ipa_client_type client;
	struct ipa_client_names clients;
	bool found = false;

	if (unlikely(!ipa_ctx)) {
		IPAERR("IPA driver was not initialized\n");
		return -EINVAL;
	}

	if (pipe_idx >= ipa_ctx->ipa_num_pipes || pipe_idx < 0) {
		IPAERR("Bad pipe index!\n");
		return -EINVAL;
	}

	client = ipa_ctx->ep[pipe_idx].client;

	for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) {
		/* re-zero so a failed lookup leaves length == 0 */
		memset(&clients, 0, sizeof(clients));
		ipa_get_clients_from_rm_resource(i, &clients);
		for (j = 0; j < clients.length; j++) {
			if (clients.names[j] == client) {
				found = true;
				break;
			}
		}
		if (found)
			break;
	}

	if (!found)
		return -EFAULT;

	return i;
}
998
999/**
1000 * ipa2_get_client_mapping() - provide client mapping
1001 * @pipe_idx: IPA end-point number
1002 *
1003 * Return value: client mapping
1004 */
1005enum ipa_client_type ipa2_get_client_mapping(int pipe_idx)
1006{
1007 if (unlikely(!ipa_ctx)) {
1008 IPAERR("IPA driver was not initialized\n");
1009 return -EINVAL;
1010 }
1011
1012 if (pipe_idx >= ipa_ctx->ipa_num_pipes || pipe_idx < 0) {
1013 IPAERR("Bad pipe index!\n");
1014 return -EINVAL;
1015 }
1016
1017 return ipa_ctx->ep[pipe_idx].client;
1018}
1019
/**
 * ipa_generate_mac_addr_hw_rule() - serialize a MAC-address match equation
 * into the H/W rule buffer
 * @buf: [in/out] output buffer pointer; advanced past every byte written
 * @hdr_mac_addr_offset: offset of the MAC address relative to the header
 *	(callers pass negative offsets, e.g. -14 for Ethernet II dst addr)
 * @mac_addr_mask: mask applied to the MAC address
 * @mac_addr: MAC address value to match
 *
 * The 6-byte mask and value are each laid out inside a 16-byte operand:
 * bytes 3..0 of the MAC in the first little-endian 4-byte group, bytes
 * 5..4 in the second group, remaining bytes zero. The write order below
 * is the exact byte stream the H/W expects - do not reorder.
 */
void ipa_generate_mac_addr_hw_rule(u8 **buf, u8 hdr_mac_addr_offset,
	const uint8_t mac_addr_mask[ETH_ALEN],
	const uint8_t mac_addr[ETH_ALEN])
{
	*buf = ipa_write_8(hdr_mac_addr_offset, *buf);

	/* MAC addr mask copied as little endian each 4 bytes */
	*buf = ipa_write_8(mac_addr_mask[3], *buf);
	*buf = ipa_write_8(mac_addr_mask[2], *buf);
	*buf = ipa_write_8(mac_addr_mask[1], *buf);
	*buf = ipa_write_8(mac_addr_mask[0], *buf);
	*buf = ipa_write_16(0, *buf);
	*buf = ipa_write_8(mac_addr_mask[5], *buf);
	*buf = ipa_write_8(mac_addr_mask[4], *buf);
	*buf = ipa_write_32(0, *buf);
	*buf = ipa_write_32(0, *buf);

	/* MAC addr copied as little endian each 4 bytes */
	*buf = ipa_write_8(mac_addr[3], *buf);
	*buf = ipa_write_8(mac_addr[2], *buf);
	*buf = ipa_write_8(mac_addr[1], *buf);
	*buf = ipa_write_8(mac_addr[0], *buf);
	*buf = ipa_write_16(0, *buf);
	*buf = ipa_write_8(mac_addr[5], *buf);
	*buf = ipa_write_8(mac_addr[4], *buf);
	*buf = ipa_write_32(0, *buf);
	*buf = ipa_write_32(0, *buf);
	/* round the buffer pointer up to the next 32-bit boundary */
	*buf = ipa_pad_to_32(*buf);
}
1049
1050/**
1051 * ipa_generate_hw_rule() - generate HW rule
1052 * @ip: IP address type
1053 * @attrib: IPA rule attribute
1054 * @buf: output buffer
1055 * @en_rule: rule
1056 *
1057 * Return codes:
1058 * 0: success
1059 * -EPERM: wrong input
1060 */
1061int ipa_generate_hw_rule(enum ipa_ip_type ip,
1062 const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule)
1063{
1064 u8 ofst_meq32 = 0;
1065 u8 ihl_ofst_rng16 = 0;
1066 u8 ihl_ofst_meq32 = 0;
1067 u8 ofst_meq128 = 0;
1068
1069 if (ip == IPA_IP_v4) {
1070
1071 /* error check */
1072 if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
1073 attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask &
1074 IPA_FLT_FLOW_LABEL) {
1075 IPAERR("v6 attrib's specified for v4 rule\n");
1076 return -EPERM;
1077 }
1078
1079 if (attrib->attrib_mask & IPA_FLT_TOS) {
1080 *en_rule |= IPA_TOS_EQ;
1081 *buf = ipa_write_8(attrib->u.v4.tos, *buf);
1082 *buf = ipa_pad_to_32(*buf);
1083 }
1084
1085 if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
1086 if (ipa_ofst_meq32[ofst_meq32] == -1) {
1087 IPAERR("ran out of meq32 eq\n");
1088 return -EPERM;
1089 }
1090 *en_rule |= ipa_ofst_meq32[ofst_meq32];
1091 /* 0 => offset of TOS in v4 header */
1092 *buf = ipa_write_8(0, *buf);
1093 *buf = ipa_write_32((attrib->tos_mask << 16), *buf);
1094 *buf = ipa_write_32((attrib->tos_value << 16), *buf);
1095 *buf = ipa_pad_to_32(*buf);
1096 ofst_meq32++;
1097 }
1098
1099 if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
1100 *en_rule |= IPA_PROTOCOL_EQ;
1101 *buf = ipa_write_8(attrib->u.v4.protocol, *buf);
1102 *buf = ipa_pad_to_32(*buf);
1103 }
1104
1105 if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
1106 if (ipa_ofst_meq32[ofst_meq32] == -1) {
1107 IPAERR("ran out of meq32 eq\n");
1108 return -EPERM;
1109 }
1110 *en_rule |= ipa_ofst_meq32[ofst_meq32];
1111 /* 12 => offset of src ip in v4 header */
1112 *buf = ipa_write_8(12, *buf);
1113 *buf = ipa_write_32(attrib->u.v4.src_addr_mask, *buf);
1114 *buf = ipa_write_32(attrib->u.v4.src_addr, *buf);
1115 *buf = ipa_pad_to_32(*buf);
1116 ofst_meq32++;
1117 }
1118
1119 if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
1120 if (ipa_ofst_meq32[ofst_meq32] == -1) {
1121 IPAERR("ran out of meq32 eq\n");
1122 return -EPERM;
1123 }
1124 *en_rule |= ipa_ofst_meq32[ofst_meq32];
1125 /* 16 => offset of dst ip in v4 header */
1126 *buf = ipa_write_8(16, *buf);
1127 *buf = ipa_write_32(attrib->u.v4.dst_addr_mask, *buf);
1128 *buf = ipa_write_32(attrib->u.v4.dst_addr, *buf);
1129 *buf = ipa_pad_to_32(*buf);
1130 ofst_meq32++;
1131 }
1132
1133 if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
1134 if (ipa_ofst_meq32[ofst_meq32] == -1) {
1135 IPAERR("ran out of meq32 eq\n");
1136 return -EPERM;
1137 }
1138 *en_rule |= ipa_ofst_meq32[ofst_meq32];
1139 /* -2 => offset of ether type in L2 hdr */
1140 *buf = ipa_write_8((u8)-2, *buf);
1141 *buf = ipa_write_16(0, *buf);
1142 *buf = ipa_write_16(htons(attrib->ether_type), *buf);
1143 *buf = ipa_write_16(0, *buf);
1144 *buf = ipa_write_16(htons(attrib->ether_type), *buf);
1145 *buf = ipa_pad_to_32(*buf);
1146 ofst_meq32++;
1147 }
1148
1149 if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
1150 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
1151 IPAERR("ran out of ihl_rng16 eq\n");
1152 return -EPERM;
1153 }
1154 if (attrib->src_port_hi < attrib->src_port_lo) {
1155 IPAERR("bad src port range param\n");
1156 return -EPERM;
1157 }
1158 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
1159 /* 0 => offset of src port after v4 header */
1160 *buf = ipa_write_8(0, *buf);
1161 *buf = ipa_write_16(attrib->src_port_hi, *buf);
1162 *buf = ipa_write_16(attrib->src_port_lo, *buf);
1163 *buf = ipa_pad_to_32(*buf);
1164 ihl_ofst_rng16++;
1165 }
1166
1167 if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
1168 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
1169 IPAERR("ran out of ihl_rng16 eq\n");
1170 return -EPERM;
1171 }
1172 if (attrib->dst_port_hi < attrib->dst_port_lo) {
1173 IPAERR("bad dst port range param\n");
1174 return -EPERM;
1175 }
1176 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
1177 /* 2 => offset of dst port after v4 header */
1178 *buf = ipa_write_8(2, *buf);
1179 *buf = ipa_write_16(attrib->dst_port_hi, *buf);
1180 *buf = ipa_write_16(attrib->dst_port_lo, *buf);
1181 *buf = ipa_pad_to_32(*buf);
1182 ihl_ofst_rng16++;
1183 }
1184
1185 if (attrib->attrib_mask & IPA_FLT_TYPE) {
1186 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
1187 IPAERR("ran out of ihl_meq32 eq\n");
1188 return -EPERM;
1189 }
1190 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
1191 /* 0 => offset of type after v4 header */
1192 *buf = ipa_write_8(0, *buf);
1193 *buf = ipa_write_32(0xFF, *buf);
1194 *buf = ipa_write_32(attrib->type, *buf);
1195 *buf = ipa_pad_to_32(*buf);
1196 ihl_ofst_meq32++;
1197 }
1198
1199 if (attrib->attrib_mask & IPA_FLT_CODE) {
1200 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
1201 IPAERR("ran out of ihl_meq32 eq\n");
1202 return -EPERM;
1203 }
1204 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
1205 /* 1 => offset of code after v4 header */
1206 *buf = ipa_write_8(1, *buf);
1207 *buf = ipa_write_32(0xFF, *buf);
1208 *buf = ipa_write_32(attrib->code, *buf);
1209 *buf = ipa_pad_to_32(*buf);
1210 ihl_ofst_meq32++;
1211 }
1212
1213 if (attrib->attrib_mask & IPA_FLT_SPI) {
1214 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
1215 IPAERR("ran out of ihl_meq32 eq\n");
1216 return -EPERM;
1217 }
1218 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
1219 /* 0 => offset of SPI after v4 header FIXME */
1220 *buf = ipa_write_8(0, *buf);
1221 *buf = ipa_write_32(0xFFFFFFFF, *buf);
1222 *buf = ipa_write_32(attrib->spi, *buf);
1223 *buf = ipa_pad_to_32(*buf);
1224 ihl_ofst_meq32++;
1225 }
1226
1227 if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
1228 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
1229 IPAERR("ran out of ihl_rng16 eq\n");
1230 return -EPERM;
1231 }
1232 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
1233 /* 0 => offset of src port after v4 header */
1234 *buf = ipa_write_8(0, *buf);
1235 *buf = ipa_write_16(attrib->src_port, *buf);
1236 *buf = ipa_write_16(attrib->src_port, *buf);
1237 *buf = ipa_pad_to_32(*buf);
1238 ihl_ofst_rng16++;
1239 }
1240
1241 if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
1242 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
1243 IPAERR("ran out of ihl_rng16 eq\n");
1244 return -EPERM;
1245 }
1246 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
1247 /* 2 => offset of dst port after v4 header */
1248 *buf = ipa_write_8(2, *buf);
1249 *buf = ipa_write_16(attrib->dst_port, *buf);
1250 *buf = ipa_write_16(attrib->dst_port, *buf);
1251 *buf = ipa_pad_to_32(*buf);
1252 ihl_ofst_rng16++;
1253 }
1254
1255 if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
1256 if (ipa_ofst_meq128[ofst_meq128] == -1) {
1257 IPAERR("ran out of meq128 eq\n");
1258 return -EPERM;
1259 }
1260 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1261
1262 /* -14 => offset of dst mac addr in Ethernet II hdr */
1263 ipa_generate_mac_addr_hw_rule(
1264 buf,
1265 -14,
1266 attrib->dst_mac_addr_mask,
1267 attrib->dst_mac_addr);
1268
1269 ofst_meq128++;
1270 }
1271
1272 if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
1273 if (ipa_ofst_meq128[ofst_meq128] == -1) {
1274 IPAERR("ran out of meq128 eq\n");
1275 return -EPERM;
1276 }
1277 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1278
1279 /* -8 => offset of src mac addr in Ethernet II hdr */
1280 ipa_generate_mac_addr_hw_rule(
1281 buf,
1282 -8,
1283 attrib->src_mac_addr_mask,
1284 attrib->src_mac_addr);
1285
1286 ofst_meq128++;
1287 }
1288
1289 if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
1290 if (ipa_ofst_meq128[ofst_meq128] == -1) {
1291 IPAERR("ran out of meq128 eq\n");
1292 return -EPERM;
1293 }
1294 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1295
1296 /* -22 => offset of dst mac addr in 802.3 hdr */
1297 ipa_generate_mac_addr_hw_rule(
1298 buf,
1299 -22,
1300 attrib->dst_mac_addr_mask,
1301 attrib->dst_mac_addr);
1302
1303 ofst_meq128++;
1304 }
1305
1306 if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
1307 if (ipa_ofst_meq128[ofst_meq128] == -1) {
1308 IPAERR("ran out of meq128 eq\n");
1309 return -EPERM;
1310 }
1311 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1312
1313 /* -16 => offset of src mac addr in 802.3 hdr */
1314 ipa_generate_mac_addr_hw_rule(
1315 buf,
1316 -16,
1317 attrib->src_mac_addr_mask,
1318 attrib->src_mac_addr);
1319
1320 ofst_meq128++;
1321 }
1322
1323 if (attrib->attrib_mask & IPA_FLT_META_DATA) {
1324 *en_rule |= IPA_METADATA_COMPARE;
1325 *buf = ipa_write_8(0, *buf); /* offset, reserved */
1326 *buf = ipa_write_32(attrib->meta_data_mask, *buf);
1327 *buf = ipa_write_32(attrib->meta_data, *buf);
1328 *buf = ipa_pad_to_32(*buf);
1329 }
1330
1331 if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
1332 *en_rule |= IPA_IS_FRAG;
1333 *buf = ipa_pad_to_32(*buf);
1334 }
1335 } else if (ip == IPA_IP_v6) {
1336
1337 /* v6 code below assumes no extension headers TODO: fix this */
1338
1339 /* error check */
1340 if (attrib->attrib_mask & IPA_FLT_TOS ||
1341 attrib->attrib_mask & IPA_FLT_PROTOCOL) {
1342 IPAERR("v4 attrib's specified for v6 rule\n");
1343 return -EPERM;
1344 }
1345
1346 if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
1347 *en_rule |= IPA_PROTOCOL_EQ;
1348 *buf = ipa_write_8(attrib->u.v6.next_hdr, *buf);
1349 *buf = ipa_pad_to_32(*buf);
1350 }
1351
1352 if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
1353 if (ipa_ofst_meq32[ofst_meq32] == -1) {
1354 IPAERR("ran out of meq32 eq\n");
1355 return -EPERM;
1356 }
1357 *en_rule |= ipa_ofst_meq32[ofst_meq32];
1358 /* -2 => offset of ether type in L2 hdr */
1359 *buf = ipa_write_8((u8)-2, *buf);
1360 *buf = ipa_write_16(0, *buf);
1361 *buf = ipa_write_16(htons(attrib->ether_type), *buf);
1362 *buf = ipa_write_16(0, *buf);
1363 *buf = ipa_write_16(htons(attrib->ether_type), *buf);
1364 *buf = ipa_pad_to_32(*buf);
1365 ofst_meq32++;
1366 }
1367
1368 if (attrib->attrib_mask & IPA_FLT_TYPE) {
1369 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
1370 IPAERR("ran out of ihl_meq32 eq\n");
1371 return -EPERM;
1372 }
1373 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
1374 /* 0 => offset of type after v6 header */
1375 *buf = ipa_write_8(0, *buf);
1376 *buf = ipa_write_32(0xFF, *buf);
1377 *buf = ipa_write_32(attrib->type, *buf);
1378 *buf = ipa_pad_to_32(*buf);
1379 ihl_ofst_meq32++;
1380 }
1381
1382 if (attrib->attrib_mask & IPA_FLT_CODE) {
1383 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
1384 IPAERR("ran out of ihl_meq32 eq\n");
1385 return -EPERM;
1386 }
1387 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
1388 /* 1 => offset of code after v6 header */
1389 *buf = ipa_write_8(1, *buf);
1390 *buf = ipa_write_32(0xFF, *buf);
1391 *buf = ipa_write_32(attrib->code, *buf);
1392 *buf = ipa_pad_to_32(*buf);
1393 ihl_ofst_meq32++;
1394 }
1395
1396 if (attrib->attrib_mask & IPA_FLT_SPI) {
1397 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
1398 IPAERR("ran out of ihl_meq32 eq\n");
1399 return -EPERM;
1400 }
1401 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
1402 /* 0 => offset of SPI after v6 header FIXME */
1403 *buf = ipa_write_8(0, *buf);
1404 *buf = ipa_write_32(0xFFFFFFFF, *buf);
1405 *buf = ipa_write_32(attrib->spi, *buf);
1406 *buf = ipa_pad_to_32(*buf);
1407 ihl_ofst_meq32++;
1408 }
1409
1410 if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
1411 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
1412 IPAERR("ran out of ihl_rng16 eq\n");
1413 return -EPERM;
1414 }
1415 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
1416 /* 0 => offset of src port after v6 header */
1417 *buf = ipa_write_8(0, *buf);
1418 *buf = ipa_write_16(attrib->src_port, *buf);
1419 *buf = ipa_write_16(attrib->src_port, *buf);
1420 *buf = ipa_pad_to_32(*buf);
1421 ihl_ofst_rng16++;
1422 }
1423
1424 if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
1425 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
1426 IPAERR("ran out of ihl_rng16 eq\n");
1427 return -EPERM;
1428 }
1429 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
1430 /* 2 => offset of dst port after v6 header */
1431 *buf = ipa_write_8(2, *buf);
1432 *buf = ipa_write_16(attrib->dst_port, *buf);
1433 *buf = ipa_write_16(attrib->dst_port, *buf);
1434 *buf = ipa_pad_to_32(*buf);
1435 ihl_ofst_rng16++;
1436 }
1437
1438 if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
1439 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
1440 IPAERR("ran out of ihl_rng16 eq\n");
1441 return -EPERM;
1442 }
1443 if (attrib->src_port_hi < attrib->src_port_lo) {
1444 IPAERR("bad src port range param\n");
1445 return -EPERM;
1446 }
1447 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
1448 /* 0 => offset of src port after v6 header */
1449 *buf = ipa_write_8(0, *buf);
1450 *buf = ipa_write_16(attrib->src_port_hi, *buf);
1451 *buf = ipa_write_16(attrib->src_port_lo, *buf);
1452 *buf = ipa_pad_to_32(*buf);
1453 ihl_ofst_rng16++;
1454 }
1455
1456 if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
1457 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
1458 IPAERR("ran out of ihl_rng16 eq\n");
1459 return -EPERM;
1460 }
1461 if (attrib->dst_port_hi < attrib->dst_port_lo) {
1462 IPAERR("bad dst port range param\n");
1463 return -EPERM;
1464 }
1465 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
1466 /* 2 => offset of dst port after v6 header */
1467 *buf = ipa_write_8(2, *buf);
1468 *buf = ipa_write_16(attrib->dst_port_hi, *buf);
1469 *buf = ipa_write_16(attrib->dst_port_lo, *buf);
1470 *buf = ipa_pad_to_32(*buf);
1471 ihl_ofst_rng16++;
1472 }
1473
1474 if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
1475 if (ipa_ofst_meq128[ofst_meq128] == -1) {
1476 IPAERR("ran out of meq128 eq\n");
1477 return -EPERM;
1478 }
1479 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1480 /* 8 => offset of src ip in v6 header */
1481 *buf = ipa_write_8(8, *buf);
1482 *buf = ipa_write_32(attrib->u.v6.src_addr_mask[0],
1483 *buf);
1484 *buf = ipa_write_32(attrib->u.v6.src_addr_mask[1],
1485 *buf);
1486 *buf = ipa_write_32(attrib->u.v6.src_addr_mask[2],
1487 *buf);
1488 *buf = ipa_write_32(attrib->u.v6.src_addr_mask[3],
1489 *buf);
1490 *buf = ipa_write_32(attrib->u.v6.src_addr[0], *buf);
1491 *buf = ipa_write_32(attrib->u.v6.src_addr[1], *buf);
1492 *buf = ipa_write_32(attrib->u.v6.src_addr[2], *buf);
1493 *buf = ipa_write_32(attrib->u.v6.src_addr[3], *buf);
1494 *buf = ipa_pad_to_32(*buf);
1495 ofst_meq128++;
1496 }
1497
1498 if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
1499 if (ipa_ofst_meq128[ofst_meq128] == -1) {
1500 IPAERR("ran out of meq128 eq\n");
1501 return -EPERM;
1502 }
1503 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1504 /* 24 => offset of dst ip in v6 header */
1505 *buf = ipa_write_8(24, *buf);
1506 *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[0],
1507 *buf);
1508 *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[1],
1509 *buf);
1510 *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[2],
1511 *buf);
1512 *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[3],
1513 *buf);
1514 *buf = ipa_write_32(attrib->u.v6.dst_addr[0], *buf);
1515 *buf = ipa_write_32(attrib->u.v6.dst_addr[1], *buf);
1516 *buf = ipa_write_32(attrib->u.v6.dst_addr[2], *buf);
1517 *buf = ipa_write_32(attrib->u.v6.dst_addr[3], *buf);
1518 *buf = ipa_pad_to_32(*buf);
1519 ofst_meq128++;
1520 }
1521
1522 if (attrib->attrib_mask & IPA_FLT_TC) {
1523 *en_rule |= IPA_FLT_TC;
1524 *buf = ipa_write_8(attrib->u.v6.tc, *buf);
1525 *buf = ipa_pad_to_32(*buf);
1526 }
1527
1528 if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
1529 if (ipa_ofst_meq128[ofst_meq128] == -1) {
1530 IPAERR("ran out of meq128 eq\n");
1531 return -EPERM;
1532 }
1533 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1534 /* 0 => offset of TOS in v6 header */
1535 *buf = ipa_write_8(0, *buf);
1536 *buf = ipa_write_32((attrib->tos_mask << 20), *buf);
1537 *buf = ipa_write_32(0, *buf);
1538 *buf = ipa_write_32(0, *buf);
1539 *buf = ipa_write_32(0, *buf);
1540
1541 *buf = ipa_write_32((attrib->tos_value << 20), *buf);
1542 *buf = ipa_write_32(0, *buf);
1543 *buf = ipa_write_32(0, *buf);
1544 *buf = ipa_write_32(0, *buf);
1545 *buf = ipa_pad_to_32(*buf);
1546 ofst_meq128++;
1547 }
1548
1549 if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
1550 if (ipa_ofst_meq128[ofst_meq128] == -1) {
1551 IPAERR("ran out of meq128 eq\n");
1552 return -EPERM;
1553 }
1554 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1555
1556 /* -14 => offset of dst mac addr in Ethernet II hdr */
1557 ipa_generate_mac_addr_hw_rule(
1558 buf,
1559 -14,
1560 attrib->dst_mac_addr_mask,
1561 attrib->dst_mac_addr);
1562
1563 ofst_meq128++;
1564 }
1565
1566 if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
1567 if (ipa_ofst_meq128[ofst_meq128] == -1) {
1568 IPAERR("ran out of meq128 eq\n");
1569 return -EPERM;
1570 }
1571 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1572
1573 /* -8 => offset of src mac addr in Ethernet II hdr */
1574 ipa_generate_mac_addr_hw_rule(
1575 buf,
1576 -8,
1577 attrib->src_mac_addr_mask,
1578 attrib->src_mac_addr);
1579
1580 ofst_meq128++;
1581 }
1582
1583 if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
1584 if (ipa_ofst_meq128[ofst_meq128] == -1) {
1585 IPAERR("ran out of meq128 eq\n");
1586 return -EPERM;
1587 }
1588 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1589
1590 /* -22 => offset of dst mac addr in 802.3 hdr */
1591 ipa_generate_mac_addr_hw_rule(
1592 buf,
1593 -22,
1594 attrib->dst_mac_addr_mask,
1595 attrib->dst_mac_addr);
1596
1597 ofst_meq128++;
1598 }
1599
1600 if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
1601 if (ipa_ofst_meq128[ofst_meq128] == -1) {
1602 IPAERR("ran out of meq128 eq\n");
1603 return -EPERM;
1604 }
1605 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1606
1607 /* -16 => offset of src mac addr in 802.3 hdr */
1608 ipa_generate_mac_addr_hw_rule(
1609 buf,
1610 -16,
1611 attrib->src_mac_addr_mask,
1612 attrib->src_mac_addr);
1613
1614 ofst_meq128++;
1615 }
1616
1617 if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
1618 *en_rule |= IPA_FLT_FLOW_LABEL;
1619 /* FIXME FL is only 20 bits */
1620 *buf = ipa_write_32(attrib->u.v6.flow_label, *buf);
1621 *buf = ipa_pad_to_32(*buf);
1622 }
1623
1624 if (attrib->attrib_mask & IPA_FLT_META_DATA) {
1625 *en_rule |= IPA_METADATA_COMPARE;
1626 *buf = ipa_write_8(0, *buf); /* offset, reserved */
1627 *buf = ipa_write_32(attrib->meta_data_mask, *buf);
1628 *buf = ipa_write_32(attrib->meta_data, *buf);
1629 *buf = ipa_pad_to_32(*buf);
1630 }
1631
1632 if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
1633 *en_rule |= IPA_IS_FRAG;
1634 *buf = ipa_pad_to_32(*buf);
1635 }
1636 } else {
1637 IPAERR("unsupported ip %d\n", ip);
1638 return -EPERM;
1639 }
1640
1641 /*
1642 * default "rule" means no attributes set -> map to
1643 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
1644 */
1645 if (attrib->attrib_mask == 0) {
Utkarsh Saxena41d57c52016-11-16 12:04:28 +05301646 IPADBG_LOW("building default rule\n");
Amir Levy9659e592016-10-27 18:08:27 +03001647 if (ipa_ofst_meq32[ofst_meq32] == -1) {
1648 IPAERR("ran out of meq32 eq\n");
1649 return -EPERM;
1650 }
1651 *en_rule |= ipa_ofst_meq32[ofst_meq32];
1652 *buf = ipa_write_8(0, *buf); /* offset */
1653 *buf = ipa_write_32(0, *buf); /* mask */
1654 *buf = ipa_write_32(0, *buf); /* val */
1655 *buf = ipa_pad_to_32(*buf);
1656 ofst_meq32++;
1657 }
1658
1659 return 0;
1660}
1661
1662void ipa_generate_flt_mac_addr_eq(struct ipa_ipfltri_rule_eq *eq_atrb,
1663 u8 hdr_mac_addr_offset, const uint8_t mac_addr_mask[ETH_ALEN],
1664 const uint8_t mac_addr[ETH_ALEN], u8 ofst_meq128)
1665{
1666 eq_atrb->offset_meq_128[ofst_meq128].offset = hdr_mac_addr_offset;
1667 eq_atrb->offset_meq_128[ofst_meq128].mask[0] = mac_addr_mask[3];
1668 eq_atrb->offset_meq_128[ofst_meq128].mask[1] = mac_addr_mask[2];
1669 eq_atrb->offset_meq_128[ofst_meq128].mask[2] = mac_addr_mask[1];
1670 eq_atrb->offset_meq_128[ofst_meq128].mask[3] = mac_addr_mask[0];
1671 eq_atrb->offset_meq_128[ofst_meq128].mask[4] = 0;
1672 eq_atrb->offset_meq_128[ofst_meq128].mask[5] = 0;
1673 eq_atrb->offset_meq_128[ofst_meq128].mask[6] = mac_addr_mask[5];
1674 eq_atrb->offset_meq_128[ofst_meq128].mask[7] = mac_addr_mask[4];
1675 memset(eq_atrb->offset_meq_128[ofst_meq128].mask + 8, 0, 8);
1676 eq_atrb->offset_meq_128[ofst_meq128].value[0] = mac_addr[3];
1677 eq_atrb->offset_meq_128[ofst_meq128].value[1] = mac_addr[2];
1678 eq_atrb->offset_meq_128[ofst_meq128].value[2] = mac_addr[1];
1679 eq_atrb->offset_meq_128[ofst_meq128].value[3] = mac_addr[0];
1680 eq_atrb->offset_meq_128[ofst_meq128].value[4] = 0;
1681 eq_atrb->offset_meq_128[ofst_meq128].value[5] = 0;
1682 eq_atrb->offset_meq_128[ofst_meq128].value[6] = mac_addr[5];
1683 eq_atrb->offset_meq_128[ofst_meq128].value[7] = mac_addr[4];
1684 memset(eq_atrb->offset_meq_128[ofst_meq128].value + 8, 0, 8);
1685}
1686
1687int ipa_generate_flt_eq(enum ipa_ip_type ip,
1688 const struct ipa_rule_attrib *attrib,
1689 struct ipa_ipfltri_rule_eq *eq_atrb)
1690{
1691 u8 ofst_meq32 = 0;
1692 u8 ihl_ofst_rng16 = 0;
1693 u8 ihl_ofst_meq32 = 0;
1694 u8 ofst_meq128 = 0;
1695 u16 eq_bitmap = 0;
1696 u16 *en_rule = &eq_bitmap;
1697
1698 if (ip == IPA_IP_v4) {
1699
1700 /* error check */
1701 if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
1702 attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask &
1703 IPA_FLT_FLOW_LABEL) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301704 IPAERR_RL("v6 attrib's specified for v4 rule\n");
Amir Levy9659e592016-10-27 18:08:27 +03001705 return -EPERM;
1706 }
1707
1708 if (attrib->attrib_mask & IPA_FLT_TOS) {
1709 *en_rule |= IPA_TOS_EQ;
1710 eq_atrb->tos_eq_present = 1;
1711 eq_atrb->tos_eq = attrib->u.v4.tos;
1712 }
1713
1714 if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
1715 if (ipa_ofst_meq32[ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301716 IPAERR_RL("ran out of meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001717 return -EPERM;
1718 }
1719 *en_rule |= ipa_ofst_meq32[ofst_meq32];
1720 eq_atrb->offset_meq_32[ofst_meq32].offset = 0;
1721 eq_atrb->offset_meq_32[ofst_meq32].mask =
1722 attrib->tos_mask << 16;
1723 eq_atrb->offset_meq_32[ofst_meq32].value =
1724 attrib->tos_value << 16;
1725 ofst_meq32++;
1726 }
1727
1728 if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
1729 *en_rule |= IPA_PROTOCOL_EQ;
1730 eq_atrb->protocol_eq_present = 1;
1731 eq_atrb->protocol_eq = attrib->u.v4.protocol;
1732 }
1733
1734 if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
1735 if (ipa_ofst_meq32[ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301736 IPAERR_RL("ran out of meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001737 return -EPERM;
1738 }
1739 *en_rule |= ipa_ofst_meq32[ofst_meq32];
1740 eq_atrb->offset_meq_32[ofst_meq32].offset = 12;
1741 eq_atrb->offset_meq_32[ofst_meq32].mask =
1742 attrib->u.v4.src_addr_mask;
1743 eq_atrb->offset_meq_32[ofst_meq32].value =
1744 attrib->u.v4.src_addr;
1745 ofst_meq32++;
1746 }
1747
1748 if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
1749 if (ipa_ofst_meq32[ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301750 IPAERR_RL("ran out of meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001751 return -EPERM;
1752 }
1753 *en_rule |= ipa_ofst_meq32[ofst_meq32];
1754 eq_atrb->offset_meq_32[ofst_meq32].offset = 16;
1755 eq_atrb->offset_meq_32[ofst_meq32].mask =
1756 attrib->u.v4.dst_addr_mask;
1757 eq_atrb->offset_meq_32[ofst_meq32].value =
1758 attrib->u.v4.dst_addr;
1759 ofst_meq32++;
1760 }
1761
1762 if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
1763 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301764 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001765 return -EPERM;
1766 }
1767 if (attrib->src_port_hi < attrib->src_port_lo) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301768 IPAERR_RL("bad src port range param\n");
Amir Levy9659e592016-10-27 18:08:27 +03001769 return -EPERM;
1770 }
1771 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
1772 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
1773 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
1774 = attrib->src_port_lo;
1775 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
1776 = attrib->src_port_hi;
1777 ihl_ofst_rng16++;
1778 }
1779
1780 if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
1781 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301782 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001783 return -EPERM;
1784 }
1785 if (attrib->dst_port_hi < attrib->dst_port_lo) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301786 IPAERR_RL("bad dst port range param\n");
Amir Levy9659e592016-10-27 18:08:27 +03001787 return -EPERM;
1788 }
1789 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
1790 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
1791 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
1792 = attrib->dst_port_lo;
1793 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
1794 = attrib->dst_port_hi;
1795 ihl_ofst_rng16++;
1796 }
1797
1798 if (attrib->attrib_mask & IPA_FLT_TYPE) {
1799 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301800 IPAERR_RL("ran out of ihl_meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001801 return -EPERM;
1802 }
1803 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
1804 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
1805 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
1806 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
1807 attrib->type;
1808 ihl_ofst_meq32++;
1809 }
1810
1811 if (attrib->attrib_mask & IPA_FLT_CODE) {
1812 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301813 IPAERR_RL("ran out of ihl_meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001814 return -EPERM;
1815 }
1816 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
1817 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
1818 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
1819 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
1820 attrib->code;
1821 ihl_ofst_meq32++;
1822 }
1823
1824 if (attrib->attrib_mask & IPA_FLT_SPI) {
1825 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301826 IPAERR_RL("ran out of ihl_meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001827 return -EPERM;
1828 }
1829 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
1830 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
1831 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
1832 0xFFFFFFFF;
1833 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
1834 attrib->spi;
1835 ihl_ofst_meq32++;
1836 }
1837
1838 if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
1839 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301840 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001841 return -EPERM;
1842 }
1843 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
1844 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
1845 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
1846 = attrib->src_port;
1847 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
1848 = attrib->src_port;
1849 ihl_ofst_rng16++;
1850 }
1851
1852 if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
1853 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301854 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001855 return -EPERM;
1856 }
1857 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
1858 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
1859 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
1860 = attrib->dst_port;
1861 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
1862 = attrib->dst_port;
1863 ihl_ofst_rng16++;
1864 }
1865
1866 if (attrib->attrib_mask & IPA_FLT_META_DATA) {
1867 *en_rule |= IPA_METADATA_COMPARE;
1868 eq_atrb->metadata_meq32_present = 1;
1869 eq_atrb->metadata_meq32.offset = 0;
1870 eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
1871 eq_atrb->metadata_meq32.value = attrib->meta_data;
1872 }
1873
1874 if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
1875 *en_rule |= IPA_IS_FRAG;
1876 eq_atrb->ipv4_frag_eq_present = 1;
1877 }
1878
1879 if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
1880 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301881 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001882 return -EPERM;
1883 }
1884 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1885
1886 /* -14 => offset of dst mac addr in Ethernet II hdr */
1887 ipa_generate_flt_mac_addr_eq(eq_atrb, -14,
1888 attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
1889 ofst_meq128);
1890
1891 ofst_meq128++;
1892 }
1893
1894 if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
1895 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301896 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001897 return -EPERM;
1898 }
1899 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1900
1901 /* -8 => offset of src mac addr in Ethernet II hdr */
1902 ipa_generate_flt_mac_addr_eq(eq_atrb, -8,
1903 attrib->src_mac_addr_mask, attrib->src_mac_addr,
1904 ofst_meq128);
1905
1906 ofst_meq128++;
1907 }
1908
1909 if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
1910 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301911 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001912 return -EPERM;
1913 }
1914 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1915
1916 /* -22 => offset of dst mac addr in 802.3 hdr */
1917 ipa_generate_flt_mac_addr_eq(eq_atrb, -22,
1918 attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
1919 ofst_meq128);
1920
1921 ofst_meq128++;
1922 }
1923
1924 if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
1925 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301926 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001927 return -EPERM;
1928 }
1929 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1930
1931 /* -16 => offset of src mac addr in 802.3 hdr */
1932 ipa_generate_flt_mac_addr_eq(eq_atrb, -16,
1933 attrib->src_mac_addr_mask, attrib->src_mac_addr,
1934 ofst_meq128);
1935
1936 ofst_meq128++;
1937 }
1938
1939 if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
1940 if (ipa_ofst_meq32[ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301941 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001942 return -EPERM;
1943 }
1944 *en_rule |= ipa_ofst_meq32[ofst_meq32];
1945 eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
1946 eq_atrb->offset_meq_32[ofst_meq32].mask =
1947 htons(attrib->ether_type);
1948 eq_atrb->offset_meq_32[ofst_meq32].value =
1949 htons(attrib->ether_type);
1950 ofst_meq32++;
1951 }
1952 } else if (ip == IPA_IP_v6) {
1953
1954 /* v6 code below assumes no extension headers TODO: fix this */
1955
1956 /* error check */
1957 if (attrib->attrib_mask & IPA_FLT_TOS ||
1958 attrib->attrib_mask & IPA_FLT_PROTOCOL) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301959 IPAERR_RL("v4 attrib's specified for v6 rule\n");
Amir Levy9659e592016-10-27 18:08:27 +03001960 return -EPERM;
1961 }
1962
1963 if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
1964 *en_rule |= IPA_PROTOCOL_EQ;
1965 eq_atrb->protocol_eq_present = 1;
1966 eq_atrb->protocol_eq = attrib->u.v6.next_hdr;
1967 }
1968
1969 if (attrib->attrib_mask & IPA_FLT_TYPE) {
1970 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301971 IPAERR_RL("ran out of ihl_meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001972 return -EPERM;
1973 }
1974 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
1975 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
1976 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
1977 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
1978 attrib->type;
1979 ihl_ofst_meq32++;
1980 }
1981
1982 if (attrib->attrib_mask & IPA_FLT_CODE) {
1983 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301984 IPAERR_RL("ran out of ihl_meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001985 return -EPERM;
1986 }
1987 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
1988 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
1989 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
1990 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
1991 attrib->code;
1992 ihl_ofst_meq32++;
1993 }
1994
1995 if (attrib->attrib_mask & IPA_FLT_SPI) {
1996 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301997 IPAERR_RL("ran out of ihl_meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001998 return -EPERM;
1999 }
2000 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
2001 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
2002 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
2003 0xFFFFFFFF;
2004 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
2005 attrib->spi;
2006 ihl_ofst_meq32++;
2007 }
2008
2009 if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
2010 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302011 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002012 return -EPERM;
2013 }
2014 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
2015 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
2016 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
2017 = attrib->src_port;
2018 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
2019 = attrib->src_port;
2020 ihl_ofst_rng16++;
2021 }
2022
2023 if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
2024 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302025 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002026 return -EPERM;
2027 }
2028 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
2029 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
2030 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
2031 = attrib->dst_port;
2032 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
2033 = attrib->dst_port;
2034 ihl_ofst_rng16++;
2035 }
2036
2037 if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
2038 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302039 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002040 return -EPERM;
2041 }
2042 if (attrib->src_port_hi < attrib->src_port_lo) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302043 IPAERR_RL("bad src port range param\n");
Amir Levy9659e592016-10-27 18:08:27 +03002044 return -EPERM;
2045 }
2046 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
2047 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
2048 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
2049 = attrib->src_port_lo;
2050 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
2051 = attrib->src_port_hi;
2052 ihl_ofst_rng16++;
2053 }
2054
2055 if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
2056 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302057 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002058 return -EPERM;
2059 }
2060 if (attrib->dst_port_hi < attrib->dst_port_lo) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302061 IPAERR_RL("bad dst port range param\n");
Amir Levy9659e592016-10-27 18:08:27 +03002062 return -EPERM;
2063 }
2064 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
2065 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
2066 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
2067 = attrib->dst_port_lo;
2068 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
2069 = attrib->dst_port_hi;
2070 ihl_ofst_rng16++;
2071 }
2072
2073 if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
2074 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302075 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002076 return -EPERM;
2077 }
2078 *en_rule |= ipa_ofst_meq128[ofst_meq128];
2079 eq_atrb->offset_meq_128[ofst_meq128].offset = 8;
2080 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
2081 = attrib->u.v6.src_addr_mask[0];
2082 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
2083 = attrib->u.v6.src_addr_mask[1];
2084 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
2085 = attrib->u.v6.src_addr_mask[2];
2086 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
2087 = attrib->u.v6.src_addr_mask[3];
2088 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
2089 = attrib->u.v6.src_addr[0];
2090 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
2091 = attrib->u.v6.src_addr[1];
2092 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
2093 = attrib->u.v6.src_addr[2];
2094 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
2095 12) = attrib->u.v6.src_addr[3];
2096 ofst_meq128++;
2097 }
2098
2099 if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
2100 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302101 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002102 return -EPERM;
2103 }
2104 *en_rule |= ipa_ofst_meq128[ofst_meq128];
2105 eq_atrb->offset_meq_128[ofst_meq128].offset = 24;
2106 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
2107 = attrib->u.v6.dst_addr_mask[0];
2108 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
2109 = attrib->u.v6.dst_addr_mask[1];
2110 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
2111 = attrib->u.v6.dst_addr_mask[2];
2112 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
2113 = attrib->u.v6.dst_addr_mask[3];
2114 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
2115 = attrib->u.v6.dst_addr[0];
2116 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
2117 = attrib->u.v6.dst_addr[1];
2118 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
2119 = attrib->u.v6.dst_addr[2];
2120 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
2121 12) = attrib->u.v6.dst_addr[3];
2122 ofst_meq128++;
2123 }
2124
2125 if (attrib->attrib_mask & IPA_FLT_TC) {
2126 *en_rule |= IPA_FLT_TC;
2127 eq_atrb->tc_eq_present = 1;
2128 eq_atrb->tc_eq = attrib->u.v6.tc;
2129 }
2130
2131 if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
2132 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302133 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002134 return -EPERM;
2135 }
2136 *en_rule |= ipa_ofst_meq128[ofst_meq128];
2137 eq_atrb->offset_meq_128[ofst_meq128].offset = 0;
2138 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
2139 = attrib->tos_mask << 20;
2140 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
2141 = 0;
2142 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
2143 = 0;
2144 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
2145 = 0;
2146 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
2147 = attrib->tos_value << 20;
2148 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
2149 = 0;
2150 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
2151 = 0;
2152 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
2153 12) = 0;
2154 ofst_meq128++;
2155 }
2156
2157 if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
2158 *en_rule |= IPA_FLT_FLOW_LABEL;
2159 eq_atrb->fl_eq_present = 1;
2160 eq_atrb->fl_eq = attrib->u.v6.flow_label;
2161 }
2162
2163 if (attrib->attrib_mask & IPA_FLT_META_DATA) {
2164 *en_rule |= IPA_METADATA_COMPARE;
2165 eq_atrb->metadata_meq32_present = 1;
2166 eq_atrb->metadata_meq32.offset = 0;
2167 eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
2168 eq_atrb->metadata_meq32.value = attrib->meta_data;
2169 }
2170
2171 if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
2172 *en_rule |= IPA_IS_FRAG;
2173 eq_atrb->ipv4_frag_eq_present = 1;
2174 }
2175
2176 if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
2177 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302178 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002179 return -EPERM;
2180 }
2181 *en_rule |= ipa_ofst_meq128[ofst_meq128];
2182
2183 /* -14 => offset of dst mac addr in Ethernet II hdr */
2184 ipa_generate_flt_mac_addr_eq(eq_atrb, -14,
2185 attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
2186 ofst_meq128);
2187
2188 ofst_meq128++;
2189 }
2190
2191 if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
2192 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302193 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002194 return -EPERM;
2195 }
2196 *en_rule |= ipa_ofst_meq128[ofst_meq128];
2197
2198 /* -8 => offset of src mac addr in Ethernet II hdr */
2199 ipa_generate_flt_mac_addr_eq(eq_atrb, -8,
2200 attrib->src_mac_addr_mask, attrib->src_mac_addr,
2201 ofst_meq128);
2202
2203 ofst_meq128++;
2204 }
2205
2206 if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
2207 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302208 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002209 return -EPERM;
2210 }
2211 *en_rule |= ipa_ofst_meq128[ofst_meq128];
2212
2213 /* -22 => offset of dst mac addr in 802.3 hdr */
2214 ipa_generate_flt_mac_addr_eq(eq_atrb, -22,
2215 attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
2216 ofst_meq128);
2217
2218 ofst_meq128++;
2219 }
2220
2221 if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
2222 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302223 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002224 return -EPERM;
2225 }
2226 *en_rule |= ipa_ofst_meq128[ofst_meq128];
2227
2228 /* -16 => offset of src mac addr in 802.3 hdr */
2229 ipa_generate_flt_mac_addr_eq(eq_atrb, -16,
2230 attrib->src_mac_addr_mask, attrib->src_mac_addr,
2231 ofst_meq128);
2232
2233 ofst_meq128++;
2234 }
2235
2236 if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
2237 if (ipa_ofst_meq32[ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302238 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002239 return -EPERM;
2240 }
2241 *en_rule |= ipa_ofst_meq32[ofst_meq32];
2242 eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
2243 eq_atrb->offset_meq_32[ofst_meq32].mask =
2244 htons(attrib->ether_type);
2245 eq_atrb->offset_meq_32[ofst_meq32].value =
2246 htons(attrib->ether_type);
2247 ofst_meq32++;
2248 }
2249
2250 } else {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302251 IPAERR_RL("unsupported ip %d\n", ip);
Amir Levy9659e592016-10-27 18:08:27 +03002252 return -EPERM;
2253 }
2254
2255 /*
2256 * default "rule" means no attributes set -> map to
2257 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
2258 */
2259 if (attrib->attrib_mask == 0) {
2260 if (ipa_ofst_meq32[ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302261 IPAERR_RL("ran out of meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002262 return -EPERM;
2263 }
2264 *en_rule |= ipa_ofst_meq32[ofst_meq32];
2265 eq_atrb->offset_meq_32[ofst_meq32].offset = 0;
2266 eq_atrb->offset_meq_32[ofst_meq32].mask = 0;
2267 eq_atrb->offset_meq_32[ofst_meq32].value = 0;
2268 ofst_meq32++;
2269 }
2270
2271 eq_atrb->rule_eq_bitmap = *en_rule;
2272 eq_atrb->num_offset_meq_32 = ofst_meq32;
2273 eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16;
2274 eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32;
2275 eq_atrb->num_offset_meq_128 = ofst_meq128;
2276
2277 return 0;
2278}
2279
2280/**
2281 * ipa2_cfg_ep - IPA end-point configuration
2282 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2283 * @ipa_ep_cfg: [in] IPA end-point configuration params
2284 *
2285 * This includes nat, header, mode, aggregation and route settings and is a one
2286 * shot API to configure the IPA end-point fully
2287 *
2288 * Returns: 0 on success, negative on failure
2289 *
2290 * Note: Should not be called from atomic context
2291 */
2292int ipa2_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg)
2293{
2294 int result = -EINVAL;
2295
2296 if (unlikely(!ipa_ctx)) {
2297 IPAERR("IPA driver was not initialized\n");
2298 return -EINVAL;
2299 }
2300
2301 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
2302 ipa_ctx->ep[clnt_hdl].valid == 0 || ipa_ep_cfg == NULL) {
2303 IPAERR("bad parm.\n");
2304 return -EINVAL;
2305 }
2306
2307 result = ipa2_cfg_ep_hdr(clnt_hdl, &ipa_ep_cfg->hdr);
2308 if (result)
2309 return result;
2310
2311 result = ipa2_cfg_ep_hdr_ext(clnt_hdl, &ipa_ep_cfg->hdr_ext);
2312 if (result)
2313 return result;
2314
2315 result = ipa2_cfg_ep_aggr(clnt_hdl, &ipa_ep_cfg->aggr);
2316 if (result)
2317 return result;
2318
2319 result = ipa2_cfg_ep_cfg(clnt_hdl, &ipa_ep_cfg->cfg);
2320 if (result)
2321 return result;
2322
2323 if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) {
2324 result = ipa2_cfg_ep_nat(clnt_hdl, &ipa_ep_cfg->nat);
2325 if (result)
2326 return result;
2327
2328 result = ipa2_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode);
2329 if (result)
2330 return result;
2331
2332 result = ipa2_cfg_ep_route(clnt_hdl, &ipa_ep_cfg->route);
2333 if (result)
2334 return result;
2335
2336 result = ipa2_cfg_ep_deaggr(clnt_hdl, &ipa_ep_cfg->deaggr);
2337 if (result)
2338 return result;
2339 } else {
2340 result = ipa2_cfg_ep_metadata_mask(clnt_hdl,
2341 &ipa_ep_cfg->metadata_mask);
2342 if (result)
2343 return result;
2344 }
2345
2346 return 0;
2347}
2348
2349const char *ipa_get_nat_en_str(enum ipa_nat_en_type nat_en)
2350{
2351 switch (nat_en) {
2352 case (IPA_BYPASS_NAT):
2353 return "NAT disabled";
2354 case (IPA_SRC_NAT):
2355 return "Source NAT";
2356 case (IPA_DST_NAT):
2357 return "Dst NAT";
2358 }
2359
2360 return "undefined";
2361}
2362
2363void _ipa_cfg_ep_nat_v1_1(u32 clnt_hdl,
2364 const struct ipa_ep_cfg_nat *ep_nat)
2365{
2366 u32 reg_val = 0;
2367
2368 IPA_SETFIELD_IN_REG(reg_val, ep_nat->nat_en,
2369 IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT,
2370 IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK);
2371
2372 ipa_write_reg(ipa_ctx->mmio,
2373 IPA_ENDP_INIT_NAT_N_OFST_v1_1(clnt_hdl),
2374 reg_val);
2375}
2376
2377void _ipa_cfg_ep_nat_v2_0(u32 clnt_hdl,
2378 const struct ipa_ep_cfg_nat *ep_nat)
2379{
2380 u32 reg_val = 0;
2381
2382 IPA_SETFIELD_IN_REG(reg_val, ep_nat->nat_en,
2383 IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT,
2384 IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK);
2385
2386 ipa_write_reg(ipa_ctx->mmio,
2387 IPA_ENDP_INIT_NAT_N_OFST_v2_0(clnt_hdl),
2388 reg_val);
2389}
2390
2391/**
2392 * ipa2_cfg_ep_nat() - IPA end-point NAT configuration
2393 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2394 * @ipa_ep_cfg: [in] IPA end-point configuration params
2395 *
2396 * Returns: 0 on success, negative on failure
2397 *
2398 * Note: Should not be called from atomic context
2399 */
2400int ipa2_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat)
2401{
2402 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
2403 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_nat == NULL) {
2404 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
2405 clnt_hdl,
2406 ipa_ctx->ep[clnt_hdl].valid);
2407 return -EINVAL;
2408 }
2409
2410 if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
2411 IPAERR("NAT does not apply to IPA out EP %d\n", clnt_hdl);
2412 return -EINVAL;
2413 }
2414
2415 IPADBG("pipe=%d, nat_en=%d(%s)\n",
2416 clnt_hdl,
2417 ep_nat->nat_en,
2418 ipa_get_nat_en_str(ep_nat->nat_en));
2419
2420 /* copy over EP cfg */
2421 ipa_ctx->ep[clnt_hdl].cfg.nat = *ep_nat;
2422
2423 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
2424
2425 ipa_ctx->ctrl->ipa_cfg_ep_nat(clnt_hdl, ep_nat);
2426
2427 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
2428
2429 return 0;
2430}
2431
/* No-op on IPA v1.1: status configuration is not supported there; only
 * a debug message is logged.
 */
static void _ipa_cfg_ep_status_v1_1(u32 clnt_hdl,
			const struct ipa_ep_cfg_status *ep_status)
{
	IPADBG("Not supported for version 1.1\n");
}
2437
2438static void _ipa_cfg_ep_status_v2_0(u32 clnt_hdl,
2439 const struct ipa_ep_cfg_status *ep_status)
2440{
2441 u32 reg_val = 0;
2442
2443 IPA_SETFIELD_IN_REG(reg_val, ep_status->status_en,
2444 IPA_ENDP_STATUS_n_STATUS_EN_SHFT,
2445 IPA_ENDP_STATUS_n_STATUS_EN_BMSK);
2446
2447 IPA_SETFIELD_IN_REG(reg_val, ep_status->status_ep,
2448 IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT,
2449 IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK);
2450
2451 ipa_write_reg(ipa_ctx->mmio,
2452 IPA_ENDP_STATUS_n_OFST(clnt_hdl),
2453 reg_val);
2454}
2455
2456/**
2457 * ipa2_cfg_ep_status() - IPA end-point status configuration
2458 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2459 * @ipa_ep_cfg: [in] IPA end-point configuration params
2460 *
2461 * Returns: 0 on success, negative on failure
2462 *
2463 * Note: Should not be called from atomic context
2464 */
2465int ipa2_cfg_ep_status(u32 clnt_hdl, const struct ipa_ep_cfg_status *ep_status)
2466{
2467 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
2468 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_status == NULL) {
2469 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
2470 clnt_hdl,
2471 ipa_ctx->ep[clnt_hdl].valid);
2472 return -EINVAL;
2473 }
2474
2475 IPADBG("pipe=%d, status_en=%d status_ep=%d\n",
2476 clnt_hdl,
2477 ep_status->status_en,
2478 ep_status->status_ep);
2479
2480 /* copy over EP cfg */
2481 ipa_ctx->ep[clnt_hdl].status = *ep_status;
2482
2483 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
2484
2485 ipa_ctx->ctrl->ipa_cfg_ep_status(clnt_hdl, ep_status);
2486
2487 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
2488
2489 return 0;
2490}
2491
/* No-op on IPA v1.1: per-EP cfg (frag/checksum offload) is not supported
 * there; only a debug message is logged.
 */
static void _ipa_cfg_ep_cfg_v1_1(u32 clnt_hdl,
				const struct ipa_ep_cfg_cfg *cfg)
{
	IPADBG("Not supported for version 1.1\n");
}
2497
2498static void _ipa_cfg_ep_cfg_v2_0(u32 clnt_hdl,
2499 const struct ipa_ep_cfg_cfg *cfg)
2500{
2501 u32 reg_val = 0;
2502
2503 IPA_SETFIELD_IN_REG(reg_val, cfg->frag_offload_en,
2504 IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT,
2505 IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK);
2506 IPA_SETFIELD_IN_REG(reg_val, cfg->cs_offload_en,
2507 IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT,
2508 IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK);
2509 IPA_SETFIELD_IN_REG(reg_val, cfg->cs_metadata_hdr_offset,
2510 IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT,
2511 IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK);
2512
2513 ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_CFG_n_OFST(clnt_hdl),
2514 reg_val);
2515}
2516
2517/**
2518 * ipa2_cfg_ep_cfg() - IPA end-point cfg configuration
2519 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2520 * @ipa_ep_cfg: [in] IPA end-point configuration params
2521 *
2522 * Returns: 0 on success, negative on failure
2523 *
2524 * Note: Should not be called from atomic context
2525 */
2526int ipa2_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg)
2527{
2528 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
2529 ipa_ctx->ep[clnt_hdl].valid == 0 || cfg == NULL) {
2530 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
2531 clnt_hdl,
2532 ipa_ctx->ep[clnt_hdl].valid);
2533 return -EINVAL;
2534 }
2535
2536 IPADBG("pipe=%d, frag_ofld_en=%d cs_ofld_en=%d mdata_hdr_ofst=%d\n",
2537 clnt_hdl,
2538 cfg->frag_offload_en,
2539 cfg->cs_offload_en,
2540 cfg->cs_metadata_hdr_offset);
2541
2542 /* copy over EP cfg */
2543 ipa_ctx->ep[clnt_hdl].cfg.cfg = *cfg;
2544
2545 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
2546
2547 ipa_ctx->ctrl->ipa_cfg_ep_cfg(clnt_hdl, cfg);
2548
2549 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
2550
2551 return 0;
2552}
2553
/* No-op on IPA v1.1: the metadata-mask register is not supported there;
 * only a debug message is logged.
 */
static void _ipa_cfg_ep_metadata_mask_v1_1(u32 clnt_hdl,
			const struct ipa_ep_cfg_metadata_mask *metadata_mask)
{
	IPADBG("Not supported for version 1.1\n");
}
2559
2560static void _ipa_cfg_ep_metadata_mask_v2_0(u32 clnt_hdl,
2561 const struct ipa_ep_cfg_metadata_mask *metadata_mask)
2562{
2563 u32 reg_val = 0;
2564
2565 IPA_SETFIELD_IN_REG(reg_val, metadata_mask->metadata_mask,
2566 IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT,
2567 IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK);
2568
2569 ipa_write_reg(ipa_ctx->mmio,
2570 IPA_ENDP_INIT_HDR_METADATA_MASK_n_OFST(clnt_hdl),
2571 reg_val);
2572}
2573
2574/**
2575 * ipa2_cfg_ep_metadata_mask() - IPA end-point meta-data mask configuration
2576 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2577 * @ipa_ep_cfg: [in] IPA end-point configuration params
2578 *
2579 * Returns: 0 on success, negative on failure
2580 *
2581 * Note: Should not be called from atomic context
2582 */
2583int ipa2_cfg_ep_metadata_mask(u32 clnt_hdl,
2584 const struct ipa_ep_cfg_metadata_mask *metadata_mask)
2585{
2586 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
2587 ipa_ctx->ep[clnt_hdl].valid == 0 || metadata_mask == NULL) {
2588 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
2589 clnt_hdl,
2590 ipa_ctx->ep[clnt_hdl].valid);
2591 return -EINVAL;
2592 }
2593
2594 IPADBG("pipe=%d, metadata_mask=0x%x\n",
2595 clnt_hdl,
2596 metadata_mask->metadata_mask);
2597
2598 /* copy over EP cfg */
2599 ipa_ctx->ep[clnt_hdl].cfg.metadata_mask = *metadata_mask;
2600
2601 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
2602
2603 ipa_ctx->ctrl->ipa_cfg_ep_metadata_mask(clnt_hdl, metadata_mask);
2604
2605 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
2606
2607 return 0;
2608}
2609
/*
 * _ipa_cfg_ep_hdr_v1_1() - program IPA_ENDP_INIT_HDR_n (v1.1 register
 * layout) for the given pipe.
 * @pipe_number: HW pipe index
 * @ep_hdr: header-processing settings to encode
 *
 * All fields are packed into a single 32-bit value via IPA_SETFIELD()
 * and written with one register write.
 */
void _ipa_cfg_ep_hdr_v1_1(u32 pipe_number,
		const struct ipa_ep_cfg_hdr *ep_hdr)
{
	u32 val = 0;

	/* OR all HDR fields together into one register image */
	val = IPA_SETFIELD(ep_hdr->hdr_len,
			IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK) |
		IPA_SETFIELD(ep_hdr->hdr_ofst_metadata_valid,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK) |
		IPA_SETFIELD(ep_hdr->hdr_ofst_metadata,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK) |
		IPA_SETFIELD(ep_hdr->hdr_additional_const_len,
			IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK) |
		IPA_SETFIELD(ep_hdr->hdr_ofst_pkt_size_valid,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK) |
		IPA_SETFIELD(ep_hdr->hdr_ofst_pkt_size,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK) |
		IPA_SETFIELD(ep_hdr->hdr_a5_mux,
			IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK);
	ipa_write_reg(ipa_ctx->mmio,
			IPA_ENDP_INIT_HDR_N_OFST_v1_1(pipe_number), val);
}
2639
/*
 * _ipa_cfg_ep_hdr_v2_0() - program IPA_ENDP_INIT_HDR_n (v2.0 register
 * layout) for the given pipe.
 * @pipe_number: HW pipe index
 * @ep_hdr: header-processing settings to encode
 *
 * Each field is set into reg_val with IPA_SETFIELD_IN_REG() and the
 * resulting image is written in one register write. Note the v2.0
 * layout carries two fields the v1.1 variant does not:
 * hdr_metadata_reg_valid and hdr_remove_additional (LEN_INC_DEAGG_HDR).
 */
void _ipa_cfg_ep_hdr_v2_0(u32 pipe_number,
		const struct ipa_ep_cfg_hdr *ep_hdr)
{
	u32 reg_val = 0;

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_metadata_reg_valid,
			IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_SHFT_v2,
			IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_BMSK_v2);

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_remove_additional,
			IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_SHFT_v2,
			IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_BMSK_v2);

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_a5_mux,
			IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_pkt_size,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_pkt_size_valid,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_additional_const_len,
			IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_metadata,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_metadata_valid,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_len,
			IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK);

	ipa_write_reg(ipa_ctx->mmio,
			IPA_ENDP_INIT_HDR_N_OFST_v2_0(pipe_number), reg_val);
}
2684
2685/**
2686 * ipa2_cfg_ep_hdr() - IPA end-point header configuration
2687 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2688 * @ipa_ep_cfg: [in] IPA end-point configuration params
2689 *
2690 * Returns: 0 on success, negative on failure
2691 *
2692 * Note: Should not be called from atomic context
2693 */
2694int ipa2_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr)
2695{
2696 struct ipa_ep_context *ep;
2697
2698 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
2699 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_hdr == NULL) {
2700 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
2701 clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
2702 return -EINVAL;
2703 }
2704 IPADBG("pipe=%d remove_additional=%d, a5_mux=%d, ofst_pkt_size=0x%x\n",
2705 clnt_hdl,
2706 ep_hdr->hdr_remove_additional,
2707 ep_hdr->hdr_a5_mux,
2708 ep_hdr->hdr_ofst_pkt_size);
2709
2710 IPADBG("ofst_pkt_size_valid=%d, additional_const_len=0x%x\n",
2711 ep_hdr->hdr_ofst_pkt_size_valid,
2712 ep_hdr->hdr_additional_const_len);
2713
2714 IPADBG("ofst_metadata=0x%x, ofst_metadata_valid=%d, len=0x%x",
2715 ep_hdr->hdr_ofst_metadata,
2716 ep_hdr->hdr_ofst_metadata_valid,
2717 ep_hdr->hdr_len);
2718
2719 ep = &ipa_ctx->ep[clnt_hdl];
2720
2721 /* copy over EP cfg */
2722 ep->cfg.hdr = *ep_hdr;
2723
2724 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
2725
2726 ipa_ctx->ctrl->ipa_cfg_ep_hdr(clnt_hdl, &ep->cfg.hdr);
2727
2728 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
2729
2730 return 0;
2731}
2732
/* No-op on IPA v1.1: the HDR_EXT register is not supported there; logs a
 * debug message and always returns 0.
 */
static int _ipa_cfg_ep_hdr_ext_v1_1(u32 clnt_hdl,
				const struct ipa_ep_cfg_hdr_ext *ep_hdr)
{
	IPADBG("Not supported for version 1.1\n");
	return 0;
}
2739
/*
 * _ipa_cfg_ep_hdr_ext() - version-independent part of HDR_EXT
 * register programming.
 * @clnt_hdl: HW pipe index
 * @ep_hdr_ext: extended-header settings to encode
 * @reg_val: partially built register image; callers pre-seed the
 *	version-specific pad-to-alignment field before calling
 *
 * Sets the remaining common fields into @reg_val and writes the image
 * to IPA_ENDP_INIT_HDR_EXT_n. The v2_0 offset macro is used for all
 * callers (v2.0, v2.5 and v2.6L variants).
 *
 * Always returns 0.
 */
static int _ipa_cfg_ep_hdr_ext(u32 clnt_hdl,
		const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext, u32 reg_val)
{
	/* HW encodes endianness as 0 = little, 1 = big */
	u8 hdr_endianness = ep_hdr_ext->hdr_little_endian ? 0 : 1;

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad_offset,
		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT,
		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_payload_len_inc_padding,
		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT,
		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad,
		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT,
		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad_valid,
		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT,
		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, hdr_endianness,
		IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT,
		IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK);

	ipa_write_reg(ipa_ctx->mmio,
		IPA_ENDP_INIT_HDR_EXT_n_OFST_v2_0(clnt_hdl), reg_val);

	return 0;
}
2770
2771static int _ipa_cfg_ep_hdr_ext_v2_0(u32 clnt_hdl,
2772 const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
2773{
2774 u32 reg_val = 0;
2775
2776 IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment,
2777 IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT,
2778 IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_0);
2779
2780 return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val);
2781}
2782
2783static int _ipa_cfg_ep_hdr_ext_v2_5(u32 clnt_hdl,
2784 const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
2785{
2786 u32 reg_val = 0;
2787
2788 IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment,
2789 IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT,
2790 IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_5);
2791
2792 return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val);
2793
2794}
2795
2796static int _ipa_cfg_ep_hdr_ext_v2_6L(u32 clnt_hdl,
2797 const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
2798{
2799 u32 reg_val = 0;
2800
2801 IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment,
2802 IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT,
2803 IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_5);
2804
2805 return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val);
2806
2807}
2808
2809/**
2810 * ipa2_cfg_ep_hdr_ext() - IPA end-point extended header configuration
2811 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2812 * @ep_hdr_ext: [in] IPA end-point configuration params
2813 *
2814 * Returns: 0 on success, negative on failure
2815 *
2816 * Note: Should not be called from atomic context
2817 */
2818int ipa2_cfg_ep_hdr_ext(u32 clnt_hdl,
2819 const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
2820{
2821 struct ipa_ep_context *ep;
2822
2823 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
2824 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_hdr_ext == NULL) {
2825 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
2826 clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
2827 return -EINVAL;
2828 }
2829
2830 IPADBG("pipe=%d hdr_pad_to_alignment=%d\n",
2831 clnt_hdl,
2832 ep_hdr_ext->hdr_pad_to_alignment);
2833
2834 IPADBG("hdr_total_len_or_pad_offset=%d\n",
2835 ep_hdr_ext->hdr_total_len_or_pad_offset);
2836
2837 IPADBG("hdr_payload_len_inc_padding=%d hdr_total_len_or_pad=%d\n",
2838 ep_hdr_ext->hdr_payload_len_inc_padding,
2839 ep_hdr_ext->hdr_total_len_or_pad);
2840
2841 IPADBG("hdr_total_len_or_pad_valid=%d hdr_little_endian=%d\n",
2842 ep_hdr_ext->hdr_total_len_or_pad_valid,
2843 ep_hdr_ext->hdr_little_endian);
2844
2845 ep = &ipa_ctx->ep[clnt_hdl];
2846
2847 /* copy over EP cfg */
2848 ep->cfg.hdr_ext = *ep_hdr_ext;
2849
2850 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
2851
2852 ipa_ctx->ctrl->ipa_cfg_ep_hdr_ext(clnt_hdl, &ep->cfg.hdr_ext);
2853
2854 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
2855
2856 return 0;
2857}
2858
2859/**
2860 * ipa2_cfg_ep_hdr() - IPA end-point Control configuration
2861 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2862 * @ipa_ep_cfg_ctrl: [in] IPA end-point configuration params
2863 *
2864 * Returns: 0 on success, negative on failure
2865 */
2866int ipa2_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
2867{
2868 u32 reg_val = 0;
2869
2870 if (clnt_hdl >= ipa_ctx->ipa_num_pipes || ep_ctrl == NULL) {
2871 IPAERR("bad parm, clnt_hdl = %d\n", clnt_hdl);
2872 return -EINVAL;
2873 }
2874
2875 IPADBG("pipe=%d ep_suspend=%d, ep_delay=%d\n",
2876 clnt_hdl,
2877 ep_ctrl->ipa_ep_suspend,
2878 ep_ctrl->ipa_ep_delay);
2879
2880 IPA_SETFIELD_IN_REG(reg_val, ep_ctrl->ipa_ep_suspend,
2881 IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_SHFT,
2882 IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_BMSK);
2883
2884 IPA_SETFIELD_IN_REG(reg_val, ep_ctrl->ipa_ep_delay,
2885 IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT,
2886 IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK);
2887
2888 ipa_write_reg(ipa_ctx->mmio,
2889 IPA_ENDP_INIT_CTRL_N_OFST(clnt_hdl), reg_val);
2890
2891 return 0;
2892
2893}
2894
2895/**
2896 * ipa_cfg_aggr_cntr_granularity() - granularity of the AGGR timer configuration
2897 * @aggr_granularity: [in] defines the granularity of AGGR timers
2898 * number of units of 1/32msec
2899 *
2900 * Returns: 0 on success, negative on failure
2901 */
2902int ipa_cfg_aggr_cntr_granularity(u8 aggr_granularity)
2903{
2904 u32 reg_val = 0;
2905
2906 if (aggr_granularity <= IPA_AGGR_GRAN_MIN ||
2907 aggr_granularity > IPA_AGGR_GRAN_MAX) {
2908 IPAERR("bad param, aggr_granularity = %d\n",
2909 aggr_granularity);
2910 return -EINVAL;
2911 }
2912 IPADBG("aggr_granularity=%d\n", aggr_granularity);
2913
2914 reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_COUNTER_CFG_OFST);
2915 reg_val = (reg_val & ~IPA_COUNTER_CFG_AGGR_GRAN_BMSK);
2916
2917 IPA_SETFIELD_IN_REG(reg_val, aggr_granularity - 1,
2918 IPA_COUNTER_CFG_AGGR_GRAN_SHFT,
2919 IPA_COUNTER_CFG_AGGR_GRAN_BMSK);
2920
2921 ipa_write_reg(ipa_ctx->mmio,
2922 IPA_COUNTER_CFG_OFST, reg_val);
2923
2924 return 0;
2925
2926}
2927EXPORT_SYMBOL(ipa_cfg_aggr_cntr_granularity);
2928
2929/**
2930 * ipa_cfg_eot_coal_cntr_granularity() - granularity of EOT_COAL timer
2931 * configuration
2932 * @eot_coal_granularity: defines the granularity of EOT_COAL timers
2933 * number of units of 1/32msec
2934 *
2935 * Returns: 0 on success, negative on failure
2936 */
2937int ipa_cfg_eot_coal_cntr_granularity(u8 eot_coal_granularity)
2938{
2939 u32 reg_val = 0;
2940
2941 if (eot_coal_granularity <= IPA_EOT_COAL_GRAN_MIN ||
2942 eot_coal_granularity > IPA_EOT_COAL_GRAN_MAX) {
2943 IPAERR("bad parm, eot_coal_granularity = %d\n",
2944 eot_coal_granularity);
2945 return -EINVAL;
2946 }
2947 IPADBG("eot_coal_granularity=%d\n", eot_coal_granularity);
2948
2949 reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_COUNTER_CFG_OFST);
2950 reg_val = (reg_val & ~IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK);
2951
2952 IPA_SETFIELD_IN_REG(reg_val, eot_coal_granularity - 1,
2953 IPA_COUNTER_CFG_EOT_COAL_GRAN_SHFT,
2954 IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK);
2955
2956 ipa_write_reg(ipa_ctx->mmio,
2957 IPA_COUNTER_CFG_OFST, reg_val);
2958
2959 return 0;
2960
2961}
2962EXPORT_SYMBOL(ipa_cfg_eot_coal_cntr_granularity);
2963
2964const char *ipa_get_mode_type_str(enum ipa_mode_type mode)
2965{
2966 switch (mode) {
2967 case (IPA_BASIC):
2968 return "Basic";
2969 case (IPA_ENABLE_FRAMING_HDLC):
2970 return "HDLC framing";
2971 case (IPA_ENABLE_DEFRAMING_HDLC):
2972 return "HDLC de-framing";
2973 case (IPA_DMA):
2974 return "DMA";
2975 }
2976
2977 return "undefined";
2978}
2979
2980void _ipa_cfg_ep_mode_v1_1(u32 pipe_number, u32 dst_pipe_number,
2981 const struct ipa_ep_cfg_mode *ep_mode)
2982{
2983 u32 reg_val = 0;
2984
2985 IPA_SETFIELD_IN_REG(reg_val, ep_mode->mode,
2986 IPA_ENDP_INIT_MODE_N_MODE_SHFT,
2987 IPA_ENDP_INIT_MODE_N_MODE_BMSK);
2988
2989 IPA_SETFIELD_IN_REG(reg_val, dst_pipe_number,
2990 IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v1_1,
2991 IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v1_1);
2992
2993 ipa_write_reg(ipa_ctx->mmio,
2994 IPA_ENDP_INIT_MODE_N_OFST_v1_1(pipe_number), reg_val);
2995}
2996
2997void _ipa_cfg_ep_mode_v2_0(u32 pipe_number, u32 dst_pipe_number,
2998 const struct ipa_ep_cfg_mode *ep_mode)
2999{
3000 u32 reg_val = 0;
3001
3002 IPA_SETFIELD_IN_REG(reg_val, ep_mode->mode,
3003 IPA_ENDP_INIT_MODE_N_MODE_SHFT,
3004 IPA_ENDP_INIT_MODE_N_MODE_BMSK);
3005
3006 IPA_SETFIELD_IN_REG(reg_val, dst_pipe_number,
3007 IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v2_0,
3008 IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v2_0);
3009
3010 ipa_write_reg(ipa_ctx->mmio,
3011 IPA_ENDP_INIT_MODE_N_OFST_v2_0(pipe_number), reg_val);
3012}
3013
3014/**
3015 * ipa2_cfg_ep_mode() - IPA end-point mode configuration
3016 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
3017 * @ipa_ep_cfg: [in] IPA end-point configuration params
3018 *
3019 * Returns: 0 on success, negative on failure
3020 *
3021 * Note: Should not be called from atomic context
3022 */
3023int ipa2_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode)
3024{
3025 int ep;
3026
3027 if (unlikely(!ipa_ctx)) {
3028 IPAERR("IPA driver was not initialized\n");
3029 return -EINVAL;
3030 }
3031
3032 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
3033 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_mode == NULL) {
3034 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
3035 clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
3036 return -EINVAL;
3037 }
3038
3039 if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
3040 IPAERR("MODE does not apply to IPA out EP %d\n", clnt_hdl);
3041 return -EINVAL;
3042 }
3043
3044 ep = ipa2_get_ep_mapping(ep_mode->dst);
3045 if (ep == -1 && ep_mode->mode == IPA_DMA) {
3046 IPAERR("dst %d does not exist\n", ep_mode->dst);
3047 return -EINVAL;
3048 }
3049
3050 WARN_ON(ep_mode->mode == IPA_DMA && IPA_CLIENT_IS_PROD(ep_mode->dst));
3051
3052 if (!IPA_CLIENT_IS_CONS(ep_mode->dst))
3053 ep = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
3054
3055 IPADBG("pipe=%d mode=%d(%s), dst_client_number=%d",
3056 clnt_hdl,
3057 ep_mode->mode,
3058 ipa_get_mode_type_str(ep_mode->mode),
3059 ep_mode->dst);
3060
3061 /* copy over EP cfg */
3062 ipa_ctx->ep[clnt_hdl].cfg.mode = *ep_mode;
3063 ipa_ctx->ep[clnt_hdl].dst_pipe_index = ep;
3064
3065 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
3066
3067 ipa_ctx->ctrl->ipa_cfg_ep_mode(clnt_hdl,
3068 ipa_ctx->ep[clnt_hdl].dst_pipe_index,
3069 ep_mode);
3070
3071 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
3072
3073 return 0;
3074}
3075
3076const char *get_aggr_enable_str(enum ipa_aggr_en_type aggr_en)
3077{
3078 switch (aggr_en) {
3079 case (IPA_BYPASS_AGGR):
3080 return "no aggregation";
3081 case (IPA_ENABLE_AGGR):
3082 return "aggregation enabled";
3083 case (IPA_ENABLE_DEAGGR):
3084 return "de-aggregation enabled";
3085 }
3086
3087 return "undefined";
3088}
3089
3090const char *get_aggr_type_str(enum ipa_aggr_type aggr_type)
3091{
3092 switch (aggr_type) {
3093 case (IPA_MBIM_16):
3094 return "MBIM_16";
3095 case (IPA_HDLC):
3096 return "HDLC";
3097 case (IPA_TLP):
3098 return "TLP";
3099 case (IPA_RNDIS):
3100 return "RNDIS";
3101 case (IPA_GENERIC):
3102 return "GENERIC";
3103 case (IPA_QCMAP):
3104 return "QCMAP";
3105 }
3106 return "undefined";
3107}
3108
3109void _ipa_cfg_ep_aggr_v1_1(u32 pipe_number,
3110 const struct ipa_ep_cfg_aggr *ep_aggr)
3111{
3112 u32 reg_val = 0;
3113
3114 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_en,
3115 IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT,
3116 IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK);
3117
3118 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr,
3119 IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT,
3120 IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK);
3121
3122 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_byte_limit,
3123 IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT,
3124 IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK);
3125
3126 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_time_limit,
3127 IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT,
3128 IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK);
3129
3130 ipa_write_reg(ipa_ctx->mmio,
3131 IPA_ENDP_INIT_AGGR_N_OFST_v1_1(pipe_number), reg_val);
3132}
3133
3134void _ipa_cfg_ep_aggr_v2_0(u32 pipe_number,
3135 const struct ipa_ep_cfg_aggr *ep_aggr)
3136{
3137 u32 reg_val = 0;
3138
3139 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_en,
3140 IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT,
3141 IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK);
3142
3143 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr,
3144 IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT,
3145 IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK);
3146
3147 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_byte_limit,
3148 IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT,
3149 IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK);
3150
3151 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_time_limit,
3152 IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT,
3153 IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK);
3154
3155 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_pkt_limit,
3156 IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT,
3157 IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK);
3158
3159 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_sw_eof_active,
3160 IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT,
3161 IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK);
3162
3163 ipa_write_reg(ipa_ctx->mmio,
3164 IPA_ENDP_INIT_AGGR_N_OFST_v2_0(pipe_number), reg_val);
3165}
3166
3167/**
3168 * ipa2_cfg_ep_aggr() - IPA end-point aggregation configuration
3169 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
3170 * @ipa_ep_cfg: [in] IPA end-point configuration params
3171 *
3172 * Returns: 0 on success, negative on failure
3173 *
3174 * Note: Should not be called from atomic context
3175 */
3176int ipa2_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr)
3177{
3178 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
3179 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_aggr == NULL) {
3180 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
3181 clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
3182 return -EINVAL;
3183 }
3184
3185 IPADBG("pipe=%d en=%d(%s), type=%d(%s), byte_limit=%d, time_limit=%d\n",
3186 clnt_hdl,
3187 ep_aggr->aggr_en,
3188 get_aggr_enable_str(ep_aggr->aggr_en),
3189 ep_aggr->aggr,
3190 get_aggr_type_str(ep_aggr->aggr),
3191 ep_aggr->aggr_byte_limit,
3192 ep_aggr->aggr_time_limit);
3193
3194 /* copy over EP cfg */
3195 ipa_ctx->ep[clnt_hdl].cfg.aggr = *ep_aggr;
3196
3197 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
3198
3199 ipa_ctx->ctrl->ipa_cfg_ep_aggr(clnt_hdl, ep_aggr);
3200
3201 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
3202
3203 return 0;
3204}
3205
3206void _ipa_cfg_ep_route_v1_1(u32 pipe_index, u32 rt_tbl_index)
3207{
3208 int reg_val = 0;
3209
3210 IPA_SETFIELD_IN_REG(reg_val, rt_tbl_index,
3211 IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT,
3212 IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK);
3213
3214 ipa_write_reg(ipa_ctx->mmio,
3215 IPA_ENDP_INIT_ROUTE_N_OFST_v1_1(pipe_index),
3216 reg_val);
3217}
3218
3219void _ipa_cfg_ep_route_v2_0(u32 pipe_index, u32 rt_tbl_index)
3220{
3221 int reg_val = 0;
3222
3223 IPA_SETFIELD_IN_REG(reg_val, rt_tbl_index,
3224 IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT,
3225 IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK);
3226
3227 ipa_write_reg(ipa_ctx->mmio,
3228 IPA_ENDP_INIT_ROUTE_N_OFST_v2_0(pipe_index),
3229 reg_val);
3230}
3231
3232/**
3233 * ipa2_cfg_ep_route() - IPA end-point routing configuration
3234 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
3235 * @ipa_ep_cfg: [in] IPA end-point configuration params
3236 *
3237 * Returns: 0 on success, negative on failure
3238 *
3239 * Note: Should not be called from atomic context
3240 */
3241int ipa2_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route)
3242{
3243 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
3244 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_route == NULL) {
3245 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
3246 clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
3247 return -EINVAL;
3248 }
3249
3250 if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
3251 IPAERR("ROUTE does not apply to IPA out EP %d\n",
3252 clnt_hdl);
3253 return -EINVAL;
3254 }
3255
3256 /*
3257 * if DMA mode was configured previously for this EP, return with
3258 * success
3259 */
3260 if (ipa_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA) {
3261 IPADBG("DMA enabled for ep %d, dst pipe is part of DMA\n",
3262 clnt_hdl);
3263 return 0;
3264 }
3265
3266 if (ep_route->rt_tbl_hdl)
3267 IPAERR("client specified non-zero RT TBL hdl - ignore it\n");
3268
3269 IPADBG("pipe=%d, rt_tbl_hdl=%d\n",
3270 clnt_hdl,
3271 ep_route->rt_tbl_hdl);
3272
3273 /* always use "default" routing table when programming EP ROUTE reg */
3274 if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0)
3275 ipa_ctx->ep[clnt_hdl].rt_tbl_idx =
3276 IPA_MEM_PART(v4_apps_rt_index_lo);
3277 else
3278 ipa_ctx->ep[clnt_hdl].rt_tbl_idx = 0;
3279
3280 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
3281
3282 ipa_ctx->ctrl->ipa_cfg_ep_route(clnt_hdl,
3283 ipa_ctx->ep[clnt_hdl].rt_tbl_idx);
3284
3285 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
3286
3287 return 0;
3288}
3289
/* Program IPA v1.1 HOLB registers: enable bit first, then the timer.
 * The u16 cast is lossless here — v1 max timer value is 511
 * (IPA_V1_MAX_HOLB_TMR_VAL).
 */
void _ipa_cfg_ep_holb_v1_1(u32 pipe_number,
			const struct ipa_ep_cfg_holb *ep_holb)
{
	ipa_write_reg(ipa_ctx->mmio,
		IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v1_1(pipe_number),
		ep_holb->en);

	ipa_write_reg(ipa_ctx->mmio,
		IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v1_1(pipe_number),
		(u16)ep_holb->tmr_val);
}
3301
/* Program IPA v2.0 HOLB registers: enable bit first, then the timer.
 * The u16 cast is lossless here — v2.0 max timer value is 65535
 * (IPA_V2_0_MAX_HOLB_TMR_VAL).
 */
void _ipa_cfg_ep_holb_v2_0(u32 pipe_number,
			const struct ipa_ep_cfg_holb *ep_holb)
{
	ipa_write_reg(ipa_ctx->mmio,
		IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number),
		ep_holb->en);

	ipa_write_reg(ipa_ctx->mmio,
		IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number),
		(u16)ep_holb->tmr_val);
}
3313
/* Program IPA v2.5 HOLB registers: enable bit first, then the timer.
 * Unlike v1.1/v2.0, the full 32-bit timer value is written (v2.5
 * supports a 32-bit timer — IPA_V2_5_MAX_HOLB_TMR_VAL).
 */
void _ipa_cfg_ep_holb_v2_5(u32 pipe_number,
			const struct ipa_ep_cfg_holb *ep_holb)
{
	ipa_write_reg(ipa_ctx->mmio,
		IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number),
		ep_holb->en);

	ipa_write_reg(ipa_ctx->mmio,
		IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number),
		ep_holb->tmr_val);
}
3325
3326void _ipa_cfg_ep_holb_v2_6L(u32 pipe_number,
3327 const struct ipa_ep_cfg_holb *ep_holb)
3328{
3329 ipa_write_reg(ipa_ctx->mmio,
3330 IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number),
3331 ep_holb->en);
3332
3333 ipa_write_reg(ipa_ctx->mmio,
3334 IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number),
3335 ep_holb->tmr_val);
3336}
3337
3338/**
3339 * ipa2_cfg_ep_holb() - IPA end-point holb configuration
3340 *
3341 * If an IPA producer pipe is full, IPA HW by default will block
3342 * indefinitely till space opens up. During this time no packets
3343 * including those from unrelated pipes will be processed. Enabling
3344 * HOLB means IPA HW will be allowed to drop packets as/when needed
3345 * and indefinite blocking is avoided.
3346 *
3347 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
3348 * @ipa_ep_cfg: [in] IPA end-point configuration params
3349 *
3350 * Returns: 0 on success, negative on failure
3351 */
3352int ipa2_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb)
3353{
3354 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
3355 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_holb == NULL ||
3356 ep_holb->tmr_val > ipa_ctx->ctrl->max_holb_tmr_val ||
3357 ep_holb->en > 1) {
3358 IPAERR("bad parm.\n");
3359 return -EINVAL;
3360 }
3361
3362 if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) {
3363 IPAERR("HOLB does not apply to IPA in EP %d\n", clnt_hdl);
3364 return -EINVAL;
3365 }
3366
3367 if (!ipa_ctx->ctrl->ipa_cfg_ep_holb) {
3368 IPAERR("HOLB is not supported for this IPA core\n");
3369 return -EINVAL;
3370 }
3371
3372 ipa_ctx->ep[clnt_hdl].holb = *ep_holb;
3373
3374 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
3375
3376 ipa_ctx->ctrl->ipa_cfg_ep_holb(clnt_hdl, ep_holb);
3377
3378 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
3379
3380 IPADBG("cfg holb %u ep=%d tmr=%d\n", ep_holb->en, clnt_hdl,
3381 ep_holb->tmr_val);
3382
3383 return 0;
3384}
3385
3386/**
3387 * ipa2_cfg_ep_holb_by_client() - IPA end-point holb configuration
3388 *
3389 * Wrapper function for ipa_cfg_ep_holb() with client name instead of
3390 * client handle. This function is used for clients that does not have
3391 * client handle.
3392 *
3393 * @client: [in] client name
3394 * @ipa_ep_cfg: [in] IPA end-point configuration params
3395 *
3396 * Returns: 0 on success, negative on failure
3397 */
3398int ipa2_cfg_ep_holb_by_client(enum ipa_client_type client,
3399 const struct ipa_ep_cfg_holb *ep_holb)
3400{
3401 return ipa2_cfg_ep_holb(ipa2_get_ep_mapping(client), ep_holb);
3402}
3403
/* Deaggregation stub for IPA v1.1: no DEAGGR registers exist on this
 * HW version, so this logs and reports success without programming
 * anything.
 */
static int _ipa_cfg_ep_deaggr_v1_1(u32 clnt_hdl,
				   const struct ipa_ep_cfg_deaggr *ep_deaggr)
{
	IPADBG("Not supported for version 1.1\n");
	return 0;
}
3410
3411static int _ipa_cfg_ep_deaggr_v2_0(u32 clnt_hdl,
3412 const struct ipa_ep_cfg_deaggr *ep_deaggr)
3413{
3414 u32 reg_val = 0;
3415
3416 IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->deaggr_hdr_len,
3417 IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT,
3418 IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK);
3419
3420 IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->packet_offset_valid,
3421 IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT,
3422 IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK);
3423
3424 IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->packet_offset_location,
3425 IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT,
3426 IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK);
3427
3428 IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->max_packet_len,
3429 IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT,
3430 IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK);
3431
3432 ipa_write_reg(ipa_ctx->mmio,
3433 IPA_ENDP_INIT_DEAGGR_n_OFST_v2_0(clnt_hdl), reg_val);
3434
3435 return 0;
3436}
3437
3438/**
3439 * ipa2_cfg_ep_deaggr() - IPA end-point deaggregation configuration
3440 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
3441 * @ep_deaggr: [in] IPA end-point configuration params
3442 *
3443 * Returns: 0 on success, negative on failure
3444 *
3445 * Note: Should not be called from atomic context
3446 */
3447int ipa2_cfg_ep_deaggr(u32 clnt_hdl,
3448 const struct ipa_ep_cfg_deaggr *ep_deaggr)
3449{
3450 struct ipa_ep_context *ep;
3451
3452 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
3453 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_deaggr == NULL) {
3454 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
3455 clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
3456 return -EINVAL;
3457 }
3458
3459 IPADBG("pipe=%d deaggr_hdr_len=%d\n",
3460 clnt_hdl,
3461 ep_deaggr->deaggr_hdr_len);
3462
3463 IPADBG("packet_offset_valid=%d\n",
3464 ep_deaggr->packet_offset_valid);
3465
3466 IPADBG("packet_offset_location=%d max_packet_len=%d\n",
3467 ep_deaggr->packet_offset_location,
3468 ep_deaggr->max_packet_len);
3469
3470 ep = &ipa_ctx->ep[clnt_hdl];
3471
3472 /* copy over EP cfg */
3473 ep->cfg.deaggr = *ep_deaggr;
3474
3475 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
3476
3477 ipa_ctx->ctrl->ipa_cfg_ep_deaggr(clnt_hdl, &ep->cfg.deaggr);
3478
3479 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
3480
3481 return 0;
3482}
3483
/* Metadata stub for IPA v1.1: no HDR_METADATA register exists on this
 * HW version, so this only logs.
 */
static void _ipa_cfg_ep_metadata_v1_1(u32 pipe_number,
				      const struct ipa_ep_cfg_metadata *meta)
{
	IPADBG("Not supported for version 1.1\n");
}
3489
3490static void _ipa_cfg_ep_metadata_v2_0(u32 pipe_number,
3491 const struct ipa_ep_cfg_metadata *meta)
3492{
3493 u32 reg_val = 0;
3494
3495 IPA_SETFIELD_IN_REG(reg_val, meta->qmap_id,
3496 IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT,
3497 IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK);
3498
3499 ipa_write_reg(ipa_ctx->mmio,
3500 IPA_ENDP_INIT_HDR_METADATA_n_OFST(pipe_number),
3501 reg_val);
3502}
3503
3504/**
3505 * ipa2_cfg_ep_metadata() - IPA end-point metadata configuration
3506 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
3507 * @ipa_ep_cfg: [in] IPA end-point configuration params
3508 *
3509 * Returns: 0 on success, negative on failure
3510 *
3511 * Note: Should not be called from atomic context
3512 */
3513int ipa2_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md)
3514{
3515 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
3516 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_md == NULL) {
3517 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
3518 clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
3519 return -EINVAL;
3520 }
3521
3522 IPADBG("pipe=%d, mux id=%d\n", clnt_hdl, ep_md->qmap_id);
3523
3524 /* copy over EP cfg */
3525 ipa_ctx->ep[clnt_hdl].cfg.meta = *ep_md;
3526
3527 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
3528
3529 ipa_ctx->ctrl->ipa_cfg_ep_metadata(clnt_hdl, ep_md);
3530 ipa_ctx->ep[clnt_hdl].cfg.hdr.hdr_metadata_reg_valid = 1;
3531 ipa_ctx->ctrl->ipa_cfg_ep_hdr(clnt_hdl, &ipa_ctx->ep[clnt_hdl].cfg.hdr);
3532
3533 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
3534
3535 return 0;
3536}
3537EXPORT_SYMBOL(ipa2_cfg_ep_metadata);
3538
3539int ipa2_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
3540{
3541 struct ipa_ep_cfg_metadata meta;
3542 struct ipa_ep_context *ep;
3543 int ipa_ep_idx;
3544 int result = -EINVAL;
3545
3546 if (unlikely(!ipa_ctx)) {
3547 IPAERR("IPA driver was not initialized\n");
3548 return -EINVAL;
3549 }
3550
3551 if (param_in->client >= IPA_CLIENT_MAX) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05303552 IPAERR_RL("bad parm client:%d\n", param_in->client);
Amir Levy9659e592016-10-27 18:08:27 +03003553 goto fail;
3554 }
3555
3556 ipa_ep_idx = ipa2_get_ep_mapping(param_in->client);
3557 if (ipa_ep_idx == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05303558 IPAERR_RL("Invalid client.\n");
Amir Levy9659e592016-10-27 18:08:27 +03003559 goto fail;
3560 }
3561
3562 ep = &ipa_ctx->ep[ipa_ep_idx];
3563 if (!ep->valid) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05303564 IPAERR_RL("EP not allocated.\n");
Amir Levy9659e592016-10-27 18:08:27 +03003565 goto fail;
3566 }
3567
3568 meta.qmap_id = param_in->qmap_id;
3569 if (param_in->client == IPA_CLIENT_USB_PROD ||
3570 param_in->client == IPA_CLIENT_HSIC1_PROD ||
Sunil Paidimarri5139aa22017-02-13 11:07:32 -08003571 param_in->client == IPA_CLIENT_ODU_PROD ||
3572 param_in->client == IPA_CLIENT_ETHERNET_PROD) {
Amir Levy9659e592016-10-27 18:08:27 +03003573 result = ipa2_cfg_ep_metadata(ipa_ep_idx, &meta);
3574 } else if (param_in->client == IPA_CLIENT_WLAN1_PROD) {
3575 ipa_ctx->ep[ipa_ep_idx].cfg.meta = meta;
3576 result = ipa_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id);
3577 if (result)
Utkarsh Saxenae9782812017-05-26 17:20:32 +05303578 IPAERR_RL("qmap_id %d write failed on ep=%d\n",
Amir Levy9659e592016-10-27 18:08:27 +03003579 meta.qmap_id, ipa_ep_idx);
3580 result = 0;
3581 }
3582
3583fail:
3584 return result;
3585}
3586
3587/**
3588 * ipa_dump_buff_internal() - dumps buffer for debug purposes
3589 * @base: buffer base address
3590 * @phy_base: buffer physical base address
3591 * @size: size of the buffer
3592 */
3593void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size)
3594{
3595 int i;
3596 u32 *cur = (u32 *)base;
3597 u8 *byt;
3598
3599 IPADBG("system phys addr=%pa len=%u\n", &phy_base, size);
3600 for (i = 0; i < size / 4; i++) {
3601 byt = (u8 *)(cur + i);
3602 IPADBG("%2d %08x %02x %02x %02x %02x\n", i, *(cur + i),
3603 byt[0], byt[1], byt[2], byt[3]);
3604 }
3605 IPADBG("END\n");
3606}
3607
3608/**
3609 * void ipa_rx_timeout_min_max_calc() - calc min max timeout time of rx polling
3610 * @time: time fom dtsi entry or from debugfs file system
3611 * @min: rx polling min timeout
3612 * @max: rx polling max timeout
3613 * Maximum time could be of 10Msec allowed.
3614 */
3615void ipa_rx_timeout_min_max_calc(u32 *min, u32 *max, s8 time)
3616{
3617 if ((time >= MIN_RX_POLL_TIME) &&
3618 (time <= MAX_RX_POLL_TIME)) {
3619 *min = (time * MSEC) + LOWER_CUTOFF;
3620 *max = (time * MSEC) + UPPER_CUTOFF;
3621 } else {
3622 /* Setting up the default min max time */
3623 IPADBG("Setting up default rx polling timeout\n");
3624 *min = (MIN_RX_POLL_TIME * MSEC) +
3625 LOWER_CUTOFF;
3626 *max = (MIN_RX_POLL_TIME * MSEC) +
3627 UPPER_CUTOFF;
3628 }
3629 IPADBG("Rx polling timeout Min = %u len = %u\n", *min, *max);
3630}
3631
3632/**
3633 * ipa_pipe_mem_init() - initialize the pipe memory
3634 * @start_ofst: start offset
3635 * @size: size
3636 *
3637 * Return value:
3638 * 0: success
3639 * -ENOMEM: no memory
3640 */
3641int ipa_pipe_mem_init(u32 start_ofst, u32 size)
3642{
3643 int res;
3644 u32 aligned_start_ofst;
3645 u32 aligned_size;
3646 struct gen_pool *pool;
3647
3648 if (!size) {
3649 IPAERR("no IPA pipe memory allocated\n");
3650 goto fail;
3651 }
3652
3653 aligned_start_ofst = IPA_HW_TABLE_ALIGNMENT(start_ofst);
3654 aligned_size = size - (aligned_start_ofst - start_ofst);
3655
3656 IPADBG("start_ofst=%u aligned_start_ofst=%u size=%u aligned_size=%u\n",
3657 start_ofst, aligned_start_ofst, size, aligned_size);
3658
3659 /* allocation order of 8 i.e. 128 bytes, global pool */
3660 pool = gen_pool_create(8, -1);
3661 if (!pool) {
3662 IPAERR("Failed to create a new memory pool.\n");
3663 goto fail;
3664 }
3665
3666 res = gen_pool_add(pool, aligned_start_ofst, aligned_size, -1);
3667 if (res) {
3668 IPAERR("Failed to add memory to IPA pipe pool\n");
3669 goto err_pool_add;
3670 }
3671
3672 ipa_ctx->pipe_mem_pool = pool;
3673 return 0;
3674
3675err_pool_add:
3676 gen_pool_destroy(pool);
3677fail:
3678 return -ENOMEM;
3679}
3680
3681/**
3682 * ipa_pipe_mem_alloc() - allocate pipe memory
3683 * @ofst: offset
3684 * @size: size
3685 *
3686 * Return value:
3687 * 0: success
3688 */
3689int ipa_pipe_mem_alloc(u32 *ofst, u32 size)
3690{
3691 u32 vaddr;
3692 int res = -1;
3693
3694 if (!ipa_ctx->pipe_mem_pool || !size) {
3695 IPAERR("failed size=%u pipe_mem_pool=%p\n", size,
3696 ipa_ctx->pipe_mem_pool);
3697 return res;
3698 }
3699
3700 vaddr = gen_pool_alloc(ipa_ctx->pipe_mem_pool, size);
3701
3702 if (vaddr) {
3703 *ofst = vaddr;
3704 res = 0;
3705 IPADBG("size=%u ofst=%u\n", size, vaddr);
3706 } else {
3707 IPAERR("size=%u failed\n", size);
3708 }
3709
3710 return res;
3711}
3712
3713/**
3714 * ipa_pipe_mem_free() - free pipe memory
3715 * @ofst: offset
3716 * @size: size
3717 *
3718 * Return value:
3719 * 0: success
3720 */
3721int ipa_pipe_mem_free(u32 ofst, u32 size)
3722{
3723 IPADBG("size=%u ofst=%u\n", size, ofst);
3724 if (ipa_ctx->pipe_mem_pool && size)
3725 gen_pool_free(ipa_ctx->pipe_mem_pool, ofst, size);
3726 return 0;
3727}
3728
3729/**
3730 * ipa2_set_aggr_mode() - Set the aggregation mode which is a global setting
3731 * @mode: [in] the desired aggregation mode for e.g. straight MBIM, QCNCM,
3732 * etc
3733 *
3734 * Returns: 0 on success
3735 */
3736int ipa2_set_aggr_mode(enum ipa_aggr_mode mode)
3737{
3738 u32 reg_val;
3739
3740 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
3741 reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_QCNCM_OFST);
3742 ipa_write_reg(ipa_ctx->mmio, IPA_QCNCM_OFST, (mode & 0x1) |
3743 (reg_val & 0xfffffffe));
3744 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
3745
3746 return 0;
3747}
3748
3749/**
3750 * ipa2_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
3751 * mode
3752 * @sig: [in] the first 3 bytes of QCNCM NDP signature (expected to be
3753 * "QND")
3754 *
3755 * Set the NDP signature used for QCNCM aggregation mode. The fourth byte
3756 * (expected to be 'P') needs to be set using the header addition mechanism
3757 *
3758 * Returns: 0 on success, negative on failure
3759 */
3760int ipa2_set_qcncm_ndp_sig(char sig[3])
3761{
3762 u32 reg_val;
3763
3764 if (sig == NULL) {
3765 IPAERR("bad argument for ipa_set_qcncm_ndp_sig/n");
3766 return -EINVAL;
3767 }
3768 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
3769 reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_QCNCM_OFST);
3770 ipa_write_reg(ipa_ctx->mmio, IPA_QCNCM_OFST, sig[0] << 20 |
3771 (sig[1] << 12) | (sig[2] << 4) |
3772 (reg_val & 0xf000000f));
3773 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
3774
3775 return 0;
3776}
3777
3778/**
3779 * ipa2_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
3780 * configuration
3781 * @enable: [in] true for single NDP/MBIM; false otherwise
3782 *
3783 * Returns: 0 on success
3784 */
3785int ipa2_set_single_ndp_per_mbim(bool enable)
3786{
3787 u32 reg_val;
3788
3789 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
3790 reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST);
3791 ipa_write_reg(ipa_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST,
3792 (enable & 0x1) | (reg_val & 0xfffffffe));
3793 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
3794
3795 return 0;
3796}
3797
3798/**
3799 * ipa_set_hw_timer_fix_for_mbim_aggr() - Enable/disable HW timer fix
3800 * for MBIM aggregation.
3801 * @enable: [in] true for enable HW fix; false otherwise
3802 *
3803 * Returns: 0 on success
3804 */
3805int ipa_set_hw_timer_fix_for_mbim_aggr(bool enable)
3806{
3807 u32 reg_val;
3808
3809 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
3810 reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST);
3811 ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST,
3812 (enable << IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_SHFT) |
3813 (reg_val & ~IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_BMSK));
3814 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
3815 return 0;
3816}
3817EXPORT_SYMBOL(ipa_set_hw_timer_fix_for_mbim_aggr);
3818
3819/**
3820 * ipa_straddle_boundary() - Checks whether a memory buffer straddles a boundary
3821 * @start: start address of the memory buffer
3822 * @end: end address of the memory buffer
3823 * @boundary: boundary
3824 *
3825 * Return value:
3826 * 1: if the interval [start, end] straddles boundary
3827 * 0: otherwise
3828 */
3829int ipa_straddle_boundary(u32 start, u32 end, u32 boundary)
3830{
3831 u32 next_start;
3832 u32 prev_end;
3833
3834 IPADBG("start=%u end=%u boundary=%u\n", start, end, boundary);
3835
3836 next_start = (start + (boundary - 1)) & ~(boundary - 1);
3837 prev_end = ((end + (boundary - 1)) & ~(boundary - 1)) - boundary;
3838
3839 while (next_start < prev_end)
3840 next_start += boundary;
3841
3842 if (next_start == prev_end)
3843 return 1;
3844 else
3845 return 0;
3846}
3847
3848/**
3849 * ipa2_bam_reg_dump() - Dump selected BAM registers for IPA and DMA-BAM
3850 *
3851 * Function is rate limited to avoid flooding kernel log buffer
3852 */
3853void ipa2_bam_reg_dump(void)
3854{
3855 static DEFINE_RATELIMIT_STATE(_rs, 500*HZ, 1);
3856
3857 if (__ratelimit(&_rs)) {
3858 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
3859 pr_err("IPA BAM START\n");
3860 if (ipa_ctx->ipa_hw_type < IPA_HW_v2_0) {
3861 sps_get_bam_debug_info(ipa_ctx->bam_handle, 5,
3862 511950, 0, 0);
3863 sps_get_bam_debug_info(ipa_ctx->bam_handle, 93, 0,
3864 0, 0);
3865 } else {
3866 sps_get_bam_debug_info(ipa_ctx->bam_handle, 93,
3867 (SPS_BAM_PIPE(ipa_get_ep_mapping(IPA_CLIENT_USB_CONS))
3868 |
3869 SPS_BAM_PIPE(ipa_get_ep_mapping(IPA_CLIENT_USB_PROD))),
3870 0, 2);
3871 }
3872 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
3873 }
3874}
3875
3876static void ipa_init_mem_partition_v2(void)
3877{
3878 IPADBG("Memory partition IPA 2\n");
3879 IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST;
3880 IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE;
3881 IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
3882 IPA_MEM_PART(nat_size));
3883
3884 IPA_MEM_PART(ofst_start) = IPA_MEM_v2_RAM_OFST_START;
3885 IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));
3886
3887 IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_RAM_V4_FLT_OFST;
3888 IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_RAM_V4_FLT_SIZE;
3889 IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR;
3890 IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
3891 IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size),
3892 IPA_MEM_PART(v4_flt_size_ddr));
3893
3894 IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_RAM_V6_FLT_OFST;
3895 IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_RAM_V6_FLT_SIZE;
3896 IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR;
3897 IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
3898 IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size),
3899 IPA_MEM_PART(v6_flt_size_ddr));
3900
3901 IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_RAM_V4_RT_OFST;
3902 IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst));
3903
3904 IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_RAM_V4_NUM_INDEX;
3905 IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index));
3906
3907 IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_V4_MODEM_RT_INDEX_LO;
3908 IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_V4_MODEM_RT_INDEX_HI;
3909 IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
3910 IPA_MEM_PART(v4_modem_rt_index_lo),
3911 IPA_MEM_PART(v4_modem_rt_index_hi));
3912
3913 IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_V4_APPS_RT_INDEX_LO;
3914 IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_V4_APPS_RT_INDEX_HI;
3915 IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
3916 IPA_MEM_PART(v4_apps_rt_index_lo),
3917 IPA_MEM_PART(v4_apps_rt_index_hi));
3918
3919 IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_RAM_V4_RT_SIZE;
3920 IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR;
3921 IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size),
3922 IPA_MEM_PART(v4_rt_size_ddr));
3923
3924 IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_RAM_V6_RT_OFST;
3925 IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst));
3926
3927 IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_RAM_V6_NUM_INDEX;
3928 IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index));
3929
3930 IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_V6_MODEM_RT_INDEX_LO;
3931 IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_V6_MODEM_RT_INDEX_HI;
3932 IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
3933 IPA_MEM_PART(v6_modem_rt_index_lo),
3934 IPA_MEM_PART(v6_modem_rt_index_hi));
3935
3936 IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_V6_APPS_RT_INDEX_LO;
3937 IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_V6_APPS_RT_INDEX_HI;
3938 IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
3939 IPA_MEM_PART(v6_apps_rt_index_lo),
3940 IPA_MEM_PART(v6_apps_rt_index_hi));
3941
3942 IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_RAM_V6_RT_SIZE;
3943 IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR;
3944 IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size),
3945 IPA_MEM_PART(v6_rt_size_ddr));
3946
3947 IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_RAM_MODEM_HDR_OFST;
3948 IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_RAM_MODEM_HDR_SIZE;
3949 IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
3950 IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));
3951
3952 IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_RAM_APPS_HDR_OFST;
3953 IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_RAM_APPS_HDR_SIZE;
3954 IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_RAM_HDR_SIZE_DDR;
3955 IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
3956 IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
3957 IPA_MEM_PART(apps_hdr_size_ddr));
3958
3959 IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_RAM_MODEM_OFST;
3960 IPA_MEM_PART(modem_size) = IPA_MEM_v2_RAM_MODEM_SIZE;
3961 IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
3962 IPA_MEM_PART(modem_size));
3963
3964 IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_RAM_APPS_V4_FLT_OFST;
3965 IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_RAM_APPS_V4_FLT_SIZE;
3966 IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n",
3967 IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_PART(apps_v4_flt_size));
3968
3969 IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_RAM_APPS_V6_FLT_OFST;
3970 IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_RAM_APPS_V6_FLT_SIZE;
3971 IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n",
3972 IPA_MEM_PART(apps_v6_flt_ofst), IPA_MEM_PART(apps_v6_flt_size));
3973
3974 IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_RAM_UC_INFO_OFST;
3975 IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_RAM_UC_INFO_SIZE;
3976 IPADBG("V6 UC INFO OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(uc_info_ofst),
3977 IPA_MEM_PART(uc_info_size));
3978
3979 IPA_MEM_PART(end_ofst) = IPA_MEM_v2_RAM_END_OFST;
3980 IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_RAM_APPS_V4_RT_OFST;
3981 IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_RAM_APPS_V4_RT_SIZE;
3982 IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_RAM_APPS_V6_RT_OFST;
3983 IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_RAM_APPS_V6_RT_SIZE;
3984}
3985
/**
 * ipa_init_mem_partition_v2_5() - populate the shared-memory partition map
 * for IPA HW v2.5
 *
 * Fills the global IPA_MEM_PART() table with the v2.5 layout constants,
 * including the header-processing-context regions that do not exist in
 * the v2.0 layout. Each value is logged via IPADBG for bring-up debugging.
 */
static void ipa_init_mem_partition_v2_5(void)
{
	IPADBG("Memory partition IPA 2.5\n");
	IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST;
	IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE;
	IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
		IPA_MEM_PART(nat_size));

	IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_5_RAM_UC_INFO_OFST;
	IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_5_RAM_UC_INFO_SIZE;
	IPADBG("UC INFO OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(uc_info_ofst),
		IPA_MEM_PART(uc_info_size));

	IPA_MEM_PART(ofst_start) = IPA_MEM_v2_5_RAM_OFST_START;
	IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));

	IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_5_RAM_V4_FLT_OFST;
	IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_5_RAM_V4_FLT_SIZE;
	IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR;
	IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size),
		IPA_MEM_PART(v4_flt_size_ddr));

	IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_5_RAM_V6_FLT_OFST;
	IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_5_RAM_V6_FLT_SIZE;
	IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR;
	IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size),
		IPA_MEM_PART(v6_flt_size_ddr));

	IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_5_RAM_V4_RT_OFST;
	IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst));

	IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_5_RAM_V4_NUM_INDEX;
	IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index));

	IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_5_V4_MODEM_RT_INDEX_LO;
	IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_5_V4_MODEM_RT_INDEX_HI;
	IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v4_modem_rt_index_lo),
		IPA_MEM_PART(v4_modem_rt_index_hi));

	IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_5_V4_APPS_RT_INDEX_LO;
	IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_5_V4_APPS_RT_INDEX_HI;
	IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v4_apps_rt_index_lo),
		IPA_MEM_PART(v4_apps_rt_index_hi));

	IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_5_RAM_V4_RT_SIZE;
	IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR;
	IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size),
		IPA_MEM_PART(v4_rt_size_ddr));

	IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_5_RAM_V6_RT_OFST;
	IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst));

	IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_5_RAM_V6_NUM_INDEX;
	IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index));

	IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_5_V6_MODEM_RT_INDEX_LO;
	IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_5_V6_MODEM_RT_INDEX_HI;
	IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v6_modem_rt_index_lo),
		IPA_MEM_PART(v6_modem_rt_index_hi));

	IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_5_V6_APPS_RT_INDEX_LO;
	IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_5_V6_APPS_RT_INDEX_HI;
	IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v6_apps_rt_index_lo),
		IPA_MEM_PART(v6_apps_rt_index_hi));

	IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_5_RAM_V6_RT_SIZE;
	IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR;
	IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size),
		IPA_MEM_PART(v6_rt_size_ddr));

	IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_5_RAM_MODEM_HDR_OFST;
	IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_5_RAM_MODEM_HDR_SIZE;
	IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));

	IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_5_RAM_APPS_HDR_OFST;
	IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_5_RAM_APPS_HDR_SIZE;
	IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_5_RAM_HDR_SIZE_DDR;
	IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
		IPA_MEM_PART(apps_hdr_size_ddr));

	/* header processing contexts exist only from v2.5 onwards */
	IPA_MEM_PART(modem_hdr_proc_ctx_ofst) =
		IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_OFST;
	IPA_MEM_PART(modem_hdr_proc_ctx_size) =
		IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_SIZE;
	IPADBG("MODEM HDR PROC CTX OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(modem_hdr_proc_ctx_ofst),
		IPA_MEM_PART(modem_hdr_proc_ctx_size));

	IPA_MEM_PART(apps_hdr_proc_ctx_ofst) =
		IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_OFST;
	IPA_MEM_PART(apps_hdr_proc_ctx_size) =
		IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_SIZE;
	IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr) =
		IPA_MEM_RAM_HDR_PROC_CTX_SIZE_DDR;
	IPADBG("APPS HDR PROC CTX OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(apps_hdr_proc_ctx_ofst),
		IPA_MEM_PART(apps_hdr_proc_ctx_size),
		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr));

	IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_5_RAM_MODEM_OFST;
	IPA_MEM_PART(modem_size) = IPA_MEM_v2_5_RAM_MODEM_SIZE;
	IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
		IPA_MEM_PART(modem_size));

	IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_5_RAM_APPS_V4_FLT_OFST;
	IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_5_RAM_APPS_V4_FLT_SIZE;
	IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_PART(apps_v4_flt_size));

	IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_5_RAM_APPS_V6_FLT_OFST;
	IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_5_RAM_APPS_V6_FLT_SIZE;
	IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(apps_v6_flt_ofst), IPA_MEM_PART(apps_v6_flt_size));

	IPA_MEM_PART(end_ofst) = IPA_MEM_v2_5_RAM_END_OFST;
	IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_5_RAM_APPS_V4_RT_OFST;
	IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_5_RAM_APPS_V4_RT_SIZE;
	IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_5_RAM_APPS_V6_RT_OFST;
	IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_5_RAM_APPS_V6_RT_SIZE;
}
4114
4115static void ipa_init_mem_partition_v2_6L(void)
4116{
4117 IPADBG("Memory partition IPA 2.6Lite\n");
4118 IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST;
4119 IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE;
4120 IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
4121 IPA_MEM_PART(nat_size));
4122
4123 IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_6L_RAM_UC_INFO_OFST;
4124 IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_6L_RAM_UC_INFO_SIZE;
4125 IPADBG("V6 UC INFO OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(uc_info_ofst),
4126 IPA_MEM_PART(uc_info_size));
4127
4128 IPA_MEM_PART(ofst_start) = IPA_MEM_v2_6L_RAM_OFST_START;
4129 IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));
4130
4131 IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_6L_RAM_V4_FLT_OFST;
4132 IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_6L_RAM_V4_FLT_SIZE;
4133 IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR;
4134 IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
4135 IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size),
4136 IPA_MEM_PART(v4_flt_size_ddr));
4137
4138 IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_6L_RAM_V6_FLT_OFST;
4139 IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_6L_RAM_V6_FLT_SIZE;
4140 IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR;
4141 IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
4142 IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size),
4143 IPA_MEM_PART(v6_flt_size_ddr));
4144
4145 IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_6L_RAM_V4_RT_OFST;
4146 IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst));
4147
4148 IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_6L_RAM_V4_NUM_INDEX;
4149 IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index));
4150
4151 IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_LO;
4152 IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_HI;
4153 IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
4154 IPA_MEM_PART(v4_modem_rt_index_lo),
4155 IPA_MEM_PART(v4_modem_rt_index_hi));
4156
4157 IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_6L_V4_APPS_RT_INDEX_LO;
4158 IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_6L_V4_APPS_RT_INDEX_HI;
4159 IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
4160 IPA_MEM_PART(v4_apps_rt_index_lo),
4161 IPA_MEM_PART(v4_apps_rt_index_hi));
4162
4163 IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_6L_RAM_V4_RT_SIZE;
4164 IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR;
4165 IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size),
4166 IPA_MEM_PART(v4_rt_size_ddr));
4167
4168 IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_6L_RAM_V6_RT_OFST;
4169 IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst));
4170
4171 IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_6L_RAM_V6_NUM_INDEX;
4172 IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index));
4173
4174 IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_LO;
4175 IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_HI;
4176 IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
4177 IPA_MEM_PART(v6_modem_rt_index_lo),
4178 IPA_MEM_PART(v6_modem_rt_index_hi));
4179
4180 IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_6L_V6_APPS_RT_INDEX_LO;
4181 IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_6L_V6_APPS_RT_INDEX_HI;
4182 IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
4183 IPA_MEM_PART(v6_apps_rt_index_lo),
4184 IPA_MEM_PART(v6_apps_rt_index_hi));
4185
4186 IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_6L_RAM_V6_RT_SIZE;
4187 IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR;
4188 IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size),
4189 IPA_MEM_PART(v6_rt_size_ddr));
4190
4191 IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_6L_RAM_MODEM_HDR_OFST;
4192 IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_6L_RAM_MODEM_HDR_SIZE;
4193 IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
4194 IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));
4195
4196 IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_6L_RAM_APPS_HDR_OFST;
4197 IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_6L_RAM_APPS_HDR_SIZE;
4198 IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_6L_RAM_HDR_SIZE_DDR;
4199 IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
4200 IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
4201 IPA_MEM_PART(apps_hdr_size_ddr));
4202
4203 IPA_MEM_PART(modem_comp_decomp_ofst) =
4204 IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_OFST;
4205 IPA_MEM_PART(modem_comp_decomp_size) =
4206 IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_SIZE;
4207 IPADBG("MODEM COMP DECOMP OFST 0x%x SIZE 0x%x\n",
4208 IPA_MEM_PART(modem_comp_decomp_ofst),
4209 IPA_MEM_PART(modem_comp_decomp_size));
4210
4211 IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_6L_RAM_MODEM_OFST;
4212 IPA_MEM_PART(modem_size) = IPA_MEM_v2_6L_RAM_MODEM_SIZE;
4213 IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
4214 IPA_MEM_PART(modem_size));
4215
4216 IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V4_FLT_OFST;
4217 IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_6L_RAM_APPS_V4_FLT_SIZE;
4218 IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n",
4219 IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_PART(apps_v4_flt_size));
4220
4221 IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V6_FLT_OFST;
4222 IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_6L_RAM_APPS_V6_FLT_SIZE;
4223 IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n",
4224 IPA_MEM_PART(apps_v6_flt_ofst), IPA_MEM_PART(apps_v6_flt_size));
4225
4226 IPA_MEM_PART(end_ofst) = IPA_MEM_v2_6L_RAM_END_OFST;
4227 IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V4_RT_OFST;
4228 IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_6L_RAM_APPS_V4_RT_SIZE;
4229 IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V6_RT_OFST;
4230 IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_6L_RAM_APPS_V6_RT_SIZE;
4231}
4232
/**
 * ipa_controller_shared_static_bind() - set the shared methods common to
 * IPA HW versions 2.0, 2.5, 2.6 and 2.6L
 * @ctrl: data structure which holds the function pointers
 *
 * Binds the operations that are identical across the v2.x family; the
 * per-version caller (ipa_controller_static_bind) then overrides the
 * version-specific entries (HOLB, SRAM, headers, routing-rule generation).
 */
void ipa_controller_shared_static_bind(struct ipa_controller *ctrl)
{
	ctrl->ipa_init_rt4 = _ipa_init_rt4_v2;
	ctrl->ipa_init_rt6 = _ipa_init_rt6_v2;
	ctrl->ipa_init_flt4 = _ipa_init_flt4_v2;
	ctrl->ipa_init_flt6 = _ipa_init_flt6_v2;
	ctrl->ipa_cfg_ep_hdr = _ipa_cfg_ep_hdr_v2_0;
	ctrl->ipa_cfg_ep_nat = _ipa_cfg_ep_nat_v2_0;
	ctrl->ipa_cfg_ep_aggr = _ipa_cfg_ep_aggr_v2_0;
	ctrl->ipa_cfg_ep_deaggr = _ipa_cfg_ep_deaggr_v2_0;
	ctrl->ipa_cfg_ep_mode = _ipa_cfg_ep_mode_v2_0;
	ctrl->ipa_cfg_ep_route = _ipa_cfg_ep_route_v2_0;
	ctrl->ipa_cfg_route = _ipa_cfg_route_v2_0;
	ctrl->ipa_cfg_ep_status = _ipa_cfg_ep_status_v2_0;
	ctrl->ipa_cfg_ep_cfg = _ipa_cfg_ep_cfg_v2_0;
	ctrl->ipa_cfg_ep_metadata_mask = _ipa_cfg_ep_metadata_mask_v2_0;
	/* clock rates and bus-scaling thresholds shared by the v2.x family */
	ctrl->ipa_clk_rate_turbo = IPA_V2_0_CLK_RATE_TURBO;
	ctrl->ipa_clk_rate_nominal = IPA_V2_0_CLK_RATE_NOMINAL;
	ctrl->ipa_clk_rate_svs = IPA_V2_0_CLK_RATE_SVS;
	ctrl->ipa_read_gen_reg = _ipa_read_gen_reg_v2_0;
	ctrl->ipa_read_ep_reg = _ipa_read_ep_reg_v2_0;
	ctrl->ipa_write_dbg_cnt = _ipa_write_dbg_cnt_v2_0;
	ctrl->ipa_read_dbg_cnt = _ipa_read_dbg_cnt_v2_0;
	ctrl->ipa_commit_flt = __ipa_commit_flt_v2;
	ctrl->ipa_commit_rt = __ipa_commit_rt_v2;
	ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2;
	ctrl->ipa_enable_clks = _ipa_enable_clks_v2_0;
	ctrl->ipa_disable_clks = _ipa_disable_clks_v2_0;
	ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v2_0;
	ctrl->ipa_cfg_ep_metadata = _ipa_cfg_ep_metadata_v2_0;
	ctrl->clock_scaling_bw_threshold_nominal =
		IPA_V2_0_BW_THRESHOLD_NOMINAL_MBPS;
	ctrl->clock_scaling_bw_threshold_turbo =
		IPA_V2_0_BW_THRESHOLD_TURBO_MBPS;
}
4274
4275/**
4276 * ipa_ctrl_static_bind() - set the appropriate methods for
4277 * IPA Driver based on the HW version
4278 *
4279 * @ctrl: data structure which holds the function pointers
4280 * @hw_type: the HW type in use
4281 *
4282 * This function can avoid the runtime assignment by using C99 special
4283 * struct initialization - hard decision... time.vs.mem
4284 */
4285int ipa_controller_static_bind(struct ipa_controller *ctrl,
4286 enum ipa_hw_type hw_type)
4287{
4288 switch (hw_type) {
4289 case (IPA_HW_v1_1):
4290 ipa_init_mem_partition_v2();
4291 ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v1_1;
4292 ctrl->ipa_cfg_ep_hdr = _ipa_cfg_ep_hdr_v1_1;
4293 ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v1_1;
4294 ctrl->ipa_cfg_ep_aggr = _ipa_cfg_ep_aggr_v1_1;
4295 ctrl->ipa_cfg_ep_deaggr = _ipa_cfg_ep_deaggr_v1_1;
4296 ctrl->ipa_cfg_ep_nat = _ipa_cfg_ep_nat_v1_1;
4297 ctrl->ipa_cfg_ep_mode = _ipa_cfg_ep_mode_v1_1;
4298 ctrl->ipa_cfg_ep_route = _ipa_cfg_ep_route_v1_1;
4299 ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v1_1;
4300 ctrl->ipa_cfg_route = _ipa_cfg_route_v1_1;
4301 ctrl->ipa_cfg_ep_status = _ipa_cfg_ep_status_v1_1;
4302 ctrl->ipa_cfg_ep_cfg = _ipa_cfg_ep_cfg_v1_1;
4303 ctrl->ipa_cfg_ep_metadata_mask = _ipa_cfg_ep_metadata_mask_v1_1;
4304 ctrl->ipa_clk_rate_turbo = IPA_V1_1_CLK_RATE;
4305 ctrl->ipa_clk_rate_nominal = IPA_V1_1_CLK_RATE;
4306 ctrl->ipa_clk_rate_svs = IPA_V1_1_CLK_RATE;
4307 ctrl->ipa_read_gen_reg = _ipa_read_gen_reg_v1_1;
4308 ctrl->ipa_read_ep_reg = _ipa_read_ep_reg_v1_1;
4309 ctrl->ipa_write_dbg_cnt = _ipa_write_dbg_cnt_v1_1;
4310 ctrl->ipa_read_dbg_cnt = _ipa_read_dbg_cnt_v1_1;
4311 ctrl->ipa_commit_flt = __ipa_commit_flt_v1_1;
4312 ctrl->ipa_commit_rt = __ipa_commit_rt_v1_1;
4313 ctrl->ipa_commit_hdr = __ipa_commit_hdr_v1_1;
4314 ctrl->ipa_enable_clks = _ipa_enable_clks_v1_1;
4315 ctrl->ipa_disable_clks = _ipa_disable_clks_v1_1;
4316 ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v1_1;
4317 ctrl->ipa_cfg_ep_metadata = _ipa_cfg_ep_metadata_v1_1;
4318 ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_0;
4319 ctrl->max_holb_tmr_val = IPA_V1_MAX_HOLB_TMR_VAL;
4320 break;
4321 case (IPA_HW_v2_0):
4322 ipa_init_mem_partition_v2();
4323 ipa_controller_shared_static_bind(ctrl);
4324 ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_0;
4325 ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_0;
4326 ctrl->max_holb_tmr_val = IPA_V2_0_MAX_HOLB_TMR_VAL;
4327 ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_0;
4328 ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_0;
4329 ctrl->ipa_init_sram = _ipa_init_sram_v2;
4330 ctrl->ipa_init_hdr = _ipa_init_hdr_v2;
4331 ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2;
4332 ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2;
4333 break;
4334 case (IPA_HW_v2_5):
4335 ipa_init_mem_partition_v2_5();
4336 ipa_controller_shared_static_bind(ctrl);
4337 ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_5;
4338 ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_5;
4339 ctrl->max_holb_tmr_val = IPA_V2_5_MAX_HOLB_TMR_VAL;
4340 ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_5;
4341 ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_5;
4342 ctrl->ipa_init_sram = _ipa_init_sram_v2_5;
4343 ctrl->ipa_init_hdr = _ipa_init_hdr_v2_5;
4344 ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2_5;
4345 ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2_5;
4346 break;
4347 case (IPA_HW_v2_6L):
4348 ipa_init_mem_partition_v2_6L();
4349 ipa_controller_shared_static_bind(ctrl);
4350 ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_6L;
4351 ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_6L;
4352 ctrl->max_holb_tmr_val = IPA_V2_6L_MAX_HOLB_TMR_VAL;
4353 ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_6L;
4354 ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_6L;
4355 ctrl->ipa_init_sram = _ipa_init_sram_v2_6L;
4356 ctrl->ipa_init_hdr = _ipa_init_hdr_v2_6L;
4357 ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2_6L;
4358 ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2_6L;
4359 break;
4360 default:
4361 return -EPERM;
4362 }
4363
4364 return 0;
4365}
4366
/**
 * ipa_skb_recycle() - reset an skb so its data buffer can be reused
 * @skb: socket buffer to recycle
 *
 * Clears the shared info (keeping a single dataref) and zeroes the skb
 * header fields up to @tail, then points @data back to head + NET_SKB_PAD
 * and resets the tail pointer, leaving the skb as if freshly allocated.
 * Caller must ensure no one else holds a reference to the skb.
 */
void ipa_skb_recycle(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;

	shinfo = skb_shinfo(skb);
	/* wipe shared info but keep one reference on the data buffer */
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);
}
4379
4380int ipa_id_alloc(void *ptr)
4381{
4382 int id;
4383
4384 idr_preload(GFP_KERNEL);
4385 spin_lock(&ipa_ctx->idr_lock);
4386 id = idr_alloc(&ipa_ctx->ipa_idr, ptr, 0, 0, GFP_NOWAIT);
4387 spin_unlock(&ipa_ctx->idr_lock);
4388 idr_preload_end();
4389
4390 return id;
4391}
4392
4393void *ipa_id_find(u32 id)
4394{
4395 void *ptr;
4396
4397 spin_lock(&ipa_ctx->idr_lock);
4398 ptr = idr_find(&ipa_ctx->ipa_idr, id);
4399 spin_unlock(&ipa_ctx->idr_lock);
4400
4401 return ptr;
4402}
4403
/**
 * ipa_id_remove() - release the handle @id from the IPA IDR
 * @id: handle previously returned by ipa_id_alloc()
 *
 * Does not free the registered object; only the mapping is removed.
 */
void ipa_id_remove(u32 id)
{
	spin_lock(&ipa_ctx->idr_lock);
	idr_remove(&ipa_ctx->ipa_idr, id);
	spin_unlock(&ipa_ctx->idr_lock);
}
4410
/* descriptor completion callback: frees a kmalloc'ed payload (user2 unused) */
static void ipa_tag_free_buf(void *user1, int user2)
{
	kfree(user1);
}
4415
/* descriptor completion callback: frees an skb payload (user2 unused) */
static void ipa_tag_free_skb(void *user1, int user2)
{
	dev_kfree_skb_any((struct sk_buff *)user1);
}
4420
4421#define REQUIRED_TAG_PROCESS_DESCRIPTORS 4
4422
/* ipa_tag_process() - Initiates a tag process. Incorporates the input
 * descriptors
 *
 * @desc: descriptors with commands for IC (may be NULL when descs_num is 0)
 * @desc_size: amount of descriptors in the above variable
 * @timeout: how long to wait for the TAG response, in jiffies
 *
 * Builds a descriptor chain of [IP_PACKET_INIT, REGISTER_WRITE no-op,
 * IP_PACKET_TAG_STATUS, <caller's descriptors>, dummy skb] and sends it
 * with a single EOT; the dummy skb carries a pointer to a refcounted
 * completion that the LAN-cons rx handler signals when the tag status
 * arrives, proving the pipeline has drained.
 *
 * Note: The descriptors are copied (if there's room), the client needs to
 * free his descriptors afterwards
 *
 * Return: 0 or negative in case of failure
 */
int ipa_tag_process(struct ipa_desc desc[],
	int descs_num,
	unsigned long timeout)
{
	struct ipa_sys_context *sys;
	struct ipa_desc *tag_desc;
	int desc_idx = 0;
	struct ipa_ip_packet_init *pkt_init;
	struct ipa_register_write *reg_write_nop;
	struct ipa_ip_packet_tag_status *status;
	int i;
	struct sk_buff *dummy_skb;
	int res;
	struct ipa_tag_completion *comp;
	int ep_idx;
	/* some targets require command payloads in the DMA zone */
	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);

	/* Not enough room for the required descriptors for the tag process */
	if (IPA_TAG_MAX_DESC - descs_num < REQUIRED_TAG_PROCESS_DESCRIPTORS) {
		IPAERR("up to %d descriptors are allowed (received %d)\n",
			IPA_TAG_MAX_DESC - REQUIRED_TAG_PROCESS_DESCRIPTORS,
			descs_num);
		return -ENOMEM;
	}

	ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
	if (-1 == ep_idx) {
		IPAERR("Client %u is not mapped\n",
			IPA_CLIENT_APPS_CMD_PROD);
		return -EFAULT;
	}
	sys = ipa_ctx->ep[ep_idx].sys;

	tag_desc = kzalloc(sizeof(*tag_desc) * IPA_TAG_MAX_DESC, flag);
	if (!tag_desc) {
		IPAERR("failed to allocate memory\n");
		res = -ENOMEM;
		goto fail_alloc_desc;
	}

	/* IP_PACKET_INIT IC for tag status to be sent to apps */
	pkt_init = kzalloc(sizeof(*pkt_init), flag);
	if (!pkt_init) {
		IPAERR("failed to allocate memory\n");
		res = -ENOMEM;
		goto fail_alloc_pkt_init;
	}

	pkt_init->destination_pipe_index =
		ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);

	tag_desc[desc_idx].opcode = IPA_IP_PACKET_INIT;
	tag_desc[desc_idx].pyld = pkt_init;
	tag_desc[desc_idx].len = sizeof(*pkt_init);
	tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
	tag_desc[desc_idx].callback = ipa_tag_free_buf;
	tag_desc[desc_idx].user1 = pkt_init;
	desc_idx++;

	/* NO-OP IC for ensuring that IPA pipeline is empty */
	reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag);
	if (!reg_write_nop) {
		IPAERR("no mem\n");
		res = -ENOMEM;
		goto fail_free_desc;
	}

	reg_write_nop->skip_pipeline_clear = 0;
	/* zero mask => the register write changes nothing (pure barrier) */
	reg_write_nop->value_mask = 0x0;

	tag_desc[desc_idx].opcode = IPA_REGISTER_WRITE;
	tag_desc[desc_idx].pyld = reg_write_nop;
	tag_desc[desc_idx].len = sizeof(*reg_write_nop);
	tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
	tag_desc[desc_idx].callback = ipa_tag_free_buf;
	tag_desc[desc_idx].user1 = reg_write_nop;
	desc_idx++;

	/* status IC */
	status = kzalloc(sizeof(*status), flag);
	if (!status) {
		IPAERR("no mem\n");
		res = -ENOMEM;
		goto fail_free_desc;
	}

	/* IPA_COOKIE lets the rx handler recognize this tag status */
	status->tag_f_2 = IPA_COOKIE;

	tag_desc[desc_idx].opcode = IPA_IP_PACKET_TAG_STATUS;
	tag_desc[desc_idx].pyld = status;
	tag_desc[desc_idx].len = sizeof(*status);
	tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
	tag_desc[desc_idx].callback = ipa_tag_free_buf;
	tag_desc[desc_idx].user1 = status;
	desc_idx++;

	/* Copy the required descriptors from the client now */
	if (desc) {
		memcpy(&(tag_desc[desc_idx]), desc, descs_num *
			sizeof(struct ipa_desc));
		desc_idx += descs_num;
	}

	comp = kzalloc(sizeof(*comp), GFP_KERNEL);
	if (!comp) {
		IPAERR("no mem\n");
		res = -ENOMEM;
		goto fail_free_desc;
	}
	init_completion(&comp->comp);

	/* completion needs to be released from both here and rx handler */
	atomic_set(&comp->cnt, 2);

	/* dummy packet to send to IPA. packet payload is a completion object */
	/* note: the payload is the POINTER value itself (sizeof(comp)) */
	dummy_skb = alloc_skb(sizeof(comp), flag);
	if (!dummy_skb) {
		IPAERR("failed to allocate memory\n");
		res = -ENOMEM;
		goto fail_free_skb;
	}

	memcpy(skb_put(dummy_skb, sizeof(comp)), &comp, sizeof(comp));

	tag_desc[desc_idx].pyld = dummy_skb->data;
	tag_desc[desc_idx].len = dummy_skb->len;
	tag_desc[desc_idx].type = IPA_DATA_DESC_SKB;
	tag_desc[desc_idx].callback = ipa_tag_free_skb;
	tag_desc[desc_idx].user1 = dummy_skb;
	desc_idx++;

	/* send all descriptors to IPA with single EOT */
	res = ipa_send(sys, desc_idx, tag_desc, true);
	if (res) {
		IPAERR("failed to send TAG packets %d\n", res);
		res = -ENOMEM;
		goto fail_send;
	}
	/* on success ipa_send owns the payloads; only the array is freed */
	kfree(tag_desc);
	tag_desc = NULL;

	IPADBG("waiting for TAG response\n");
	/* NOTE(review): timeout is in jiffies, though the log says msec */
	res = wait_for_completion_timeout(&comp->comp, timeout);
	if (res == 0) {
		IPAERR("timeout (%lu msec) on waiting for TAG response\n",
			timeout);
		WARN_ON(1);
		if (atomic_dec_return(&comp->cnt) == 0)
			kfree(comp);
		return -ETIME;
	}

	IPADBG("TAG response arrived!\n");
	if (atomic_dec_return(&comp->cnt) == 0)
		kfree(comp);

	/* sleep for short period to ensure IPA wrote all packets to BAM */
	usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC);

	return 0;

fail_send:
	dev_kfree_skb_any(dummy_skb);
	desc_idx--;
fail_free_skb:
	kfree(comp);
fail_free_desc:
	/*
	 * Free only the first descriptors allocated here.
	 * [pkt_init, status, nop]
	 * The user is responsible to free his allocations
	 * in case of failure.
	 * The min is required because we may fail during
	 * of the initial allocations above
	 */
	for (i = 0; i < min(REQUIRED_TAG_PROCESS_DESCRIPTORS-1, desc_idx); i++)
		kfree(tag_desc[i].user1);

fail_alloc_pkt_init:
	kfree(tag_desc);
fail_alloc_desc:
	return res;
}
4617
4618/**
4619 * ipa_tag_generate_force_close_desc() - generate descriptors for force close
4620 * immediate command
4621 *
4622 * @desc: descriptors for IC
4623 * @desc_size: desc array size
4624 * @start_pipe: first pipe to close aggregation
4625 * @end_pipe: last (non-inclusive) pipe to close aggregation
4626 *
4627 * Return: number of descriptors written or negative in case of failure
4628 */
4629static int ipa_tag_generate_force_close_desc(struct ipa_desc desc[],
4630 int desc_size, int start_pipe, int end_pipe)
4631{
4632 int i;
4633 u32 aggr_init;
4634 int desc_idx = 0;
4635 int res;
4636 struct ipa_register_write *reg_write_agg_close;
4637
4638 for (i = start_pipe; i < end_pipe; i++) {
4639 aggr_init = ipa_read_reg(ipa_ctx->mmio,
4640 IPA_ENDP_INIT_AGGR_N_OFST_v2_0(i));
4641 if (((aggr_init & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) >>
4642 IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT) != IPA_ENABLE_AGGR)
4643 continue;
4644 IPADBG("Force close ep: %d\n", i);
4645 if (desc_idx + 1 > desc_size) {
4646 IPAERR("Internal error - no descriptors\n");
4647 res = -EFAULT;
4648 goto fail_no_desc;
4649 }
4650
4651 reg_write_agg_close = kzalloc(sizeof(*reg_write_agg_close),
4652 GFP_KERNEL);
4653 if (!reg_write_agg_close) {
4654 IPAERR("no mem\n");
4655 res = -ENOMEM;
4656 goto fail_alloc_reg_write_agg_close;
4657 }
4658
4659 reg_write_agg_close->skip_pipeline_clear = 0;
4660 reg_write_agg_close->offset = IPA_ENDP_INIT_AGGR_N_OFST_v2_0(i);
4661 reg_write_agg_close->value =
4662 (1 & IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK) <<
4663 IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
4664 reg_write_agg_close->value_mask =
4665 IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK <<
4666 IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
4667
4668 desc[desc_idx].opcode = IPA_REGISTER_WRITE;
4669 desc[desc_idx].pyld = reg_write_agg_close;
4670 desc[desc_idx].len = sizeof(*reg_write_agg_close);
4671 desc[desc_idx].type = IPA_IMM_CMD_DESC;
4672 desc[desc_idx].callback = ipa_tag_free_buf;
4673 desc[desc_idx].user1 = reg_write_agg_close;
4674 desc_idx++;
4675 }
4676
4677 return desc_idx;
4678
4679fail_alloc_reg_write_agg_close:
4680 for (i = 0; i < desc_idx; i++)
4681 kfree(desc[desc_idx].user1);
4682fail_no_desc:
4683 return res;
4684}
4685
4686/**
4687 * ipa_tag_aggr_force_close() - Force close aggregation
4688 *
4689 * @pipe_num: pipe number or -1 for all pipes
4690 */
4691int ipa_tag_aggr_force_close(int pipe_num)
4692{
4693 struct ipa_desc *desc;
4694 int res = -1;
4695 int start_pipe;
4696 int end_pipe;
4697 int num_descs;
4698 int num_aggr_descs;
4699
4700 if (pipe_num < -1 || pipe_num >= (int)ipa_ctx->ipa_num_pipes) {
4701 IPAERR("Invalid pipe number %d\n", pipe_num);
4702 return -EINVAL;
4703 }
4704
4705 if (pipe_num == -1) {
4706 start_pipe = 0;
4707 end_pipe = ipa_ctx->ipa_num_pipes;
4708 } else {
4709 start_pipe = pipe_num;
4710 end_pipe = pipe_num + 1;
4711 }
4712
4713 num_descs = end_pipe - start_pipe;
4714
4715 desc = kcalloc(num_descs, sizeof(*desc), GFP_KERNEL);
4716 if (!desc) {
4717 IPAERR("no mem\n");
4718 return -ENOMEM;
4719 }
4720
4721 /* Force close aggregation on all valid pipes with aggregation */
4722 num_aggr_descs = ipa_tag_generate_force_close_desc(desc, num_descs,
4723 start_pipe, end_pipe);
4724 if (num_aggr_descs < 0) {
4725 IPAERR("ipa_tag_generate_force_close_desc failed %d\n",
4726 num_aggr_descs);
4727 goto fail_free_desc;
4728 }
4729
4730 res = ipa_tag_process(desc, num_aggr_descs,
4731 IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT);
4732
4733fail_free_desc:
4734 kfree(desc);
4735
4736 return res;
4737}
4738
4739/**
4740 * ipa2_is_ready() - check if IPA module was initialized
4741 * successfully
4742 *
4743 * Return value: true for yes; false for no
4744 */
4745bool ipa2_is_ready(void)
4746{
4747 return (ipa_ctx != NULL) ? true : false;
4748}
4749
4750/**
4751 * ipa2_is_client_handle_valid() - check if IPA client handle is valid handle
4752 *
4753 * Return value: true for yes; false for no
4754 */
4755bool ipa2_is_client_handle_valid(u32 clnt_hdl)
4756{
4757 if (unlikely(!ipa_ctx)) {
4758 IPAERR("IPA driver was not initialized\n");
4759 return false;
4760 }
4761
4762 if (clnt_hdl >= 0 && clnt_hdl < ipa_ctx->ipa_num_pipes)
4763 return true;
4764 return false;
4765}
4766
4767/**
4768 * ipa2_proxy_clk_unvote() - called to remove IPA clock proxy vote
4769 *
4770 * Return value: none
4771 */
4772void ipa2_proxy_clk_unvote(void)
4773{
4774 if (ipa2_is_ready() && ipa_ctx->q6_proxy_clk_vote_valid) {
4775 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PROXY_CLK_VOTE");
4776 ipa_ctx->q6_proxy_clk_vote_valid = false;
4777 }
4778}
4779
4780/**
4781 * ipa2_proxy_clk_vote() - called to add IPA clock proxy vote
4782 *
4783 * Return value: none
4784 */
4785void ipa2_proxy_clk_vote(void)
4786{
4787 if (ipa2_is_ready() && !ipa_ctx->q6_proxy_clk_vote_valid) {
4788 IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE");
4789 ipa_ctx->q6_proxy_clk_vote_valid = true;
4790 }
4791}
4792
4793
4794/**
4795 * ipa2_get_smem_restr_bytes()- Return IPA smem restricted bytes
4796 *
4797 * Return value: u16 - number of IPA smem restricted bytes
4798 */
4799u16 ipa2_get_smem_restr_bytes(void)
4800{
4801 if (ipa_ctx)
4802 return ipa_ctx->smem_restricted_bytes;
4803
4804 IPAERR("IPA Driver not initialized\n");
4805
4806 return 0;
4807}
4808
4809/**
4810 * ipa2_get_modem_cfg_emb_pipe_flt()- Return ipa_ctx->modem_cfg_emb_pipe_flt
4811 *
4812 * Return value: true if modem configures embedded pipe flt, false otherwise
4813 */
4814bool ipa2_get_modem_cfg_emb_pipe_flt(void)
4815{
4816 if (ipa_ctx)
4817 return ipa_ctx->modem_cfg_emb_pipe_flt;
4818
4819 IPAERR("IPA driver has not been initialized\n");
4820
4821 return false;
4822}
/**
 * ipa2_get_transport_type()- Return IPA_TRANSPORT_TYPE_SPS
 *
 * Return value: enum ipa_transport_type
 */
enum ipa_transport_type ipa2_get_transport_type(void)
{
	/* IPA v2.x hardware always uses the SPS (BAM) transport, never GSI */
	return IPA_TRANSPORT_TYPE_SPS;
}
4832
4833u32 ipa_get_num_pipes(void)
4834{
4835 if (ipa_ctx->ipa_hw_type == IPA_HW_v2_6L)
4836 return ipa_read_reg(ipa_ctx->mmio, IPA_ENABLED_PIPES_OFST);
4837 else
4838 return IPA_MAX_NUM_PIPES;
4839}
4840EXPORT_SYMBOL(ipa_get_num_pipes);
4841
4842/**
4843 * ipa2_disable_apps_wan_cons_deaggr()-
4844 * set ipa_ctx->ipa_client_apps_wan_cons_agg_gro
4845 *
4846 * Return value: 0 or negative in case of failure
4847 */
4848int ipa2_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count)
4849{
4850 int res = -1;
4851
4852 /* checking if IPA-HW can support */
4853 if ((agg_size >> 10) >
4854 IPA_AGGR_BYTE_LIMIT) {
4855 IPAWANERR("IPA-AGG byte limit %d\n",
4856 IPA_AGGR_BYTE_LIMIT);
4857 IPAWANERR("exceed aggr_byte_limit\n");
4858 return res;
4859 }
4860 if (agg_count >
4861 IPA_AGGR_PKT_LIMIT) {
4862 IPAWANERR("IPA-AGG pkt limit %d\n",
4863 IPA_AGGR_PKT_LIMIT);
4864 IPAWANERR("exceed aggr_pkt_limit\n");
4865 return res;
4866 }
4867
4868 if (ipa_ctx) {
4869 ipa_ctx->ipa_client_apps_wan_cons_agg_gro = true;
4870 return 0;
4871 }
4872 return res;
4873}
4874
/* GSI endpoints exist only on IPA v3.x HW; stub entry for the API table. */
static const struct ipa_gsi_ep_config *ipa2_get_gsi_ep_info
	(enum ipa_client_type client)
{
	IPAERR("Not supported for IPA 2.x\n");
	return NULL;
}
4881
/* GSI channels exist only on IPA v3.x HW; stub entry for the API table. */
static int ipa2_stop_gsi_channel(u32 clnt_hdl)
{
	IPAERR("Not supported for IPA 2.x\n");
	return -EFAULT;
}
4887
4888static void *ipa2_get_ipc_logbuf(void)
4889{
Utkarsh Saxena41d57c52016-11-16 12:04:28 +05304890 if (ipa_ctx)
4891 return ipa_ctx->logbuf;
4892
Amir Levy9659e592016-10-27 18:08:27 +03004893 return NULL;
4894}
4895
4896static void *ipa2_get_ipc_logbuf_low(void)
4897{
Utkarsh Saxena41d57c52016-11-16 12:04:28 +05304898 if (ipa_ctx)
4899 return ipa_ctx->logbuf_low;
4900
Amir Levy9659e592016-10-27 18:08:27 +03004901 return NULL;
4902}
4903
/* Copy the cached head-of-line-blocking config of endpoint @ep_idx into @holb. */
static void ipa2_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb)
{
	*holb = ipa_ctx->ep[ep_idx].holb;
}
4908
4909static int ipa2_generate_tag_process(void)
4910{
4911 int res;
4912
4913 res = ipa_tag_process(NULL, 0, HZ);
4914 if (res)
4915 IPAERR("TAG process failed\n");
4916
4917 return res;
4918}
4919
/* Latch whether a TAG process must run before IPA clocks are gated. */
static void ipa2_set_tag_process_before_gating(bool val)
{
	ipa_ctx->tag_process_before_gating = val;
}
4924
/**
 * ipa2_bind_api_controller() - bind the generic IPA API to the v2.x driver
 * @ipa_hw_type: detected IPA HW version
 * @api_ctrl: dispatch table shared with the HW-agnostic IPA framework
 *
 * Fills every generic ipa_* entry point with its ipa2_* implementation so
 * the common IPA layer routes calls into this driver.
 *
 * Return: 0 on success, -EPERM if @ipa_hw_type is not an IPA v2.x version.
 */
int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
	struct ipa_api_controller *api_ctrl)
{
	/* This driver serves IPA v2.0 through v2.6L only */
	if (ipa_hw_type < IPA_HW_v2_0 || ipa_hw_type >= IPA_HW_v3_0) {
		IPAERR("Unsupported IPA HW version %d\n", ipa_hw_type);
		WARN_ON(1);
		return -EPERM;
	}

	/* connection and endpoint configuration */
	api_ctrl->ipa_connect = ipa2_connect;
	api_ctrl->ipa_disconnect = ipa2_disconnect;
	api_ctrl->ipa_reset_endpoint = ipa2_reset_endpoint;
	api_ctrl->ipa_clear_endpoint_delay = ipa2_clear_endpoint_delay;
	api_ctrl->ipa_disable_endpoint = ipa2_disable_endpoint;
	api_ctrl->ipa_cfg_ep = ipa2_cfg_ep;
	api_ctrl->ipa_cfg_ep_nat = ipa2_cfg_ep_nat;
	api_ctrl->ipa_cfg_ep_hdr = ipa2_cfg_ep_hdr;
	api_ctrl->ipa_cfg_ep_hdr_ext = ipa2_cfg_ep_hdr_ext;
	api_ctrl->ipa_cfg_ep_mode = ipa2_cfg_ep_mode;
	api_ctrl->ipa_cfg_ep_aggr = ipa2_cfg_ep_aggr;
	api_ctrl->ipa_cfg_ep_deaggr = ipa2_cfg_ep_deaggr;
	api_ctrl->ipa_cfg_ep_route = ipa2_cfg_ep_route;
	api_ctrl->ipa_cfg_ep_holb = ipa2_cfg_ep_holb;
	api_ctrl->ipa_get_holb = ipa2_get_holb;
	api_ctrl->ipa_set_tag_process_before_gating =
		ipa2_set_tag_process_before_gating;
	api_ctrl->ipa_cfg_ep_cfg = ipa2_cfg_ep_cfg;
	api_ctrl->ipa_cfg_ep_metadata_mask = ipa2_cfg_ep_metadata_mask;
	api_ctrl->ipa_cfg_ep_holb_by_client = ipa2_cfg_ep_holb_by_client;
	api_ctrl->ipa_cfg_ep_ctrl = ipa2_cfg_ep_ctrl;
	/* header, routing and filtering table management */
	api_ctrl->ipa_add_hdr = ipa2_add_hdr;
	api_ctrl->ipa_del_hdr = ipa2_del_hdr;
	api_ctrl->ipa_commit_hdr = ipa2_commit_hdr;
	api_ctrl->ipa_reset_hdr = ipa2_reset_hdr;
	api_ctrl->ipa_get_hdr = ipa2_get_hdr;
	api_ctrl->ipa_put_hdr = ipa2_put_hdr;
	api_ctrl->ipa_copy_hdr = ipa2_copy_hdr;
	api_ctrl->ipa_add_hdr_proc_ctx = ipa2_add_hdr_proc_ctx;
	api_ctrl->ipa_del_hdr_proc_ctx = ipa2_del_hdr_proc_ctx;
	api_ctrl->ipa_add_rt_rule = ipa2_add_rt_rule;
	api_ctrl->ipa_del_rt_rule = ipa2_del_rt_rule;
	api_ctrl->ipa_commit_rt = ipa2_commit_rt;
	api_ctrl->ipa_reset_rt = ipa2_reset_rt;
	api_ctrl->ipa_get_rt_tbl = ipa2_get_rt_tbl;
	api_ctrl->ipa_put_rt_tbl = ipa2_put_rt_tbl;
	api_ctrl->ipa_query_rt_index = ipa2_query_rt_index;
	api_ctrl->ipa_mdfy_rt_rule = ipa2_mdfy_rt_rule;
	api_ctrl->ipa_add_flt_rule = ipa2_add_flt_rule;
	api_ctrl->ipa_del_flt_rule = ipa2_del_flt_rule;
	api_ctrl->ipa_mdfy_flt_rule = ipa2_mdfy_flt_rule;
	api_ctrl->ipa_commit_flt = ipa2_commit_flt;
	api_ctrl->ipa_reset_flt = ipa2_reset_flt;
	/* NAT and messaging */
	api_ctrl->ipa_allocate_nat_device = ipa2_allocate_nat_device;
	api_ctrl->ipa_nat_init_cmd = ipa2_nat_init_cmd;
	api_ctrl->ipa_nat_dma_cmd = ipa2_nat_dma_cmd;
	api_ctrl->ipa_nat_del_cmd = ipa2_nat_del_cmd;
	api_ctrl->ipa_send_msg = ipa2_send_msg;
	api_ctrl->ipa_register_pull_msg = ipa2_register_pull_msg;
	api_ctrl->ipa_deregister_pull_msg = ipa2_deregister_pull_msg;
	api_ctrl->ipa_register_intf = ipa2_register_intf;
	api_ctrl->ipa_register_intf_ext = ipa2_register_intf_ext;
	api_ctrl->ipa_deregister_intf = ipa2_deregister_intf;
	api_ctrl->ipa_set_aggr_mode = ipa2_set_aggr_mode;
	api_ctrl->ipa_set_qcncm_ndp_sig = ipa2_set_qcncm_ndp_sig;
	api_ctrl->ipa_set_single_ndp_per_mbim = ipa2_set_single_ndp_per_mbim;
	/* data path */
	api_ctrl->ipa_tx_dp = ipa2_tx_dp;
	api_ctrl->ipa_tx_dp_mul = ipa2_tx_dp_mul;
	api_ctrl->ipa_free_skb = ipa2_free_skb;
	api_ctrl->ipa_setup_sys_pipe = ipa2_setup_sys_pipe;
	api_ctrl->ipa_teardown_sys_pipe = ipa2_teardown_sys_pipe;
	api_ctrl->ipa_sys_update_gsi_hdls = ipa2_sys_update_gsi_hdls;
	api_ctrl->ipa_sys_setup = ipa2_sys_setup;
	api_ctrl->ipa_sys_teardown = ipa2_sys_teardown;
	/* WDI (WLAN offload) pipes */
	api_ctrl->ipa_connect_wdi_pipe = ipa2_connect_wdi_pipe;
	api_ctrl->ipa_disconnect_wdi_pipe = ipa2_disconnect_wdi_pipe;
	api_ctrl->ipa_enable_wdi_pipe = ipa2_enable_wdi_pipe;
	api_ctrl->ipa_disable_wdi_pipe = ipa2_disable_wdi_pipe;
	api_ctrl->ipa_resume_wdi_pipe = ipa2_resume_wdi_pipe;
	api_ctrl->ipa_suspend_wdi_pipe = ipa2_suspend_wdi_pipe;
	api_ctrl->ipa_get_wdi_stats = ipa2_get_wdi_stats;
	api_ctrl->ipa_get_smem_restr_bytes = ipa2_get_smem_restr_bytes;
	api_ctrl->ipa_broadcast_wdi_quota_reach_ind =
		ipa2_broadcast_wdi_quota_reach_ind;
	api_ctrl->ipa_uc_wdi_get_dbpa = ipa2_uc_wdi_get_dbpa;
	api_ctrl->ipa_uc_reg_rdyCB = ipa2_uc_reg_rdyCB;
	api_ctrl->ipa_uc_dereg_rdyCB = ipa2_uc_dereg_rdyCB;
	api_ctrl->ipa_create_wdi_mapping = ipa2_create_wdi_mapping;
	api_ctrl->ipa_release_wdi_mapping = ipa2_release_wdi_mapping;
	/* tethering bridge */
	api_ctrl->teth_bridge_init = ipa2_teth_bridge_init;
	api_ctrl->teth_bridge_disconnect = ipa2_teth_bridge_disconnect;
	api_ctrl->teth_bridge_connect = ipa2_teth_bridge_connect;
	api_ctrl->ipa_set_client = ipa2_set_client;
	api_ctrl->ipa_get_client = ipa2_get_client;
	api_ctrl->ipa_get_client_uplink = ipa2_get_client_uplink;
	/* IPA DMA */
	api_ctrl->ipa_dma_init = ipa2_dma_init;
	api_ctrl->ipa_dma_enable = ipa2_dma_enable;
	api_ctrl->ipa_dma_disable = ipa2_dma_disable;
	api_ctrl->ipa_dma_sync_memcpy = ipa2_dma_sync_memcpy;
	api_ctrl->ipa_dma_async_memcpy = ipa2_dma_async_memcpy;
	api_ctrl->ipa_dma_uc_memcpy = ipa2_dma_uc_memcpy;
	api_ctrl->ipa_dma_destroy = ipa2_dma_destroy;
	/* MHI */
	api_ctrl->ipa_mhi_init_engine = ipa2_mhi_init_engine;
	api_ctrl->ipa_connect_mhi_pipe = ipa2_connect_mhi_pipe;
	api_ctrl->ipa_disconnect_mhi_pipe = ipa2_disconnect_mhi_pipe;
	api_ctrl->ipa_uc_mhi_reset_channel = ipa2_uc_mhi_reset_channel;
	api_ctrl->ipa_mhi_sps_channel_empty = ipa2_mhi_sps_channel_empty;
	api_ctrl->ipa_generate_tag_process = ipa2_generate_tag_process;
	api_ctrl->ipa_disable_sps_pipe = ipa2_disable_sps_pipe;
	api_ctrl->ipa_qmi_enable_force_clear_datapath_send =
		qmi_enable_force_clear_datapath_send;
	api_ctrl->ipa_qmi_disable_force_clear_datapath_send =
		qmi_disable_force_clear_datapath_send;
	api_ctrl->ipa_mhi_reset_channel_internal =
		ipa2_mhi_reset_channel_internal;
	api_ctrl->ipa_mhi_start_channel_internal =
		ipa2_mhi_start_channel_internal;
	api_ctrl->ipa_mhi_resume_channels_internal =
		ipa2_mhi_resume_channels_internal;
	api_ctrl->ipa_uc_mhi_send_dl_ul_sync_info =
		ipa2_uc_mhi_send_dl_ul_sync_info;
	api_ctrl->ipa_uc_mhi_init = ipa2_uc_mhi_init;
	api_ctrl->ipa_uc_mhi_suspend_channel = ipa2_uc_mhi_suspend_channel;
	api_ctrl->ipa_uc_mhi_stop_event_update_channel =
		ipa2_uc_mhi_stop_event_update_channel;
	api_ctrl->ipa_uc_mhi_cleanup = ipa2_uc_mhi_cleanup;
	api_ctrl->ipa_uc_mhi_print_stats = ipa2_uc_mhi_print_stats;
	api_ctrl->ipa_uc_state_check = ipa2_uc_state_check;
	api_ctrl->ipa_write_qmap_id = ipa2_write_qmap_id;
	/* interrupts, power and misc */
	api_ctrl->ipa_add_interrupt_handler = ipa2_add_interrupt_handler;
	api_ctrl->ipa_remove_interrupt_handler = ipa2_remove_interrupt_handler;
	api_ctrl->ipa_restore_suspend_handler = ipa2_restore_suspend_handler;
	api_ctrl->ipa_bam_reg_dump = ipa2_bam_reg_dump;
	api_ctrl->ipa_get_ep_mapping = ipa2_get_ep_mapping;
	api_ctrl->ipa_is_ready = ipa2_is_ready;
	api_ctrl->ipa_proxy_clk_vote = ipa2_proxy_clk_vote;
	api_ctrl->ipa_proxy_clk_unvote = ipa2_proxy_clk_unvote;
	api_ctrl->ipa_is_client_handle_valid = ipa2_is_client_handle_valid;
	api_ctrl->ipa_get_client_mapping = ipa2_get_client_mapping;
	api_ctrl->ipa_get_rm_resource_from_ep = ipa2_get_rm_resource_from_ep;
	api_ctrl->ipa_get_modem_cfg_emb_pipe_flt =
		ipa2_get_modem_cfg_emb_pipe_flt;
	api_ctrl->ipa_get_transport_type = ipa2_get_transport_type;
	api_ctrl->ipa_ap_suspend = ipa2_ap_suspend;
	api_ctrl->ipa_ap_resume = ipa2_ap_resume;
	api_ctrl->ipa_get_smmu_domain = ipa2_get_smmu_domain;
	api_ctrl->ipa_disable_apps_wan_cons_deaggr =
		ipa2_disable_apps_wan_cons_deaggr;
	api_ctrl->ipa_get_dma_dev = ipa2_get_dma_dev;
	/* GSI entries are stubs on v2.x HW (SPS transport only) */
	api_ctrl->ipa_get_gsi_ep_info = ipa2_get_gsi_ep_info;
	api_ctrl->ipa_stop_gsi_channel = ipa2_stop_gsi_channel;
	api_ctrl->ipa_register_ipa_ready_cb = ipa2_register_ipa_ready_cb;
	api_ctrl->ipa_inc_client_enable_clks = ipa2_inc_client_enable_clks;
	api_ctrl->ipa_dec_client_disable_clks = ipa2_dec_client_disable_clks;
	api_ctrl->ipa_inc_client_enable_clks_no_block =
		ipa2_inc_client_enable_clks_no_block;
	api_ctrl->ipa_suspend_resource_no_block =
		ipa2_suspend_resource_no_block;
	api_ctrl->ipa_resume_resource = ipa2_resume_resource;
	api_ctrl->ipa_suspend_resource_sync = ipa2_suspend_resource_sync;
	api_ctrl->ipa_set_required_perf_profile =
		ipa2_set_required_perf_profile;
	api_ctrl->ipa_get_ipc_logbuf = ipa2_get_ipc_logbuf;
	api_ctrl->ipa_get_ipc_logbuf_low = ipa2_get_ipc_logbuf_low;
	api_ctrl->ipa_rx_poll = ipa2_rx_poll;
	api_ctrl->ipa_recycle_wan_skb = ipa2_recycle_wan_skb;
	/* uC NTN offload and WDI3 */
	api_ctrl->ipa_setup_uc_ntn_pipes = ipa2_setup_uc_ntn_pipes;
	api_ctrl->ipa_tear_down_uc_offload_pipes =
		ipa2_tear_down_uc_offload_pipes;
	api_ctrl->ipa_get_pdev = ipa2_get_pdev;
	api_ctrl->ipa_ntn_uc_reg_rdyCB = ipa2_ntn_uc_reg_rdyCB;
	api_ctrl->ipa_ntn_uc_dereg_rdyCB = ipa2_ntn_uc_dereg_rdyCB;
	api_ctrl->ipa_conn_wdi3_pipes = ipa2_conn_wdi3_pipes;
	api_ctrl->ipa_disconn_wdi3_pipes = ipa2_disconn_wdi3_pipes;
	api_ctrl->ipa_enable_wdi3_pipes = ipa2_enable_wdi3_pipes;
	api_ctrl->ipa_disable_wdi3_pipes = ipa2_disable_wdi3_pipes;

	return 0;
}
5103
5104/**
5105 * ipa_get_sys_yellow_wm()- Return yellow WM value for IPA SYS pipes.
5106 *
5107 * Return value: IPA_YELLOW_MARKER_SYS_CFG_OFST register if IPA_HW_v2.6L,
5108 * IPA_DEFAULT_SYS_YELLOW_WM otherwise.
5109 */
5110u32 ipa_get_sys_yellow_wm(struct ipa_sys_context *sys)
5111{
Utkarsh Saxena4badc042017-03-03 15:38:45 +05305112 if (ipa_ctx->ipa_hw_type == IPA_HW_v2_6L &&
5113 ipa_ctx->ipa_uc_monitor_holb) {
Amir Levy9659e592016-10-27 18:08:27 +03005114 return ipa_read_reg(ipa_ctx->mmio,
5115 IPA_YELLOW_MARKER_SYS_CFG_OFST);
5116 } else {
5117 if (!sys)
5118 return 0;
5119
5120 return IPA_DEFAULT_SYS_YELLOW_WM * sys->rx_buff_sz;
5121 }
5122}
5123EXPORT_SYMBOL(ipa_get_sys_yellow_wm);
5124
5125void ipa_suspend_apps_pipes(bool suspend)
5126{
5127 struct ipa_ep_cfg_ctrl cfg;
5128 int ipa_ep_idx;
5129 u32 lan_empty = 0, wan_empty = 0;
5130 int ret;
5131 struct sps_event_notify notify;
5132 struct ipa_ep_context *ep;
5133
5134 memset(&cfg, 0, sizeof(cfg));
5135 cfg.ipa_ep_suspend = suspend;
5136
5137 ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
5138 ep = &ipa_ctx->ep[ipa_ep_idx];
5139 if (ep->valid) {
5140 ipa2_cfg_ep_ctrl(ipa_ep_idx, &cfg);
5141 /* Check if the pipes are empty. */
5142 ret = sps_is_pipe_empty(ep->ep_hdl, &lan_empty);
5143 if (ret) {
5144 IPAERR("%s: sps_is_pipe_empty failed with %d\n",
5145 __func__, ret);
5146 }
5147 if (!lan_empty) {
5148 IPADBG("LAN Cons is not-empty. Enter poll mode.\n");
5149 notify.user = ep->sys;
5150 notify.event_id = SPS_EVENT_EOT;
5151 if (ep->sys->sps_callback)
5152 ep->sys->sps_callback(&notify);
5153 }
5154 }
5155
5156 ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
5157 /* Considering the case for SSR. */
5158 if (ipa_ep_idx == -1) {
5159 IPADBG("Invalid client.\n");
5160 return;
5161 }
5162 ep = &ipa_ctx->ep[ipa_ep_idx];
5163 if (ep->valid) {
5164 ipa2_cfg_ep_ctrl(ipa_ep_idx, &cfg);
5165 /* Check if the pipes are empty. */
5166 ret = sps_is_pipe_empty(ep->ep_hdl, &wan_empty);
5167 if (ret) {
5168 IPAERR("%s: sps_is_pipe_empty failed with %d\n",
5169 __func__, ret);
5170 }
5171 if (!wan_empty) {
5172 IPADBG("WAN Cons is not-empty. Enter poll mode.\n");
5173 notify.user = ep->sys;
5174 notify.event_id = SPS_EVENT_EOT;
5175 if (ep->sys->sps_callback)
5176 ep->sys->sps_callback(&notify);
5177 }
5178 }
5179}
Amir Levyc4222c92016-11-07 16:14:54 +02005180
5181/**
5182 * ipa2_get_pdev() - return a pointer to IPA dev struct
5183 *
5184 * Return value: a pointer to IPA dev struct
5185 *
5186 */
5187struct device *ipa2_get_pdev(void)
5188{
5189 if (!ipa_ctx)
5190 return NULL;
5191
5192 return ipa_ctx->pdev;
5193}