blob: 8a3fbd45a71541c6085cb4edd1d7cc49ed313958 [file] [log] [blame]
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05301/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Amir Levy9659e592016-10-27 18:08:27 +03002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <net/ip.h>
14#include <linux/genalloc.h> /* gen_pool_alloc() */
15#include <linux/io.h>
16#include <linux/ratelimit.h>
17#include <linux/msm-bus.h>
18#include <linux/msm-bus-board.h>
19#include "ipa_i.h"
20#include "../ipa_rm_i.h"
21
/* IPA core clock rates per HW generation (Hz) */
#define IPA_V1_CLK_RATE (92.31 * 1000 * 1000UL)
#define IPA_V1_1_CLK_RATE (100 * 1000 * 1000UL)
#define IPA_V2_0_CLK_RATE_SVS (75 * 1000 * 1000UL)
#define IPA_V2_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL)
#define IPA_V2_0_CLK_RATE_TURBO (200 * 1000 * 1000UL)
/*
 * Max HOLB timer values per HW generation. Note the v2.5 value is
 * 2^32 - 1; the literal 4294967296 only fits in a 64-bit constant.
 */
#define IPA_V1_MAX_HOLB_TMR_VAL (512 - 1)
#define IPA_V2_0_MAX_HOLB_TMR_VAL (65536 - 1)
#define IPA_V2_5_MAX_HOLB_TMR_VAL (4294967296 - 1)
#define IPA_V2_6L_MAX_HOLB_TMR_VAL IPA_V2_5_MAX_HOLB_TMR_VAL

/* Bandwidth thresholds (Mbps) for stepping the v2.0 clock/bus vote */
#define IPA_V2_0_BW_THRESHOLD_TURBO_MBPS (1000)
#define IPA_V2_0_BW_THRESHOLD_NOMINAL_MBPS (600)

/* Max pipes + ICs for TAG process */
#define IPA_TAG_MAX_DESC (IPA_MAX_NUM_PIPES + 6)

/* usleep_range() bounds used after suspending pipes, and TAG timeout */
#define IPA_TAG_SLEEP_MIN_USEC (1000)
#define IPA_TAG_SLEEP_MAX_USEC (2000)
#define IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT (10 * HZ)
/* Value programmed into IPA_BCR by ipa_init_hw() on HW >= v2.5 */
#define IPA_BCR_REG_VAL (0x001FFF7F)
#define IPA_AGGR_GRAN_MIN (1)
#define IPA_AGGR_GRAN_MAX (32)
#define IPA_EOT_COAL_GRAN_MIN (1)
#define IPA_EOT_COAL_GRAN_MAX (16)
#define MSEC 1000
#define MIN_RX_POLL_TIME 1
#define MAX_RX_POLL_TIME 5
#define UPPER_CUTOFF 50
#define LOWER_CUTOFF 10

#define IPA_DEFAULT_SYS_YELLOW_WM 32

/* Max aggregation byte/packet limits derived from the register field widths */
#define IPA_AGGR_BYTE_LIMIT (\
		IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK >> \
		IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT)
#define IPA_AGGR_PKT_LIMIT (\
		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >> \
		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT)

/* Available filter-equation register IDs; the trailing -1 terminates each list */
static const int ipa_ofst_meq32[] = { IPA_OFFSET_MEQ32_0,
	IPA_OFFSET_MEQ32_1, -1 };
static const int ipa_ofst_meq128[] = { IPA_OFFSET_MEQ128_0,
	IPA_OFFSET_MEQ128_1, -1 };
static const int ipa_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0,
	IPA_IHL_OFFSET_RANGE16_1, -1 };
static const int ipa_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0,
	IPA_IHL_OFFSET_MEQ32_1, -1 };
/* First index into ep_mapping[][]: HW-generation bucket */
#define IPA_1_1 (0)
#define IPA_2_0 (1)
#define IPA_2_6L (2)

/* Returned by ipa2_get_ep_mapping() when a client has no pipe */
#define INVALID_EP_MAPPING_INDEX (-1)
/*
 * Per-(HW generation, client) endpoint mapping entry for ep_mapping[][].
 * @valid: true when the client exists on that HW generation
 * @pipe_num: HW pipe index assigned to the client
 * NOTE(review): "confing" looks like a typo for "config"; kept as-is
 * since the name is referenced by ep_mapping below.
 */
struct ipa_ep_confing {
	bool valid;
	int pipe_num;
};
Amir Levy9659e592016-10-27 18:08:27 +030079
/*
 * Logical client -> HW pipe number, per HW generation
 * (IPA_1_1 / IPA_2_0 / IPA_2_6L). Entries not listed stay
 * zero-initialized, i.e. .valid == false. Consumed by
 * ipa2_get_ep_mapping().
 */
static const struct ipa_ep_confing ep_mapping[3][IPA_CLIENT_MAX] = {
	[IPA_1_1][IPA_CLIENT_HSIC1_PROD] = {true, 19},
	[IPA_1_1][IPA_CLIENT_HSIC2_PROD] = {true, 12},
	[IPA_1_1][IPA_CLIENT_USB2_PROD] = {true, 12},
	[IPA_1_1][IPA_CLIENT_HSIC3_PROD] = {true, 13},
	[IPA_1_1][IPA_CLIENT_USB3_PROD] = {true, 13},
	[IPA_1_1][IPA_CLIENT_HSIC4_PROD] = {true, 0},
	[IPA_1_1][IPA_CLIENT_USB4_PROD] = {true, 0},
	[IPA_1_1][IPA_CLIENT_USB_PROD] = {true, 11},
	[IPA_1_1][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = {true, 15},
	[IPA_1_1][IPA_CLIENT_A2_EMBEDDED_PROD] = {true, 8},
	[IPA_1_1][IPA_CLIENT_A2_TETHERED_PROD] = {true, 6},
	[IPA_1_1][IPA_CLIENT_APPS_LAN_WAN_PROD] = {true, 2},
	[IPA_1_1][IPA_CLIENT_APPS_CMD_PROD] = {true, 1},
	[IPA_1_1][IPA_CLIENT_Q6_LAN_PROD] = {true, 5},

	[IPA_1_1][IPA_CLIENT_HSIC1_CONS] = {true, 14},
	[IPA_1_1][IPA_CLIENT_HSIC2_CONS] = {true, 16},
	[IPA_1_1][IPA_CLIENT_USB2_CONS] = {true, 16},
	[IPA_1_1][IPA_CLIENT_HSIC3_CONS] = {true, 17},
	[IPA_1_1][IPA_CLIENT_USB3_CONS] = {true, 17},
	[IPA_1_1][IPA_CLIENT_HSIC4_CONS] = {true, 18},
	[IPA_1_1][IPA_CLIENT_USB4_CONS] = {true, 18},
	[IPA_1_1][IPA_CLIENT_USB_CONS] = {true, 10},
	[IPA_1_1][IPA_CLIENT_A2_EMBEDDED_CONS] = {true, 9},
	[IPA_1_1][IPA_CLIENT_A2_TETHERED_CONS] = {true, 7},
	[IPA_1_1][IPA_CLIENT_A5_LAN_WAN_CONS] = {true, 3},
	[IPA_1_1][IPA_CLIENT_Q6_LAN_CONS] = {true, 4},


	[IPA_2_0][IPA_CLIENT_HSIC1_PROD] = {true, 12},
	[IPA_2_0][IPA_CLIENT_WLAN1_PROD] = {true, 18},
	[IPA_2_0][IPA_CLIENT_USB2_PROD] = {true, 12},
	[IPA_2_0][IPA_CLIENT_USB3_PROD] = {true, 13},
	[IPA_2_0][IPA_CLIENT_USB4_PROD] = {true, 0},
	[IPA_2_0][IPA_CLIENT_USB_PROD] = {true, 11},
	[IPA_2_0][IPA_CLIENT_APPS_LAN_WAN_PROD] = {true, 4},
	[IPA_2_0][IPA_CLIENT_APPS_CMD_PROD] = {true, 3},
	[IPA_2_0][IPA_CLIENT_ODU_PROD] = {true, 12},
	[IPA_2_0][IPA_CLIENT_MHI_PROD] = {true, 18},
	[IPA_2_0][IPA_CLIENT_Q6_LAN_PROD] = {true, 6},
	[IPA_2_0][IPA_CLIENT_Q6_CMD_PROD] = {true, 7},
	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = {true, 12},
	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = {true, 19},
	[IPA_2_0][IPA_CLIENT_ETHERNET_PROD] = {true, 12},
	/* Only for test purpose */
	[IPA_2_0][IPA_CLIENT_TEST_PROD] = {true, 19},
	[IPA_2_0][IPA_CLIENT_TEST1_PROD] = {true, 19},
	[IPA_2_0][IPA_CLIENT_TEST2_PROD] = {true, 12},
	[IPA_2_0][IPA_CLIENT_TEST3_PROD] = {true, 11},
	[IPA_2_0][IPA_CLIENT_TEST4_PROD] = {true, 0},

	[IPA_2_0][IPA_CLIENT_HSIC1_CONS] = {true, 13},
	[IPA_2_0][IPA_CLIENT_WLAN1_CONS] = {true, 17},
	[IPA_2_0][IPA_CLIENT_WLAN2_CONS] = {true, 16},
	[IPA_2_0][IPA_CLIENT_WLAN3_CONS] = {true, 14},
	[IPA_2_0][IPA_CLIENT_WLAN4_CONS] = {true, 19},
	[IPA_2_0][IPA_CLIENT_USB_CONS] = {true, 15},
	[IPA_2_0][IPA_CLIENT_USB_DPL_CONS] = {true, 0},
	[IPA_2_0][IPA_CLIENT_APPS_LAN_CONS] = {true, 2},
	[IPA_2_0][IPA_CLIENT_APPS_WAN_CONS] = {true, 5},
	[IPA_2_0][IPA_CLIENT_ODU_EMB_CONS] = {true, 13},
	[IPA_2_0][IPA_CLIENT_ODU_TETH_CONS] = {true, 1},
	[IPA_2_0][IPA_CLIENT_MHI_CONS] = {true, 17},
	[IPA_2_0][IPA_CLIENT_Q6_LAN_CONS] = {true, 8},
	[IPA_2_0][IPA_CLIENT_Q6_WAN_CONS] = {true, 9},
	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = {true, 13},
	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = {true, 16},
	[IPA_2_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = {true, 10},
	[IPA_2_0][IPA_CLIENT_ETHERNET_CONS] = {true, 1},

	/* Only for test purpose */
	[IPA_2_0][IPA_CLIENT_TEST_CONS] = {true, 1},
	[IPA_2_0][IPA_CLIENT_TEST1_CONS] = {true, 1},
	[IPA_2_0][IPA_CLIENT_TEST2_CONS] = {true, 16},
	[IPA_2_0][IPA_CLIENT_TEST3_CONS] = {true, 13},
	[IPA_2_0][IPA_CLIENT_TEST4_CONS] = {true, 15},


	[IPA_2_6L][IPA_CLIENT_APPS_LAN_WAN_PROD] = {true, 4},
	[IPA_2_6L][IPA_CLIENT_APPS_CMD_PROD] = {true, 3},
	[IPA_2_6L][IPA_CLIENT_Q6_LAN_PROD] = {true, 6},
	[IPA_2_6L][IPA_CLIENT_Q6_CMD_PROD] = {true, 7},
	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP_PROD] = {true, 11},
	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP2_PROD] = {true, 13},

	/* Only for test purpose */
	[IPA_2_6L][IPA_CLIENT_TEST_PROD] = {true, 11},
	[IPA_2_6L][IPA_CLIENT_TEST1_PROD] = {true, 11},
	[IPA_2_6L][IPA_CLIENT_TEST2_PROD] = {true, 12},
	[IPA_2_6L][IPA_CLIENT_TEST3_PROD] = {true, 13},
	[IPA_2_6L][IPA_CLIENT_TEST4_PROD] = {true, 14},

	[IPA_2_6L][IPA_CLIENT_USB_CONS] = {true, 0},
	[IPA_2_6L][IPA_CLIENT_USB_DPL_CONS] = {true, 10},
	[IPA_2_6L][IPA_CLIENT_APPS_LAN_CONS] = {true, 2},
	[IPA_2_6L][IPA_CLIENT_APPS_WAN_CONS] = {true, 5},
	[IPA_2_6L][IPA_CLIENT_Q6_LAN_CONS] = {true, 8},
	[IPA_2_6L][IPA_CLIENT_Q6_WAN_CONS] = {true, 9},
	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP_CONS] = {true, 12},
	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP2_CONS] = {true, 14},

	/* Only for test purpose */
	[IPA_2_6L][IPA_CLIENT_TEST_CONS] = {true, 15},
	[IPA_2_6L][IPA_CLIENT_TEST1_CONS] = {true, 15},
	[IPA_2_6L][IPA_CLIENT_TEST2_CONS] = {true, 0},
	[IPA_2_6L][IPA_CLIENT_TEST3_CONS] = {true, 1},
	[IPA_2_6L][IPA_CLIENT_TEST4_CONS] = {true, 10},
};
194
/* Bus votes for IPA v1.1 init/idle state (all zero = no bandwidth vote) */
static struct msm_bus_vectors ipa_init_vectors_v1_1[] = {
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	},
	{
		.src = MSM_BUS_MASTER_BAM_DMA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	},
	{
		.src = MSM_BUS_MASTER_BAM_DMA,
		.dst = MSM_BUS_SLAVE_OCIMEM,
		.ab = 0,
		.ib = 0,
	},
};
215
/* Bus votes for IPA v2.0 init/idle state (all zero = no bandwidth vote) */
static struct msm_bus_vectors ipa_init_vectors_v2_0[] = {
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	},
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_OCIMEM,
		.ab = 0,
		.ib = 0,
	},
};
230
/* Bus votes for IPA v1.1 maximum-performance state */
static struct msm_bus_vectors ipa_max_perf_vectors_v1_1[] = {
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 50000000,
		.ib = 960000000,
	},
	{
		.src = MSM_BUS_MASTER_BAM_DMA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 50000000,
		.ib = 960000000,
	},
	{
		.src = MSM_BUS_MASTER_BAM_DMA,
		.dst = MSM_BUS_SLAVE_OCIMEM,
		.ab = 50000000,
		.ib = 960000000,
	},
};
251
/* Bus votes for IPA v2.0 nominal-performance state */
static struct msm_bus_vectors ipa_nominal_perf_vectors_v2_0[] = {
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 100000000,
		.ib = 1300000000,
	},
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_OCIMEM,
		.ab = 100000000,
		.ib = 1300000000,
	},
};
266
/* v1.1 usecases: index 0 = init/idle vote, index 1 = max-perf vote */
static struct msm_bus_paths ipa_usecases_v1_1[] = {
	{
		ARRAY_SIZE(ipa_init_vectors_v1_1),
		ipa_init_vectors_v1_1,
	},
	{
		ARRAY_SIZE(ipa_max_perf_vectors_v1_1),
		ipa_max_perf_vectors_v1_1,
	},
};
277
/* v2.0 usecases: index 0 = init/idle vote, index 1 = nominal-perf vote */
static struct msm_bus_paths ipa_usecases_v2_0[] = {
	{
		ARRAY_SIZE(ipa_init_vectors_v2_0),
		ipa_init_vectors_v2_0,
	},
	{
		ARRAY_SIZE(ipa_nominal_perf_vectors_v2_0),
		ipa_nominal_perf_vectors_v2_0,
	},
};
288
/* Bus-scaling client registration data for IPA v1.1 */
static struct msm_bus_scale_pdata ipa_bus_client_pdata_v1_1 = {
	.usecase = ipa_usecases_v1_1,
	.num_usecases = ARRAY_SIZE(ipa_usecases_v1_1),
	.name = "ipa",
};
294
/* Bus-scaling client registration data for IPA v2.0 */
static struct msm_bus_scale_pdata ipa_bus_client_pdata_v2_0 = {
	.usecase = ipa_usecases_v2_0,
	.num_usecases = ARRAY_SIZE(ipa_usecases_v2_0),
	.name = "ipa",
};
300
/*
 * ipa_active_clients_lock() - acquire the active-clients mutex (may sleep).
 * The mutex_locked flag is flipped under the spinlock so that
 * ipa_active_clients_trylock() callers can observe the mutex is held
 * without ever sleeping on it.
 */
void ipa_active_clients_lock(void)
{
	unsigned long flags;

	mutex_lock(&ipa_ctx->ipa_active_clients.mutex);
	spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, flags);
	ipa_ctx->ipa_active_clients.mutex_locked = true;
	spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, flags);
}
310
/*
 * ipa_active_clients_trylock() - non-sleeping attempt to enter the
 * active-clients critical section.
 * @flags: receives the saved IRQ flags from spin_lock_irqsave().
 *
 * Returns 0 (failure, spinlock released) if a mutex holder is active;
 * returns 1 with the spinlock STILL HELD -- the caller must release it
 * with ipa_active_clients_trylock_unlock().
 */
int ipa_active_clients_trylock(unsigned long *flags)
{
	spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, *flags);
	if (ipa_ctx->ipa_active_clients.mutex_locked) {
		spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock,
					*flags);
		return 0;
	}

	return 1;
}
322
/*
 * ipa_active_clients_trylock_unlock() - release the spinlock taken by a
 * successful ipa_active_clients_trylock().
 * @flags: the IRQ flags previously filled in by the trylock call.
 */
void ipa_active_clients_trylock_unlock(unsigned long *flags)
{
	spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, *flags);
}
327
/*
 * ipa_active_clients_unlock() - counterpart of ipa_active_clients_lock().
 * Clears mutex_locked under the spinlock before dropping the mutex, so
 * trylock callers never see the flag clear while the mutex is still held.
 */
void ipa_active_clients_unlock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, flags);
	ipa_ctx->ipa_active_clients.mutex_locked = false;
	spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, flags);
	mutex_unlock(&ipa_ctx->ipa_active_clients.mutex);
}
337
/**
 * ipa_get_clients_from_rm_resource() - get IPA clients which are related to an
 * IPA_RM resource
 *
 * @resource: [IN] IPA Resource Manager resource
 * @clients: [OUT] Empty array which will contain the list of clients. The
 *           caller must initialize this array.
 *
 * Return codes: 0 on success, negative on failure.
 */
int ipa_get_clients_from_rm_resource(
	enum ipa_rm_resource_name resource,
	struct ipa_client_names *clients)
{
	int i = 0;

	if (resource < 0 ||
	    resource >= IPA_RM_RESOURCE_MAX ||
	    !clients) {
		IPAERR("Bad parameters\n");
		return -EINVAL;
	}

	switch (resource) {
	case IPA_RM_RESOURCE_USB_CONS:
		clients->names[i++] = IPA_CLIENT_USB_CONS;
		break;
	case IPA_RM_RESOURCE_HSIC_CONS:
		clients->names[i++] = IPA_CLIENT_HSIC1_CONS;
		break;
	case IPA_RM_RESOURCE_WLAN_CONS:
		/* one RM resource fans out to all four WLAN consumer pipes */
		clients->names[i++] = IPA_CLIENT_WLAN1_CONS;
		clients->names[i++] = IPA_CLIENT_WLAN2_CONS;
		clients->names[i++] = IPA_CLIENT_WLAN3_CONS;
		clients->names[i++] = IPA_CLIENT_WLAN4_CONS;
		break;
	case IPA_RM_RESOURCE_MHI_CONS:
		clients->names[i++] = IPA_CLIENT_MHI_CONS;
		break;
	case IPA_RM_RESOURCE_ODU_ADAPT_CONS:
		clients->names[i++] = IPA_CLIENT_ODU_EMB_CONS;
		clients->names[i++] = IPA_CLIENT_ODU_TETH_CONS;
		break;
	case IPA_RM_RESOURCE_ETHERNET_CONS:
		clients->names[i++] = IPA_CLIENT_ETHERNET_CONS;
		break;
	case IPA_RM_RESOURCE_USB_PROD:
		clients->names[i++] = IPA_CLIENT_USB_PROD;
		break;
	case IPA_RM_RESOURCE_HSIC_PROD:
		clients->names[i++] = IPA_CLIENT_HSIC1_PROD;
		break;
	case IPA_RM_RESOURCE_MHI_PROD:
		clients->names[i++] = IPA_CLIENT_MHI_PROD;
		break;
	case IPA_RM_RESOURCE_ODU_ADAPT_PROD:
		clients->names[i++] = IPA_CLIENT_ODU_PROD;
		break;
	case IPA_RM_RESOURCE_ETHERNET_PROD:
		clients->names[i++] = IPA_CLIENT_ETHERNET_PROD;
		break;
	default:
		/* unmapped resources yield an empty list, not an error */
		break;
	}
	clients->length = i;

	return 0;
}
406
407/**
408 * ipa_should_pipe_be_suspended() - returns true when the client's pipe should
409 * be suspended during a power save scenario. False otherwise.
410 *
411 * @client: [IN] IPA client
412 */
413bool ipa_should_pipe_be_suspended(enum ipa_client_type client)
414{
415 struct ipa_ep_context *ep;
416 int ipa_ep_idx;
417
418 ipa_ep_idx = ipa2_get_ep_mapping(client);
419 if (ipa_ep_idx == -1) {
420 IPAERR("Invalid client.\n");
421 WARN_ON(1);
422 return false;
423 }
424
425 ep = &ipa_ctx->ep[ipa_ep_idx];
426
427 if (ep->keep_ipa_awake)
428 return false;
429
Skylar Chang79699ec2016-11-18 10:21:33 -0800430 if (client == IPA_CLIENT_USB_CONS ||
431 client == IPA_CLIENT_MHI_CONS ||
432 client == IPA_CLIENT_HSIC1_CONS ||
433 client == IPA_CLIENT_WLAN1_CONS ||
434 client == IPA_CLIENT_WLAN2_CONS ||
435 client == IPA_CLIENT_WLAN3_CONS ||
436 client == IPA_CLIENT_WLAN4_CONS ||
437 client == IPA_CLIENT_ODU_EMB_CONS ||
Sunil Paidimarri5139aa22017-02-13 11:07:32 -0800438 client == IPA_CLIENT_ODU_TETH_CONS ||
439 client == IPA_CLIENT_ETHERNET_CONS)
Amir Levy9659e592016-10-27 18:08:27 +0300440 return true;
441
442 return false;
443}
444
/**
 * ipa2_suspend_resource_sync() - suspend client endpoints related to the IPA_RM
 * resource and decrement active clients counter, which may result in clock
 * gating of IPA clocks.
 *
 * @resource: [IN] IPA Resource Manager resource
 *
 * Return codes: 0 on success, negative on failure.
 * May sleep (usleep_range) -- do not call from atomic context; use
 * ipa2_suspend_resource_no_block() there.
 */
int ipa2_suspend_resource_sync(enum ipa_rm_resource_name resource)
{
	struct ipa_client_names clients;
	int res;
	int index;
	struct ipa_ep_cfg_ctrl suspend;
	enum ipa_client_type client;
	int ipa_ep_idx;
	bool pipe_suspended = false;

	memset(&clients, 0, sizeof(clients));
	res = ipa_get_clients_from_rm_resource(resource, &clients);
	if (res) {
		IPAERR("Bad params.\n");
		return res;
	}

	for (index = 0; index < clients.length; index++) {
		client = clients.names[index];
		ipa_ep_idx = ipa2_get_ep_mapping(client);
		if (ipa_ep_idx == -1) {
			/*
			 * NOTE(review): res is set here but the function
			 * still returns 0 below; invalid clients are simply
			 * skipped -- confirm this is intended.
			 */
			IPAERR("Invalid client.\n");
			res = -EINVAL;
			continue;
		}
		ipa_ctx->resume_on_connect[client] = false;
		if (ipa_ctx->ep[ipa_ep_idx].client == client &&
		    ipa_should_pipe_be_suspended(client)) {
			if (ipa_ctx->ep[ipa_ep_idx].valid) {
				/* suspend endpoint */
				memset(&suspend, 0, sizeof(suspend));
				suspend.ipa_ep_suspend = true;
				ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend);
				pipe_suspended = true;
			}
		}
	}
	/* Sleep ~1 msec */
	if (pipe_suspended)
		usleep_range(1000, 2000);

	/* before gating IPA clocks do TAG process */
	ipa_ctx->tag_process_before_gating = true;
	IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(resource));

	return 0;
}
501
/**
 * ipa2_suspend_resource_no_block() - suspend client endpoints related to the
 * IPA_RM resource and decrement active clients counter. This function is
 * guaranteed to avoid sleeping.
 *
 * @resource: [IN] IPA Resource Manager resource
 *
 * Return codes: 0 on success, negative on failure.
 */
int ipa2_suspend_resource_no_block(enum ipa_rm_resource_name resource)
{
	int res;
	struct ipa_client_names clients;
	int index;
	enum ipa_client_type client;
	struct ipa_ep_cfg_ctrl suspend;
	int ipa_ep_idx;
	unsigned long flags;
	struct ipa_active_client_logging_info log_info;

	/* spinlock-only entry; bail out rather than sleep on the mutex */
	if (ipa_active_clients_trylock(&flags) == 0)
		return -EPERM;
	/* refuse to drop the last active-client reference from this path */
	if (ipa_ctx->ipa_active_clients.cnt == 1) {
		res = -EPERM;
		goto bail;
	}

	memset(&clients, 0, sizeof(clients));
	res = ipa_get_clients_from_rm_resource(resource, &clients);
	if (res) {
		IPAERR("ipa_get_clients_from_rm_resource() failed, name = %d.\n"
			, resource);
		goto bail;
	}

	for (index = 0; index < clients.length; index++) {
		client = clients.names[index];
		ipa_ep_idx = ipa2_get_ep_mapping(client);
		if (ipa_ep_idx == -1) {
			/* unlike the sync variant, this error IS returned */
			IPAERR("Invalid client.\n");
			res = -EINVAL;
			continue;
		}
		ipa_ctx->resume_on_connect[client] = false;
		if (ipa_ctx->ep[ipa_ep_idx].client == client &&
		    ipa_should_pipe_be_suspended(client)) {
			if (ipa_ctx->ep[ipa_ep_idx].valid) {
				/* suspend endpoint */
				memset(&suspend, 0, sizeof(suspend));
				suspend.ipa_ep_suspend = true;
				ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend);
			}
		}
	}

	/* decrement the active-clients count only if every client mapped */
	if (res == 0) {
		IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
				ipa_rm_resource_str(resource));
		ipa2_active_clients_log_dec(&log_info, true);
		ipa_ctx->ipa_active_clients.cnt--;
		IPADBG("active clients = %d\n",
		       ipa_ctx->ipa_active_clients.cnt);
	}
bail:
	ipa_active_clients_trylock_unlock(&flags);

	return res;
}
570
/**
 * ipa2_resume_resource() - resume client endpoints related to the IPA_RM
 * resource.
 *
 * @resource: [IN] IPA Resource Manager resource
 *
 * Return codes: 0 on success, negative on failure (last mapping error
 * encountered; remaining clients are still processed).
 */
int ipa2_resume_resource(enum ipa_rm_resource_name resource)
{

	struct ipa_client_names clients;
	int res;
	int index;
	struct ipa_ep_cfg_ctrl suspend;
	enum ipa_client_type client;
	int ipa_ep_idx;

	memset(&clients, 0, sizeof(clients));
	res = ipa_get_clients_from_rm_resource(resource, &clients);
	if (res) {
		IPAERR("ipa_get_clients_from_rm_resource() failed.\n");
		return res;
	}

	for (index = 0; index < clients.length; index++) {
		client = clients.names[index];
		ipa_ep_idx = ipa2_get_ep_mapping(client);
		if (ipa_ep_idx == -1) {
			IPAERR("Invalid client.\n");
			res = -EINVAL;
			continue;
		}
		/*
		 * The related ep, will be resumed on connect
		 * while its resource is granted
		 */
		ipa_ctx->resume_on_connect[client] = true;
		IPADBG("%d will be resumed on connect.\n", client);
		if (ipa_ctx->ep[ipa_ep_idx].client == client &&
		    ipa_should_pipe_be_suspended(client)) {
			/* lock against a concurrent pipe disconnect */
			spin_lock(&ipa_ctx->disconnect_lock);
			if (ipa_ctx->ep[ipa_ep_idx].valid &&
			    !ipa_ctx->ep[ipa_ep_idx].disconnect_in_progress) {
				/* clear the suspend bit on the endpoint */
				memset(&suspend, 0, sizeof(suspend));
				suspend.ipa_ep_suspend = false;
				ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend);
			}
			spin_unlock(&ipa_ctx->disconnect_lock);
		}
	}

	return res;
}
625
/* read how much SRAM is available for SW use
 * In case of IPAv2.0 this will also supply an offset from
 * which we can start write
 */
void _ipa_sram_settings_read_v1_1(void)
{
	/* v1.1 has no restricted region at the start of shared memory */
	ipa_ctx->smem_restricted_bytes = 0;
	ipa_ctx->smem_sz = ipa_read_reg(ipa_ctx->mmio,
			IPA_SHARED_MEM_SIZE_OFST_v1_1);
	ipa_ctx->smem_reqd_sz = IPA_MEM_v1_RAM_END_OFST;
	/* header and filter tables live in local SRAM; routing in DDR */
	ipa_ctx->hdr_tbl_lcl = 1;
	ipa_ctx->ip4_rt_tbl_lcl = 0;
	ipa_ctx->ip6_rt_tbl_lcl = 0;
	ipa_ctx->ip4_flt_tbl_lcl = 1;
	ipa_ctx->ip6_flt_tbl_lcl = 1;
}
642
/*
 * _ipa_sram_settings_read_v2_0() - read shared-memory base offset and size
 * from the v2.0 SHARED_MEM_SIZE register; all tables are kept in system
 * memory (no local SRAM tables) on this HW.
 */
void _ipa_sram_settings_read_v2_0(void)
{
	ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio,
			IPA_SHARED_MEM_SIZE_OFST_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0);
	ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio,
			IPA_SHARED_MEM_SIZE_OFST_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0);
	ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst);
	ipa_ctx->hdr_tbl_lcl = 0;
	ipa_ctx->ip4_rt_tbl_lcl = 0;
	ipa_ctx->ip6_rt_tbl_lcl = 0;
	ipa_ctx->ip4_flt_tbl_lcl = 0;
	ipa_ctx->ip6_flt_tbl_lcl = 0;
}
660
/*
 * _ipa_sram_settings_read_v2_5() - like v2.0, but the header processing
 * context table is kept in local SRAM on v2.5.
 */
void _ipa_sram_settings_read_v2_5(void)
{
	ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio,
			IPA_SHARED_MEM_SIZE_OFST_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0);
	ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio,
			IPA_SHARED_MEM_SIZE_OFST_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0);
	ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst);
	ipa_ctx->hdr_tbl_lcl = 0;
	ipa_ctx->hdr_proc_ctx_tbl_lcl = 1;

	/*
	 * when proc ctx table is located in internal memory,
	 * modem entries resides first.
	 */
	if (ipa_ctx->hdr_proc_ctx_tbl_lcl) {
		ipa_ctx->hdr_proc_ctx_tbl.start_offset =
			IPA_MEM_PART(modem_hdr_proc_ctx_size);
	}
	ipa_ctx->ip4_rt_tbl_lcl = 0;
	ipa_ctx->ip6_rt_tbl_lcl = 0;
	ipa_ctx->ip4_flt_tbl_lcl = 0;
	ipa_ctx->ip6_flt_tbl_lcl = 0;
}
688
/*
 * _ipa_sram_settings_read_v2_6L() - v2.6L variant; reads base/size from the
 * same v2.0 register layout, all tables in system memory.
 */
void _ipa_sram_settings_read_v2_6L(void)
{
	ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio,
			IPA_SHARED_MEM_SIZE_OFST_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0);
	ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio,
			IPA_SHARED_MEM_SIZE_OFST_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0,
			IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0);
	ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst);
	ipa_ctx->hdr_tbl_lcl = 0;
	ipa_ctx->ip4_rt_tbl_lcl = 0;
	ipa_ctx->ip6_rt_tbl_lcl = 0;
	ipa_ctx->ip4_flt_tbl_lcl = 0;
	ipa_ctx->ip6_flt_tbl_lcl = 0;
}
706
/*
 * _ipa_cfg_route_v1_1() - program the v1.1 ROUTE register from @route:
 * route-disable bit, default pipe, default header table and header offset.
 */
void _ipa_cfg_route_v1_1(struct ipa_route *route)
{
	u32 reg_val = 0;

	IPA_SETFIELD_IN_REG(reg_val, route->route_dis,
			IPA_ROUTE_ROUTE_DIS_SHFT,
			IPA_ROUTE_ROUTE_DIS_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, route->route_def_pipe,
			IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
			IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_table,
			IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
			IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_ofst,
			IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
			IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK);

	ipa_write_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1, reg_val);
}
729
/*
 * _ipa_cfg_route_v2_0() - v2.0 variant of _ipa_cfg_route_v1_1(); adds the
 * default pipe for fragmented packets.
 */
void _ipa_cfg_route_v2_0(struct ipa_route *route)
{
	u32 reg_val = 0;

	IPA_SETFIELD_IN_REG(reg_val, route->route_dis,
			IPA_ROUTE_ROUTE_DIS_SHFT,
			IPA_ROUTE_ROUTE_DIS_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, route->route_def_pipe,
			IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
			IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_table,
			IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
			IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_ofst,
			IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
			IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, route->route_frag_def_pipe,
			IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT,
			IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK);

	/*
	 * NOTE(review): writes the v1_1 register offset -- presumably the
	 * ROUTE register offset is unchanged on v2.0; confirm against the
	 * register map.
	 */
	ipa_write_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1, reg_val);
}
756
/**
 * ipa_cfg_route() - configure IPA route
 * @route: IPA route
 *
 * Return codes:
 * 0: success
 *
 * Takes an active-clients vote around the register write and dispatches
 * to the HW-version-specific writer through the ctrl ops table
 * (presumably _ipa_cfg_route_v1_1 / _ipa_cfg_route_v2_0 above).
 */
int ipa_cfg_route(struct ipa_route *route)
{

	IPADBG("disable_route_block=%d, default_pipe=%d, default_hdr_tbl=%d\n",
		route->route_dis,
		route->route_def_pipe,
		route->route_def_hdr_table);
	IPADBG("default_hdr_ofst=%d, default_frag_pipe=%d\n",
		route->route_def_hdr_ofst,
		route->route_frag_def_pipe);

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	ipa_ctx->ctrl->ipa_cfg_route(route);

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

	return 0;
}
783
784/**
785 * ipa_cfg_filter() - configure filter
786 * @disable: disable value
787 *
788 * Return codes:
789 * 0: success
790 */
791int ipa_cfg_filter(u32 disable)
792{
793 u32 ipa_filter_ofst = IPA_FILTER_OFST_v1_1;
794
795 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
796 ipa_write_reg(ipa_ctx->mmio, ipa_filter_ofst,
797 IPA_SETFIELD(!disable,
798 IPA_FILTER_FILTER_EN_SHFT,
799 IPA_FILTER_FILTER_EN_BMSK));
800 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
801
802 return 0;
803}
804
/**
 * ipa_init_hw() - initialize HW
 *
 * Return codes:
 * 0: success
 * -EFAULT: IPA_VERSION read back as 0 (registers not accessible)
 */
int ipa_init_hw(void)
{
	u32 ipa_version = 0;

	/* do soft reset of IPA */
	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 1);
	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 0);

	/* enable IPA */
	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_CFG_OFST, 1);

	/* Read IPA version and make sure we have access to the registers */
	ipa_version = ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST);
	if (ipa_version == 0)
		return -EFAULT;

	if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) {
		/*
		 * Program IPA_BCR to select new IPA behaviors. The value
		 * actually written is IPA_BCR_REG_VAL (0x001FFF7F) -- the
		 * original comment's claim of 0xFFFFFFFF was stale.
		 */
		ipa_write_reg(ipa_ctx->mmio, IPA_BCR_OFST, IPA_BCR_REG_VAL);
	}
	return 0;
}
833
834/**
835 * ipa2_get_ep_mapping() - provide endpoint mapping
836 * @client: client type
837 *
838 * Return value: endpoint mapping
839 */
840int ipa2_get_ep_mapping(enum ipa_client_type client)
841{
842 u8 hw_type_index = IPA_1_1;
843
844 if (unlikely(!ipa_ctx)) {
845 IPAERR("IPA driver was not initialized\n");
846 return INVALID_EP_MAPPING_INDEX;
847 }
848
849 if (client >= IPA_CLIENT_MAX || client < 0) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +0530850 IPAERR_RL("Bad client number! client =%d\n", client);
Amir Levy9659e592016-10-27 18:08:27 +0300851 return INVALID_EP_MAPPING_INDEX;
852 }
853
854 switch (ipa_ctx->ipa_hw_type) {
855 case IPA_HW_v2_0:
856 case IPA_HW_v2_5:
857 hw_type_index = IPA_2_0;
858 break;
859 case IPA_HW_v2_6L:
860 hw_type_index = IPA_2_6L;
861 break;
862 default:
863 hw_type_index = IPA_1_1;
864 break;
865 }
866
Skylar Changa9516582017-05-09 11:36:47 -0700867 if (!ep_mapping[hw_type_index][client].valid)
868 return INVALID_EP_MAPPING_INDEX;
869
870 return ep_mapping[hw_type_index][client].pipe_num;
Amir Levy9659e592016-10-27 18:08:27 +0300871}
872
873/* ipa2_set_client() - provide client mapping
874 * @client: client type
875 *
876 * Return value: none
877 */
878
879void ipa2_set_client(int index, enum ipacm_client_enum client, bool uplink)
880{
Skylar Chang09e0e252017-03-20 14:51:29 -0700881 if (client > IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) {
Amir Levy9659e592016-10-27 18:08:27 +0300882 IPAERR("Bad client number! client =%d\n", client);
883 } else if (index >= IPA_MAX_NUM_PIPES || index < 0) {
884 IPAERR("Bad pipe index! index =%d\n", index);
885 } else {
886 ipa_ctx->ipacm_client[index].client_enum = client;
887 ipa_ctx->ipacm_client[index].uplink = uplink;
888 }
889}
890
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +0530891/* ipa2_get_wlan_stats() - get ipa wifi stats
892 *
893 * Return value: success or failure
894 */
895int ipa2_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats)
896{
897 if (ipa_ctx->uc_wdi_ctx.stats_notify) {
898 ipa_ctx->uc_wdi_ctx.stats_notify(IPA_GET_WDI_SAP_STATS,
899 wdi_sap_stats);
900 } else {
901 IPAERR("uc_wdi_ctx.stats_notify not registered\n");
902 return -EFAULT;
903 }
904 return 0;
905}
906
907int ipa2_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota)
908{
909 if (ipa_ctx->uc_wdi_ctx.stats_notify) {
910 ipa_ctx->uc_wdi_ctx.stats_notify(IPA_SET_WIFI_QUOTA,
911 wdi_quota);
912 } else {
913 IPAERR("uc_wdi_ctx.stats_notify not registered\n");
914 return -EFAULT;
915 }
916 return 0;
917}
918
Amir Levy9659e592016-10-27 18:08:27 +0300919/**
920 * ipa2_get_client() - provide client mapping
921 * @client: client type
922 *
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +0530923 * Return value: client mapping enum
Amir Levy9659e592016-10-27 18:08:27 +0300924 */
925enum ipacm_client_enum ipa2_get_client(int pipe_idx)
926{
927 if (pipe_idx >= IPA_MAX_NUM_PIPES || pipe_idx < 0) {
928 IPAERR("Bad pipe index! pipe_idx =%d\n", pipe_idx);
929 return IPACM_CLIENT_MAX;
930 } else {
931 return ipa_ctx->ipacm_client[pipe_idx].client_enum;
932 }
933}
934
935/**
936 * ipa2_get_client_uplink() - provide client mapping
937 * @client: client type
938 *
939 * Return value: none
940 */
941bool ipa2_get_client_uplink(int pipe_idx)
942{
Skylar Chang53f855e2017-06-12 10:50:12 -0700943 if (pipe_idx < 0 || pipe_idx >= IPA_MAX_NUM_PIPES) {
944 IPAERR("invalid pipe idx %d\n", pipe_idx);
945 return false;
946 }
947
Amir Levy9659e592016-10-27 18:08:27 +0300948 return ipa_ctx->ipacm_client[pipe_idx].uplink;
949}
950
951/**
952 * ipa2_get_rm_resource_from_ep() - get the IPA_RM resource which is related to
953 * the supplied pipe index.
954 *
955 * @pipe_idx:
956 *
957 * Return value: IPA_RM resource related to the pipe, -1 if a resource was not
958 * found.
959 */
960enum ipa_rm_resource_name ipa2_get_rm_resource_from_ep(int pipe_idx)
961{
962 int i;
963 int j;
964 enum ipa_client_type client;
965 struct ipa_client_names clients;
966 bool found = false;
967
968 if (unlikely(!ipa_ctx)) {
969 IPAERR("IPA driver was not initialized\n");
970 return -EINVAL;
971 }
972
973 if (pipe_idx >= ipa_ctx->ipa_num_pipes || pipe_idx < 0) {
974 IPAERR("Bad pipe index!\n");
975 return -EINVAL;
976 }
977
978 client = ipa_ctx->ep[pipe_idx].client;
979
980 for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) {
981 memset(&clients, 0, sizeof(clients));
982 ipa_get_clients_from_rm_resource(i, &clients);
983 for (j = 0; j < clients.length; j++) {
984 if (clients.names[j] == client) {
985 found = true;
986 break;
987 }
988 }
989 if (found)
990 break;
991 }
992
993 if (!found)
994 return -EFAULT;
995
996 return i;
997}
998
999/**
1000 * ipa2_get_client_mapping() - provide client mapping
1001 * @pipe_idx: IPA end-point number
1002 *
1003 * Return value: client mapping
1004 */
1005enum ipa_client_type ipa2_get_client_mapping(int pipe_idx)
1006{
1007 if (unlikely(!ipa_ctx)) {
1008 IPAERR("IPA driver was not initialized\n");
1009 return -EINVAL;
1010 }
1011
1012 if (pipe_idx >= ipa_ctx->ipa_num_pipes || pipe_idx < 0) {
1013 IPAERR("Bad pipe index!\n");
1014 return -EINVAL;
1015 }
1016
1017 return ipa_ctx->ep[pipe_idx].client;
1018}
1019
1020void ipa_generate_mac_addr_hw_rule(u8 **buf, u8 hdr_mac_addr_offset,
1021 const uint8_t mac_addr_mask[ETH_ALEN],
1022 const uint8_t mac_addr[ETH_ALEN])
1023{
1024 *buf = ipa_write_8(hdr_mac_addr_offset, *buf);
1025
1026 /* MAC addr mask copied as little endian each 4 bytes */
1027 *buf = ipa_write_8(mac_addr_mask[3], *buf);
1028 *buf = ipa_write_8(mac_addr_mask[2], *buf);
1029 *buf = ipa_write_8(mac_addr_mask[1], *buf);
1030 *buf = ipa_write_8(mac_addr_mask[0], *buf);
1031 *buf = ipa_write_16(0, *buf);
1032 *buf = ipa_write_8(mac_addr_mask[5], *buf);
1033 *buf = ipa_write_8(mac_addr_mask[4], *buf);
1034 *buf = ipa_write_32(0, *buf);
1035 *buf = ipa_write_32(0, *buf);
1036
1037 /* MAC addr copied as little endian each 4 bytes */
1038 *buf = ipa_write_8(mac_addr[3], *buf);
1039 *buf = ipa_write_8(mac_addr[2], *buf);
1040 *buf = ipa_write_8(mac_addr[1], *buf);
1041 *buf = ipa_write_8(mac_addr[0], *buf);
1042 *buf = ipa_write_16(0, *buf);
1043 *buf = ipa_write_8(mac_addr[5], *buf);
1044 *buf = ipa_write_8(mac_addr[4], *buf);
1045 *buf = ipa_write_32(0, *buf);
1046 *buf = ipa_write_32(0, *buf);
1047 *buf = ipa_pad_to_32(*buf);
1048}
1049
/**
 * ipa_generate_hw_rule() - generate HW rule
 * @ip: IP address type
 * @attrib: IPA rule attribute
 * @buf: output buffer; advanced in place as equation payloads are appended
 * @en_rule: out bitmap of enabled HW equations
 *
 * Return codes:
 * 0: success
 * -EPERM: wrong input
 */
int ipa_generate_hw_rule(enum ipa_ip_type ip,
	const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule)
{
	/*
	 * Next free equation slot of each type; the ipa_* lookup tables
	 * return -1 once all slots of a type are used.
	 */
	u8 ofst_meq32 = 0;
	u8 ihl_ofst_rng16 = 0;
	u8 ihl_ofst_meq32 = 0;
	u8 ofst_meq128 = 0;

	if (ip == IPA_IP_v4) {

		/* error check */
		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
		    attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask &
		    IPA_FLT_FLOW_LABEL) {
			IPAERR("v6 attrib's specified for v4 rule\n");
			return -EPERM;
		}

		if (attrib->attrib_mask & IPA_FLT_TOS) {
			*en_rule |= IPA_TOS_EQ;
			*buf = ipa_write_8(attrib->u.v4.tos, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				IPAERR("ran out of meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* 0 => offset of TOS in v4 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_32((attrib->tos_mask << 16), *buf);
			*buf = ipa_write_32((attrib->tos_value << 16), *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
			*en_rule |= IPA_PROTOCOL_EQ;
			*buf = ipa_write_8(attrib->u.v4.protocol, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				IPAERR("ran out of meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* 12 => offset of src ip in v4 header */
			*buf = ipa_write_8(12, *buf);
			*buf = ipa_write_32(attrib->u.v4.src_addr_mask, *buf);
			*buf = ipa_write_32(attrib->u.v4.src_addr, *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				IPAERR("ran out of meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* 16 => offset of dst ip in v4 header */
			*buf = ipa_write_8(16, *buf);
			*buf = ipa_write_32(attrib->u.v4.dst_addr_mask, *buf);
			*buf = ipa_write_32(attrib->u.v4.dst_addr, *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				IPAERR("ran out of meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* -2 => offset of ether type in L2 hdr */
			*buf = ipa_write_8((u8)-2, *buf);
			/* ether type is used as both mask and value */
			*buf = ipa_write_16(0, *buf);
			*buf = ipa_write_16(htons(attrib->ether_type), *buf);
			*buf = ipa_write_16(0, *buf);
			*buf = ipa_write_16(htons(attrib->ether_type), *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			if (attrib->src_port_hi < attrib->src_port_lo) {
				IPAERR("bad src port range param\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 0 => offset of src port after v4 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_16(attrib->src_port_hi, *buf);
			*buf = ipa_write_16(attrib->src_port_lo, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			if (attrib->dst_port_hi < attrib->dst_port_lo) {
				IPAERR("bad dst port range param\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 2 => offset of dst port after v4 header */
			*buf = ipa_write_8(2, *buf);
			*buf = ipa_write_16(attrib->dst_port_hi, *buf);
			*buf = ipa_write_16(attrib->dst_port_lo, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_TYPE) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 0 => offset of type after v4 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_32(0xFF, *buf);
			*buf = ipa_write_32(attrib->type, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_CODE) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 1 => offset of code after v4 header */
			*buf = ipa_write_8(1, *buf);
			*buf = ipa_write_32(0xFF, *buf);
			*buf = ipa_write_32(attrib->code, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SPI) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 0 => offset of SPI after v4 header FIXME */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_32(0xFFFFFFFF, *buf);
			*buf = ipa_write_32(attrib->spi, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 0 => offset of src port after v4 header */
			/* exact match: range low == range high == src_port */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_16(attrib->src_port, *buf);
			*buf = ipa_write_16(attrib->src_port, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 2 => offset of dst port after v4 header */
			/* exact match: range low == range high == dst_port */
			*buf = ipa_write_8(2, *buf);
			*buf = ipa_write_16(attrib->dst_port, *buf);
			*buf = ipa_write_16(attrib->dst_port, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];

			/* -14 => offset of dst mac addr in Ethernet II hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-14,
				attrib->dst_mac_addr_mask,
				attrib->dst_mac_addr);

			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];

			/* -8 => offset of src mac addr in Ethernet II hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-8,
				attrib->src_mac_addr_mask,
				attrib->src_mac_addr);

			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];

			/* -22 => offset of dst mac addr in 802.3 hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-22,
				attrib->dst_mac_addr_mask,
				attrib->dst_mac_addr);

			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];

			/* -16 => offset of src mac addr in 802.3 hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-16,
				attrib->src_mac_addr_mask,
				attrib->src_mac_addr);

			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_META_DATA) {
			*en_rule |= IPA_METADATA_COMPARE;
			*buf = ipa_write_8(0, *buf);	/* offset, reserved */
			*buf = ipa_write_32(attrib->meta_data_mask, *buf);
			*buf = ipa_write_32(attrib->meta_data, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
			/* frag equation carries no payload, only the bit */
			*en_rule |= IPA_IS_FRAG;
			*buf = ipa_pad_to_32(*buf);
		}
	} else if (ip == IPA_IP_v6) {

		/* v6 code below assumes no extension headers TODO: fix this */

		/* error check */
		if (attrib->attrib_mask & IPA_FLT_TOS ||
		    attrib->attrib_mask & IPA_FLT_PROTOCOL) {
			IPAERR("v4 attrib's specified for v6 rule\n");
			return -EPERM;
		}

		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
			*en_rule |= IPA_PROTOCOL_EQ;
			*buf = ipa_write_8(attrib->u.v6.next_hdr, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				IPAERR("ran out of meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* -2 => offset of ether type in L2 hdr */
			*buf = ipa_write_8((u8)-2, *buf);
			*buf = ipa_write_16(0, *buf);
			*buf = ipa_write_16(htons(attrib->ether_type), *buf);
			*buf = ipa_write_16(0, *buf);
			*buf = ipa_write_16(htons(attrib->ether_type), *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_TYPE) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 0 => offset of type after v6 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_32(0xFF, *buf);
			*buf = ipa_write_32(attrib->type, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_CODE) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 1 => offset of code after v6 header */
			*buf = ipa_write_8(1, *buf);
			*buf = ipa_write_32(0xFF, *buf);
			*buf = ipa_write_32(attrib->code, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SPI) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 0 => offset of SPI after v6 header FIXME */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_32(0xFFFFFFFF, *buf);
			*buf = ipa_write_32(attrib->spi, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 22 => offset of IP type after v6 header */
			*buf = ipa_write_8(22, *buf);
			*buf = ipa_write_32(0xF0000000, *buf);
			/* match the inner IP version nibble: 4 or 6 */
			if (attrib->type == 0x40)
				*buf = ipa_write_32(0x40000000, *buf);
			else
				*buf = ipa_write_32(0x60000000, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 38 => offset of inner IPv4 addr */
			*buf = ipa_write_8(38, *buf);
			*buf = ipa_write_32(attrib->u.v4.dst_addr_mask, *buf);
			*buf = ipa_write_32(attrib->u.v4.dst_addr, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 0 => offset of src port after v6 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_16(attrib->src_port, *buf);
			*buf = ipa_write_16(attrib->src_port, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 2 => offset of dst port after v6 header */
			*buf = ipa_write_8(2, *buf);
			*buf = ipa_write_16(attrib->dst_port, *buf);
			*buf = ipa_write_16(attrib->dst_port, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			if (attrib->src_port_hi < attrib->src_port_lo) {
				IPAERR("bad src port range param\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 0 => offset of src port after v6 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_16(attrib->src_port_hi, *buf);
			*buf = ipa_write_16(attrib->src_port_lo, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			if (attrib->dst_port_hi < attrib->dst_port_lo) {
				IPAERR("bad dst port range param\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 2 => offset of dst port after v6 header */
			*buf = ipa_write_8(2, *buf);
			*buf = ipa_write_16(attrib->dst_port_hi, *buf);
			*buf = ipa_write_16(attrib->dst_port_lo, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* 8 => offset of src ip in v6 header */
			*buf = ipa_write_8(8, *buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[0],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[1],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[2],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[3],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr[0], *buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr[1], *buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr[2], *buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr[3], *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* 24 => offset of dst ip in v6 header */
			*buf = ipa_write_8(24, *buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[0],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[1],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[2],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[3],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr[0], *buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr[1], *buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr[2], *buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr[3], *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_TC) {
			/*
			 * NOTE(review): the attrib mask bit IPA_FLT_TC is
			 * or'ed into *en_rule as the equation enable bit;
			 * long-standing driver behavior — confirm against the
			 * HW equation bit definitions before changing.
			 */
			*en_rule |= IPA_FLT_TC;
			*buf = ipa_write_8(attrib->u.v6.tc, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* 0 => offset of TOS in v6 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_32((attrib->tos_mask << 20), *buf);
			*buf = ipa_write_32(0, *buf);
			*buf = ipa_write_32(0, *buf);
			*buf = ipa_write_32(0, *buf);

			*buf = ipa_write_32((attrib->tos_value << 20), *buf);
			*buf = ipa_write_32(0, *buf);
			*buf = ipa_write_32(0, *buf);
			*buf = ipa_write_32(0, *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];

			/* -14 => offset of dst mac addr in Ethernet II hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-14,
				attrib->dst_mac_addr_mask,
				attrib->dst_mac_addr);

			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];

			/* -8 => offset of src mac addr in Ethernet II hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-8,
				attrib->src_mac_addr_mask,
				attrib->src_mac_addr);

			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];

			/* -22 => offset of dst mac addr in 802.3 hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-22,
				attrib->dst_mac_addr_mask,
				attrib->dst_mac_addr);

			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];

			/* -16 => offset of src mac addr in 802.3 hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-16,
				attrib->src_mac_addr_mask,
				attrib->src_mac_addr);

			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
			/*
			 * NOTE(review): as with IPA_FLT_TC above, the attrib
			 * mask bit itself is or'ed into *en_rule.
			 */
			*en_rule |= IPA_FLT_FLOW_LABEL;
			/* FIXME FL is only 20 bits */
			*buf = ipa_write_32(attrib->u.v6.flow_label, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_META_DATA) {
			*en_rule |= IPA_METADATA_COMPARE;
			*buf = ipa_write_8(0, *buf);	/* offset, reserved */
			*buf = ipa_write_32(attrib->meta_data_mask, *buf);
			*buf = ipa_write_32(attrib->meta_data, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
			*en_rule |= IPA_IS_FRAG;
			*buf = ipa_pad_to_32(*buf);
		}
	} else {
		IPAERR("unsupported ip %d\n", ip);
		return -EPERM;
	}

	/*
	 * default "rule" means no attributes set -> map to
	 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
	 */
	if (attrib->attrib_mask == 0) {
		IPADBG_LOW("building default rule\n");
		if (ipa_ofst_meq32[ofst_meq32] == -1) {
			IPAERR("ran out of meq32 eq\n");
			return -EPERM;
		}
		*en_rule |= ipa_ofst_meq32[ofst_meq32];
		*buf = ipa_write_8(0, *buf);    /* offset */
		*buf = ipa_write_32(0, *buf);   /* mask */
		*buf = ipa_write_32(0, *buf);   /* val */
		*buf = ipa_pad_to_32(*buf);
		ofst_meq32++;
	}

	return 0;
}
1692
1693void ipa_generate_flt_mac_addr_eq(struct ipa_ipfltri_rule_eq *eq_atrb,
1694 u8 hdr_mac_addr_offset, const uint8_t mac_addr_mask[ETH_ALEN],
1695 const uint8_t mac_addr[ETH_ALEN], u8 ofst_meq128)
1696{
1697 eq_atrb->offset_meq_128[ofst_meq128].offset = hdr_mac_addr_offset;
1698 eq_atrb->offset_meq_128[ofst_meq128].mask[0] = mac_addr_mask[3];
1699 eq_atrb->offset_meq_128[ofst_meq128].mask[1] = mac_addr_mask[2];
1700 eq_atrb->offset_meq_128[ofst_meq128].mask[2] = mac_addr_mask[1];
1701 eq_atrb->offset_meq_128[ofst_meq128].mask[3] = mac_addr_mask[0];
1702 eq_atrb->offset_meq_128[ofst_meq128].mask[4] = 0;
1703 eq_atrb->offset_meq_128[ofst_meq128].mask[5] = 0;
1704 eq_atrb->offset_meq_128[ofst_meq128].mask[6] = mac_addr_mask[5];
1705 eq_atrb->offset_meq_128[ofst_meq128].mask[7] = mac_addr_mask[4];
1706 memset(eq_atrb->offset_meq_128[ofst_meq128].mask + 8, 0, 8);
1707 eq_atrb->offset_meq_128[ofst_meq128].value[0] = mac_addr[3];
1708 eq_atrb->offset_meq_128[ofst_meq128].value[1] = mac_addr[2];
1709 eq_atrb->offset_meq_128[ofst_meq128].value[2] = mac_addr[1];
1710 eq_atrb->offset_meq_128[ofst_meq128].value[3] = mac_addr[0];
1711 eq_atrb->offset_meq_128[ofst_meq128].value[4] = 0;
1712 eq_atrb->offset_meq_128[ofst_meq128].value[5] = 0;
1713 eq_atrb->offset_meq_128[ofst_meq128].value[6] = mac_addr[5];
1714 eq_atrb->offset_meq_128[ofst_meq128].value[7] = mac_addr[4];
1715 memset(eq_atrb->offset_meq_128[ofst_meq128].value + 8, 0, 8);
1716}
1717
1718int ipa_generate_flt_eq(enum ipa_ip_type ip,
1719 const struct ipa_rule_attrib *attrib,
1720 struct ipa_ipfltri_rule_eq *eq_atrb)
1721{
1722 u8 ofst_meq32 = 0;
1723 u8 ihl_ofst_rng16 = 0;
1724 u8 ihl_ofst_meq32 = 0;
1725 u8 ofst_meq128 = 0;
1726 u16 eq_bitmap = 0;
1727 u16 *en_rule = &eq_bitmap;
1728
1729 if (ip == IPA_IP_v4) {
1730
1731 /* error check */
1732 if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
1733 attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask &
1734 IPA_FLT_FLOW_LABEL) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301735 IPAERR_RL("v6 attrib's specified for v4 rule\n");
Amir Levy9659e592016-10-27 18:08:27 +03001736 return -EPERM;
1737 }
1738
1739 if (attrib->attrib_mask & IPA_FLT_TOS) {
1740 *en_rule |= IPA_TOS_EQ;
1741 eq_atrb->tos_eq_present = 1;
1742 eq_atrb->tos_eq = attrib->u.v4.tos;
1743 }
1744
1745 if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
1746 if (ipa_ofst_meq32[ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301747 IPAERR_RL("ran out of meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001748 return -EPERM;
1749 }
1750 *en_rule |= ipa_ofst_meq32[ofst_meq32];
1751 eq_atrb->offset_meq_32[ofst_meq32].offset = 0;
1752 eq_atrb->offset_meq_32[ofst_meq32].mask =
1753 attrib->tos_mask << 16;
1754 eq_atrb->offset_meq_32[ofst_meq32].value =
1755 attrib->tos_value << 16;
1756 ofst_meq32++;
1757 }
1758
1759 if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
1760 *en_rule |= IPA_PROTOCOL_EQ;
1761 eq_atrb->protocol_eq_present = 1;
1762 eq_atrb->protocol_eq = attrib->u.v4.protocol;
1763 }
1764
1765 if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
1766 if (ipa_ofst_meq32[ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301767 IPAERR_RL("ran out of meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001768 return -EPERM;
1769 }
1770 *en_rule |= ipa_ofst_meq32[ofst_meq32];
1771 eq_atrb->offset_meq_32[ofst_meq32].offset = 12;
1772 eq_atrb->offset_meq_32[ofst_meq32].mask =
1773 attrib->u.v4.src_addr_mask;
1774 eq_atrb->offset_meq_32[ofst_meq32].value =
1775 attrib->u.v4.src_addr;
1776 ofst_meq32++;
1777 }
1778
1779 if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
1780 if (ipa_ofst_meq32[ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301781 IPAERR_RL("ran out of meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001782 return -EPERM;
1783 }
1784 *en_rule |= ipa_ofst_meq32[ofst_meq32];
1785 eq_atrb->offset_meq_32[ofst_meq32].offset = 16;
1786 eq_atrb->offset_meq_32[ofst_meq32].mask =
1787 attrib->u.v4.dst_addr_mask;
1788 eq_atrb->offset_meq_32[ofst_meq32].value =
1789 attrib->u.v4.dst_addr;
1790 ofst_meq32++;
1791 }
1792
1793 if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
1794 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301795 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001796 return -EPERM;
1797 }
1798 if (attrib->src_port_hi < attrib->src_port_lo) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301799 IPAERR_RL("bad src port range param\n");
Amir Levy9659e592016-10-27 18:08:27 +03001800 return -EPERM;
1801 }
1802 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
1803 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
1804 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
1805 = attrib->src_port_lo;
1806 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
1807 = attrib->src_port_hi;
1808 ihl_ofst_rng16++;
1809 }
1810
1811 if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
1812 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301813 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001814 return -EPERM;
1815 }
1816 if (attrib->dst_port_hi < attrib->dst_port_lo) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301817 IPAERR_RL("bad dst port range param\n");
Amir Levy9659e592016-10-27 18:08:27 +03001818 return -EPERM;
1819 }
1820 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
1821 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
1822 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
1823 = attrib->dst_port_lo;
1824 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
1825 = attrib->dst_port_hi;
1826 ihl_ofst_rng16++;
1827 }
1828
1829 if (attrib->attrib_mask & IPA_FLT_TYPE) {
1830 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301831 IPAERR_RL("ran out of ihl_meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001832 return -EPERM;
1833 }
1834 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
1835 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
1836 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
1837 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
1838 attrib->type;
1839 ihl_ofst_meq32++;
1840 }
1841
1842 if (attrib->attrib_mask & IPA_FLT_CODE) {
1843 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301844 IPAERR_RL("ran out of ihl_meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001845 return -EPERM;
1846 }
1847 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
1848 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
1849 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
1850 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
1851 attrib->code;
1852 ihl_ofst_meq32++;
1853 }
1854
1855 if (attrib->attrib_mask & IPA_FLT_SPI) {
1856 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301857 IPAERR_RL("ran out of ihl_meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001858 return -EPERM;
1859 }
1860 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
1861 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
1862 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
1863 0xFFFFFFFF;
1864 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
1865 attrib->spi;
1866 ihl_ofst_meq32++;
1867 }
1868
1869 if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
1870 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301871 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001872 return -EPERM;
1873 }
1874 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
1875 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
1876 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
1877 = attrib->src_port;
1878 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
1879 = attrib->src_port;
1880 ihl_ofst_rng16++;
1881 }
1882
1883 if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
1884 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301885 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001886 return -EPERM;
1887 }
1888 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
1889 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
1890 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
1891 = attrib->dst_port;
1892 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
1893 = attrib->dst_port;
1894 ihl_ofst_rng16++;
1895 }
1896
1897 if (attrib->attrib_mask & IPA_FLT_META_DATA) {
1898 *en_rule |= IPA_METADATA_COMPARE;
1899 eq_atrb->metadata_meq32_present = 1;
1900 eq_atrb->metadata_meq32.offset = 0;
1901 eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
1902 eq_atrb->metadata_meq32.value = attrib->meta_data;
1903 }
1904
1905 if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
1906 *en_rule |= IPA_IS_FRAG;
1907 eq_atrb->ipv4_frag_eq_present = 1;
1908 }
1909
1910 if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
1911 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301912 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001913 return -EPERM;
1914 }
1915 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1916
1917 /* -14 => offset of dst mac addr in Ethernet II hdr */
1918 ipa_generate_flt_mac_addr_eq(eq_atrb, -14,
1919 attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
1920 ofst_meq128);
1921
1922 ofst_meq128++;
1923 }
1924
1925 if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
1926 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301927 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001928 return -EPERM;
1929 }
1930 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1931
1932 /* -8 => offset of src mac addr in Ethernet II hdr */
1933 ipa_generate_flt_mac_addr_eq(eq_atrb, -8,
1934 attrib->src_mac_addr_mask, attrib->src_mac_addr,
1935 ofst_meq128);
1936
1937 ofst_meq128++;
1938 }
1939
1940 if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
1941 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301942 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001943 return -EPERM;
1944 }
1945 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1946
1947 /* -22 => offset of dst mac addr in 802.3 hdr */
1948 ipa_generate_flt_mac_addr_eq(eq_atrb, -22,
1949 attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
1950 ofst_meq128);
1951
1952 ofst_meq128++;
1953 }
1954
1955 if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
1956 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301957 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001958 return -EPERM;
1959 }
1960 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1961
1962 /* -16 => offset of src mac addr in 802.3 hdr */
1963 ipa_generate_flt_mac_addr_eq(eq_atrb, -16,
1964 attrib->src_mac_addr_mask, attrib->src_mac_addr,
1965 ofst_meq128);
1966
1967 ofst_meq128++;
1968 }
1969
1970 if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
1971 if (ipa_ofst_meq32[ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301972 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001973 return -EPERM;
1974 }
1975 *en_rule |= ipa_ofst_meq32[ofst_meq32];
1976 eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
1977 eq_atrb->offset_meq_32[ofst_meq32].mask =
1978 htons(attrib->ether_type);
1979 eq_atrb->offset_meq_32[ofst_meq32].value =
1980 htons(attrib->ether_type);
1981 ofst_meq32++;
1982 }
1983 } else if (ip == IPA_IP_v6) {
1984
1985 /* v6 code below assumes no extension headers TODO: fix this */
1986
1987 /* error check */
1988 if (attrib->attrib_mask & IPA_FLT_TOS ||
1989 attrib->attrib_mask & IPA_FLT_PROTOCOL) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301990 IPAERR_RL("v4 attrib's specified for v6 rule\n");
Amir Levy9659e592016-10-27 18:08:27 +03001991 return -EPERM;
1992 }
1993
1994 if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
1995 *en_rule |= IPA_PROTOCOL_EQ;
1996 eq_atrb->protocol_eq_present = 1;
1997 eq_atrb->protocol_eq = attrib->u.v6.next_hdr;
1998 }
1999
2000 if (attrib->attrib_mask & IPA_FLT_TYPE) {
2001 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302002 IPAERR_RL("ran out of ihl_meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002003 return -EPERM;
2004 }
2005 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
2006 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
2007 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
2008 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
2009 attrib->type;
2010 ihl_ofst_meq32++;
2011 }
2012
2013 if (attrib->attrib_mask & IPA_FLT_CODE) {
2014 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302015 IPAERR_RL("ran out of ihl_meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002016 return -EPERM;
2017 }
2018 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
2019 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
2020 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
2021 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
2022 attrib->code;
2023 ihl_ofst_meq32++;
2024 }
2025
2026 if (attrib->attrib_mask & IPA_FLT_SPI) {
2027 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302028 IPAERR_RL("ran out of ihl_meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002029 return -EPERM;
2030 }
2031 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
2032 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
2033 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
2034 0xFFFFFFFF;
2035 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
2036 attrib->spi;
2037 ihl_ofst_meq32++;
2038 }
2039
Shihuan Liuf4433442017-09-28 17:46:41 -07002040 if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) {
2041 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
2042 IPAERR("ran out of ihl_meq32 eq\n");
2043 return -EPERM;
2044 }
2045 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
2046 /* 22 => offset of inner IP type after v6 header */
2047 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 22;
2048 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
2049 0xF0000000;
2050 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
2051 (u32)attrib->type << 24;
2052 ihl_ofst_meq32++;
2053 }
2054
2055 if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
2056 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
2057 IPAERR("ran out of ihl_meq32 eq\n");
2058 return -EPERM;
2059 }
2060 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
2061 /* 38 => offset of inner IPv4 addr */
2062 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 38;
2063 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
2064 attrib->u.v4.dst_addr_mask;
2065 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
2066 attrib->u.v4.dst_addr;
2067 ihl_ofst_meq32++;
2068 }
2069
Amir Levy9659e592016-10-27 18:08:27 +03002070 if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
2071 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302072 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002073 return -EPERM;
2074 }
2075 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
2076 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
2077 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
2078 = attrib->src_port;
2079 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
2080 = attrib->src_port;
2081 ihl_ofst_rng16++;
2082 }
2083
2084 if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
2085 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302086 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002087 return -EPERM;
2088 }
2089 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
2090 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
2091 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
2092 = attrib->dst_port;
2093 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
2094 = attrib->dst_port;
2095 ihl_ofst_rng16++;
2096 }
2097
2098 if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
2099 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302100 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002101 return -EPERM;
2102 }
2103 if (attrib->src_port_hi < attrib->src_port_lo) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302104 IPAERR_RL("bad src port range param\n");
Amir Levy9659e592016-10-27 18:08:27 +03002105 return -EPERM;
2106 }
2107 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
2108 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
2109 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
2110 = attrib->src_port_lo;
2111 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
2112 = attrib->src_port_hi;
2113 ihl_ofst_rng16++;
2114 }
2115
2116 if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
2117 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302118 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002119 return -EPERM;
2120 }
2121 if (attrib->dst_port_hi < attrib->dst_port_lo) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302122 IPAERR_RL("bad dst port range param\n");
Amir Levy9659e592016-10-27 18:08:27 +03002123 return -EPERM;
2124 }
2125 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
2126 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
2127 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
2128 = attrib->dst_port_lo;
2129 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
2130 = attrib->dst_port_hi;
2131 ihl_ofst_rng16++;
2132 }
2133
2134 if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
2135 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302136 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002137 return -EPERM;
2138 }
2139 *en_rule |= ipa_ofst_meq128[ofst_meq128];
2140 eq_atrb->offset_meq_128[ofst_meq128].offset = 8;
2141 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
2142 = attrib->u.v6.src_addr_mask[0];
2143 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
2144 = attrib->u.v6.src_addr_mask[1];
2145 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
2146 = attrib->u.v6.src_addr_mask[2];
2147 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
2148 = attrib->u.v6.src_addr_mask[3];
2149 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
2150 = attrib->u.v6.src_addr[0];
2151 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
2152 = attrib->u.v6.src_addr[1];
2153 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
2154 = attrib->u.v6.src_addr[2];
2155 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
2156 12) = attrib->u.v6.src_addr[3];
2157 ofst_meq128++;
2158 }
2159
2160 if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
2161 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302162 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002163 return -EPERM;
2164 }
2165 *en_rule |= ipa_ofst_meq128[ofst_meq128];
2166 eq_atrb->offset_meq_128[ofst_meq128].offset = 24;
2167 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
2168 = attrib->u.v6.dst_addr_mask[0];
2169 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
2170 = attrib->u.v6.dst_addr_mask[1];
2171 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
2172 = attrib->u.v6.dst_addr_mask[2];
2173 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
2174 = attrib->u.v6.dst_addr_mask[3];
2175 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
2176 = attrib->u.v6.dst_addr[0];
2177 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
2178 = attrib->u.v6.dst_addr[1];
2179 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
2180 = attrib->u.v6.dst_addr[2];
2181 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
2182 12) = attrib->u.v6.dst_addr[3];
2183 ofst_meq128++;
2184 }
2185
2186 if (attrib->attrib_mask & IPA_FLT_TC) {
2187 *en_rule |= IPA_FLT_TC;
2188 eq_atrb->tc_eq_present = 1;
2189 eq_atrb->tc_eq = attrib->u.v6.tc;
2190 }
2191
2192 if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
2193 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302194 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002195 return -EPERM;
2196 }
2197 *en_rule |= ipa_ofst_meq128[ofst_meq128];
2198 eq_atrb->offset_meq_128[ofst_meq128].offset = 0;
2199 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
2200 = attrib->tos_mask << 20;
2201 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
2202 = 0;
2203 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
2204 = 0;
2205 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
2206 = 0;
2207 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
2208 = attrib->tos_value << 20;
2209 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
2210 = 0;
2211 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
2212 = 0;
2213 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
2214 12) = 0;
2215 ofst_meq128++;
2216 }
2217
2218 if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
2219 *en_rule |= IPA_FLT_FLOW_LABEL;
2220 eq_atrb->fl_eq_present = 1;
2221 eq_atrb->fl_eq = attrib->u.v6.flow_label;
2222 }
2223
2224 if (attrib->attrib_mask & IPA_FLT_META_DATA) {
2225 *en_rule |= IPA_METADATA_COMPARE;
2226 eq_atrb->metadata_meq32_present = 1;
2227 eq_atrb->metadata_meq32.offset = 0;
2228 eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
2229 eq_atrb->metadata_meq32.value = attrib->meta_data;
2230 }
2231
2232 if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
2233 *en_rule |= IPA_IS_FRAG;
2234 eq_atrb->ipv4_frag_eq_present = 1;
2235 }
2236
2237 if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
2238 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302239 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002240 return -EPERM;
2241 }
2242 *en_rule |= ipa_ofst_meq128[ofst_meq128];
2243
2244 /* -14 => offset of dst mac addr in Ethernet II hdr */
2245 ipa_generate_flt_mac_addr_eq(eq_atrb, -14,
2246 attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
2247 ofst_meq128);
2248
2249 ofst_meq128++;
2250 }
2251
2252 if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
2253 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302254 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002255 return -EPERM;
2256 }
2257 *en_rule |= ipa_ofst_meq128[ofst_meq128];
2258
2259 /* -8 => offset of src mac addr in Ethernet II hdr */
2260 ipa_generate_flt_mac_addr_eq(eq_atrb, -8,
2261 attrib->src_mac_addr_mask, attrib->src_mac_addr,
2262 ofst_meq128);
2263
2264 ofst_meq128++;
2265 }
2266
2267 if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
2268 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302269 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002270 return -EPERM;
2271 }
2272 *en_rule |= ipa_ofst_meq128[ofst_meq128];
2273
2274 /* -22 => offset of dst mac addr in 802.3 hdr */
2275 ipa_generate_flt_mac_addr_eq(eq_atrb, -22,
2276 attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
2277 ofst_meq128);
2278
2279 ofst_meq128++;
2280 }
2281
2282 if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
2283 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302284 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002285 return -EPERM;
2286 }
2287 *en_rule |= ipa_ofst_meq128[ofst_meq128];
2288
2289 /* -16 => offset of src mac addr in 802.3 hdr */
2290 ipa_generate_flt_mac_addr_eq(eq_atrb, -16,
2291 attrib->src_mac_addr_mask, attrib->src_mac_addr,
2292 ofst_meq128);
2293
2294 ofst_meq128++;
2295 }
2296
2297 if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
2298 if (ipa_ofst_meq32[ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302299 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002300 return -EPERM;
2301 }
2302 *en_rule |= ipa_ofst_meq32[ofst_meq32];
2303 eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
2304 eq_atrb->offset_meq_32[ofst_meq32].mask =
2305 htons(attrib->ether_type);
2306 eq_atrb->offset_meq_32[ofst_meq32].value =
2307 htons(attrib->ether_type);
2308 ofst_meq32++;
2309 }
2310
2311 } else {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302312 IPAERR_RL("unsupported ip %d\n", ip);
Amir Levy9659e592016-10-27 18:08:27 +03002313 return -EPERM;
2314 }
2315
2316 /*
2317 * default "rule" means no attributes set -> map to
2318 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
2319 */
2320 if (attrib->attrib_mask == 0) {
2321 if (ipa_ofst_meq32[ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302322 IPAERR_RL("ran out of meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002323 return -EPERM;
2324 }
2325 *en_rule |= ipa_ofst_meq32[ofst_meq32];
2326 eq_atrb->offset_meq_32[ofst_meq32].offset = 0;
2327 eq_atrb->offset_meq_32[ofst_meq32].mask = 0;
2328 eq_atrb->offset_meq_32[ofst_meq32].value = 0;
2329 ofst_meq32++;
2330 }
2331
2332 eq_atrb->rule_eq_bitmap = *en_rule;
2333 eq_atrb->num_offset_meq_32 = ofst_meq32;
2334 eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16;
2335 eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32;
2336 eq_atrb->num_offset_meq_128 = ofst_meq128;
2337
2338 return 0;
2339}
2340
2341/**
2342 * ipa2_cfg_ep - IPA end-point configuration
2343 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2344 * @ipa_ep_cfg: [in] IPA end-point configuration params
2345 *
2346 * This includes nat, header, mode, aggregation and route settings and is a one
2347 * shot API to configure the IPA end-point fully
2348 *
2349 * Returns: 0 on success, negative on failure
2350 *
2351 * Note: Should not be called from atomic context
2352 */
int ipa2_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg)
{
	int result = -EINVAL;

	/* driver context must exist before any pipe can be configured */
	if (unlikely(!ipa_ctx)) {
		IPAERR("IPA driver was not initialized\n");
		return -EINVAL;
	}

	/* handle must refer to an allocated pipe and cfg must be non-NULL */
	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
	    ipa_ctx->ep[clnt_hdl].valid == 0 || ipa_ep_cfg == NULL) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	/*
	 * Apply each sub-configuration in sequence; stop at the first
	 * failure and propagate its error code. These four apply to
	 * both directions.
	 */
	result = ipa2_cfg_ep_hdr(clnt_hdl, &ipa_ep_cfg->hdr);
	if (result)
		return result;

	result = ipa2_cfg_ep_hdr_ext(clnt_hdl, &ipa_ep_cfg->hdr_ext);
	if (result)
		return result;

	result = ipa2_cfg_ep_aggr(clnt_hdl, &ipa_ep_cfg->aggr);
	if (result)
		return result;

	result = ipa2_cfg_ep_cfg(clnt_hdl, &ipa_ep_cfg->cfg);
	if (result)
		return result;

	if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) {
		/* producer pipes: NAT, mode, routing and deaggr settings */
		result = ipa2_cfg_ep_nat(clnt_hdl, &ipa_ep_cfg->nat);
		if (result)
			return result;

		result = ipa2_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode);
		if (result)
			return result;

		result = ipa2_cfg_ep_route(clnt_hdl, &ipa_ep_cfg->route);
		if (result)
			return result;

		result = ipa2_cfg_ep_deaggr(clnt_hdl, &ipa_ep_cfg->deaggr);
		if (result)
			return result;
	} else {
		/* consumer pipes: only the metadata mask applies */
		result = ipa2_cfg_ep_metadata_mask(clnt_hdl,
				&ipa_ep_cfg->metadata_mask);
		if (result)
			return result;
	}

	return 0;
}
2409
2410const char *ipa_get_nat_en_str(enum ipa_nat_en_type nat_en)
2411{
2412 switch (nat_en) {
2413 case (IPA_BYPASS_NAT):
2414 return "NAT disabled";
2415 case (IPA_SRC_NAT):
2416 return "Source NAT";
2417 case (IPA_DST_NAT):
2418 return "Dst NAT";
2419 }
2420
2421 return "undefined";
2422}
2423
/*
 * _ipa_cfg_ep_nat_v1_1() - program the NAT-enable field of the
 * ENDP_INIT_NAT_n register for one pipe (IPA v1.1 register layout).
 */
void _ipa_cfg_ep_nat_v1_1(u32 clnt_hdl,
		const struct ipa_ep_cfg_nat *ep_nat)
{
	u32 reg_val = 0;

	/* pack nat_en into its bit-field position */
	IPA_SETFIELD_IN_REG(reg_val, ep_nat->nat_en,
			IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT,
			IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK);

	ipa_write_reg(ipa_ctx->mmio,
			IPA_ENDP_INIT_NAT_N_OFST_v1_1(clnt_hdl),
			reg_val);
}
2437
/*
 * _ipa_cfg_ep_nat_v2_0() - program the NAT-enable field of the
 * ENDP_INIT_NAT_n register for one pipe (IPA v2.0 register layout).
 */
void _ipa_cfg_ep_nat_v2_0(u32 clnt_hdl,
		const struct ipa_ep_cfg_nat *ep_nat)
{
	u32 reg_val = 0;

	/* pack nat_en into its bit-field position */
	IPA_SETFIELD_IN_REG(reg_val, ep_nat->nat_en,
			IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT,
			IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK);

	ipa_write_reg(ipa_ctx->mmio,
			IPA_ENDP_INIT_NAT_N_OFST_v2_0(clnt_hdl),
			reg_val);
}
2451
2452/**
2453 * ipa2_cfg_ep_nat() - IPA end-point NAT configuration
2454 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2455 * @ipa_ep_cfg: [in] IPA end-point configuration params
2456 *
2457 * Returns: 0 on success, negative on failure
2458 *
2459 * Note: Should not be called from atomic context
2460 */
2461int ipa2_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat)
2462{
2463 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
2464 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_nat == NULL) {
2465 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
2466 clnt_hdl,
2467 ipa_ctx->ep[clnt_hdl].valid);
2468 return -EINVAL;
2469 }
2470
2471 if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
2472 IPAERR("NAT does not apply to IPA out EP %d\n", clnt_hdl);
2473 return -EINVAL;
2474 }
2475
2476 IPADBG("pipe=%d, nat_en=%d(%s)\n",
2477 clnt_hdl,
2478 ep_nat->nat_en,
2479 ipa_get_nat_en_str(ep_nat->nat_en));
2480
2481 /* copy over EP cfg */
2482 ipa_ctx->ep[clnt_hdl].cfg.nat = *ep_nat;
2483
2484 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
2485
2486 ipa_ctx->ctrl->ipa_cfg_ep_nat(clnt_hdl, ep_nat);
2487
2488 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
2489
2490 return 0;
2491}
2492
/* Status endpoint configuration is a no-op on IPA v1.1. */
static void _ipa_cfg_ep_status_v1_1(u32 clnt_hdl,
		const struct ipa_ep_cfg_status *ep_status)
{
	IPADBG("Not supported for version 1.1\n");
}
2498
/*
 * _ipa_cfg_ep_status_v2_0() - program the ENDP_STATUS_n register for one
 * pipe: status enable bit plus the pipe that receives status packets.
 */
static void _ipa_cfg_ep_status_v2_0(u32 clnt_hdl,
		const struct ipa_ep_cfg_status *ep_status)
{
	u32 reg_val = 0;

	IPA_SETFIELD_IN_REG(reg_val, ep_status->status_en,
			IPA_ENDP_STATUS_n_STATUS_EN_SHFT,
			IPA_ENDP_STATUS_n_STATUS_EN_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, ep_status->status_ep,
			IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT,
			IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK);

	ipa_write_reg(ipa_ctx->mmio,
			IPA_ENDP_STATUS_n_OFST(clnt_hdl),
			reg_val);
}
2516
2517/**
2518 * ipa2_cfg_ep_status() - IPA end-point status configuration
2519 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2520 * @ipa_ep_cfg: [in] IPA end-point configuration params
2521 *
2522 * Returns: 0 on success, negative on failure
2523 *
2524 * Note: Should not be called from atomic context
2525 */
2526int ipa2_cfg_ep_status(u32 clnt_hdl, const struct ipa_ep_cfg_status *ep_status)
2527{
2528 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
2529 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_status == NULL) {
2530 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
2531 clnt_hdl,
2532 ipa_ctx->ep[clnt_hdl].valid);
2533 return -EINVAL;
2534 }
2535
2536 IPADBG("pipe=%d, status_en=%d status_ep=%d\n",
2537 clnt_hdl,
2538 ep_status->status_en,
2539 ep_status->status_ep);
2540
2541 /* copy over EP cfg */
2542 ipa_ctx->ep[clnt_hdl].status = *ep_status;
2543
2544 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
2545
2546 ipa_ctx->ctrl->ipa_cfg_ep_status(clnt_hdl, ep_status);
2547
2548 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
2549
2550 return 0;
2551}
2552
/* CFG endpoint configuration is a no-op on IPA v1.1. */
static void _ipa_cfg_ep_cfg_v1_1(u32 clnt_hdl,
		const struct ipa_ep_cfg_cfg *cfg)
{
	IPADBG("Not supported for version 1.1\n");
}
2558
/*
 * _ipa_cfg_ep_cfg_v2_0() - program the ENDP_INIT_CFG_n register for one
 * pipe: fragmentation offload, checksum offload and its metadata header
 * offset.
 */
static void _ipa_cfg_ep_cfg_v2_0(u32 clnt_hdl,
		const struct ipa_ep_cfg_cfg *cfg)
{
	u32 reg_val = 0;

	IPA_SETFIELD_IN_REG(reg_val, cfg->frag_offload_en,
			IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT,
			IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK);
	IPA_SETFIELD_IN_REG(reg_val, cfg->cs_offload_en,
			IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT,
			IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK);
	IPA_SETFIELD_IN_REG(reg_val, cfg->cs_metadata_hdr_offset,
			IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT,
			IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK);

	ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_CFG_n_OFST(clnt_hdl),
			reg_val);
}
2577
2578/**
2579 * ipa2_cfg_ep_cfg() - IPA end-point cfg configuration
2580 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2581 * @ipa_ep_cfg: [in] IPA end-point configuration params
2582 *
2583 * Returns: 0 on success, negative on failure
2584 *
2585 * Note: Should not be called from atomic context
2586 */
2587int ipa2_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg)
2588{
2589 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
2590 ipa_ctx->ep[clnt_hdl].valid == 0 || cfg == NULL) {
2591 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
2592 clnt_hdl,
2593 ipa_ctx->ep[clnt_hdl].valid);
2594 return -EINVAL;
2595 }
2596
2597 IPADBG("pipe=%d, frag_ofld_en=%d cs_ofld_en=%d mdata_hdr_ofst=%d\n",
2598 clnt_hdl,
2599 cfg->frag_offload_en,
2600 cfg->cs_offload_en,
2601 cfg->cs_metadata_hdr_offset);
2602
2603 /* copy over EP cfg */
2604 ipa_ctx->ep[clnt_hdl].cfg.cfg = *cfg;
2605
2606 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
2607
2608 ipa_ctx->ctrl->ipa_cfg_ep_cfg(clnt_hdl, cfg);
2609
2610 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
2611
2612 return 0;
2613}
2614
/* Metadata-mask endpoint configuration is a no-op on IPA v1.1. */
static void _ipa_cfg_ep_metadata_mask_v1_1(u32 clnt_hdl,
		const struct ipa_ep_cfg_metadata_mask *metadata_mask)
{
	IPADBG("Not supported for version 1.1\n");
}
2620
/*
 * _ipa_cfg_ep_metadata_mask_v2_0() - program the metadata mask into the
 * ENDP_INIT_HDR_METADATA_MASK_n register for one pipe.
 */
static void _ipa_cfg_ep_metadata_mask_v2_0(u32 clnt_hdl,
		const struct ipa_ep_cfg_metadata_mask *metadata_mask)
{
	u32 reg_val = 0;

	IPA_SETFIELD_IN_REG(reg_val, metadata_mask->metadata_mask,
			IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT,
			IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK);

	ipa_write_reg(ipa_ctx->mmio,
			IPA_ENDP_INIT_HDR_METADATA_MASK_n_OFST(clnt_hdl),
			reg_val);
}
2634
2635/**
2636 * ipa2_cfg_ep_metadata_mask() - IPA end-point meta-data mask configuration
2637 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2638 * @ipa_ep_cfg: [in] IPA end-point configuration params
2639 *
2640 * Returns: 0 on success, negative on failure
2641 *
2642 * Note: Should not be called from atomic context
2643 */
2644int ipa2_cfg_ep_metadata_mask(u32 clnt_hdl,
2645 const struct ipa_ep_cfg_metadata_mask *metadata_mask)
2646{
2647 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
2648 ipa_ctx->ep[clnt_hdl].valid == 0 || metadata_mask == NULL) {
2649 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
2650 clnt_hdl,
2651 ipa_ctx->ep[clnt_hdl].valid);
2652 return -EINVAL;
2653 }
2654
2655 IPADBG("pipe=%d, metadata_mask=0x%x\n",
2656 clnt_hdl,
2657 metadata_mask->metadata_mask);
2658
2659 /* copy over EP cfg */
2660 ipa_ctx->ep[clnt_hdl].cfg.metadata_mask = *metadata_mask;
2661
2662 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
2663
2664 ipa_ctx->ctrl->ipa_cfg_ep_metadata_mask(clnt_hdl, metadata_mask);
2665
2666 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
2667
2668 return 0;
2669}
2670
/*
 * _ipa_cfg_ep_hdr_v1_1() - program the ENDP_INIT_HDR_n register for one
 * pipe (IPA v1.1 layout). All header fields are packed into a single
 * 32-bit value in one expression and written with a single register write.
 */
void _ipa_cfg_ep_hdr_v1_1(u32 pipe_number,
		const struct ipa_ep_cfg_hdr *ep_hdr)
{
	u32 val = 0;

	/* OR together each bit-field at its shift/mask position */
	val = IPA_SETFIELD(ep_hdr->hdr_len,
			IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK) |
	      IPA_SETFIELD(ep_hdr->hdr_ofst_metadata_valid,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK) |
	      IPA_SETFIELD(ep_hdr->hdr_ofst_metadata,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK) |
	      IPA_SETFIELD(ep_hdr->hdr_additional_const_len,
			IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK) |
	      IPA_SETFIELD(ep_hdr->hdr_ofst_pkt_size_valid,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK) |
	      IPA_SETFIELD(ep_hdr->hdr_ofst_pkt_size,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK) |
	      IPA_SETFIELD(ep_hdr->hdr_a5_mux,
			IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK);
	ipa_write_reg(ipa_ctx->mmio,
			IPA_ENDP_INIT_HDR_N_OFST_v1_1(pipe_number), val);
}
2700
/*
 * _ipa_cfg_ep_hdr_v2_0() - program the ENDP_INIT_HDR_n register for one
 * pipe (IPA v2.0 layout). In addition to the v1.1 fields, v2.0 carries
 * hdr_metadata_reg_valid and hdr_remove_additional (_v2 macros below).
 */
void _ipa_cfg_ep_hdr_v2_0(u32 pipe_number,
		const struct ipa_ep_cfg_hdr *ep_hdr)
{
	u32 reg_val = 0;

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_metadata_reg_valid,
			IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_SHFT_v2,
			IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_BMSK_v2);

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_remove_additional,
			IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_SHFT_v2,
			IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_BMSK_v2);

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_a5_mux,
			IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_pkt_size,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_pkt_size_valid,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_additional_const_len,
			IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_metadata,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_metadata_valid,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_len,
			IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT,
			IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK);

	/* single register write commits all fields at once */
	ipa_write_reg(ipa_ctx->mmio,
			IPA_ENDP_INIT_HDR_N_OFST_v2_0(pipe_number), reg_val);
}
2745
2746/**
2747 * ipa2_cfg_ep_hdr() - IPA end-point header configuration
2748 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2749 * @ipa_ep_cfg: [in] IPA end-point configuration params
2750 *
2751 * Returns: 0 on success, negative on failure
2752 *
2753 * Note: Should not be called from atomic context
2754 */
2755int ipa2_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr)
2756{
2757 struct ipa_ep_context *ep;
2758
2759 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
2760 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_hdr == NULL) {
2761 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
2762 clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
2763 return -EINVAL;
2764 }
2765 IPADBG("pipe=%d remove_additional=%d, a5_mux=%d, ofst_pkt_size=0x%x\n",
2766 clnt_hdl,
2767 ep_hdr->hdr_remove_additional,
2768 ep_hdr->hdr_a5_mux,
2769 ep_hdr->hdr_ofst_pkt_size);
2770
2771 IPADBG("ofst_pkt_size_valid=%d, additional_const_len=0x%x\n",
2772 ep_hdr->hdr_ofst_pkt_size_valid,
2773 ep_hdr->hdr_additional_const_len);
2774
2775 IPADBG("ofst_metadata=0x%x, ofst_metadata_valid=%d, len=0x%x",
2776 ep_hdr->hdr_ofst_metadata,
2777 ep_hdr->hdr_ofst_metadata_valid,
2778 ep_hdr->hdr_len);
2779
2780 ep = &ipa_ctx->ep[clnt_hdl];
2781
2782 /* copy over EP cfg */
2783 ep->cfg.hdr = *ep_hdr;
2784
2785 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
2786
2787 ipa_ctx->ctrl->ipa_cfg_ep_hdr(clnt_hdl, &ep->cfg.hdr);
2788
2789 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
2790
2791 return 0;
2792}
2793
/* Extended-header configuration is a no-op on IPA v1.1; always succeeds. */
static int _ipa_cfg_ep_hdr_ext_v1_1(u32 clnt_hdl,
		const struct ipa_ep_cfg_hdr_ext *ep_hdr)
{
	IPADBG("Not supported for version 1.1\n");
	return 0;
}
2800
/*
 * _ipa_cfg_ep_hdr_ext() - common part of the ENDP_INIT_HDR_EXT_n
 * programming shared by the v2.x variants.
 *
 * @reg_val arrives with the version-specific pad-to-alignment field
 * already packed by the caller; the fields common to all v2.x layouts
 * are added here before the single register write. Always returns 0.
 */
static int _ipa_cfg_ep_hdr_ext(u32 clnt_hdl,
		const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext, u32 reg_val)
{
	/* HW encodes endianness as 0 = little endian, 1 = big endian */
	u8 hdr_endianness = ep_hdr_ext->hdr_little_endian ? 0 : 1;

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad_offset,
		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT,
		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_payload_len_inc_padding,
		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT,
		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad,
		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT,
		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad_valid,
		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT,
		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK);

	IPA_SETFIELD_IN_REG(reg_val, hdr_endianness,
		IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT,
		IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK);

	ipa_write_reg(ipa_ctx->mmio,
		IPA_ENDP_INIT_HDR_EXT_n_OFST_v2_0(clnt_hdl), reg_val);

	return 0;
}
2831
2832static int _ipa_cfg_ep_hdr_ext_v2_0(u32 clnt_hdl,
2833 const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
2834{
2835 u32 reg_val = 0;
2836
2837 IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment,
2838 IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT,
2839 IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_0);
2840
2841 return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val);
2842}
2843
2844static int _ipa_cfg_ep_hdr_ext_v2_5(u32 clnt_hdl,
2845 const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
2846{
2847 u32 reg_val = 0;
2848
2849 IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment,
2850 IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT,
2851 IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_5);
2852
2853 return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val);
2854
2855}
2856
2857static int _ipa_cfg_ep_hdr_ext_v2_6L(u32 clnt_hdl,
2858 const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
2859{
2860 u32 reg_val = 0;
2861
2862 IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment,
2863 IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT,
2864 IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_5);
2865
2866 return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val);
2867
2868}
2869
2870/**
2871 * ipa2_cfg_ep_hdr_ext() - IPA end-point extended header configuration
2872 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2873 * @ep_hdr_ext: [in] IPA end-point configuration params
2874 *
2875 * Returns: 0 on success, negative on failure
2876 *
2877 * Note: Should not be called from atomic context
2878 */
2879int ipa2_cfg_ep_hdr_ext(u32 clnt_hdl,
2880 const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
2881{
2882 struct ipa_ep_context *ep;
2883
2884 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
2885 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_hdr_ext == NULL) {
2886 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
2887 clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
2888 return -EINVAL;
2889 }
2890
2891 IPADBG("pipe=%d hdr_pad_to_alignment=%d\n",
2892 clnt_hdl,
2893 ep_hdr_ext->hdr_pad_to_alignment);
2894
2895 IPADBG("hdr_total_len_or_pad_offset=%d\n",
2896 ep_hdr_ext->hdr_total_len_or_pad_offset);
2897
2898 IPADBG("hdr_payload_len_inc_padding=%d hdr_total_len_or_pad=%d\n",
2899 ep_hdr_ext->hdr_payload_len_inc_padding,
2900 ep_hdr_ext->hdr_total_len_or_pad);
2901
2902 IPADBG("hdr_total_len_or_pad_valid=%d hdr_little_endian=%d\n",
2903 ep_hdr_ext->hdr_total_len_or_pad_valid,
2904 ep_hdr_ext->hdr_little_endian);
2905
2906 ep = &ipa_ctx->ep[clnt_hdl];
2907
2908 /* copy over EP cfg */
2909 ep->cfg.hdr_ext = *ep_hdr_ext;
2910
2911 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
2912
2913 ipa_ctx->ctrl->ipa_cfg_ep_hdr_ext(clnt_hdl, &ep->cfg.hdr_ext);
2914
2915 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
2916
2917 return 0;
2918}
2919
2920/**
2921 * ipa2_cfg_ep_hdr() - IPA end-point Control configuration
2922 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2923 * @ipa_ep_cfg_ctrl: [in] IPA end-point configuration params
2924 *
2925 * Returns: 0 on success, negative on failure
2926 */
2927int ipa2_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
2928{
2929 u32 reg_val = 0;
2930
2931 if (clnt_hdl >= ipa_ctx->ipa_num_pipes || ep_ctrl == NULL) {
2932 IPAERR("bad parm, clnt_hdl = %d\n", clnt_hdl);
2933 return -EINVAL;
2934 }
2935
2936 IPADBG("pipe=%d ep_suspend=%d, ep_delay=%d\n",
2937 clnt_hdl,
2938 ep_ctrl->ipa_ep_suspend,
2939 ep_ctrl->ipa_ep_delay);
2940
2941 IPA_SETFIELD_IN_REG(reg_val, ep_ctrl->ipa_ep_suspend,
2942 IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_SHFT,
2943 IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_BMSK);
2944
2945 IPA_SETFIELD_IN_REG(reg_val, ep_ctrl->ipa_ep_delay,
2946 IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT,
2947 IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK);
2948
2949 ipa_write_reg(ipa_ctx->mmio,
2950 IPA_ENDP_INIT_CTRL_N_OFST(clnt_hdl), reg_val);
2951
2952 return 0;
2953
2954}
2955
2956/**
2957 * ipa_cfg_aggr_cntr_granularity() - granularity of the AGGR timer configuration
2958 * @aggr_granularity: [in] defines the granularity of AGGR timers
2959 * number of units of 1/32msec
2960 *
2961 * Returns: 0 on success, negative on failure
2962 */
int ipa_cfg_aggr_cntr_granularity(u8 aggr_granularity)
{
	u32 reg_val = 0;

	/*
	 * NOTE(review): '<=' also rejects IPA_AGGR_GRAN_MIN (1) itself,
	 * although the name suggests MIN is the lowest legal value --
	 * confirm whether a granularity of exactly 1 is truly invalid.
	 */
	if (aggr_granularity <= IPA_AGGR_GRAN_MIN ||
		aggr_granularity > IPA_AGGR_GRAN_MAX) {
		IPAERR("bad param, aggr_granularity = %d\n",
			aggr_granularity);
		return -EINVAL;
	}
	IPADBG("aggr_granularity=%d\n", aggr_granularity);

	/* read-modify-write: clear only the AGGR_GRAN field */
	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_COUNTER_CFG_OFST);
	reg_val = (reg_val & ~IPA_COUNTER_CFG_AGGR_GRAN_BMSK);

	/* HW field is zero-based, hence granularity - 1 */
	IPA_SETFIELD_IN_REG(reg_val, aggr_granularity - 1,
		IPA_COUNTER_CFG_AGGR_GRAN_SHFT,
		IPA_COUNTER_CFG_AGGR_GRAN_BMSK);

	ipa_write_reg(ipa_ctx->mmio,
		IPA_COUNTER_CFG_OFST, reg_val);

	return 0;

}
2988EXPORT_SYMBOL(ipa_cfg_aggr_cntr_granularity);
2989
2990/**
2991 * ipa_cfg_eot_coal_cntr_granularity() - granularity of EOT_COAL timer
2992 * configuration
2993 * @eot_coal_granularity: defines the granularity of EOT_COAL timers
2994 * number of units of 1/32msec
2995 *
2996 * Returns: 0 on success, negative on failure
2997 */
int ipa_cfg_eot_coal_cntr_granularity(u8 eot_coal_granularity)
{
	u32 reg_val = 0;

	/*
	 * NOTE(review): '<=' also rejects IPA_EOT_COAL_GRAN_MIN (1) itself,
	 * mirroring ipa_cfg_aggr_cntr_granularity() above -- confirm
	 * whether a granularity of exactly 1 is truly invalid.
	 */
	if (eot_coal_granularity <= IPA_EOT_COAL_GRAN_MIN ||
		eot_coal_granularity > IPA_EOT_COAL_GRAN_MAX) {
		IPAERR("bad parm, eot_coal_granularity = %d\n",
			eot_coal_granularity);
		return -EINVAL;
	}
	IPADBG("eot_coal_granularity=%d\n", eot_coal_granularity);

	/* read-modify-write: clear only the EOT_COAL_GRAN field */
	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_COUNTER_CFG_OFST);
	reg_val = (reg_val & ~IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK);

	/* HW field is zero-based, hence granularity - 1 */
	IPA_SETFIELD_IN_REG(reg_val, eot_coal_granularity - 1,
		IPA_COUNTER_CFG_EOT_COAL_GRAN_SHFT,
		IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK);

	ipa_write_reg(ipa_ctx->mmio,
		IPA_COUNTER_CFG_OFST, reg_val);

	return 0;

}
3023EXPORT_SYMBOL(ipa_cfg_eot_coal_cntr_granularity);
3024
3025const char *ipa_get_mode_type_str(enum ipa_mode_type mode)
3026{
3027 switch (mode) {
3028 case (IPA_BASIC):
3029 return "Basic";
3030 case (IPA_ENABLE_FRAMING_HDLC):
3031 return "HDLC framing";
3032 case (IPA_ENABLE_DEFRAMING_HDLC):
3033 return "HDLC de-framing";
3034 case (IPA_DMA):
3035 return "DMA";
3036 }
3037
3038 return "undefined";
3039}
3040
3041void _ipa_cfg_ep_mode_v1_1(u32 pipe_number, u32 dst_pipe_number,
3042 const struct ipa_ep_cfg_mode *ep_mode)
3043{
3044 u32 reg_val = 0;
3045
3046 IPA_SETFIELD_IN_REG(reg_val, ep_mode->mode,
3047 IPA_ENDP_INIT_MODE_N_MODE_SHFT,
3048 IPA_ENDP_INIT_MODE_N_MODE_BMSK);
3049
3050 IPA_SETFIELD_IN_REG(reg_val, dst_pipe_number,
3051 IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v1_1,
3052 IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v1_1);
3053
3054 ipa_write_reg(ipa_ctx->mmio,
3055 IPA_ENDP_INIT_MODE_N_OFST_v1_1(pipe_number), reg_val);
3056}
3057
3058void _ipa_cfg_ep_mode_v2_0(u32 pipe_number, u32 dst_pipe_number,
3059 const struct ipa_ep_cfg_mode *ep_mode)
3060{
3061 u32 reg_val = 0;
3062
3063 IPA_SETFIELD_IN_REG(reg_val, ep_mode->mode,
3064 IPA_ENDP_INIT_MODE_N_MODE_SHFT,
3065 IPA_ENDP_INIT_MODE_N_MODE_BMSK);
3066
3067 IPA_SETFIELD_IN_REG(reg_val, dst_pipe_number,
3068 IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v2_0,
3069 IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v2_0);
3070
3071 ipa_write_reg(ipa_ctx->mmio,
3072 IPA_ENDP_INIT_MODE_N_OFST_v2_0(pipe_number), reg_val);
3073}
3074
3075/**
3076 * ipa2_cfg_ep_mode() - IPA end-point mode configuration
3077 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
3078 * @ipa_ep_cfg: [in] IPA end-point configuration params
3079 *
3080 * Returns: 0 on success, negative on failure
3081 *
3082 * Note: Should not be called from atomic context
3083 */
3084int ipa2_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode)
3085{
3086 int ep;
3087
3088 if (unlikely(!ipa_ctx)) {
3089 IPAERR("IPA driver was not initialized\n");
3090 return -EINVAL;
3091 }
3092
3093 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
3094 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_mode == NULL) {
3095 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
3096 clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
3097 return -EINVAL;
3098 }
3099
3100 if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
3101 IPAERR("MODE does not apply to IPA out EP %d\n", clnt_hdl);
3102 return -EINVAL;
3103 }
3104
3105 ep = ipa2_get_ep_mapping(ep_mode->dst);
3106 if (ep == -1 && ep_mode->mode == IPA_DMA) {
3107 IPAERR("dst %d does not exist\n", ep_mode->dst);
3108 return -EINVAL;
3109 }
3110
3111 WARN_ON(ep_mode->mode == IPA_DMA && IPA_CLIENT_IS_PROD(ep_mode->dst));
3112
3113 if (!IPA_CLIENT_IS_CONS(ep_mode->dst))
3114 ep = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
3115
3116 IPADBG("pipe=%d mode=%d(%s), dst_client_number=%d",
3117 clnt_hdl,
3118 ep_mode->mode,
3119 ipa_get_mode_type_str(ep_mode->mode),
3120 ep_mode->dst);
3121
3122 /* copy over EP cfg */
3123 ipa_ctx->ep[clnt_hdl].cfg.mode = *ep_mode;
3124 ipa_ctx->ep[clnt_hdl].dst_pipe_index = ep;
3125
3126 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
3127
3128 ipa_ctx->ctrl->ipa_cfg_ep_mode(clnt_hdl,
3129 ipa_ctx->ep[clnt_hdl].dst_pipe_index,
3130 ep_mode);
3131
3132 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
3133
3134 return 0;
3135}
3136
3137const char *get_aggr_enable_str(enum ipa_aggr_en_type aggr_en)
3138{
3139 switch (aggr_en) {
3140 case (IPA_BYPASS_AGGR):
3141 return "no aggregation";
3142 case (IPA_ENABLE_AGGR):
3143 return "aggregation enabled";
3144 case (IPA_ENABLE_DEAGGR):
3145 return "de-aggregation enabled";
3146 }
3147
3148 return "undefined";
3149}
3150
3151const char *get_aggr_type_str(enum ipa_aggr_type aggr_type)
3152{
3153 switch (aggr_type) {
3154 case (IPA_MBIM_16):
3155 return "MBIM_16";
3156 case (IPA_HDLC):
3157 return "HDLC";
3158 case (IPA_TLP):
3159 return "TLP";
3160 case (IPA_RNDIS):
3161 return "RNDIS";
3162 case (IPA_GENERIC):
3163 return "GENERIC";
3164 case (IPA_QCMAP):
3165 return "QCMAP";
3166 }
3167 return "undefined";
3168}
3169
3170void _ipa_cfg_ep_aggr_v1_1(u32 pipe_number,
3171 const struct ipa_ep_cfg_aggr *ep_aggr)
3172{
3173 u32 reg_val = 0;
3174
3175 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_en,
3176 IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT,
3177 IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK);
3178
3179 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr,
3180 IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT,
3181 IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK);
3182
3183 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_byte_limit,
3184 IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT,
3185 IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK);
3186
3187 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_time_limit,
3188 IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT,
3189 IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK);
3190
3191 ipa_write_reg(ipa_ctx->mmio,
3192 IPA_ENDP_INIT_AGGR_N_OFST_v1_1(pipe_number), reg_val);
3193}
3194
3195void _ipa_cfg_ep_aggr_v2_0(u32 pipe_number,
3196 const struct ipa_ep_cfg_aggr *ep_aggr)
3197{
3198 u32 reg_val = 0;
3199
3200 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_en,
3201 IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT,
3202 IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK);
3203
3204 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr,
3205 IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT,
3206 IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK);
3207
3208 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_byte_limit,
3209 IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT,
3210 IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK);
3211
3212 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_time_limit,
3213 IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT,
3214 IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK);
3215
3216 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_pkt_limit,
3217 IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT,
3218 IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK);
3219
3220 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_sw_eof_active,
3221 IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT,
3222 IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK);
3223
3224 ipa_write_reg(ipa_ctx->mmio,
3225 IPA_ENDP_INIT_AGGR_N_OFST_v2_0(pipe_number), reg_val);
3226}
3227
3228/**
3229 * ipa2_cfg_ep_aggr() - IPA end-point aggregation configuration
3230 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
3231 * @ipa_ep_cfg: [in] IPA end-point configuration params
3232 *
3233 * Returns: 0 on success, negative on failure
3234 *
3235 * Note: Should not be called from atomic context
3236 */
3237int ipa2_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr)
3238{
3239 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
3240 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_aggr == NULL) {
3241 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
3242 clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
3243 return -EINVAL;
3244 }
3245
3246 IPADBG("pipe=%d en=%d(%s), type=%d(%s), byte_limit=%d, time_limit=%d\n",
3247 clnt_hdl,
3248 ep_aggr->aggr_en,
3249 get_aggr_enable_str(ep_aggr->aggr_en),
3250 ep_aggr->aggr,
3251 get_aggr_type_str(ep_aggr->aggr),
3252 ep_aggr->aggr_byte_limit,
3253 ep_aggr->aggr_time_limit);
3254
3255 /* copy over EP cfg */
3256 ipa_ctx->ep[clnt_hdl].cfg.aggr = *ep_aggr;
3257
3258 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
3259
3260 ipa_ctx->ctrl->ipa_cfg_ep_aggr(clnt_hdl, ep_aggr);
3261
3262 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
3263
3264 return 0;
3265}
3266
3267void _ipa_cfg_ep_route_v1_1(u32 pipe_index, u32 rt_tbl_index)
3268{
3269 int reg_val = 0;
3270
3271 IPA_SETFIELD_IN_REG(reg_val, rt_tbl_index,
3272 IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT,
3273 IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK);
3274
3275 ipa_write_reg(ipa_ctx->mmio,
3276 IPA_ENDP_INIT_ROUTE_N_OFST_v1_1(pipe_index),
3277 reg_val);
3278}
3279
3280void _ipa_cfg_ep_route_v2_0(u32 pipe_index, u32 rt_tbl_index)
3281{
3282 int reg_val = 0;
3283
3284 IPA_SETFIELD_IN_REG(reg_val, rt_tbl_index,
3285 IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT,
3286 IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK);
3287
3288 ipa_write_reg(ipa_ctx->mmio,
3289 IPA_ENDP_INIT_ROUTE_N_OFST_v2_0(pipe_index),
3290 reg_val);
3291}
3292
3293/**
3294 * ipa2_cfg_ep_route() - IPA end-point routing configuration
3295 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
3296 * @ipa_ep_cfg: [in] IPA end-point configuration params
3297 *
3298 * Returns: 0 on success, negative on failure
3299 *
3300 * Note: Should not be called from atomic context
3301 */
3302int ipa2_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route)
3303{
3304 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
3305 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_route == NULL) {
3306 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
3307 clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
3308 return -EINVAL;
3309 }
3310
3311 if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
3312 IPAERR("ROUTE does not apply to IPA out EP %d\n",
3313 clnt_hdl);
3314 return -EINVAL;
3315 }
3316
3317 /*
3318 * if DMA mode was configured previously for this EP, return with
3319 * success
3320 */
3321 if (ipa_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA) {
3322 IPADBG("DMA enabled for ep %d, dst pipe is part of DMA\n",
3323 clnt_hdl);
3324 return 0;
3325 }
3326
3327 if (ep_route->rt_tbl_hdl)
3328 IPAERR("client specified non-zero RT TBL hdl - ignore it\n");
3329
3330 IPADBG("pipe=%d, rt_tbl_hdl=%d\n",
3331 clnt_hdl,
3332 ep_route->rt_tbl_hdl);
3333
3334 /* always use "default" routing table when programming EP ROUTE reg */
3335 if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0)
3336 ipa_ctx->ep[clnt_hdl].rt_tbl_idx =
3337 IPA_MEM_PART(v4_apps_rt_index_lo);
3338 else
3339 ipa_ctx->ep[clnt_hdl].rt_tbl_idx = 0;
3340
3341 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
3342
3343 ipa_ctx->ctrl->ipa_cfg_ep_route(clnt_hdl,
3344 ipa_ctx->ep[clnt_hdl].rt_tbl_idx);
3345
3346 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
3347
3348 return 0;
3349}
3350
/*
 * Program HOL-blocking enable and timer registers (v1.1 layout).
 * The timer is written through a truncating (u16) cast; v1 timers max out
 * at IPA_V1_MAX_HOLB_TMR_VAL (511), and ipa2_cfg_ep_holb() rejects
 * tmr_val above ctrl->max_holb_tmr_val before calling here, so no bits
 * are lost on that path.
 */
void _ipa_cfg_ep_holb_v1_1(u32 pipe_number,
			const struct ipa_ep_cfg_holb *ep_holb)
{
	ipa_write_reg(ipa_ctx->mmio,
		IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v1_1(pipe_number),
		ep_holb->en);

	ipa_write_reg(ipa_ctx->mmio,
		IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v1_1(pipe_number),
		(u16)ep_holb->tmr_val);
}
3362
/*
 * Program HOL-blocking enable and timer registers (v2.0 layout).
 * The (u16) cast matches IPA_V2_0_MAX_HOLB_TMR_VAL (65535); callers are
 * expected to have validated tmr_val via ctrl->max_holb_tmr_val
 * (see ipa2_cfg_ep_holb()).
 */
void _ipa_cfg_ep_holb_v2_0(u32 pipe_number,
			const struct ipa_ep_cfg_holb *ep_holb)
{
	ipa_write_reg(ipa_ctx->mmio,
		IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number),
		ep_holb->en);

	ipa_write_reg(ipa_ctx->mmio,
		IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number),
		(u16)ep_holb->tmr_val);
}
3374
/*
 * Program HOL-blocking enable and timer registers (v2.5). Unlike the
 * v1.1/v2.0 variants there is no u16 cast: the full 32-bit tmr_val is
 * written (v2.5 supports IPA_V2_5_MAX_HOLB_TMR_VAL). Enable is written
 * before the timer; keep this order.
 */
void _ipa_cfg_ep_holb_v2_5(u32 pipe_number,
	const struct ipa_ep_cfg_holb *ep_holb)
{
	ipa_write_reg(ipa_ctx->mmio,
		IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number),
		ep_holb->en);

	ipa_write_reg(ipa_ctx->mmio,
		IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number),
		ep_holb->tmr_val);
}
3386
3387void _ipa_cfg_ep_holb_v2_6L(u32 pipe_number,
3388 const struct ipa_ep_cfg_holb *ep_holb)
3389{
3390 ipa_write_reg(ipa_ctx->mmio,
3391 IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number),
3392 ep_holb->en);
3393
3394 ipa_write_reg(ipa_ctx->mmio,
3395 IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number),
3396 ep_holb->tmr_val);
3397}
3398
3399/**
3400 * ipa2_cfg_ep_holb() - IPA end-point holb configuration
3401 *
3402 * If an IPA producer pipe is full, IPA HW by default will block
3403 * indefinitely till space opens up. During this time no packets
3404 * including those from unrelated pipes will be processed. Enabling
3405 * HOLB means IPA HW will be allowed to drop packets as/when needed
3406 * and indefinite blocking is avoided.
3407 *
3408 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
3409 * @ipa_ep_cfg: [in] IPA end-point configuration params
3410 *
3411 * Returns: 0 on success, negative on failure
3412 */
3413int ipa2_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb)
3414{
3415 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
3416 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_holb == NULL ||
3417 ep_holb->tmr_val > ipa_ctx->ctrl->max_holb_tmr_val ||
3418 ep_holb->en > 1) {
3419 IPAERR("bad parm.\n");
3420 return -EINVAL;
3421 }
3422
3423 if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) {
3424 IPAERR("HOLB does not apply to IPA in EP %d\n", clnt_hdl);
3425 return -EINVAL;
3426 }
3427
3428 if (!ipa_ctx->ctrl->ipa_cfg_ep_holb) {
3429 IPAERR("HOLB is not supported for this IPA core\n");
3430 return -EINVAL;
3431 }
3432
3433 ipa_ctx->ep[clnt_hdl].holb = *ep_holb;
3434
3435 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
3436
3437 ipa_ctx->ctrl->ipa_cfg_ep_holb(clnt_hdl, ep_holb);
3438
3439 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
3440
3441 IPADBG("cfg holb %u ep=%d tmr=%d\n", ep_holb->en, clnt_hdl,
3442 ep_holb->tmr_val);
3443
3444 return 0;
3445}
3446
3447/**
3448 * ipa2_cfg_ep_holb_by_client() - IPA end-point holb configuration
3449 *
3450 * Wrapper function for ipa_cfg_ep_holb() with client name instead of
3451 * client handle. This function is used for clients that does not have
3452 * client handle.
3453 *
3454 * @client: [in] client name
3455 * @ipa_ep_cfg: [in] IPA end-point configuration params
3456 *
3457 * Returns: 0 on success, negative on failure
3458 */
int ipa2_cfg_ep_holb_by_client(enum ipa_client_type client,
				const struct ipa_ep_cfg_holb *ep_holb)
{
	/*
	 * An unmapped client makes ipa2_get_ep_mapping() return -1, which
	 * wraps to a huge u32 and is rejected by ipa2_cfg_ep_holb()'s
	 * pipe-range check, so no extra validation is needed here.
	 */
	return ipa2_cfg_ep_holb(ipa2_get_ep_mapping(client), ep_holb);
}
3464
/*
 * De-aggregation configuration is not supported on IPA v1.1; this stub
 * logs and reports success so version-agnostic callers need no special
 * casing.
 */
static int _ipa_cfg_ep_deaggr_v1_1(u32 clnt_hdl,
				const struct ipa_ep_cfg_deaggr *ep_deaggr)
{
	IPADBG("Not supported for version 1.1\n");
	return 0;
}
3471
3472static int _ipa_cfg_ep_deaggr_v2_0(u32 clnt_hdl,
3473 const struct ipa_ep_cfg_deaggr *ep_deaggr)
3474{
3475 u32 reg_val = 0;
3476
3477 IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->deaggr_hdr_len,
3478 IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT,
3479 IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK);
3480
3481 IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->packet_offset_valid,
3482 IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT,
3483 IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK);
3484
3485 IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->packet_offset_location,
3486 IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT,
3487 IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK);
3488
3489 IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->max_packet_len,
3490 IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT,
3491 IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK);
3492
3493 ipa_write_reg(ipa_ctx->mmio,
3494 IPA_ENDP_INIT_DEAGGR_n_OFST_v2_0(clnt_hdl), reg_val);
3495
3496 return 0;
3497}
3498
3499/**
3500 * ipa2_cfg_ep_deaggr() - IPA end-point deaggregation configuration
3501 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
3502 * @ep_deaggr: [in] IPA end-point configuration params
3503 *
3504 * Returns: 0 on success, negative on failure
3505 *
3506 * Note: Should not be called from atomic context
3507 */
3508int ipa2_cfg_ep_deaggr(u32 clnt_hdl,
3509 const struct ipa_ep_cfg_deaggr *ep_deaggr)
3510{
3511 struct ipa_ep_context *ep;
3512
3513 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
3514 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_deaggr == NULL) {
3515 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
3516 clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
3517 return -EINVAL;
3518 }
3519
3520 IPADBG("pipe=%d deaggr_hdr_len=%d\n",
3521 clnt_hdl,
3522 ep_deaggr->deaggr_hdr_len);
3523
3524 IPADBG("packet_offset_valid=%d\n",
3525 ep_deaggr->packet_offset_valid);
3526
3527 IPADBG("packet_offset_location=%d max_packet_len=%d\n",
3528 ep_deaggr->packet_offset_location,
3529 ep_deaggr->max_packet_len);
3530
3531 ep = &ipa_ctx->ep[clnt_hdl];
3532
3533 /* copy over EP cfg */
3534 ep->cfg.deaggr = *ep_deaggr;
3535
3536 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
3537
3538 ipa_ctx->ctrl->ipa_cfg_ep_deaggr(clnt_hdl, &ep->cfg.deaggr);
3539
3540 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
3541
3542 return 0;
3543}
3544
/*
 * Metadata (QMAP mux-id) configuration is not supported on IPA v1.1;
 * this stub only logs so version-agnostic callers need no special casing.
 */
static void _ipa_cfg_ep_metadata_v1_1(u32 pipe_number,
					const struct ipa_ep_cfg_metadata *meta)
{
	IPADBG("Not supported for version 1.1\n");
}
3550
3551static void _ipa_cfg_ep_metadata_v2_0(u32 pipe_number,
3552 const struct ipa_ep_cfg_metadata *meta)
3553{
3554 u32 reg_val = 0;
3555
3556 IPA_SETFIELD_IN_REG(reg_val, meta->qmap_id,
3557 IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT,
3558 IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK);
3559
3560 ipa_write_reg(ipa_ctx->mmio,
3561 IPA_ENDP_INIT_HDR_METADATA_n_OFST(pipe_number),
3562 reg_val);
3563}
3564
3565/**
3566 * ipa2_cfg_ep_metadata() - IPA end-point metadata configuration
3567 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
3568 * @ipa_ep_cfg: [in] IPA end-point configuration params
3569 *
3570 * Returns: 0 on success, negative on failure
3571 *
3572 * Note: Should not be called from atomic context
3573 */
3574int ipa2_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md)
3575{
3576 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
3577 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_md == NULL) {
3578 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
3579 clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
3580 return -EINVAL;
3581 }
3582
3583 IPADBG("pipe=%d, mux id=%d\n", clnt_hdl, ep_md->qmap_id);
3584
3585 /* copy over EP cfg */
3586 ipa_ctx->ep[clnt_hdl].cfg.meta = *ep_md;
3587
3588 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
3589
3590 ipa_ctx->ctrl->ipa_cfg_ep_metadata(clnt_hdl, ep_md);
3591 ipa_ctx->ep[clnt_hdl].cfg.hdr.hdr_metadata_reg_valid = 1;
3592 ipa_ctx->ctrl->ipa_cfg_ep_hdr(clnt_hdl, &ipa_ctx->ep[clnt_hdl].cfg.hdr);
3593
3594 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
3595
3596 return 0;
3597}
3598EXPORT_SYMBOL(ipa2_cfg_ep_metadata);
3599
3600int ipa2_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
3601{
3602 struct ipa_ep_cfg_metadata meta;
3603 struct ipa_ep_context *ep;
3604 int ipa_ep_idx;
3605 int result = -EINVAL;
3606
3607 if (unlikely(!ipa_ctx)) {
3608 IPAERR("IPA driver was not initialized\n");
3609 return -EINVAL;
3610 }
3611
3612 if (param_in->client >= IPA_CLIENT_MAX) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05303613 IPAERR_RL("bad parm client:%d\n", param_in->client);
Amir Levy9659e592016-10-27 18:08:27 +03003614 goto fail;
3615 }
3616
3617 ipa_ep_idx = ipa2_get_ep_mapping(param_in->client);
3618 if (ipa_ep_idx == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05303619 IPAERR_RL("Invalid client.\n");
Amir Levy9659e592016-10-27 18:08:27 +03003620 goto fail;
3621 }
3622
3623 ep = &ipa_ctx->ep[ipa_ep_idx];
3624 if (!ep->valid) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05303625 IPAERR_RL("EP not allocated.\n");
Amir Levy9659e592016-10-27 18:08:27 +03003626 goto fail;
3627 }
3628
3629 meta.qmap_id = param_in->qmap_id;
3630 if (param_in->client == IPA_CLIENT_USB_PROD ||
3631 param_in->client == IPA_CLIENT_HSIC1_PROD ||
Sunil Paidimarri5139aa22017-02-13 11:07:32 -08003632 param_in->client == IPA_CLIENT_ODU_PROD ||
3633 param_in->client == IPA_CLIENT_ETHERNET_PROD) {
Amir Levy9659e592016-10-27 18:08:27 +03003634 result = ipa2_cfg_ep_metadata(ipa_ep_idx, &meta);
3635 } else if (param_in->client == IPA_CLIENT_WLAN1_PROD) {
3636 ipa_ctx->ep[ipa_ep_idx].cfg.meta = meta;
3637 result = ipa_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id);
3638 if (result)
Utkarsh Saxenae9782812017-05-26 17:20:32 +05303639 IPAERR_RL("qmap_id %d write failed on ep=%d\n",
Amir Levy9659e592016-10-27 18:08:27 +03003640 meta.qmap_id, ipa_ep_idx);
3641 result = 0;
3642 }
3643
3644fail:
3645 return result;
3646}
3647
3648/**
3649 * ipa_dump_buff_internal() - dumps buffer for debug purposes
3650 * @base: buffer base address
3651 * @phy_base: buffer physical base address
3652 * @size: size of the buffer
3653 */
3654void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size)
3655{
3656 int i;
3657 u32 *cur = (u32 *)base;
3658 u8 *byt;
3659
3660 IPADBG("system phys addr=%pa len=%u\n", &phy_base, size);
3661 for (i = 0; i < size / 4; i++) {
3662 byt = (u8 *)(cur + i);
3663 IPADBG("%2d %08x %02x %02x %02x %02x\n", i, *(cur + i),
3664 byt[0], byt[1], byt[2], byt[3]);
3665 }
3666 IPADBG("END\n");
3667}
3668
3669/**
3670 * void ipa_rx_timeout_min_max_calc() - calc min max timeout time of rx polling
3671 * @time: time fom dtsi entry or from debugfs file system
3672 * @min: rx polling min timeout
3673 * @max: rx polling max timeout
3674 * Maximum time could be of 10Msec allowed.
3675 */
3676void ipa_rx_timeout_min_max_calc(u32 *min, u32 *max, s8 time)
3677{
3678 if ((time >= MIN_RX_POLL_TIME) &&
3679 (time <= MAX_RX_POLL_TIME)) {
3680 *min = (time * MSEC) + LOWER_CUTOFF;
3681 *max = (time * MSEC) + UPPER_CUTOFF;
3682 } else {
3683 /* Setting up the default min max time */
3684 IPADBG("Setting up default rx polling timeout\n");
3685 *min = (MIN_RX_POLL_TIME * MSEC) +
3686 LOWER_CUTOFF;
3687 *max = (MIN_RX_POLL_TIME * MSEC) +
3688 UPPER_CUTOFF;
3689 }
3690 IPADBG("Rx polling timeout Min = %u len = %u\n", *min, *max);
3691}
3692
3693/**
3694 * ipa_pipe_mem_init() - initialize the pipe memory
3695 * @start_ofst: start offset
3696 * @size: size
3697 *
3698 * Return value:
3699 * 0: success
3700 * -ENOMEM: no memory
3701 */
int ipa_pipe_mem_init(u32 start_ofst, u32 size)
{
	int res;
	u32 aligned_start_ofst;
	u32 aligned_size;
	struct gen_pool *pool;

	if (!size) {
		IPAERR("no IPA pipe memory allocated\n");
		goto fail;
	}

	/*
	 * Round the start offset up and shrink the usable size by the
	 * alignment slack.
	 * NOTE(review): if size is smaller than the alignment slack,
	 * aligned_size underflows (u32 wrap) -- confirm callers always
	 * pass a region larger than the alignment.
	 */
	aligned_start_ofst = IPA_HW_TABLE_ALIGNMENT(start_ofst);
	aligned_size = size - (aligned_start_ofst - start_ofst);

	IPADBG("start_ofst=%u aligned_start_ofst=%u size=%u aligned_size=%u\n",
	       start_ofst, aligned_start_ofst, size, aligned_size);

	/* allocation order of 8 i.e. 128 bytes, global pool */
	pool = gen_pool_create(8, -1);
	if (!pool) {
		IPAERR("Failed to create a new memory pool.\n");
		goto fail;
	}

	res = gen_pool_add(pool, aligned_start_ofst, aligned_size, -1);
	if (res) {
		IPAERR("Failed to add memory to IPA pipe pool\n");
		goto err_pool_add;
	}

	ipa_ctx->pipe_mem_pool = pool;
	return 0;

err_pool_add:
	gen_pool_destroy(pool);
fail:
	return -ENOMEM;
}
3741
3742/**
3743 * ipa_pipe_mem_alloc() - allocate pipe memory
3744 * @ofst: offset
3745 * @size: size
3746 *
3747 * Return value:
3748 * 0: success
3749 */
3750int ipa_pipe_mem_alloc(u32 *ofst, u32 size)
3751{
3752 u32 vaddr;
3753 int res = -1;
3754
3755 if (!ipa_ctx->pipe_mem_pool || !size) {
3756 IPAERR("failed size=%u pipe_mem_pool=%p\n", size,
3757 ipa_ctx->pipe_mem_pool);
3758 return res;
3759 }
3760
3761 vaddr = gen_pool_alloc(ipa_ctx->pipe_mem_pool, size);
3762
3763 if (vaddr) {
3764 *ofst = vaddr;
3765 res = 0;
3766 IPADBG("size=%u ofst=%u\n", size, vaddr);
3767 } else {
3768 IPAERR("size=%u failed\n", size);
3769 }
3770
3771 return res;
3772}
3773
3774/**
3775 * ipa_pipe_mem_free() - free pipe memory
3776 * @ofst: offset
3777 * @size: size
3778 *
3779 * Return value:
3780 * 0: success
3781 */
int ipa_pipe_mem_free(u32 ofst, u32 size)
{
	IPADBG("size=%u ofst=%u\n", size, ofst);
	/* best-effort: silently a no-op if the pool was never created */
	if (ipa_ctx->pipe_mem_pool && size)
		gen_pool_free(ipa_ctx->pipe_mem_pool, ofst, size);
	return 0;
}
3789
3790/**
3791 * ipa2_set_aggr_mode() - Set the aggregation mode which is a global setting
3792 * @mode: [in] the desired aggregation mode for e.g. straight MBIM, QCNCM,
3793 * etc
3794 *
3795 * Returns: 0 on success
3796 */
3797int ipa2_set_aggr_mode(enum ipa_aggr_mode mode)
3798{
3799 u32 reg_val;
3800
3801 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
3802 reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_QCNCM_OFST);
3803 ipa_write_reg(ipa_ctx->mmio, IPA_QCNCM_OFST, (mode & 0x1) |
3804 (reg_val & 0xfffffffe));
3805 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
3806
3807 return 0;
3808}
3809
3810/**
3811 * ipa2_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
3812 * mode
3813 * @sig: [in] the first 3 bytes of QCNCM NDP signature (expected to be
3814 * "QND")
3815 *
3816 * Set the NDP signature used for QCNCM aggregation mode. The fourth byte
3817 * (expected to be 'P') needs to be set using the header addition mechanism
3818 *
3819 * Returns: 0 on success, negative on failure
3820 */
3821int ipa2_set_qcncm_ndp_sig(char sig[3])
3822{
3823 u32 reg_val;
3824
3825 if (sig == NULL) {
3826 IPAERR("bad argument for ipa_set_qcncm_ndp_sig/n");
3827 return -EINVAL;
3828 }
3829 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
3830 reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_QCNCM_OFST);
3831 ipa_write_reg(ipa_ctx->mmio, IPA_QCNCM_OFST, sig[0] << 20 |
3832 (sig[1] << 12) | (sig[2] << 4) |
3833 (reg_val & 0xf000000f));
3834 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
3835
3836 return 0;
3837}
3838
3839/**
3840 * ipa2_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
3841 * configuration
3842 * @enable: [in] true for single NDP/MBIM; false otherwise
3843 *
3844 * Returns: 0 on success
3845 */
3846int ipa2_set_single_ndp_per_mbim(bool enable)
3847{
3848 u32 reg_val;
3849
3850 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
3851 reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST);
3852 ipa_write_reg(ipa_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST,
3853 (enable & 0x1) | (reg_val & 0xfffffffe));
3854 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
3855
3856 return 0;
3857}
3858
3859/**
3860 * ipa_set_hw_timer_fix_for_mbim_aggr() - Enable/disable HW timer fix
3861 * for MBIM aggregation.
3862 * @enable: [in] true for enable HW fix; false otherwise
3863 *
3864 * Returns: 0 on success
3865 */
3866int ipa_set_hw_timer_fix_for_mbim_aggr(bool enable)
3867{
3868 u32 reg_val;
3869
3870 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
3871 reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST);
3872 ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST,
3873 (enable << IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_SHFT) |
3874 (reg_val & ~IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_BMSK));
3875 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
3876 return 0;
3877}
3878EXPORT_SYMBOL(ipa_set_hw_timer_fix_for_mbim_aggr);
3879
3880/**
3881 * ipa_straddle_boundary() - Checks whether a memory buffer straddles a boundary
3882 * @start: start address of the memory buffer
3883 * @end: end address of the memory buffer
3884 * @boundary: boundary
3885 *
3886 * Return value:
3887 * 1: if the interval [start, end] straddles boundary
3888 * 0: otherwise
3889 */
3890int ipa_straddle_boundary(u32 start, u32 end, u32 boundary)
3891{
3892 u32 next_start;
3893 u32 prev_end;
3894
3895 IPADBG("start=%u end=%u boundary=%u\n", start, end, boundary);
3896
3897 next_start = (start + (boundary - 1)) & ~(boundary - 1);
3898 prev_end = ((end + (boundary - 1)) & ~(boundary - 1)) - boundary;
3899
3900 while (next_start < prev_end)
3901 next_start += boundary;
3902
3903 if (next_start == prev_end)
3904 return 1;
3905 else
3906 return 0;
3907}
3908
3909/**
3910 * ipa2_bam_reg_dump() - Dump selected BAM registers for IPA and DMA-BAM
3911 *
3912 * Function is rate limited to avoid flooding kernel log buffer
3913 */
3914void ipa2_bam_reg_dump(void)
3915{
3916 static DEFINE_RATELIMIT_STATE(_rs, 500*HZ, 1);
3917
3918 if (__ratelimit(&_rs)) {
3919 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
3920 pr_err("IPA BAM START\n");
3921 if (ipa_ctx->ipa_hw_type < IPA_HW_v2_0) {
3922 sps_get_bam_debug_info(ipa_ctx->bam_handle, 5,
3923 511950, 0, 0);
3924 sps_get_bam_debug_info(ipa_ctx->bam_handle, 93, 0,
3925 0, 0);
3926 } else {
3927 sps_get_bam_debug_info(ipa_ctx->bam_handle, 93,
3928 (SPS_BAM_PIPE(ipa_get_ep_mapping(IPA_CLIENT_USB_CONS))
3929 |
3930 SPS_BAM_PIPE(ipa_get_ep_mapping(IPA_CLIENT_USB_PROD))),
3931 0, 2);
3932 }
3933 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
3934 }
3935}
3936
/*
 * ipa_init_mem_partition_v2() - populate the global shared-memory partition
 * table (IPA_MEM_PART fields) with the IPA v2.0 RAM layout offsets/sizes,
 * logging each value as it is set.
 */
static void ipa_init_mem_partition_v2(void)
{
	IPADBG("Memory partition IPA 2\n");
	/* NAT table region */
	IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST;
	IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE;
	IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
		IPA_MEM_PART(nat_size));

	IPA_MEM_PART(ofst_start) = IPA_MEM_v2_RAM_OFST_START;
	IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));

	/* IPv4/IPv6 filtering tables (SRAM plus DDR fallback sizes) */
	IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_RAM_V4_FLT_OFST;
	IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_RAM_V4_FLT_SIZE;
	IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR;
	IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size),
		IPA_MEM_PART(v4_flt_size_ddr));

	IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_RAM_V6_FLT_OFST;
	IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_RAM_V6_FLT_SIZE;
	IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR;
	IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size),
		IPA_MEM_PART(v6_flt_size_ddr));

	/* IPv4 routing table and its modem/apps index split */
	IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_RAM_V4_RT_OFST;
	IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst));

	IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_RAM_V4_NUM_INDEX;
	IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index));

	IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_V4_MODEM_RT_INDEX_LO;
	IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_V4_MODEM_RT_INDEX_HI;
	IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v4_modem_rt_index_lo),
		IPA_MEM_PART(v4_modem_rt_index_hi));

	IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_V4_APPS_RT_INDEX_LO;
	IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_V4_APPS_RT_INDEX_HI;
	IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v4_apps_rt_index_lo),
		IPA_MEM_PART(v4_apps_rt_index_hi));

	IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_RAM_V4_RT_SIZE;
	IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR;
	IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size),
		IPA_MEM_PART(v4_rt_size_ddr));

	/* IPv6 routing table and its modem/apps index split */
	IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_RAM_V6_RT_OFST;
	IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst));

	IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_RAM_V6_NUM_INDEX;
	IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index));

	IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_V6_MODEM_RT_INDEX_LO;
	IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_V6_MODEM_RT_INDEX_HI;
	IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v6_modem_rt_index_lo),
		IPA_MEM_PART(v6_modem_rt_index_hi));

	IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_V6_APPS_RT_INDEX_LO;
	IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_V6_APPS_RT_INDEX_HI;
	IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v6_apps_rt_index_lo),
		IPA_MEM_PART(v6_apps_rt_index_hi));

	IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_RAM_V6_RT_SIZE;
	IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR;
	IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size),
		IPA_MEM_PART(v6_rt_size_ddr));

	/* header tables (modem-owned and apps-owned) */
	IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_RAM_MODEM_HDR_OFST;
	IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_RAM_MODEM_HDR_SIZE;
	IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));

	IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_RAM_APPS_HDR_OFST;
	IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_RAM_APPS_HDR_SIZE;
	IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_RAM_HDR_SIZE_DDR;
	IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
		IPA_MEM_PART(apps_hdr_size_ddr));

	/* modem private region */
	IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_RAM_MODEM_OFST;
	IPA_MEM_PART(modem_size) = IPA_MEM_v2_RAM_MODEM_SIZE;
	IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
		IPA_MEM_PART(modem_size));

	/* apps-side filter scratch regions */
	IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_RAM_APPS_V4_FLT_OFST;
	IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_RAM_APPS_V4_FLT_SIZE;
	IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_PART(apps_v4_flt_size));

	IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_RAM_APPS_V6_FLT_OFST;
	IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_RAM_APPS_V6_FLT_SIZE;
	IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(apps_v6_flt_ofst), IPA_MEM_PART(apps_v6_flt_size));

	/* NOTE(review): log prefix says "V6" but this is the uC info region */
	IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_RAM_UC_INFO_OFST;
	IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_RAM_UC_INFO_SIZE;
	IPADBG("V6 UC INFO OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(uc_info_ofst),
		IPA_MEM_PART(uc_info_size));

	IPA_MEM_PART(end_ofst) = IPA_MEM_v2_RAM_END_OFST;
	IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_RAM_APPS_V4_RT_OFST;
	IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_RAM_APPS_V4_RT_SIZE;
	IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_RAM_APPS_V6_RT_OFST;
	IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_RAM_APPS_V6_RT_SIZE;
}
4046
/*
 * ipa_init_mem_partition_v2_5() - populate the global shared-memory
 * partition table (IPA_MEM_PART fields) with the IPA v2.5 RAM layout
 * offsets/sizes, logging each value as it is set.  Differs from v2.0
 * mainly by the header processing-context regions.
 */
static void ipa_init_mem_partition_v2_5(void)
{
	IPADBG("Memory partition IPA 2.5\n");
	/* NAT table region */
	IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST;
	IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE;
	IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
		IPA_MEM_PART(nat_size));

	/* uC info region */
	IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_5_RAM_UC_INFO_OFST;
	IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_5_RAM_UC_INFO_SIZE;
	IPADBG("UC INFO OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(uc_info_ofst),
		IPA_MEM_PART(uc_info_size));

	IPA_MEM_PART(ofst_start) = IPA_MEM_v2_5_RAM_OFST_START;
	IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));

	/* IPv4/IPv6 filtering tables (SRAM plus DDR fallback sizes) */
	IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_5_RAM_V4_FLT_OFST;
	IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_5_RAM_V4_FLT_SIZE;
	IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR;
	IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size),
		IPA_MEM_PART(v4_flt_size_ddr));

	IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_5_RAM_V6_FLT_OFST;
	IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_5_RAM_V6_FLT_SIZE;
	IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR;
	IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size),
		IPA_MEM_PART(v6_flt_size_ddr));

	/* IPv4 routing table and its modem/apps index split */
	IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_5_RAM_V4_RT_OFST;
	IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst));

	IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_5_RAM_V4_NUM_INDEX;
	IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index));

	IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_5_V4_MODEM_RT_INDEX_LO;
	IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_5_V4_MODEM_RT_INDEX_HI;
	IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v4_modem_rt_index_lo),
		IPA_MEM_PART(v4_modem_rt_index_hi));

	IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_5_V4_APPS_RT_INDEX_LO;
	IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_5_V4_APPS_RT_INDEX_HI;
	IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v4_apps_rt_index_lo),
		IPA_MEM_PART(v4_apps_rt_index_hi));

	IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_5_RAM_V4_RT_SIZE;
	IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR;
	IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size),
		IPA_MEM_PART(v4_rt_size_ddr));

	/* IPv6 routing table and its modem/apps index split */
	IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_5_RAM_V6_RT_OFST;
	IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst));

	IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_5_RAM_V6_NUM_INDEX;
	IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index));

	IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_5_V6_MODEM_RT_INDEX_LO;
	IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_5_V6_MODEM_RT_INDEX_HI;
	IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v6_modem_rt_index_lo),
		IPA_MEM_PART(v6_modem_rt_index_hi));

	IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_5_V6_APPS_RT_INDEX_LO;
	IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_5_V6_APPS_RT_INDEX_HI;
	IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v6_apps_rt_index_lo),
		IPA_MEM_PART(v6_apps_rt_index_hi));

	IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_5_RAM_V6_RT_SIZE;
	IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR;
	IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size),
		IPA_MEM_PART(v6_rt_size_ddr));

	/* header tables (modem-owned and apps-owned) */
	IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_5_RAM_MODEM_HDR_OFST;
	IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_5_RAM_MODEM_HDR_SIZE;
	IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));

	IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_5_RAM_APPS_HDR_OFST;
	IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_5_RAM_APPS_HDR_SIZE;
	IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_5_RAM_HDR_SIZE_DDR;
	IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
		IPA_MEM_PART(apps_hdr_size_ddr));

	/* header processing-context regions (new in v2.5) */
	IPA_MEM_PART(modem_hdr_proc_ctx_ofst) =
		IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_OFST;
	IPA_MEM_PART(modem_hdr_proc_ctx_size) =
		IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_SIZE;
	IPADBG("MODEM HDR PROC CTX OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(modem_hdr_proc_ctx_ofst),
		IPA_MEM_PART(modem_hdr_proc_ctx_size));

	IPA_MEM_PART(apps_hdr_proc_ctx_ofst) =
		IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_OFST;
	IPA_MEM_PART(apps_hdr_proc_ctx_size) =
		IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_SIZE;
	IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr) =
		IPA_MEM_RAM_HDR_PROC_CTX_SIZE_DDR;
	IPADBG("APPS HDR PROC CTX OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(apps_hdr_proc_ctx_ofst),
		IPA_MEM_PART(apps_hdr_proc_ctx_size),
		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr));

	/* modem private region */
	IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_5_RAM_MODEM_OFST;
	IPA_MEM_PART(modem_size) = IPA_MEM_v2_5_RAM_MODEM_SIZE;
	IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
		IPA_MEM_PART(modem_size));

	/* apps-side filter scratch regions */
	IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_5_RAM_APPS_V4_FLT_OFST;
	IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_5_RAM_APPS_V4_FLT_SIZE;
	IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_PART(apps_v4_flt_size));

	IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_5_RAM_APPS_V6_FLT_OFST;
	IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_5_RAM_APPS_V6_FLT_SIZE;
	IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(apps_v6_flt_ofst), IPA_MEM_PART(apps_v6_flt_size));

	IPA_MEM_PART(end_ofst) = IPA_MEM_v2_5_RAM_END_OFST;
	IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_5_RAM_APPS_V4_RT_OFST;
	IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_5_RAM_APPS_V4_RT_SIZE;
	IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_5_RAM_APPS_V6_RT_OFST;
	IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_5_RAM_APPS_V6_RT_SIZE;
}
4175
/*
 * ipa_init_mem_partition_v2_6L() - populate the global shared-memory
 * partition table (IPA_MEM_PART fields) with the IPA v2.6-Lite RAM layout
 * offsets/sizes, logging each value as it is set.  Differs from v2.5 by
 * the modem compression/decompression region and the absence of header
 * processing contexts.
 */
static void ipa_init_mem_partition_v2_6L(void)
{
	IPADBG("Memory partition IPA 2.6Lite\n");
	/* NAT table region */
	IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST;
	IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE;
	IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
		IPA_MEM_PART(nat_size));

	/* NOTE(review): log prefix says "V6" but this is the uC info region */
	IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_6L_RAM_UC_INFO_OFST;
	IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_6L_RAM_UC_INFO_SIZE;
	IPADBG("V6 UC INFO OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(uc_info_ofst),
		IPA_MEM_PART(uc_info_size));

	IPA_MEM_PART(ofst_start) = IPA_MEM_v2_6L_RAM_OFST_START;
	IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));

	/* IPv4/IPv6 filtering tables (SRAM plus DDR fallback sizes) */
	IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_6L_RAM_V4_FLT_OFST;
	IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_6L_RAM_V4_FLT_SIZE;
	IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR;
	IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size),
		IPA_MEM_PART(v4_flt_size_ddr));

	IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_6L_RAM_V6_FLT_OFST;
	IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_6L_RAM_V6_FLT_SIZE;
	IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR;
	IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size),
		IPA_MEM_PART(v6_flt_size_ddr));

	/* IPv4 routing table and its modem/apps index split */
	IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_6L_RAM_V4_RT_OFST;
	IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst));

	IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_6L_RAM_V4_NUM_INDEX;
	IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index));

	IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_LO;
	IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_HI;
	IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v4_modem_rt_index_lo),
		IPA_MEM_PART(v4_modem_rt_index_hi));

	IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_6L_V4_APPS_RT_INDEX_LO;
	IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_6L_V4_APPS_RT_INDEX_HI;
	IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v4_apps_rt_index_lo),
		IPA_MEM_PART(v4_apps_rt_index_hi));

	IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_6L_RAM_V4_RT_SIZE;
	IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR;
	IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size),
		IPA_MEM_PART(v4_rt_size_ddr));

	/* IPv6 routing table and its modem/apps index split */
	IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_6L_RAM_V6_RT_OFST;
	IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst));

	IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_6L_RAM_V6_NUM_INDEX;
	IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index));

	IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_LO;
	IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_HI;
	IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v6_modem_rt_index_lo),
		IPA_MEM_PART(v6_modem_rt_index_hi));

	IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_6L_V6_APPS_RT_INDEX_LO;
	IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_6L_V6_APPS_RT_INDEX_HI;
	IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v6_apps_rt_index_lo),
		IPA_MEM_PART(v6_apps_rt_index_hi));

	IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_6L_RAM_V6_RT_SIZE;
	IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR;
	IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size),
		IPA_MEM_PART(v6_rt_size_ddr));

	/* header tables (modem-owned and apps-owned) */
	IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_6L_RAM_MODEM_HDR_OFST;
	IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_6L_RAM_MODEM_HDR_SIZE;
	IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));

	IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_6L_RAM_APPS_HDR_OFST;
	IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_6L_RAM_APPS_HDR_SIZE;
	IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_6L_RAM_HDR_SIZE_DDR;
	IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
		IPA_MEM_PART(apps_hdr_size_ddr));

	/* modem compression/decompression region (2.6L only) */
	IPA_MEM_PART(modem_comp_decomp_ofst) =
		IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_OFST;
	IPA_MEM_PART(modem_comp_decomp_size) =
		IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_SIZE;
	IPADBG("MODEM COMP DECOMP OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(modem_comp_decomp_ofst),
		IPA_MEM_PART(modem_comp_decomp_size));

	/* modem private region */
	IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_6L_RAM_MODEM_OFST;
	IPA_MEM_PART(modem_size) = IPA_MEM_v2_6L_RAM_MODEM_SIZE;
	IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
		IPA_MEM_PART(modem_size));

	/* apps-side filter scratch regions */
	IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V4_FLT_OFST;
	IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_6L_RAM_APPS_V4_FLT_SIZE;
	IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_PART(apps_v4_flt_size));

	IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V6_FLT_OFST;
	IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_6L_RAM_APPS_V6_FLT_SIZE;
	IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(apps_v6_flt_ofst), IPA_MEM_PART(apps_v6_flt_size));

	IPA_MEM_PART(end_ofst) = IPA_MEM_v2_6L_RAM_END_OFST;
	IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V4_RT_OFST;
	IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_6L_RAM_APPS_V4_RT_SIZE;
	IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V6_RT_OFST;
	IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_6L_RAM_APPS_V6_RT_SIZE;
}
4293
4294/**
4295 * ipa_controller_shared_static_bind() - set the appropriate shared methods for
4296 * for IPA HW version 2.0, 2.5, 2.6 and 2.6L
4297 *
4298 * @ctrl: data structure which holds the function pointers
4299 */
void ipa_controller_shared_static_bind(struct ipa_controller *ctrl)
{
	/* table-init ops shared by all v2.x targets */
	ctrl->ipa_init_rt4 = _ipa_init_rt4_v2;
	ctrl->ipa_init_rt6 = _ipa_init_rt6_v2;
	ctrl->ipa_init_flt4 = _ipa_init_flt4_v2;
	ctrl->ipa_init_flt6 = _ipa_init_flt6_v2;
	/* per-endpoint configuration ops (v2.0 register layout) */
	ctrl->ipa_cfg_ep_hdr = _ipa_cfg_ep_hdr_v2_0;
	ctrl->ipa_cfg_ep_nat = _ipa_cfg_ep_nat_v2_0;
	ctrl->ipa_cfg_ep_aggr = _ipa_cfg_ep_aggr_v2_0;
	ctrl->ipa_cfg_ep_deaggr = _ipa_cfg_ep_deaggr_v2_0;
	ctrl->ipa_cfg_ep_mode = _ipa_cfg_ep_mode_v2_0;
	ctrl->ipa_cfg_ep_route = _ipa_cfg_ep_route_v2_0;
	ctrl->ipa_cfg_route = _ipa_cfg_route_v2_0;
	ctrl->ipa_cfg_ep_status = _ipa_cfg_ep_status_v2_0;
	ctrl->ipa_cfg_ep_cfg = _ipa_cfg_ep_cfg_v2_0;
	ctrl->ipa_cfg_ep_metadata_mask = _ipa_cfg_ep_metadata_mask_v2_0;
	/* clock rates for the three voltage corners */
	ctrl->ipa_clk_rate_turbo = IPA_V2_0_CLK_RATE_TURBO;
	ctrl->ipa_clk_rate_nominal = IPA_V2_0_CLK_RATE_NOMINAL;
	ctrl->ipa_clk_rate_svs = IPA_V2_0_CLK_RATE_SVS;
	/* debug register access */
	ctrl->ipa_read_gen_reg = _ipa_read_gen_reg_v2_0;
	ctrl->ipa_read_ep_reg = _ipa_read_ep_reg_v2_0;
	ctrl->ipa_write_dbg_cnt = _ipa_write_dbg_cnt_v2_0;
	ctrl->ipa_read_dbg_cnt = _ipa_read_dbg_cnt_v2_0;
	/* table commit ops */
	ctrl->ipa_commit_flt = __ipa_commit_flt_v2;
	ctrl->ipa_commit_rt = __ipa_commit_rt_v2;
	ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2;
	/* clock and bus management */
	ctrl->ipa_enable_clks = _ipa_enable_clks_v2_0;
	ctrl->ipa_disable_clks = _ipa_disable_clks_v2_0;
	ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v2_0;
	ctrl->ipa_cfg_ep_metadata = _ipa_cfg_ep_metadata_v2_0;
	/* bandwidth thresholds (Mbps) used for clock scaling decisions */
	ctrl->clock_scaling_bw_threshold_nominal =
		IPA_V2_0_BW_THRESHOLD_NOMINAL_MBPS;
	ctrl->clock_scaling_bw_threshold_turbo =
		IPA_V2_0_BW_THRESHOLD_TURBO_MBPS;
}
4335
4336/**
4337 * ipa_ctrl_static_bind() - set the appropriate methods for
4338 * IPA Driver based on the HW version
4339 *
4340 * @ctrl: data structure which holds the function pointers
4341 * @hw_type: the HW type in use
4342 *
4343 * This function can avoid the runtime assignment by using C99 special
4344 * struct initialization - hard decision... time.vs.mem
4345 */
int ipa_controller_static_bind(struct ipa_controller *ctrl,
		enum ipa_hw_type hw_type)
{
	switch (hw_type) {
	/* v1.1: fully version-specific binding, no shared v2.x ops */
	case (IPA_HW_v1_1):
		ipa_init_mem_partition_v2();
		ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v1_1;
		ctrl->ipa_cfg_ep_hdr = _ipa_cfg_ep_hdr_v1_1;
		ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v1_1;
		ctrl->ipa_cfg_ep_aggr = _ipa_cfg_ep_aggr_v1_1;
		ctrl->ipa_cfg_ep_deaggr = _ipa_cfg_ep_deaggr_v1_1;
		ctrl->ipa_cfg_ep_nat = _ipa_cfg_ep_nat_v1_1;
		ctrl->ipa_cfg_ep_mode = _ipa_cfg_ep_mode_v1_1;
		ctrl->ipa_cfg_ep_route = _ipa_cfg_ep_route_v1_1;
		ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v1_1;
		ctrl->ipa_cfg_route = _ipa_cfg_route_v1_1;
		ctrl->ipa_cfg_ep_status = _ipa_cfg_ep_status_v1_1;
		ctrl->ipa_cfg_ep_cfg = _ipa_cfg_ep_cfg_v1_1;
		ctrl->ipa_cfg_ep_metadata_mask = _ipa_cfg_ep_metadata_mask_v1_1;
		/* v1.1 has a single clock rate for all corners */
		ctrl->ipa_clk_rate_turbo = IPA_V1_1_CLK_RATE;
		ctrl->ipa_clk_rate_nominal = IPA_V1_1_CLK_RATE;
		ctrl->ipa_clk_rate_svs = IPA_V1_1_CLK_RATE;
		ctrl->ipa_read_gen_reg = _ipa_read_gen_reg_v1_1;
		ctrl->ipa_read_ep_reg = _ipa_read_ep_reg_v1_1;
		ctrl->ipa_write_dbg_cnt = _ipa_write_dbg_cnt_v1_1;
		ctrl->ipa_read_dbg_cnt = _ipa_read_dbg_cnt_v1_1;
		ctrl->ipa_commit_flt = __ipa_commit_flt_v1_1;
		ctrl->ipa_commit_rt = __ipa_commit_rt_v1_1;
		ctrl->ipa_commit_hdr = __ipa_commit_hdr_v1_1;
		ctrl->ipa_enable_clks = _ipa_enable_clks_v1_1;
		ctrl->ipa_disable_clks = _ipa_disable_clks_v1_1;
		ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v1_1;
		ctrl->ipa_cfg_ep_metadata = _ipa_cfg_ep_metadata_v1_1;
		ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_0;
		ctrl->max_holb_tmr_val = IPA_V1_MAX_HOLB_TMR_VAL;
		break;
	/* v2.0/v2.5/v2.6L: shared binding plus version overrides */
	case (IPA_HW_v2_0):
		ipa_init_mem_partition_v2();
		ipa_controller_shared_static_bind(ctrl);
		ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_0;
		ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_0;
		ctrl->max_holb_tmr_val = IPA_V2_0_MAX_HOLB_TMR_VAL;
		ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_0;
		ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_0;
		ctrl->ipa_init_sram = _ipa_init_sram_v2;
		ctrl->ipa_init_hdr = _ipa_init_hdr_v2;
		ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2;
		ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2;
		break;
	case (IPA_HW_v2_5):
		ipa_init_mem_partition_v2_5();
		ipa_controller_shared_static_bind(ctrl);
		ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_5;
		ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_5;
		ctrl->max_holb_tmr_val = IPA_V2_5_MAX_HOLB_TMR_VAL;
		ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_5;
		ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_5;
		ctrl->ipa_init_sram = _ipa_init_sram_v2_5;
		ctrl->ipa_init_hdr = _ipa_init_hdr_v2_5;
		ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2_5;
		ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2_5;
		break;
	case (IPA_HW_v2_6L):
		ipa_init_mem_partition_v2_6L();
		ipa_controller_shared_static_bind(ctrl);
		ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_6L;
		ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_6L;
		ctrl->max_holb_tmr_val = IPA_V2_6L_MAX_HOLB_TMR_VAL;
		ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_6L;
		ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_6L;
		ctrl->ipa_init_sram = _ipa_init_sram_v2_6L;
		ctrl->ipa_init_hdr = _ipa_init_hdr_v2_6L;
		ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2_6L;
		ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2_6L;
		break;
	/* unknown/unsupported HW versions are rejected */
	default:
		return -EPERM;
	}

	return 0;
}
4427
/*
 * ipa_skb_recycle() - reset an skb header so its data buffer can be reused
 * for a new fill without reallocating.
 *
 * Zeroes the shared info up to (but not including) dataref, restores the
 * single reference, zeroes the sk_buff header up to the tail field and
 * rewinds data/tail to head + NET_SKB_PAD.
 * NOTE(review): presumably the caller holds the only reference to @skb
 * (dataref is unconditionally reset to 1) — confirm at call sites.
 */
void ipa_skb_recycle(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;

	shinfo = skb_shinfo(skb);
	/* wipe shared info fields that precede dataref, then re-arm it */
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	/* clear header fields up to 'tail'; pointers after it stay valid */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);
}
4440
/*
 * ipa_id_alloc() - allocate a unique id for @ptr in the driver-wide IDR.
 *
 * idr_preload()/GFP_NOWAIT keeps the actual memory allocation outside the
 * idr spinlock so the locked section never sleeps.
 *
 * Returns the new id (>= 0), or a negative errno from idr_alloc().
 */
int ipa_id_alloc(void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(&ipa_ctx->idr_lock);
	id = idr_alloc(&ipa_ctx->ipa_idr, ptr, 0, 0, GFP_NOWAIT);
	spin_unlock(&ipa_ctx->idr_lock);
	idr_preload_end();

	return id;
}
4453
4454void *ipa_id_find(u32 id)
4455{
4456 void *ptr;
4457
4458 spin_lock(&ipa_ctx->idr_lock);
4459 ptr = idr_find(&ipa_ctx->ipa_idr, id);
4460 spin_unlock(&ipa_ctx->idr_lock);
4461
4462 return ptr;
4463}
4464
/* ipa_id_remove() - drop @id from the driver-wide IDR (no-op if unknown) */
void ipa_id_remove(u32 id)
{
	spin_lock(&ipa_ctx->idr_lock);
	idr_remove(&ipa_ctx->ipa_idr, id);
	spin_unlock(&ipa_ctx->idr_lock);
}
4471
/* descriptor completion callback: frees an immediate-command payload */
static void ipa_tag_free_buf(void *user1, int user2)
{
	kfree(user1);
}
4476
/* descriptor completion callback: frees the dummy skb (any context safe) */
static void ipa_tag_free_skb(void *user1, int user2)
{
	dev_kfree_skb_any((struct sk_buff *)user1);
}
4481
4482#define REQUIRED_TAG_PROCESS_DESCRIPTORS 4
4483
/* ipa_tag_process() - Initiates a tag process. Incorporates the input
 * descriptors
 *
 * @desc: descriptors with commands for IC
 * @descs_num: amount of descriptors in the above variable
 * @timeout: time (in jiffies) to wait for the TAG response
 *
 * Note: The descriptors are copied (if there's room), the client needs to
 * free his descriptors afterwards
 *
 * Return: 0 or negative in case of failure
 */
int ipa_tag_process(struct ipa_desc desc[],
	int descs_num,
	unsigned long timeout)
{
	struct ipa_sys_context *sys;
	struct ipa_desc *tag_desc;
	int desc_idx = 0;
	struct ipa_ip_packet_init *pkt_init;
	struct ipa_register_write *reg_write_nop;
	struct ipa_ip_packet_tag_status *status;
	int i;
	struct sk_buff *dummy_skb;
	int res;
	struct ipa_tag_completion *comp;
	int ep_idx;
	/* some targets require command payloads to come from ZONE_DMA */
	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);

	/* Not enough room for the required descriptors for the tag process */
	if (IPA_TAG_MAX_DESC - descs_num < REQUIRED_TAG_PROCESS_DESCRIPTORS) {
		IPAERR("up to %d descriptors are allowed (received %d)\n",
			IPA_TAG_MAX_DESC - REQUIRED_TAG_PROCESS_DESCRIPTORS,
			descs_num);
		return -ENOMEM;
	}

	/* the tag sequence is injected via the apps command producer pipe */
	ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
	if (-1 == ep_idx) {
		IPAERR("Client %u is not mapped\n",
			IPA_CLIENT_APPS_CMD_PROD);
		return -EFAULT;
	}
	sys = ipa_ctx->ep[ep_idx].sys;

	tag_desc = kzalloc(sizeof(*tag_desc) * IPA_TAG_MAX_DESC, flag);
	if (!tag_desc) {
		IPAERR("failed to allocate memory\n");
		res = -ENOMEM;
		goto fail_alloc_desc;
	}

	/* IP_PACKET_INIT IC for tag status to be sent to apps */
	pkt_init = kzalloc(sizeof(*pkt_init), flag);
	if (!pkt_init) {
		IPAERR("failed to allocate memory\n");
		res = -ENOMEM;
		goto fail_alloc_pkt_init;
	}

	pkt_init->destination_pipe_index =
		ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);

	/* each payload is freed by ipa_tag_free_buf via user1 on completion */
	tag_desc[desc_idx].opcode = IPA_IP_PACKET_INIT;
	tag_desc[desc_idx].pyld = pkt_init;
	tag_desc[desc_idx].len = sizeof(*pkt_init);
	tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
	tag_desc[desc_idx].callback = ipa_tag_free_buf;
	tag_desc[desc_idx].user1 = pkt_init;
	desc_idx++;

	/* NO-OP IC for ensuring that IPA pipeline is empty */
	reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag);
	if (!reg_write_nop) {
		IPAERR("no mem\n");
		res = -ENOMEM;
		goto fail_free_desc;
	}

	/* zero mask => register write changes nothing; acts as a barrier */
	reg_write_nop->skip_pipeline_clear = 0;
	reg_write_nop->value_mask = 0x0;

	tag_desc[desc_idx].opcode = IPA_REGISTER_WRITE;
	tag_desc[desc_idx].pyld = reg_write_nop;
	tag_desc[desc_idx].len = sizeof(*reg_write_nop);
	tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
	tag_desc[desc_idx].callback = ipa_tag_free_buf;
	tag_desc[desc_idx].user1 = reg_write_nop;
	desc_idx++;

	/* status IC */
	status = kzalloc(sizeof(*status), flag);
	if (!status) {
		IPAERR("no mem\n");
		res = -ENOMEM;
		goto fail_free_desc;
	}

	/* cookie lets the rx handler recognize the tag status packet */
	status->tag_f_2 = IPA_COOKIE;

	tag_desc[desc_idx].opcode = IPA_IP_PACKET_TAG_STATUS;
	tag_desc[desc_idx].pyld = status;
	tag_desc[desc_idx].len = sizeof(*status);
	tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
	tag_desc[desc_idx].callback = ipa_tag_free_buf;
	tag_desc[desc_idx].user1 = status;
	desc_idx++;

	/* Copy the required descriptors from the client now */
	if (desc) {
		memcpy(&(tag_desc[desc_idx]), desc, descs_num *
			sizeof(struct ipa_desc));
		desc_idx += descs_num;
	}

	comp = kzalloc(sizeof(*comp), GFP_KERNEL);
	if (!comp) {
		IPAERR("no mem\n");
		res = -ENOMEM;
		goto fail_free_desc;
	}
	init_completion(&comp->comp);

	/* completion needs to be released from both here and rx handler */
	atomic_set(&comp->cnt, 2);

	/* dummy packet to send to IPA. packet payload is a completion object */
	/*
	 * NOTE(review): sizeof(comp) is the size of the POINTER, and the
	 * memcpy below stores the pointer VALUE in the skb payload — the rx
	 * handler presumably reads it back as the completion address; the
	 * wording of the comment above is slightly misleading but the code
	 * appears intentional.
	 */
	dummy_skb = alloc_skb(sizeof(comp), flag);
	if (!dummy_skb) {
		IPAERR("failed to allocate memory\n");
		res = -ENOMEM;
		goto fail_free_skb;
	}

	memcpy(skb_put(dummy_skb, sizeof(comp)), &comp, sizeof(comp));

	tag_desc[desc_idx].pyld = dummy_skb->data;
	tag_desc[desc_idx].len = dummy_skb->len;
	tag_desc[desc_idx].type = IPA_DATA_DESC_SKB;
	tag_desc[desc_idx].callback = ipa_tag_free_skb;
	tag_desc[desc_idx].user1 = dummy_skb;
	desc_idx++;

	/* send all descriptors to IPA with single EOT */
	res = ipa_send(sys, desc_idx, tag_desc, true);
	if (res) {
		IPAERR("failed to send TAG packets %d\n", res);
		res = -ENOMEM;
		goto fail_send;
	}
	/* on success ipa_send owns the payloads; only the array is ours */
	kfree(tag_desc);
	tag_desc = NULL;

	IPADBG("waiting for TAG response\n");
	res = wait_for_completion_timeout(&comp->comp, timeout);
	if (res == 0) {
		IPAERR("timeout (%lu msec) on waiting for TAG response\n",
			timeout);
		WARN_ON(1);
		/* rx handler may still fire; last dropper frees comp */
		if (atomic_dec_return(&comp->cnt) == 0)
			kfree(comp);
		return -ETIME;
	}

	IPADBG("TAG response arrived!\n");
	if (atomic_dec_return(&comp->cnt) == 0)
		kfree(comp);

	/* sleep for short period to ensure IPA wrote all packets to BAM */
	usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC);

	return 0;

fail_send:
	dev_kfree_skb_any(dummy_skb);
	desc_idx--;
fail_free_skb:
	kfree(comp);
fail_free_desc:
	/*
	 * Free only the first descriptors allocated here.
	 * [pkt_init, status, nop]
	 * The user is responsible to free his allocations
	 * in case of failure.
	 * The min is required because we may fail during
	 * of the initial allocations above
	 */
	for (i = 0; i < min(REQUIRED_TAG_PROCESS_DESCRIPTORS-1, desc_idx); i++)
		kfree(tag_desc[i].user1);

fail_alloc_pkt_init:
	kfree(tag_desc);
fail_alloc_desc:
	return res;
}
4678
4679/**
4680 * ipa_tag_generate_force_close_desc() - generate descriptors for force close
4681 * immediate command
4682 *
4683 * @desc: descriptors for IC
4684 * @desc_size: desc array size
4685 * @start_pipe: first pipe to close aggregation
4686 * @end_pipe: last (non-inclusive) pipe to close aggregation
4687 *
4688 * Return: number of descriptors written or negative in case of failure
4689 */
4690static int ipa_tag_generate_force_close_desc(struct ipa_desc desc[],
4691 int desc_size, int start_pipe, int end_pipe)
4692{
4693 int i;
4694 u32 aggr_init;
4695 int desc_idx = 0;
4696 int res;
4697 struct ipa_register_write *reg_write_agg_close;
4698
4699 for (i = start_pipe; i < end_pipe; i++) {
4700 aggr_init = ipa_read_reg(ipa_ctx->mmio,
4701 IPA_ENDP_INIT_AGGR_N_OFST_v2_0(i));
4702 if (((aggr_init & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) >>
4703 IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT) != IPA_ENABLE_AGGR)
4704 continue;
4705 IPADBG("Force close ep: %d\n", i);
4706 if (desc_idx + 1 > desc_size) {
4707 IPAERR("Internal error - no descriptors\n");
4708 res = -EFAULT;
4709 goto fail_no_desc;
4710 }
4711
4712 reg_write_agg_close = kzalloc(sizeof(*reg_write_agg_close),
4713 GFP_KERNEL);
4714 if (!reg_write_agg_close) {
4715 IPAERR("no mem\n");
4716 res = -ENOMEM;
4717 goto fail_alloc_reg_write_agg_close;
4718 }
4719
4720 reg_write_agg_close->skip_pipeline_clear = 0;
4721 reg_write_agg_close->offset = IPA_ENDP_INIT_AGGR_N_OFST_v2_0(i);
4722 reg_write_agg_close->value =
4723 (1 & IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK) <<
4724 IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
4725 reg_write_agg_close->value_mask =
4726 IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK <<
4727 IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
4728
4729 desc[desc_idx].opcode = IPA_REGISTER_WRITE;
4730 desc[desc_idx].pyld = reg_write_agg_close;
4731 desc[desc_idx].len = sizeof(*reg_write_agg_close);
4732 desc[desc_idx].type = IPA_IMM_CMD_DESC;
4733 desc[desc_idx].callback = ipa_tag_free_buf;
4734 desc[desc_idx].user1 = reg_write_agg_close;
4735 desc_idx++;
4736 }
4737
4738 return desc_idx;
4739
4740fail_alloc_reg_write_agg_close:
4741 for (i = 0; i < desc_idx; i++)
4742 kfree(desc[desc_idx].user1);
4743fail_no_desc:
4744 return res;
4745}
4746
4747/**
4748 * ipa_tag_aggr_force_close() - Force close aggregation
4749 *
4750 * @pipe_num: pipe number or -1 for all pipes
4751 */
4752int ipa_tag_aggr_force_close(int pipe_num)
4753{
4754 struct ipa_desc *desc;
4755 int res = -1;
4756 int start_pipe;
4757 int end_pipe;
4758 int num_descs;
4759 int num_aggr_descs;
4760
4761 if (pipe_num < -1 || pipe_num >= (int)ipa_ctx->ipa_num_pipes) {
4762 IPAERR("Invalid pipe number %d\n", pipe_num);
4763 return -EINVAL;
4764 }
4765
4766 if (pipe_num == -1) {
4767 start_pipe = 0;
4768 end_pipe = ipa_ctx->ipa_num_pipes;
4769 } else {
4770 start_pipe = pipe_num;
4771 end_pipe = pipe_num + 1;
4772 }
4773
4774 num_descs = end_pipe - start_pipe;
4775
4776 desc = kcalloc(num_descs, sizeof(*desc), GFP_KERNEL);
4777 if (!desc) {
4778 IPAERR("no mem\n");
4779 return -ENOMEM;
4780 }
4781
4782 /* Force close aggregation on all valid pipes with aggregation */
4783 num_aggr_descs = ipa_tag_generate_force_close_desc(desc, num_descs,
4784 start_pipe, end_pipe);
4785 if (num_aggr_descs < 0) {
4786 IPAERR("ipa_tag_generate_force_close_desc failed %d\n",
4787 num_aggr_descs);
4788 goto fail_free_desc;
4789 }
4790
4791 res = ipa_tag_process(desc, num_aggr_descs,
4792 IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT);
4793
4794fail_free_desc:
4795 kfree(desc);
4796
4797 return res;
4798}
4799
4800/**
4801 * ipa2_is_ready() - check if IPA module was initialized
4802 * successfully
4803 *
4804 * Return value: true for yes; false for no
4805 */
4806bool ipa2_is_ready(void)
4807{
4808 return (ipa_ctx != NULL) ? true : false;
4809}
4810
4811/**
4812 * ipa2_is_client_handle_valid() - check if IPA client handle is valid handle
4813 *
4814 * Return value: true for yes; false for no
4815 */
4816bool ipa2_is_client_handle_valid(u32 clnt_hdl)
4817{
4818 if (unlikely(!ipa_ctx)) {
4819 IPAERR("IPA driver was not initialized\n");
4820 return false;
4821 }
4822
4823 if (clnt_hdl >= 0 && clnt_hdl < ipa_ctx->ipa_num_pipes)
4824 return true;
4825 return false;
4826}
4827
4828/**
4829 * ipa2_proxy_clk_unvote() - called to remove IPA clock proxy vote
4830 *
4831 * Return value: none
4832 */
4833void ipa2_proxy_clk_unvote(void)
4834{
4835 if (ipa2_is_ready() && ipa_ctx->q6_proxy_clk_vote_valid) {
4836 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PROXY_CLK_VOTE");
4837 ipa_ctx->q6_proxy_clk_vote_valid = false;
4838 }
4839}
4840
4841/**
4842 * ipa2_proxy_clk_vote() - called to add IPA clock proxy vote
4843 *
4844 * Return value: none
4845 */
4846void ipa2_proxy_clk_vote(void)
4847{
4848 if (ipa2_is_ready() && !ipa_ctx->q6_proxy_clk_vote_valid) {
4849 IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE");
4850 ipa_ctx->q6_proxy_clk_vote_valid = true;
4851 }
4852}
4853
4854
4855/**
4856 * ipa2_get_smem_restr_bytes()- Return IPA smem restricted bytes
4857 *
4858 * Return value: u16 - number of IPA smem restricted bytes
4859 */
4860u16 ipa2_get_smem_restr_bytes(void)
4861{
4862 if (ipa_ctx)
4863 return ipa_ctx->smem_restricted_bytes;
4864
4865 IPAERR("IPA Driver not initialized\n");
4866
4867 return 0;
4868}
4869
4870/**
4871 * ipa2_get_modem_cfg_emb_pipe_flt()- Return ipa_ctx->modem_cfg_emb_pipe_flt
4872 *
4873 * Return value: true if modem configures embedded pipe flt, false otherwise
4874 */
4875bool ipa2_get_modem_cfg_emb_pipe_flt(void)
4876{
4877 if (ipa_ctx)
4878 return ipa_ctx->modem_cfg_emb_pipe_flt;
4879
4880 IPAERR("IPA driver has not been initialized\n");
4881
4882 return false;
4883}
4884/**
4885 * ipa2_get_transport_type()- Return IPA_TRANSPORT_TYPE_SPS
4886 *
4887 * Return value: enum ipa_transport_type
4888 */
4889enum ipa_transport_type ipa2_get_transport_type(void)
4890{
4891 return IPA_TRANSPORT_TYPE_SPS;
4892}
4893
4894u32 ipa_get_num_pipes(void)
4895{
4896 if (ipa_ctx->ipa_hw_type == IPA_HW_v2_6L)
4897 return ipa_read_reg(ipa_ctx->mmio, IPA_ENABLED_PIPES_OFST);
4898 else
4899 return IPA_MAX_NUM_PIPES;
4900}
4901EXPORT_SYMBOL(ipa_get_num_pipes);
4902
4903/**
4904 * ipa2_disable_apps_wan_cons_deaggr()-
4905 * set ipa_ctx->ipa_client_apps_wan_cons_agg_gro
4906 *
4907 * Return value: 0 or negative in case of failure
4908 */
4909int ipa2_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count)
4910{
4911 int res = -1;
4912
4913 /* checking if IPA-HW can support */
4914 if ((agg_size >> 10) >
4915 IPA_AGGR_BYTE_LIMIT) {
4916 IPAWANERR("IPA-AGG byte limit %d\n",
4917 IPA_AGGR_BYTE_LIMIT);
4918 IPAWANERR("exceed aggr_byte_limit\n");
4919 return res;
4920 }
4921 if (agg_count >
4922 IPA_AGGR_PKT_LIMIT) {
4923 IPAWANERR("IPA-AGG pkt limit %d\n",
4924 IPA_AGGR_PKT_LIMIT);
4925 IPAWANERR("exceed aggr_pkt_limit\n");
4926 return res;
4927 }
4928
4929 if (ipa_ctx) {
4930 ipa_ctx->ipa_client_apps_wan_cons_agg_gro = true;
4931 return 0;
4932 }
4933 return res;
4934}
4935
Amir Levy3be373c2017-03-05 16:31:30 +02004936static const struct ipa_gsi_ep_config *ipa2_get_gsi_ep_info
4937 (enum ipa_client_type client)
Amir Levy9659e592016-10-27 18:08:27 +03004938{
4939 IPAERR("Not supported for IPA 2.x\n");
4940 return NULL;
4941}
4942
/*
 * ipa2_stop_gsi_channel() - stub; there are no GSI channels on IPA 2.x
 * hardware, so this always fails with -EFAULT.
 */
static int ipa2_stop_gsi_channel(u32 clnt_hdl)
{
	IPAERR("Not supported for IPA 2.x\n");
	return -EFAULT;
}
4948
4949static void *ipa2_get_ipc_logbuf(void)
4950{
Utkarsh Saxena41d57c52016-11-16 12:04:28 +05304951 if (ipa_ctx)
4952 return ipa_ctx->logbuf;
4953
Amir Levy9659e592016-10-27 18:08:27 +03004954 return NULL;
4955}
4956
4957static void *ipa2_get_ipc_logbuf_low(void)
4958{
Utkarsh Saxena41d57c52016-11-16 12:04:28 +05304959 if (ipa_ctx)
4960 return ipa_ctx->logbuf_low;
4961
Amir Levy9659e592016-10-27 18:08:27 +03004962 return NULL;
4963}
4964
4965static void ipa2_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb)
4966{
4967 *holb = ipa_ctx->ep[ep_idx].holb;
4968}
4969
4970static int ipa2_generate_tag_process(void)
4971{
4972 int res;
4973
4974 res = ipa_tag_process(NULL, 0, HZ);
4975 if (res)
4976 IPAERR("TAG process failed\n");
4977
4978 return res;
4979}
4980
/*
 * ipa2_set_tag_process_before_gating() - record whether a TAG process
 * should be run before clock gating; only writes the context flag.
 */
static void ipa2_set_tag_process_before_gating(bool val)
{
	ipa_ctx->tag_process_before_gating = val;
}
4985
/**
 * ipa2_bind_api_controller() - wire the generic IPA API dispatch table
 * to the IPA v2.x implementations.
 * @ipa_hw_type: detected hardware version; must be in [v2.0, v3.0)
 * @api_ctrl: dispatch table to populate
 *
 * Return: 0 on success, -EPERM if @ipa_hw_type is not an IPA 2.x version.
 */
int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
	struct ipa_api_controller *api_ctrl)
{
	/* this file only implements the IPA v2.x backends */
	if (ipa_hw_type < IPA_HW_v2_0 || ipa_hw_type >= IPA_HW_v3_0) {
		IPAERR("Unsupported IPA HW version %d\n", ipa_hw_type);
		WARN_ON(1);
		return -EPERM;
	}

	/* endpoint connect/config */
	api_ctrl->ipa_connect = ipa2_connect;
	api_ctrl->ipa_disconnect = ipa2_disconnect;
	api_ctrl->ipa_reset_endpoint = ipa2_reset_endpoint;
	api_ctrl->ipa_clear_endpoint_delay = ipa2_clear_endpoint_delay;
	api_ctrl->ipa_disable_endpoint = ipa2_disable_endpoint;
	api_ctrl->ipa_cfg_ep = ipa2_cfg_ep;
	api_ctrl->ipa_cfg_ep_nat = ipa2_cfg_ep_nat;
	api_ctrl->ipa_cfg_ep_hdr = ipa2_cfg_ep_hdr;
	api_ctrl->ipa_cfg_ep_hdr_ext = ipa2_cfg_ep_hdr_ext;
	api_ctrl->ipa_cfg_ep_mode = ipa2_cfg_ep_mode;
	api_ctrl->ipa_cfg_ep_aggr = ipa2_cfg_ep_aggr;
	api_ctrl->ipa_cfg_ep_deaggr = ipa2_cfg_ep_deaggr;
	api_ctrl->ipa_cfg_ep_route = ipa2_cfg_ep_route;
	api_ctrl->ipa_cfg_ep_holb = ipa2_cfg_ep_holb;
	api_ctrl->ipa_get_holb = ipa2_get_holb;
	api_ctrl->ipa_set_tag_process_before_gating =
		ipa2_set_tag_process_before_gating;
	api_ctrl->ipa_cfg_ep_cfg = ipa2_cfg_ep_cfg;
	api_ctrl->ipa_cfg_ep_metadata_mask = ipa2_cfg_ep_metadata_mask;
	api_ctrl->ipa_cfg_ep_holb_by_client = ipa2_cfg_ep_holb_by_client;
	api_ctrl->ipa_cfg_ep_ctrl = ipa2_cfg_ep_ctrl;
	/* header table management */
	api_ctrl->ipa_add_hdr = ipa2_add_hdr;
	api_ctrl->ipa_del_hdr = ipa2_del_hdr;
	api_ctrl->ipa_commit_hdr = ipa2_commit_hdr;
	api_ctrl->ipa_reset_hdr = ipa2_reset_hdr;
	api_ctrl->ipa_get_hdr = ipa2_get_hdr;
	api_ctrl->ipa_put_hdr = ipa2_put_hdr;
	api_ctrl->ipa_copy_hdr = ipa2_copy_hdr;
	api_ctrl->ipa_add_hdr_proc_ctx = ipa2_add_hdr_proc_ctx;
	api_ctrl->ipa_del_hdr_proc_ctx = ipa2_del_hdr_proc_ctx;
	/* routing and filtering rules */
	api_ctrl->ipa_add_rt_rule = ipa2_add_rt_rule;
	api_ctrl->ipa_del_rt_rule = ipa2_del_rt_rule;
	api_ctrl->ipa_commit_rt = ipa2_commit_rt;
	api_ctrl->ipa_reset_rt = ipa2_reset_rt;
	api_ctrl->ipa_get_rt_tbl = ipa2_get_rt_tbl;
	api_ctrl->ipa_put_rt_tbl = ipa2_put_rt_tbl;
	api_ctrl->ipa_query_rt_index = ipa2_query_rt_index;
	api_ctrl->ipa_mdfy_rt_rule = ipa2_mdfy_rt_rule;
	api_ctrl->ipa_add_flt_rule = ipa2_add_flt_rule;
	api_ctrl->ipa_del_flt_rule = ipa2_del_flt_rule;
	api_ctrl->ipa_mdfy_flt_rule = ipa2_mdfy_flt_rule;
	api_ctrl->ipa_commit_flt = ipa2_commit_flt;
	api_ctrl->ipa_reset_flt = ipa2_reset_flt;
	/* NAT and messaging */
	api_ctrl->ipa_allocate_nat_device = ipa2_allocate_nat_device;
	api_ctrl->ipa_nat_init_cmd = ipa2_nat_init_cmd;
	api_ctrl->ipa_nat_dma_cmd = ipa2_nat_dma_cmd;
	api_ctrl->ipa_nat_del_cmd = ipa2_nat_del_cmd;
	api_ctrl->ipa_send_msg = ipa2_send_msg;
	api_ctrl->ipa_register_pull_msg = ipa2_register_pull_msg;
	api_ctrl->ipa_deregister_pull_msg = ipa2_deregister_pull_msg;
	api_ctrl->ipa_register_intf = ipa2_register_intf;
	api_ctrl->ipa_register_intf_ext = ipa2_register_intf_ext;
	api_ctrl->ipa_deregister_intf = ipa2_deregister_intf;
	api_ctrl->ipa_set_aggr_mode = ipa2_set_aggr_mode;
	api_ctrl->ipa_set_qcncm_ndp_sig = ipa2_set_qcncm_ndp_sig;
	api_ctrl->ipa_set_single_ndp_per_mbim = ipa2_set_single_ndp_per_mbim;
	/* data path */
	api_ctrl->ipa_tx_dp = ipa2_tx_dp;
	api_ctrl->ipa_tx_dp_mul = ipa2_tx_dp_mul;
	api_ctrl->ipa_free_skb = ipa2_free_skb;
	api_ctrl->ipa_setup_sys_pipe = ipa2_setup_sys_pipe;
	api_ctrl->ipa_teardown_sys_pipe = ipa2_teardown_sys_pipe;
	api_ctrl->ipa_sys_update_gsi_hdls = ipa2_sys_update_gsi_hdls;
	api_ctrl->ipa_sys_setup = ipa2_sys_setup;
	api_ctrl->ipa_sys_teardown = ipa2_sys_teardown;
	/* WDI (WLAN) offload */
	api_ctrl->ipa_connect_wdi_pipe = ipa2_connect_wdi_pipe;
	api_ctrl->ipa_disconnect_wdi_pipe = ipa2_disconnect_wdi_pipe;
	api_ctrl->ipa_enable_wdi_pipe = ipa2_enable_wdi_pipe;
	api_ctrl->ipa_disable_wdi_pipe = ipa2_disable_wdi_pipe;
	api_ctrl->ipa_resume_wdi_pipe = ipa2_resume_wdi_pipe;
	api_ctrl->ipa_suspend_wdi_pipe = ipa2_suspend_wdi_pipe;
	api_ctrl->ipa_get_wdi_stats = ipa2_get_wdi_stats;
	api_ctrl->ipa_get_smem_restr_bytes = ipa2_get_smem_restr_bytes;
	api_ctrl->ipa_broadcast_wdi_quota_reach_ind =
		ipa2_broadcast_wdi_quota_reach_ind;
	api_ctrl->ipa_uc_wdi_get_dbpa = ipa2_uc_wdi_get_dbpa;
	api_ctrl->ipa_uc_reg_rdyCB = ipa2_uc_reg_rdyCB;
	api_ctrl->ipa_uc_dereg_rdyCB = ipa2_uc_dereg_rdyCB;
	api_ctrl->ipa_create_wdi_mapping = ipa2_create_wdi_mapping;
	api_ctrl->ipa_release_wdi_mapping = ipa2_release_wdi_mapping;
	/* tethering bridge */
	api_ctrl->teth_bridge_init = ipa2_teth_bridge_init;
	api_ctrl->teth_bridge_disconnect = ipa2_teth_bridge_disconnect;
	api_ctrl->teth_bridge_connect = ipa2_teth_bridge_connect;
	api_ctrl->ipa_set_client = ipa2_set_client;
	api_ctrl->ipa_get_client = ipa2_get_client;
	api_ctrl->ipa_get_client_uplink = ipa2_get_client_uplink;
	/* IPA DMA */
	api_ctrl->ipa_dma_init = ipa2_dma_init;
	api_ctrl->ipa_dma_enable = ipa2_dma_enable;
	api_ctrl->ipa_dma_disable = ipa2_dma_disable;
	api_ctrl->ipa_dma_sync_memcpy = ipa2_dma_sync_memcpy;
	api_ctrl->ipa_dma_async_memcpy = ipa2_dma_async_memcpy;
	api_ctrl->ipa_dma_uc_memcpy = ipa2_dma_uc_memcpy;
	api_ctrl->ipa_dma_destroy = ipa2_dma_destroy;
	/* MHI */
	api_ctrl->ipa_mhi_init_engine = ipa2_mhi_init_engine;
	api_ctrl->ipa_connect_mhi_pipe = ipa2_connect_mhi_pipe;
	api_ctrl->ipa_disconnect_mhi_pipe = ipa2_disconnect_mhi_pipe;
	api_ctrl->ipa_uc_mhi_reset_channel = ipa2_uc_mhi_reset_channel;
	api_ctrl->ipa_mhi_sps_channel_empty = ipa2_mhi_sps_channel_empty;
	api_ctrl->ipa_generate_tag_process = ipa2_generate_tag_process;
	api_ctrl->ipa_disable_sps_pipe = ipa2_disable_sps_pipe;
	api_ctrl->ipa_qmi_enable_force_clear_datapath_send =
		qmi_enable_force_clear_datapath_send;
	api_ctrl->ipa_qmi_disable_force_clear_datapath_send =
		qmi_disable_force_clear_datapath_send;
	api_ctrl->ipa_mhi_reset_channel_internal =
		ipa2_mhi_reset_channel_internal;
	api_ctrl->ipa_mhi_start_channel_internal =
		ipa2_mhi_start_channel_internal;
	api_ctrl->ipa_mhi_resume_channels_internal =
		ipa2_mhi_resume_channels_internal;
	api_ctrl->ipa_uc_mhi_send_dl_ul_sync_info =
		ipa2_uc_mhi_send_dl_ul_sync_info;
	api_ctrl->ipa_uc_mhi_init = ipa2_uc_mhi_init;
	api_ctrl->ipa_uc_mhi_suspend_channel = ipa2_uc_mhi_suspend_channel;
	api_ctrl->ipa_uc_mhi_stop_event_update_channel =
		ipa2_uc_mhi_stop_event_update_channel;
	api_ctrl->ipa_uc_mhi_cleanup = ipa2_uc_mhi_cleanup;
	api_ctrl->ipa_uc_mhi_print_stats = ipa2_uc_mhi_print_stats;
	api_ctrl->ipa_uc_state_check = ipa2_uc_state_check;
	api_ctrl->ipa_write_qmap_id = ipa2_write_qmap_id;
	/* interrupts, power and misc */
	api_ctrl->ipa_add_interrupt_handler = ipa2_add_interrupt_handler;
	api_ctrl->ipa_remove_interrupt_handler = ipa2_remove_interrupt_handler;
	api_ctrl->ipa_restore_suspend_handler = ipa2_restore_suspend_handler;
	api_ctrl->ipa_bam_reg_dump = ipa2_bam_reg_dump;
	api_ctrl->ipa_get_ep_mapping = ipa2_get_ep_mapping;
	api_ctrl->ipa_is_ready = ipa2_is_ready;
	api_ctrl->ipa_proxy_clk_vote = ipa2_proxy_clk_vote;
	api_ctrl->ipa_proxy_clk_unvote = ipa2_proxy_clk_unvote;
	api_ctrl->ipa_is_client_handle_valid = ipa2_is_client_handle_valid;
	api_ctrl->ipa_get_client_mapping = ipa2_get_client_mapping;
	api_ctrl->ipa_get_rm_resource_from_ep = ipa2_get_rm_resource_from_ep;
	api_ctrl->ipa_get_modem_cfg_emb_pipe_flt =
		ipa2_get_modem_cfg_emb_pipe_flt;
	api_ctrl->ipa_get_transport_type = ipa2_get_transport_type;
	api_ctrl->ipa_ap_suspend = ipa2_ap_suspend;
	api_ctrl->ipa_ap_resume = ipa2_ap_resume;
	api_ctrl->ipa_get_smmu_domain = ipa2_get_smmu_domain;
	api_ctrl->ipa_disable_apps_wan_cons_deaggr =
		ipa2_disable_apps_wan_cons_deaggr;
	api_ctrl->ipa_get_dma_dev = ipa2_get_dma_dev;
	api_ctrl->ipa_get_gsi_ep_info = ipa2_get_gsi_ep_info;
	api_ctrl->ipa_stop_gsi_channel = ipa2_stop_gsi_channel;
	api_ctrl->ipa_register_ipa_ready_cb = ipa2_register_ipa_ready_cb;
	api_ctrl->ipa_inc_client_enable_clks = ipa2_inc_client_enable_clks;
	api_ctrl->ipa_dec_client_disable_clks = ipa2_dec_client_disable_clks;
	api_ctrl->ipa_inc_client_enable_clks_no_block =
		ipa2_inc_client_enable_clks_no_block;
	api_ctrl->ipa_suspend_resource_no_block =
		ipa2_suspend_resource_no_block;
	api_ctrl->ipa_resume_resource = ipa2_resume_resource;
	api_ctrl->ipa_suspend_resource_sync = ipa2_suspend_resource_sync;
	api_ctrl->ipa_set_required_perf_profile =
		ipa2_set_required_perf_profile;
	api_ctrl->ipa_get_ipc_logbuf = ipa2_get_ipc_logbuf;
	api_ctrl->ipa_get_ipc_logbuf_low = ipa2_get_ipc_logbuf_low;
	api_ctrl->ipa_rx_poll = ipa2_rx_poll;
	api_ctrl->ipa_recycle_wan_skb = ipa2_recycle_wan_skb;
	/* NTN / WDI3 offload */
	api_ctrl->ipa_setup_uc_ntn_pipes = ipa2_setup_uc_ntn_pipes;
	api_ctrl->ipa_tear_down_uc_offload_pipes =
		ipa2_tear_down_uc_offload_pipes;
	api_ctrl->ipa_get_pdev = ipa2_get_pdev;
	api_ctrl->ipa_ntn_uc_reg_rdyCB = ipa2_ntn_uc_reg_rdyCB;
	api_ctrl->ipa_ntn_uc_dereg_rdyCB = ipa2_ntn_uc_dereg_rdyCB;
	api_ctrl->ipa_conn_wdi3_pipes = ipa2_conn_wdi3_pipes;
	api_ctrl->ipa_disconn_wdi3_pipes = ipa2_disconn_wdi3_pipes;
	api_ctrl->ipa_enable_wdi3_pipes = ipa2_enable_wdi3_pipes;
	api_ctrl->ipa_disable_wdi3_pipes = ipa2_disable_wdi3_pipes;

	return 0;
}
5164
5165/**
5166 * ipa_get_sys_yellow_wm()- Return yellow WM value for IPA SYS pipes.
5167 *
5168 * Return value: IPA_YELLOW_MARKER_SYS_CFG_OFST register if IPA_HW_v2.6L,
5169 * IPA_DEFAULT_SYS_YELLOW_WM otherwise.
5170 */
5171u32 ipa_get_sys_yellow_wm(struct ipa_sys_context *sys)
5172{
Utkarsh Saxena4badc042017-03-03 15:38:45 +05305173 if (ipa_ctx->ipa_hw_type == IPA_HW_v2_6L &&
5174 ipa_ctx->ipa_uc_monitor_holb) {
Amir Levy9659e592016-10-27 18:08:27 +03005175 return ipa_read_reg(ipa_ctx->mmio,
5176 IPA_YELLOW_MARKER_SYS_CFG_OFST);
5177 } else {
5178 if (!sys)
5179 return 0;
5180
5181 return IPA_DEFAULT_SYS_YELLOW_WM * sys->rx_buff_sz;
5182 }
5183}
5184EXPORT_SYMBOL(ipa_get_sys_yellow_wm);
5185
5186void ipa_suspend_apps_pipes(bool suspend)
5187{
5188 struct ipa_ep_cfg_ctrl cfg;
5189 int ipa_ep_idx;
5190 u32 lan_empty = 0, wan_empty = 0;
5191 int ret;
5192 struct sps_event_notify notify;
5193 struct ipa_ep_context *ep;
5194
5195 memset(&cfg, 0, sizeof(cfg));
5196 cfg.ipa_ep_suspend = suspend;
5197
5198 ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
5199 ep = &ipa_ctx->ep[ipa_ep_idx];
5200 if (ep->valid) {
5201 ipa2_cfg_ep_ctrl(ipa_ep_idx, &cfg);
5202 /* Check if the pipes are empty. */
5203 ret = sps_is_pipe_empty(ep->ep_hdl, &lan_empty);
5204 if (ret) {
5205 IPAERR("%s: sps_is_pipe_empty failed with %d\n",
5206 __func__, ret);
5207 }
5208 if (!lan_empty) {
5209 IPADBG("LAN Cons is not-empty. Enter poll mode.\n");
5210 notify.user = ep->sys;
5211 notify.event_id = SPS_EVENT_EOT;
5212 if (ep->sys->sps_callback)
5213 ep->sys->sps_callback(&notify);
5214 }
5215 }
5216
5217 ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
5218 /* Considering the case for SSR. */
5219 if (ipa_ep_idx == -1) {
5220 IPADBG("Invalid client.\n");
5221 return;
5222 }
5223 ep = &ipa_ctx->ep[ipa_ep_idx];
5224 if (ep->valid) {
5225 ipa2_cfg_ep_ctrl(ipa_ep_idx, &cfg);
5226 /* Check if the pipes are empty. */
5227 ret = sps_is_pipe_empty(ep->ep_hdl, &wan_empty);
5228 if (ret) {
5229 IPAERR("%s: sps_is_pipe_empty failed with %d\n",
5230 __func__, ret);
5231 }
5232 if (!wan_empty) {
5233 IPADBG("WAN Cons is not-empty. Enter poll mode.\n");
5234 notify.user = ep->sys;
5235 notify.event_id = SPS_EVENT_EOT;
5236 if (ep->sys->sps_callback)
5237 ep->sys->sps_callback(&notify);
5238 }
5239 }
5240}
Amir Levyc4222c92016-11-07 16:14:54 +02005241
5242/**
5243 * ipa2_get_pdev() - return a pointer to IPA dev struct
5244 *
5245 * Return value: a pointer to IPA dev struct
5246 *
5247 */
5248struct device *ipa2_get_pdev(void)
5249{
5250 if (!ipa_ctx)
5251 return NULL;
5252
5253 return ipa_ctx->pdev;
5254}