blob: e06f6cf5d00e850b68fde6205080f67506317cea [file] [log] [blame]
Mohammed Javid060c9c22018-02-01 20:42:17 +05301/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
Amir Levy9659e592016-10-27 18:08:27 +03002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <net/ip.h>
14#include <linux/genalloc.h> /* gen_pool_alloc() */
15#include <linux/io.h>
16#include <linux/ratelimit.h>
17#include <linux/msm-bus.h>
18#include <linux/msm-bus-board.h>
19#include "ipa_i.h"
20#include "../ipa_rm_i.h"
21
/*
 * Core clock rates per IPA HW generation, in Hz.
 * NOTE(review): IPA_V1_CLK_RATE is a floating-point constant expression
 * (92.31 MHz) — presumably truncated to an integer Hz value at the point
 * of use; confirm against the clock framework call site.
 */
#define IPA_V1_CLK_RATE (92.31 * 1000 * 1000UL)
#define IPA_V1_1_CLK_RATE (100 * 1000 * 1000UL)
#define IPA_V2_0_CLK_RATE_SVS (75 * 1000 * 1000UL)
#define IPA_V2_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL)
#define IPA_V2_0_CLK_RATE_TURBO (200 * 1000 * 1000UL)
/* Max head-of-line-blocking timer values; the timer field width grew
 * from 9 bits (v1) to 16 bits (v2.0) to 32 bits (v2.5/v2.6L).
 */
#define IPA_V1_MAX_HOLB_TMR_VAL (512 - 1)
#define IPA_V2_0_MAX_HOLB_TMR_VAL (65536 - 1)
#define IPA_V2_5_MAX_HOLB_TMR_VAL (4294967296 - 1)
#define IPA_V2_6L_MAX_HOLB_TMR_VAL IPA_V2_5_MAX_HOLB_TMR_VAL

/* Aggregated-bandwidth thresholds (Mbps) used to pick the v2.0 clock tier */
#define IPA_V2_0_BW_THRESHOLD_TURBO_MBPS (1000)
#define IPA_V2_0_BW_THRESHOLD_NOMINAL_MBPS (600)

/* Max pipes + ICs for TAG process */
#define IPA_TAG_MAX_DESC (IPA_MAX_NUM_PIPES + 6)

/* Sleep window while waiting for TAG process completion */
#define IPA_TAG_SLEEP_MIN_USEC (1000)
#define IPA_TAG_SLEEP_MAX_USEC (2000)
#define IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT (10 * HZ)
/* Value programmed into the backward-compatibility register on v2.5+ */
#define IPA_BCR_REG_VAL (0x001FFF7F)
/* Valid range for the aggregation/EOT-coalescing granularity fields */
#define IPA_AGGR_GRAN_MIN (1)
#define IPA_AGGR_GRAN_MAX (32)
#define IPA_EOT_COAL_GRAN_MIN (1)
#define IPA_EOT_COAL_GRAN_MAX (16)
#define MSEC 1000
/* Rx polling interval bounds (msec) and switch-to/from-polling cutoffs */
#define MIN_RX_POLL_TIME 1
#define MAX_RX_POLL_TIME 5
#define UPPER_CUTOFF 50
#define LOWER_CUTOFF 10

/* Default yellow watermark for system pipe Rx pool replenish */
#define IPA_DEFAULT_SYS_YELLOW_WM 32

/* Max values representable by the endpoint aggregation HW fields */
#define IPA_AGGR_BYTE_LIMIT (\
		IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK >> \
		IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT)
#define IPA_AGGR_PKT_LIMIT (\
		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >> \
		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT)

/* Per-category equation resources available to the rule generator;
 * each list is terminated by -1.
 */
static const int ipa_ofst_meq32[] = { IPA_OFFSET_MEQ32_0,
	IPA_OFFSET_MEQ32_1, -1 };
static const int ipa_ofst_meq128[] = { IPA_OFFSET_MEQ128_0,
	IPA_OFFSET_MEQ128_1, -1 };
static const int ipa_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0,
	IPA_IHL_OFFSET_RANGE16_1, -1 };
static const int ipa_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0,
	IPA_IHL_OFFSET_MEQ32_1, -1 };
/* First index of ep_mapping[][]: HW generation family */
#define IPA_1_1 (0)
#define IPA_2_0 (1)
#define IPA_2_6L (2)

/* Returned by ipa2_get_ep_mapping() when a client has no pipe */
#define INVALID_EP_MAPPING_INDEX (-1)
74
/*
 * Entry of the client -> pipe lookup table.
 * NOTE(review): "confing" looks like a typo for "config", but the name is
 * referenced by ep_mapping[][] below — renaming must be done file-wide.
 */
struct ipa_ep_confing {
	bool valid;	/* true when this client has a pipe on this HW */
	int pipe_num;	/* HW pipe (endpoint) number */
};
Amir Levy9659e592016-10-27 18:08:27 +030079
/*
 * Client-type -> HW pipe number, per HW generation (IPA_1_1 / IPA_2_0 /
 * IPA_2_6L). Entries not listed are zero-initialized, i.e. .valid == false,
 * and ipa2_get_ep_mapping() reports them as unmapped.
 */
static const struct ipa_ep_confing ep_mapping[3][IPA_CLIENT_MAX] = {
	[IPA_1_1][IPA_CLIENT_HSIC1_PROD]         = {true, 19},
	[IPA_1_1][IPA_CLIENT_HSIC2_PROD]         = {true, 12},
	[IPA_1_1][IPA_CLIENT_USB2_PROD]          = {true, 12},
	[IPA_1_1][IPA_CLIENT_HSIC3_PROD]         = {true, 13},
	[IPA_1_1][IPA_CLIENT_USB3_PROD]          = {true, 13},
	[IPA_1_1][IPA_CLIENT_HSIC4_PROD]         = {true,  0},
	[IPA_1_1][IPA_CLIENT_USB4_PROD]          = {true,  0},
	[IPA_1_1][IPA_CLIENT_USB_PROD]           = {true, 11},
	[IPA_1_1][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = {true, 15},
	[IPA_1_1][IPA_CLIENT_A2_EMBEDDED_PROD]   = {true,  8},
	[IPA_1_1][IPA_CLIENT_A2_TETHERED_PROD]   = {true,  6},
	[IPA_1_1][IPA_CLIENT_APPS_LAN_WAN_PROD]  = {true,  2},
	[IPA_1_1][IPA_CLIENT_APPS_CMD_PROD]      = {true,  1},
	[IPA_1_1][IPA_CLIENT_Q6_LAN_PROD]        = {true,  5},

	[IPA_1_1][IPA_CLIENT_HSIC1_CONS]         = {true, 14},
	[IPA_1_1][IPA_CLIENT_HSIC2_CONS]         = {true, 16},
	[IPA_1_1][IPA_CLIENT_USB2_CONS]          = {true, 16},
	[IPA_1_1][IPA_CLIENT_HSIC3_CONS]         = {true, 17},
	[IPA_1_1][IPA_CLIENT_USB3_CONS]          = {true, 17},
	[IPA_1_1][IPA_CLIENT_HSIC4_CONS]         = {true, 18},
	[IPA_1_1][IPA_CLIENT_USB4_CONS]          = {true, 18},
	[IPA_1_1][IPA_CLIENT_USB_CONS]           = {true, 10},
	[IPA_1_1][IPA_CLIENT_A2_EMBEDDED_CONS]   = {true,  9},
	[IPA_1_1][IPA_CLIENT_A2_TETHERED_CONS]   = {true,  7},
	[IPA_1_1][IPA_CLIENT_A5_LAN_WAN_CONS]    = {true,  3},
	[IPA_1_1][IPA_CLIENT_Q6_LAN_CONS]        = {true,  4},


	/* Note some v2.0 producers legitimately share a pipe (e.g. pipe 12) */
	[IPA_2_0][IPA_CLIENT_HSIC1_PROD]         = {true, 12},
	[IPA_2_0][IPA_CLIENT_WLAN1_PROD]         = {true, 18},
	[IPA_2_0][IPA_CLIENT_USB2_PROD]          = {true, 12},
	[IPA_2_0][IPA_CLIENT_USB3_PROD]          = {true, 13},
	[IPA_2_0][IPA_CLIENT_USB4_PROD]          = {true,  0},
	[IPA_2_0][IPA_CLIENT_USB_PROD]           = {true, 11},
	[IPA_2_0][IPA_CLIENT_APPS_LAN_WAN_PROD]  = {true,  4},
	[IPA_2_0][IPA_CLIENT_APPS_CMD_PROD]      = {true,  3},
	[IPA_2_0][IPA_CLIENT_ODU_PROD]           = {true, 12},
	[IPA_2_0][IPA_CLIENT_MHI_PROD]           = {true, 18},
	[IPA_2_0][IPA_CLIENT_Q6_LAN_PROD]        = {true,  6},
	[IPA_2_0][IPA_CLIENT_Q6_CMD_PROD]        = {true,  7},
	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD]
						 = {true, 12},
	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]
						 = {true, 19},
	[IPA_2_0][IPA_CLIENT_ETHERNET_PROD]      = {true, 12},
	/* Only for test purpose */
	[IPA_2_0][IPA_CLIENT_TEST_PROD]          = {true, 19},
	[IPA_2_0][IPA_CLIENT_TEST1_PROD]         = {true, 19},
	[IPA_2_0][IPA_CLIENT_TEST2_PROD]         = {true, 12},
	[IPA_2_0][IPA_CLIENT_TEST3_PROD]         = {true, 11},
	[IPA_2_0][IPA_CLIENT_TEST4_PROD]         = {true,  0},

	[IPA_2_0][IPA_CLIENT_HSIC1_CONS]         = {true, 13},
	[IPA_2_0][IPA_CLIENT_WLAN1_CONS]         = {true, 17},
	[IPA_2_0][IPA_CLIENT_WLAN2_CONS]         = {true, 16},
	[IPA_2_0][IPA_CLIENT_WLAN3_CONS]         = {true, 14},
	[IPA_2_0][IPA_CLIENT_WLAN4_CONS]         = {true, 19},
	[IPA_2_0][IPA_CLIENT_USB_CONS]           = {true, 15},
	[IPA_2_0][IPA_CLIENT_USB_DPL_CONS]       = {true,  0},
	[IPA_2_0][IPA_CLIENT_APPS_LAN_CONS]      = {true,  2},
	[IPA_2_0][IPA_CLIENT_APPS_WAN_CONS]      = {true,  5},
	[IPA_2_0][IPA_CLIENT_ODU_EMB_CONS]       = {true, 13},
	[IPA_2_0][IPA_CLIENT_ODU_TETH_CONS]      = {true,  1},
	[IPA_2_0][IPA_CLIENT_MHI_CONS]           = {true, 17},
	[IPA_2_0][IPA_CLIENT_Q6_LAN_CONS]        = {true,  8},
	[IPA_2_0][IPA_CLIENT_Q6_WAN_CONS]        = {true,  9},
	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS]
						 = {true, 13},
	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS]
						 = {true, 16},
	[IPA_2_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]
						 = {true, 10},
	[IPA_2_0][IPA_CLIENT_ETHERNET_CONS]      = {true,  1},

	/* Only for test purpose */
	[IPA_2_0][IPA_CLIENT_TEST_CONS]          = {true,  1},
	[IPA_2_0][IPA_CLIENT_TEST1_CONS]         = {true,  1},
	[IPA_2_0][IPA_CLIENT_TEST2_CONS]         = {true, 16},
	[IPA_2_0][IPA_CLIENT_TEST3_CONS]         = {true, 13},
	[IPA_2_0][IPA_CLIENT_TEST4_CONS]         = {true, 15},


	[IPA_2_6L][IPA_CLIENT_USB_PROD]          = {true,  1},
	[IPA_2_6L][IPA_CLIENT_APPS_LAN_WAN_PROD] = {true,  4},
	[IPA_2_6L][IPA_CLIENT_APPS_CMD_PROD]     = {true,  3},
	[IPA_2_6L][IPA_CLIENT_Q6_LAN_PROD]       = {true,  6},
	[IPA_2_6L][IPA_CLIENT_Q6_CMD_PROD]       = {true,  7},
	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP_PROD]    = {true, 11},
	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP2_PROD]   = {true, 13},

	/* Only for test purpose */
	[IPA_2_6L][IPA_CLIENT_TEST_PROD]         = {true, 11},
	[IPA_2_6L][IPA_CLIENT_TEST1_PROD]        = {true, 11},
	[IPA_2_6L][IPA_CLIENT_TEST2_PROD]        = {true, 12},
	[IPA_2_6L][IPA_CLIENT_TEST3_PROD]        = {true, 13},
	[IPA_2_6L][IPA_CLIENT_TEST4_PROD]        = {true, 14},

	[IPA_2_6L][IPA_CLIENT_USB_CONS]          = {true,  0},
	[IPA_2_6L][IPA_CLIENT_USB_DPL_CONS]      = {true, 10},
	[IPA_2_6L][IPA_CLIENT_APPS_LAN_CONS]     = {true,  2},
	[IPA_2_6L][IPA_CLIENT_APPS_WAN_CONS]     = {true,  5},
	[IPA_2_6L][IPA_CLIENT_Q6_LAN_CONS]       = {true,  8},
	[IPA_2_6L][IPA_CLIENT_Q6_WAN_CONS]       = {true,  9},
	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP_CONS]    = {true, 12},
	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP2_CONS]   = {true, 14},

	/* Only for test purpose */
	[IPA_2_6L][IPA_CLIENT_TEST_CONS]         = {true, 15},
	[IPA_2_6L][IPA_CLIENT_TEST1_CONS]        = {true, 15},
	[IPA_2_6L][IPA_CLIENT_TEST2_CONS]        = {true,  0},
	[IPA_2_6L][IPA_CLIENT_TEST3_CONS]        = {true,  1},
	[IPA_2_6L][IPA_CLIENT_TEST4_CONS]        = {true, 10},
};
195
/*
 * msm-bus bandwidth vote tables. Each "vectors" array is one vote per
 * master/slave bus path (ab = average bandwidth, ib = instantaneous peak,
 * both in bytes/sec). The "usecases" arrays pair an all-zero init vote
 * (index 0) with a performance vote (index 1), selected at runtime via
 * msm_bus_scale_client_update_request().
 */
static struct msm_bus_vectors ipa_init_vectors_v1_1[]  = {
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	},
	{
		.src = MSM_BUS_MASTER_BAM_DMA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	},
	{
		.src = MSM_BUS_MASTER_BAM_DMA,
		.dst = MSM_BUS_SLAVE_OCIMEM,
		.ab = 0,
		.ib = 0,
	},
};

static struct msm_bus_vectors ipa_init_vectors_v2_0[]  = {
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	},
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_OCIMEM,
		.ab = 0,
		.ib = 0,
	},
};

static struct msm_bus_vectors ipa_max_perf_vectors_v1_1[]  = {
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 50000000,
		.ib = 960000000,
	},
	{
		.src = MSM_BUS_MASTER_BAM_DMA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 50000000,
		.ib = 960000000,
	},
	{
		.src = MSM_BUS_MASTER_BAM_DMA,
		.dst = MSM_BUS_SLAVE_OCIMEM,
		.ab = 50000000,
		.ib = 960000000,
	},
};

static struct msm_bus_vectors ipa_nominal_perf_vectors_v2_0[]  = {
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 100000000,
		.ib = 1300000000,
	},
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_OCIMEM,
		.ab = 100000000,
		.ib = 1300000000,
	},
};

/* Usecase 0: init (no bandwidth), usecase 1: performance vote */
static struct msm_bus_paths ipa_usecases_v1_1[]  = {
	{
		ARRAY_SIZE(ipa_init_vectors_v1_1),
		ipa_init_vectors_v1_1,
	},
	{
		ARRAY_SIZE(ipa_max_perf_vectors_v1_1),
		ipa_max_perf_vectors_v1_1,
	},
};

static struct msm_bus_paths ipa_usecases_v2_0[]  = {
	{
		ARRAY_SIZE(ipa_init_vectors_v2_0),
		ipa_init_vectors_v2_0,
	},
	{
		ARRAY_SIZE(ipa_nominal_perf_vectors_v2_0),
		ipa_nominal_perf_vectors_v2_0,
	},
};

/* Platform data handed to msm_bus_scale_register_client() */
static struct msm_bus_scale_pdata ipa_bus_client_pdata_v1_1 = {
	.usecase = ipa_usecases_v1_1,
	.num_usecases = ARRAY_SIZE(ipa_usecases_v1_1),
	.name = "ipa",
};

static struct msm_bus_scale_pdata ipa_bus_client_pdata_v2_0 = {
	.usecase = ipa_usecases_v2_0,
	.num_usecases = ARRAY_SIZE(ipa_usecases_v2_0),
	.name = "ipa",
};
301
302void ipa_active_clients_lock(void)
303{
304 unsigned long flags;
305
306 mutex_lock(&ipa_ctx->ipa_active_clients.mutex);
307 spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, flags);
308 ipa_ctx->ipa_active_clients.mutex_locked = true;
309 spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, flags);
310}
311
312int ipa_active_clients_trylock(unsigned long *flags)
313{
314 spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, *flags);
315 if (ipa_ctx->ipa_active_clients.mutex_locked) {
316 spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock,
317 *flags);
318 return 0;
319 }
320
321 return 1;
322}
323
324void ipa_active_clients_trylock_unlock(unsigned long *flags)
325{
326 spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, *flags);
327}
328
329void ipa_active_clients_unlock(void)
330{
331 unsigned long flags;
332
333 spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, flags);
334 ipa_ctx->ipa_active_clients.mutex_locked = false;
335 spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, flags);
336 mutex_unlock(&ipa_ctx->ipa_active_clients.mutex);
337}
338
339/**
340 * ipa_get_clients_from_rm_resource() - get IPA clients which are related to an
341 * IPA_RM resource
342 *
343 * @resource: [IN] IPA Resource Manager resource
344 * @clients: [OUT] Empty array which will contain the list of clients. The
345 * caller must initialize this array.
346 *
347 * Return codes: 0 on success, negative on failure.
348 */
349int ipa_get_clients_from_rm_resource(
350 enum ipa_rm_resource_name resource,
351 struct ipa_client_names *clients)
352{
353 int i = 0;
354
355 if (resource < 0 ||
356 resource >= IPA_RM_RESOURCE_MAX ||
357 !clients) {
358 IPAERR("Bad parameters\n");
359 return -EINVAL;
360 }
361
362 switch (resource) {
363 case IPA_RM_RESOURCE_USB_CONS:
364 clients->names[i++] = IPA_CLIENT_USB_CONS;
365 break;
366 case IPA_RM_RESOURCE_HSIC_CONS:
367 clients->names[i++] = IPA_CLIENT_HSIC1_CONS;
368 break;
369 case IPA_RM_RESOURCE_WLAN_CONS:
370 clients->names[i++] = IPA_CLIENT_WLAN1_CONS;
371 clients->names[i++] = IPA_CLIENT_WLAN2_CONS;
372 clients->names[i++] = IPA_CLIENT_WLAN3_CONS;
373 clients->names[i++] = IPA_CLIENT_WLAN4_CONS;
374 break;
375 case IPA_RM_RESOURCE_MHI_CONS:
376 clients->names[i++] = IPA_CLIENT_MHI_CONS;
377 break;
Skylar Chang79699ec2016-11-18 10:21:33 -0800378 case IPA_RM_RESOURCE_ODU_ADAPT_CONS:
379 clients->names[i++] = IPA_CLIENT_ODU_EMB_CONS;
380 clients->names[i++] = IPA_CLIENT_ODU_TETH_CONS;
381 break;
Sunil Paidimarri5139aa22017-02-13 11:07:32 -0800382 case IPA_RM_RESOURCE_ETHERNET_CONS:
383 clients->names[i++] = IPA_CLIENT_ETHERNET_CONS;
384 break;
Amir Levy9659e592016-10-27 18:08:27 +0300385 case IPA_RM_RESOURCE_USB_PROD:
386 clients->names[i++] = IPA_CLIENT_USB_PROD;
387 break;
388 case IPA_RM_RESOURCE_HSIC_PROD:
389 clients->names[i++] = IPA_CLIENT_HSIC1_PROD;
390 break;
391 case IPA_RM_RESOURCE_MHI_PROD:
392 clients->names[i++] = IPA_CLIENT_MHI_PROD;
393 break;
Skylar Chang79699ec2016-11-18 10:21:33 -0800394 case IPA_RM_RESOURCE_ODU_ADAPT_PROD:
395 clients->names[i++] = IPA_CLIENT_ODU_PROD;
Sunil Paidimarri5139aa22017-02-13 11:07:32 -0800396 break;
397 case IPA_RM_RESOURCE_ETHERNET_PROD:
398 clients->names[i++] = IPA_CLIENT_ETHERNET_PROD;
399 break;
Amir Levy9659e592016-10-27 18:08:27 +0300400 default:
401 break;
402 }
403 clients->length = i;
404
405 return 0;
406}
407
408/**
409 * ipa_should_pipe_be_suspended() - returns true when the client's pipe should
410 * be suspended during a power save scenario. False otherwise.
411 *
412 * @client: [IN] IPA client
413 */
414bool ipa_should_pipe_be_suspended(enum ipa_client_type client)
415{
416 struct ipa_ep_context *ep;
417 int ipa_ep_idx;
418
419 ipa_ep_idx = ipa2_get_ep_mapping(client);
420 if (ipa_ep_idx == -1) {
421 IPAERR("Invalid client.\n");
422 WARN_ON(1);
423 return false;
424 }
425
426 ep = &ipa_ctx->ep[ipa_ep_idx];
427
428 if (ep->keep_ipa_awake)
429 return false;
430
Skylar Chang79699ec2016-11-18 10:21:33 -0800431 if (client == IPA_CLIENT_USB_CONS ||
432 client == IPA_CLIENT_MHI_CONS ||
433 client == IPA_CLIENT_HSIC1_CONS ||
434 client == IPA_CLIENT_WLAN1_CONS ||
435 client == IPA_CLIENT_WLAN2_CONS ||
436 client == IPA_CLIENT_WLAN3_CONS ||
437 client == IPA_CLIENT_WLAN4_CONS ||
438 client == IPA_CLIENT_ODU_EMB_CONS ||
Sunil Paidimarri5139aa22017-02-13 11:07:32 -0800439 client == IPA_CLIENT_ODU_TETH_CONS ||
440 client == IPA_CLIENT_ETHERNET_CONS)
Amir Levy9659e592016-10-27 18:08:27 +0300441 return true;
442
443 return false;
444}
445
/**
 * ipa2_suspend_resource_sync() - suspend client endpoints related to the IPA_RM
 * resource and decrement active clients counter, which may result in clock
 * gating of IPA clocks.
 *
 * @resource: [IN] IPA Resource Manager resource
 *
 * May sleep (usleep_range) — must not be called from atomic context.
 *
 * Return codes: 0 on success, negative on failure.
 */
int ipa2_suspend_resource_sync(enum ipa_rm_resource_name resource)
{
	struct ipa_client_names clients;
	int res;
	int index;
	struct ipa_ep_cfg_ctrl suspend;
	enum ipa_client_type client;
	int ipa_ep_idx;
	bool pipe_suspended = false;

	memset(&clients, 0, sizeof(clients));
	res = ipa_get_clients_from_rm_resource(resource, &clients);
	if (res) {
		IPAERR("Bad params.\n");
		return res;
	}

	for (index = 0; index < clients.length; index++) {
		client = clients.names[index];
		ipa_ep_idx = ipa2_get_ep_mapping(client);
		/* per-client mapping failures are logged but do not abort
		 * the loop; note the final return is 0 regardless
		 */
		if (ipa_ep_idx == -1) {
			IPAERR("Invalid client.\n");
			res = -EINVAL;
			continue;
		}
		/* pipe is being suspended — do not auto-resume on connect */
		ipa_ctx->resume_on_connect[client] = false;
		if (ipa_ctx->ep[ipa_ep_idx].client == client &&
		    ipa_should_pipe_be_suspended(client)) {
			if (ipa_ctx->ep[ipa_ep_idx].valid) {
				/* suspend endpoint */
				memset(&suspend, 0, sizeof(suspend));
				suspend.ipa_ep_suspend = true;
				ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend);
				pipe_suspended = true;
			}
		}
	}
	/* Sleep ~1 msec */
	if (pipe_suspended)
		usleep_range(1000, 2000);

	/* before gating IPA clocks do TAG process */
	ipa_ctx->tag_process_before_gating = true;
	IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(resource));

	return 0;
}
502
/**
 * ipa2_suspend_resource_no_block() - suspend client endpoints related to the
 * IPA_RM resource and decrement active clients counter. This function is
 * guaranteed to avoid sleeping.
 *
 * @resource: [IN] IPA Resource Manager resource
 *
 * Return codes: 0 on success, negative on failure.
 */
int ipa2_suspend_resource_no_block(enum ipa_rm_resource_name resource)
{
	int res;
	struct ipa_client_names clients;
	int index;
	enum ipa_client_type client;
	struct ipa_ep_cfg_ctrl suspend;
	int ipa_ep_idx;
	unsigned long flags;
	struct ipa_active_client_logging_info log_info;

	/* non-blocking entry: bail out instead of waiting for the mutex */
	if (ipa_active_clients_trylock(&flags) == 0)
		return -EPERM;
	/* refuse to drop the last reference from this path — that would
	 * require the (blocking) clock-gating sequence
	 */
	if (ipa_ctx->ipa_active_clients.cnt == 1) {
		res = -EPERM;
		goto bail;
	}

	memset(&clients, 0, sizeof(clients));
	res = ipa_get_clients_from_rm_resource(resource, &clients);
	if (res) {
		IPAERR("ipa_get_clients_from_rm_resource() failed, name = %d.\n"
			, resource);
		goto bail;
	}

	for (index = 0; index < clients.length; index++) {
		client = clients.names[index];
		ipa_ep_idx = ipa2_get_ep_mapping(client);
		/* log and record failure, but try the remaining clients */
		if (ipa_ep_idx == -1) {
			IPAERR("Invalid client.\n");
			res = -EINVAL;
			continue;
		}
		ipa_ctx->resume_on_connect[client] = false;
		if (ipa_ctx->ep[ipa_ep_idx].client == client &&
		    ipa_should_pipe_be_suspended(client)) {
			if (ipa_ctx->ep[ipa_ep_idx].valid) {
				/* suspend endpoint */
				memset(&suspend, 0, sizeof(suspend));
				suspend.ipa_ep_suspend = true;
				ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend);
			}
		}
	}

	/* only decrement the active-clients count if every client mapped */
	if (res == 0) {
		IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
				ipa_rm_resource_str(resource));
		ipa2_active_clients_log_dec(&log_info, true);
		ipa_ctx->ipa_active_clients.cnt--;
		IPADBG("active clients = %d\n",
		       ipa_ctx->ipa_active_clients.cnt);
	}
bail:
	ipa_active_clients_trylock_unlock(&flags);

	return res;
}
571
/**
 * ipa2_resume_resource() - resume client endpoints related to the IPA_RM
 * resource.
 *
 * @resource: [IN] IPA Resource Manager resource
 *
 * Return codes: 0 on success, negative on failure.
 */
int ipa2_resume_resource(enum ipa_rm_resource_name resource)
{

	struct ipa_client_names clients;
	int res;
	int index;
	struct ipa_ep_cfg_ctrl suspend;
	enum ipa_client_type client;
	int ipa_ep_idx;

	memset(&clients, 0, sizeof(clients));
	res = ipa_get_clients_from_rm_resource(resource, &clients);
	if (res) {
		IPAERR("ipa_get_clients_from_rm_resource() failed.\n");
		return res;
	}

	for (index = 0; index < clients.length; index++) {
		client = clients.names[index];
		ipa_ep_idx = ipa2_get_ep_mapping(client);
		/* record the failure but keep resuming the other clients;
		 * the last failure code is returned to the caller
		 */
		if (ipa_ep_idx == -1) {
			IPAERR("Invalid client.\n");
			res = -EINVAL;
			continue;
		}
		/*
		 * The related ep, will be resumed on connect
		 * while its resource is granted
		 */
		ipa_ctx->resume_on_connect[client] = true;
		IPADBG("%d will be resumed on connect.\n", client);
		if (ipa_ctx->ep[ipa_ep_idx].client == client &&
		    ipa_should_pipe_be_suspended(client)) {
			/* serialize against a concurrent disconnect so we
			 * never un-suspend a pipe that is being torn down
			 */
			spin_lock(&ipa_ctx->disconnect_lock);
			if (ipa_ctx->ep[ipa_ep_idx].valid &&
			!ipa_ctx->ep[ipa_ep_idx].disconnect_in_progress) {
				memset(&suspend, 0, sizeof(suspend));
				suspend.ipa_ep_suspend = false;
				ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend);
			}
			spin_unlock(&ipa_ctx->disconnect_lock);
		}
	}

	return res;
}
626
627/* read how much SRAM is available for SW use
628 * In case of IPAv2.0 this will also supply an offset from
629 * which we can start write
630 */
631void _ipa_sram_settings_read_v1_1(void)
632{
633 ipa_ctx->smem_restricted_bytes = 0;
634 ipa_ctx->smem_sz = ipa_read_reg(ipa_ctx->mmio,
635 IPA_SHARED_MEM_SIZE_OFST_v1_1);
636 ipa_ctx->smem_reqd_sz = IPA_MEM_v1_RAM_END_OFST;
637 ipa_ctx->hdr_tbl_lcl = 1;
638 ipa_ctx->ip4_rt_tbl_lcl = 0;
639 ipa_ctx->ip6_rt_tbl_lcl = 0;
640 ipa_ctx->ip4_flt_tbl_lcl = 1;
641 ipa_ctx->ip6_flt_tbl_lcl = 1;
642}
643
644void _ipa_sram_settings_read_v2_0(void)
645{
646 ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio,
647 IPA_SHARED_MEM_SIZE_OFST_v2_0,
648 IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0,
649 IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0);
650 ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio,
651 IPA_SHARED_MEM_SIZE_OFST_v2_0,
652 IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0,
653 IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0);
654 ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst);
655 ipa_ctx->hdr_tbl_lcl = 0;
656 ipa_ctx->ip4_rt_tbl_lcl = 0;
657 ipa_ctx->ip6_rt_tbl_lcl = 0;
658 ipa_ctx->ip4_flt_tbl_lcl = 0;
659 ipa_ctx->ip6_flt_tbl_lcl = 0;
660}
661
/* Read the v2.5 shared-memory geometry. Identical to v2.0 except that the
 * header processing-context table is kept in local SRAM.
 */
void _ipa_sram_settings_read_v2_5(void)
{
	ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio,
		IPA_SHARED_MEM_SIZE_OFST_v2_0,
		IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0,
		IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0);
	ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio,
		IPA_SHARED_MEM_SIZE_OFST_v2_0,
		IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0,
		IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0);
	ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst);
	ipa_ctx->hdr_tbl_lcl = 0;
	ipa_ctx->hdr_proc_ctx_tbl_lcl = 1;

	/*
	 * when proc ctx table is located in internal memory,
	 * modem entries resides first.
	 * NOTE(review): as written, hdr_proc_ctx_tbl_lcl was just set to 1,
	 * so this condition is always true here; it appears kept for
	 * symmetry with configurations that may clear the flag.
	 */
	if (ipa_ctx->hdr_proc_ctx_tbl_lcl) {
		ipa_ctx->hdr_proc_ctx_tbl.start_offset =
			IPA_MEM_PART(modem_hdr_proc_ctx_size);
	}
	ipa_ctx->ip4_rt_tbl_lcl = 0;
	ipa_ctx->ip6_rt_tbl_lcl = 0;
	ipa_ctx->ip4_flt_tbl_lcl = 0;
	ipa_ctx->ip6_flt_tbl_lcl = 0;
}
689
690void _ipa_sram_settings_read_v2_6L(void)
691{
692 ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio,
693 IPA_SHARED_MEM_SIZE_OFST_v2_0,
694 IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0,
695 IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0);
696 ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio,
697 IPA_SHARED_MEM_SIZE_OFST_v2_0,
698 IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0,
699 IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0);
700 ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst);
701 ipa_ctx->hdr_tbl_lcl = 0;
702 ipa_ctx->ip4_rt_tbl_lcl = 0;
703 ipa_ctx->ip6_rt_tbl_lcl = 0;
704 ipa_ctx->ip4_flt_tbl_lcl = 0;
705 ipa_ctx->ip6_flt_tbl_lcl = 0;
706}
707
708void _ipa_cfg_route_v1_1(struct ipa_route *route)
709{
710 u32 reg_val = 0;
711
712 IPA_SETFIELD_IN_REG(reg_val, route->route_dis,
713 IPA_ROUTE_ROUTE_DIS_SHFT,
714 IPA_ROUTE_ROUTE_DIS_BMSK);
715
716 IPA_SETFIELD_IN_REG(reg_val, route->route_def_pipe,
717 IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
718 IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);
719
720 IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_table,
721 IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
722 IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK);
723
724 IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_ofst,
725 IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
726 IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK);
727
728 ipa_write_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1, reg_val);
729}
730
731void _ipa_cfg_route_v2_0(struct ipa_route *route)
732{
733 u32 reg_val = 0;
734
735 IPA_SETFIELD_IN_REG(reg_val, route->route_dis,
736 IPA_ROUTE_ROUTE_DIS_SHFT,
737 IPA_ROUTE_ROUTE_DIS_BMSK);
738
739 IPA_SETFIELD_IN_REG(reg_val, route->route_def_pipe,
740 IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
741 IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);
742
743 IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_table,
744 IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
745 IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK);
746
747 IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_ofst,
748 IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
749 IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK);
750
751 IPA_SETFIELD_IN_REG(reg_val, route->route_frag_def_pipe,
752 IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT,
753 IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK);
754
755 ipa_write_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1, reg_val);
756}
757
758/**
759 * ipa_cfg_route() - configure IPA route
760 * @route: IPA route
761 *
762 * Return codes:
763 * 0: success
764 */
765int ipa_cfg_route(struct ipa_route *route)
766{
767
768 IPADBG("disable_route_block=%d, default_pipe=%d, default_hdr_tbl=%d\n",
769 route->route_dis,
770 route->route_def_pipe,
771 route->route_def_hdr_table);
772 IPADBG("default_hdr_ofst=%d, default_frag_pipe=%d\n",
773 route->route_def_hdr_ofst,
774 route->route_frag_def_pipe);
775
776 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
777
778 ipa_ctx->ctrl->ipa_cfg_route(route);
779
780 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
781
782 return 0;
783}
784
785/**
786 * ipa_cfg_filter() - configure filter
787 * @disable: disable value
788 *
789 * Return codes:
790 * 0: success
791 */
792int ipa_cfg_filter(u32 disable)
793{
794 u32 ipa_filter_ofst = IPA_FILTER_OFST_v1_1;
795
796 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
797 ipa_write_reg(ipa_ctx->mmio, ipa_filter_ofst,
798 IPA_SETFIELD(!disable,
799 IPA_FILTER_FILTER_EN_SHFT,
800 IPA_FILTER_FILTER_EN_BMSK));
801 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
802
803 return 0;
804}
805
/**
 * ipa_init_hw() - initialize HW
 *
 * Soft-resets the core, enables it, sanity-checks register access by
 * reading the version register, and programs the backward-compatibility
 * register on v2.5+.
 *
 * Return codes:
 * 0: success
 * -EFAULT: version register read back as 0 (registers inaccessible)
 */
int ipa_init_hw(void)
{
	u32 ipa_version = 0;

	/* do soft reset of IPA */
	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 1);
	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 0);

	/* enable IPA */
	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_CFG_OFST, 1);

	/* Read IPA version and make sure we have access to the registers */
	ipa_version = ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST);
	if (ipa_version == 0)
		return -EFAULT;

	if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) {
		/* program BCR with IPA_BCR_REG_VAL (0x001FFF7F) to select
		 * the new IPA HW behavior on v2.5+
		 */
		ipa_write_reg(ipa_ctx->mmio, IPA_BCR_OFST, IPA_BCR_REG_VAL);
	}
	return 0;
}
834
835/**
836 * ipa2_get_ep_mapping() - provide endpoint mapping
837 * @client: client type
838 *
839 * Return value: endpoint mapping
840 */
841int ipa2_get_ep_mapping(enum ipa_client_type client)
842{
843 u8 hw_type_index = IPA_1_1;
844
845 if (unlikely(!ipa_ctx)) {
846 IPAERR("IPA driver was not initialized\n");
847 return INVALID_EP_MAPPING_INDEX;
848 }
849
850 if (client >= IPA_CLIENT_MAX || client < 0) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +0530851 IPAERR_RL("Bad client number! client =%d\n", client);
Amir Levy9659e592016-10-27 18:08:27 +0300852 return INVALID_EP_MAPPING_INDEX;
853 }
854
855 switch (ipa_ctx->ipa_hw_type) {
856 case IPA_HW_v2_0:
857 case IPA_HW_v2_5:
858 hw_type_index = IPA_2_0;
859 break;
860 case IPA_HW_v2_6L:
861 hw_type_index = IPA_2_6L;
862 break;
863 default:
864 hw_type_index = IPA_1_1;
865 break;
866 }
867
Skylar Changa9516582017-05-09 11:36:47 -0700868 if (!ep_mapping[hw_type_index][client].valid)
869 return INVALID_EP_MAPPING_INDEX;
870
871 return ep_mapping[hw_type_index][client].pipe_num;
Amir Levy9659e592016-10-27 18:08:27 +0300872}
873
874/* ipa2_set_client() - provide client mapping
875 * @client: client type
876 *
877 * Return value: none
878 */
879
880void ipa2_set_client(int index, enum ipacm_client_enum client, bool uplink)
881{
Skylar Chang09e0e252017-03-20 14:51:29 -0700882 if (client > IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) {
Amir Levy9659e592016-10-27 18:08:27 +0300883 IPAERR("Bad client number! client =%d\n", client);
884 } else if (index >= IPA_MAX_NUM_PIPES || index < 0) {
885 IPAERR("Bad pipe index! index =%d\n", index);
886 } else {
887 ipa_ctx->ipacm_client[index].client_enum = client;
888 ipa_ctx->ipacm_client[index].uplink = uplink;
889 }
890}
891
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +0530892/* ipa2_get_wlan_stats() - get ipa wifi stats
893 *
894 * Return value: success or failure
895 */
896int ipa2_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats)
897{
898 if (ipa_ctx->uc_wdi_ctx.stats_notify) {
899 ipa_ctx->uc_wdi_ctx.stats_notify(IPA_GET_WDI_SAP_STATS,
900 wdi_sap_stats);
901 } else {
902 IPAERR("uc_wdi_ctx.stats_notify not registered\n");
903 return -EFAULT;
904 }
905 return 0;
906}
907
908int ipa2_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota)
909{
910 if (ipa_ctx->uc_wdi_ctx.stats_notify) {
911 ipa_ctx->uc_wdi_ctx.stats_notify(IPA_SET_WIFI_QUOTA,
912 wdi_quota);
913 } else {
914 IPAERR("uc_wdi_ctx.stats_notify not registered\n");
915 return -EFAULT;
916 }
917 return 0;
918}
919
Amir Levy9659e592016-10-27 18:08:27 +0300920/**
921 * ipa2_get_client() - provide client mapping
922 * @client: client type
923 *
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +0530924 * Return value: client mapping enum
Amir Levy9659e592016-10-27 18:08:27 +0300925 */
926enum ipacm_client_enum ipa2_get_client(int pipe_idx)
927{
928 if (pipe_idx >= IPA_MAX_NUM_PIPES || pipe_idx < 0) {
929 IPAERR("Bad pipe index! pipe_idx =%d\n", pipe_idx);
930 return IPACM_CLIENT_MAX;
931 } else {
932 return ipa_ctx->ipacm_client[pipe_idx].client_enum;
933 }
934}
935
936/**
937 * ipa2_get_client_uplink() - provide client mapping
938 * @client: client type
939 *
940 * Return value: none
941 */
942bool ipa2_get_client_uplink(int pipe_idx)
943{
Skylar Chang53f855e2017-06-12 10:50:12 -0700944 if (pipe_idx < 0 || pipe_idx >= IPA_MAX_NUM_PIPES) {
945 IPAERR("invalid pipe idx %d\n", pipe_idx);
946 return false;
947 }
948
Amir Levy9659e592016-10-27 18:08:27 +0300949 return ipa_ctx->ipacm_client[pipe_idx].uplink;
950}
951
952/**
953 * ipa2_get_rm_resource_from_ep() - get the IPA_RM resource which is related to
954 * the supplied pipe index.
955 *
956 * @pipe_idx:
957 *
958 * Return value: IPA_RM resource related to the pipe, -1 if a resource was not
959 * found.
960 */
961enum ipa_rm_resource_name ipa2_get_rm_resource_from_ep(int pipe_idx)
962{
963 int i;
964 int j;
965 enum ipa_client_type client;
966 struct ipa_client_names clients;
967 bool found = false;
968
969 if (unlikely(!ipa_ctx)) {
970 IPAERR("IPA driver was not initialized\n");
971 return -EINVAL;
972 }
973
974 if (pipe_idx >= ipa_ctx->ipa_num_pipes || pipe_idx < 0) {
975 IPAERR("Bad pipe index!\n");
976 return -EINVAL;
977 }
978
979 client = ipa_ctx->ep[pipe_idx].client;
980
981 for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) {
982 memset(&clients, 0, sizeof(clients));
983 ipa_get_clients_from_rm_resource(i, &clients);
984 for (j = 0; j < clients.length; j++) {
985 if (clients.names[j] == client) {
986 found = true;
987 break;
988 }
989 }
990 if (found)
991 break;
992 }
993
994 if (!found)
995 return -EFAULT;
996
997 return i;
998}
999
1000/**
1001 * ipa2_get_client_mapping() - provide client mapping
1002 * @pipe_idx: IPA end-point number
1003 *
1004 * Return value: client mapping
1005 */
1006enum ipa_client_type ipa2_get_client_mapping(int pipe_idx)
1007{
1008 if (unlikely(!ipa_ctx)) {
1009 IPAERR("IPA driver was not initialized\n");
1010 return -EINVAL;
1011 }
1012
1013 if (pipe_idx >= ipa_ctx->ipa_num_pipes || pipe_idx < 0) {
1014 IPAERR("Bad pipe index!\n");
1015 return -EINVAL;
1016 }
1017
1018 return ipa_ctx->ep[pipe_idx].client;
1019}
1020
1021void ipa_generate_mac_addr_hw_rule(u8 **buf, u8 hdr_mac_addr_offset,
1022 const uint8_t mac_addr_mask[ETH_ALEN],
1023 const uint8_t mac_addr[ETH_ALEN])
1024{
1025 *buf = ipa_write_8(hdr_mac_addr_offset, *buf);
1026
1027 /* MAC addr mask copied as little endian each 4 bytes */
1028 *buf = ipa_write_8(mac_addr_mask[3], *buf);
1029 *buf = ipa_write_8(mac_addr_mask[2], *buf);
1030 *buf = ipa_write_8(mac_addr_mask[1], *buf);
1031 *buf = ipa_write_8(mac_addr_mask[0], *buf);
1032 *buf = ipa_write_16(0, *buf);
1033 *buf = ipa_write_8(mac_addr_mask[5], *buf);
1034 *buf = ipa_write_8(mac_addr_mask[4], *buf);
1035 *buf = ipa_write_32(0, *buf);
1036 *buf = ipa_write_32(0, *buf);
1037
1038 /* MAC addr copied as little endian each 4 bytes */
1039 *buf = ipa_write_8(mac_addr[3], *buf);
1040 *buf = ipa_write_8(mac_addr[2], *buf);
1041 *buf = ipa_write_8(mac_addr[1], *buf);
1042 *buf = ipa_write_8(mac_addr[0], *buf);
1043 *buf = ipa_write_16(0, *buf);
1044 *buf = ipa_write_8(mac_addr[5], *buf);
1045 *buf = ipa_write_8(mac_addr[4], *buf);
1046 *buf = ipa_write_32(0, *buf);
1047 *buf = ipa_write_32(0, *buf);
1048 *buf = ipa_pad_to_32(*buf);
1049}
1050
/**
 * ipa_generate_hw_rule() - generate HW rule
 * @ip: IP address type (IPA_IP_v4 or IPA_IP_v6)
 * @attrib: rule attributes to translate into HW equations
 * @buf: in/out cursor into the output buffer; advanced as operands are
 *	 serialized
 * @en_rule: out bitmap of the equations enabled for this rule
 *
 * For each attribute bit set in @attrib->attrib_mask one HW equation is
 * consumed. The meq32/meq128/ihl_rng16/ihl_meq32 equations are limited
 * resources (tracked by the local counters below); running out of any of
 * them fails the whole rule.
 *
 * Return codes:
 * 0: success
 * -EPERM: wrong input
 */
int ipa_generate_hw_rule(enum ipa_ip_type ip,
	const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule)
{
	/* next free slot per equation type; table lookup yields -1 when the
	 * corresponding equation resource is exhausted
	 */
	u8 ofst_meq32 = 0;
	u8 ihl_ofst_rng16 = 0;
	u8 ihl_ofst_meq32 = 0;
	u8 ofst_meq128 = 0;

	if (ip == IPA_IP_v4) {

		/* error check: reject IPv6-only attributes on a v4 rule */
		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
		    attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask &
		    IPA_FLT_FLOW_LABEL) {
			IPAERR("v6 attrib's specified for v4 rule\n");
			return -EPERM;
		}

		if (attrib->attrib_mask & IPA_FLT_TOS) {
			*en_rule |= IPA_TOS_EQ;
			*buf = ipa_write_8(attrib->u.v4.tos, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				IPAERR("ran out of meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* 0 => offset of TOS in v4 header */
			*buf = ipa_write_8(0, *buf);
			/* TOS lives in bits 16-23 of the first dword */
			*buf = ipa_write_32((attrib->tos_mask << 16), *buf);
			*buf = ipa_write_32((attrib->tos_value << 16), *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
			*en_rule |= IPA_PROTOCOL_EQ;
			*buf = ipa_write_8(attrib->u.v4.protocol, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				IPAERR("ran out of meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* 12 => offset of src ip in v4 header */
			*buf = ipa_write_8(12, *buf);
			*buf = ipa_write_32(attrib->u.v4.src_addr_mask, *buf);
			*buf = ipa_write_32(attrib->u.v4.src_addr, *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				IPAERR("ran out of meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* 16 => offset of dst ip in v4 header */
			*buf = ipa_write_8(16, *buf);
			*buf = ipa_write_32(attrib->u.v4.dst_addr_mask, *buf);
			*buf = ipa_write_32(attrib->u.v4.dst_addr, *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				IPAERR("ran out of meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* -2 => offset of ether type in L2 hdr */
			*buf = ipa_write_8((u8)-2, *buf);
			/* mask and value both carry the ether type in the
			 * upper 16 bits (network byte order)
			 */
			*buf = ipa_write_16(0, *buf);
			*buf = ipa_write_16(htons(attrib->ether_type), *buf);
			*buf = ipa_write_16(0, *buf);
			*buf = ipa_write_16(htons(attrib->ether_type), *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			if (attrib->src_port_hi < attrib->src_port_lo) {
				IPAERR("bad src port range param\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 0 => offset of src port after v4 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_16(attrib->src_port_hi, *buf);
			*buf = ipa_write_16(attrib->src_port_lo, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			if (attrib->dst_port_hi < attrib->dst_port_lo) {
				IPAERR("bad dst port range param\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 2 => offset of dst port after v4 header */
			*buf = ipa_write_8(2, *buf);
			*buf = ipa_write_16(attrib->dst_port_hi, *buf);
			*buf = ipa_write_16(attrib->dst_port_lo, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_TYPE) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 0 => offset of type after v4 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_32(0xFF, *buf);
			*buf = ipa_write_32(attrib->type, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_CODE) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 1 => offset of code after v4 header */
			*buf = ipa_write_8(1, *buf);
			*buf = ipa_write_32(0xFF, *buf);
			*buf = ipa_write_32(attrib->code, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SPI) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 0 => offset of SPI after v4 header FIXME */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_32(0xFFFFFFFF, *buf);
			*buf = ipa_write_32(attrib->spi, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 0 => offset of src port after v4 header */
			/* exact-match expressed as a degenerate range */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_16(attrib->src_port, *buf);
			*buf = ipa_write_16(attrib->src_port, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 2 => offset of dst port after v4 header */
			/* exact-match expressed as a degenerate range */
			*buf = ipa_write_8(2, *buf);
			*buf = ipa_write_16(attrib->dst_port, *buf);
			*buf = ipa_write_16(attrib->dst_port, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];

			/* -14 => offset of dst mac addr in Ethernet II hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-14,
				attrib->dst_mac_addr_mask,
				attrib->dst_mac_addr);

			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];

			/* -8 => offset of src mac addr in Ethernet II hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-8,
				attrib->src_mac_addr_mask,
				attrib->src_mac_addr);

			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];

			/* -22 => offset of dst mac addr in 802.3 hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-22,
				attrib->dst_mac_addr_mask,
				attrib->dst_mac_addr);

			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];

			/* -16 => offset of src mac addr in 802.3 hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-16,
				attrib->src_mac_addr_mask,
				attrib->src_mac_addr);

			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_META_DATA) {
			*en_rule |= IPA_METADATA_COMPARE;
			*buf = ipa_write_8(0, *buf);	/* offset, reserved */
			*buf = ipa_write_32(attrib->meta_data_mask, *buf);
			*buf = ipa_write_32(attrib->meta_data, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
			/* frag equation has no operands, only the enable bit */
			*en_rule |= IPA_IS_FRAG;
			*buf = ipa_pad_to_32(*buf);
		}
	} else if (ip == IPA_IP_v6) {

		/* v6 code below assumes no extension headers TODO: fix this */

		/* error check: reject IPv4-only attributes on a v6 rule */
		if (attrib->attrib_mask & IPA_FLT_TOS ||
		    attrib->attrib_mask & IPA_FLT_PROTOCOL) {
			IPAERR("v4 attrib's specified for v6 rule\n");
			return -EPERM;
		}

		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
			/* v6 next-header reuses the protocol equation */
			*en_rule |= IPA_PROTOCOL_EQ;
			*buf = ipa_write_8(attrib->u.v6.next_hdr, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				IPAERR("ran out of meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* -2 => offset of ether type in L2 hdr */
			*buf = ipa_write_8((u8)-2, *buf);
			*buf = ipa_write_16(0, *buf);
			*buf = ipa_write_16(htons(attrib->ether_type), *buf);
			*buf = ipa_write_16(0, *buf);
			*buf = ipa_write_16(htons(attrib->ether_type), *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_TYPE) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 0 => offset of type after v6 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_32(0xFF, *buf);
			*buf = ipa_write_32(attrib->type, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_CODE) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 1 => offset of code after v6 header */
			*buf = ipa_write_8(1, *buf);
			*buf = ipa_write_32(0xFF, *buf);
			*buf = ipa_write_32(attrib->code, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SPI) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 0 => offset of SPI after v6 header FIXME */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_32(0xFFFFFFFF, *buf);
			*buf = ipa_write_32(attrib->spi, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 22 => offset of IP type after v6 header */
			*buf = ipa_write_8(22, *buf);
			/* match only the inner IP version nibble */
			*buf = ipa_write_32(0xF0000000, *buf);
			if (attrib->type == 0x40)
				*buf = ipa_write_32(0x40000000, *buf);
			else
				*buf = ipa_write_32(0x60000000, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 38 => offset of inner IPv4 addr */
			*buf = ipa_write_8(38, *buf);
			*buf = ipa_write_32(attrib->u.v4.dst_addr_mask, *buf);
			*buf = ipa_write_32(attrib->u.v4.dst_addr, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 0 => offset of src port after v6 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_16(attrib->src_port, *buf);
			*buf = ipa_write_16(attrib->src_port, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 2 => offset of dst port after v6 header */
			*buf = ipa_write_8(2, *buf);
			*buf = ipa_write_16(attrib->dst_port, *buf);
			*buf = ipa_write_16(attrib->dst_port, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			if (attrib->src_port_hi < attrib->src_port_lo) {
				IPAERR("bad src port range param\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 0 => offset of src port after v6 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_16(attrib->src_port_hi, *buf);
			*buf = ipa_write_16(attrib->src_port_lo, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			if (attrib->dst_port_hi < attrib->dst_port_lo) {
				IPAERR("bad dst port range param\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 2 => offset of dst port after v6 header */
			*buf = ipa_write_8(2, *buf);
			*buf = ipa_write_16(attrib->dst_port_hi, *buf);
			*buf = ipa_write_16(attrib->dst_port_lo, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* 8 => offset of src ip in v6 header */
			*buf = ipa_write_8(8, *buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[0],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[1],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[2],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[3],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr[0], *buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr[1], *buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr[2], *buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr[3], *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* 24 => offset of dst ip in v6 header */
			*buf = ipa_write_8(24, *buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[0],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[1],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[2],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[3],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr[0], *buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr[1], *buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr[2], *buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr[3], *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_TC) {
			/* NOTE(review): en_rule is set with the attrib-mask
			 * constant IPA_FLT_TC rather than a *_EQ constant;
			 * presumably the bit values coincide — confirm against
			 * the equation bit definitions before changing.
			 */
			*en_rule |= IPA_FLT_TC;
			*buf = ipa_write_8(attrib->u.v6.tc, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* 0 => offset of TOS in v6 header */
			*buf = ipa_write_8(0, *buf);
			/* traffic class sits in bits 20-27 of the first
			 * dword; remaining 96 bits of the meq128 are zero
			 */
			*buf = ipa_write_32((attrib->tos_mask << 20), *buf);
			*buf = ipa_write_32(0, *buf);
			*buf = ipa_write_32(0, *buf);
			*buf = ipa_write_32(0, *buf);

			*buf = ipa_write_32((attrib->tos_value << 20), *buf);
			*buf = ipa_write_32(0, *buf);
			*buf = ipa_write_32(0, *buf);
			*buf = ipa_write_32(0, *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];

			/* -14 => offset of dst mac addr in Ethernet II hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-14,
				attrib->dst_mac_addr_mask,
				attrib->dst_mac_addr);

			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];

			/* -8 => offset of src mac addr in Ethernet II hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-8,
				attrib->src_mac_addr_mask,
				attrib->src_mac_addr);

			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];

			/* -22 => offset of dst mac addr in 802.3 hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-22,
				attrib->dst_mac_addr_mask,
				attrib->dst_mac_addr);

			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];

			/* -16 => offset of src mac addr in 802.3 hdr */
			ipa_generate_mac_addr_hw_rule(
				buf,
				-16,
				attrib->src_mac_addr_mask,
				attrib->src_mac_addr);

			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
			/* NOTE(review): like IPA_FLT_TC above, this sets the
			 * attrib-mask constant rather than a *_EQ constant —
			 * verify bit values coincide before changing.
			 */
			*en_rule |= IPA_FLT_FLOW_LABEL;
			/* FIXME FL is only 20 bits */
			*buf = ipa_write_32(attrib->u.v6.flow_label, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_META_DATA) {
			*en_rule |= IPA_METADATA_COMPARE;
			*buf = ipa_write_8(0, *buf);	/* offset, reserved */
			*buf = ipa_write_32(attrib->meta_data_mask, *buf);
			*buf = ipa_write_32(attrib->meta_data, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
			*en_rule |= IPA_IS_FRAG;
			*buf = ipa_pad_to_32(*buf);
		}
	} else {
		IPAERR("unsupported ip %d\n", ip);
		return -EPERM;
	}

	/*
	 * default "rule" means no attributes set -> map to
	 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
	 */
	if (attrib->attrib_mask == 0) {
		IPADBG_LOW("building default rule\n");
		if (ipa_ofst_meq32[ofst_meq32] == -1) {
			IPAERR("ran out of meq32 eq\n");
			return -EPERM;
		}
		*en_rule |= ipa_ofst_meq32[ofst_meq32];
		*buf = ipa_write_8(0, *buf);    /* offset */
		*buf = ipa_write_32(0, *buf);   /* mask */
		*buf = ipa_write_32(0, *buf);   /* val */
		*buf = ipa_pad_to_32(*buf);
		ofst_meq32++;
	}

	return 0;
}
1693
1694void ipa_generate_flt_mac_addr_eq(struct ipa_ipfltri_rule_eq *eq_atrb,
1695 u8 hdr_mac_addr_offset, const uint8_t mac_addr_mask[ETH_ALEN],
1696 const uint8_t mac_addr[ETH_ALEN], u8 ofst_meq128)
1697{
1698 eq_atrb->offset_meq_128[ofst_meq128].offset = hdr_mac_addr_offset;
1699 eq_atrb->offset_meq_128[ofst_meq128].mask[0] = mac_addr_mask[3];
1700 eq_atrb->offset_meq_128[ofst_meq128].mask[1] = mac_addr_mask[2];
1701 eq_atrb->offset_meq_128[ofst_meq128].mask[2] = mac_addr_mask[1];
1702 eq_atrb->offset_meq_128[ofst_meq128].mask[3] = mac_addr_mask[0];
1703 eq_atrb->offset_meq_128[ofst_meq128].mask[4] = 0;
1704 eq_atrb->offset_meq_128[ofst_meq128].mask[5] = 0;
1705 eq_atrb->offset_meq_128[ofst_meq128].mask[6] = mac_addr_mask[5];
1706 eq_atrb->offset_meq_128[ofst_meq128].mask[7] = mac_addr_mask[4];
1707 memset(eq_atrb->offset_meq_128[ofst_meq128].mask + 8, 0, 8);
1708 eq_atrb->offset_meq_128[ofst_meq128].value[0] = mac_addr[3];
1709 eq_atrb->offset_meq_128[ofst_meq128].value[1] = mac_addr[2];
1710 eq_atrb->offset_meq_128[ofst_meq128].value[2] = mac_addr[1];
1711 eq_atrb->offset_meq_128[ofst_meq128].value[3] = mac_addr[0];
1712 eq_atrb->offset_meq_128[ofst_meq128].value[4] = 0;
1713 eq_atrb->offset_meq_128[ofst_meq128].value[5] = 0;
1714 eq_atrb->offset_meq_128[ofst_meq128].value[6] = mac_addr[5];
1715 eq_atrb->offset_meq_128[ofst_meq128].value[7] = mac_addr[4];
1716 memset(eq_atrb->offset_meq_128[ofst_meq128].value + 8, 0, 8);
1717}
1718
1719int ipa_generate_flt_eq(enum ipa_ip_type ip,
1720 const struct ipa_rule_attrib *attrib,
1721 struct ipa_ipfltri_rule_eq *eq_atrb)
1722{
1723 u8 ofst_meq32 = 0;
1724 u8 ihl_ofst_rng16 = 0;
1725 u8 ihl_ofst_meq32 = 0;
1726 u8 ofst_meq128 = 0;
1727 u16 eq_bitmap = 0;
1728 u16 *en_rule = &eq_bitmap;
1729
1730 if (ip == IPA_IP_v4) {
1731
1732 /* error check */
1733 if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
1734 attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask &
1735 IPA_FLT_FLOW_LABEL) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301736 IPAERR_RL("v6 attrib's specified for v4 rule\n");
Amir Levy9659e592016-10-27 18:08:27 +03001737 return -EPERM;
1738 }
1739
1740 if (attrib->attrib_mask & IPA_FLT_TOS) {
1741 *en_rule |= IPA_TOS_EQ;
1742 eq_atrb->tos_eq_present = 1;
1743 eq_atrb->tos_eq = attrib->u.v4.tos;
1744 }
1745
1746 if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
1747 if (ipa_ofst_meq32[ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301748 IPAERR_RL("ran out of meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001749 return -EPERM;
1750 }
1751 *en_rule |= ipa_ofst_meq32[ofst_meq32];
1752 eq_atrb->offset_meq_32[ofst_meq32].offset = 0;
1753 eq_atrb->offset_meq_32[ofst_meq32].mask =
1754 attrib->tos_mask << 16;
1755 eq_atrb->offset_meq_32[ofst_meq32].value =
1756 attrib->tos_value << 16;
1757 ofst_meq32++;
1758 }
1759
1760 if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
1761 *en_rule |= IPA_PROTOCOL_EQ;
1762 eq_atrb->protocol_eq_present = 1;
1763 eq_atrb->protocol_eq = attrib->u.v4.protocol;
1764 }
1765
1766 if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
1767 if (ipa_ofst_meq32[ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301768 IPAERR_RL("ran out of meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001769 return -EPERM;
1770 }
1771 *en_rule |= ipa_ofst_meq32[ofst_meq32];
1772 eq_atrb->offset_meq_32[ofst_meq32].offset = 12;
1773 eq_atrb->offset_meq_32[ofst_meq32].mask =
1774 attrib->u.v4.src_addr_mask;
1775 eq_atrb->offset_meq_32[ofst_meq32].value =
1776 attrib->u.v4.src_addr;
1777 ofst_meq32++;
1778 }
1779
1780 if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
1781 if (ipa_ofst_meq32[ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301782 IPAERR_RL("ran out of meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001783 return -EPERM;
1784 }
1785 *en_rule |= ipa_ofst_meq32[ofst_meq32];
1786 eq_atrb->offset_meq_32[ofst_meq32].offset = 16;
1787 eq_atrb->offset_meq_32[ofst_meq32].mask =
1788 attrib->u.v4.dst_addr_mask;
1789 eq_atrb->offset_meq_32[ofst_meq32].value =
1790 attrib->u.v4.dst_addr;
1791 ofst_meq32++;
1792 }
1793
1794 if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
1795 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301796 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001797 return -EPERM;
1798 }
1799 if (attrib->src_port_hi < attrib->src_port_lo) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301800 IPAERR_RL("bad src port range param\n");
Amir Levy9659e592016-10-27 18:08:27 +03001801 return -EPERM;
1802 }
1803 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
1804 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
1805 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
1806 = attrib->src_port_lo;
1807 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
1808 = attrib->src_port_hi;
1809 ihl_ofst_rng16++;
1810 }
1811
1812 if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
1813 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301814 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001815 return -EPERM;
1816 }
1817 if (attrib->dst_port_hi < attrib->dst_port_lo) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301818 IPAERR_RL("bad dst port range param\n");
Amir Levy9659e592016-10-27 18:08:27 +03001819 return -EPERM;
1820 }
1821 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
1822 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
1823 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
1824 = attrib->dst_port_lo;
1825 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
1826 = attrib->dst_port_hi;
1827 ihl_ofst_rng16++;
1828 }
1829
1830 if (attrib->attrib_mask & IPA_FLT_TYPE) {
1831 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301832 IPAERR_RL("ran out of ihl_meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001833 return -EPERM;
1834 }
1835 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
1836 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
1837 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
1838 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
1839 attrib->type;
1840 ihl_ofst_meq32++;
1841 }
1842
1843 if (attrib->attrib_mask & IPA_FLT_CODE) {
1844 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301845 IPAERR_RL("ran out of ihl_meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001846 return -EPERM;
1847 }
1848 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
1849 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
1850 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
1851 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
1852 attrib->code;
1853 ihl_ofst_meq32++;
1854 }
1855
1856 if (attrib->attrib_mask & IPA_FLT_SPI) {
1857 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301858 IPAERR_RL("ran out of ihl_meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001859 return -EPERM;
1860 }
1861 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
1862 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
1863 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
1864 0xFFFFFFFF;
1865 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
1866 attrib->spi;
1867 ihl_ofst_meq32++;
1868 }
1869
1870 if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
1871 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301872 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001873 return -EPERM;
1874 }
1875 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
1876 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
1877 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
1878 = attrib->src_port;
1879 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
1880 = attrib->src_port;
1881 ihl_ofst_rng16++;
1882 }
1883
1884 if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
1885 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301886 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001887 return -EPERM;
1888 }
1889 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
1890 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
1891 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
1892 = attrib->dst_port;
1893 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
1894 = attrib->dst_port;
1895 ihl_ofst_rng16++;
1896 }
1897
1898 if (attrib->attrib_mask & IPA_FLT_META_DATA) {
1899 *en_rule |= IPA_METADATA_COMPARE;
1900 eq_atrb->metadata_meq32_present = 1;
1901 eq_atrb->metadata_meq32.offset = 0;
1902 eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
1903 eq_atrb->metadata_meq32.value = attrib->meta_data;
1904 }
1905
1906 if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
1907 *en_rule |= IPA_IS_FRAG;
1908 eq_atrb->ipv4_frag_eq_present = 1;
1909 }
1910
1911 if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
1912 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301913 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001914 return -EPERM;
1915 }
1916 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1917
1918 /* -14 => offset of dst mac addr in Ethernet II hdr */
1919 ipa_generate_flt_mac_addr_eq(eq_atrb, -14,
1920 attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
1921 ofst_meq128);
1922
1923 ofst_meq128++;
1924 }
1925
1926 if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
1927 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301928 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001929 return -EPERM;
1930 }
1931 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1932
1933 /* -8 => offset of src mac addr in Ethernet II hdr */
1934 ipa_generate_flt_mac_addr_eq(eq_atrb, -8,
1935 attrib->src_mac_addr_mask, attrib->src_mac_addr,
1936 ofst_meq128);
1937
1938 ofst_meq128++;
1939 }
1940
1941 if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
1942 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301943 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001944 return -EPERM;
1945 }
1946 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1947
1948 /* -22 => offset of dst mac addr in 802.3 hdr */
1949 ipa_generate_flt_mac_addr_eq(eq_atrb, -22,
1950 attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
1951 ofst_meq128);
1952
1953 ofst_meq128++;
1954 }
1955
1956 if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
1957 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301958 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001959 return -EPERM;
1960 }
1961 *en_rule |= ipa_ofst_meq128[ofst_meq128];
1962
1963 /* -16 => offset of src mac addr in 802.3 hdr */
1964 ipa_generate_flt_mac_addr_eq(eq_atrb, -16,
1965 attrib->src_mac_addr_mask, attrib->src_mac_addr,
1966 ofst_meq128);
1967
1968 ofst_meq128++;
1969 }
1970
1971 if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
1972 if (ipa_ofst_meq32[ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301973 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03001974 return -EPERM;
1975 }
1976 *en_rule |= ipa_ofst_meq32[ofst_meq32];
1977 eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
1978 eq_atrb->offset_meq_32[ofst_meq32].mask =
1979 htons(attrib->ether_type);
1980 eq_atrb->offset_meq_32[ofst_meq32].value =
1981 htons(attrib->ether_type);
1982 ofst_meq32++;
1983 }
1984 } else if (ip == IPA_IP_v6) {
1985
1986 /* v6 code below assumes no extension headers TODO: fix this */
1987
1988 /* error check */
1989 if (attrib->attrib_mask & IPA_FLT_TOS ||
1990 attrib->attrib_mask & IPA_FLT_PROTOCOL) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301991 IPAERR_RL("v4 attrib's specified for v6 rule\n");
Amir Levy9659e592016-10-27 18:08:27 +03001992 return -EPERM;
1993 }
1994
1995 if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
1996 *en_rule |= IPA_PROTOCOL_EQ;
1997 eq_atrb->protocol_eq_present = 1;
1998 eq_atrb->protocol_eq = attrib->u.v6.next_hdr;
1999 }
2000
2001 if (attrib->attrib_mask & IPA_FLT_TYPE) {
2002 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302003 IPAERR_RL("ran out of ihl_meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002004 return -EPERM;
2005 }
2006 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
2007 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
2008 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
2009 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
2010 attrib->type;
2011 ihl_ofst_meq32++;
2012 }
2013
2014 if (attrib->attrib_mask & IPA_FLT_CODE) {
2015 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302016 IPAERR_RL("ran out of ihl_meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002017 return -EPERM;
2018 }
2019 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
2020 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
2021 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
2022 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
2023 attrib->code;
2024 ihl_ofst_meq32++;
2025 }
2026
2027 if (attrib->attrib_mask & IPA_FLT_SPI) {
2028 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302029 IPAERR_RL("ran out of ihl_meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002030 return -EPERM;
2031 }
2032 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
2033 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
2034 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
2035 0xFFFFFFFF;
2036 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
2037 attrib->spi;
2038 ihl_ofst_meq32++;
2039 }
2040
Shihuan Liuf4433442017-09-28 17:46:41 -07002041 if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) {
2042 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
2043 IPAERR("ran out of ihl_meq32 eq\n");
2044 return -EPERM;
2045 }
2046 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
2047 /* 22 => offset of inner IP type after v6 header */
2048 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 22;
2049 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
2050 0xF0000000;
2051 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
2052 (u32)attrib->type << 24;
2053 ihl_ofst_meq32++;
2054 }
2055
2056 if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
2057 if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
2058 IPAERR("ran out of ihl_meq32 eq\n");
2059 return -EPERM;
2060 }
2061 *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
2062 /* 38 => offset of inner IPv4 addr */
2063 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 38;
2064 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
2065 attrib->u.v4.dst_addr_mask;
2066 eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
2067 attrib->u.v4.dst_addr;
2068 ihl_ofst_meq32++;
2069 }
2070
Amir Levy9659e592016-10-27 18:08:27 +03002071 if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
2072 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302073 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002074 return -EPERM;
2075 }
2076 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
2077 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
2078 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
2079 = attrib->src_port;
2080 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
2081 = attrib->src_port;
2082 ihl_ofst_rng16++;
2083 }
2084
2085 if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
2086 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302087 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002088 return -EPERM;
2089 }
2090 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
2091 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
2092 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
2093 = attrib->dst_port;
2094 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
2095 = attrib->dst_port;
2096 ihl_ofst_rng16++;
2097 }
2098
2099 if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
2100 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302101 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002102 return -EPERM;
2103 }
2104 if (attrib->src_port_hi < attrib->src_port_lo) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302105 IPAERR_RL("bad src port range param\n");
Amir Levy9659e592016-10-27 18:08:27 +03002106 return -EPERM;
2107 }
2108 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
2109 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
2110 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
2111 = attrib->src_port_lo;
2112 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
2113 = attrib->src_port_hi;
2114 ihl_ofst_rng16++;
2115 }
2116
2117 if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
2118 if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302119 IPAERR_RL("ran out of ihl_rng16 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002120 return -EPERM;
2121 }
2122 if (attrib->dst_port_hi < attrib->dst_port_lo) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302123 IPAERR_RL("bad dst port range param\n");
Amir Levy9659e592016-10-27 18:08:27 +03002124 return -EPERM;
2125 }
2126 *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
2127 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
2128 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
2129 = attrib->dst_port_lo;
2130 eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
2131 = attrib->dst_port_hi;
2132 ihl_ofst_rng16++;
2133 }
2134
2135 if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
2136 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302137 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002138 return -EPERM;
2139 }
2140 *en_rule |= ipa_ofst_meq128[ofst_meq128];
2141 eq_atrb->offset_meq_128[ofst_meq128].offset = 8;
2142 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
2143 = attrib->u.v6.src_addr_mask[0];
2144 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
2145 = attrib->u.v6.src_addr_mask[1];
2146 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
2147 = attrib->u.v6.src_addr_mask[2];
2148 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
2149 = attrib->u.v6.src_addr_mask[3];
2150 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
2151 = attrib->u.v6.src_addr[0];
2152 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
2153 = attrib->u.v6.src_addr[1];
2154 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
2155 = attrib->u.v6.src_addr[2];
2156 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
2157 12) = attrib->u.v6.src_addr[3];
2158 ofst_meq128++;
2159 }
2160
2161 if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
2162 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302163 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002164 return -EPERM;
2165 }
2166 *en_rule |= ipa_ofst_meq128[ofst_meq128];
2167 eq_atrb->offset_meq_128[ofst_meq128].offset = 24;
2168 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
2169 = attrib->u.v6.dst_addr_mask[0];
2170 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
2171 = attrib->u.v6.dst_addr_mask[1];
2172 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
2173 = attrib->u.v6.dst_addr_mask[2];
2174 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
2175 = attrib->u.v6.dst_addr_mask[3];
2176 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
2177 = attrib->u.v6.dst_addr[0];
2178 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
2179 = attrib->u.v6.dst_addr[1];
2180 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
2181 = attrib->u.v6.dst_addr[2];
2182 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
2183 12) = attrib->u.v6.dst_addr[3];
2184 ofst_meq128++;
2185 }
2186
2187 if (attrib->attrib_mask & IPA_FLT_TC) {
2188 *en_rule |= IPA_FLT_TC;
2189 eq_atrb->tc_eq_present = 1;
2190 eq_atrb->tc_eq = attrib->u.v6.tc;
2191 }
2192
2193 if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
2194 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302195 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002196 return -EPERM;
2197 }
2198 *en_rule |= ipa_ofst_meq128[ofst_meq128];
2199 eq_atrb->offset_meq_128[ofst_meq128].offset = 0;
2200 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
2201 = attrib->tos_mask << 20;
2202 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
2203 = 0;
2204 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
2205 = 0;
2206 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
2207 = 0;
2208 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
2209 = attrib->tos_value << 20;
2210 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
2211 = 0;
2212 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
2213 = 0;
2214 *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
2215 12) = 0;
2216 ofst_meq128++;
2217 }
2218
2219 if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
2220 *en_rule |= IPA_FLT_FLOW_LABEL;
2221 eq_atrb->fl_eq_present = 1;
2222 eq_atrb->fl_eq = attrib->u.v6.flow_label;
2223 }
2224
2225 if (attrib->attrib_mask & IPA_FLT_META_DATA) {
2226 *en_rule |= IPA_METADATA_COMPARE;
2227 eq_atrb->metadata_meq32_present = 1;
2228 eq_atrb->metadata_meq32.offset = 0;
2229 eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
2230 eq_atrb->metadata_meq32.value = attrib->meta_data;
2231 }
2232
2233 if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
2234 *en_rule |= IPA_IS_FRAG;
2235 eq_atrb->ipv4_frag_eq_present = 1;
2236 }
2237
2238 if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
2239 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302240 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002241 return -EPERM;
2242 }
2243 *en_rule |= ipa_ofst_meq128[ofst_meq128];
2244
2245 /* -14 => offset of dst mac addr in Ethernet II hdr */
2246 ipa_generate_flt_mac_addr_eq(eq_atrb, -14,
2247 attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
2248 ofst_meq128);
2249
2250 ofst_meq128++;
2251 }
2252
2253 if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
2254 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302255 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002256 return -EPERM;
2257 }
2258 *en_rule |= ipa_ofst_meq128[ofst_meq128];
2259
2260 /* -8 => offset of src mac addr in Ethernet II hdr */
2261 ipa_generate_flt_mac_addr_eq(eq_atrb, -8,
2262 attrib->src_mac_addr_mask, attrib->src_mac_addr,
2263 ofst_meq128);
2264
2265 ofst_meq128++;
2266 }
2267
2268 if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
2269 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302270 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002271 return -EPERM;
2272 }
2273 *en_rule |= ipa_ofst_meq128[ofst_meq128];
2274
2275 /* -22 => offset of dst mac addr in 802.3 hdr */
2276 ipa_generate_flt_mac_addr_eq(eq_atrb, -22,
2277 attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
2278 ofst_meq128);
2279
2280 ofst_meq128++;
2281 }
2282
2283 if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
2284 if (ipa_ofst_meq128[ofst_meq128] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302285 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002286 return -EPERM;
2287 }
2288 *en_rule |= ipa_ofst_meq128[ofst_meq128];
2289
2290 /* -16 => offset of src mac addr in 802.3 hdr */
2291 ipa_generate_flt_mac_addr_eq(eq_atrb, -16,
2292 attrib->src_mac_addr_mask, attrib->src_mac_addr,
2293 ofst_meq128);
2294
2295 ofst_meq128++;
2296 }
2297
2298 if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
2299 if (ipa_ofst_meq32[ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302300 IPAERR_RL("ran out of meq128 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002301 return -EPERM;
2302 }
2303 *en_rule |= ipa_ofst_meq32[ofst_meq32];
2304 eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
2305 eq_atrb->offset_meq_32[ofst_meq32].mask =
2306 htons(attrib->ether_type);
2307 eq_atrb->offset_meq_32[ofst_meq32].value =
2308 htons(attrib->ether_type);
2309 ofst_meq32++;
2310 }
2311
2312 } else {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302313 IPAERR_RL("unsupported ip %d\n", ip);
Amir Levy9659e592016-10-27 18:08:27 +03002314 return -EPERM;
2315 }
2316
2317 /*
2318 * default "rule" means no attributes set -> map to
2319 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
2320 */
2321 if (attrib->attrib_mask == 0) {
2322 if (ipa_ofst_meq32[ofst_meq32] == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05302323 IPAERR_RL("ran out of meq32 eq\n");
Amir Levy9659e592016-10-27 18:08:27 +03002324 return -EPERM;
2325 }
2326 *en_rule |= ipa_ofst_meq32[ofst_meq32];
2327 eq_atrb->offset_meq_32[ofst_meq32].offset = 0;
2328 eq_atrb->offset_meq_32[ofst_meq32].mask = 0;
2329 eq_atrb->offset_meq_32[ofst_meq32].value = 0;
2330 ofst_meq32++;
2331 }
2332
2333 eq_atrb->rule_eq_bitmap = *en_rule;
2334 eq_atrb->num_offset_meq_32 = ofst_meq32;
2335 eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16;
2336 eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32;
2337 eq_atrb->num_offset_meq_128 = ofst_meq128;
2338
2339 return 0;
2340}
2341
2342/**
2343 * ipa2_cfg_ep - IPA end-point configuration
2344 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2345 * @ipa_ep_cfg: [in] IPA end-point configuration params
2346 *
2347 * This includes nat, header, mode, aggregation and route settings and is a one
2348 * shot API to configure the IPA end-point fully
2349 *
2350 * Returns: 0 on success, negative on failure
2351 *
2352 * Note: Should not be called from atomic context
2353 */
2354int ipa2_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg)
2355{
2356 int result = -EINVAL;
2357
2358 if (unlikely(!ipa_ctx)) {
2359 IPAERR("IPA driver was not initialized\n");
2360 return -EINVAL;
2361 }
2362
2363 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
2364 ipa_ctx->ep[clnt_hdl].valid == 0 || ipa_ep_cfg == NULL) {
2365 IPAERR("bad parm.\n");
2366 return -EINVAL;
2367 }
2368
2369 result = ipa2_cfg_ep_hdr(clnt_hdl, &ipa_ep_cfg->hdr);
2370 if (result)
2371 return result;
2372
2373 result = ipa2_cfg_ep_hdr_ext(clnt_hdl, &ipa_ep_cfg->hdr_ext);
2374 if (result)
2375 return result;
2376
2377 result = ipa2_cfg_ep_aggr(clnt_hdl, &ipa_ep_cfg->aggr);
2378 if (result)
2379 return result;
2380
2381 result = ipa2_cfg_ep_cfg(clnt_hdl, &ipa_ep_cfg->cfg);
2382 if (result)
2383 return result;
2384
2385 if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) {
2386 result = ipa2_cfg_ep_nat(clnt_hdl, &ipa_ep_cfg->nat);
2387 if (result)
2388 return result;
2389
2390 result = ipa2_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode);
2391 if (result)
2392 return result;
2393
2394 result = ipa2_cfg_ep_route(clnt_hdl, &ipa_ep_cfg->route);
2395 if (result)
2396 return result;
2397
2398 result = ipa2_cfg_ep_deaggr(clnt_hdl, &ipa_ep_cfg->deaggr);
2399 if (result)
2400 return result;
2401 } else {
2402 result = ipa2_cfg_ep_metadata_mask(clnt_hdl,
2403 &ipa_ep_cfg->metadata_mask);
2404 if (result)
2405 return result;
2406 }
2407
2408 return 0;
2409}
2410
2411const char *ipa_get_nat_en_str(enum ipa_nat_en_type nat_en)
2412{
2413 switch (nat_en) {
2414 case (IPA_BYPASS_NAT):
2415 return "NAT disabled";
2416 case (IPA_SRC_NAT):
2417 return "Source NAT";
2418 case (IPA_DST_NAT):
2419 return "Dst NAT";
2420 }
2421
2422 return "undefined";
2423}
2424
2425void _ipa_cfg_ep_nat_v1_1(u32 clnt_hdl,
2426 const struct ipa_ep_cfg_nat *ep_nat)
2427{
2428 u32 reg_val = 0;
2429
2430 IPA_SETFIELD_IN_REG(reg_val, ep_nat->nat_en,
2431 IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT,
2432 IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK);
2433
2434 ipa_write_reg(ipa_ctx->mmio,
2435 IPA_ENDP_INIT_NAT_N_OFST_v1_1(clnt_hdl),
2436 reg_val);
2437}
2438
2439void _ipa_cfg_ep_nat_v2_0(u32 clnt_hdl,
2440 const struct ipa_ep_cfg_nat *ep_nat)
2441{
2442 u32 reg_val = 0;
2443
2444 IPA_SETFIELD_IN_REG(reg_val, ep_nat->nat_en,
2445 IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT,
2446 IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK);
2447
2448 ipa_write_reg(ipa_ctx->mmio,
2449 IPA_ENDP_INIT_NAT_N_OFST_v2_0(clnt_hdl),
2450 reg_val);
2451}
2452
2453/**
2454 * ipa2_cfg_ep_nat() - IPA end-point NAT configuration
2455 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2456 * @ipa_ep_cfg: [in] IPA end-point configuration params
2457 *
2458 * Returns: 0 on success, negative on failure
2459 *
2460 * Note: Should not be called from atomic context
2461 */
2462int ipa2_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat)
2463{
2464 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
2465 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_nat == NULL) {
2466 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
2467 clnt_hdl,
2468 ipa_ctx->ep[clnt_hdl].valid);
2469 return -EINVAL;
2470 }
2471
2472 if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
2473 IPAERR("NAT does not apply to IPA out EP %d\n", clnt_hdl);
2474 return -EINVAL;
2475 }
2476
2477 IPADBG("pipe=%d, nat_en=%d(%s)\n",
2478 clnt_hdl,
2479 ep_nat->nat_en,
2480 ipa_get_nat_en_str(ep_nat->nat_en));
2481
2482 /* copy over EP cfg */
2483 ipa_ctx->ep[clnt_hdl].cfg.nat = *ep_nat;
2484
2485 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
2486
2487 ipa_ctx->ctrl->ipa_cfg_ep_nat(clnt_hdl, ep_nat);
2488
2489 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
2490
2491 return 0;
2492}
2493
/* Status configuration is not supported on IPA HW v1.1; log and return. */
static void _ipa_cfg_ep_status_v1_1(u32 clnt_hdl,
	const struct ipa_ep_cfg_status *ep_status)
{
	IPADBG("Not supported for version 1.1\n");
}
2499
2500static void _ipa_cfg_ep_status_v2_0(u32 clnt_hdl,
2501 const struct ipa_ep_cfg_status *ep_status)
2502{
2503 u32 reg_val = 0;
2504
2505 IPA_SETFIELD_IN_REG(reg_val, ep_status->status_en,
2506 IPA_ENDP_STATUS_n_STATUS_EN_SHFT,
2507 IPA_ENDP_STATUS_n_STATUS_EN_BMSK);
2508
2509 IPA_SETFIELD_IN_REG(reg_val, ep_status->status_ep,
2510 IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT,
2511 IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK);
2512
2513 ipa_write_reg(ipa_ctx->mmio,
2514 IPA_ENDP_STATUS_n_OFST(clnt_hdl),
2515 reg_val);
2516}
2517
2518/**
2519 * ipa2_cfg_ep_status() - IPA end-point status configuration
2520 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2521 * @ipa_ep_cfg: [in] IPA end-point configuration params
2522 *
2523 * Returns: 0 on success, negative on failure
2524 *
2525 * Note: Should not be called from atomic context
2526 */
2527int ipa2_cfg_ep_status(u32 clnt_hdl, const struct ipa_ep_cfg_status *ep_status)
2528{
2529 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
2530 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_status == NULL) {
2531 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
2532 clnt_hdl,
2533 ipa_ctx->ep[clnt_hdl].valid);
2534 return -EINVAL;
2535 }
2536
2537 IPADBG("pipe=%d, status_en=%d status_ep=%d\n",
2538 clnt_hdl,
2539 ep_status->status_en,
2540 ep_status->status_ep);
2541
2542 /* copy over EP cfg */
2543 ipa_ctx->ep[clnt_hdl].status = *ep_status;
2544
2545 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
2546
2547 ipa_ctx->ctrl->ipa_cfg_ep_status(clnt_hdl, ep_status);
2548
2549 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
2550
2551 return 0;
2552}
2553
/* CFG-block configuration is not supported on IPA HW v1.1; log and return. */
static void _ipa_cfg_ep_cfg_v1_1(u32 clnt_hdl,
	const struct ipa_ep_cfg_cfg *cfg)
{
	IPADBG("Not supported for version 1.1\n");
}
2559
2560static void _ipa_cfg_ep_cfg_v2_0(u32 clnt_hdl,
2561 const struct ipa_ep_cfg_cfg *cfg)
2562{
2563 u32 reg_val = 0;
2564
2565 IPA_SETFIELD_IN_REG(reg_val, cfg->frag_offload_en,
2566 IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT,
2567 IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK);
2568 IPA_SETFIELD_IN_REG(reg_val, cfg->cs_offload_en,
2569 IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT,
2570 IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK);
2571 IPA_SETFIELD_IN_REG(reg_val, cfg->cs_metadata_hdr_offset,
2572 IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT,
2573 IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK);
2574
2575 ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_CFG_n_OFST(clnt_hdl),
2576 reg_val);
2577}
2578
2579/**
2580 * ipa2_cfg_ep_cfg() - IPA end-point cfg configuration
2581 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2582 * @ipa_ep_cfg: [in] IPA end-point configuration params
2583 *
2584 * Returns: 0 on success, negative on failure
2585 *
2586 * Note: Should not be called from atomic context
2587 */
2588int ipa2_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg)
2589{
2590 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
2591 ipa_ctx->ep[clnt_hdl].valid == 0 || cfg == NULL) {
2592 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
2593 clnt_hdl,
2594 ipa_ctx->ep[clnt_hdl].valid);
2595 return -EINVAL;
2596 }
2597
2598 IPADBG("pipe=%d, frag_ofld_en=%d cs_ofld_en=%d mdata_hdr_ofst=%d\n",
2599 clnt_hdl,
2600 cfg->frag_offload_en,
2601 cfg->cs_offload_en,
2602 cfg->cs_metadata_hdr_offset);
2603
2604 /* copy over EP cfg */
2605 ipa_ctx->ep[clnt_hdl].cfg.cfg = *cfg;
2606
2607 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
2608
2609 ipa_ctx->ctrl->ipa_cfg_ep_cfg(clnt_hdl, cfg);
2610
2611 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
2612
2613 return 0;
2614}
2615
/* Metadata-mask configuration is not supported on IPA HW v1.1; log only. */
static void _ipa_cfg_ep_metadata_mask_v1_1(u32 clnt_hdl,
		const struct ipa_ep_cfg_metadata_mask *metadata_mask)
{
	IPADBG("Not supported for version 1.1\n");
}
2621
2622static void _ipa_cfg_ep_metadata_mask_v2_0(u32 clnt_hdl,
2623 const struct ipa_ep_cfg_metadata_mask *metadata_mask)
2624{
2625 u32 reg_val = 0;
2626
2627 IPA_SETFIELD_IN_REG(reg_val, metadata_mask->metadata_mask,
2628 IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT,
2629 IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK);
2630
2631 ipa_write_reg(ipa_ctx->mmio,
2632 IPA_ENDP_INIT_HDR_METADATA_MASK_n_OFST(clnt_hdl),
2633 reg_val);
2634}
2635
2636/**
2637 * ipa2_cfg_ep_metadata_mask() - IPA end-point meta-data mask configuration
2638 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2639 * @ipa_ep_cfg: [in] IPA end-point configuration params
2640 *
2641 * Returns: 0 on success, negative on failure
2642 *
2643 * Note: Should not be called from atomic context
2644 */
2645int ipa2_cfg_ep_metadata_mask(u32 clnt_hdl,
2646 const struct ipa_ep_cfg_metadata_mask *metadata_mask)
2647{
2648 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
2649 ipa_ctx->ep[clnt_hdl].valid == 0 || metadata_mask == NULL) {
2650 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
2651 clnt_hdl,
2652 ipa_ctx->ep[clnt_hdl].valid);
2653 return -EINVAL;
2654 }
2655
2656 IPADBG("pipe=%d, metadata_mask=0x%x\n",
2657 clnt_hdl,
2658 metadata_mask->metadata_mask);
2659
2660 /* copy over EP cfg */
2661 ipa_ctx->ep[clnt_hdl].cfg.metadata_mask = *metadata_mask;
2662
2663 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
2664
2665 ipa_ctx->ctrl->ipa_cfg_ep_metadata_mask(clnt_hdl, metadata_mask);
2666
2667 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
2668
2669 return 0;
2670}
2671
2672void _ipa_cfg_ep_hdr_v1_1(u32 pipe_number,
2673 const struct ipa_ep_cfg_hdr *ep_hdr)
2674{
2675 u32 val = 0;
2676
2677 val = IPA_SETFIELD(ep_hdr->hdr_len,
2678 IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT,
2679 IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK) |
2680 IPA_SETFIELD(ep_hdr->hdr_ofst_metadata_valid,
2681 IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT,
2682 IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK) |
2683 IPA_SETFIELD(ep_hdr->hdr_ofst_metadata,
2684 IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT,
2685 IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK) |
2686 IPA_SETFIELD(ep_hdr->hdr_additional_const_len,
2687 IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT,
2688 IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK) |
2689 IPA_SETFIELD(ep_hdr->hdr_ofst_pkt_size_valid,
2690 IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT,
2691 IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK) |
2692 IPA_SETFIELD(ep_hdr->hdr_ofst_pkt_size,
2693 IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT,
2694 IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK) |
2695 IPA_SETFIELD(ep_hdr->hdr_a5_mux,
2696 IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT,
2697 IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK);
2698 ipa_write_reg(ipa_ctx->mmio,
2699 IPA_ENDP_INIT_HDR_N_OFST_v1_1(pipe_number), val);
2700}
2701
2702void _ipa_cfg_ep_hdr_v2_0(u32 pipe_number,
2703 const struct ipa_ep_cfg_hdr *ep_hdr)
2704{
2705 u32 reg_val = 0;
2706
2707 IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_metadata_reg_valid,
2708 IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_SHFT_v2,
2709 IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_BMSK_v2);
2710
2711 IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_remove_additional,
2712 IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_SHFT_v2,
2713 IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_BMSK_v2);
2714
2715 IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_a5_mux,
2716 IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT,
2717 IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK);
2718
2719 IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_pkt_size,
2720 IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT,
2721 IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK);
2722
2723 IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_pkt_size_valid,
2724 IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT,
2725 IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK);
2726
2727 IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_additional_const_len,
2728 IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT,
2729 IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK);
2730
2731 IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_metadata,
2732 IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT,
2733 IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK);
2734
2735 IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_metadata_valid,
2736 IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT,
2737 IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK);
2738
2739 IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_len,
2740 IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT,
2741 IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK);
2742
2743 ipa_write_reg(ipa_ctx->mmio,
2744 IPA_ENDP_INIT_HDR_N_OFST_v2_0(pipe_number), reg_val);
2745}
2746
2747/**
2748 * ipa2_cfg_ep_hdr() - IPA end-point header configuration
2749 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2750 * @ipa_ep_cfg: [in] IPA end-point configuration params
2751 *
2752 * Returns: 0 on success, negative on failure
2753 *
2754 * Note: Should not be called from atomic context
2755 */
2756int ipa2_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr)
2757{
2758 struct ipa_ep_context *ep;
2759
2760 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
2761 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_hdr == NULL) {
2762 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
2763 clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
2764 return -EINVAL;
2765 }
2766 IPADBG("pipe=%d remove_additional=%d, a5_mux=%d, ofst_pkt_size=0x%x\n",
2767 clnt_hdl,
2768 ep_hdr->hdr_remove_additional,
2769 ep_hdr->hdr_a5_mux,
2770 ep_hdr->hdr_ofst_pkt_size);
2771
2772 IPADBG("ofst_pkt_size_valid=%d, additional_const_len=0x%x\n",
2773 ep_hdr->hdr_ofst_pkt_size_valid,
2774 ep_hdr->hdr_additional_const_len);
2775
2776 IPADBG("ofst_metadata=0x%x, ofst_metadata_valid=%d, len=0x%x",
2777 ep_hdr->hdr_ofst_metadata,
2778 ep_hdr->hdr_ofst_metadata_valid,
2779 ep_hdr->hdr_len);
2780
2781 ep = &ipa_ctx->ep[clnt_hdl];
2782
2783 /* copy over EP cfg */
2784 ep->cfg.hdr = *ep_hdr;
2785
2786 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
2787
2788 ipa_ctx->ctrl->ipa_cfg_ep_hdr(clnt_hdl, &ep->cfg.hdr);
2789
2790 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
2791
2792 return 0;
2793}
2794
/*
 * Extended header configuration is not supported on IPA HW v1.1; log and
 * report success so callers proceed with the remaining pipe configuration.
 */
static int _ipa_cfg_ep_hdr_ext_v1_1(u32 clnt_hdl,
				const struct ipa_ep_cfg_hdr_ext *ep_hdr)
{
	IPADBG("Not supported for version 1.1\n");
	return 0;
}
2801
2802static int _ipa_cfg_ep_hdr_ext(u32 clnt_hdl,
2803 const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext, u32 reg_val)
2804{
2805 u8 hdr_endianness = ep_hdr_ext->hdr_little_endian ? 0 : 1;
2806
2807 IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad_offset,
2808 IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT,
2809 IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK);
2810
2811 IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_payload_len_inc_padding,
2812 IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT,
2813 IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK);
2814
2815 IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad,
2816 IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT,
2817 IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK);
2818
2819 IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad_valid,
2820 IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT,
2821 IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK);
2822
2823 IPA_SETFIELD_IN_REG(reg_val, hdr_endianness,
2824 IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT,
2825 IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK);
2826
2827 ipa_write_reg(ipa_ctx->mmio,
2828 IPA_ENDP_INIT_HDR_EXT_n_OFST_v2_0(clnt_hdl), reg_val);
2829
2830 return 0;
2831}
2832
2833static int _ipa_cfg_ep_hdr_ext_v2_0(u32 clnt_hdl,
2834 const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
2835{
2836 u32 reg_val = 0;
2837
2838 IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment,
2839 IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT,
2840 IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_0);
2841
2842 return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val);
2843}
2844
2845static int _ipa_cfg_ep_hdr_ext_v2_5(u32 clnt_hdl,
2846 const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
2847{
2848 u32 reg_val = 0;
2849
2850 IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment,
2851 IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT,
2852 IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_5);
2853
2854 return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val);
2855
2856}
2857
2858static int _ipa_cfg_ep_hdr_ext_v2_6L(u32 clnt_hdl,
2859 const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
2860{
2861 u32 reg_val = 0;
2862
2863 IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment,
2864 IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT,
2865 IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_5);
2866
2867 return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val);
2868
2869}
2870
2871/**
2872 * ipa2_cfg_ep_hdr_ext() - IPA end-point extended header configuration
2873 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2874 * @ep_hdr_ext: [in] IPA end-point configuration params
2875 *
2876 * Returns: 0 on success, negative on failure
2877 *
2878 * Note: Should not be called from atomic context
2879 */
2880int ipa2_cfg_ep_hdr_ext(u32 clnt_hdl,
2881 const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
2882{
2883 struct ipa_ep_context *ep;
2884
2885 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
2886 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_hdr_ext == NULL) {
2887 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
2888 clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
2889 return -EINVAL;
2890 }
2891
2892 IPADBG("pipe=%d hdr_pad_to_alignment=%d\n",
2893 clnt_hdl,
2894 ep_hdr_ext->hdr_pad_to_alignment);
2895
2896 IPADBG("hdr_total_len_or_pad_offset=%d\n",
2897 ep_hdr_ext->hdr_total_len_or_pad_offset);
2898
2899 IPADBG("hdr_payload_len_inc_padding=%d hdr_total_len_or_pad=%d\n",
2900 ep_hdr_ext->hdr_payload_len_inc_padding,
2901 ep_hdr_ext->hdr_total_len_or_pad);
2902
2903 IPADBG("hdr_total_len_or_pad_valid=%d hdr_little_endian=%d\n",
2904 ep_hdr_ext->hdr_total_len_or_pad_valid,
2905 ep_hdr_ext->hdr_little_endian);
2906
2907 ep = &ipa_ctx->ep[clnt_hdl];
2908
2909 /* copy over EP cfg */
2910 ep->cfg.hdr_ext = *ep_hdr_ext;
2911
2912 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
2913
2914 ipa_ctx->ctrl->ipa_cfg_ep_hdr_ext(clnt_hdl, &ep->cfg.hdr_ext);
2915
2916 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
2917
2918 return 0;
2919}
2920
2921/**
2922 * ipa2_cfg_ep_hdr() - IPA end-point Control configuration
2923 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
2924 * @ipa_ep_cfg_ctrl: [in] IPA end-point configuration params
2925 *
2926 * Returns: 0 on success, negative on failure
2927 */
2928int ipa2_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
2929{
2930 u32 reg_val = 0;
2931
2932 if (clnt_hdl >= ipa_ctx->ipa_num_pipes || ep_ctrl == NULL) {
2933 IPAERR("bad parm, clnt_hdl = %d\n", clnt_hdl);
2934 return -EINVAL;
2935 }
2936
2937 IPADBG("pipe=%d ep_suspend=%d, ep_delay=%d\n",
2938 clnt_hdl,
2939 ep_ctrl->ipa_ep_suspend,
2940 ep_ctrl->ipa_ep_delay);
2941
2942 IPA_SETFIELD_IN_REG(reg_val, ep_ctrl->ipa_ep_suspend,
2943 IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_SHFT,
2944 IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_BMSK);
2945
2946 IPA_SETFIELD_IN_REG(reg_val, ep_ctrl->ipa_ep_delay,
2947 IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT,
2948 IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK);
2949
2950 ipa_write_reg(ipa_ctx->mmio,
2951 IPA_ENDP_INIT_CTRL_N_OFST(clnt_hdl), reg_val);
2952
2953 return 0;
2954
2955}
2956
/**
 * ipa_cfg_aggr_cntr_granularity() - granularity of the AGGR timer configuration
 * @aggr_granularity: [in] defines the granularity of AGGR timers
 * number of units of 1/32msec
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_cfg_aggr_cntr_granularity(u8 aggr_granularity)
{
	u32 reg_val = 0;

	/*
	 * NOTE(review): "<=" rejects aggr_granularity == IPA_AGGR_GRAN_MIN
	 * (1) even though the (value - 1) register encoding below could
	 * represent it — confirm the lower bound is meant to be exclusive.
	 */
	if (aggr_granularity <= IPA_AGGR_GRAN_MIN ||
		aggr_granularity > IPA_AGGR_GRAN_MAX) {
		IPAERR("bad param, aggr_granularity = %d\n",
			aggr_granularity);
		return -EINVAL;
	}
	IPADBG("aggr_granularity=%d\n", aggr_granularity);

	/* read-modify-write: clear only the AGGR granularity field */
	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_COUNTER_CFG_OFST);
	reg_val = (reg_val & ~IPA_COUNTER_CFG_AGGR_GRAN_BMSK);

	/* HW encodes the granularity as (units - 1) */
	IPA_SETFIELD_IN_REG(reg_val, aggr_granularity - 1,
			IPA_COUNTER_CFG_AGGR_GRAN_SHFT,
			IPA_COUNTER_CFG_AGGR_GRAN_BMSK);

	ipa_write_reg(ipa_ctx->mmio,
		IPA_COUNTER_CFG_OFST, reg_val);

	return 0;

}
EXPORT_SYMBOL(ipa_cfg_aggr_cntr_granularity);
2990
/**
 * ipa_cfg_eot_coal_cntr_granularity() - granularity of EOT_COAL timer
 * configuration
 * @eot_coal_granularity: defines the granularity of EOT_COAL timers
 * number of units of 1/32msec
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_cfg_eot_coal_cntr_granularity(u8 eot_coal_granularity)
{
	u32 reg_val = 0;

	/*
	 * NOTE(review): as in ipa_cfg_aggr_cntr_granularity(), "<="
	 * excludes the documented minimum value (1) — confirm intent.
	 */
	if (eot_coal_granularity <= IPA_EOT_COAL_GRAN_MIN ||
		eot_coal_granularity > IPA_EOT_COAL_GRAN_MAX) {
		IPAERR("bad parm, eot_coal_granularity = %d\n",
			eot_coal_granularity);
		return -EINVAL;
	}
	IPADBG("eot_coal_granularity=%d\n", eot_coal_granularity);

	/* read-modify-write: clear only the EOT_COAL granularity field */
	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_COUNTER_CFG_OFST);
	reg_val = (reg_val & ~IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK);

	/* HW encodes the granularity as (units - 1) */
	IPA_SETFIELD_IN_REG(reg_val, eot_coal_granularity - 1,
			IPA_COUNTER_CFG_EOT_COAL_GRAN_SHFT,
			IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK);

	ipa_write_reg(ipa_ctx->mmio,
		IPA_COUNTER_CFG_OFST, reg_val);

	return 0;

}
EXPORT_SYMBOL(ipa_cfg_eot_coal_cntr_granularity);
3025
3026const char *ipa_get_mode_type_str(enum ipa_mode_type mode)
3027{
3028 switch (mode) {
3029 case (IPA_BASIC):
3030 return "Basic";
3031 case (IPA_ENABLE_FRAMING_HDLC):
3032 return "HDLC framing";
3033 case (IPA_ENABLE_DEFRAMING_HDLC):
3034 return "HDLC de-framing";
3035 case (IPA_DMA):
3036 return "DMA";
3037 }
3038
3039 return "undefined";
3040}
3041
3042void _ipa_cfg_ep_mode_v1_1(u32 pipe_number, u32 dst_pipe_number,
3043 const struct ipa_ep_cfg_mode *ep_mode)
3044{
3045 u32 reg_val = 0;
3046
3047 IPA_SETFIELD_IN_REG(reg_val, ep_mode->mode,
3048 IPA_ENDP_INIT_MODE_N_MODE_SHFT,
3049 IPA_ENDP_INIT_MODE_N_MODE_BMSK);
3050
3051 IPA_SETFIELD_IN_REG(reg_val, dst_pipe_number,
3052 IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v1_1,
3053 IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v1_1);
3054
3055 ipa_write_reg(ipa_ctx->mmio,
3056 IPA_ENDP_INIT_MODE_N_OFST_v1_1(pipe_number), reg_val);
3057}
3058
3059void _ipa_cfg_ep_mode_v2_0(u32 pipe_number, u32 dst_pipe_number,
3060 const struct ipa_ep_cfg_mode *ep_mode)
3061{
3062 u32 reg_val = 0;
3063
3064 IPA_SETFIELD_IN_REG(reg_val, ep_mode->mode,
3065 IPA_ENDP_INIT_MODE_N_MODE_SHFT,
3066 IPA_ENDP_INIT_MODE_N_MODE_BMSK);
3067
3068 IPA_SETFIELD_IN_REG(reg_val, dst_pipe_number,
3069 IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v2_0,
3070 IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v2_0);
3071
3072 ipa_write_reg(ipa_ctx->mmio,
3073 IPA_ENDP_INIT_MODE_N_OFST_v2_0(pipe_number), reg_val);
3074}
3075
3076/**
3077 * ipa2_cfg_ep_mode() - IPA end-point mode configuration
3078 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
3079 * @ipa_ep_cfg: [in] IPA end-point configuration params
3080 *
3081 * Returns: 0 on success, negative on failure
3082 *
3083 * Note: Should not be called from atomic context
3084 */
3085int ipa2_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode)
3086{
3087 int ep;
3088
3089 if (unlikely(!ipa_ctx)) {
3090 IPAERR("IPA driver was not initialized\n");
3091 return -EINVAL;
3092 }
3093
3094 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
3095 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_mode == NULL) {
3096 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
3097 clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
3098 return -EINVAL;
3099 }
3100
3101 if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
3102 IPAERR("MODE does not apply to IPA out EP %d\n", clnt_hdl);
3103 return -EINVAL;
3104 }
3105
3106 ep = ipa2_get_ep_mapping(ep_mode->dst);
3107 if (ep == -1 && ep_mode->mode == IPA_DMA) {
3108 IPAERR("dst %d does not exist\n", ep_mode->dst);
3109 return -EINVAL;
3110 }
3111
3112 WARN_ON(ep_mode->mode == IPA_DMA && IPA_CLIENT_IS_PROD(ep_mode->dst));
3113
3114 if (!IPA_CLIENT_IS_CONS(ep_mode->dst))
3115 ep = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
3116
3117 IPADBG("pipe=%d mode=%d(%s), dst_client_number=%d",
3118 clnt_hdl,
3119 ep_mode->mode,
3120 ipa_get_mode_type_str(ep_mode->mode),
3121 ep_mode->dst);
3122
3123 /* copy over EP cfg */
3124 ipa_ctx->ep[clnt_hdl].cfg.mode = *ep_mode;
3125 ipa_ctx->ep[clnt_hdl].dst_pipe_index = ep;
3126
3127 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
3128
3129 ipa_ctx->ctrl->ipa_cfg_ep_mode(clnt_hdl,
3130 ipa_ctx->ep[clnt_hdl].dst_pipe_index,
3131 ep_mode);
3132
3133 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
3134
3135 return 0;
3136}
3137
3138const char *get_aggr_enable_str(enum ipa_aggr_en_type aggr_en)
3139{
3140 switch (aggr_en) {
3141 case (IPA_BYPASS_AGGR):
3142 return "no aggregation";
3143 case (IPA_ENABLE_AGGR):
3144 return "aggregation enabled";
3145 case (IPA_ENABLE_DEAGGR):
3146 return "de-aggregation enabled";
3147 }
3148
3149 return "undefined";
3150}
3151
3152const char *get_aggr_type_str(enum ipa_aggr_type aggr_type)
3153{
3154 switch (aggr_type) {
3155 case (IPA_MBIM_16):
3156 return "MBIM_16";
3157 case (IPA_HDLC):
3158 return "HDLC";
3159 case (IPA_TLP):
3160 return "TLP";
3161 case (IPA_RNDIS):
3162 return "RNDIS";
3163 case (IPA_GENERIC):
3164 return "GENERIC";
3165 case (IPA_QCMAP):
3166 return "QCMAP";
3167 }
3168 return "undefined";
3169}
3170
3171void _ipa_cfg_ep_aggr_v1_1(u32 pipe_number,
3172 const struct ipa_ep_cfg_aggr *ep_aggr)
3173{
3174 u32 reg_val = 0;
3175
3176 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_en,
3177 IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT,
3178 IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK);
3179
3180 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr,
3181 IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT,
3182 IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK);
3183
3184 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_byte_limit,
3185 IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT,
3186 IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK);
3187
3188 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_time_limit,
3189 IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT,
3190 IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK);
3191
3192 ipa_write_reg(ipa_ctx->mmio,
3193 IPA_ENDP_INIT_AGGR_N_OFST_v1_1(pipe_number), reg_val);
3194}
3195
3196void _ipa_cfg_ep_aggr_v2_0(u32 pipe_number,
3197 const struct ipa_ep_cfg_aggr *ep_aggr)
3198{
3199 u32 reg_val = 0;
3200
3201 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_en,
3202 IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT,
3203 IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK);
3204
3205 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr,
3206 IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT,
3207 IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK);
3208
3209 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_byte_limit,
3210 IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT,
3211 IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK);
3212
3213 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_time_limit,
3214 IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT,
3215 IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK);
3216
3217 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_pkt_limit,
3218 IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT,
3219 IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK);
3220
3221 IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_sw_eof_active,
3222 IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT,
3223 IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK);
3224
3225 ipa_write_reg(ipa_ctx->mmio,
3226 IPA_ENDP_INIT_AGGR_N_OFST_v2_0(pipe_number), reg_val);
3227}
3228
3229/**
3230 * ipa2_cfg_ep_aggr() - IPA end-point aggregation configuration
3231 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
3232 * @ipa_ep_cfg: [in] IPA end-point configuration params
3233 *
3234 * Returns: 0 on success, negative on failure
3235 *
3236 * Note: Should not be called from atomic context
3237 */
3238int ipa2_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr)
3239{
3240 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
3241 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_aggr == NULL) {
3242 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
3243 clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
3244 return -EINVAL;
3245 }
3246
3247 IPADBG("pipe=%d en=%d(%s), type=%d(%s), byte_limit=%d, time_limit=%d\n",
3248 clnt_hdl,
3249 ep_aggr->aggr_en,
3250 get_aggr_enable_str(ep_aggr->aggr_en),
3251 ep_aggr->aggr,
3252 get_aggr_type_str(ep_aggr->aggr),
3253 ep_aggr->aggr_byte_limit,
3254 ep_aggr->aggr_time_limit);
3255
3256 /* copy over EP cfg */
3257 ipa_ctx->ep[clnt_hdl].cfg.aggr = *ep_aggr;
3258
3259 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
3260
3261 ipa_ctx->ctrl->ipa_cfg_ep_aggr(clnt_hdl, ep_aggr);
3262
3263 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
3264
3265 return 0;
3266}
3267
3268void _ipa_cfg_ep_route_v1_1(u32 pipe_index, u32 rt_tbl_index)
3269{
3270 int reg_val = 0;
3271
3272 IPA_SETFIELD_IN_REG(reg_val, rt_tbl_index,
3273 IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT,
3274 IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK);
3275
3276 ipa_write_reg(ipa_ctx->mmio,
3277 IPA_ENDP_INIT_ROUTE_N_OFST_v1_1(pipe_index),
3278 reg_val);
3279}
3280
3281void _ipa_cfg_ep_route_v2_0(u32 pipe_index, u32 rt_tbl_index)
3282{
3283 int reg_val = 0;
3284
3285 IPA_SETFIELD_IN_REG(reg_val, rt_tbl_index,
3286 IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT,
3287 IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK);
3288
3289 ipa_write_reg(ipa_ctx->mmio,
3290 IPA_ENDP_INIT_ROUTE_N_OFST_v2_0(pipe_index),
3291 reg_val);
3292}
3293
3294/**
3295 * ipa2_cfg_ep_route() - IPA end-point routing configuration
3296 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
3297 * @ipa_ep_cfg: [in] IPA end-point configuration params
3298 *
3299 * Returns: 0 on success, negative on failure
3300 *
3301 * Note: Should not be called from atomic context
3302 */
3303int ipa2_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route)
3304{
3305 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
3306 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_route == NULL) {
3307 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
3308 clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
3309 return -EINVAL;
3310 }
3311
3312 if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
3313 IPAERR("ROUTE does not apply to IPA out EP %d\n",
3314 clnt_hdl);
3315 return -EINVAL;
3316 }
3317
3318 /*
3319 * if DMA mode was configured previously for this EP, return with
3320 * success
3321 */
3322 if (ipa_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA) {
3323 IPADBG("DMA enabled for ep %d, dst pipe is part of DMA\n",
3324 clnt_hdl);
3325 return 0;
3326 }
3327
3328 if (ep_route->rt_tbl_hdl)
3329 IPAERR("client specified non-zero RT TBL hdl - ignore it\n");
3330
3331 IPADBG("pipe=%d, rt_tbl_hdl=%d\n",
3332 clnt_hdl,
3333 ep_route->rt_tbl_hdl);
3334
3335 /* always use "default" routing table when programming EP ROUTE reg */
3336 if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0)
3337 ipa_ctx->ep[clnt_hdl].rt_tbl_idx =
3338 IPA_MEM_PART(v4_apps_rt_index_lo);
3339 else
3340 ipa_ctx->ep[clnt_hdl].rt_tbl_idx = 0;
3341
3342 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
3343
3344 ipa_ctx->ctrl->ipa_cfg_ep_route(clnt_hdl,
3345 ipa_ctx->ep[clnt_hdl].rt_tbl_idx);
3346
3347 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
3348
3349 return 0;
3350}
3351
3352void _ipa_cfg_ep_holb_v1_1(u32 pipe_number,
3353 const struct ipa_ep_cfg_holb *ep_holb)
3354{
3355 ipa_write_reg(ipa_ctx->mmio,
3356 IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v1_1(pipe_number),
3357 ep_holb->en);
3358
3359 ipa_write_reg(ipa_ctx->mmio,
3360 IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v1_1(pipe_number),
3361 (u16)ep_holb->tmr_val);
3362}
3363
3364void _ipa_cfg_ep_holb_v2_0(u32 pipe_number,
3365 const struct ipa_ep_cfg_holb *ep_holb)
3366{
3367 ipa_write_reg(ipa_ctx->mmio,
3368 IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number),
3369 ep_holb->en);
3370
3371 ipa_write_reg(ipa_ctx->mmio,
3372 IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number),
3373 (u16)ep_holb->tmr_val);
3374}
3375
3376void _ipa_cfg_ep_holb_v2_5(u32 pipe_number,
3377 const struct ipa_ep_cfg_holb *ep_holb)
3378{
3379 ipa_write_reg(ipa_ctx->mmio,
3380 IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number),
3381 ep_holb->en);
3382
3383 ipa_write_reg(ipa_ctx->mmio,
3384 IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number),
3385 ep_holb->tmr_val);
3386}
3387
3388void _ipa_cfg_ep_holb_v2_6L(u32 pipe_number,
3389 const struct ipa_ep_cfg_holb *ep_holb)
3390{
3391 ipa_write_reg(ipa_ctx->mmio,
3392 IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number),
3393 ep_holb->en);
3394
3395 ipa_write_reg(ipa_ctx->mmio,
3396 IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number),
3397 ep_holb->tmr_val);
3398}
3399
3400/**
3401 * ipa2_cfg_ep_holb() - IPA end-point holb configuration
3402 *
3403 * If an IPA producer pipe is full, IPA HW by default will block
3404 * indefinitely till space opens up. During this time no packets
3405 * including those from unrelated pipes will be processed. Enabling
3406 * HOLB means IPA HW will be allowed to drop packets as/when needed
3407 * and indefinite blocking is avoided.
3408 *
3409 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
3410 * @ipa_ep_cfg: [in] IPA end-point configuration params
3411 *
3412 * Returns: 0 on success, negative on failure
3413 */
3414int ipa2_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb)
3415{
3416 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
3417 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_holb == NULL ||
3418 ep_holb->tmr_val > ipa_ctx->ctrl->max_holb_tmr_val ||
3419 ep_holb->en > 1) {
3420 IPAERR("bad parm.\n");
3421 return -EINVAL;
3422 }
3423
3424 if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) {
3425 IPAERR("HOLB does not apply to IPA in EP %d\n", clnt_hdl);
3426 return -EINVAL;
3427 }
3428
3429 if (!ipa_ctx->ctrl->ipa_cfg_ep_holb) {
3430 IPAERR("HOLB is not supported for this IPA core\n");
3431 return -EINVAL;
3432 }
3433
3434 ipa_ctx->ep[clnt_hdl].holb = *ep_holb;
3435
3436 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
3437
3438 ipa_ctx->ctrl->ipa_cfg_ep_holb(clnt_hdl, ep_holb);
3439
3440 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
3441
3442 IPADBG("cfg holb %u ep=%d tmr=%d\n", ep_holb->en, clnt_hdl,
3443 ep_holb->tmr_val);
3444
3445 return 0;
3446}
3447
/**
 * ipa2_cfg_ep_holb_by_client() - IPA end-point holb configuration
 *
 * Wrapper function for ipa2_cfg_ep_holb() taking a client name instead
 * of a client handle. Used by clients that do not hold a handle.
 *
 * @client:	[in] client name
 * @ep_holb:	[in] IPA end-point HOLB configuration params
 *
 * Returns: 0 on success, negative on failure
 */
int ipa2_cfg_ep_holb_by_client(enum ipa_client_type client,
				const struct ipa_ep_cfg_holb *ep_holb)
{
	/*
	 * ipa2_get_ep_mapping() returns -1 for an unmapped client; the
	 * negative value wraps to a huge u32 and is rejected by the
	 * range check inside ipa2_cfg_ep_holb().
	 */
	return ipa2_cfg_ep_holb(ipa2_get_ep_mapping(client), ep_holb);
}
3465
/*
 * _ipa_cfg_ep_deaggr_v1_1() - stub for IPA HW v1.1; de-aggregation
 * configuration is not supported on that core, so this logs and
 * reports success.
 */
static int _ipa_cfg_ep_deaggr_v1_1(u32 clnt_hdl,
		const struct ipa_ep_cfg_deaggr *ep_deaggr)
{
	IPADBG("Not supported for version 1.1\n");
	return 0;
}
3472
3473static int _ipa_cfg_ep_deaggr_v2_0(u32 clnt_hdl,
3474 const struct ipa_ep_cfg_deaggr *ep_deaggr)
3475{
3476 u32 reg_val = 0;
3477
3478 IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->deaggr_hdr_len,
3479 IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT,
3480 IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK);
3481
3482 IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->packet_offset_valid,
3483 IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT,
3484 IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK);
3485
3486 IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->packet_offset_location,
3487 IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT,
3488 IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK);
3489
3490 IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->max_packet_len,
3491 IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT,
3492 IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK);
3493
3494 ipa_write_reg(ipa_ctx->mmio,
3495 IPA_ENDP_INIT_DEAGGR_n_OFST_v2_0(clnt_hdl), reg_val);
3496
3497 return 0;
3498}
3499
3500/**
3501 * ipa2_cfg_ep_deaggr() - IPA end-point deaggregation configuration
3502 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
3503 * @ep_deaggr: [in] IPA end-point configuration params
3504 *
3505 * Returns: 0 on success, negative on failure
3506 *
3507 * Note: Should not be called from atomic context
3508 */
3509int ipa2_cfg_ep_deaggr(u32 clnt_hdl,
3510 const struct ipa_ep_cfg_deaggr *ep_deaggr)
3511{
3512 struct ipa_ep_context *ep;
3513
3514 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
3515 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_deaggr == NULL) {
3516 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
3517 clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
3518 return -EINVAL;
3519 }
3520
3521 IPADBG("pipe=%d deaggr_hdr_len=%d\n",
3522 clnt_hdl,
3523 ep_deaggr->deaggr_hdr_len);
3524
3525 IPADBG("packet_offset_valid=%d\n",
3526 ep_deaggr->packet_offset_valid);
3527
3528 IPADBG("packet_offset_location=%d max_packet_len=%d\n",
3529 ep_deaggr->packet_offset_location,
3530 ep_deaggr->max_packet_len);
3531
3532 ep = &ipa_ctx->ep[clnt_hdl];
3533
3534 /* copy over EP cfg */
3535 ep->cfg.deaggr = *ep_deaggr;
3536
3537 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
3538
3539 ipa_ctx->ctrl->ipa_cfg_ep_deaggr(clnt_hdl, &ep->cfg.deaggr);
3540
3541 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
3542
3543 return 0;
3544}
3545
/*
 * _ipa_cfg_ep_metadata_v1_1() - stub for IPA HW v1.1; header metadata
 * configuration is not supported on that core.
 */
static void _ipa_cfg_ep_metadata_v1_1(u32 pipe_number,
		const struct ipa_ep_cfg_metadata *meta)
{
	IPADBG("Not supported for version 1.1\n");
}
3551
3552static void _ipa_cfg_ep_metadata_v2_0(u32 pipe_number,
3553 const struct ipa_ep_cfg_metadata *meta)
3554{
3555 u32 reg_val = 0;
3556
3557 IPA_SETFIELD_IN_REG(reg_val, meta->qmap_id,
3558 IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT,
3559 IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK);
3560
3561 ipa_write_reg(ipa_ctx->mmio,
3562 IPA_ENDP_INIT_HDR_METADATA_n_OFST(pipe_number),
3563 reg_val);
3564}
3565
3566/**
3567 * ipa2_cfg_ep_metadata() - IPA end-point metadata configuration
3568 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
3569 * @ipa_ep_cfg: [in] IPA end-point configuration params
3570 *
3571 * Returns: 0 on success, negative on failure
3572 *
3573 * Note: Should not be called from atomic context
3574 */
3575int ipa2_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md)
3576{
3577 if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
3578 ipa_ctx->ep[clnt_hdl].valid == 0 || ep_md == NULL) {
3579 IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
3580 clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
3581 return -EINVAL;
3582 }
3583
3584 IPADBG("pipe=%d, mux id=%d\n", clnt_hdl, ep_md->qmap_id);
3585
3586 /* copy over EP cfg */
3587 ipa_ctx->ep[clnt_hdl].cfg.meta = *ep_md;
3588
3589 IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
3590
3591 ipa_ctx->ctrl->ipa_cfg_ep_metadata(clnt_hdl, ep_md);
3592 ipa_ctx->ep[clnt_hdl].cfg.hdr.hdr_metadata_reg_valid = 1;
3593 ipa_ctx->ctrl->ipa_cfg_ep_hdr(clnt_hdl, &ipa_ctx->ep[clnt_hdl].cfg.hdr);
3594
3595 IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
3596
3597 return 0;
3598}
3599EXPORT_SYMBOL(ipa2_cfg_ep_metadata);
3600
3601int ipa2_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
3602{
3603 struct ipa_ep_cfg_metadata meta;
3604 struct ipa_ep_context *ep;
3605 int ipa_ep_idx;
3606 int result = -EINVAL;
3607
3608 if (unlikely(!ipa_ctx)) {
3609 IPAERR("IPA driver was not initialized\n");
3610 return -EINVAL;
3611 }
3612
3613 if (param_in->client >= IPA_CLIENT_MAX) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05303614 IPAERR_RL("bad parm client:%d\n", param_in->client);
Amir Levy9659e592016-10-27 18:08:27 +03003615 goto fail;
3616 }
3617
3618 ipa_ep_idx = ipa2_get_ep_mapping(param_in->client);
3619 if (ipa_ep_idx == -1) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05303620 IPAERR_RL("Invalid client.\n");
Amir Levy9659e592016-10-27 18:08:27 +03003621 goto fail;
3622 }
3623
3624 ep = &ipa_ctx->ep[ipa_ep_idx];
3625 if (!ep->valid) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05303626 IPAERR_RL("EP not allocated.\n");
Amir Levy9659e592016-10-27 18:08:27 +03003627 goto fail;
3628 }
3629
3630 meta.qmap_id = param_in->qmap_id;
3631 if (param_in->client == IPA_CLIENT_USB_PROD ||
3632 param_in->client == IPA_CLIENT_HSIC1_PROD ||
Sunil Paidimarri5139aa22017-02-13 11:07:32 -08003633 param_in->client == IPA_CLIENT_ODU_PROD ||
3634 param_in->client == IPA_CLIENT_ETHERNET_PROD) {
Amir Levy9659e592016-10-27 18:08:27 +03003635 result = ipa2_cfg_ep_metadata(ipa_ep_idx, &meta);
3636 } else if (param_in->client == IPA_CLIENT_WLAN1_PROD) {
3637 ipa_ctx->ep[ipa_ep_idx].cfg.meta = meta;
3638 result = ipa_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id);
3639 if (result)
Utkarsh Saxenae9782812017-05-26 17:20:32 +05303640 IPAERR_RL("qmap_id %d write failed on ep=%d\n",
Amir Levy9659e592016-10-27 18:08:27 +03003641 meta.qmap_id, ipa_ep_idx);
3642 result = 0;
3643 }
3644
3645fail:
3646 return result;
3647}
3648
3649/**
3650 * ipa_dump_buff_internal() - dumps buffer for debug purposes
3651 * @base: buffer base address
3652 * @phy_base: buffer physical base address
3653 * @size: size of the buffer
3654 */
3655void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size)
3656{
3657 int i;
3658 u32 *cur = (u32 *)base;
3659 u8 *byt;
3660
3661 IPADBG("system phys addr=%pa len=%u\n", &phy_base, size);
3662 for (i = 0; i < size / 4; i++) {
3663 byt = (u8 *)(cur + i);
3664 IPADBG("%2d %08x %02x %02x %02x %02x\n", i, *(cur + i),
3665 byt[0], byt[1], byt[2], byt[3]);
3666 }
3667 IPADBG("END\n");
3668}
3669
3670/**
3671 * void ipa_rx_timeout_min_max_calc() - calc min max timeout time of rx polling
3672 * @time: time fom dtsi entry or from debugfs file system
3673 * @min: rx polling min timeout
3674 * @max: rx polling max timeout
3675 * Maximum time could be of 10Msec allowed.
3676 */
3677void ipa_rx_timeout_min_max_calc(u32 *min, u32 *max, s8 time)
3678{
3679 if ((time >= MIN_RX_POLL_TIME) &&
3680 (time <= MAX_RX_POLL_TIME)) {
3681 *min = (time * MSEC) + LOWER_CUTOFF;
3682 *max = (time * MSEC) + UPPER_CUTOFF;
3683 } else {
3684 /* Setting up the default min max time */
3685 IPADBG("Setting up default rx polling timeout\n");
3686 *min = (MIN_RX_POLL_TIME * MSEC) +
3687 LOWER_CUTOFF;
3688 *max = (MIN_RX_POLL_TIME * MSEC) +
3689 UPPER_CUTOFF;
3690 }
3691 IPADBG("Rx polling timeout Min = %u len = %u\n", *min, *max);
3692}
3693
/**
 * ipa_pipe_mem_init() - initialize the pipe memory
 * @start_ofst: start offset
 * @size: size
 *
 * Wraps the [start_ofst, start_ofst + size) range in a genalloc pool
 * stored in ipa_ctx->pipe_mem_pool, from which ipa_pipe_mem_alloc()
 * later carves chunks.
 *
 * Return value:
 * 0: success
 * -ENOMEM: no memory
 */
int ipa_pipe_mem_init(u32 start_ofst, u32 size)
{
	int res;
	u32 aligned_start_ofst;
	u32 aligned_size;
	struct gen_pool *pool;

	if (!size) {
		IPAERR("no IPA pipe memory allocated\n");
		goto fail;
	}

	/* round the start up to HW alignment; shrink size accordingly */
	aligned_start_ofst = IPA_HW_TABLE_ALIGNMENT(start_ofst);
	aligned_size = size - (aligned_start_ofst - start_ofst);

	IPADBG("start_ofst=%u aligned_start_ofst=%u size=%u aligned_size=%u\n",
		start_ofst, aligned_start_ofst, size, aligned_size);

	/* allocation order of 8 i.e. 128 bytes, global pool */
	pool = gen_pool_create(8, -1);
	if (!pool) {
		IPAERR("Failed to create a new memory pool.\n");
		goto fail;
	}

	/*
	 * The pool manages raw pipe-memory offsets, not kernel virtual
	 * addresses; gen_pool_alloc() hands these offsets back.
	 */
	res = gen_pool_add(pool, aligned_start_ofst, aligned_size, -1);
	if (res) {
		IPAERR("Failed to add memory to IPA pipe pool\n");
		goto err_pool_add;
	}

	ipa_ctx->pipe_mem_pool = pool;
	return 0;

err_pool_add:
	gen_pool_destroy(pool);
fail:
	return -ENOMEM;
}
3742
3743/**
3744 * ipa_pipe_mem_alloc() - allocate pipe memory
3745 * @ofst: offset
3746 * @size: size
3747 *
3748 * Return value:
3749 * 0: success
3750 */
3751int ipa_pipe_mem_alloc(u32 *ofst, u32 size)
3752{
3753 u32 vaddr;
3754 int res = -1;
3755
3756 if (!ipa_ctx->pipe_mem_pool || !size) {
3757 IPAERR("failed size=%u pipe_mem_pool=%p\n", size,
3758 ipa_ctx->pipe_mem_pool);
3759 return res;
3760 }
3761
3762 vaddr = gen_pool_alloc(ipa_ctx->pipe_mem_pool, size);
3763
3764 if (vaddr) {
3765 *ofst = vaddr;
3766 res = 0;
3767 IPADBG("size=%u ofst=%u\n", size, vaddr);
3768 } else {
3769 IPAERR("size=%u failed\n", size);
3770 }
3771
3772 return res;
3773}
3774
3775/**
3776 * ipa_pipe_mem_free() - free pipe memory
3777 * @ofst: offset
3778 * @size: size
3779 *
3780 * Return value:
3781 * 0: success
3782 */
3783int ipa_pipe_mem_free(u32 ofst, u32 size)
3784{
3785 IPADBG("size=%u ofst=%u\n", size, ofst);
3786 if (ipa_ctx->pipe_mem_pool && size)
3787 gen_pool_free(ipa_ctx->pipe_mem_pool, ofst, size);
3788 return 0;
3789}
3790
3791/**
3792 * ipa2_set_aggr_mode() - Set the aggregation mode which is a global setting
3793 * @mode: [in] the desired aggregation mode for e.g. straight MBIM, QCNCM,
3794 * etc
3795 *
3796 * Returns: 0 on success
3797 */
3798int ipa2_set_aggr_mode(enum ipa_aggr_mode mode)
3799{
3800 u32 reg_val;
3801
3802 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
3803 reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_QCNCM_OFST);
3804 ipa_write_reg(ipa_ctx->mmio, IPA_QCNCM_OFST, (mode & 0x1) |
3805 (reg_val & 0xfffffffe));
3806 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
3807
3808 return 0;
3809}
3810
3811/**
3812 * ipa2_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
3813 * mode
3814 * @sig: [in] the first 3 bytes of QCNCM NDP signature (expected to be
3815 * "QND")
3816 *
3817 * Set the NDP signature used for QCNCM aggregation mode. The fourth byte
3818 * (expected to be 'P') needs to be set using the header addition mechanism
3819 *
3820 * Returns: 0 on success, negative on failure
3821 */
3822int ipa2_set_qcncm_ndp_sig(char sig[3])
3823{
3824 u32 reg_val;
3825
3826 if (sig == NULL) {
3827 IPAERR("bad argument for ipa_set_qcncm_ndp_sig/n");
3828 return -EINVAL;
3829 }
3830 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
3831 reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_QCNCM_OFST);
3832 ipa_write_reg(ipa_ctx->mmio, IPA_QCNCM_OFST, sig[0] << 20 |
3833 (sig[1] << 12) | (sig[2] << 4) |
3834 (reg_val & 0xf000000f));
3835 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
3836
3837 return 0;
3838}
3839
3840/**
3841 * ipa2_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
3842 * configuration
3843 * @enable: [in] true for single NDP/MBIM; false otherwise
3844 *
3845 * Returns: 0 on success
3846 */
3847int ipa2_set_single_ndp_per_mbim(bool enable)
3848{
3849 u32 reg_val;
3850
3851 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
3852 reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST);
3853 ipa_write_reg(ipa_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST,
3854 (enable & 0x1) | (reg_val & 0xfffffffe));
3855 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
3856
3857 return 0;
3858}
3859
3860/**
3861 * ipa_set_hw_timer_fix_for_mbim_aggr() - Enable/disable HW timer fix
3862 * for MBIM aggregation.
3863 * @enable: [in] true for enable HW fix; false otherwise
3864 *
3865 * Returns: 0 on success
3866 */
3867int ipa_set_hw_timer_fix_for_mbim_aggr(bool enable)
3868{
3869 u32 reg_val;
3870
3871 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
3872 reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST);
3873 ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST,
3874 (enable << IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_SHFT) |
3875 (reg_val & ~IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_BMSK));
3876 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
3877 return 0;
3878}
3879EXPORT_SYMBOL(ipa_set_hw_timer_fix_for_mbim_aggr);
3880
/**
 * ipa_straddle_boundary() - Checks whether a memory buffer straddles a boundary
 * @start: start address of the memory buffer
 * @end: end address of the memory buffer
 * @boundary: boundary
 *
 * Return value:
 * 1: if the interval [start, end] straddles boundary
 * 0: otherwise
 */
int ipa_straddle_boundary(u32 start, u32 end, u32 boundary)
{
	u32 next_start;
	u32 prev_end;

	IPADBG("start=%u end=%u boundary=%u\n", start, end, boundary);

	/*
	 * NOTE(review): the masking below assumes @boundary is a power
	 * of two — confirm all callers guarantee this.
	 */
	/* first multiple of boundary at or after start */
	next_start = (start + (boundary - 1)) & ~(boundary - 1);
	/* last multiple of boundary strictly before the rounded-up end */
	prev_end = ((end + (boundary - 1)) & ~(boundary - 1)) - boundary;

	/*
	 * Both values are boundary multiples, so stepping next_start by
	 * boundary lands exactly on prev_end whenever
	 * next_start <= prev_end, i.e. a boundary lies inside the range.
	 */
	while (next_start < prev_end)
		next_start += boundary;

	if (next_start == prev_end)
		return 1;
	else
		return 0;
}
3909
3910/**
3911 * ipa2_bam_reg_dump() - Dump selected BAM registers for IPA and DMA-BAM
3912 *
3913 * Function is rate limited to avoid flooding kernel log buffer
3914 */
3915void ipa2_bam_reg_dump(void)
3916{
3917 static DEFINE_RATELIMIT_STATE(_rs, 500*HZ, 1);
3918
3919 if (__ratelimit(&_rs)) {
3920 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
3921 pr_err("IPA BAM START\n");
3922 if (ipa_ctx->ipa_hw_type < IPA_HW_v2_0) {
3923 sps_get_bam_debug_info(ipa_ctx->bam_handle, 5,
3924 511950, 0, 0);
3925 sps_get_bam_debug_info(ipa_ctx->bam_handle, 93, 0,
3926 0, 0);
3927 } else {
3928 sps_get_bam_debug_info(ipa_ctx->bam_handle, 93,
3929 (SPS_BAM_PIPE(ipa_get_ep_mapping(IPA_CLIENT_USB_CONS))
3930 |
3931 SPS_BAM_PIPE(ipa_get_ep_mapping(IPA_CLIENT_USB_PROD))),
3932 0, 2);
3933 }
3934 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
3935 }
3936}
3937
3938static void ipa_init_mem_partition_v2(void)
3939{
3940 IPADBG("Memory partition IPA 2\n");
3941 IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST;
3942 IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE;
3943 IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
3944 IPA_MEM_PART(nat_size));
3945
3946 IPA_MEM_PART(ofst_start) = IPA_MEM_v2_RAM_OFST_START;
3947 IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));
3948
3949 IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_RAM_V4_FLT_OFST;
3950 IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_RAM_V4_FLT_SIZE;
3951 IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR;
3952 IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
3953 IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size),
3954 IPA_MEM_PART(v4_flt_size_ddr));
3955
3956 IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_RAM_V6_FLT_OFST;
3957 IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_RAM_V6_FLT_SIZE;
3958 IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR;
3959 IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
3960 IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size),
3961 IPA_MEM_PART(v6_flt_size_ddr));
3962
3963 IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_RAM_V4_RT_OFST;
3964 IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst));
3965
3966 IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_RAM_V4_NUM_INDEX;
3967 IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index));
3968
3969 IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_V4_MODEM_RT_INDEX_LO;
3970 IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_V4_MODEM_RT_INDEX_HI;
3971 IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
3972 IPA_MEM_PART(v4_modem_rt_index_lo),
3973 IPA_MEM_PART(v4_modem_rt_index_hi));
3974
3975 IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_V4_APPS_RT_INDEX_LO;
3976 IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_V4_APPS_RT_INDEX_HI;
3977 IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
3978 IPA_MEM_PART(v4_apps_rt_index_lo),
3979 IPA_MEM_PART(v4_apps_rt_index_hi));
3980
3981 IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_RAM_V4_RT_SIZE;
3982 IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR;
3983 IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size),
3984 IPA_MEM_PART(v4_rt_size_ddr));
3985
3986 IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_RAM_V6_RT_OFST;
3987 IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst));
3988
3989 IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_RAM_V6_NUM_INDEX;
3990 IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index));
3991
3992 IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_V6_MODEM_RT_INDEX_LO;
3993 IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_V6_MODEM_RT_INDEX_HI;
3994 IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
3995 IPA_MEM_PART(v6_modem_rt_index_lo),
3996 IPA_MEM_PART(v6_modem_rt_index_hi));
3997
3998 IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_V6_APPS_RT_INDEX_LO;
3999 IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_V6_APPS_RT_INDEX_HI;
4000 IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
4001 IPA_MEM_PART(v6_apps_rt_index_lo),
4002 IPA_MEM_PART(v6_apps_rt_index_hi));
4003
4004 IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_RAM_V6_RT_SIZE;
4005 IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR;
4006 IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size),
4007 IPA_MEM_PART(v6_rt_size_ddr));
4008
4009 IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_RAM_MODEM_HDR_OFST;
4010 IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_RAM_MODEM_HDR_SIZE;
4011 IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
4012 IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));
4013
4014 IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_RAM_APPS_HDR_OFST;
4015 IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_RAM_APPS_HDR_SIZE;
4016 IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_RAM_HDR_SIZE_DDR;
4017 IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
4018 IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
4019 IPA_MEM_PART(apps_hdr_size_ddr));
4020
4021 IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_RAM_MODEM_OFST;
4022 IPA_MEM_PART(modem_size) = IPA_MEM_v2_RAM_MODEM_SIZE;
4023 IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
4024 IPA_MEM_PART(modem_size));
4025
4026 IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_RAM_APPS_V4_FLT_OFST;
4027 IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_RAM_APPS_V4_FLT_SIZE;
4028 IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n",
4029 IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_PART(apps_v4_flt_size));
4030
4031 IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_RAM_APPS_V6_FLT_OFST;
4032 IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_RAM_APPS_V6_FLT_SIZE;
4033 IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n",
4034 IPA_MEM_PART(apps_v6_flt_ofst), IPA_MEM_PART(apps_v6_flt_size));
4035
4036 IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_RAM_UC_INFO_OFST;
4037 IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_RAM_UC_INFO_SIZE;
4038 IPADBG("V6 UC INFO OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(uc_info_ofst),
4039 IPA_MEM_PART(uc_info_size));
4040
4041 IPA_MEM_PART(end_ofst) = IPA_MEM_v2_RAM_END_OFST;
4042 IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_RAM_APPS_V4_RT_OFST;
4043 IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_RAM_APPS_V4_RT_SIZE;
4044 IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_RAM_APPS_V6_RT_OFST;
4045 IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_RAM_APPS_V6_RT_SIZE;
4046}
4047
/*
 * ipa_init_mem_partition_v2_5() - populate the IPA v2.5 shared-memory (SRAM)
 * partition map.
 *
 * Fills the global IPA_MEM_PART() table with the offsets and sizes of every
 * SRAM region (NAT, uC info, filter/route tables, headers, header proc
 * contexts, modem regions) used on IPA HW v2.5, logging each entry for
 * bring-up debugging.
 */
static void ipa_init_mem_partition_v2_5(void)
{
	IPADBG("Memory partition IPA 2.5\n");
	IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST;
	IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE;
	IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
		IPA_MEM_PART(nat_size));

	IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_5_RAM_UC_INFO_OFST;
	IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_5_RAM_UC_INFO_SIZE;
	IPADBG("UC INFO OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(uc_info_ofst),
		IPA_MEM_PART(uc_info_size));

	IPA_MEM_PART(ofst_start) = IPA_MEM_v2_5_RAM_OFST_START;
	IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));

	IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_5_RAM_V4_FLT_OFST;
	IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_5_RAM_V4_FLT_SIZE;
	IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR;
	IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size),
		IPA_MEM_PART(v4_flt_size_ddr));

	IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_5_RAM_V6_FLT_OFST;
	IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_5_RAM_V6_FLT_SIZE;
	IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR;
	IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size),
		IPA_MEM_PART(v6_flt_size_ddr));

	IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_5_RAM_V4_RT_OFST;
	IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst));

	IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_5_RAM_V4_NUM_INDEX;
	IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index));

	IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_5_V4_MODEM_RT_INDEX_LO;
	IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_5_V4_MODEM_RT_INDEX_HI;
	IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v4_modem_rt_index_lo),
		IPA_MEM_PART(v4_modem_rt_index_hi));

	IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_5_V4_APPS_RT_INDEX_LO;
	IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_5_V4_APPS_RT_INDEX_HI;
	IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v4_apps_rt_index_lo),
		IPA_MEM_PART(v4_apps_rt_index_hi));

	IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_5_RAM_V4_RT_SIZE;
	IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR;
	IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size),
		IPA_MEM_PART(v4_rt_size_ddr));

	IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_5_RAM_V6_RT_OFST;
	IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst));

	IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_5_RAM_V6_NUM_INDEX;
	IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index));

	IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_5_V6_MODEM_RT_INDEX_LO;
	IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_5_V6_MODEM_RT_INDEX_HI;
	IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v6_modem_rt_index_lo),
		IPA_MEM_PART(v6_modem_rt_index_hi));

	IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_5_V6_APPS_RT_INDEX_LO;
	IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_5_V6_APPS_RT_INDEX_HI;
	IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
		IPA_MEM_PART(v6_apps_rt_index_lo),
		IPA_MEM_PART(v6_apps_rt_index_hi));

	IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_5_RAM_V6_RT_SIZE;
	IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR;
	IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size),
		IPA_MEM_PART(v6_rt_size_ddr));

	IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_5_RAM_MODEM_HDR_OFST;
	IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_5_RAM_MODEM_HDR_SIZE;
	IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));

	IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_5_RAM_APPS_HDR_OFST;
	IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_5_RAM_APPS_HDR_SIZE;
	IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_5_RAM_HDR_SIZE_DDR;
	IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
		IPA_MEM_PART(apps_hdr_size_ddr));

	/* header processing contexts exist only on v2.5+ */
	IPA_MEM_PART(modem_hdr_proc_ctx_ofst) =
		IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_OFST;
	IPA_MEM_PART(modem_hdr_proc_ctx_size) =
		IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_SIZE;
	IPADBG("MODEM HDR PROC CTX OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(modem_hdr_proc_ctx_ofst),
		IPA_MEM_PART(modem_hdr_proc_ctx_size));

	IPA_MEM_PART(apps_hdr_proc_ctx_ofst) =
		IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_OFST;
	IPA_MEM_PART(apps_hdr_proc_ctx_size) =
		IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_SIZE;
	IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr) =
		IPA_MEM_RAM_HDR_PROC_CTX_SIZE_DDR;
	IPADBG("APPS HDR PROC CTX OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
		IPA_MEM_PART(apps_hdr_proc_ctx_ofst),
		IPA_MEM_PART(apps_hdr_proc_ctx_size),
		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr));

	IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_5_RAM_MODEM_OFST;
	IPA_MEM_PART(modem_size) = IPA_MEM_v2_5_RAM_MODEM_SIZE;
	IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
		IPA_MEM_PART(modem_size));

	IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_5_RAM_APPS_V4_FLT_OFST;
	IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_5_RAM_APPS_V4_FLT_SIZE;
	IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_PART(apps_v4_flt_size));

	IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_5_RAM_APPS_V6_FLT_OFST;
	IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_5_RAM_APPS_V6_FLT_SIZE;
	IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n",
		IPA_MEM_PART(apps_v6_flt_ofst), IPA_MEM_PART(apps_v6_flt_size));

	IPA_MEM_PART(end_ofst) = IPA_MEM_v2_5_RAM_END_OFST;
	IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_5_RAM_APPS_V4_RT_OFST;
	IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_5_RAM_APPS_V4_RT_SIZE;
	IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_5_RAM_APPS_V6_RT_OFST;
	IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_5_RAM_APPS_V6_RT_SIZE;
}
4176
4177static void ipa_init_mem_partition_v2_6L(void)
4178{
4179 IPADBG("Memory partition IPA 2.6Lite\n");
4180 IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST;
4181 IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE;
4182 IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
4183 IPA_MEM_PART(nat_size));
4184
4185 IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_6L_RAM_UC_INFO_OFST;
4186 IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_6L_RAM_UC_INFO_SIZE;
4187 IPADBG("V6 UC INFO OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(uc_info_ofst),
4188 IPA_MEM_PART(uc_info_size));
4189
4190 IPA_MEM_PART(ofst_start) = IPA_MEM_v2_6L_RAM_OFST_START;
4191 IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));
4192
4193 IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_6L_RAM_V4_FLT_OFST;
4194 IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_6L_RAM_V4_FLT_SIZE;
4195 IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR;
4196 IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
4197 IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size),
4198 IPA_MEM_PART(v4_flt_size_ddr));
4199
4200 IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_6L_RAM_V6_FLT_OFST;
4201 IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_6L_RAM_V6_FLT_SIZE;
4202 IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR;
4203 IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
4204 IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size),
4205 IPA_MEM_PART(v6_flt_size_ddr));
4206
4207 IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_6L_RAM_V4_RT_OFST;
4208 IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst));
4209
4210 IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_6L_RAM_V4_NUM_INDEX;
4211 IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index));
4212
4213 IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_LO;
4214 IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_HI;
4215 IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
4216 IPA_MEM_PART(v4_modem_rt_index_lo),
4217 IPA_MEM_PART(v4_modem_rt_index_hi));
4218
4219 IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_6L_V4_APPS_RT_INDEX_LO;
4220 IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_6L_V4_APPS_RT_INDEX_HI;
4221 IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
4222 IPA_MEM_PART(v4_apps_rt_index_lo),
4223 IPA_MEM_PART(v4_apps_rt_index_hi));
4224
4225 IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_6L_RAM_V4_RT_SIZE;
4226 IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR;
4227 IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size),
4228 IPA_MEM_PART(v4_rt_size_ddr));
4229
4230 IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_6L_RAM_V6_RT_OFST;
4231 IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst));
4232
4233 IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_6L_RAM_V6_NUM_INDEX;
4234 IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index));
4235
4236 IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_LO;
4237 IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_HI;
4238 IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
4239 IPA_MEM_PART(v6_modem_rt_index_lo),
4240 IPA_MEM_PART(v6_modem_rt_index_hi));
4241
4242 IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_6L_V6_APPS_RT_INDEX_LO;
4243 IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_6L_V6_APPS_RT_INDEX_HI;
4244 IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
4245 IPA_MEM_PART(v6_apps_rt_index_lo),
4246 IPA_MEM_PART(v6_apps_rt_index_hi));
4247
4248 IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_6L_RAM_V6_RT_SIZE;
4249 IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR;
4250 IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size),
4251 IPA_MEM_PART(v6_rt_size_ddr));
4252
4253 IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_6L_RAM_MODEM_HDR_OFST;
4254 IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_6L_RAM_MODEM_HDR_SIZE;
4255 IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
4256 IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));
4257
4258 IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_6L_RAM_APPS_HDR_OFST;
4259 IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_6L_RAM_APPS_HDR_SIZE;
4260 IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_6L_RAM_HDR_SIZE_DDR;
4261 IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
4262 IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
4263 IPA_MEM_PART(apps_hdr_size_ddr));
4264
4265 IPA_MEM_PART(modem_comp_decomp_ofst) =
4266 IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_OFST;
4267 IPA_MEM_PART(modem_comp_decomp_size) =
4268 IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_SIZE;
4269 IPADBG("MODEM COMP DECOMP OFST 0x%x SIZE 0x%x\n",
4270 IPA_MEM_PART(modem_comp_decomp_ofst),
4271 IPA_MEM_PART(modem_comp_decomp_size));
4272
4273 IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_6L_RAM_MODEM_OFST;
4274 IPA_MEM_PART(modem_size) = IPA_MEM_v2_6L_RAM_MODEM_SIZE;
4275 IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
4276 IPA_MEM_PART(modem_size));
4277
4278 IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V4_FLT_OFST;
4279 IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_6L_RAM_APPS_V4_FLT_SIZE;
4280 IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n",
4281 IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_PART(apps_v4_flt_size));
4282
4283 IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V6_FLT_OFST;
4284 IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_6L_RAM_APPS_V6_FLT_SIZE;
4285 IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n",
4286 IPA_MEM_PART(apps_v6_flt_ofst), IPA_MEM_PART(apps_v6_flt_size));
4287
4288 IPA_MEM_PART(end_ofst) = IPA_MEM_v2_6L_RAM_END_OFST;
4289 IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V4_RT_OFST;
4290 IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_6L_RAM_APPS_V4_RT_SIZE;
4291 IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V6_RT_OFST;
4292 IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_6L_RAM_APPS_V6_RT_SIZE;
4293}
4294
4295/**
4296 * ipa_controller_shared_static_bind() - set the appropriate shared methods for
4297 * for IPA HW version 2.0, 2.5, 2.6 and 2.6L
4298 *
4299 * @ctrl: data structure which holds the function pointers
4300 */
4301void ipa_controller_shared_static_bind(struct ipa_controller *ctrl)
4302{
4303 ctrl->ipa_init_rt4 = _ipa_init_rt4_v2;
4304 ctrl->ipa_init_rt6 = _ipa_init_rt6_v2;
4305 ctrl->ipa_init_flt4 = _ipa_init_flt4_v2;
4306 ctrl->ipa_init_flt6 = _ipa_init_flt6_v2;
4307 ctrl->ipa_cfg_ep_hdr = _ipa_cfg_ep_hdr_v2_0;
4308 ctrl->ipa_cfg_ep_nat = _ipa_cfg_ep_nat_v2_0;
4309 ctrl->ipa_cfg_ep_aggr = _ipa_cfg_ep_aggr_v2_0;
4310 ctrl->ipa_cfg_ep_deaggr = _ipa_cfg_ep_deaggr_v2_0;
4311 ctrl->ipa_cfg_ep_mode = _ipa_cfg_ep_mode_v2_0;
4312 ctrl->ipa_cfg_ep_route = _ipa_cfg_ep_route_v2_0;
4313 ctrl->ipa_cfg_route = _ipa_cfg_route_v2_0;
4314 ctrl->ipa_cfg_ep_status = _ipa_cfg_ep_status_v2_0;
4315 ctrl->ipa_cfg_ep_cfg = _ipa_cfg_ep_cfg_v2_0;
4316 ctrl->ipa_cfg_ep_metadata_mask = _ipa_cfg_ep_metadata_mask_v2_0;
4317 ctrl->ipa_clk_rate_turbo = IPA_V2_0_CLK_RATE_TURBO;
4318 ctrl->ipa_clk_rate_nominal = IPA_V2_0_CLK_RATE_NOMINAL;
4319 ctrl->ipa_clk_rate_svs = IPA_V2_0_CLK_RATE_SVS;
4320 ctrl->ipa_read_gen_reg = _ipa_read_gen_reg_v2_0;
4321 ctrl->ipa_read_ep_reg = _ipa_read_ep_reg_v2_0;
4322 ctrl->ipa_write_dbg_cnt = _ipa_write_dbg_cnt_v2_0;
4323 ctrl->ipa_read_dbg_cnt = _ipa_read_dbg_cnt_v2_0;
4324 ctrl->ipa_commit_flt = __ipa_commit_flt_v2;
4325 ctrl->ipa_commit_rt = __ipa_commit_rt_v2;
4326 ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2;
4327 ctrl->ipa_enable_clks = _ipa_enable_clks_v2_0;
4328 ctrl->ipa_disable_clks = _ipa_disable_clks_v2_0;
4329 ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v2_0;
4330 ctrl->ipa_cfg_ep_metadata = _ipa_cfg_ep_metadata_v2_0;
4331 ctrl->clock_scaling_bw_threshold_nominal =
4332 IPA_V2_0_BW_THRESHOLD_NOMINAL_MBPS;
4333 ctrl->clock_scaling_bw_threshold_turbo =
4334 IPA_V2_0_BW_THRESHOLD_TURBO_MBPS;
4335}
4336
4337/**
4338 * ipa_ctrl_static_bind() - set the appropriate methods for
4339 * IPA Driver based on the HW version
4340 *
4341 * @ctrl: data structure which holds the function pointers
4342 * @hw_type: the HW type in use
4343 *
4344 * This function can avoid the runtime assignment by using C99 special
4345 * struct initialization - hard decision... time.vs.mem
4346 */
4347int ipa_controller_static_bind(struct ipa_controller *ctrl,
4348 enum ipa_hw_type hw_type)
4349{
4350 switch (hw_type) {
4351 case (IPA_HW_v1_1):
4352 ipa_init_mem_partition_v2();
4353 ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v1_1;
4354 ctrl->ipa_cfg_ep_hdr = _ipa_cfg_ep_hdr_v1_1;
4355 ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v1_1;
4356 ctrl->ipa_cfg_ep_aggr = _ipa_cfg_ep_aggr_v1_1;
4357 ctrl->ipa_cfg_ep_deaggr = _ipa_cfg_ep_deaggr_v1_1;
4358 ctrl->ipa_cfg_ep_nat = _ipa_cfg_ep_nat_v1_1;
4359 ctrl->ipa_cfg_ep_mode = _ipa_cfg_ep_mode_v1_1;
4360 ctrl->ipa_cfg_ep_route = _ipa_cfg_ep_route_v1_1;
4361 ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v1_1;
4362 ctrl->ipa_cfg_route = _ipa_cfg_route_v1_1;
4363 ctrl->ipa_cfg_ep_status = _ipa_cfg_ep_status_v1_1;
4364 ctrl->ipa_cfg_ep_cfg = _ipa_cfg_ep_cfg_v1_1;
4365 ctrl->ipa_cfg_ep_metadata_mask = _ipa_cfg_ep_metadata_mask_v1_1;
4366 ctrl->ipa_clk_rate_turbo = IPA_V1_1_CLK_RATE;
4367 ctrl->ipa_clk_rate_nominal = IPA_V1_1_CLK_RATE;
4368 ctrl->ipa_clk_rate_svs = IPA_V1_1_CLK_RATE;
4369 ctrl->ipa_read_gen_reg = _ipa_read_gen_reg_v1_1;
4370 ctrl->ipa_read_ep_reg = _ipa_read_ep_reg_v1_1;
4371 ctrl->ipa_write_dbg_cnt = _ipa_write_dbg_cnt_v1_1;
4372 ctrl->ipa_read_dbg_cnt = _ipa_read_dbg_cnt_v1_1;
4373 ctrl->ipa_commit_flt = __ipa_commit_flt_v1_1;
4374 ctrl->ipa_commit_rt = __ipa_commit_rt_v1_1;
4375 ctrl->ipa_commit_hdr = __ipa_commit_hdr_v1_1;
4376 ctrl->ipa_enable_clks = _ipa_enable_clks_v1_1;
4377 ctrl->ipa_disable_clks = _ipa_disable_clks_v1_1;
4378 ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v1_1;
4379 ctrl->ipa_cfg_ep_metadata = _ipa_cfg_ep_metadata_v1_1;
4380 ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_0;
4381 ctrl->max_holb_tmr_val = IPA_V1_MAX_HOLB_TMR_VAL;
4382 break;
4383 case (IPA_HW_v2_0):
4384 ipa_init_mem_partition_v2();
4385 ipa_controller_shared_static_bind(ctrl);
4386 ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_0;
4387 ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_0;
4388 ctrl->max_holb_tmr_val = IPA_V2_0_MAX_HOLB_TMR_VAL;
4389 ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_0;
4390 ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_0;
4391 ctrl->ipa_init_sram = _ipa_init_sram_v2;
4392 ctrl->ipa_init_hdr = _ipa_init_hdr_v2;
4393 ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2;
4394 ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2;
4395 break;
4396 case (IPA_HW_v2_5):
4397 ipa_init_mem_partition_v2_5();
4398 ipa_controller_shared_static_bind(ctrl);
4399 ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_5;
4400 ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_5;
4401 ctrl->max_holb_tmr_val = IPA_V2_5_MAX_HOLB_TMR_VAL;
4402 ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_5;
4403 ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_5;
4404 ctrl->ipa_init_sram = _ipa_init_sram_v2_5;
4405 ctrl->ipa_init_hdr = _ipa_init_hdr_v2_5;
4406 ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2_5;
4407 ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2_5;
4408 break;
4409 case (IPA_HW_v2_6L):
4410 ipa_init_mem_partition_v2_6L();
4411 ipa_controller_shared_static_bind(ctrl);
4412 ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_6L;
4413 ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_6L;
4414 ctrl->max_holb_tmr_val = IPA_V2_6L_MAX_HOLB_TMR_VAL;
4415 ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_6L;
4416 ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_6L;
4417 ctrl->ipa_init_sram = _ipa_init_sram_v2_6L;
4418 ctrl->ipa_init_hdr = _ipa_init_hdr_v2_6L;
4419 ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2_6L;
4420 ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2_6L;
4421 break;
4422 default:
4423 return -EPERM;
4424 }
4425
4426 return 0;
4427}
4428
/**
 * ipa_skb_recycle() - reset an skb so its data buffer can be reused
 * @skb: the socket buffer to recycle
 *
 * Clears the shared info and skb bookkeeping fields (everything before
 * 'dataref' / 'tail' respectively) and restores NET_SKB_PAD headroom,
 * leaving head/data/tail consistent for a fresh fill.
 */
void ipa_skb_recycle(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;

	/* wipe shared info up to (not including) dataref, then re-own it */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	/* wipe skb fields up to tail; head/end pointers are preserved */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);
}
4441
4442int ipa_id_alloc(void *ptr)
4443{
4444 int id;
4445
4446 idr_preload(GFP_KERNEL);
4447 spin_lock(&ipa_ctx->idr_lock);
4448 id = idr_alloc(&ipa_ctx->ipa_idr, ptr, 0, 0, GFP_NOWAIT);
4449 spin_unlock(&ipa_ctx->idr_lock);
4450 idr_preload_end();
4451
4452 return id;
4453}
4454
4455void *ipa_id_find(u32 id)
4456{
4457 void *ptr;
4458
4459 spin_lock(&ipa_ctx->idr_lock);
4460 ptr = idr_find(&ipa_ctx->ipa_idr, id);
4461 spin_unlock(&ipa_ctx->idr_lock);
4462
4463 return ptr;
4464}
4465
/**
 * ipa_id_remove() - remove the mapping for @id from the driver-wide IDR
 * @id: handle previously returned by ipa_id_alloc()
 *
 * The registered object itself is not freed here.
 */
void ipa_id_remove(u32 id)
{
	spin_lock(&ipa_ctx->idr_lock);
	idr_remove(&ipa_ctx->ipa_idr, id);
	spin_unlock(&ipa_ctx->idr_lock);
}
4472
/* descriptor completion callback: frees a kzalloc'ed immediate-command
 * payload passed via user1 (user2 unused)
 */
static void ipa_tag_free_buf(void *user1, int user2)
{
	kfree(user1);
}
4477
/* descriptor completion callback: frees the dummy skb passed via user1
 * (user2 unused); safe from any context
 */
static void ipa_tag_free_skb(void *user1, int user2)
{
	dev_kfree_skb_any((struct sk_buff *)user1);
}
4482
4483#define REQUIRED_TAG_PROCESS_DESCRIPTORS 4
4484
/* ipa_tag_process() - Initiates a tag process. Incorporates the input
 * descriptors
 *
 * @desc: descriptors with commands for IC (may be NULL when descs_num is 0)
 * @descs_num: amount of descriptors in the above variable
 * @timeout: time to wait for the TAG response, in jiffies
 *
 * Note: The descriptors are copied (if there's room), the client needs to
 * free his descriptors afterwards
 *
 * Return: 0 or negative in case of failure
 */
int ipa_tag_process(struct ipa_desc desc[],
	int descs_num,
	unsigned long timeout)
{
	struct ipa_sys_context *sys;
	struct ipa_desc *tag_desc;
	int desc_idx = 0;
	struct ipa_ip_packet_init *pkt_init;
	struct ipa_register_write *reg_write_nop;
	struct ipa_ip_packet_tag_status *status;
	int i;
	struct sk_buff *dummy_skb;
	int res;
	struct ipa_tag_completion *comp;
	int ep_idx;
	/* allocate from the DMA zone when the target requires it */
	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);

	/* Not enough room for the required descriptors for the tag process */
	if (IPA_TAG_MAX_DESC - descs_num < REQUIRED_TAG_PROCESS_DESCRIPTORS) {
		IPAERR("up to %d descriptors are allowed (received %d)\n",
			IPA_TAG_MAX_DESC - REQUIRED_TAG_PROCESS_DESCRIPTORS,
			descs_num);
		return -ENOMEM;
	}

	/* TAG commands are sent on the APPS command-producer pipe */
	ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
	if (-1 == ep_idx) {
		IPAERR("Client %u is not mapped\n",
			IPA_CLIENT_APPS_CMD_PROD);
		return -EFAULT;
	}
	sys = ipa_ctx->ep[ep_idx].sys;

	tag_desc = kzalloc(sizeof(*tag_desc) * IPA_TAG_MAX_DESC, flag);
	if (!tag_desc) {
		IPAERR("failed to allocate memory\n");
		res = -ENOMEM;
		goto fail_alloc_desc;
	}

	/* IP_PACKET_INIT IC for tag status to be sent to apps */
	pkt_init = kzalloc(sizeof(*pkt_init), flag);
	if (!pkt_init) {
		IPAERR("failed to allocate memory\n");
		res = -ENOMEM;
		goto fail_alloc_pkt_init;
	}

	pkt_init->destination_pipe_index =
		ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);

	/* descriptor callbacks free each payload once the HW consumed it */
	tag_desc[desc_idx].opcode = IPA_IP_PACKET_INIT;
	tag_desc[desc_idx].pyld = pkt_init;
	tag_desc[desc_idx].len = sizeof(*pkt_init);
	tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
	tag_desc[desc_idx].callback = ipa_tag_free_buf;
	tag_desc[desc_idx].user1 = pkt_init;
	desc_idx++;

	/* NO-OP IC for ensuring that IPA pipeline is empty */
	reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag);
	if (!reg_write_nop) {
		IPAERR("no mem\n");
		res = -ENOMEM;
		goto fail_free_desc;
	}

	/* zero mask => the register write changes nothing (pure barrier) */
	reg_write_nop->skip_pipeline_clear = 0;
	reg_write_nop->value_mask = 0x0;

	tag_desc[desc_idx].opcode = IPA_REGISTER_WRITE;
	tag_desc[desc_idx].pyld = reg_write_nop;
	tag_desc[desc_idx].len = sizeof(*reg_write_nop);
	tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
	tag_desc[desc_idx].callback = ipa_tag_free_buf;
	tag_desc[desc_idx].user1 = reg_write_nop;
	desc_idx++;

	/* status IC */
	status = kzalloc(sizeof(*status), flag);
	if (!status) {
		IPAERR("no mem\n");
		res = -ENOMEM;
		goto fail_free_desc;
	}

	/* IPA_COOKIE lets the rx handler recognize this TAG status */
	status->tag_f_2 = IPA_COOKIE;

	tag_desc[desc_idx].opcode = IPA_IP_PACKET_TAG_STATUS;
	tag_desc[desc_idx].pyld = status;
	tag_desc[desc_idx].len = sizeof(*status);
	tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
	tag_desc[desc_idx].callback = ipa_tag_free_buf;
	tag_desc[desc_idx].user1 = status;
	desc_idx++;

	/* Copy the required descriptors from the client now */
	if (desc) {
		memcpy(&(tag_desc[desc_idx]), desc, descs_num *
			sizeof(struct ipa_desc));
		desc_idx += descs_num;
	}

	comp = kzalloc(sizeof(*comp), GFP_KERNEL);
	if (!comp) {
		IPAERR("no mem\n");
		res = -ENOMEM;
		goto fail_free_desc;
	}
	init_completion(&comp->comp);

	/* completion needs to be released from both here and rx handler */
	atomic_set(&comp->cnt, 2);

	/* dummy packet to send to IPA. packet payload is a pointer to the
	 * completion object (sizeof(comp) is the size of the pointer); the
	 * rx handler reads the pointer back out of the payload
	 */
	dummy_skb = alloc_skb(sizeof(comp), flag);
	if (!dummy_skb) {
		IPAERR("failed to allocate memory\n");
		res = -ENOMEM;
		goto fail_free_skb;
	}

	memcpy(skb_put(dummy_skb, sizeof(comp)), &comp, sizeof(comp));

	tag_desc[desc_idx].pyld = dummy_skb->data;
	tag_desc[desc_idx].len = dummy_skb->len;
	tag_desc[desc_idx].type = IPA_DATA_DESC_SKB;
	tag_desc[desc_idx].callback = ipa_tag_free_skb;
	tag_desc[desc_idx].user1 = dummy_skb;
	desc_idx++;

	/* send all descriptors to IPA with single EOT */
	res = ipa_send(sys, desc_idx, tag_desc, true);
	if (res) {
		IPAERR("failed to send TAG packets %d\n", res);
		res = -ENOMEM;
		goto fail_send;
	}
	/* on success ownership of all payloads moved to the completion
	 * callbacks; only the descriptor array itself is freed here
	 */
	kfree(tag_desc);
	tag_desc = NULL;

	IPADBG("waiting for TAG response\n");
	res = wait_for_completion_timeout(&comp->comp, timeout);
	if (res == 0) {
		IPAERR("timeout (%lu msec) on waiting for TAG response\n",
			timeout);
		WARN_ON(1);
		/* last one to decrement the refcount frees comp */
		if (atomic_dec_return(&comp->cnt) == 0)
			kfree(comp);
		return -ETIME;
	}

	IPADBG("TAG response arrived!\n");
	if (atomic_dec_return(&comp->cnt) == 0)
		kfree(comp);

	/* sleep for short period to ensure IPA wrote all packets to BAM */
	usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC);

	return 0;

fail_send:
	dev_kfree_skb_any(dummy_skb);
	desc_idx--;
fail_free_skb:
	kfree(comp);
fail_free_desc:
	/*
	 * Free only the first descriptors allocated here.
	 * [pkt_init, status, nop]
	 * The user is responsible to free his allocations
	 * in case of failure.
	 * The min is required because we may fail during
	 * of the initial allocations above
	 */
	for (i = 0; i < min(REQUIRED_TAG_PROCESS_DESCRIPTORS-1, desc_idx); i++)
		kfree(tag_desc[i].user1);

fail_alloc_pkt_init:
	kfree(tag_desc);
fail_alloc_desc:
	return res;
}
4679
4680/**
4681 * ipa_tag_generate_force_close_desc() - generate descriptors for force close
4682 * immediate command
4683 *
4684 * @desc: descriptors for IC
4685 * @desc_size: desc array size
4686 * @start_pipe: first pipe to close aggregation
4687 * @end_pipe: last (non-inclusive) pipe to close aggregation
4688 *
4689 * Return: number of descriptors written or negative in case of failure
4690 */
4691static int ipa_tag_generate_force_close_desc(struct ipa_desc desc[],
4692 int desc_size, int start_pipe, int end_pipe)
4693{
4694 int i;
4695 u32 aggr_init;
4696 int desc_idx = 0;
4697 int res;
4698 struct ipa_register_write *reg_write_agg_close;
4699
4700 for (i = start_pipe; i < end_pipe; i++) {
4701 aggr_init = ipa_read_reg(ipa_ctx->mmio,
4702 IPA_ENDP_INIT_AGGR_N_OFST_v2_0(i));
4703 if (((aggr_init & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) >>
4704 IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT) != IPA_ENABLE_AGGR)
4705 continue;
4706 IPADBG("Force close ep: %d\n", i);
4707 if (desc_idx + 1 > desc_size) {
4708 IPAERR("Internal error - no descriptors\n");
4709 res = -EFAULT;
4710 goto fail_no_desc;
4711 }
4712
4713 reg_write_agg_close = kzalloc(sizeof(*reg_write_agg_close),
4714 GFP_KERNEL);
4715 if (!reg_write_agg_close) {
4716 IPAERR("no mem\n");
4717 res = -ENOMEM;
4718 goto fail_alloc_reg_write_agg_close;
4719 }
4720
4721 reg_write_agg_close->skip_pipeline_clear = 0;
4722 reg_write_agg_close->offset = IPA_ENDP_INIT_AGGR_N_OFST_v2_0(i);
4723 reg_write_agg_close->value =
4724 (1 & IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK) <<
4725 IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
4726 reg_write_agg_close->value_mask =
4727 IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK <<
4728 IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
4729
4730 desc[desc_idx].opcode = IPA_REGISTER_WRITE;
4731 desc[desc_idx].pyld = reg_write_agg_close;
4732 desc[desc_idx].len = sizeof(*reg_write_agg_close);
4733 desc[desc_idx].type = IPA_IMM_CMD_DESC;
4734 desc[desc_idx].callback = ipa_tag_free_buf;
4735 desc[desc_idx].user1 = reg_write_agg_close;
4736 desc_idx++;
4737 }
4738
4739 return desc_idx;
4740
4741fail_alloc_reg_write_agg_close:
4742 for (i = 0; i < desc_idx; i++)
4743 kfree(desc[desc_idx].user1);
4744fail_no_desc:
4745 return res;
4746}
4747
4748/**
4749 * ipa_tag_aggr_force_close() - Force close aggregation
4750 *
4751 * @pipe_num: pipe number or -1 for all pipes
4752 */
4753int ipa_tag_aggr_force_close(int pipe_num)
4754{
4755 struct ipa_desc *desc;
4756 int res = -1;
4757 int start_pipe;
4758 int end_pipe;
4759 int num_descs;
4760 int num_aggr_descs;
4761
4762 if (pipe_num < -1 || pipe_num >= (int)ipa_ctx->ipa_num_pipes) {
4763 IPAERR("Invalid pipe number %d\n", pipe_num);
4764 return -EINVAL;
4765 }
4766
4767 if (pipe_num == -1) {
4768 start_pipe = 0;
4769 end_pipe = ipa_ctx->ipa_num_pipes;
4770 } else {
4771 start_pipe = pipe_num;
4772 end_pipe = pipe_num + 1;
4773 }
4774
4775 num_descs = end_pipe - start_pipe;
4776
4777 desc = kcalloc(num_descs, sizeof(*desc), GFP_KERNEL);
4778 if (!desc) {
4779 IPAERR("no mem\n");
4780 return -ENOMEM;
4781 }
4782
4783 /* Force close aggregation on all valid pipes with aggregation */
4784 num_aggr_descs = ipa_tag_generate_force_close_desc(desc, num_descs,
4785 start_pipe, end_pipe);
4786 if (num_aggr_descs < 0) {
4787 IPAERR("ipa_tag_generate_force_close_desc failed %d\n",
4788 num_aggr_descs);
4789 goto fail_free_desc;
4790 }
4791
4792 res = ipa_tag_process(desc, num_aggr_descs,
4793 IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT);
4794
4795fail_free_desc:
4796 kfree(desc);
4797
4798 return res;
4799}
4800
4801/**
4802 * ipa2_is_ready() - check if IPA module was initialized
4803 * successfully
4804 *
4805 * Return value: true for yes; false for no
4806 */
4807bool ipa2_is_ready(void)
4808{
4809 return (ipa_ctx != NULL) ? true : false;
4810}
4811
4812/**
4813 * ipa2_is_client_handle_valid() - check if IPA client handle is valid handle
4814 *
4815 * Return value: true for yes; false for no
4816 */
4817bool ipa2_is_client_handle_valid(u32 clnt_hdl)
4818{
4819 if (unlikely(!ipa_ctx)) {
4820 IPAERR("IPA driver was not initialized\n");
4821 return false;
4822 }
4823
4824 if (clnt_hdl >= 0 && clnt_hdl < ipa_ctx->ipa_num_pipes)
4825 return true;
4826 return false;
4827}
4828
4829/**
4830 * ipa2_proxy_clk_unvote() - called to remove IPA clock proxy vote
4831 *
4832 * Return value: none
4833 */
4834void ipa2_proxy_clk_unvote(void)
4835{
4836 if (ipa2_is_ready() && ipa_ctx->q6_proxy_clk_vote_valid) {
4837 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PROXY_CLK_VOTE");
4838 ipa_ctx->q6_proxy_clk_vote_valid = false;
4839 }
4840}
4841
4842/**
4843 * ipa2_proxy_clk_vote() - called to add IPA clock proxy vote
4844 *
4845 * Return value: none
4846 */
4847void ipa2_proxy_clk_vote(void)
4848{
4849 if (ipa2_is_ready() && !ipa_ctx->q6_proxy_clk_vote_valid) {
4850 IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE");
4851 ipa_ctx->q6_proxy_clk_vote_valid = true;
4852 }
4853}
4854
4855
4856/**
4857 * ipa2_get_smem_restr_bytes()- Return IPA smem restricted bytes
4858 *
4859 * Return value: u16 - number of IPA smem restricted bytes
4860 */
4861u16 ipa2_get_smem_restr_bytes(void)
4862{
4863 if (ipa_ctx)
4864 return ipa_ctx->smem_restricted_bytes;
4865
4866 IPAERR("IPA Driver not initialized\n");
4867
4868 return 0;
4869}
4870
4871/**
4872 * ipa2_get_modem_cfg_emb_pipe_flt()- Return ipa_ctx->modem_cfg_emb_pipe_flt
4873 *
4874 * Return value: true if modem configures embedded pipe flt, false otherwise
4875 */
4876bool ipa2_get_modem_cfg_emb_pipe_flt(void)
4877{
4878 if (ipa_ctx)
4879 return ipa_ctx->modem_cfg_emb_pipe_flt;
4880
4881 IPAERR("IPA driver has not been initialized\n");
4882
4883 return false;
4884}
4885/**
4886 * ipa2_get_transport_type()- Return IPA_TRANSPORT_TYPE_SPS
4887 *
4888 * Return value: enum ipa_transport_type
4889 */
4890enum ipa_transport_type ipa2_get_transport_type(void)
4891{
4892 return IPA_TRANSPORT_TYPE_SPS;
4893}
4894
4895u32 ipa_get_num_pipes(void)
4896{
4897 if (ipa_ctx->ipa_hw_type == IPA_HW_v2_6L)
4898 return ipa_read_reg(ipa_ctx->mmio, IPA_ENABLED_PIPES_OFST);
4899 else
4900 return IPA_MAX_NUM_PIPES;
4901}
4902EXPORT_SYMBOL(ipa_get_num_pipes);
4903
4904/**
4905 * ipa2_disable_apps_wan_cons_deaggr()-
4906 * set ipa_ctx->ipa_client_apps_wan_cons_agg_gro
4907 *
4908 * Return value: 0 or negative in case of failure
4909 */
4910int ipa2_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count)
4911{
4912 int res = -1;
4913
4914 /* checking if IPA-HW can support */
4915 if ((agg_size >> 10) >
4916 IPA_AGGR_BYTE_LIMIT) {
4917 IPAWANERR("IPA-AGG byte limit %d\n",
4918 IPA_AGGR_BYTE_LIMIT);
4919 IPAWANERR("exceed aggr_byte_limit\n");
4920 return res;
4921 }
4922 if (agg_count >
4923 IPA_AGGR_PKT_LIMIT) {
4924 IPAWANERR("IPA-AGG pkt limit %d\n",
4925 IPA_AGGR_PKT_LIMIT);
4926 IPAWANERR("exceed aggr_pkt_limit\n");
4927 return res;
4928 }
4929
4930 if (ipa_ctx) {
4931 ipa_ctx->ipa_client_apps_wan_cons_agg_gro = true;
4932 return 0;
4933 }
4934 return res;
4935}
4936
Amir Levy3be373c2017-03-05 16:31:30 +02004937static const struct ipa_gsi_ep_config *ipa2_get_gsi_ep_info
4938 (enum ipa_client_type client)
Amir Levy9659e592016-10-27 18:08:27 +03004939{
4940 IPAERR("Not supported for IPA 2.x\n");
4941 return NULL;
4942}
4943
/* Stub: GSI channels do not exist on IPA 2.x (SPS/BAM transport only). */
static int ipa2_stop_gsi_channel(u32 clnt_hdl)
{
	IPAERR("Not supported for IPA 2.x\n");
	return -EFAULT;
}
4949
4950static void *ipa2_get_ipc_logbuf(void)
4951{
Utkarsh Saxena41d57c52016-11-16 12:04:28 +05304952 if (ipa_ctx)
4953 return ipa_ctx->logbuf;
4954
Amir Levy9659e592016-10-27 18:08:27 +03004955 return NULL;
4956}
4957
4958static void *ipa2_get_ipc_logbuf_low(void)
4959{
Utkarsh Saxena41d57c52016-11-16 12:04:28 +05304960 if (ipa_ctx)
4961 return ipa_ctx->logbuf_low;
4962
Amir Levy9659e592016-10-27 18:08:27 +03004963 return NULL;
4964}
4965
/* Copy the cached head-of-line-blocking config of endpoint @ep_idx
 * into @holb. Caller must pass a valid endpoint index.
 */
static void ipa2_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb)
{
	*holb = ipa_ctx->ep[ep_idx].holb;
}
4970
4971static int ipa2_generate_tag_process(void)
4972{
4973 int res;
4974
4975 res = ipa_tag_process(NULL, 0, HZ);
4976 if (res)
4977 IPAERR("TAG process failed\n");
4978
4979 return res;
4980}
4981
/* Set whether a TAG process should be issued before clock gating. */
static void ipa2_set_tag_process_before_gating(bool val)
{
	ipa_ctx->tag_process_before_gating = val;
}
4986
/**
 * ipa2_bind_api_controller() - fill the generic IPA API dispatch table
 * with the IPA v2.x implementations
 * @ipa_hw_type: detected IPA HW version; must be in [IPA_HW_v2_0, IPA_HW_v3_0)
 * @api_ctrl: dispatch table to populate
 *
 * Returns 0 on success, -EPERM if the HW version is not an IPA 2.x one.
 */
int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
	struct ipa_api_controller *api_ctrl)
{
	/* this file only implements the IPA v2.x API family */
	if (ipa_hw_type < IPA_HW_v2_0 || ipa_hw_type >= IPA_HW_v3_0) {
		IPAERR("Unsupported IPA HW version %d\n", ipa_hw_type);
		WARN_ON(1);
		return -EPERM;
	}

	/* endpoint connect/config */
	api_ctrl->ipa_connect = ipa2_connect;
	api_ctrl->ipa_disconnect = ipa2_disconnect;
	api_ctrl->ipa_reset_endpoint = ipa2_reset_endpoint;
	api_ctrl->ipa_clear_endpoint_delay = ipa2_clear_endpoint_delay;
	api_ctrl->ipa_disable_endpoint = ipa2_disable_endpoint;
	api_ctrl->ipa_cfg_ep = ipa2_cfg_ep;
	api_ctrl->ipa_cfg_ep_nat = ipa2_cfg_ep_nat;
	api_ctrl->ipa_cfg_ep_hdr = ipa2_cfg_ep_hdr;
	api_ctrl->ipa_cfg_ep_hdr_ext = ipa2_cfg_ep_hdr_ext;
	api_ctrl->ipa_cfg_ep_mode = ipa2_cfg_ep_mode;
	api_ctrl->ipa_cfg_ep_aggr = ipa2_cfg_ep_aggr;
	api_ctrl->ipa_cfg_ep_deaggr = ipa2_cfg_ep_deaggr;
	api_ctrl->ipa_cfg_ep_route = ipa2_cfg_ep_route;
	api_ctrl->ipa_cfg_ep_holb = ipa2_cfg_ep_holb;
	api_ctrl->ipa_get_holb = ipa2_get_holb;
	api_ctrl->ipa_set_tag_process_before_gating =
			ipa2_set_tag_process_before_gating;
	api_ctrl->ipa_cfg_ep_cfg = ipa2_cfg_ep_cfg;
	api_ctrl->ipa_cfg_ep_metadata_mask = ipa2_cfg_ep_metadata_mask;
	api_ctrl->ipa_cfg_ep_holb_by_client = ipa2_cfg_ep_holb_by_client;
	api_ctrl->ipa_cfg_ep_ctrl = ipa2_cfg_ep_ctrl;
	/* header table management */
	api_ctrl->ipa_add_hdr = ipa2_add_hdr;
	api_ctrl->ipa_del_hdr = ipa2_del_hdr;
	api_ctrl->ipa_commit_hdr = ipa2_commit_hdr;
	api_ctrl->ipa_reset_hdr = ipa2_reset_hdr;
	api_ctrl->ipa_get_hdr = ipa2_get_hdr;
	api_ctrl->ipa_put_hdr = ipa2_put_hdr;
	api_ctrl->ipa_copy_hdr = ipa2_copy_hdr;
	api_ctrl->ipa_add_hdr_proc_ctx = ipa2_add_hdr_proc_ctx;
	api_ctrl->ipa_del_hdr_proc_ctx = ipa2_del_hdr_proc_ctx;
	/* routing and filtering tables */
	api_ctrl->ipa_add_rt_rule = ipa2_add_rt_rule;
	api_ctrl->ipa_del_rt_rule = ipa2_del_rt_rule;
	api_ctrl->ipa_commit_rt = ipa2_commit_rt;
	api_ctrl->ipa_reset_rt = ipa2_reset_rt;
	api_ctrl->ipa_get_rt_tbl = ipa2_get_rt_tbl;
	api_ctrl->ipa_put_rt_tbl = ipa2_put_rt_tbl;
	api_ctrl->ipa_query_rt_index = ipa2_query_rt_index;
	api_ctrl->ipa_mdfy_rt_rule = ipa2_mdfy_rt_rule;
	api_ctrl->ipa_add_flt_rule = ipa2_add_flt_rule;
	api_ctrl->ipa_del_flt_rule = ipa2_del_flt_rule;
	api_ctrl->ipa_mdfy_flt_rule = ipa2_mdfy_flt_rule;
	api_ctrl->ipa_commit_flt = ipa2_commit_flt;
	api_ctrl->ipa_reset_flt = ipa2_reset_flt;
	/* NAT */
	api_ctrl->ipa_allocate_nat_device = ipa2_allocate_nat_device;
	api_ctrl->ipa_nat_init_cmd = ipa2_nat_init_cmd;
	api_ctrl->ipa_nat_dma_cmd = ipa2_nat_dma_cmd;
	api_ctrl->ipa_nat_del_cmd = ipa2_nat_del_cmd;
	/* messaging and interface registration */
	api_ctrl->ipa_send_msg = ipa2_send_msg;
	api_ctrl->ipa_register_pull_msg = ipa2_register_pull_msg;
	api_ctrl->ipa_deregister_pull_msg = ipa2_deregister_pull_msg;
	api_ctrl->ipa_register_intf = ipa2_register_intf;
	api_ctrl->ipa_register_intf_ext = ipa2_register_intf_ext;
	api_ctrl->ipa_deregister_intf = ipa2_deregister_intf;
	api_ctrl->ipa_set_aggr_mode = ipa2_set_aggr_mode;
	api_ctrl->ipa_set_qcncm_ndp_sig = ipa2_set_qcncm_ndp_sig;
	api_ctrl->ipa_set_single_ndp_per_mbim = ipa2_set_single_ndp_per_mbim;
	/* data path */
	api_ctrl->ipa_tx_dp = ipa2_tx_dp;
	api_ctrl->ipa_tx_dp_mul = ipa2_tx_dp_mul;
	api_ctrl->ipa_free_skb = ipa2_free_skb;
	api_ctrl->ipa_setup_sys_pipe = ipa2_setup_sys_pipe;
	api_ctrl->ipa_teardown_sys_pipe = ipa2_teardown_sys_pipe;
	api_ctrl->ipa_sys_update_gsi_hdls = ipa2_sys_update_gsi_hdls;
	api_ctrl->ipa_sys_setup = ipa2_sys_setup;
	api_ctrl->ipa_sys_teardown = ipa2_sys_teardown;
	/* WDI (WLAN) offload */
	api_ctrl->ipa_connect_wdi_pipe = ipa2_connect_wdi_pipe;
	api_ctrl->ipa_disconnect_wdi_pipe = ipa2_disconnect_wdi_pipe;
	api_ctrl->ipa_enable_wdi_pipe = ipa2_enable_wdi_pipe;
	api_ctrl->ipa_disable_wdi_pipe = ipa2_disable_wdi_pipe;
	api_ctrl->ipa_resume_wdi_pipe = ipa2_resume_wdi_pipe;
	api_ctrl->ipa_suspend_wdi_pipe = ipa2_suspend_wdi_pipe;
	api_ctrl->ipa_get_wdi_stats = ipa2_get_wdi_stats;
	api_ctrl->ipa_get_smem_restr_bytes = ipa2_get_smem_restr_bytes;
	api_ctrl->ipa_broadcast_wdi_quota_reach_ind =
			ipa2_broadcast_wdi_quota_reach_ind;
	api_ctrl->ipa_uc_wdi_get_dbpa = ipa2_uc_wdi_get_dbpa;
	api_ctrl->ipa_uc_reg_rdyCB = ipa2_uc_reg_rdyCB;
	api_ctrl->ipa_uc_dereg_rdyCB = ipa2_uc_dereg_rdyCB;
	api_ctrl->ipa_create_wdi_mapping = ipa2_create_wdi_mapping;
	api_ctrl->ipa_release_wdi_mapping = ipa2_release_wdi_mapping;
	/* tethering bridge */
	api_ctrl->teth_bridge_init = ipa2_teth_bridge_init;
	api_ctrl->teth_bridge_disconnect = ipa2_teth_bridge_disconnect;
	api_ctrl->teth_bridge_connect = ipa2_teth_bridge_connect;
	api_ctrl->ipa_set_client = ipa2_set_client;
	api_ctrl->ipa_get_client = ipa2_get_client;
	api_ctrl->ipa_get_client_uplink = ipa2_get_client_uplink;
	/* IPA DMA */
	api_ctrl->ipa_dma_init = ipa2_dma_init;
	api_ctrl->ipa_dma_enable = ipa2_dma_enable;
	api_ctrl->ipa_dma_disable = ipa2_dma_disable;
	api_ctrl->ipa_dma_sync_memcpy = ipa2_dma_sync_memcpy;
	api_ctrl->ipa_dma_async_memcpy = ipa2_dma_async_memcpy;
	api_ctrl->ipa_dma_uc_memcpy = ipa2_dma_uc_memcpy;
	api_ctrl->ipa_dma_destroy = ipa2_dma_destroy;
	/* MHI */
	api_ctrl->ipa_mhi_init_engine = ipa2_mhi_init_engine;
	api_ctrl->ipa_connect_mhi_pipe = ipa2_connect_mhi_pipe;
	api_ctrl->ipa_disconnect_mhi_pipe = ipa2_disconnect_mhi_pipe;
	api_ctrl->ipa_uc_mhi_reset_channel = ipa2_uc_mhi_reset_channel;
	api_ctrl->ipa_mhi_sps_channel_empty = ipa2_mhi_sps_channel_empty;
	api_ctrl->ipa_generate_tag_process = ipa2_generate_tag_process;
	api_ctrl->ipa_disable_sps_pipe = ipa2_disable_sps_pipe;
	api_ctrl->ipa_qmi_enable_force_clear_datapath_send =
			qmi_enable_force_clear_datapath_send;
	api_ctrl->ipa_qmi_disable_force_clear_datapath_send =
			qmi_disable_force_clear_datapath_send;
	api_ctrl->ipa_mhi_reset_channel_internal =
			ipa2_mhi_reset_channel_internal;
	api_ctrl->ipa_mhi_start_channel_internal =
			ipa2_mhi_start_channel_internal;
	api_ctrl->ipa_mhi_resume_channels_internal =
			ipa2_mhi_resume_channels_internal;
	api_ctrl->ipa_uc_mhi_send_dl_ul_sync_info =
			ipa2_uc_mhi_send_dl_ul_sync_info;
	api_ctrl->ipa_uc_mhi_init = ipa2_uc_mhi_init;
	api_ctrl->ipa_uc_mhi_suspend_channel = ipa2_uc_mhi_suspend_channel;
	api_ctrl->ipa_uc_mhi_stop_event_update_channel =
			ipa2_uc_mhi_stop_event_update_channel;
	api_ctrl->ipa_uc_mhi_cleanup = ipa2_uc_mhi_cleanup;
	api_ctrl->ipa_uc_mhi_print_stats = ipa2_uc_mhi_print_stats;
	api_ctrl->ipa_uc_state_check = ipa2_uc_state_check;
	api_ctrl->ipa_write_qmap_id = ipa2_write_qmap_id;
	/* interrupts, clocks and misc */
	api_ctrl->ipa_add_interrupt_handler = ipa2_add_interrupt_handler;
	api_ctrl->ipa_remove_interrupt_handler = ipa2_remove_interrupt_handler;
	api_ctrl->ipa_restore_suspend_handler = ipa2_restore_suspend_handler;
	api_ctrl->ipa_bam_reg_dump = ipa2_bam_reg_dump;
	api_ctrl->ipa_get_ep_mapping = ipa2_get_ep_mapping;
	api_ctrl->ipa_is_ready = ipa2_is_ready;
	api_ctrl->ipa_proxy_clk_vote = ipa2_proxy_clk_vote;
	api_ctrl->ipa_proxy_clk_unvote = ipa2_proxy_clk_unvote;
	api_ctrl->ipa_is_client_handle_valid = ipa2_is_client_handle_valid;
	api_ctrl->ipa_get_client_mapping = ipa2_get_client_mapping;
	api_ctrl->ipa_get_rm_resource_from_ep = ipa2_get_rm_resource_from_ep;
	api_ctrl->ipa_get_modem_cfg_emb_pipe_flt =
		ipa2_get_modem_cfg_emb_pipe_flt;
	api_ctrl->ipa_get_transport_type = ipa2_get_transport_type;
	api_ctrl->ipa_ap_suspend = ipa2_ap_suspend;
	api_ctrl->ipa_ap_resume = ipa2_ap_resume;
	api_ctrl->ipa_get_smmu_domain = ipa2_get_smmu_domain;
	api_ctrl->ipa_disable_apps_wan_cons_deaggr =
		ipa2_disable_apps_wan_cons_deaggr;
	api_ctrl->ipa_get_dma_dev = ipa2_get_dma_dev;
	/* GSI entries bind to stubs - not supported on IPA 2.x */
	api_ctrl->ipa_get_gsi_ep_info = ipa2_get_gsi_ep_info;
	api_ctrl->ipa_stop_gsi_channel = ipa2_stop_gsi_channel;
	api_ctrl->ipa_register_ipa_ready_cb = ipa2_register_ipa_ready_cb;
	api_ctrl->ipa_inc_client_enable_clks = ipa2_inc_client_enable_clks;
	api_ctrl->ipa_dec_client_disable_clks = ipa2_dec_client_disable_clks;
	api_ctrl->ipa_inc_client_enable_clks_no_block =
		ipa2_inc_client_enable_clks_no_block;
	api_ctrl->ipa_suspend_resource_no_block =
		ipa2_suspend_resource_no_block;
	api_ctrl->ipa_resume_resource = ipa2_resume_resource;
	api_ctrl->ipa_suspend_resource_sync = ipa2_suspend_resource_sync;
	api_ctrl->ipa_set_required_perf_profile =
		ipa2_set_required_perf_profile;
	api_ctrl->ipa_get_ipc_logbuf = ipa2_get_ipc_logbuf;
	api_ctrl->ipa_get_ipc_logbuf_low = ipa2_get_ipc_logbuf_low;
	api_ctrl->ipa_rx_poll = ipa2_rx_poll;
	api_ctrl->ipa_recycle_wan_skb = ipa2_recycle_wan_skb;
	/* uC NTN offload */
	api_ctrl->ipa_setup_uc_ntn_pipes = ipa2_setup_uc_ntn_pipes;
	api_ctrl->ipa_tear_down_uc_offload_pipes =
		ipa2_tear_down_uc_offload_pipes;
	api_ctrl->ipa_get_pdev = ipa2_get_pdev;
	api_ctrl->ipa_ntn_uc_reg_rdyCB = ipa2_ntn_uc_reg_rdyCB;
	api_ctrl->ipa_ntn_uc_dereg_rdyCB = ipa2_ntn_uc_dereg_rdyCB;
	/* WDI3 */
	api_ctrl->ipa_conn_wdi3_pipes = ipa2_conn_wdi3_pipes;
	api_ctrl->ipa_disconn_wdi3_pipes = ipa2_disconn_wdi3_pipes;
	api_ctrl->ipa_enable_wdi3_pipes = ipa2_enable_wdi3_pipes;
	api_ctrl->ipa_disable_wdi3_pipes = ipa2_disable_wdi3_pipes;

	return 0;
}
5165
5166/**
5167 * ipa_get_sys_yellow_wm()- Return yellow WM value for IPA SYS pipes.
5168 *
5169 * Return value: IPA_YELLOW_MARKER_SYS_CFG_OFST register if IPA_HW_v2.6L,
5170 * IPA_DEFAULT_SYS_YELLOW_WM otherwise.
5171 */
5172u32 ipa_get_sys_yellow_wm(struct ipa_sys_context *sys)
5173{
Utkarsh Saxena4badc042017-03-03 15:38:45 +05305174 if (ipa_ctx->ipa_hw_type == IPA_HW_v2_6L &&
5175 ipa_ctx->ipa_uc_monitor_holb) {
Amir Levy9659e592016-10-27 18:08:27 +03005176 return ipa_read_reg(ipa_ctx->mmio,
5177 IPA_YELLOW_MARKER_SYS_CFG_OFST);
5178 } else {
5179 if (!sys)
5180 return 0;
5181
5182 return IPA_DEFAULT_SYS_YELLOW_WM * sys->rx_buff_sz;
5183 }
5184}
5185EXPORT_SYMBOL(ipa_get_sys_yellow_wm);
5186
5187void ipa_suspend_apps_pipes(bool suspend)
5188{
5189 struct ipa_ep_cfg_ctrl cfg;
5190 int ipa_ep_idx;
5191 u32 lan_empty = 0, wan_empty = 0;
5192 int ret;
5193 struct sps_event_notify notify;
5194 struct ipa_ep_context *ep;
5195
5196 memset(&cfg, 0, sizeof(cfg));
5197 cfg.ipa_ep_suspend = suspend;
5198
5199 ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
5200 ep = &ipa_ctx->ep[ipa_ep_idx];
5201 if (ep->valid) {
5202 ipa2_cfg_ep_ctrl(ipa_ep_idx, &cfg);
5203 /* Check if the pipes are empty. */
5204 ret = sps_is_pipe_empty(ep->ep_hdl, &lan_empty);
5205 if (ret) {
5206 IPAERR("%s: sps_is_pipe_empty failed with %d\n",
5207 __func__, ret);
5208 }
5209 if (!lan_empty) {
5210 IPADBG("LAN Cons is not-empty. Enter poll mode.\n");
5211 notify.user = ep->sys;
5212 notify.event_id = SPS_EVENT_EOT;
5213 if (ep->sys->sps_callback)
5214 ep->sys->sps_callback(&notify);
5215 }
5216 }
5217
5218 ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
5219 /* Considering the case for SSR. */
5220 if (ipa_ep_idx == -1) {
5221 IPADBG("Invalid client.\n");
5222 return;
5223 }
5224 ep = &ipa_ctx->ep[ipa_ep_idx];
5225 if (ep->valid) {
5226 ipa2_cfg_ep_ctrl(ipa_ep_idx, &cfg);
5227 /* Check if the pipes are empty. */
5228 ret = sps_is_pipe_empty(ep->ep_hdl, &wan_empty);
5229 if (ret) {
5230 IPAERR("%s: sps_is_pipe_empty failed with %d\n",
5231 __func__, ret);
5232 }
5233 if (!wan_empty) {
5234 IPADBG("WAN Cons is not-empty. Enter poll mode.\n");
5235 notify.user = ep->sys;
5236 notify.event_id = SPS_EVENT_EOT;
5237 if (ep->sys->sps_callback)
5238 ep->sys->sps_callback(&notify);
5239 }
5240 }
5241}
Amir Levyc4222c92016-11-07 16:14:54 +02005242
5243/**
5244 * ipa2_get_pdev() - return a pointer to IPA dev struct
5245 *
5246 * Return value: a pointer to IPA dev struct
5247 *
5248 */
5249struct device *ipa2_get_pdev(void)
5250{
5251 if (!ipa_ctx)
5252 return NULL;
5253
5254 return ipa_ctx->pdev;
5255}