/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/fs.h>
#include <linux/genalloc.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/rbtree.h>
#include <linux/of_gpio.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/msm_gsi.h>
#include <linux/time.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/smem.h>
#include <soc/qcom/scm.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_ARM64

/* Outer caches unsupported on ARM64 platforms */
#define outer_flush_range(x, y)
#define __cpuc_flush_dcache_area __flush_dcache_area

#endif

#define IPA_SUBSYSTEM_NAME "ipa_fws"
#include "ipa_i.h"
#include "../ipa_rm_i.h"
#include "ipahal/ipahal.h"
#include "ipahal/ipahal_fltrt.h"

#define CREATE_TRACE_POINTS
#include "ipa_trace.h"

#define IPA_GPIO_IN_QUERY_CLK_IDX 0
#define IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX 0
#define IPA_GPIO_OUT_CLK_VOTE_IDX 1

#define IPA_SUMMING_THRESHOLD (0x10)
#define IPA_PIPE_MEM_START_OFST (0x0)
#define IPA_PIPE_MEM_SIZE (0x0)
#define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \
			       x == IPA_MODE_MOBILE_AP_WAN || \
			       x == IPA_MODE_MOBILE_AP_WLAN)
#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
#define IPA_A5_MUX_HEADER_LENGTH (8)

#define IPA_AGGR_MAX_STR_LENGTH (10)

#define CLEANUP_TAG_PROCESS_TIMEOUT 500

#define IPA_AGGR_STR_IN_BYTES(str) \
	(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
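/*
 * Example: IPA_AGGR_STR_IN_BYTES("TCP") evaluates to 4 - the string length
 * (capped at IPA_AGGR_MAX_STR_LENGTH - 1 characters) plus one byte for the
 * terminating NUL.
 */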

#define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100

#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048

#define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0
#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
#define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3

#define IPA_MHI_GSI_EVENT_RING_ID_START 10
#define IPA_MHI_GSI_EVENT_RING_ID_END 12

#define IPA_SMEM_SIZE (8 * 1024)

#define IPA_GSI_CHANNEL_HALT_MIN_SLEEP 5000
#define IPA_GSI_CHANNEL_HALT_MAX_SLEEP 10000
#define IPA_GSI_CHANNEL_HALT_MAX_TRY 10

/* round addresses to the closest page boundaries per SMMU requirements */
#define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \
	do { \
		(iova_p) = rounddown((iova), PAGE_SIZE); \
		(pa_p) = rounddown((pa), PAGE_SIZE); \
		(size_p) = roundup((size) + (pa) - (pa_p), PAGE_SIZE); \
	} while (0)
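/*
 * Example (assuming PAGE_SIZE == 4096): for iova = 0x1234, pa = 0x5678 and
 * size = 0x100 the macro yields iova_p = 0x1000, pa_p = 0x5000 and
 * size_p = 0x1000, i.e. page-aligned start addresses and a size rounded up
 * to cover every page touched by the original buffer.
 */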


/* The relative location in /lib/firmware where the FWs will reside */
#define IPA_FWS_PATH "ipa/ipa_fws.elf"

#ifdef CONFIG_COMPAT
#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_ADD_HDR, \
					compat_uptr_t)
#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_DEL_HDR, \
					compat_uptr_t)
#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_ADD_RT_RULE, \
					compat_uptr_t)
#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_DEL_RT_RULE, \
					compat_uptr_t)
#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_ADD_FLT_RULE, \
					compat_uptr_t)
#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_DEL_FLT_RULE, \
					compat_uptr_t)
#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_GET_RT_TBL, \
					compat_uptr_t)
#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_COPY_HDR, \
					compat_uptr_t)
#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_QUERY_INTF, \
					compat_uptr_t)
#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_QUERY_INTF_TX_PROPS, \
					compat_uptr_t)
#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_QUERY_INTF_RX_PROPS, \
					compat_uptr_t)
#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_QUERY_INTF_EXT_PROPS, \
					compat_uptr_t)
#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_GET_HDR, \
					compat_uptr_t)
#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_ALLOC_NAT_MEM, \
					compat_uptr_t)
#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_V4_INIT_NAT, \
					compat_uptr_t)
#define IPA_IOC_NAT_DMA32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_NAT_DMA, \
					compat_uptr_t)
#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_V4_DEL_NAT, \
					compat_uptr_t)
#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_GET_NAT_OFFSET, \
					compat_uptr_t)
#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_PULL_MSG, \
					compat_uptr_t)
#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_RM_ADD_DEPENDENCY, \
					compat_uptr_t)
#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_RM_DEL_DEPENDENCY, \
					compat_uptr_t)
#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_GENERATE_FLT_EQ, \
					compat_uptr_t)
#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_QUERY_RT_TBL_INDEX, \
					compat_uptr_t)
#define IPA_IOC_WRITE_QMAPID32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_WRITE_QMAPID, \
					compat_uptr_t)
#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_MDFY_FLT_RULE, \
					compat_uptr_t)
#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
				compat_uptr_t)
#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \
				compat_uptr_t)
#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \
				compat_uptr_t)
#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_ADD_HDR_PROC_CTX, \
				compat_uptr_t)
#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_DEL_HDR_PROC_CTX, \
				compat_uptr_t)
#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_MDFY_RT_RULE, \
				compat_uptr_t)

/**
 * struct ipa3_ioc_nat_alloc_mem32 - nat table memory allocation
 * properties
 * @dev_name: input parameter, the name of table
 * @size: input parameter, size of table in bytes
 * @offset: output parameter, offset into page in case of system memory
 */
struct ipa3_ioc_nat_alloc_mem32 {
	char dev_name[IPA_RESOURCE_NAME_MAX];
	compat_size_t size;
	compat_off_t offset;
};
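
/*
 * Illustrative sketch only (not part of the driver): compat_size_t and
 * compat_off_t are 32 bits wide for 32-bit callers, so a compat handler
 * would copy this layout in and widen it to the native
 * struct ipa_ioc_nat_alloc_mem, roughly:
 *
 *	struct ipa3_ioc_nat_alloc_mem32 nat_mem32;
 *	struct ipa_ioc_nat_alloc_mem nat_mem;
 *
 *	if (copy_from_user(&nat_mem32, (void __user *)arg,
 *			sizeof(nat_mem32)))
 *		return -EFAULT;
 *	memcpy(nat_mem.dev_name, nat_mem32.dev_name, IPA_RESOURCE_NAME_MAX);
 *	nat_mem.size = (size_t)nat_mem32.size;
 *	nat_mem.offset = (off_t)nat_mem32.offset;
 */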
#endif

#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311
#define TZ_MEM_PROTECT_REGION_ID 0x10

struct tz_smmu_ipa_protect_region_iovec_s {
	u64 input_addr;
	u64 output_addr;
	u64 size;
	u32 attr;
} __packed;

struct tz_smmu_ipa_protect_region_s {
	phys_addr_t iovec_buf;
	u32 size_bytes;
} __packed;
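
/*
 * Note (as suggested by the field names): iovec_buf is expected to hold the
 * physical address of an array of tz_smmu_ipa_protect_region_iovec_s entries
 * and size_bytes the total size of that array in bytes, forming the request
 * passed to TrustZone for TZ_MEM_PROTECT_REGION_ID.
 */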
233
Amir Levy9659e592016-10-27 18:08:27 +0300234static void ipa3_start_tag_process(struct work_struct *work);
235static DECLARE_WORK(ipa3_tag_work, ipa3_start_tag_process);
236
Amir Levya59ed3f2017-03-05 17:30:55 +0200237static void ipa3_transport_release_resource(struct work_struct *work);
238static DECLARE_DELAYED_WORK(ipa3_transport_release_resource_work,
239 ipa3_transport_release_resource);
Amir Levy9659e592016-10-27 18:08:27 +0300240static void ipa_gsi_notify_cb(struct gsi_per_notify *notify);
241
Ghanim Fodia5f376a2017-10-17 18:14:53 +0300242static void ipa3_load_ipa_fw(struct work_struct *work);
243static DECLARE_WORK(ipa3_fw_loading_work, ipa3_load_ipa_fw);
Utkarsh Saxenaded78142017-05-03 14:04:30 +0530244
Skylar Chang242952b2017-07-20 15:04:05 -0700245static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work);
246static DECLARE_WORK(ipa_dec_clients_disable_clks_on_wq_work,
247 ipa_dec_clients_disable_clks_on_wq);
248
Amir Levy9659e592016-10-27 18:08:27 +0300249static struct ipa3_plat_drv_res ipa3_res = {0, };
250struct msm_bus_scale_pdata *ipa3_bus_scale_table;
251
252static struct clk *ipa3_clk;
253
254struct ipa3_context *ipa3_ctx;
255static struct device *master_dev;
256struct platform_device *ipa3_pdev;
257static struct {
258 bool present;
259 bool arm_smmu;
Amir Levy9659e592016-10-27 18:08:27 +0300260 bool fast_map;
Michael Adisumarta93e97522017-10-06 15:49:46 -0700261 bool s1_bypass_arr[IPA_SMMU_CB_MAX];
Amir Levy9659e592016-10-27 18:08:27 +0300262 bool use_64_bit_dma_mask;
263 u32 ipa_base;
264 u32 ipa_size;
265} smmu_info;
266
267static char *active_clients_table_buf;
268
269int ipa3_active_clients_log_print_buffer(char *buf, int size)
270{
271 int i;
272 int nbytes;
273 int cnt = 0;
274 int start_idx;
275 int end_idx;
Skylar Chang69ae50e2017-07-31 13:13:29 -0700276 unsigned long flags;
Amir Levy9659e592016-10-27 18:08:27 +0300277
Skylar Chang69ae50e2017-07-31 13:13:29 -0700278 spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
Amir Levy9659e592016-10-27 18:08:27 +0300279 start_idx = (ipa3_ctx->ipa3_active_clients_logging.log_tail + 1) %
280 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
281 end_idx = ipa3_ctx->ipa3_active_clients_logging.log_head;
282 for (i = start_idx; i != end_idx;
283 i = (i + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) {
284 nbytes = scnprintf(buf + cnt, size - cnt, "%s\n",
285 ipa3_ctx->ipa3_active_clients_logging
286 .log_buffer[i]);
287 cnt += nbytes;
288 }
Skylar Chang69ae50e2017-07-31 13:13:29 -0700289 spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
290 flags);
Amir Levy9659e592016-10-27 18:08:27 +0300291
292 return cnt;
293}
294
295int ipa3_active_clients_log_print_table(char *buf, int size)
296{
297 int i;
298 struct ipa3_active_client_htable_entry *iterator;
299 int cnt = 0;
Skylar Chang69ae50e2017-07-31 13:13:29 -0700300 unsigned long flags;
Amir Levy9659e592016-10-27 18:08:27 +0300301
Skylar Chang69ae50e2017-07-31 13:13:29 -0700302 spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
Amir Levy9659e592016-10-27 18:08:27 +0300303 cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n");
304 hash_for_each(ipa3_ctx->ipa3_active_clients_logging.htable, i,
305 iterator, list) {
306 switch (iterator->type) {
307 case IPA3_ACTIVE_CLIENT_LOG_TYPE_EP:
308 cnt += scnprintf(buf + cnt, size - cnt,
309 "%-40s %-3d ENDPOINT\n",
310 iterator->id_string, iterator->count);
311 break;
312 case IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE:
313 cnt += scnprintf(buf + cnt, size - cnt,
314 "%-40s %-3d SIMPLE\n",
315 iterator->id_string, iterator->count);
316 break;
317 case IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE:
318 cnt += scnprintf(buf + cnt, size - cnt,
319 "%-40s %-3d RESOURCE\n",
320 iterator->id_string, iterator->count);
321 break;
322 case IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL:
323 cnt += scnprintf(buf + cnt, size - cnt,
324 "%-40s %-3d SPECIAL\n",
325 iterator->id_string, iterator->count);
326 break;
327 default:
328 IPAERR("Trying to print illegal active_clients type");
329 break;
330 }
331 }
332 cnt += scnprintf(buf + cnt, size - cnt,
333 "\nTotal active clients count: %d\n",
Skylar Chang242952b2017-07-20 15:04:05 -0700334 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
Skylar Chang69ae50e2017-07-31 13:13:29 -0700335 spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
336 flags);
Amir Levy9659e592016-10-27 18:08:27 +0300337
338 return cnt;
339}
340
341static int ipa3_active_clients_panic_notifier(struct notifier_block *this,
342 unsigned long event, void *ptr)
343{
Skylar Chang242952b2017-07-20 15:04:05 -0700344 mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
Amir Levy9659e592016-10-27 18:08:27 +0300345 ipa3_active_clients_log_print_table(active_clients_table_buf,
346 IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE);
347 IPAERR("%s", active_clients_table_buf);
Skylar Chang242952b2017-07-20 15:04:05 -0700348 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
Amir Levy9659e592016-10-27 18:08:27 +0300349
350 return NOTIFY_DONE;
351}
352
353static struct notifier_block ipa3_active_clients_panic_blk = {
354 .notifier_call = ipa3_active_clients_panic_notifier,
355};
356
357static int ipa3_active_clients_log_insert(const char *string)
358{
359 int head;
360 int tail;
361
362 if (!ipa3_ctx->ipa3_active_clients_logging.log_rdy)
363 return -EPERM;
364
365 head = ipa3_ctx->ipa3_active_clients_logging.log_head;
366 tail = ipa3_ctx->ipa3_active_clients_logging.log_tail;
367
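	/*
	 * The log is a circular buffer of fixed-length lines: the new entry
	 * is written at head, head then advances one slot, and if it catches
	 * up with tail the oldest entry is discarded by advancing tail too.
	 */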
	memset(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], '_',
			IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
	strlcpy(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], string,
			(size_t)IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
	head = (head + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
	if (tail == head)
		tail = (tail + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;

	ipa3_ctx->ipa3_active_clients_logging.log_tail = tail;
	ipa3_ctx->ipa3_active_clients_logging.log_head = head;

	return 0;
}

static int ipa3_active_clients_log_init(void)
{
	int i;

	spin_lock_init(&ipa3_ctx->ipa3_active_clients_logging.lock);
	ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] = kzalloc(
			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES *
			sizeof(char[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN]),
			GFP_KERNEL);
	active_clients_table_buf = kzalloc(sizeof(
			char[IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE]), GFP_KERNEL);
	if (ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] == NULL) {
		pr_err("Active Clients Logging memory allocation failed");
		goto bail;
	}
	for (i = 0; i < IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; i++) {
		ipa3_ctx->ipa3_active_clients_logging.log_buffer[i] =
			ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] +
			(IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN * i);
	}
	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
	ipa3_ctx->ipa3_active_clients_logging.log_tail =
			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
	hash_init(ipa3_ctx->ipa3_active_clients_logging.htable);
	atomic_notifier_chain_register(&panic_notifier_list,
			&ipa3_active_clients_panic_blk);
	ipa3_ctx->ipa3_active_clients_logging.log_rdy = 1;

	return 0;

bail:
	return -ENOMEM;
}

void ipa3_active_clients_log_clear(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
	ipa3_ctx->ipa3_active_clients_logging.log_tail =
			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
		flags);
}

static void ipa3_active_clients_log_destroy(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
	ipa3_ctx->ipa3_active_clients_logging.log_rdy = 0;
	kfree(ipa3_ctx->ipa3_active_clients_logging.log_buffer[0]);
	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
	ipa3_ctx->ipa3_active_clients_logging.log_tail =
			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
		flags);
}

static struct ipa_smmu_cb_ctx smmu_cb[IPA_SMMU_CB_MAX];

struct iommu_domain *ipa3_get_smmu_domain(void)
{
	if (smmu_cb[IPA_SMMU_CB_AP].valid)
		return smmu_cb[IPA_SMMU_CB_AP].mapping->domain;

	IPAERR("CB not valid\n");

	return NULL;
}

struct iommu_domain *ipa3_get_uc_smmu_domain(void)
{
	if (smmu_cb[IPA_SMMU_CB_UC].valid)
		return smmu_cb[IPA_SMMU_CB_UC].mapping->domain;

	IPAERR("CB not valid\n");

	return NULL;
}

struct iommu_domain *ipa3_get_wlan_smmu_domain(void)
{
	if (smmu_cb[IPA_SMMU_CB_WLAN].valid)
		return smmu_cb[IPA_SMMU_CB_WLAN].iommu;

	IPAERR("CB not valid\n");

	return NULL;
}


struct device *ipa3_get_dma_dev(void)
{
	return ipa3_ctx->pdev;
}

/**
 * ipa3_get_smmu_ctx()- Return the AP smmu context
 *
 * Return value: pointer to smmu context address
 */
struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(void)
{
	return &smmu_cb[IPA_SMMU_CB_AP];
}

/**
 * ipa3_get_wlan_smmu_ctx()- Return the wlan smmu context
 *
 * Return value: pointer to smmu context address
 */
struct ipa_smmu_cb_ctx *ipa3_get_wlan_smmu_ctx(void)
{
	return &smmu_cb[IPA_SMMU_CB_WLAN];
}

/**
 * ipa3_get_uc_smmu_ctx()- Return the uc smmu context
 *
 * Return value: pointer to smmu context address
 */
struct ipa_smmu_cb_ctx *ipa3_get_uc_smmu_ctx(void)
{
	return &smmu_cb[IPA_SMMU_CB_UC];
}

static int ipa3_open(struct inode *inode, struct file *filp)
{
	struct ipa3_context *ctx = NULL;

	IPADBG_LOW("ENTER\n");
	ctx = container_of(inode->i_cdev, struct ipa3_context, cdev);
	filp->private_data = ctx;

	return 0;
}

static void ipa3_wan_msg_free_cb(void *buff, u32 len, u32 type)
{
	if (!buff) {
		IPAERR("Null buffer\n");
		return;
	}

	if (type != WAN_UPSTREAM_ROUTE_ADD &&
	    type != WAN_UPSTREAM_ROUTE_DEL &&
	    type != WAN_EMBMS_CONNECT) {
		IPAERR("Wrong type given. buff %p type %d\n", buff, type);
		return;
	}

	kfree(buff);
}

static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type,
	bool is_cache)
{
	int retval;
	struct ipa_wan_msg *wan_msg;
	struct ipa_msg_meta msg_meta;
	struct ipa_wan_msg cache_wan_msg;

	wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
	if (!wan_msg) {
		IPAERR("no memory\n");
		return -ENOMEM;
	}

	if (copy_from_user((u8 *)wan_msg, (u8 *)usr_param,
		sizeof(struct ipa_wan_msg))) {
		kfree(wan_msg);
		return -EFAULT;
	}

	memcpy(&cache_wan_msg, wan_msg, sizeof(cache_wan_msg));

	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
	msg_meta.msg_type = msg_type;
	msg_meta.msg_len = sizeof(struct ipa_wan_msg);
	retval = ipa3_send_msg(&msg_meta, wan_msg, ipa3_wan_msg_free_cb);
	if (retval) {
		IPAERR_RL("ipa3_send_msg failed: %d\n", retval);
		kfree(wan_msg);
		return retval;
	}

	if (is_cache) {
		mutex_lock(&ipa3_ctx->ipa_cne_evt_lock);

		/* cache the cne event */
		memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[
			ipa3_ctx->num_ipa_cne_evt_req].wan_msg,
			&cache_wan_msg,
			sizeof(cache_wan_msg));

		memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[
			ipa3_ctx->num_ipa_cne_evt_req].msg_meta,
			&msg_meta,
			sizeof(struct ipa_msg_meta));

		ipa3_ctx->num_ipa_cne_evt_req++;
		ipa3_ctx->num_ipa_cne_evt_req %= IPA_MAX_NUM_REQ_CACHE;
		mutex_unlock(&ipa3_ctx->ipa_cne_evt_lock);
	}

	return 0;
}

static void ipa3_vlan_l2tp_msg_free_cb(void *buff, u32 len, u32 type)
{
	if (!buff) {
		IPAERR("Null buffer\n");
		return;
	}

	if (type != ADD_VLAN_IFACE &&
	    type != DEL_VLAN_IFACE &&
	    type != ADD_L2TP_VLAN_MAPPING &&
	    type != DEL_L2TP_VLAN_MAPPING) {
		IPAERR("Wrong type given. buff %pK type %d\n", buff, type);
		return;
	}

	kfree(buff);
}

static int ipa3_send_vlan_l2tp_msg(unsigned long usr_param, uint8_t msg_type)
{
	int retval;
	struct ipa_ioc_vlan_iface_info *vlan_info;
	struct ipa_ioc_l2tp_vlan_mapping_info *mapping_info;
	struct ipa_msg_meta msg_meta;

	if (msg_type == ADD_VLAN_IFACE ||
		msg_type == DEL_VLAN_IFACE) {
		vlan_info = kzalloc(sizeof(struct ipa_ioc_vlan_iface_info),
			GFP_KERNEL);
		if (!vlan_info) {
			IPAERR("no memory\n");
			return -ENOMEM;
		}

		if (copy_from_user((u8 *)vlan_info, (void __user *)usr_param,
			sizeof(struct ipa_ioc_vlan_iface_info))) {
			kfree(vlan_info);
			return -EFAULT;
		}

		memset(&msg_meta, 0, sizeof(msg_meta));
		msg_meta.msg_type = msg_type;
		msg_meta.msg_len = sizeof(struct ipa_ioc_vlan_iface_info);
		retval = ipa3_send_msg(&msg_meta, vlan_info,
			ipa3_vlan_l2tp_msg_free_cb);
		if (retval) {
			IPAERR("ipa3_send_msg failed: %d\n", retval);
			kfree(vlan_info);
			return retval;
		}
	} else if (msg_type == ADD_L2TP_VLAN_MAPPING ||
		msg_type == DEL_L2TP_VLAN_MAPPING) {
		mapping_info = kzalloc(sizeof(struct
			ipa_ioc_l2tp_vlan_mapping_info), GFP_KERNEL);
		if (!mapping_info) {
			IPAERR("no memory\n");
			return -ENOMEM;
		}

		if (copy_from_user((u8 *)mapping_info,
			(void __user *)usr_param,
			sizeof(struct ipa_ioc_l2tp_vlan_mapping_info))) {
			kfree(mapping_info);
			return -EFAULT;
		}

		memset(&msg_meta, 0, sizeof(msg_meta));
		msg_meta.msg_type = msg_type;
		msg_meta.msg_len = sizeof(struct
			ipa_ioc_l2tp_vlan_mapping_info);
		retval = ipa3_send_msg(&msg_meta, mapping_info,
			ipa3_vlan_l2tp_msg_free_cb);
		if (retval) {
			IPAERR("ipa3_send_msg failed: %d\n", retval);
			kfree(mapping_info);
			return retval;
		}
	} else {
		IPAERR("Unexpected event\n");
		return -EFAULT;
	}

	return 0;
}

static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int retval = 0;
	u32 pyld_sz;
	u8 header[128] = { 0 };
	u8 *param = NULL;
	struct ipa_ioc_nat_alloc_mem nat_mem;
	struct ipa_ioc_v4_nat_init nat_init;
	struct ipa_ioc_v4_nat_del nat_del;
	struct ipa_ioc_nat_pdn_entry mdfy_pdn;
	struct ipa_ioc_rm_dependency rm_depend;
	size_t sz;
	int pre_entry;

	IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));

	if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
		return -ENOTTY;
	if (_IOC_NR(cmd) >= IPA_IOCTL_MAX)
		return -ENOTTY;

	if (!ipa3_is_ready()) {
		IPAERR("IPA not ready, waiting for init completion\n");
		wait_for_completion(&ipa3_ctx->init_completion_obj);
	}

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	switch (cmd) {
	case IPA_IOC_ALLOC_NAT_MEM:
		if (copy_from_user((u8 *)&nat_mem, (u8 *)arg,
					sizeof(struct ipa_ioc_nat_alloc_mem))) {
			retval = -EFAULT;
			break;
		}
		/* null terminate the string */
		nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';

		if (ipa3_allocate_nat_device(&nat_mem)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, (u8 *)&nat_mem,
					sizeof(struct ipa_ioc_nat_alloc_mem))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_V4_INIT_NAT:
		if (copy_from_user((u8 *)&nat_init, (u8 *)arg,
					sizeof(struct ipa_ioc_v4_nat_init))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_nat_init_cmd(&nat_init)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_NAT_DMA:
		if (copy_from_user(header, (u8 *)arg,
					sizeof(struct ipa_ioc_nat_dma_cmd))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_nat_dma_cmd *)header)->entries;
		pyld_sz =
		   sizeof(struct ipa_ioc_nat_dma_cmd) +
		   pre_entry * sizeof(struct ipa_ioc_nat_dma_one);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}

		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_nat_dma_cmd *)param)->entries,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_V4_DEL_NAT:
		if (copy_from_user((u8 *)&nat_del, (u8 *)arg,
					sizeof(struct ipa_ioc_v4_nat_del))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_nat_del_cmd(&nat_del)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_NAT_MODIFY_PDN:
		if (copy_from_user((u8 *)&mdfy_pdn, (const void __user *)arg,
			sizeof(struct ipa_ioc_nat_pdn_entry))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_nat_mdfy_pdn(&mdfy_pdn)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_ADD_HDR:
		if (copy_from_user(header, (u8 *)arg,
					sizeof(struct ipa_ioc_add_hdr))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_add_hdr *)header)->num_hdrs;
		pyld_sz =
		   sizeof(struct ipa_ioc_add_hdr) +
		   pre_entry * sizeof(struct ipa_hdr_add);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_add_hdr *)param)->num_hdrs,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_add_hdr((struct ipa_ioc_add_hdr *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_DEL_HDR:
		if (copy_from_user(header, (u8 *)arg,
					sizeof(struct ipa_ioc_del_hdr))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_del_hdr *)header)->num_hdls;
		pyld_sz =
		   sizeof(struct ipa_ioc_del_hdr) +
		   pre_entry * sizeof(struct ipa_hdr_del);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_del_hdr *)param)->num_hdls,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_del_hdr_by_user((struct ipa_ioc_del_hdr *)param,
			true)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_ADD_RT_RULE:
		if (copy_from_user(header, (u8 *)arg,
					sizeof(struct ipa_ioc_add_rt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_add_rt_rule *)header)->num_rules;
		pyld_sz =
		   sizeof(struct ipa_ioc_add_rt_rule) +
		   pre_entry * sizeof(struct ipa_rt_rule_add);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_add_rt_rule *)param)->
				num_rules,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_ADD_RT_RULE_AFTER:
		if (copy_from_user(header, (u8 *)arg,
			sizeof(struct ipa_ioc_add_rt_rule_after))) {

			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_add_rt_rule_after *)header)->num_rules;
		pyld_sz =
		   sizeof(struct ipa_ioc_add_rt_rule_after) +
		   pre_entry * sizeof(struct ipa_rt_rule_add);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_add_rt_rule_after *)param)->
			num_rules != pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_add_rt_rule_after *)param)->
				num_rules,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_add_rt_rule_after(
			(struct ipa_ioc_add_rt_rule_after *)param)) {

			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_MDFY_RT_RULE:
		if (copy_from_user(header, (u8 *)arg,
					sizeof(struct ipa_ioc_mdfy_rt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules;
		pyld_sz =
		   sizeof(struct ipa_ioc_mdfy_rt_rule) +
		   pre_entry * sizeof(struct ipa_rt_rule_mdfy);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_mdfy_rt_rule *)param)->
				num_rules,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_DEL_RT_RULE:
		if (copy_from_user(header, (u8 *)arg,
					sizeof(struct ipa_ioc_del_rt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_del_rt_rule *)header)->num_hdls;
		pyld_sz =
		   sizeof(struct ipa_ioc_del_rt_rule) +
		   pre_entry * sizeof(struct ipa_rt_rule_del);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_ADD_FLT_RULE:
		if (copy_from_user(header, (u8 *)arg,
					sizeof(struct ipa_ioc_add_flt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_add_flt_rule *)header)->num_rules;
		pyld_sz =
		   sizeof(struct ipa_ioc_add_flt_rule) +
		   pre_entry * sizeof(struct ipa_flt_rule_add);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_add_flt_rule *)param)->
				num_rules,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_ADD_FLT_RULE_AFTER:
		if (copy_from_user(header, (u8 *)arg,
			sizeof(struct ipa_ioc_add_flt_rule_after))) {

			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_add_flt_rule_after *)header)->
			num_rules;
		pyld_sz =
		   sizeof(struct ipa_ioc_add_flt_rule_after) +
		   pre_entry * sizeof(struct ipa_flt_rule_add);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_add_flt_rule_after *)param)->
			num_rules != pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_add_flt_rule_after *)param)->
				num_rules,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_add_flt_rule_after(
				(struct ipa_ioc_add_flt_rule_after *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_DEL_FLT_RULE:
		if (copy_from_user(header, (u8 *)arg,
					sizeof(struct ipa_ioc_del_flt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_del_flt_rule *)header)->num_hdls;
		pyld_sz =
		   sizeof(struct ipa_ioc_del_flt_rule) +
		   pre_entry * sizeof(struct ipa_flt_rule_del);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_del_flt_rule *)param)->
				num_hdls,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_MDFY_FLT_RULE:
		if (copy_from_user(header, (u8 *)arg,
					sizeof(struct ipa_ioc_mdfy_flt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules;
		pyld_sz =
		   sizeof(struct ipa_ioc_mdfy_flt_rule) +
		   pre_entry * sizeof(struct ipa_flt_rule_mdfy);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_mdfy_flt_rule *)param)->
				num_rules,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_COMMIT_HDR:
		retval = ipa3_commit_hdr();
		break;
	case IPA_IOC_RESET_HDR:
		retval = ipa3_reset_hdr();
		break;
	case IPA_IOC_COMMIT_RT:
		retval = ipa3_commit_rt(arg);
		break;
	case IPA_IOC_RESET_RT:
		retval = ipa3_reset_rt(arg);
		break;
	case IPA_IOC_COMMIT_FLT:
		retval = ipa3_commit_flt(arg);
		break;
	case IPA_IOC_RESET_FLT:
		retval = ipa3_reset_flt(arg);
		break;
	case IPA_IOC_GET_RT_TBL:
		if (copy_from_user(header, (u8 *)arg,
					sizeof(struct ipa_ioc_get_rt_tbl))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, header,
					sizeof(struct ipa_ioc_get_rt_tbl))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_PUT_RT_TBL:
		retval = ipa3_put_rt_tbl(arg);
		break;
	case IPA_IOC_GET_HDR:
		if (copy_from_user(header, (u8 *)arg,
					sizeof(struct ipa_ioc_get_hdr))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_get_hdr((struct ipa_ioc_get_hdr *)header)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, header,
					sizeof(struct ipa_ioc_get_hdr))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_PUT_HDR:
		retval = ipa3_put_hdr(arg);
		break;
	case IPA_IOC_SET_FLT:
		retval = ipa3_cfg_filter(arg);
		break;
	case IPA_IOC_COPY_HDR:
		if (copy_from_user(header, (u8 *)arg,
					sizeof(struct ipa_ioc_copy_hdr))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_copy_hdr((struct ipa_ioc_copy_hdr *)header)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, header,
					sizeof(struct ipa_ioc_copy_hdr))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_QUERY_INTF:
		if (copy_from_user(header, (u8 *)arg,
					sizeof(struct ipa_ioc_query_intf))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_query_intf((struct ipa_ioc_query_intf *)header)) {
			retval = -1;
			break;
		}
		if (copy_to_user((u8 *)arg, header,
					sizeof(struct ipa_ioc_query_intf))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_QUERY_INTF_TX_PROPS:
		sz = sizeof(struct ipa_ioc_query_intf_tx_props);
		if (copy_from_user(header, (u8 *)arg, sz)) {
			retval = -EFAULT;
			break;
		}

		if (((struct ipa_ioc_query_intf_tx_props *)header)->num_tx_props
				> IPA_NUM_PROPS_MAX) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_query_intf_tx_props *)
			header)->num_tx_props;
		pyld_sz = sz + pre_entry *
			sizeof(struct ipa_ioc_tx_intf_prop);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
			param)->num_tx_props
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_query_intf_tx_props *)
				param)->num_tx_props, pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_query_intf_tx_props(
				(struct ipa_ioc_query_intf_tx_props *)param)) {
			retval = -1;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_QUERY_INTF_RX_PROPS:
		sz = sizeof(struct ipa_ioc_query_intf_rx_props);
		if (copy_from_user(header, (u8 *)arg, sz)) {
			retval = -EFAULT;
			break;
		}

		if (((struct ipa_ioc_query_intf_rx_props *)header)->num_rx_props
				> IPA_NUM_PROPS_MAX) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_query_intf_rx_props *)
			header)->num_rx_props;
		pyld_sz = sz + pre_entry *
			sizeof(struct ipa_ioc_rx_intf_prop);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
			param)->num_rx_props != pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_query_intf_rx_props *)
				param)->num_rx_props, pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_query_intf_rx_props(
				(struct ipa_ioc_query_intf_rx_props *)param)) {
			retval = -1;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_QUERY_INTF_EXT_PROPS:
		sz = sizeof(struct ipa_ioc_query_intf_ext_props);
		if (copy_from_user(header, (u8 *)arg, sz)) {
			retval = -EFAULT;
			break;
		}

		if (((struct ipa_ioc_query_intf_ext_props *)
				header)->num_ext_props > IPA_NUM_PROPS_MAX) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_query_intf_ext_props *)
			header)->num_ext_props;
		pyld_sz = sz + pre_entry *
			sizeof(struct ipa_ioc_ext_intf_prop);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
			param)->num_ext_props != pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_query_intf_ext_props *)
				param)->num_ext_props, pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_query_intf_ext_props(
				(struct ipa_ioc_query_intf_ext_props *)param)) {
			retval = -1;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_PULL_MSG:
		if (copy_from_user(header, (u8 *)arg,
					sizeof(struct ipa_msg_meta))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_msg_meta *)header)->msg_len;
		pyld_sz = sizeof(struct ipa_msg_meta) +
			pre_entry;
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_msg_meta *)param)->msg_len
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_msg_meta *)param)->msg_len,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_pull_msg((struct ipa_msg_meta *)param,
				(char *)param + sizeof(struct ipa_msg_meta),
				((struct ipa_msg_meta *)param)->msg_len) !=
			((struct ipa_msg_meta *)param)->msg_len) {
			retval = -1;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_RM_ADD_DEPENDENCY:
		/* deprecated when IPA PM is used; break rather than return
		 * so the active-clients vote taken above is released
		 */
		if (ipa3_ctx->use_ipa_pm)
			break;

		if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
				sizeof(struct ipa_ioc_rm_dependency))) {
			retval = -EFAULT;
			break;
		}
		retval = ipa_rm_add_dependency_from_ioctl(
			rm_depend.resource_name, rm_depend.depends_on_name);
		break;
	case IPA_IOC_RM_DEL_DEPENDENCY:
		/* deprecated when IPA PM is used; see note above */
		if (ipa3_ctx->use_ipa_pm)
			break;

		if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
				sizeof(struct ipa_ioc_rm_dependency))) {
			retval = -EFAULT;
			break;
		}
		retval = ipa_rm_delete_dependency_from_ioctl(
			rm_depend.resource_name, rm_depend.depends_on_name);
		break;
	case IPA_IOC_GENERATE_FLT_EQ:
		{
			struct ipa_ioc_generate_flt_eq flt_eq;

			if (copy_from_user(&flt_eq, (u8 *)arg,
				sizeof(struct ipa_ioc_generate_flt_eq))) {
				retval = -EFAULT;
				break;
			}
			if (ipahal_flt_generate_equation(flt_eq.ip,
				&flt_eq.attrib, &flt_eq.eq_attrib)) {
				retval = -EFAULT;
				break;
			}
			if (copy_to_user((u8 *)arg, &flt_eq,
				sizeof(struct ipa_ioc_generate_flt_eq))) {
				retval = -EFAULT;
				break;
			}
			break;
		}
	case IPA_IOC_QUERY_EP_MAPPING:
		{
			retval = ipa3_get_ep_mapping(arg);
			break;
		}
	case IPA_IOC_QUERY_RT_TBL_INDEX:
		if (copy_from_user(header, (u8 *)arg,
				sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_query_rt_index(
			(struct ipa_ioc_get_rt_tbl_indx *)header)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, header,
				sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_WRITE_QMAPID:
		if (copy_from_user(header, (u8 *)arg,
					sizeof(struct ipa_ioc_write_qmapid))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_write_qmap_id((struct ipa_ioc_write_qmapid *)header)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, header,
					sizeof(struct ipa_ioc_write_qmapid))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD:
		retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD, true);
		if (retval) {
			IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
			break;
		}
		break;
	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL:
		retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL, true);
		if (retval) {
			IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
			break;
		}
		break;
	case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED:
		retval = ipa3_send_wan_msg(arg, WAN_EMBMS_CONNECT, false);
		if (retval) {
			IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
			break;
		}
		break;
	case IPA_IOC_ADD_HDR_PROC_CTX:
		if (copy_from_user(header, (u8 *)arg,
			sizeof(struct ipa_ioc_add_hdr_proc_ctx))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_add_hdr_proc_ctx *)
			header)->num_proc_ctxs;
		pyld_sz =
		   sizeof(struct ipa_ioc_add_hdr_proc_ctx) +
		   pre_entry * sizeof(struct ipa_hdr_proc_ctx_add);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
			param)->num_proc_ctxs != pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_add_hdr_proc_ctx *)
				param)->num_proc_ctxs, pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_add_hdr_proc_ctx(
			(struct ipa_ioc_add_hdr_proc_ctx *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_DEL_HDR_PROC_CTX:
		if (copy_from_user(header, (u8 *)arg,
			sizeof(struct ipa_ioc_del_hdr_proc_ctx))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls;
		pyld_sz =
		   sizeof(struct ipa_ioc_del_hdr_proc_ctx) +
		   pre_entry * sizeof(struct ipa_hdr_proc_ctx_del);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
			param)->num_hdls != pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_del_hdr_proc_ctx *)param)->
				num_hdls,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_del_hdr_proc_ctx_by_user(
			(struct ipa_ioc_del_hdr_proc_ctx *)param, true)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_GET_HW_VERSION:
		pyld_sz = sizeof(enum ipa_hw_type);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		memcpy(param, &ipa3_ctx->ipa_hw_type, pyld_sz);
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_ADD_VLAN_IFACE:
		if (ipa3_send_vlan_l2tp_msg(arg, ADD_VLAN_IFACE)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_DEL_VLAN_IFACE:
		if (ipa3_send_vlan_l2tp_msg(arg, DEL_VLAN_IFACE)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_ADD_L2TP_VLAN_MAPPING:
		if (ipa3_send_vlan_l2tp_msg(arg, ADD_L2TP_VLAN_MAPPING)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_DEL_L2TP_VLAN_MAPPING:
		if (ipa3_send_vlan_l2tp_msg(arg, DEL_L2TP_VLAN_MAPPING)) {
			retval = -EFAULT;
			break;
		}
		break;

	default: /* redundant, as cmd was checked against MAXNR */
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
		return -ENOTTY;
	}
	kfree(param);
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

	return retval;
}

1705/**
1706* ipa3_setup_dflt_rt_tables() - Setup default routing tables
1707*
1708* Return codes:
1709* 0: success
1710* -ENOMEM: failed to allocate memory
1711* -EPERM: failed to add the tables
1712*/
1713int ipa3_setup_dflt_rt_tables(void)
1714{
1715 struct ipa_ioc_add_rt_rule *rt_rule;
1716 struct ipa_rt_rule_add *rt_rule_entry;
1717
1718 rt_rule =
1719 kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
1720 sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
1721 if (!rt_rule) {
1722 IPAERR("fail to alloc mem\n");
1723 return -ENOMEM;
1724 }
1725 /* setup a default v4 route to point to Apps */
1726 rt_rule->num_rules = 1;
1727 rt_rule->commit = 1;
1728 rt_rule->ip = IPA_IP_v4;
1729 strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
1730 IPA_RESOURCE_NAME_MAX);
1731
1732 rt_rule_entry = &rt_rule->rules[0];
1733 rt_rule_entry->at_rear = 1;
1734 rt_rule_entry->rule.dst = IPA_CLIENT_APPS_LAN_CONS;
1735 rt_rule_entry->rule.hdr_hdl = ipa3_ctx->excp_hdr_hdl;
1736 rt_rule_entry->rule.retain_hdr = 1;
1737
1738 if (ipa3_add_rt_rule(rt_rule)) {
1739 IPAERR("fail to add dflt v4 rule\n");
1740 kfree(rt_rule);
1741 return -EPERM;
1742 }
1743 IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
1744 ipa3_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
1745
	/* setup a default v6 route to point to Apps */
1747 rt_rule->ip = IPA_IP_v6;
1748 if (ipa3_add_rt_rule(rt_rule)) {
1749 IPAERR("fail to add dflt v6 rule\n");
1750 kfree(rt_rule);
1751 return -EPERM;
1752 }
1753 IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
1754 ipa3_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
1755
1756 /*
1757 * because these tables are the very first to be added, they will both
1758 * have the same index (0) which is essential for programming the
1759 * "route" end-point config
1760 */
1761
1762 kfree(rt_rule);
1763
1764 return 0;
1765}
1766
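/**
 * ipa3_setup_exception_path() - Install the LAN RX exception header and
 * program the route register so that exception (and fragmented) packets
 * are steered to the APPS LAN consumer pipe.
 *
 * Return codes: 0 for success, negative value for failure
 */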
1767static int ipa3_setup_exception_path(void)
1768{
1769 struct ipa_ioc_add_hdr *hdr;
1770 struct ipa_hdr_add *hdr_entry;
1771 struct ipahal_reg_route route = { 0 };
1772 int ret;
1773
1774 /* install the basic exception header */
1775 hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
1776 sizeof(struct ipa_hdr_add), GFP_KERNEL);
1777 if (!hdr) {
1778 IPAERR("fail to alloc exception hdr\n");
1779 return -ENOMEM;
1780 }
1781 hdr->num_hdrs = 1;
1782 hdr->commit = 1;
1783 hdr_entry = &hdr->hdr[0];
1784
1785 strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, IPA_RESOURCE_NAME_MAX);
1786 hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH;
1787
1788 if (ipa3_add_hdr(hdr)) {
1789 IPAERR("fail to add exception hdr\n");
1790 ret = -EPERM;
1791 goto bail;
1792 }
1793
1794 if (hdr_entry->status) {
1795 IPAERR("fail to add exception hdr\n");
1796 ret = -EPERM;
1797 goto bail;
1798 }
1799
1800 ipa3_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
1801
1802 /* set the route register to pass exception packets to Apps */
1803 route.route_def_pipe = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
1804 route.route_frag_def_pipe = ipa3_get_ep_mapping(
1805 IPA_CLIENT_APPS_LAN_CONS);
1806 route.route_def_hdr_table = !ipa3_ctx->hdr_tbl_lcl;
1807 route.route_def_retain_hdr = 1;
1808
1809 if (ipa3_cfg_route(&route)) {
1810 IPAERR("fail to add exception hdr\n");
1811 ret = -EPERM;
1812 goto bail;
1813 }
1814
1815 ret = 0;
1816bail:
1817 kfree(hdr);
1818 return ret;
1819}
1820
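/**
 * ipa3_init_smem_region() - Zero-initialize a region of IPA shared memory by
 * DMA-ing a zeroed buffer into it with a DMA_SHARED_MEM immediate command.
 * @memory_region_size: size of the region in bytes (0 is silently ignored)
 * @memory_region_offset: offset of the region from the start of the
 * unrestricted SRAM area
 *
 * Return codes: 0 for success, negative value for failure
 */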
1821static int ipa3_init_smem_region(int memory_region_size,
1822 int memory_region_offset)
1823{
1824 struct ipahal_imm_cmd_dma_shared_mem cmd;
1825 struct ipahal_imm_cmd_pyld *cmd_pyld;
1826 struct ipa3_desc desc;
1827 struct ipa_mem_buffer mem;
1828 int rc;
1829
1830 if (memory_region_size == 0)
1831 return 0;
1832
1833 memset(&desc, 0, sizeof(desc));
1834 memset(&cmd, 0, sizeof(cmd));
1835 memset(&mem, 0, sizeof(mem));
1836
1837 mem.size = memory_region_size;
1838 mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
1839 &mem.phys_base, GFP_KERNEL);
1840 if (!mem.base) {
1841 IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
1842 return -ENOMEM;
1843 }
1844
1845 memset(mem.base, 0, mem.size);
1846 cmd.is_read = false;
1847 cmd.skip_pipeline_clear = false;
1848 cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
1849 cmd.size = mem.size;
1850 cmd.system_addr = mem.phys_base;
1851 cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
1852 memory_region_offset;
1853 cmd_pyld = ipahal_construct_imm_cmd(
1854 IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
	if (!cmd_pyld) {
		IPAERR("failed to construct dma_shared_mem imm cmd\n");
		dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
			mem.phys_base);
		return -ENOMEM;
	}
Michael Adisumartab5d170f2017-05-17 14:34:11 -07001859 desc.opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03001860 desc.pyld = cmd_pyld->data;
1861 desc.len = cmd_pyld->len;
1862 desc.type = IPA_IMM_CMD_DESC;
1863
1864 rc = ipa3_send_cmd(1, &desc);
1865 if (rc) {
1866 IPAERR("failed to send immediate command (error %d)\n", rc);
1867 rc = -EFAULT;
1868 }
1869
1870 ipahal_destroy_imm_cmd(cmd_pyld);
1871 dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
1872 mem.phys_base);
1873
1874 return rc;
1875}
1876
1877/**
1878* ipa3_init_q6_smem() - Initialize Q6 general memory and
1879* header memory regions in IPA.
1880*
1881* Return codes:
1882* 0: success
1883* -ENOMEM: failed to allocate dma memory
1884* -EFAULT: failed to send IPA command to initialize the memory
1885*/
1886int ipa3_init_q6_smem(void)
1887{
1888 int rc;
1889
1890 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
1891
1892 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_size),
1893 IPA_MEM_PART(modem_ofst));
1894 if (rc) {
1895 IPAERR("failed to initialize Modem RAM memory\n");
1896 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1897 return rc;
1898 }
1899
1900 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_size),
1901 IPA_MEM_PART(modem_hdr_ofst));
1902 if (rc) {
1903 IPAERR("failed to initialize Modem HDRs RAM memory\n");
1904 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1905 return rc;
1906 }
1907
1908 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_proc_ctx_size),
1909 IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
1910 if (rc) {
1911 IPAERR("failed to initialize Modem proc ctx RAM memory\n");
1912 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1913 return rc;
1914 }
1915
1916 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_comp_decomp_size),
1917 IPA_MEM_PART(modem_comp_decomp_ofst));
1918 if (rc) {
1919 IPAERR("failed to initialize Modem Comp/Decomp RAM memory\n");
1920 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1921 return rc;
1922 }
1923 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1924
1925 return rc;
1926}
1927
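/* Descriptor completion callback: releases the immediate command payload
 * passed in user1. user2 is unused.
 */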
1928static void ipa3_destroy_imm(void *user1, int user2)
1929{
1930 ipahal_destroy_imm_cmd(user1);
1931}
1932
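/* Set or clear the endpoint DELAY flag on all Q6 producer pipes */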
1933static void ipa3_q6_pipe_delay(bool delay)
1934{
1935 int client_idx;
1936 int ep_idx;
1937 struct ipa_ep_cfg_ctrl ep_ctrl;
1938
1939 memset(&ep_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
1940 ep_ctrl.ipa_ep_delay = delay;
1941
1942 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
1943 if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
1944 ep_idx = ipa3_get_ep_mapping(client_idx);
1945 if (ep_idx == -1)
1946 continue;
1947
1948 ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n,
1949 ep_idx, &ep_ctrl);
1950 }
1951 }
1952}
1953
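/* Configure all Q6 consumer pipes to drop immediately on head-of-line
 * blocking (HOLB timer = 0) and, prior to IPA v4.0, suspend them as well.
 */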
1954static void ipa3_q6_avoid_holb(void)
1955{
1956 int ep_idx;
1957 int client_idx;
1958 struct ipa_ep_cfg_ctrl ep_suspend;
1959 struct ipa_ep_cfg_holb ep_holb;
1960
1961 memset(&ep_suspend, 0, sizeof(ep_suspend));
1962 memset(&ep_holb, 0, sizeof(ep_holb));
1963
1964 ep_suspend.ipa_ep_suspend = true;
1965 ep_holb.tmr_val = 0;
1966 ep_holb.en = 1;
1967
1968 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
1969 if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
1970 ep_idx = ipa3_get_ep_mapping(client_idx);
1971 if (ep_idx == -1)
1972 continue;
1973
1974 /*
			 * ipa3_cfg_ep_holb is not used here because we are
			 * setting HOLB on Q6 pipes, which are not valid from
			 * the APPS perspective; that function would therefore
			 * fail.
1979 */
1980 ipahal_write_reg_n_fields(
1981 IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
1982 ep_idx, &ep_holb);
1983 ipahal_write_reg_n_fields(
1984 IPA_ENDP_INIT_HOL_BLOCK_EN_n,
1985 ep_idx, &ep_holb);
1986
			/* pipe suspend is not supported from IPA v4.0 onwards */
1988 if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
1989 ipahal_write_reg_n_fields(
1990 IPA_ENDP_INIT_CTRL_n,
1991 ep_idx, &ep_suspend);
Amir Levy9659e592016-10-27 18:08:27 +03001992 }
1993 }
1994}
1995
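/* Halt the GSI channel of every Q6 consumer pipe, retrying while the
 * channel reports that it is busy.
 */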
Skylar Chang94692c92017-03-01 09:07:11 -08001996static void ipa3_halt_q6_cons_gsi_channels(void)
1997{
1998 int ep_idx;
1999 int client_idx;
2000 const struct ipa_gsi_ep_config *gsi_ep_cfg;
Michael Adisumartaf01e9fd2017-08-31 12:23:51 -07002001 int i;
Skylar Chang94692c92017-03-01 09:07:11 -08002002 int ret;
2003 int code = 0;
2004
2005 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
2006 if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
2007 ep_idx = ipa3_get_ep_mapping(client_idx);
2008 if (ep_idx == -1)
2009 continue;
2010
Skylar Changc1f15312017-05-09 14:14:32 -07002011 gsi_ep_cfg = ipa3_get_gsi_ep_info(client_idx);
Skylar Chang94692c92017-03-01 09:07:11 -08002012 if (!gsi_ep_cfg) {
2013 IPAERR("failed to get GSI config\n");
2014 ipa_assert();
2015 return;
2016 }
2017
2018 ret = gsi_halt_channel_ee(
2019 gsi_ep_cfg->ipa_gsi_chan_num, gsi_ep_cfg->ee,
2020 &code);
Michael Adisumartaf01e9fd2017-08-31 12:23:51 -07002021 for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY &&
2022 ret == -GSI_STATUS_AGAIN; i++) {
				IPADBG(
				"ch %d ee %d code %d is busy, trying again\n",
					gsi_ep_cfg->ipa_gsi_chan_num,
					gsi_ep_cfg->ee,
					code);
2028 usleep_range(IPA_GSI_CHANNEL_HALT_MIN_SLEEP,
2029 IPA_GSI_CHANNEL_HALT_MAX_SLEEP);
2030 ret = gsi_halt_channel_ee(
2031 gsi_ep_cfg->ipa_gsi_chan_num,
2032 gsi_ep_cfg->ee, &code);
2033 }
Skylar Chang94692c92017-03-01 09:07:11 -08002034 if (ret == GSI_STATUS_SUCCESS)
2035 IPADBG("halted gsi ch %d ee %d with code %d\n",
2036 gsi_ep_cfg->ipa_gsi_chan_num,
2037 gsi_ep_cfg->ee,
2038 code);
2039 else
2040 IPAERR("failed to halt ch %d ee %d code %d\n",
2041 gsi_ep_cfg->ipa_gsi_chan_num,
2042 gsi_ep_cfg->ee,
2043 code);
2044 }
2045 }
2046}
2047
2048
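/* Overwrite the Q6 (modem) filter table headers of the given IP version and
 * rule type with an empty table image, for every filtering-capable pipe that
 * is not configured by the AP.
 */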
Amir Levy9659e592016-10-27 18:08:27 +03002049static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
2050 enum ipa_rule_type rlt)
2051{
2052 struct ipa3_desc *desc;
2053 struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
2054 struct ipahal_imm_cmd_pyld **cmd_pyld;
2055 int retval = 0;
2056 int pipe_idx;
2057 int flt_idx = 0;
2058 int num_cmds = 0;
2059 int index;
2060 u32 lcl_addr_mem_part;
2061 u32 lcl_hdr_sz;
2062 struct ipa_mem_buffer mem;
2063
2064 IPADBG("Entry\n");
2065
2066 if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
2067 IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
2068 return -EINVAL;
2069 }
2070
	/* There is at most one filtering table per filtering-capable pipe */
2072 desc = kcalloc(ipa3_ctx->ep_flt_num, sizeof(struct ipa3_desc),
2073 GFP_KERNEL);
2074 if (!desc) {
2075 IPAERR("failed to allocate memory\n");
2076 return -ENOMEM;
2077 }
2078
2079 cmd_pyld = kcalloc(ipa3_ctx->ep_flt_num,
2080 sizeof(struct ipahal_imm_cmd_pyld *), GFP_KERNEL);
2081 if (!cmd_pyld) {
2082 IPAERR("failed to allocate memory\n");
2083 retval = -ENOMEM;
2084 goto free_desc;
2085 }
2086
2087 if (ip == IPA_IP_v4) {
2088 if (rlt == IPA_RULE_HASHABLE) {
2089 lcl_addr_mem_part = IPA_MEM_PART(v4_flt_hash_ofst);
2090 lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
2091 } else {
2092 lcl_addr_mem_part = IPA_MEM_PART(v4_flt_nhash_ofst);
2093 lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
2094 }
2095 } else {
2096 if (rlt == IPA_RULE_HASHABLE) {
2097 lcl_addr_mem_part = IPA_MEM_PART(v6_flt_hash_ofst);
2098 lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
2099 } else {
2100 lcl_addr_mem_part = IPA_MEM_PART(v6_flt_nhash_ofst);
2101 lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
2102 }
2103 }
2104
2105 retval = ipahal_flt_generate_empty_img(1, lcl_hdr_sz, lcl_hdr_sz,
Amir Levy4dc79be2017-02-01 19:18:35 +02002106 0, &mem, true);
Amir Levy9659e592016-10-27 18:08:27 +03002107 if (retval) {
2108 IPAERR("failed to generate flt single tbl empty img\n");
2109 goto free_cmd_pyld;
2110 }
2111
2112 for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes; pipe_idx++) {
2113 if (!ipa_is_ep_support_flt(pipe_idx))
2114 continue;
2115
		/*
		 * Iterate over all the filtering pipes that are either invalid
		 * (from the AP's perspective) or connected but not configured
		 * by the AP.
		 */
2120 if (!ipa3_ctx->ep[pipe_idx].valid ||
2121 ipa3_ctx->ep[pipe_idx].skip_ep_cfg) {
2122
2123 cmd.is_read = false;
2124 cmd.skip_pipeline_clear = false;
2125 cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
2126 cmd.size = mem.size;
2127 cmd.system_addr = mem.phys_base;
2128 cmd.local_addr =
2129 ipa3_ctx->smem_restricted_bytes +
2130 lcl_addr_mem_part +
2131 ipahal_get_hw_tbl_hdr_width() +
2132 flt_idx * ipahal_get_hw_tbl_hdr_width();
2133 cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
2134 IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
2135 if (!cmd_pyld[num_cmds]) {
2136 IPAERR("fail construct dma_shared_mem cmd\n");
2137 retval = -ENOMEM;
2138 goto free_empty_img;
2139 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002140 desc[num_cmds].opcode = cmd_pyld[num_cmds]->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002141 desc[num_cmds].pyld = cmd_pyld[num_cmds]->data;
2142 desc[num_cmds].len = cmd_pyld[num_cmds]->len;
2143 desc[num_cmds].type = IPA_IMM_CMD_DESC;
2144 num_cmds++;
2145 }
2146
2147 flt_idx++;
2148 }
2149
2150 IPADBG("Sending %d descriptors for flt tbl clearing\n", num_cmds);
2151 retval = ipa3_send_cmd(num_cmds, desc);
2152 if (retval) {
2153 IPAERR("failed to send immediate command (err %d)\n", retval);
2154 retval = -EFAULT;
2155 }
2156
2157free_empty_img:
2158 ipahal_free_dma_mem(&mem);
2159free_cmd_pyld:
2160 for (index = 0; index < num_cmds; index++)
2161 ipahal_destroy_imm_cmd(cmd_pyld[index]);
2162 kfree(cmd_pyld);
2163free_desc:
2164 kfree(desc);
2165 return retval;
2166}
2167
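/* Overwrite the modem-owned routing table headers (indices
 * modem_rt_index_lo..modem_rt_index_hi) of the given IP version and rule
 * type with an empty table image.
 */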
2168static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip,
2169 enum ipa_rule_type rlt)
2170{
2171 struct ipa3_desc *desc;
2172 struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
2173 struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
2174 int retval = 0;
2175 u32 modem_rt_index_lo;
2176 u32 modem_rt_index_hi;
2177 u32 lcl_addr_mem_part;
2178 u32 lcl_hdr_sz;
2179 struct ipa_mem_buffer mem;
2180
2181 IPADBG("Entry\n");
2182
2183 if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
2184 IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
2185 return -EINVAL;
2186 }
2187
2188 if (ip == IPA_IP_v4) {
2189 modem_rt_index_lo = IPA_MEM_PART(v4_modem_rt_index_lo);
2190 modem_rt_index_hi = IPA_MEM_PART(v4_modem_rt_index_hi);
2191 if (rlt == IPA_RULE_HASHABLE) {
2192 lcl_addr_mem_part = IPA_MEM_PART(v4_rt_hash_ofst);
2193 lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
2194 } else {
2195 lcl_addr_mem_part = IPA_MEM_PART(v4_rt_nhash_ofst);
2196 lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
2197 }
2198 } else {
2199 modem_rt_index_lo = IPA_MEM_PART(v6_modem_rt_index_lo);
2200 modem_rt_index_hi = IPA_MEM_PART(v6_modem_rt_index_hi);
2201 if (rlt == IPA_RULE_HASHABLE) {
2202 lcl_addr_mem_part = IPA_MEM_PART(v6_rt_hash_ofst);
2203 lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
2204 } else {
2205 lcl_addr_mem_part = IPA_MEM_PART(v6_rt_nhash_ofst);
2206 lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
2207 }
2208 }
2209
2210 retval = ipahal_rt_generate_empty_img(
2211 modem_rt_index_hi - modem_rt_index_lo + 1,
Amir Levy4dc79be2017-02-01 19:18:35 +02002212 lcl_hdr_sz, lcl_hdr_sz, &mem, true);
Amir Levy9659e592016-10-27 18:08:27 +03002213 if (retval) {
2214 IPAERR("fail generate empty rt img\n");
2215 return -ENOMEM;
2216 }
2217
2218 desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
	if (!desc) {
		IPAERR("failed to allocate memory\n");
		retval = -ENOMEM;
		goto free_empty_img;
	}
2223
2224 cmd.is_read = false;
2225 cmd.skip_pipeline_clear = false;
2226 cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
2227 cmd.size = mem.size;
2228 cmd.system_addr = mem.phys_base;
2229 cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
2230 lcl_addr_mem_part +
2231 modem_rt_index_lo * ipahal_get_hw_tbl_hdr_width();
2232 cmd_pyld = ipahal_construct_imm_cmd(
2233 IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
2234 if (!cmd_pyld) {
2235 IPAERR("failed to construct dma_shared_mem imm cmd\n");
2236 retval = -ENOMEM;
2237 goto free_desc;
2238 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002239 desc->opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002240 desc->pyld = cmd_pyld->data;
2241 desc->len = cmd_pyld->len;
2242 desc->type = IPA_IMM_CMD_DESC;
2243
2244 IPADBG("Sending 1 descriptor for rt tbl clearing\n");
2245 retval = ipa3_send_cmd(1, desc);
2246 if (retval) {
2247 IPAERR("failed to send immediate command (err %d)\n", retval);
2248 retval = -EFAULT;
2249 }
2250
2251 ipahal_destroy_imm_cmd(cmd_pyld);
2252free_desc:
2253 kfree(desc);
2254free_empty_img:
2255 ipahal_free_dma_mem(&mem);
2256 return retval;
2257}
2258
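/* Clean all Q6 filter and routing tables (IPv4/IPv6, hashable and
 * non-hashable) and then flush the filter/route hash caches via a
 * REGISTER_WRITE immediate command.
 */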
2259static int ipa3_q6_clean_q6_tables(void)
2260{
2261 struct ipa3_desc *desc;
2262 struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
2263 struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
2264 int retval;
2265 struct ipahal_reg_fltrt_hash_flush flush;
2266 struct ipahal_reg_valmask valmask;
2267
2268 IPADBG("Entry\n");
2269
2270
2271 if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
2272 IPAERR("failed to clean q6 flt tbls (v4/hashable)\n");
2273 return -EFAULT;
2274 }
2275 if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
2276 IPAERR("failed to clean q6 flt tbls (v6/hashable)\n");
2277 return -EFAULT;
2278 }
2279 if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
2280 IPAERR("failed to clean q6 flt tbls (v4/non-hashable)\n");
2281 return -EFAULT;
2282 }
2283 if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
2284 IPAERR("failed to clean q6 flt tbls (v6/non-hashable)\n");
2285 return -EFAULT;
2286 }
2287
2288 if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
2289 IPAERR("failed to clean q6 rt tbls (v4/hashable)\n");
2290 return -EFAULT;
2291 }
2292 if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
2293 IPAERR("failed to clean q6 rt tbls (v6/hashable)\n");
2294 return -EFAULT;
2295 }
2296 if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
2297 IPAERR("failed to clean q6 rt tbls (v4/non-hashable)\n");
2298 return -EFAULT;
2299 }
2300 if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
2301 IPAERR("failed to clean q6 rt tbls (v6/non-hashable)\n");
2302 return -EFAULT;
2303 }
2304
2305 /* Flush rules cache */
2306 desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
2307 if (!desc) {
2308 IPAERR("failed to allocate memory\n");
2309 return -ENOMEM;
2310 }
2311
2312 flush.v4_flt = true;
2313 flush.v4_rt = true;
2314 flush.v6_flt = true;
2315 flush.v6_rt = true;
2316 ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
2317 reg_write_cmd.skip_pipeline_clear = false;
2318 reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
2319 reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
2320 reg_write_cmd.value = valmask.val;
2321 reg_write_cmd.value_mask = valmask.mask;
2322 cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
2323 &reg_write_cmd, false);
2324 if (!cmd_pyld) {
2325 IPAERR("fail construct register_write imm cmd\n");
2326 retval = -EFAULT;
2327 goto bail_desc;
2328 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002329 desc->opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002330 desc->pyld = cmd_pyld->data;
2331 desc->len = cmd_pyld->len;
2332 desc->type = IPA_IMM_CMD_DESC;
2333
2334 IPADBG("Sending 1 descriptor for tbls flush\n");
2335 retval = ipa3_send_cmd(1, desc);
2336 if (retval) {
2337 IPAERR("failed to send immediate command (err %d)\n", retval);
2338 retval = -EFAULT;
2339 }
2340
2341 ipahal_destroy_imm_cmd(cmd_pyld);
2342
2343bail_desc:
2344 kfree(desc);
2345 IPADBG("Done - retval = %d\n", retval);
2346 return retval;
2347}
2348
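/* Set the exception path to the APPS: zero the IPA_ENDP_STATUS_n register of
 * every modem-controlled producer pipe (disabling status generation) and
 * apply the writes via the TAG process.
 */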
2349static int ipa3_q6_set_ex_path_to_apps(void)
2350{
2351 int ep_idx;
2352 int client_idx;
2353 struct ipa3_desc *desc;
2354 int num_descs = 0;
2355 int index;
2356 struct ipahal_imm_cmd_register_write reg_write;
2357 struct ipahal_imm_cmd_pyld *cmd_pyld;
2358 int retval;
Amir Levy9659e592016-10-27 18:08:27 +03002359
2360 desc = kcalloc(ipa3_ctx->ipa_num_pipes, sizeof(struct ipa3_desc),
2361 GFP_KERNEL);
2362 if (!desc) {
2363 IPAERR("failed to allocate memory\n");
2364 return -ENOMEM;
2365 }
2366
2367 /* Set the exception path to AP */
2368 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
2369 ep_idx = ipa3_get_ep_mapping(client_idx);
2370 if (ep_idx == -1)
2371 continue;
2372
Skylar Chang53137112017-05-12 17:13:13 -07002373 /* disable statuses for all modem controlled prod pipes */
2374 if (IPA_CLIENT_IS_Q6_PROD(client_idx) ||
2375 (ipa3_ctx->ep[ep_idx].valid &&
2376 ipa3_ctx->ep[ep_idx].skip_ep_cfg)) {
Amir Levy5807be32017-04-19 14:35:12 +03002377 ipa_assert_on(num_descs >= ipa3_ctx->ipa_num_pipes);
2378
2379 reg_write.skip_pipeline_clear = false;
2380 reg_write.pipeline_clear_options =
2381 IPAHAL_HPS_CLEAR;
2382 reg_write.offset =
2383 ipahal_get_reg_n_ofst(IPA_ENDP_STATUS_n,
2384 ep_idx);
2385 reg_write.value = 0;
2386 reg_write.value_mask = ~0;
2387 cmd_pyld = ipahal_construct_imm_cmd(
2388 IPA_IMM_CMD_REGISTER_WRITE, &reg_write, false);
2389 if (!cmd_pyld) {
2390 IPAERR("fail construct register_write cmd\n");
2391 ipa_assert();
2392 return -EFAULT;
2393 }
2394
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002395 desc[num_descs].opcode = cmd_pyld->opcode;
Amir Levy5807be32017-04-19 14:35:12 +03002396 desc[num_descs].type = IPA_IMM_CMD_DESC;
2397 desc[num_descs].callback = ipa3_destroy_imm;
2398 desc[num_descs].user1 = cmd_pyld;
2399 desc[num_descs].pyld = cmd_pyld->data;
2400 desc[num_descs].len = cmd_pyld->len;
2401 num_descs++;
2402 }
Amir Levy9659e592016-10-27 18:08:27 +03002403 }
2404
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002405 /* Will wait 500msecs for IPA tag process completion */
Amir Levy9659e592016-10-27 18:08:27 +03002406 retval = ipa3_tag_process(desc, num_descs,
2407 msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT));
2408 if (retval) {
2409 IPAERR("TAG process failed! (error %d)\n", retval);
2410 /* For timeout error ipa3_destroy_imm cb will destroy user1 */
2411 if (retval != -ETIME) {
2412 for (index = 0; index < num_descs; index++)
2413 if (desc[index].callback)
2414 desc[index].callback(desc[index].user1,
2415 desc[index].user2);
2416 retval = -EINVAL;
2417 }
2418 }
2419
2420 kfree(desc);
2421
2422 return retval;
2423}
2424
2425/**
* ipa3_q6_pre_shutdown_cleanup() - Clean up all Q6-related configuration
* in IPA HW. This is performed in case of SSR.
*
* This is a mandatory procedure; if one of the steps fails, the
* AP needs to restart.
2431*/
2432void ipa3_q6_pre_shutdown_cleanup(void)
2433{
2434 IPADBG_LOW("ENTER\n");
2435
2436 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2437
2438 ipa3_q6_pipe_delay(true);
2439 ipa3_q6_avoid_holb();
2440 if (ipa3_q6_clean_q6_tables()) {
2441 IPAERR("Failed to clean Q6 tables\n");
2442 BUG();
2443 }
2444 if (ipa3_q6_set_ex_path_to_apps()) {
2445 IPAERR("Failed to redirect exceptions to APPS\n");
2446 BUG();
2447 }
	/* Remove delay from Q6 PRODs to avoid pending descriptors
	 * during the pipe reset procedure
2450 */
2451 ipa3_q6_pipe_delay(false);
2452
2453 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2454 IPADBG_LOW("Exit with success\n");
2455}
2456
2457/*
 * ipa3_q6_post_shutdown_cleanup() - As part of this cleanup,
 * verify that the GSI channels related to the Q6 producer clients are empty.
 *
 * Q6 GSI channel emptiness is needed to guarantee that no descriptors with
 * invalid info are injected into IPA RX from IPA_IF while the modem is
 * restarting.
2463 */
2464void ipa3_q6_post_shutdown_cleanup(void)
2465{
2466 int client_idx;
Skylar Changc1f15312017-05-09 14:14:32 -07002467 int ep_idx;
Amir Levy9659e592016-10-27 18:08:27 +03002468
2469 IPADBG_LOW("ENTER\n");
Amir Levy9659e592016-10-27 18:08:27 +03002470
2471 if (!ipa3_ctx->uc_ctx.uc_loaded) {
2472 IPAERR("uC is not loaded. Skipping\n");
2473 return;
2474 }
2475
Skylar Chang94692c92017-03-01 09:07:11 -08002476 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2477
2478 /* Handle the issue where SUSPEND was removed for some reason */
2479 ipa3_q6_avoid_holb();
2480 ipa3_halt_q6_cons_gsi_channels();
2481
Amir Levy9659e592016-10-27 18:08:27 +03002482 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
2483 if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
Skylar Changc1f15312017-05-09 14:14:32 -07002484 ep_idx = ipa3_get_ep_mapping(client_idx);
2485 if (ep_idx == -1)
2486 continue;
2487
Amir Levy9659e592016-10-27 18:08:27 +03002488 if (ipa3_uc_is_gsi_channel_empty(client_idx)) {
2489 IPAERR("fail to validate Q6 ch emptiness %d\n",
2490 client_idx);
2491 BUG();
2492 return;
2493 }
2494 }
2495
2496 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2497 IPADBG_LOW("Exit with success\n");
2498}
2499
2500static inline void ipa3_sram_set_canary(u32 *sram_mmio, int offset)
2501{
2502 /* Set 4 bytes of CANARY before the offset */
2503 sram_mmio[(offset - 4) / 4] = IPA_MEM_CANARY_VAL;
2504}
2505
2506/**
Amir Levy9fadeca2017-04-25 10:18:32 +03002507 * _ipa_init_sram_v3() - Initialize IPA local SRAM.
Amir Levy9659e592016-10-27 18:08:27 +03002508 *
2509 * Return codes: 0 for success, negative value for failure
2510 */
Amir Levy9fadeca2017-04-25 10:18:32 +03002511int _ipa_init_sram_v3(void)
Amir Levy9659e592016-10-27 18:08:27 +03002512{
2513 u32 *ipa_sram_mmio;
2514 unsigned long phys_addr;
2515
2516 phys_addr = ipa3_ctx->ipa_wrapper_base +
2517 ipa3_ctx->ctrl->ipa_reg_base_ofst +
2518 ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
2519 ipa3_ctx->smem_restricted_bytes / 4);
2520
2521 ipa_sram_mmio = ioremap(phys_addr, ipa3_ctx->smem_sz);
2522 if (!ipa_sram_mmio) {
2523 IPAERR("fail to ioremap IPA SRAM\n");
2524 return -ENOMEM;
2525 }
2526
2527 /* Consult with ipa_i.h on the location of the CANARY values */
2528 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst) - 4);
2529 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst));
2530 ipa3_sram_set_canary(ipa_sram_mmio,
2531 IPA_MEM_PART(v4_flt_nhash_ofst) - 4);
2532 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_nhash_ofst));
2533 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst) - 4);
2534 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst));
2535 ipa3_sram_set_canary(ipa_sram_mmio,
2536 IPA_MEM_PART(v6_flt_nhash_ofst) - 4);
2537 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_nhash_ofst));
2538 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst) - 4);
2539 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst));
2540 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst) - 4);
2541 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst));
2542 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst) - 4);
2543 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst));
2544 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst) - 4);
2545 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst));
2546 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst) - 4);
2547 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst));
2548 ipa3_sram_set_canary(ipa_sram_mmio,
2549 IPA_MEM_PART(modem_hdr_proc_ctx_ofst) - 4);
2550 ipa3_sram_set_canary(ipa_sram_mmio,
2551 IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
2552 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst) - 4);
2553 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst));
Amir Levy9fadeca2017-04-25 10:18:32 +03002554 ipa3_sram_set_canary(ipa_sram_mmio,
2555 (ipa_get_hw_type() >= IPA_HW_v3_5) ?
2556 IPA_MEM_PART(uc_event_ring_ofst) :
2557 IPA_MEM_PART(end_ofst));
Amir Levy9659e592016-10-27 18:08:27 +03002558
2559 iounmap(ipa_sram_mmio);
2560
2561 return 0;
2562}
2563
2564/**
2565 * _ipa_init_hdr_v3_0() - Initialize IPA header block.
2566 *
2567 * Return codes: 0 for success, negative value for failure
2568 */
2569int _ipa_init_hdr_v3_0(void)
2570{
2571 struct ipa3_desc desc = { 0 };
2572 struct ipa_mem_buffer mem;
2573 struct ipahal_imm_cmd_hdr_init_local cmd = {0};
2574 struct ipahal_imm_cmd_pyld *cmd_pyld;
2575 struct ipahal_imm_cmd_dma_shared_mem dma_cmd = { 0 };
2576
2577 mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
2578 mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
2579 GFP_KERNEL);
2580 if (!mem.base) {
2581 IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
2582 return -ENOMEM;
2583 }
2584 memset(mem.base, 0, mem.size);
2585
2586 cmd.hdr_table_addr = mem.phys_base;
2587 cmd.size_hdr_table = mem.size;
2588 cmd.hdr_addr = ipa3_ctx->smem_restricted_bytes +
2589 IPA_MEM_PART(modem_hdr_ofst);
2590 cmd_pyld = ipahal_construct_imm_cmd(
2591 IPA_IMM_CMD_HDR_INIT_LOCAL, &cmd, false);
2592 if (!cmd_pyld) {
2593 IPAERR("fail to construct hdr_init_local imm cmd\n");
2594 dma_free_coherent(ipa3_ctx->pdev,
2595 mem.size, mem.base,
2596 mem.phys_base);
2597 return -EFAULT;
2598 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002599 desc.opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002600 desc.type = IPA_IMM_CMD_DESC;
2601 desc.pyld = cmd_pyld->data;
2602 desc.len = cmd_pyld->len;
2603 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2604
2605 if (ipa3_send_cmd(1, &desc)) {
2606 IPAERR("fail to send immediate command\n");
2607 ipahal_destroy_imm_cmd(cmd_pyld);
2608 dma_free_coherent(ipa3_ctx->pdev,
2609 mem.size, mem.base,
2610 mem.phys_base);
2611 return -EFAULT;
2612 }
2613
2614 ipahal_destroy_imm_cmd(cmd_pyld);
2615 dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
2616
2617 mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) +
2618 IPA_MEM_PART(apps_hdr_proc_ctx_size);
2619 mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
2620 GFP_KERNEL);
2621 if (!mem.base) {
2622 IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
2623 return -ENOMEM;
2624 }
2625 memset(mem.base, 0, mem.size);
2626 memset(&desc, 0, sizeof(desc));
2627
2628 dma_cmd.is_read = false;
2629 dma_cmd.skip_pipeline_clear = false;
2630 dma_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
2631 dma_cmd.system_addr = mem.phys_base;
2632 dma_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
2633 IPA_MEM_PART(modem_hdr_proc_ctx_ofst);
2634 dma_cmd.size = mem.size;
2635 cmd_pyld = ipahal_construct_imm_cmd(
2636 IPA_IMM_CMD_DMA_SHARED_MEM, &dma_cmd, false);
2637 if (!cmd_pyld) {
2638 IPAERR("fail to construct dma_shared_mem imm\n");
2639 dma_free_coherent(ipa3_ctx->pdev,
2640 mem.size, mem.base,
2641 mem.phys_base);
2642 return -EFAULT;
2643 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002644 desc.opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002645 desc.pyld = cmd_pyld->data;
2646 desc.len = cmd_pyld->len;
2647 desc.type = IPA_IMM_CMD_DESC;
2648 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2649
2650 if (ipa3_send_cmd(1, &desc)) {
2651 IPAERR("fail to send immediate command\n");
2652 ipahal_destroy_imm_cmd(cmd_pyld);
2653 dma_free_coherent(ipa3_ctx->pdev,
2654 mem.size,
2655 mem.base,
2656 mem.phys_base);
2657 return -EFAULT;
2658 }
2659 ipahal_destroy_imm_cmd(cmd_pyld);
2660
2661 ipahal_write_reg(IPA_LOCAL_PKT_PROC_CNTXT_BASE, dma_cmd.local_addr);
2662
2663 dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
2664
2665 return 0;
2666}
2667
2668/**
2669 * _ipa_init_rt4_v3() - Initialize IPA routing block for IPv4.
2670 *
2671 * Return codes: 0 for success, negative value for failure
2672 */
2673int _ipa_init_rt4_v3(void)
2674{
2675 struct ipa3_desc desc = { 0 };
2676 struct ipa_mem_buffer mem;
2677 struct ipahal_imm_cmd_ip_v4_routing_init v4_cmd;
2678 struct ipahal_imm_cmd_pyld *cmd_pyld;
2679 int i;
2680 int rc = 0;
2681
2682 for (i = IPA_MEM_PART(v4_modem_rt_index_lo);
2683 i <= IPA_MEM_PART(v4_modem_rt_index_hi);
2684 i++)
2685 ipa3_ctx->rt_idx_bitmap[IPA_IP_v4] |= (1 << i);
2686 IPADBG("v4 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v4]);
2687
2688 rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v4_rt_num_index),
2689 IPA_MEM_PART(v4_rt_hash_size), IPA_MEM_PART(v4_rt_nhash_size),
Amir Levy4dc79be2017-02-01 19:18:35 +02002690 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03002691 if (rc) {
2692 IPAERR("fail generate empty v4 rt img\n");
2693 return rc;
2694 }
2695
2696 v4_cmd.hash_rules_addr = mem.phys_base;
2697 v4_cmd.hash_rules_size = mem.size;
2698 v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
2699 IPA_MEM_PART(v4_rt_hash_ofst);
2700 v4_cmd.nhash_rules_addr = mem.phys_base;
2701 v4_cmd.nhash_rules_size = mem.size;
2702 v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
2703 IPA_MEM_PART(v4_rt_nhash_ofst);
2704 IPADBG("putting hashable routing IPv4 rules to phys 0x%x\n",
2705 v4_cmd.hash_local_addr);
2706 IPADBG("putting non-hashable routing IPv4 rules to phys 0x%x\n",
2707 v4_cmd.nhash_local_addr);
2708 cmd_pyld = ipahal_construct_imm_cmd(
2709 IPA_IMM_CMD_IP_V4_ROUTING_INIT, &v4_cmd, false);
2710 if (!cmd_pyld) {
2711 IPAERR("fail construct ip_v4_rt_init imm cmd\n");
2712 rc = -EPERM;
2713 goto free_mem;
2714 }
2715
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002716 desc.opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002717 desc.type = IPA_IMM_CMD_DESC;
2718 desc.pyld = cmd_pyld->data;
2719 desc.len = cmd_pyld->len;
2720 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2721
2722 if (ipa3_send_cmd(1, &desc)) {
2723 IPAERR("fail to send immediate command\n");
2724 rc = -EFAULT;
2725 }
2726
2727 ipahal_destroy_imm_cmd(cmd_pyld);
2728
2729free_mem:
2730 ipahal_free_dma_mem(&mem);
2731 return rc;
2732}
2733
2734/**
2735 * _ipa_init_rt6_v3() - Initialize IPA routing block for IPv6.
2736 *
2737 * Return codes: 0 for success, negative value for failure
2738 */
2739int _ipa_init_rt6_v3(void)
2740{
2741 struct ipa3_desc desc = { 0 };
2742 struct ipa_mem_buffer mem;
2743 struct ipahal_imm_cmd_ip_v6_routing_init v6_cmd;
2744 struct ipahal_imm_cmd_pyld *cmd_pyld;
2745 int i;
2746 int rc = 0;
2747
2748 for (i = IPA_MEM_PART(v6_modem_rt_index_lo);
2749 i <= IPA_MEM_PART(v6_modem_rt_index_hi);
2750 i++)
2751 ipa3_ctx->rt_idx_bitmap[IPA_IP_v6] |= (1 << i);
2752 IPADBG("v6 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v6]);
2753
2754 rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v6_rt_num_index),
2755 IPA_MEM_PART(v6_rt_hash_size), IPA_MEM_PART(v6_rt_nhash_size),
Amir Levy4dc79be2017-02-01 19:18:35 +02002756 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03002757 if (rc) {
2758 IPAERR("fail generate empty v6 rt img\n");
2759 return rc;
2760 }
2761
2762 v6_cmd.hash_rules_addr = mem.phys_base;
2763 v6_cmd.hash_rules_size = mem.size;
2764 v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
2765 IPA_MEM_PART(v6_rt_hash_ofst);
2766 v6_cmd.nhash_rules_addr = mem.phys_base;
2767 v6_cmd.nhash_rules_size = mem.size;
2768 v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
2769 IPA_MEM_PART(v6_rt_nhash_ofst);
2770 IPADBG("putting hashable routing IPv6 rules to phys 0x%x\n",
2771 v6_cmd.hash_local_addr);
2772 IPADBG("putting non-hashable routing IPv6 rules to phys 0x%x\n",
2773 v6_cmd.nhash_local_addr);
2774 cmd_pyld = ipahal_construct_imm_cmd(
2775 IPA_IMM_CMD_IP_V6_ROUTING_INIT, &v6_cmd, false);
2776 if (!cmd_pyld) {
2777 IPAERR("fail construct ip_v6_rt_init imm cmd\n");
2778 rc = -EPERM;
2779 goto free_mem;
2780 }
2781
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002782 desc.opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002783 desc.type = IPA_IMM_CMD_DESC;
2784 desc.pyld = cmd_pyld->data;
2785 desc.len = cmd_pyld->len;
2786 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2787
2788 if (ipa3_send_cmd(1, &desc)) {
2789 IPAERR("fail to send immediate command\n");
2790 rc = -EFAULT;
2791 }
2792
2793 ipahal_destroy_imm_cmd(cmd_pyld);
2794
2795free_mem:
2796 ipahal_free_dma_mem(&mem);
2797 return rc;
2798}
2799
2800/**
2801 * _ipa_init_flt4_v3() - Initialize IPA filtering block for IPv4.
2802 *
2803 * Return codes: 0 for success, negative value for failure
2804 */
2805int _ipa_init_flt4_v3(void)
2806{
2807 struct ipa3_desc desc = { 0 };
2808 struct ipa_mem_buffer mem;
2809 struct ipahal_imm_cmd_ip_v4_filter_init v4_cmd;
2810 struct ipahal_imm_cmd_pyld *cmd_pyld;
2811 int rc;
2812
2813 rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
2814 IPA_MEM_PART(v4_flt_hash_size),
2815 IPA_MEM_PART(v4_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
Amir Levy4dc79be2017-02-01 19:18:35 +02002816 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03002817 if (rc) {
2818 IPAERR("fail generate empty v4 flt img\n");
2819 return rc;
2820 }
2821
2822 v4_cmd.hash_rules_addr = mem.phys_base;
2823 v4_cmd.hash_rules_size = mem.size;
2824 v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
2825 IPA_MEM_PART(v4_flt_hash_ofst);
2826 v4_cmd.nhash_rules_addr = mem.phys_base;
2827 v4_cmd.nhash_rules_size = mem.size;
2828 v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
2829 IPA_MEM_PART(v4_flt_nhash_ofst);
2830 IPADBG("putting hashable filtering IPv4 rules to phys 0x%x\n",
2831 v4_cmd.hash_local_addr);
2832 IPADBG("putting non-hashable filtering IPv4 rules to phys 0x%x\n",
2833 v4_cmd.nhash_local_addr);
2834 cmd_pyld = ipahal_construct_imm_cmd(
2835 IPA_IMM_CMD_IP_V4_FILTER_INIT, &v4_cmd, false);
2836 if (!cmd_pyld) {
2837 IPAERR("fail construct ip_v4_flt_init imm cmd\n");
2838 rc = -EPERM;
2839 goto free_mem;
2840 }
2841
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002842 desc.opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002843 desc.type = IPA_IMM_CMD_DESC;
2844 desc.pyld = cmd_pyld->data;
2845 desc.len = cmd_pyld->len;
2846 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2847
2848 if (ipa3_send_cmd(1, &desc)) {
2849 IPAERR("fail to send immediate command\n");
2850 rc = -EFAULT;
2851 }
2852
2853 ipahal_destroy_imm_cmd(cmd_pyld);
2854
2855free_mem:
2856 ipahal_free_dma_mem(&mem);
2857 return rc;
2858}
2859
2860/**
2861 * _ipa_init_flt6_v3() - Initialize IPA filtering block for IPv6.
2862 *
2863 * Return codes: 0 for success, negative value for failure
2864 */
2865int _ipa_init_flt6_v3(void)
2866{
2867 struct ipa3_desc desc = { 0 };
2868 struct ipa_mem_buffer mem;
2869 struct ipahal_imm_cmd_ip_v6_filter_init v6_cmd;
2870 struct ipahal_imm_cmd_pyld *cmd_pyld;
2871 int rc;
2872
2873 rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
2874 IPA_MEM_PART(v6_flt_hash_size),
2875 IPA_MEM_PART(v6_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
Amir Levy4dc79be2017-02-01 19:18:35 +02002876 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03002877 if (rc) {
2878 IPAERR("fail generate empty v6 flt img\n");
2879 return rc;
2880 }
2881
2882 v6_cmd.hash_rules_addr = mem.phys_base;
2883 v6_cmd.hash_rules_size = mem.size;
2884 v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
2885 IPA_MEM_PART(v6_flt_hash_ofst);
2886 v6_cmd.nhash_rules_addr = mem.phys_base;
2887 v6_cmd.nhash_rules_size = mem.size;
2888 v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
2889 IPA_MEM_PART(v6_flt_nhash_ofst);
2890 IPADBG("putting hashable filtering IPv6 rules to phys 0x%x\n",
2891 v6_cmd.hash_local_addr);
2892 IPADBG("putting non-hashable filtering IPv6 rules to phys 0x%x\n",
2893 v6_cmd.nhash_local_addr);
2894
2895 cmd_pyld = ipahal_construct_imm_cmd(
2896 IPA_IMM_CMD_IP_V6_FILTER_INIT, &v6_cmd, false);
2897 if (!cmd_pyld) {
2898 IPAERR("fail construct ip_v6_flt_init imm cmd\n");
2899 rc = -EPERM;
2900 goto free_mem;
2901 }
2902
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002903 desc.opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002904 desc.type = IPA_IMM_CMD_DESC;
2905 desc.pyld = cmd_pyld->data;
2906 desc.len = cmd_pyld->len;
2907 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2908
2909 if (ipa3_send_cmd(1, &desc)) {
2910 IPAERR("fail to send immediate command\n");
2911 rc = -EFAULT;
2912 }
2913
2914 ipahal_destroy_imm_cmd(cmd_pyld);
2915
2916free_mem:
2917 ipahal_free_dma_mem(&mem);
2918 return rc;
2919}
2920
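/* Zero the filter hash tuple mask of every AP-owned filtering pipe
 * (modem pipes are skipped).
 */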
2921static int ipa3_setup_flt_hash_tuple(void)
2922{
2923 int pipe_idx;
2924 struct ipahal_reg_hash_tuple tuple;
2925
2926 memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
2927
2928 for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes ; pipe_idx++) {
2929 if (!ipa_is_ep_support_flt(pipe_idx))
2930 continue;
2931
2932 if (ipa_is_modem_pipe(pipe_idx))
2933 continue;
2934
2935 if (ipa3_set_flt_tuple_mask(pipe_idx, &tuple)) {
2936 IPAERR("failed to setup pipe %d flt tuple\n", pipe_idx);
2937 return -EFAULT;
2938 }
2939 }
2940
2941 return 0;
2942}
2943
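/* Zero the routing hash tuple mask of every AP-owned routing table index
 * (the modem-reserved index ranges are skipped).
 */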
2944static int ipa3_setup_rt_hash_tuple(void)
2945{
2946 int tbl_idx;
2947 struct ipahal_reg_hash_tuple tuple;
2948
2949 memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
2950
2951 for (tbl_idx = 0;
2952 tbl_idx < max(IPA_MEM_PART(v6_rt_num_index),
2953 IPA_MEM_PART(v4_rt_num_index));
2954 tbl_idx++) {
2955
2956 if (tbl_idx >= IPA_MEM_PART(v4_modem_rt_index_lo) &&
2957 tbl_idx <= IPA_MEM_PART(v4_modem_rt_index_hi))
2958 continue;
2959
2960 if (tbl_idx >= IPA_MEM_PART(v6_modem_rt_index_lo) &&
2961 tbl_idx <= IPA_MEM_PART(v6_modem_rt_index_hi))
2962 continue;
2963
2964 if (ipa3_set_rt_tuple_mask(tbl_idx, &tuple)) {
2965 IPAERR("failed to setup tbl %d rt tuple\n", tbl_idx);
2966 return -EFAULT;
2967 }
2968 }
2969
2970 return 0;
2971}
2972
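/* Set up the APPS pipes and the HW state they depend on: the common PROD GSI
 * event ring, the command producer pipe, SRAM/HDR/RT/FLT initialization, the
 * hash tuples, the exception path, the default routing tables, the LAN
 * consumer pipe and (in non-MHI configurations) the LAN producer pipe.
 */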
2973static int ipa3_setup_apps_pipes(void)
2974{
2975 struct ipa_sys_connect_params sys_in;
2976 int result = 0;
2977
2978 if (ipa3_ctx->gsi_ch20_wa) {
2979 IPADBG("Allocating GSI physical channel 20\n");
2980 result = ipa_gsi_ch20_wa();
2981 if (result) {
2982 IPAERR("ipa_gsi_ch20_wa failed %d\n", result);
Ghanim Fodic6b67492017-03-15 14:19:56 +02002983 goto fail_ch20_wa;
Amir Levy9659e592016-10-27 18:08:27 +03002984 }
2985 }
2986
Skylar Changd407e592017-03-30 11:25:30 -07002987 /* allocate the common PROD event ring */
2988 if (ipa3_alloc_common_event_ring()) {
2989 IPAERR("ipa3_alloc_common_event_ring failed.\n");
2990 result = -EPERM;
2991 goto fail_ch20_wa;
2992 }
2993
Amir Levy9659e592016-10-27 18:08:27 +03002994 /* CMD OUT (AP->IPA) */
2995 memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
2996 sys_in.client = IPA_CLIENT_APPS_CMD_PROD;
2997 sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
2998 sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
2999 sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS;
3000 if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_cmd)) {
Ghanim Fodic6b67492017-03-15 14:19:56 +02003001 IPAERR(":setup sys pipe (APPS_CMD_PROD) failed.\n");
Amir Levy9659e592016-10-27 18:08:27 +03003002 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003003 goto fail_ch20_wa;
Amir Levy9659e592016-10-27 18:08:27 +03003004 }
3005 IPADBG("Apps to IPA cmd pipe is connected\n");
3006
3007 ipa3_ctx->ctrl->ipa_init_sram();
3008 IPADBG("SRAM initialized\n");
3009
3010 ipa3_ctx->ctrl->ipa_init_hdr();
3011 IPADBG("HDR initialized\n");
3012
3013 ipa3_ctx->ctrl->ipa_init_rt4();
3014 IPADBG("V4 RT initialized\n");
3015
3016 ipa3_ctx->ctrl->ipa_init_rt6();
3017 IPADBG("V6 RT initialized\n");
3018
3019 ipa3_ctx->ctrl->ipa_init_flt4();
3020 IPADBG("V4 FLT initialized\n");
3021
3022 ipa3_ctx->ctrl->ipa_init_flt6();
3023 IPADBG("V6 FLT initialized\n");
3024
3025 if (ipa3_setup_flt_hash_tuple()) {
3026 IPAERR(":fail to configure flt hash tuple\n");
3027 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003028 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003029 }
3030 IPADBG("flt hash tuple is configured\n");
3031
3032 if (ipa3_setup_rt_hash_tuple()) {
3033 IPAERR(":fail to configure rt hash tuple\n");
3034 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003035 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003036 }
3037 IPADBG("rt hash tuple is configured\n");
3038
3039 if (ipa3_setup_exception_path()) {
3040 IPAERR(":fail to setup excp path\n");
3041 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003042 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003043 }
3044 IPADBG("Exception path was successfully set");
3045
3046 if (ipa3_setup_dflt_rt_tables()) {
3047 IPAERR(":fail to setup dflt routes\n");
3048 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003049 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003050 }
3051 IPADBG("default routing was set\n");
3052
Ghanim Fodic6b67492017-03-15 14:19:56 +02003053 /* LAN IN (IPA->AP) */
Amir Levy9659e592016-10-27 18:08:27 +03003054 memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
3055 sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
3056 sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
3057 sys_in.notify = ipa3_lan_rx_cb;
3058 sys_in.priv = NULL;
3059 sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH;
3060 sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
3061 sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
3062 sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
3063 sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
3064 sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
3065 sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
3066 sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_ENABLE_CS_OFFLOAD_DL;
3067
3068 /**
	 * ipa3_lan_rx_cb() is intended to notify the source EP about a packet
	 * being received on LAN_CONS by calling the source EP callback.
	 * There could be a race condition when calling this callback: another
	 * thread may nullify it, e.g. on EP disconnect.
	 * This lock is intended to protect the access to the source EP callback.
3074 */
3075 spin_lock_init(&ipa3_ctx->disconnect_lock);
3076 if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
Ghanim Fodic6b67492017-03-15 14:19:56 +02003077 IPAERR(":setup sys pipe (LAN_CONS) failed.\n");
Amir Levy9659e592016-10-27 18:08:27 +03003078 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003079 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003080 }
3081
Ghanim Fodic6b67492017-03-15 14:19:56 +02003082 /* LAN OUT (AP->IPA) */
Amir Levy54fe4d32017-03-16 11:21:49 +02003083 if (!ipa3_ctx->ipa_config_is_mhi) {
3084 memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
3085 sys_in.client = IPA_CLIENT_APPS_LAN_PROD;
3086 sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ;
3087 sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
3088 if (ipa3_setup_sys_pipe(&sys_in,
3089 &ipa3_ctx->clnt_hdl_data_out)) {
3090 IPAERR(":setup sys pipe (LAN_PROD) failed.\n");
3091 result = -EPERM;
3092 goto fail_lan_data_out;
3093 }
Amir Levy9659e592016-10-27 18:08:27 +03003094 }
3095
3096 return 0;
3097
Ghanim Fodic6b67492017-03-15 14:19:56 +02003098fail_lan_data_out:
Amir Levy9659e592016-10-27 18:08:27 +03003099 ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
Ghanim Fodic6b67492017-03-15 14:19:56 +02003100fail_flt_hash_tuple:
Amir Levy9659e592016-10-27 18:08:27 +03003101 if (ipa3_ctx->dflt_v6_rt_rule_hdl)
3102 __ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
3103 if (ipa3_ctx->dflt_v4_rt_rule_hdl)
3104 __ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
3105 if (ipa3_ctx->excp_hdr_hdl)
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02003106 __ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
Amir Levy9659e592016-10-27 18:08:27 +03003107 ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
Ghanim Fodic6b67492017-03-15 14:19:56 +02003108fail_ch20_wa:
Amir Levy9659e592016-10-27 18:08:27 +03003109 return result;
3110}
3111
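/* Tear down everything set up by ipa3_setup_apps_pipes(): the LAN pipes,
 * the default routing rules, the exception header and the command pipe.
 */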
3112static void ipa3_teardown_apps_pipes(void)
3113{
Amir Levy54fe4d32017-03-16 11:21:49 +02003114 if (!ipa3_ctx->ipa_config_is_mhi)
3115 ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out);
Amir Levy9659e592016-10-27 18:08:27 +03003116 ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
3117 __ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
3118 __ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02003119 __ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
Amir Levy9659e592016-10-27 18:08:27 +03003120 ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
3121}
3122
3123#ifdef CONFIG_COMPAT
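/* 32-bit compat ioctl handler: translates the 32-bit ioctl codes (and the
 * NAT allocation structure) to their native equivalents and forwards the
 * request to ipa3_ioctl().
 */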
3124long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3125{
3126 int retval = 0;
3127 struct ipa3_ioc_nat_alloc_mem32 nat_mem32;
3128 struct ipa_ioc_nat_alloc_mem nat_mem;
3129
3130 switch (cmd) {
3131 case IPA_IOC_ADD_HDR32:
3132 cmd = IPA_IOC_ADD_HDR;
3133 break;
3134 case IPA_IOC_DEL_HDR32:
3135 cmd = IPA_IOC_DEL_HDR;
3136 break;
3137 case IPA_IOC_ADD_RT_RULE32:
3138 cmd = IPA_IOC_ADD_RT_RULE;
3139 break;
3140 case IPA_IOC_DEL_RT_RULE32:
3141 cmd = IPA_IOC_DEL_RT_RULE;
3142 break;
3143 case IPA_IOC_ADD_FLT_RULE32:
3144 cmd = IPA_IOC_ADD_FLT_RULE;
3145 break;
3146 case IPA_IOC_DEL_FLT_RULE32:
3147 cmd = IPA_IOC_DEL_FLT_RULE;
3148 break;
3149 case IPA_IOC_GET_RT_TBL32:
3150 cmd = IPA_IOC_GET_RT_TBL;
3151 break;
3152 case IPA_IOC_COPY_HDR32:
3153 cmd = IPA_IOC_COPY_HDR;
3154 break;
3155 case IPA_IOC_QUERY_INTF32:
3156 cmd = IPA_IOC_QUERY_INTF;
3157 break;
3158 case IPA_IOC_QUERY_INTF_TX_PROPS32:
3159 cmd = IPA_IOC_QUERY_INTF_TX_PROPS;
3160 break;
3161 case IPA_IOC_QUERY_INTF_RX_PROPS32:
3162 cmd = IPA_IOC_QUERY_INTF_RX_PROPS;
3163 break;
3164 case IPA_IOC_QUERY_INTF_EXT_PROPS32:
3165 cmd = IPA_IOC_QUERY_INTF_EXT_PROPS;
3166 break;
3167 case IPA_IOC_GET_HDR32:
3168 cmd = IPA_IOC_GET_HDR;
3169 break;
3170 case IPA_IOC_ALLOC_NAT_MEM32:
3171 if (copy_from_user((u8 *)&nat_mem32, (u8 *)arg,
3172 sizeof(struct ipa3_ioc_nat_alloc_mem32))) {
3173 retval = -EFAULT;
3174 goto ret;
3175 }
3176 memcpy(nat_mem.dev_name, nat_mem32.dev_name,
3177 IPA_RESOURCE_NAME_MAX);
3178 nat_mem.size = (size_t)nat_mem32.size;
3179 nat_mem.offset = (off_t)nat_mem32.offset;
3180
3181 /* null terminate the string */
3182 nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
3183
3184 if (ipa3_allocate_nat_device(&nat_mem)) {
3185 retval = -EFAULT;
3186 goto ret;
3187 }
3188 nat_mem32.offset = (compat_off_t)nat_mem.offset;
3189 if (copy_to_user((u8 *)arg, (u8 *)&nat_mem32,
3190 sizeof(struct ipa3_ioc_nat_alloc_mem32))) {
3191 retval = -EFAULT;
3192 }
3193ret:
3194 return retval;
3195 case IPA_IOC_V4_INIT_NAT32:
3196 cmd = IPA_IOC_V4_INIT_NAT;
3197 break;
3198 case IPA_IOC_NAT_DMA32:
3199 cmd = IPA_IOC_NAT_DMA;
3200 break;
3201 case IPA_IOC_V4_DEL_NAT32:
3202 cmd = IPA_IOC_V4_DEL_NAT;
3203 break;
3204 case IPA_IOC_GET_NAT_OFFSET32:
3205 cmd = IPA_IOC_GET_NAT_OFFSET;
3206 break;
3207 case IPA_IOC_PULL_MSG32:
3208 cmd = IPA_IOC_PULL_MSG;
3209 break;
3210 case IPA_IOC_RM_ADD_DEPENDENCY32:
3211 cmd = IPA_IOC_RM_ADD_DEPENDENCY;
3212 break;
3213 case IPA_IOC_RM_DEL_DEPENDENCY32:
3214 cmd = IPA_IOC_RM_DEL_DEPENDENCY;
3215 break;
3216 case IPA_IOC_GENERATE_FLT_EQ32:
3217 cmd = IPA_IOC_GENERATE_FLT_EQ;
3218 break;
3219 case IPA_IOC_QUERY_RT_TBL_INDEX32:
3220 cmd = IPA_IOC_QUERY_RT_TBL_INDEX;
3221 break;
3222 case IPA_IOC_WRITE_QMAPID32:
3223 cmd = IPA_IOC_WRITE_QMAPID;
3224 break;
3225 case IPA_IOC_MDFY_FLT_RULE32:
3226 cmd = IPA_IOC_MDFY_FLT_RULE;
3227 break;
3228 case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32:
3229 cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD;
3230 break;
3231 case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32:
3232 cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL;
3233 break;
3234 case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32:
3235 cmd = IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED;
3236 break;
3237 case IPA_IOC_MDFY_RT_RULE32:
3238 cmd = IPA_IOC_MDFY_RT_RULE;
3239 break;
3240 case IPA_IOC_COMMIT_HDR:
3241 case IPA_IOC_RESET_HDR:
3242 case IPA_IOC_COMMIT_RT:
3243 case IPA_IOC_RESET_RT:
3244 case IPA_IOC_COMMIT_FLT:
3245 case IPA_IOC_RESET_FLT:
3246 case IPA_IOC_DUMP:
3247 case IPA_IOC_PUT_RT_TBL:
3248 case IPA_IOC_PUT_HDR:
3249 case IPA_IOC_SET_FLT:
3250 case IPA_IOC_QUERY_EP_MAPPING:
3251 break;
3252 default:
3253 return -ENOIOCTLCMD;
3254 }
3255 return ipa3_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
3256}
3257#endif
3258
3259static ssize_t ipa3_write(struct file *file, const char __user *buf,
3260 size_t count, loff_t *ppos);
3261
3262static const struct file_operations ipa3_drv_fops = {
3263 .owner = THIS_MODULE,
3264 .open = ipa3_open,
3265 .read = ipa3_read,
3266 .write = ipa3_write,
3267 .unlocked_ioctl = ipa3_ioctl,
3268#ifdef CONFIG_COMPAT
3269 .compat_ioctl = compat_ipa3_ioctl,
3270#endif
3271};
3272
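/* Obtain the IPA core clock, unless the IPA clock is voted for via bandwidth
 * voting through the bus-scaling driver, in which case no clock handle is
 * needed.
 */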
3273static int ipa3_get_clks(struct device *dev)
3274{
Ghanim Fodi6a831342017-03-07 18:19:15 +02003275 if (ipa3_res.use_bw_vote) {
3276 IPADBG("Vote IPA clock by bw voting via bus scaling driver\n");
3277 ipa3_clk = NULL;
3278 return 0;
3279 }
3280
Amir Levy9659e592016-10-27 18:08:27 +03003281 ipa3_clk = clk_get(dev, "core_clk");
3282 if (IS_ERR(ipa3_clk)) {
3283 if (ipa3_clk != ERR_PTR(-EPROBE_DEFER))
3284 IPAERR("fail to get ipa clk\n");
3285 return PTR_ERR(ipa3_clk);
3286 }
3287 return 0;
3288}
3289
3290/**
3291 * _ipa_enable_clks_v3_0() - Enable IPA clocks.
3292 */
3293void _ipa_enable_clks_v3_0(void)
3294{
Ghanim Fodi6a831342017-03-07 18:19:15 +02003295 IPADBG_LOW("curr_ipa_clk_rate=%d", ipa3_ctx->curr_ipa_clk_rate);
Amir Levy9659e592016-10-27 18:08:27 +03003296 if (ipa3_clk) {
Ghanim Fodi6a831342017-03-07 18:19:15 +02003297 IPADBG_LOW("enabling gcc_ipa_clk\n");
Amir Levy9659e592016-10-27 18:08:27 +03003298 clk_prepare(ipa3_clk);
3299 clk_enable(ipa3_clk);
Amir Levy9659e592016-10-27 18:08:27 +03003300 clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
Amir Levy9659e592016-10-27 18:08:27 +03003301 }
3302
Ghanim Fodi6a831342017-03-07 18:19:15 +02003303 ipa3_uc_notify_clk_state(true);
Amir Levy9659e592016-10-27 18:08:27 +03003304}
3305
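/* Map the current IPA clock rate to a bus-scaling usecase index
 * (SVS2/SVS/nominal/turbo).
 */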
3306static unsigned int ipa3_get_bus_vote(void)
3307{
3308 unsigned int idx = 1;
3309
Skylar Chang448d8b82017-08-08 17:30:32 -07003310 if (ipa3_ctx->curr_ipa_clk_rate == ipa3_ctx->ctrl->ipa_clk_rate_svs2) {
Amir Levy9659e592016-10-27 18:08:27 +03003311 idx = 1;
3312 } else if (ipa3_ctx->curr_ipa_clk_rate ==
Skylar Chang448d8b82017-08-08 17:30:32 -07003313 ipa3_ctx->ctrl->ipa_clk_rate_svs) {
3314 idx = 2;
3315 } else if (ipa3_ctx->curr_ipa_clk_rate ==
3316 ipa3_ctx->ctrl->ipa_clk_rate_nominal) {
3317 idx = 3;
Amir Levy9659e592016-10-27 18:08:27 +03003318 } else if (ipa3_ctx->curr_ipa_clk_rate ==
3319 ipa3_ctx->ctrl->ipa_clk_rate_turbo) {
3320 idx = ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases - 1;
3321 } else {
3322 WARN_ON(1);
3323 }
Amir Levy9659e592016-10-27 18:08:27 +03003324 IPADBG("curr %d idx %d\n", ipa3_ctx->curr_ipa_clk_rate, idx);
3325
3326 return idx;
3327}
3328
3329/**
3330* ipa3_enable_clks() - Turn on IPA clocks
3331*
3332* Return codes:
3333* None
3334*/
3335void ipa3_enable_clks(void)
3336{
3337 IPADBG("enabling IPA clocks and bus voting\n");
3338
Ghanim Fodi6a831342017-03-07 18:19:15 +02003339 if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
3340 ipa3_get_bus_vote()))
3341 WARN_ON(1);
Amir Levy9659e592016-10-27 18:08:27 +03003342
Ghanim Fodi6a831342017-03-07 18:19:15 +02003343 ipa3_ctx->ctrl->ipa3_enable_clks();
Amir Levy9659e592016-10-27 18:08:27 +03003344}
3345
3346
3347/**
3348 * _ipa_disable_clks_v3_0() - Disable IPA clocks.
3349 */
3350void _ipa_disable_clks_v3_0(void)
3351{
Amir Levy9659e592016-10-27 18:08:27 +03003352 ipa3_suspend_apps_pipes(true);
3353 ipa3_uc_notify_clk_state(false);
Ghanim Fodi6a831342017-03-07 18:19:15 +02003354 if (ipa3_clk) {
3355 IPADBG_LOW("disabling gcc_ipa_clk\n");
Amir Levy9659e592016-10-27 18:08:27 +03003356 clk_disable_unprepare(ipa3_clk);
Ghanim Fodi6a831342017-03-07 18:19:15 +02003357 }
Amir Levy9659e592016-10-27 18:08:27 +03003358}
3359
3360/**
3361* ipa3_disable_clks() - Turn off IPA clocks
3362*
3363* Return codes:
3364* None
3365*/
3366void ipa3_disable_clks(void)
3367{
3368 IPADBG("disabling IPA clocks and bus voting\n");
3369
3370 ipa3_ctx->ctrl->ipa3_disable_clks();
3371
Ghanim Fodi6a831342017-03-07 18:19:15 +02003372 if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl, 0))
3373 WARN_ON(1);
Amir Levy9659e592016-10-27 18:08:27 +03003374}
3375
3376/**
3377 * ipa3_start_tag_process() - Send TAG packet and wait for it to come back
3378 *
3379 * This function is called prior to clock gating when active client counter
3380 * is 1. TAG process ensures that there are no packets inside IPA HW that
Amir Levya59ed3f2017-03-05 17:30:55 +02003381 * were not submitted to the IPA client via the transport. During TAG process
3382 * all aggregation frames are (force) closed.
Amir Levy9659e592016-10-27 18:08:27 +03003383 *
3384 * Return codes:
3385 * None
3386 */
3387static void ipa3_start_tag_process(struct work_struct *work)
3388{
3389 int res;
3390
3391 IPADBG("starting TAG process\n");
3392 /* close aggregation frames on all pipes */
3393 res = ipa3_tag_aggr_force_close(-1);
3394 if (res)
3395 IPAERR("ipa3_tag_aggr_force_close failed %d\n", res);
3396 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS");
3397
3398 IPADBG("TAG process done\n");
3399}
3400
3401/**
3402* ipa3_active_clients_log_mod() - Log a modification in the active clients
3403* reference count
3404*
3405* This method logs any modification in the active clients reference count:
3406* It logs the modification in the circular history buffer
3407* It logs the modification in the hash table - looking for an entry,
3408* creating one if needed and deleting one if needed.
3409*
3410* @id: ipa3_active client logging info struct to hold the log information
3411* @inc: a boolean variable to indicate whether the modification is an increase
3412* or decrease
3413* @int_ctx: a boolean variable to indicate whether this call is being made from
3414* an interrupt context and therefore should allocate GFP_ATOMIC memory
3415*
3416* Method process:
3417* - Hash the unique identifier string
3418* - Find the hash in the table
3419* 1)If found, increase or decrease the reference count
3420* 2)If not found, allocate a new hash table entry struct and initialize it
3421* - Remove and deallocate unneeded data structure
3422* - Log the call in the circular history buffer (unless it is a simple call)
3423*/
3424void ipa3_active_clients_log_mod(struct ipa_active_client_logging_info *id,
3425 bool inc, bool int_ctx)
3426{
3427 char temp_str[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN];
3428 unsigned long long t;
3429 unsigned long nanosec_rem;
3430 struct ipa3_active_client_htable_entry *hentry;
3431 struct ipa3_active_client_htable_entry *hfound;
3432 u32 hkey;
3433 char str_to_hash[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
Skylar Chang69ae50e2017-07-31 13:13:29 -07003434 unsigned long flags;
Amir Levy9659e592016-10-27 18:08:27 +03003435
Skylar Chang69ae50e2017-07-31 13:13:29 -07003436 spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
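	/* the lock is held from here on, so allocations below must be atomic */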
3437 int_ctx = true;
Amir Levy9659e592016-10-27 18:08:27 +03003438 hfound = NULL;
3439 memset(str_to_hash, 0, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
3440 strlcpy(str_to_hash, id->id_string, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
Amir Levyd9f51132016-11-14 16:55:35 +02003441 hkey = jhash(str_to_hash, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN,
Amir Levy9659e592016-10-27 18:08:27 +03003442 0);
3443 hash_for_each_possible(ipa3_ctx->ipa3_active_clients_logging.htable,
3444 hentry, list, hkey) {
3445 if (!strcmp(hentry->id_string, id->id_string)) {
3446 hentry->count = hentry->count + (inc ? 1 : -1);
3447 hfound = hentry;
3448 }
3449 }
3450 if (hfound == NULL) {
3451 hentry = NULL;
3452 hentry = kzalloc(sizeof(
3453 struct ipa3_active_client_htable_entry),
3454 int_ctx ? GFP_ATOMIC : GFP_KERNEL);
3455 if (hentry == NULL) {
3456 IPAERR("failed allocating active clients hash entry");
Skylar Chang69ae50e2017-07-31 13:13:29 -07003457 spin_unlock_irqrestore(
3458 &ipa3_ctx->ipa3_active_clients_logging.lock,
3459 flags);
Amir Levy9659e592016-10-27 18:08:27 +03003460 return;
3461 }
3462 hentry->type = id->type;
3463 strlcpy(hentry->id_string, id->id_string,
3464 IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
3465 INIT_HLIST_NODE(&hentry->list);
3466 hentry->count = inc ? 1 : -1;
3467 hash_add(ipa3_ctx->ipa3_active_clients_logging.htable,
3468 &hentry->list, hkey);
3469 } else if (hfound->count == 0) {
3470 hash_del(&hfound->list);
3471 kfree(hfound);
3472 }
3473
3474 if (id->type != SIMPLE) {
3475 t = local_clock();
3476 nanosec_rem = do_div(t, 1000000000) / 1000;
3477 snprintf(temp_str, IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN,
3478 inc ? "[%5lu.%06lu] ^ %s, %s: %d" :
3479 "[%5lu.%06lu] v %s, %s: %d",
3480 (unsigned long)t, nanosec_rem,
3481 id->id_string, id->file, id->line);
3482 ipa3_active_clients_log_insert(temp_str);
3483 }
Skylar Chang69ae50e2017-07-31 13:13:29 -07003484 spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
3485 flags);
Amir Levy9659e592016-10-27 18:08:27 +03003486}
3487
3488void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
3489 bool int_ctx)
3490{
3491 ipa3_active_clients_log_mod(id, false, int_ctx);
3492}
3493
3494void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id,
3495 bool int_ctx)
3496{
3497 ipa3_active_clients_log_mod(id, true, int_ctx);
3498}
3499
3500/**
3501* ipa3_inc_client_enable_clks() - Increase active clients counter, and
3502* enable ipa clocks if necessary
3503*
3504* Return codes:
3505* None
3506*/
3507void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
3508{
Skylar Chang242952b2017-07-20 15:04:05 -07003509 int ret;
3510
Amir Levy9659e592016-10-27 18:08:27 +03003511 ipa3_active_clients_log_inc(id, false);
Skylar Chang242952b2017-07-20 15:04:05 -07003512 ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
3513 if (ret) {
3514 IPADBG_LOW("active clients = %d\n",
3515 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
3516 return;
3517 }
3518
3519 mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
3520
3521	/* somebody might have voted for the clocks meanwhile */
3522 ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
3523 if (ret) {
3524 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
3525 IPADBG_LOW("active clients = %d\n",
3526 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
3527 return;
3528 }
3529
3530 ipa3_enable_clks();
3531 atomic_inc(&ipa3_ctx->ipa3_active_clients.cnt);
3532 IPADBG_LOW("active clients = %d\n",
3533 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
3534 ipa3_suspend_apps_pipes(false);
3535 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
Amir Levy9659e592016-10-27 18:08:27 +03003536}
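
/*
 * Illustrative sketch (not part of the original driver): the voting
 * pattern a client is expected to follow around IPA HW accesses. The
 * IPA_ACTIVE_CLIENTS_INC_SIMPLE()/IPA_ACTIVE_CLIENTS_DEC_SIMPLE() macros
 * (used elsewhere in this file) are assumed to wrap the inc/dec helpers
 * with a pre-filled logging info struct.
 */
static void __maybe_unused ipa3_example_clock_vote_usage(void)
{
	/* may block: enables clocks and un-suspends apps pipes if needed */
	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	/* ... IPA HW access goes here ... */

	/* may schedule the TAG process before the clocks are gated */
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}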
3537
3538/**
3539* ipa3_inc_client_enable_clks_no_block() - Only increment the number of active
3540* clients if no asynchronous actions should be done. Asynchronous actions are
3541* locking a mutex and waking up IPA HW.
3542*
3543* Return codes: 0 for success
3544* -EPERM if an asynchronous action should have been done
3545*/
3546int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
3547 *id)
3548{
Skylar Chang242952b2017-07-20 15:04:05 -07003549 int ret;
Amir Levy9659e592016-10-27 18:08:27 +03003550
Skylar Chang242952b2017-07-20 15:04:05 -07003551 ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
3552 if (ret) {
3553 ipa3_active_clients_log_inc(id, true);
3554 IPADBG_LOW("active clients = %d\n",
3555 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
3556 return 0;
Amir Levy9659e592016-10-27 18:08:27 +03003557 }
Amir Levy9659e592016-10-27 18:08:27 +03003558
Skylar Chang242952b2017-07-20 15:04:05 -07003559 return -EPERM;
3560}
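
/*
 * Illustrative sketch (not part of the original driver): how an atomic
 * context might use the non-blocking variant above - if the clocks are
 * not already on, the HW access is deferred instead of sleeping on the
 * mutex. The "EXAMPLE_ATOMIC" tag and the deferred work are hypothetical;
 * whichever context ends up doing the access must balance the vote with
 * ipa3_dec_client_disable_clks().
 */
static int __maybe_unused ipa3_example_vote_in_atomic(
	struct work_struct *deferred_work)
{
	struct ipa_active_client_logging_info log_info;

	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "EXAMPLE_ATOMIC");
	if (ipa3_inc_client_enable_clks_no_block(&log_info)) {
		/* clocks are gated; defer the access to process context */
		queue_work(ipa3_ctx->power_mgmt_wq, deferred_work);
		return -EPERM;
	}

	/* clocks are already on - safe to touch IPA HW here */
	return 0;
}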
3561
3562static void __ipa3_dec_client_disable_clks(void)
3563{
3564 int ret;
3565
3566 if (!atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)) {
3567		IPAERR("trying to disable clocks while refcnt is 0!\n");
3568 ipa_assert();
3569 return;
3570 }
3571
3572 ret = atomic_add_unless(&ipa3_ctx->ipa3_active_clients.cnt, -1, 1);
3573 if (ret)
3574 goto bail;
3575
3576 /* seems like this is the only client holding the clocks */
3577 mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
3578 if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) == 1 &&
3579 ipa3_ctx->tag_process_before_gating) {
3580 ipa3_ctx->tag_process_before_gating = false;
3581 /*
3582 * When TAG process ends, active clients will be
3583 * decreased
3584 */
3585 queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_tag_work);
3586 goto unlock_mutex;
3587 }
3588
3589 /* a different context might increase the clock reference meanwhile */
3590 ret = atomic_sub_return(1, &ipa3_ctx->ipa3_active_clients.cnt);
3591 if (ret > 0)
3592 goto unlock_mutex;
3593 ipa3_disable_clks();
3594
3595unlock_mutex:
3596 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
3597bail:
3598 IPADBG_LOW("active clients = %d\n",
3599 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
Amir Levy9659e592016-10-27 18:08:27 +03003600}
3601
3602/**
3603 * ipa3_dec_client_disable_clks() - Decrease active clients counter
3604 *
3605 * If there are no other active clients, this function also starts the
3606 * TAG process. When the TAG process ends, the IPA clocks will be gated.
3607 * The start_tag_process_again flag is set during this function to signal the
3608 * TAG process to start again, as another client may still send data to IPA.
3609 *
3610 * Return codes:
3611 * None
3612 */
3613void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
3614{
Amir Levy9659e592016-10-27 18:08:27 +03003615 ipa3_active_clients_log_dec(id, false);
Skylar Chang242952b2017-07-20 15:04:05 -07003616 __ipa3_dec_client_disable_clks();
3617}
3618
3619static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work)
3620{
3621 __ipa3_dec_client_disable_clks();
3622}
3623
3624/**
3625 * ipa3_dec_client_disable_clks_no_block() - Decrease active clients counter
3626 * if possible without blocking. If this is the last client then the decrease
3627 * will happen from work queue context.
3628 *
3629 * Return codes:
3630 * None
3631 */
3632void ipa3_dec_client_disable_clks_no_block(
3633 struct ipa_active_client_logging_info *id)
3634{
3635 int ret;
3636
3637 ipa3_active_clients_log_dec(id, true);
3638 ret = atomic_add_unless(&ipa3_ctx->ipa3_active_clients.cnt, -1, 1);
3639 if (ret) {
3640 IPADBG_LOW("active clients = %d\n",
3641 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
3642 return;
Amir Levy9659e592016-10-27 18:08:27 +03003643 }
Skylar Chang242952b2017-07-20 15:04:05 -07003644
3645 /* seems like this is the only client holding the clocks */
3646 queue_work(ipa3_ctx->power_mgmt_wq,
3647 &ipa_dec_clients_disable_clks_on_wq_work);
Amir Levy9659e592016-10-27 18:08:27 +03003648}
3649
3650/**
3651* ipa3_inc_acquire_wakelock() - Increase the wakelock reference counter, and
3652* acquire the wakelock if necessary
3653*
3654* Return codes:
3655* None
3656*/
3657void ipa3_inc_acquire_wakelock(void)
3658{
3659 unsigned long flags;
3660
3661 spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
3662 ipa3_ctx->wakelock_ref_cnt.cnt++;
3663 if (ipa3_ctx->wakelock_ref_cnt.cnt == 1)
3664 __pm_stay_awake(&ipa3_ctx->w_lock);
3665 IPADBG_LOW("active wakelock ref cnt = %d\n",
3666 ipa3_ctx->wakelock_ref_cnt.cnt);
3667 spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
3668}
3669
3670/**
3671 * ipa3_dec_release_wakelock() - Decrease the wakelock reference counter
3672 *
3673 * If the reference count reaches 0, release the wakelock.
3674 *
3675 * Return codes:
3676 * None
3677 */
3678void ipa3_dec_release_wakelock(void)
3679{
3680 unsigned long flags;
3681
3682 spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
3683 ipa3_ctx->wakelock_ref_cnt.cnt--;
3684 IPADBG_LOW("active wakelock ref cnt = %d\n",
3685 ipa3_ctx->wakelock_ref_cnt.cnt);
3686 if (ipa3_ctx->wakelock_ref_cnt.cnt == 0)
3687 __pm_relax(&ipa3_ctx->w_lock);
3688 spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
3689}
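
/*
 * Note: ipa3_inc_acquire_wakelock() and ipa3_dec_release_wakelock() must be
 * balanced by their callers; the reference counter above is not protected
 * against underflow.
 */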
3690
Michael Adisumartac06df412017-09-19 10:10:35 -07003691int ipa3_set_clock_plan_from_pm(int idx)
3692{
3693 u32 clk_rate;
3694
3695 IPADBG_LOW("idx = %d\n", idx);
3696
3697 if (idx <= 0 || idx >= ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases) {
3698 IPAERR("bad voltage\n");
3699 return -EINVAL;
3700 }
3701
3702 if (idx == 1)
3703 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs;
3704 else if (idx == 2)
3705 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_nominal;
3706 else if (idx == 3)
3707 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
3708 else {
3709 IPAERR("bad voltage\n");
3710 WARN_ON(1);
3711 return -EFAULT;
3712 }
3713
3714 if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) {
3715 IPADBG_LOW("Same voltage\n");
3716 return 0;
3717 }
3718
3719 mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
3720 ipa3_ctx->curr_ipa_clk_rate = clk_rate;
3721 ipa3_ctx->ipa3_active_clients.bus_vote_idx = idx;
3722 IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
3723 if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) > 0) {
3724 if (ipa3_clk)
3725 clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
3726 if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
3727 ipa3_get_bus_vote()))
3728 WARN_ON(1);
3729 } else {
3730 IPADBG_LOW("clocks are gated, not setting rate\n");
3731 }
3732 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
3733 IPADBG_LOW("Done\n");
3734
3735 return 0;
3736}
3737
Amir Levy9659e592016-10-27 18:08:27 +03003738int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
3739 u32 bandwidth_mbps)
3740{
3741 enum ipa_voltage_level needed_voltage;
3742 u32 clk_rate;
3743
3744 IPADBG_LOW("floor_voltage=%d, bandwidth_mbps=%u",
3745 floor_voltage, bandwidth_mbps);
3746
3747 if (floor_voltage < IPA_VOLTAGE_UNSPECIFIED ||
3748 floor_voltage >= IPA_VOLTAGE_MAX) {
3749 IPAERR("bad voltage\n");
3750 return -EINVAL;
3751 }
3752
3753 if (ipa3_ctx->enable_clock_scaling) {
3754 IPADBG_LOW("Clock scaling is enabled\n");
3755 if (bandwidth_mbps >=
3756 ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo)
3757 needed_voltage = IPA_VOLTAGE_TURBO;
3758 else if (bandwidth_mbps >=
3759 ipa3_ctx->ctrl->clock_scaling_bw_threshold_nominal)
3760 needed_voltage = IPA_VOLTAGE_NOMINAL;
Skylar Chang448d8b82017-08-08 17:30:32 -07003761 else if (bandwidth_mbps >=
3762 ipa3_ctx->ctrl->clock_scaling_bw_threshold_svs)
Amir Levy9659e592016-10-27 18:08:27 +03003763 needed_voltage = IPA_VOLTAGE_SVS;
Skylar Chang448d8b82017-08-08 17:30:32 -07003764 else
3765 needed_voltage = IPA_VOLTAGE_SVS2;
Amir Levy9659e592016-10-27 18:08:27 +03003766 } else {
3767 IPADBG_LOW("Clock scaling is disabled\n");
3768 needed_voltage = IPA_VOLTAGE_NOMINAL;
3769 }
3770
3771 needed_voltage = max(needed_voltage, floor_voltage);
3772 switch (needed_voltage) {
Skylar Chang448d8b82017-08-08 17:30:32 -07003773 case IPA_VOLTAGE_SVS2:
3774 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs2;
3775 break;
Amir Levy9659e592016-10-27 18:08:27 +03003776 case IPA_VOLTAGE_SVS:
3777 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs;
3778 break;
3779 case IPA_VOLTAGE_NOMINAL:
3780 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_nominal;
3781 break;
3782 case IPA_VOLTAGE_TURBO:
3783 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
3784 break;
3785 default:
3786 IPAERR("bad voltage\n");
3787 WARN_ON(1);
3788 return -EFAULT;
3789 }
3790
3791 if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) {
3792 IPADBG_LOW("Same voltage\n");
3793 return 0;
3794 }
3795
Skylar Chang242952b2017-07-20 15:04:05 -07003796 /* Hold the mutex to avoid race conditions with ipa3_enable_clocks() */
3797 mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
Amir Levy9659e592016-10-27 18:08:27 +03003798 ipa3_ctx->curr_ipa_clk_rate = clk_rate;
3799 IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
Skylar Chang242952b2017-07-20 15:04:05 -07003800 if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) > 0) {
Ghanim Fodi6a831342017-03-07 18:19:15 +02003801 if (ipa3_clk)
3802 clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
3803 if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
Skylar Chang242952b2017-07-20 15:04:05 -07003804 ipa3_get_bus_vote()))
Ghanim Fodi6a831342017-03-07 18:19:15 +02003805 WARN_ON(1);
Amir Levy9659e592016-10-27 18:08:27 +03003806 } else {
3807 IPADBG_LOW("clocks are gated, not setting rate\n");
3808 }
Skylar Chang242952b2017-07-20 15:04:05 -07003809 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
Amir Levy9659e592016-10-27 18:08:27 +03003810 IPADBG_LOW("Done\n");
Skylar Chang1cbe99c2017-05-01 13:44:03 -07003811
Amir Levy9659e592016-10-27 18:08:27 +03003812 return 0;
3813}
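
/*
 * Illustrative sketch (not part of the original driver): a typical call
 * into ipa3_set_required_perf_profile(). The 800 Mbps figure is an
 * arbitrary example; with clock scaling enabled it selects the clock plan
 * according to the bandwidth thresholds checked above.
 */
static int __maybe_unused ipa3_example_set_perf_profile(void)
{
	/* no voltage floor - let the requested bandwidth pick the plan */
	return ipa3_set_required_perf_profile(IPA_VOLTAGE_UNSPECIFIED, 800);
}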
3814
Amir Levya59ed3f2017-03-05 17:30:55 +02003815static void ipa3_process_irq_schedule_rel(void)
Amir Levy9659e592016-10-27 18:08:27 +03003816{
3817 queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq,
Amir Levya59ed3f2017-03-05 17:30:55 +02003818 &ipa3_transport_release_resource_work,
Amir Levy9659e592016-10-27 18:08:27 +03003819 msecs_to_jiffies(IPA_TRANSPORT_PROD_TIMEOUT_MSEC));
3820}
3821
3822/**
3823* ipa3_suspend_handler() - Handles the suspend interrupt:
3824* wakes up the suspended peripheral by requesting its consumer
3825* @interrupt: Interrupt type
3826* @private_data: The client's private data
3827* @interrupt_data: Interrupt specific information data
3828*/
3829void ipa3_suspend_handler(enum ipa_irq_type interrupt,
3830 void *private_data,
3831 void *interrupt_data)
3832{
3833 enum ipa_rm_resource_name resource;
3834 u32 suspend_data =
3835 ((struct ipa_tx_suspend_irq_data *)interrupt_data)->endpoints;
3836 u32 bmsk = 1;
3837 u32 i = 0;
3838 int res;
3839 struct ipa_ep_cfg_holb holb_cfg;
Michael Adisumarta3e350812017-09-18 14:54:36 -07003840 u32 pipe_bitmask = 0;
Amir Levy9659e592016-10-27 18:08:27 +03003841
3842 IPADBG("interrupt=%d, interrupt_data=%u\n",
3843 interrupt, suspend_data);
3844 memset(&holb_cfg, 0, sizeof(holb_cfg));
3845 holb_cfg.tmr_val = 0;
3846
Michael Adisumarta3e350812017-09-18 14:54:36 -07003847 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++, bmsk = bmsk << 1) {
Amir Levy9659e592016-10-27 18:08:27 +03003848 if ((suspend_data & bmsk) && (ipa3_ctx->ep[i].valid)) {
Michael Adisumarta3e350812017-09-18 14:54:36 -07003849 if (ipa3_ctx->use_ipa_pm) {
3850 pipe_bitmask |= bmsk;
3851 continue;
3852 }
Amir Levy9659e592016-10-27 18:08:27 +03003853 if (IPA_CLIENT_IS_APPS_CONS(ipa3_ctx->ep[i].client)) {
3854 /*
3855 * pipe will be unsuspended as part of
3856 * enabling IPA clocks
3857 */
Skylar Chang0d06bb12017-02-24 11:22:03 -08003858 mutex_lock(&ipa3_ctx->transport_pm.
3859 transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03003860 if (!atomic_read(
3861 &ipa3_ctx->transport_pm.dec_clients)
3862 ) {
3863 IPA_ACTIVE_CLIENTS_INC_EP(
3864 ipa3_ctx->ep[i].client);
3865 IPADBG_LOW("Pipes un-suspended.\n");
3866 IPADBG_LOW("Enter poll mode.\n");
3867 atomic_set(
3868 &ipa3_ctx->transport_pm.dec_clients,
3869 1);
Amir Levya59ed3f2017-03-05 17:30:55 +02003870 ipa3_process_irq_schedule_rel();
Amir Levy9659e592016-10-27 18:08:27 +03003871 }
Skylar Chang0d06bb12017-02-24 11:22:03 -08003872 mutex_unlock(&ipa3_ctx->transport_pm.
3873 transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03003874 } else {
3875 resource = ipa3_get_rm_resource_from_ep(i);
3876 res =
3877 ipa_rm_request_resource_with_timer(resource);
3878 if (res == -EPERM &&
3879 IPA_CLIENT_IS_CONS(
3880 ipa3_ctx->ep[i].client)) {
3881 holb_cfg.en = 1;
3882 res = ipa3_cfg_ep_holb_by_client(
3883 ipa3_ctx->ep[i].client, &holb_cfg);
3884 if (res) {
3885 IPAERR("holb en fail, stall\n");
3886 BUG();
3887 }
3888 }
3889 }
3890 }
Michael Adisumarta3e350812017-09-18 14:54:36 -07003891 }
3892 if (ipa3_ctx->use_ipa_pm) {
3893 res = ipa_pm_handle_suspend(pipe_bitmask);
3894 if (res) {
3895 IPAERR("ipa_pm_handle_suspend failed %d\n", res);
3896 return;
3897 }
Amir Levy9659e592016-10-27 18:08:27 +03003898 }
3899}
3900
3901/**
3902* ipa3_restore_suspend_handler() - restores the original suspend IRQ handler
3903* as it was registered in the IPA init sequence.
3904* Return codes:
3905* 0: success
3906* -EPERM: failed to remove current handler or failed to add original handler
3907*/
3908int ipa3_restore_suspend_handler(void)
3909{
3910 int result = 0;
3911
3912 result = ipa3_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ);
3913 if (result) {
3914 IPAERR("remove handler for suspend interrupt failed\n");
3915 return -EPERM;
3916 }
3917
3918 result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
3919 ipa3_suspend_handler, false, NULL);
3920 if (result) {
3921 IPAERR("register handler for suspend interrupt failed\n");
3922 result = -EPERM;
3923 }
3924
3925 IPADBG("suspend handler successfully restored\n");
3926
3927 return result;
3928}
3929
3930static int ipa3_apps_cons_release_resource(void)
3931{
3932 return 0;
3933}
3934
3935static int ipa3_apps_cons_request_resource(void)
3936{
3937 return 0;
3938}
3939
Amir Levya59ed3f2017-03-05 17:30:55 +02003940static void ipa3_transport_release_resource(struct work_struct *work)
Amir Levy9659e592016-10-27 18:08:27 +03003941{
Sridhar Ancha99b505b2016-04-21 23:11:10 +05303942 mutex_lock(&ipa3_ctx->transport_pm.transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03003943 /* check whether still need to decrease client usage */
3944 if (atomic_read(&ipa3_ctx->transport_pm.dec_clients)) {
3945 if (atomic_read(&ipa3_ctx->transport_pm.eot_activity)) {
3946 IPADBG("EOT pending Re-scheduling\n");
Amir Levya59ed3f2017-03-05 17:30:55 +02003947 ipa3_process_irq_schedule_rel();
Amir Levy9659e592016-10-27 18:08:27 +03003948 } else {
3949 atomic_set(&ipa3_ctx->transport_pm.dec_clients, 0);
Amir Levya59ed3f2017-03-05 17:30:55 +02003950 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TRANSPORT_RESOURCE");
Amir Levy9659e592016-10-27 18:08:27 +03003951 }
3952 }
3953 atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
Sridhar Ancha99b505b2016-04-21 23:11:10 +05303954 mutex_unlock(&ipa3_ctx->transport_pm.transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03003955}
3956
3957int ipa3_create_apps_resource(void)
3958{
3959 struct ipa_rm_create_params apps_cons_create_params;
3960 struct ipa_rm_perf_profile profile;
3961 int result = 0;
3962
3963 memset(&apps_cons_create_params, 0,
3964 sizeof(apps_cons_create_params));
3965 apps_cons_create_params.name = IPA_RM_RESOURCE_APPS_CONS;
3966 apps_cons_create_params.request_resource =
3967 ipa3_apps_cons_request_resource;
3968 apps_cons_create_params.release_resource =
3969 ipa3_apps_cons_release_resource;
3970 result = ipa_rm_create_resource(&apps_cons_create_params);
3971 if (result) {
3972 IPAERR("ipa_rm_create_resource failed\n");
3973 return result;
3974 }
3975
3976 profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
3977 ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile);
3978
3979 return result;
3980}
3981
3982/**
3983 * ipa3_init_interrupts() - Register to IPA IRQs
3984 *
3985 * Return codes: 0 in success, negative in failure
3986 *
3987 */
3988int ipa3_init_interrupts(void)
3989{
3990 int result;
3991
3992 /*register IPA IRQ handler*/
3993 result = ipa3_interrupts_init(ipa3_res.ipa_irq, 0,
3994 master_dev);
3995 if (result) {
3996 IPAERR("ipa interrupts initialization failed\n");
3997 return -ENODEV;
3998 }
3999
4000 /*add handler for suspend interrupt*/
4001 result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
4002 ipa3_suspend_handler, false, NULL);
4003 if (result) {
4004 IPAERR("register handler for suspend interrupt failed\n");
4005 result = -ENODEV;
4006 goto fail_add_interrupt_handler;
4007 }
4008
4009 return 0;
4010
4011fail_add_interrupt_handler:
4012 free_irq(ipa3_res.ipa_irq, master_dev);
4013 return result;
4014}
4015
4016/**
4017 * ipa3_destroy_flt_tbl_idrs() - destroy the idr structure for flt tables
4018 * The idr strcuture per filtering table is intended for rule id generation
4019 * per filtering rule.
4020 */
4021static void ipa3_destroy_flt_tbl_idrs(void)
4022{
4023 int i;
4024 struct ipa3_flt_tbl *flt_tbl;
4025
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004026 idr_destroy(&ipa3_ctx->flt_rule_ids[IPA_IP_v4]);
4027 idr_destroy(&ipa3_ctx->flt_rule_ids[IPA_IP_v6]);
4028
Amir Levy9659e592016-10-27 18:08:27 +03004029 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
4030 if (!ipa_is_ep_support_flt(i))
4031 continue;
4032
4033 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004034 flt_tbl->rule_ids = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03004035 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004036 flt_tbl->rule_ids = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03004037 }
4038}
4039
4040static void ipa3_freeze_clock_vote_and_notify_modem(void)
4041{
4042 int res;
Amir Levy9659e592016-10-27 18:08:27 +03004043 struct ipa_active_client_logging_info log_info;
4044
4045 if (ipa3_ctx->smp2p_info.res_sent)
4046 return;
4047
Skylar Change1209942017-02-02 14:26:38 -08004048 if (ipa3_ctx->smp2p_info.out_base_id == 0) {
4049 IPAERR("smp2p out gpio not assigned\n");
4050 return;
4051 }
4052
Amir Levy9659e592016-10-27 18:08:27 +03004053 IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "FREEZE_VOTE");
4054 res = ipa3_inc_client_enable_clks_no_block(&log_info);
4055 if (res)
Skylar Change1209942017-02-02 14:26:38 -08004056 ipa3_ctx->smp2p_info.ipa_clk_on = false;
Amir Levy9659e592016-10-27 18:08:27 +03004057 else
Skylar Change1209942017-02-02 14:26:38 -08004058 ipa3_ctx->smp2p_info.ipa_clk_on = true;
Amir Levy9659e592016-10-27 18:08:27 +03004059
Skylar Change1209942017-02-02 14:26:38 -08004060 gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
4061 IPA_GPIO_OUT_CLK_VOTE_IDX,
4062 ipa3_ctx->smp2p_info.ipa_clk_on);
4063 gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
4064 IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX, 1);
Amir Levy9659e592016-10-27 18:08:27 +03004065
Skylar Change1209942017-02-02 14:26:38 -08004066 ipa3_ctx->smp2p_info.res_sent = true;
4067 IPADBG("IPA clocks are %s\n",
4068 ipa3_ctx->smp2p_info.ipa_clk_on ? "ON" : "OFF");
4069}
4070
4071void ipa3_reset_freeze_vote(void)
4072{
4073 if (ipa3_ctx->smp2p_info.res_sent == false)
4074 return;
4075
4076 if (ipa3_ctx->smp2p_info.ipa_clk_on)
4077 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("FREEZE_VOTE");
4078
4079 gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
4080 IPA_GPIO_OUT_CLK_VOTE_IDX, 0);
4081 gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
4082 IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX, 0);
4083
4084 ipa3_ctx->smp2p_info.res_sent = false;
4085 ipa3_ctx->smp2p_info.ipa_clk_on = false;
Amir Levy9659e592016-10-27 18:08:27 +03004086}
4087
4088static int ipa3_panic_notifier(struct notifier_block *this,
4089 unsigned long event, void *ptr)
4090{
4091 int res;
4092
4093 ipa3_freeze_clock_vote_and_notify_modem();
4094
4095 IPADBG("Calling uC panic handler\n");
4096 res = ipa3_uc_panic_notifier(this, event, ptr);
4097 if (res)
4098 IPAERR("uC panic handler failed %d\n", res);
4099
4100 return NOTIFY_DONE;
4101}
4102
4103static struct notifier_block ipa3_panic_blk = {
4104 .notifier_call = ipa3_panic_notifier,
4105 /* IPA panic handler needs to run before modem shuts down */
4106 .priority = INT_MAX,
4107};
4108
4109static void ipa3_register_panic_hdlr(void)
4110{
4111 atomic_notifier_chain_register(&panic_notifier_list,
4112 &ipa3_panic_blk);
4113}
4114
4115static void ipa3_trigger_ipa_ready_cbs(void)
4116{
4117 struct ipa3_ready_cb_info *info;
4118
4119 mutex_lock(&ipa3_ctx->lock);
4120
4121 /* Call all the CBs */
4122 list_for_each_entry(info, &ipa3_ctx->ipa_ready_cb_list, link)
4123 if (info->ready_cb)
4124 info->ready_cb(info->user_data);
4125
4126 mutex_unlock(&ipa3_ctx->lock);
4127}
4128
4129static int ipa3_gsi_pre_fw_load_init(void)
4130{
4131 int result;
4132
4133 result = gsi_configure_regs(ipa3_res.transport_mem_base,
4134 ipa3_res.transport_mem_size,
4135 ipa3_res.ipa_mem_base);
4136 if (result) {
4137 IPAERR("Failed to configure GSI registers\n");
4138 return -EINVAL;
4139 }
4140
4141 return 0;
4142}
4143
Skylar Chang0c17c7d2016-10-31 09:57:54 -07004144static void ipa3_uc_is_loaded(void)
4145{
4146 IPADBG("\n");
4147 complete_all(&ipa3_ctx->uc_loaded_completion_obj);
4148}
4149
Amir Levy41644242016-11-03 15:38:09 +02004150static enum gsi_ver ipa3_get_gsi_ver(enum ipa_hw_type ipa_hw_type)
4151{
4152 enum gsi_ver gsi_ver;
4153
4154 switch (ipa_hw_type) {
4155 case IPA_HW_v3_0:
4156 case IPA_HW_v3_1:
4157 gsi_ver = GSI_VER_1_0;
4158 break;
4159 case IPA_HW_v3_5:
4160 gsi_ver = GSI_VER_1_2;
4161 break;
4162 case IPA_HW_v3_5_1:
4163 gsi_ver = GSI_VER_1_3;
4164 break;
Michael Adisumarta891a4ff2017-05-16 16:40:06 -07004165 case IPA_HW_v4_0:
4166 gsi_ver = GSI_VER_2_0;
4167 break;
Amir Levy41644242016-11-03 15:38:09 +02004168 default:
4169 IPAERR("No GSI version for ipa type %d\n", ipa_hw_type);
4170 WARN_ON(1);
4171 gsi_ver = GSI_VER_ERR;
4172 }
4173
4174 IPADBG("GSI version %d\n", gsi_ver);
4175
4176 return gsi_ver;
4177}
4178
Amir Levy9659e592016-10-27 18:08:27 +03004179/**
4180 * ipa3_post_init() - Initialize the IPA Driver (Part II).
4181 * This part contains all initialization which requires interaction with
Amir Levya59ed3f2017-03-05 17:30:55 +02004182 * IPA HW (via GSI).
Amir Levy9659e592016-10-27 18:08:27 +03004183 *
4184 * @resource_p: contain platform specific values from DST file
4185 * @pdev: The platform device structure representing the IPA driver
4186 *
4187 * Function initialization process:
Amir Levy54fe4d32017-03-16 11:21:49 +02004188 * - Initialize endpoints bitmaps
4189 * - Initialize resource groups min and max values
4190 * - Initialize filtering lists heads and idr
4191 * - Initialize interrupts
Amir Levya59ed3f2017-03-05 17:30:55 +02004192 * - Register GSI
Amir Levy9659e592016-10-27 18:08:27 +03004193 * - Setup APPS pipes
4194 * - Initialize tethering bridge
4195 * - Initialize IPA debugfs
4196 * - Initialize IPA uC interface
4197 * - Initialize WDI interface
4198 * - Initialize USB interface
4199 * - Register for panic handler
4200 * - Trigger IPA ready callbacks (to all subscribers)
4201 * - Trigger IPA completion object (to all who wait on it)
4202 */
4203static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
4204 struct device *ipa_dev)
4205{
4206 int result;
Amir Levy9659e592016-10-27 18:08:27 +03004207 struct gsi_per_props gsi_props;
Skylar Chang0c17c7d2016-10-31 09:57:54 -07004208 struct ipa3_uc_hdlrs uc_hdlrs = { 0 };
Amir Levy54fe4d32017-03-16 11:21:49 +02004209 struct ipa3_flt_tbl *flt_tbl;
4210 int i;
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004211 struct idr *idr;
Amir Levy54fe4d32017-03-16 11:21:49 +02004212
Utkarsh Saxenaded78142017-05-03 14:04:30 +05304213 if (ipa3_ctx == NULL) {
4214		IPADBG("IPA driver hasn't been initialized\n");
4215 return -ENXIO;
4216 }
4217
4218 /* Prevent consequent calls from trying to load the FW again. */
4219 if (ipa3_ctx->ipa_initialization_complete)
4220 return 0;
4221
Amir Levy54fe4d32017-03-16 11:21:49 +02004222 /*
4223	 * An indication of whether we are working in MHI or non-MHI config is
4224	 * given in ipa3_write(), which runs before ipa3_post_init(), i.e. from
4225	 * this point it is safe to use the ipa3_ep_mapping array and the correct
4226	 * entry will be returned from ipa3_get_hw_type_index()
4227 */
4228 ipa_init_ep_flt_bitmap();
4229 IPADBG("EP with flt support bitmap 0x%x (%u pipes)\n",
4230 ipa3_ctx->ep_flt_bitmap, ipa3_ctx->ep_flt_num);
4231
4232 /* Assign resource limitation to each group */
4233 ipa3_set_resorce_groups_min_max_limits();
4234
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004235 idr = &(ipa3_ctx->flt_rule_ids[IPA_IP_v4]);
4236 idr_init(idr);
4237 idr = &(ipa3_ctx->flt_rule_ids[IPA_IP_v6]);
4238 idr_init(idr);
4239
Amir Levy54fe4d32017-03-16 11:21:49 +02004240 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
4241 if (!ipa_is_ep_support_flt(i))
4242 continue;
4243
4244 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
4245 INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
4246 flt_tbl->in_sys[IPA_RULE_HASHABLE] =
4247 !ipa3_ctx->ip4_flt_tbl_hash_lcl;
4248 flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
4249 !ipa3_ctx->ip4_flt_tbl_nhash_lcl;
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004250 flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[IPA_IP_v4];
Amir Levy54fe4d32017-03-16 11:21:49 +02004251
4252 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
4253 INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
4254 flt_tbl->in_sys[IPA_RULE_HASHABLE] =
4255 !ipa3_ctx->ip6_flt_tbl_hash_lcl;
4256 flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
4257 !ipa3_ctx->ip6_flt_tbl_nhash_lcl;
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004258 flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[IPA_IP_v6];
Amir Levy54fe4d32017-03-16 11:21:49 +02004259 }
4260
4261 if (!ipa3_ctx->apply_rg10_wa) {
4262 result = ipa3_init_interrupts();
4263 if (result) {
4264 IPAERR("ipa initialization of interrupts failed\n");
4265 result = -ENODEV;
4266 goto fail_register_device;
4267 }
4268 } else {
4269 IPADBG("Initialization of ipa interrupts skipped\n");
4270 }
Amir Levy9659e592016-10-27 18:08:27 +03004271
Amir Levy3afd94a2017-01-05 10:19:13 +02004272 /*
Amir Levy5cfbb322017-01-09 14:53:02 +02004273	 * IPA v3.5 and above requires disabling prefetch for USB in order
4274	 * to allow MBIM to work; currently MBIM is not needed in MHI mode.
Amir Levy3afd94a2017-01-05 10:19:13 +02004275 */
Michael Adisumartad68ab112017-06-14 11:40:06 -07004276 if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5
4277 && ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) &&
Amir Levy5cfbb322017-01-09 14:53:02 +02004278 (!ipa3_ctx->ipa_config_is_mhi))
Amir Levy3afd94a2017-01-05 10:19:13 +02004279 ipa3_disable_prefetch(IPA_CLIENT_USB_CONS);
4280
Amir Levya59ed3f2017-03-05 17:30:55 +02004281 memset(&gsi_props, 0, sizeof(gsi_props));
4282 gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type);
4283 gsi_props.ee = resource_p->ee;
4284 gsi_props.intr = GSI_INTR_IRQ;
4285 gsi_props.irq = resource_p->transport_irq;
4286 gsi_props.phys_addr = resource_p->transport_mem_base;
4287 gsi_props.size = resource_p->transport_mem_size;
4288 gsi_props.notify_cb = ipa_gsi_notify_cb;
4289 gsi_props.req_clk_cb = NULL;
4290 gsi_props.rel_clk_cb = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03004291
Ghanim Fodic823bc62017-10-21 17:29:53 +03004292 if (ipa3_ctx->ipa_config_is_mhi) {
4293 gsi_props.mhi_er_id_limits_valid = true;
4294 gsi_props.mhi_er_id_limits[0] = resource_p->mhi_evid_limits[0];
4295 gsi_props.mhi_er_id_limits[1] = resource_p->mhi_evid_limits[1];
4296 }
4297
Amir Levya59ed3f2017-03-05 17:30:55 +02004298 result = gsi_register_device(&gsi_props,
4299 &ipa3_ctx->gsi_dev_hdl);
4300 if (result != GSI_STATUS_SUCCESS) {
4301 IPAERR(":gsi register error - %d\n", result);
4302 result = -ENODEV;
4303 goto fail_register_device;
Amir Levy9659e592016-10-27 18:08:27 +03004304 }
Amir Levya59ed3f2017-03-05 17:30:55 +02004305 IPADBG("IPA gsi is registered\n");
Amir Levy9659e592016-10-27 18:08:27 +03004306
4307 /* setup the AP-IPA pipes */
4308 if (ipa3_setup_apps_pipes()) {
4309 IPAERR(":failed to setup IPA-Apps pipes\n");
4310 result = -ENODEV;
4311 goto fail_setup_apps_pipes;
4312 }
Amir Levya59ed3f2017-03-05 17:30:55 +02004313 IPADBG("IPA GPI pipes were connected\n");
Amir Levy9659e592016-10-27 18:08:27 +03004314
4315 if (ipa3_ctx->use_ipa_teth_bridge) {
4316 /* Initialize the tethering bridge driver */
4317 result = ipa3_teth_bridge_driver_init();
4318 if (result) {
4319 IPAERR(":teth_bridge init failed (%d)\n", -result);
4320 result = -ENODEV;
4321 goto fail_teth_bridge_driver_init;
4322 }
4323 IPADBG("teth_bridge initialized");
4324 }
4325
4326 ipa3_debugfs_init();
4327
4328 result = ipa3_uc_interface_init();
4329 if (result)
4330 IPAERR(":ipa Uc interface init failed (%d)\n", -result);
4331 else
4332 IPADBG(":ipa Uc interface init ok\n");
4333
Skylar Chang0c17c7d2016-10-31 09:57:54 -07004334 uc_hdlrs.ipa_uc_loaded_hdlr = ipa3_uc_is_loaded;
4335 ipa3_uc_register_handlers(IPA_HW_FEATURE_COMMON, &uc_hdlrs);
4336
Amir Levy9659e592016-10-27 18:08:27 +03004337 result = ipa3_wdi_init();
4338 if (result)
4339 IPAERR(":wdi init failed (%d)\n", -result);
4340 else
4341 IPADBG(":wdi init ok\n");
4342
4343 result = ipa3_ntn_init();
4344 if (result)
4345 IPAERR(":ntn init failed (%d)\n", -result);
4346 else
4347 IPADBG(":ntn init ok\n");
4348
Skylar Chang6f6e3072017-07-28 10:03:47 -07004349 result = ipa_hw_stats_init();
4350 if (result)
4351 IPAERR("fail to init stats %d\n", result);
4352 else
4353 IPADBG(":stats init ok\n");
4354
Amir Levy9659e592016-10-27 18:08:27 +03004355 ipa3_register_panic_hdlr();
4356
4357 ipa3_ctx->q6_proxy_clk_vote_valid = true;
4358
4359 mutex_lock(&ipa3_ctx->lock);
4360 ipa3_ctx->ipa_initialization_complete = true;
4361 mutex_unlock(&ipa3_ctx->lock);
4362
4363 ipa3_trigger_ipa_ready_cbs();
4364 complete_all(&ipa3_ctx->init_completion_obj);
4365 pr_info("IPA driver initialization was successful.\n");
4366
4367 return 0;
4368
4369fail_teth_bridge_driver_init:
4370 ipa3_teardown_apps_pipes();
4371fail_setup_apps_pipes:
Amir Levya59ed3f2017-03-05 17:30:55 +02004372 gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
Amir Levy9659e592016-10-27 18:08:27 +03004373fail_register_device:
Amir Levy9659e592016-10-27 18:08:27 +03004374 ipa3_destroy_flt_tbl_idrs();
Amir Levy9659e592016-10-27 18:08:27 +03004375 return result;
4376}
4377
Ghanim Fodi03dcc272017-08-08 18:13:25 +03004378static int ipa3_manual_load_ipa_fws(void)
Amir Levy9659e592016-10-27 18:08:27 +03004379{
4380 int result;
4381 const struct firmware *fw;
4382
Ghanim Fodi03dcc272017-08-08 18:13:25 +03004383 IPADBG("Manual FW loading process initiated\n");
Amir Levy9659e592016-10-27 18:08:27 +03004384
4385 result = request_firmware(&fw, IPA_FWS_PATH, ipa3_ctx->dev);
4386 if (result < 0) {
4387 IPAERR("request_firmware failed, error %d\n", result);
4388 return result;
4389 }
4390 if (fw == NULL) {
4391 IPAERR("Firmware is NULL!\n");
4392 return -EINVAL;
4393 }
4394
4395 IPADBG("FWs are available for loading\n");
4396
Ghanim Fodi37b64952017-01-24 15:42:30 +02004397 result = ipa3_load_fws(fw, ipa3_res.transport_mem_base);
Amir Levy9659e592016-10-27 18:08:27 +03004398 if (result) {
Ghanim Fodi03dcc272017-08-08 18:13:25 +03004399 IPAERR("Manual IPA FWs loading has failed\n");
Amir Levy9659e592016-10-27 18:08:27 +03004400 release_firmware(fw);
4401 return result;
4402 }
4403
4404 result = gsi_enable_fw(ipa3_res.transport_mem_base,
Amir Levy85dcd172016-12-06 17:47:39 +02004405 ipa3_res.transport_mem_size,
4406 ipa3_get_gsi_ver(ipa3_res.ipa_hw_type));
Amir Levy9659e592016-10-27 18:08:27 +03004407 if (result) {
4408 IPAERR("Failed to enable GSI FW\n");
4409 release_firmware(fw);
4410 return result;
4411 }
4412
4413 release_firmware(fw);
4414
Ghanim Fodi03dcc272017-08-08 18:13:25 +03004415 IPADBG("Manual FW loading process is complete\n");
Amir Levy9659e592016-10-27 18:08:27 +03004416 return 0;
4417}
4418
Ghanim Fodi03dcc272017-08-08 18:13:25 +03004419static int ipa3_pil_load_ipa_fws(void)
Amir Levy9659e592016-10-27 18:08:27 +03004420{
4421 void *subsystem_get_retval = NULL;
4422
Ghanim Fodi03dcc272017-08-08 18:13:25 +03004423 IPADBG("PIL FW loading process initiated\n");
Amir Levy9659e592016-10-27 18:08:27 +03004424
4425 subsystem_get_retval = subsystem_get(IPA_SUBSYSTEM_NAME);
4426 if (IS_ERR_OR_NULL(subsystem_get_retval)) {
4427 IPAERR("Unable to trigger PIL process for FW loading\n");
4428 return -EINVAL;
4429 }
4430
Ghanim Fodi03dcc272017-08-08 18:13:25 +03004431 IPADBG("PIL FW loading process is complete\n");
Amir Levy9659e592016-10-27 18:08:27 +03004432 return 0;
4433}
4434
Ghanim Fodia5f376a2017-10-17 18:14:53 +03004435static void ipa3_load_ipa_fw(struct work_struct *work)
4436{
4437 int result;
4438
4439 IPADBG("Entry\n");
4440
4441 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
4442
4443 if (ipa3_is_msm_device() || (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5))
4444 result = ipa3_pil_load_ipa_fws();
4445 else
4446 result = ipa3_manual_load_ipa_fws();
4447
4448 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
4449
4450 if (result) {
4451 IPAERR("IPA FW loading process has failed\n");
4452 return;
4453 }
4454 pr_info("IPA FW loaded successfully\n");
4455
4456 result = ipa3_post_init(&ipa3_res, ipa3_ctx->dev);
4457 if (result)
4458 IPAERR("IPA post init failed %d\n", result);
4459}
4460
Amir Levy9659e592016-10-27 18:08:27 +03004461static ssize_t ipa3_write(struct file *file, const char __user *buf,
4462 size_t count, loff_t *ppos)
4463{
4464 unsigned long missing;
Amir Levy9659e592016-10-27 18:08:27 +03004465
4466 char dbg_buff[16] = { 0 };
4467
4468 if (sizeof(dbg_buff) < count + 1)
4469 return -EFAULT;
4470
4471 missing = copy_from_user(dbg_buff, buf, count);
4472
4473 if (missing) {
4474 IPAERR("Unable to copy data from user\n");
4475 return -EFAULT;
4476 }
4477
Mohammed Javidbf4c8022017-08-07 23:15:48 +05304478 if (count > 0)
4479 dbg_buff[count - 1] = '\0';
4480
Amir Levy9659e592016-10-27 18:08:27 +03004481 /* Prevent consequent calls from trying to load the FW again. */
4482 if (ipa3_is_ready())
4483 return count;
4484
Ghanim Fodi03dcc272017-08-08 18:13:25 +03004485 /* Check MHI configuration on MDM devices */
4486 if (!ipa3_is_msm_device()) {
Amir Levy54fe4d32017-03-16 11:21:49 +02004487 if (!strcasecmp(dbg_buff, "MHI")) {
4488 ipa3_ctx->ipa_config_is_mhi = true;
4489 pr_info(
4490 "IPA is loading with MHI configuration\n");
4491 } else {
4492 pr_info(
4493 "IPA is loading with non MHI configuration\n");
4494 }
Amir Levy54fe4d32017-03-16 11:21:49 +02004495 }
Ghanim Fodi03dcc272017-08-08 18:13:25 +03004496
Ghanim Fodi03dcc272017-08-08 18:13:25 +03004497 queue_work(ipa3_ctx->transport_power_mgmt_wq,
Ghanim Fodia5f376a2017-10-17 18:14:53 +03004498 &ipa3_fw_loading_work);
Ghanim Fodi03dcc272017-08-08 18:13:25 +03004499
Ghanim Fodia5f376a2017-10-17 18:14:53 +03004500 IPADBG("Scheduled a work to load IPA FW\n");
Amir Levy9659e592016-10-27 18:08:27 +03004501 return count;
4502}
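
/*
 * Note: ipa3_write() is assumed to be wired as the write fop of the IPA
 * character device created in ipa3_pre_init(); writing any string to it
 * schedules ipa3_load_ipa_fw(), and on non-MSM (MDM) targets the string
 * "MHI" selects the MHI configuration before the FW is loaded.
 */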
4503
Gidon Studinski3021a6f2016-11-10 12:48:48 +02004504static int ipa3_tz_unlock_reg(struct ipa3_context *ipa3_ctx)
4505{
4506 int i, size, ret, resp;
4507 struct tz_smmu_ipa_protect_region_iovec_s *ipa_tz_unlock_vec;
4508 struct tz_smmu_ipa_protect_region_s cmd_buf;
4509
4510 if (ipa3_ctx && ipa3_ctx->ipa_tz_unlock_reg_num > 0) {
4511 size = ipa3_ctx->ipa_tz_unlock_reg_num *
4512 sizeof(struct tz_smmu_ipa_protect_region_iovec_s);
4513 ipa_tz_unlock_vec = kzalloc(PAGE_ALIGN(size), GFP_KERNEL);
4514 if (ipa_tz_unlock_vec == NULL)
4515 return -ENOMEM;
4516
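		/* clear the low 12 bits so the region addresses are 4KB aligned */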
4517 for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) {
4518 ipa_tz_unlock_vec[i].input_addr =
4519 ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr ^
4520 (ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr &
4521 0xFFF);
4522 ipa_tz_unlock_vec[i].output_addr =
4523 ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr ^
4524 (ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr &
4525 0xFFF);
4526 ipa_tz_unlock_vec[i].size =
4527 ipa3_ctx->ipa_tz_unlock_reg[i].size;
4528 ipa_tz_unlock_vec[i].attr = IPA_TZ_UNLOCK_ATTRIBUTE;
4529 }
4530
4531 /* pass physical address of command buffer */
4532 cmd_buf.iovec_buf = virt_to_phys((void *)ipa_tz_unlock_vec);
4533 cmd_buf.size_bytes = size;
4534
4535 /* flush cache to DDR */
4536 __cpuc_flush_dcache_area((void *)ipa_tz_unlock_vec, size);
4537 outer_flush_range(cmd_buf.iovec_buf, cmd_buf.iovec_buf + size);
4538
4539 ret = scm_call(SCM_SVC_MP, TZ_MEM_PROTECT_REGION_ID, &cmd_buf,
4540 sizeof(cmd_buf), &resp, sizeof(resp));
4541 if (ret) {
4542 IPAERR("scm call SCM_SVC_MP failed: %d\n", ret);
4543 kfree(ipa_tz_unlock_vec);
4544 return -EFAULT;
4545 }
4546 kfree(ipa_tz_unlock_vec);
4547 }
4548 return 0;
4549}
4550
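/*
 * ipa3_alloc_pkt_init() - Pre-construct an IP_PACKET_INIT immediate command
 * per pipe in a single DMA-able buffer and record each command's physical
 * address in ipa3_ctx->pkt_init_imm[], so the data path can reference a
 * ready-made command instead of building one at runtime.
 */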
Skylar Changcd3902d2017-03-27 18:08:27 -07004551static int ipa3_alloc_pkt_init(void)
4552{
4553 struct ipa_mem_buffer mem;
4554 struct ipahal_imm_cmd_pyld *cmd_pyld;
4555 struct ipahal_imm_cmd_ip_packet_init cmd = {0};
4556 int i;
4557
4558 cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
4559 &cmd, false);
4560 if (!cmd_pyld) {
4561 IPAERR("failed to construct IMM cmd\n");
4562 return -ENOMEM;
4563 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -07004564 ipa3_ctx->pkt_init_imm_opcode = cmd_pyld->opcode;
Skylar Changcd3902d2017-03-27 18:08:27 -07004565
4566 mem.size = cmd_pyld->len * ipa3_ctx->ipa_num_pipes;
4567 mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
4568 &mem.phys_base, GFP_KERNEL);
4569 if (!mem.base) {
4570 IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
4571 ipahal_destroy_imm_cmd(cmd_pyld);
4572 return -ENOMEM;
4573 }
4574 ipahal_destroy_imm_cmd(cmd_pyld);
4575
4576 memset(mem.base, 0, mem.size);
4577 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
4578 cmd.destination_pipe_index = i;
4579 cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
4580 &cmd, false);
4581 if (!cmd_pyld) {
4582 IPAERR("failed to construct IMM cmd\n");
4583 dma_free_coherent(ipa3_ctx->pdev,
4584 mem.size,
4585 mem.base,
4586 mem.phys_base);
4587 return -ENOMEM;
4588 }
4589 memcpy(mem.base + i * cmd_pyld->len, cmd_pyld->data,
4590 cmd_pyld->len);
4591 ipa3_ctx->pkt_init_imm[i] = mem.phys_base + i * cmd_pyld->len;
4592 ipahal_destroy_imm_cmd(cmd_pyld);
4593 }
4594
4595 return 0;
4596}
4597
Amir Levy9659e592016-10-27 18:08:27 +03004598/**
4599* ipa3_pre_init() - Initialize the IPA Driver.
4600* This part contains all initialization which doesn't require IPA HW, such
4601* as structure allocations and initializations, register writes, etc.
4602*
4603* @resource_p: contain platform specific values from DST file
4604* @pdev: The platform device structure representing the IPA driver
4605*
4606* Function initialization process:
Amir Levy54fe4d32017-03-16 11:21:49 +02004607* Allocate memory for the driver context data struct
4608* Initializing the ipa3_ctx with :
Amir Levy9659e592016-10-27 18:08:27 +03004609* 1)parsed values from the dts file
4610* 2)parameters passed to the module initialization
4611* 3)read HW values (such as core memory size)
Amir Levy54fe4d32017-03-16 11:21:49 +02004612* Map IPA core registers to CPU memory
4613* Restart IPA core (HW reset)
4614* Initialize the look-aside caches (kmem_cache/slab) for filter,
Amir Levy9659e592016-10-27 18:08:27 +03004615* routing and IPA-tree
Amir Levy54fe4d32017-03-16 11:21:49 +02004616* Create a memory pool with 4 objects for DMA operations (each object
Amir Levy9659e592016-10-27 18:08:27 +03004617* is 512 bytes long); these objects will be used for tx (A5->IPA)
Amir Levy54fe4d32017-03-16 11:21:49 +02004618* Initialize list heads (routing, hdr, system pipes)
4619* Initialize mutexes (for ipa_ctx and NAT memory mutexes)
4620* Initialize spinlocks (for list related to A5<->IPA pipes)
4621* Initialize 2 single-threaded work-queue named "ipa rx wq" and "ipa tx wq"
4622* Initialize Red-Black-Tree(s) for handles of header,routing rule,
4623* routing table ,filtering rule
4624* Initialize the filter block by committing IPV4 and IPV6 default rules
4625* Create empty routing table in system memory(no committing)
4626* Create a char-device for IPA
4627* Initialize IPA RM (resource manager)
4628* Configure GSI registers (in GSI case)
Amir Levy9659e592016-10-27 18:08:27 +03004629*/
4630static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
4631 struct device *ipa_dev)
4632{
4633 int result = 0;
4634 int i;
Amir Levy9659e592016-10-27 18:08:27 +03004635 struct ipa3_rt_tbl_set *rset;
4636 struct ipa_active_client_logging_info log_info;
4637
4638 IPADBG("IPA Driver initialization started\n");
4639
4640 ipa3_ctx = kzalloc(sizeof(*ipa3_ctx), GFP_KERNEL);
4641 if (!ipa3_ctx) {
4642 IPAERR(":kzalloc err.\n");
4643 result = -ENOMEM;
4644 goto fail_mem_ctx;
4645 }
4646
4647 ipa3_ctx->logbuf = ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa", 0);
Skylar Chang841c1452017-04-03 16:07:22 -07004648 if (ipa3_ctx->logbuf == NULL)
4649 IPAERR("failed to create IPC log, continue...\n");
Amir Levy9659e592016-10-27 18:08:27 +03004650
4651 ipa3_ctx->pdev = ipa_dev;
4652 ipa3_ctx->uc_pdev = ipa_dev;
4653 ipa3_ctx->smmu_present = smmu_info.present;
Michael Adisumarta93e97522017-10-06 15:49:46 -07004654 if (!ipa3_ctx->smmu_present) {
4655 for (i = 0; i < IPA_SMMU_CB_MAX; i++)
4656 ipa3_ctx->s1_bypass_arr[i] = true;
4657 } else {
Michael Adisumarta972e33e2017-10-20 15:24:27 -07004658 ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP] =
4659 smmu_info.s1_bypass_arr[IPA_SMMU_CB_AP];
Michael Adisumarta93e97522017-10-06 15:49:46 -07004660 }
4661
Amir Levy9659e592016-10-27 18:08:27 +03004662 ipa3_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
4663 ipa3_ctx->ipa_wrapper_size = resource_p->ipa_mem_size;
4664 ipa3_ctx->ipa_hw_type = resource_p->ipa_hw_type;
4665 ipa3_ctx->ipa3_hw_mode = resource_p->ipa3_hw_mode;
4666 ipa3_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
Amir Levy9659e592016-10-27 18:08:27 +03004667 ipa3_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt;
4668 ipa3_ctx->ipa_wdi2 = resource_p->ipa_wdi2;
4669 ipa3_ctx->use_64_bit_dma_mask = resource_p->use_64_bit_dma_mask;
4670 ipa3_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size;
4671 ipa3_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size;
4672 ipa3_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset;
4673 ipa3_ctx->tethered_flow_control = resource_p->tethered_flow_control;
Amir Levy9659e592016-10-27 18:08:27 +03004674 ipa3_ctx->ee = resource_p->ee;
4675 ipa3_ctx->apply_rg10_wa = resource_p->apply_rg10_wa;
4676 ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa;
Michael Adisumarta3e350812017-09-18 14:54:36 -07004677 ipa3_ctx->use_ipa_pm = resource_p->use_ipa_pm;
Amir Levy9659e592016-10-27 18:08:27 +03004678 ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
Ghanim Fodic823bc62017-10-21 17:29:53 +03004679 ipa3_ctx->mhi_evid_limits[0] = resource_p->mhi_evid_limits[0];
4680 ipa3_ctx->mhi_evid_limits[1] = resource_p->mhi_evid_limits[1];
Gidon Studinski3021a6f2016-11-10 12:48:48 +02004681 if (resource_p->ipa_tz_unlock_reg) {
4682 ipa3_ctx->ipa_tz_unlock_reg_num =
4683 resource_p->ipa_tz_unlock_reg_num;
4684 ipa3_ctx->ipa_tz_unlock_reg = kcalloc(
4685 ipa3_ctx->ipa_tz_unlock_reg_num,
4686 sizeof(*ipa3_ctx->ipa_tz_unlock_reg),
4687 GFP_KERNEL);
4688 if (ipa3_ctx->ipa_tz_unlock_reg == NULL) {
4689 result = -ENOMEM;
4690 goto fail_tz_unlock_reg;
4691 }
4692 for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) {
4693 ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr =
4694 resource_p->ipa_tz_unlock_reg[i].reg_addr;
4695 ipa3_ctx->ipa_tz_unlock_reg[i].size =
4696 resource_p->ipa_tz_unlock_reg[i].size;
4697 }
4698 }
4699
4700 /* unlock registers for uc */
4701 ipa3_tz_unlock_reg(ipa3_ctx);
Amir Levy9659e592016-10-27 18:08:27 +03004702
4703 /* default aggregation parameters */
4704 ipa3_ctx->aggregation_type = IPA_MBIM_16;
4705 ipa3_ctx->aggregation_byte_limit = 1;
4706 ipa3_ctx->aggregation_time_limit = 0;
4707
4708 ipa3_ctx->ctrl = kzalloc(sizeof(*ipa3_ctx->ctrl), GFP_KERNEL);
4709 if (!ipa3_ctx->ctrl) {
4710 IPAERR("memory allocation error for ctrl\n");
4711 result = -ENOMEM;
4712 goto fail_mem_ctrl;
4713 }
4714 result = ipa3_controller_static_bind(ipa3_ctx->ctrl,
4715 ipa3_ctx->ipa_hw_type);
4716 if (result) {
4717 IPAERR("fail to static bind IPA ctrl.\n");
4718 result = -EFAULT;
4719 goto fail_bind;
4720 }
4721
4722 result = ipa3_init_mem_partition(master_dev->of_node);
4723 if (result) {
4724 IPAERR(":ipa3_init_mem_partition failed!\n");
4725 result = -ENODEV;
4726 goto fail_init_mem_partition;
4727 }
4728
4729 if (ipa3_bus_scale_table) {
Ghanim Fodi6a831342017-03-07 18:19:15 +02004730 IPADBG("Use bus scaling info from device tree #usecases=%d\n",
4731 ipa3_bus_scale_table->num_usecases);
Amir Levy9659e592016-10-27 18:08:27 +03004732 ipa3_ctx->ctrl->msm_bus_data_ptr = ipa3_bus_scale_table;
4733 }
4734
Ghanim Fodi6a831342017-03-07 18:19:15 +02004735 /* get BUS handle */
4736 ipa3_ctx->ipa_bus_hdl =
4737 msm_bus_scale_register_client(
4738 ipa3_ctx->ctrl->msm_bus_data_ptr);
4739 if (!ipa3_ctx->ipa_bus_hdl) {
4740 IPAERR("fail to register with bus mgr!\n");
4741 result = -ENODEV;
4742 goto fail_bus_reg;
Amir Levy9659e592016-10-27 18:08:27 +03004743 }
4744
4745 /* get IPA clocks */
4746 result = ipa3_get_clks(master_dev);
4747 if (result)
4748 goto fail_clk;
4749
4750 /* init active_clients_log after getting ipa-clk */
4751 if (ipa3_active_clients_log_init())
4752 goto fail_init_active_client;
4753
4754 /* Enable ipa3_ctx->enable_clock_scaling */
4755 ipa3_ctx->enable_clock_scaling = 1;
4756 ipa3_ctx->curr_ipa_clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
4757
4758 /* enable IPA clocks explicitly to allow the initialization */
4759 ipa3_enable_clks();
4760
4761 /* setup IPA register access */
4762 IPADBG("Mapping 0x%x\n", resource_p->ipa_mem_base +
4763 ipa3_ctx->ctrl->ipa_reg_base_ofst);
4764 ipa3_ctx->mmio = ioremap(resource_p->ipa_mem_base +
4765 ipa3_ctx->ctrl->ipa_reg_base_ofst,
4766 resource_p->ipa_mem_size);
4767 if (!ipa3_ctx->mmio) {
4768 IPAERR(":ipa-base ioremap err.\n");
4769 result = -EFAULT;
4770 goto fail_remap;
4771 }
4772
4773 if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio,
4774 ipa3_ctx->pdev)) {
4775 IPAERR("fail to init ipahal\n");
4776 result = -EFAULT;
4777 goto fail_ipahal;
4778 }
4779
4780 result = ipa3_init_hw();
4781 if (result) {
4782 IPAERR(":error initializing HW.\n");
4783 result = -ENODEV;
4784 goto fail_init_hw;
4785 }
4786 IPADBG("IPA HW initialization sequence completed");
4787
4788 ipa3_ctx->ipa_num_pipes = ipa3_get_num_pipes();
4789 if (ipa3_ctx->ipa_num_pipes > IPA3_MAX_NUM_PIPES) {
4790		IPAERR("IPA has more pipes than supported! has %d, max %d\n",
4791 ipa3_ctx->ipa_num_pipes, IPA3_MAX_NUM_PIPES);
4792 result = -ENODEV;
4793 goto fail_init_hw;
4794 }
4795
Amir Levy9659e592016-10-27 18:08:27 +03004796 ipa3_ctx->ctrl->ipa_sram_read_settings();
4797 IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n",
4798 ipa3_ctx->smem_sz, ipa3_ctx->smem_restricted_bytes);
4799
4800 IPADBG("hdr_lcl=%u ip4_rt_hash=%u ip4_rt_nonhash=%u\n",
4801 ipa3_ctx->hdr_tbl_lcl, ipa3_ctx->ip4_rt_tbl_hash_lcl,
4802 ipa3_ctx->ip4_rt_tbl_nhash_lcl);
4803
4804 IPADBG("ip6_rt_hash=%u ip6_rt_nonhash=%u\n",
4805 ipa3_ctx->ip6_rt_tbl_hash_lcl, ipa3_ctx->ip6_rt_tbl_nhash_lcl);
4806
4807 IPADBG("ip4_flt_hash=%u ip4_flt_nonhash=%u\n",
4808 ipa3_ctx->ip4_flt_tbl_hash_lcl,
4809 ipa3_ctx->ip4_flt_tbl_nhash_lcl);
4810
4811 IPADBG("ip6_flt_hash=%u ip6_flt_nonhash=%u\n",
4812 ipa3_ctx->ip6_flt_tbl_hash_lcl,
4813 ipa3_ctx->ip6_flt_tbl_nhash_lcl);
4814
4815 if (ipa3_ctx->smem_reqd_sz > ipa3_ctx->smem_sz) {
4816 IPAERR("SW expect more core memory, needed %d, avail %d\n",
4817 ipa3_ctx->smem_reqd_sz, ipa3_ctx->smem_sz);
4818 result = -ENOMEM;
4819 goto fail_init_hw;
4820 }
4821
4822 mutex_init(&ipa3_ctx->ipa3_active_clients.mutex);
Amir Levy9659e592016-10-27 18:08:27 +03004823 IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
4824 ipa3_active_clients_log_inc(&log_info, false);
Skylar Chang242952b2017-07-20 15:04:05 -07004825 atomic_set(&ipa3_ctx->ipa3_active_clients.cnt, 1);
Amir Levy9659e592016-10-27 18:08:27 +03004826
Amir Levy9659e592016-10-27 18:08:27 +03004827 /* Create workqueues for power management */
4828 ipa3_ctx->power_mgmt_wq =
4829 create_singlethread_workqueue("ipa_power_mgmt");
4830 if (!ipa3_ctx->power_mgmt_wq) {
4831 IPAERR("failed to create power mgmt wq\n");
4832 result = -ENOMEM;
4833 goto fail_init_hw;
4834 }
4835
4836 ipa3_ctx->transport_power_mgmt_wq =
4837 create_singlethread_workqueue("transport_power_mgmt");
4838 if (!ipa3_ctx->transport_power_mgmt_wq) {
4839 IPAERR("failed to create transport power mgmt wq\n");
4840 result = -ENOMEM;
4841 goto fail_create_transport_wq;
4842 }
4843
Sridhar Ancha99b505b2016-04-21 23:11:10 +05304844 mutex_init(&ipa3_ctx->transport_pm.transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03004845
4846 /* init the lookaside cache */
4847 ipa3_ctx->flt_rule_cache = kmem_cache_create("IPA_FLT",
4848 sizeof(struct ipa3_flt_entry), 0, 0, NULL);
4849 if (!ipa3_ctx->flt_rule_cache) {
4850 IPAERR(":ipa flt cache create failed\n");
4851 result = -ENOMEM;
4852 goto fail_flt_rule_cache;
4853 }
4854 ipa3_ctx->rt_rule_cache = kmem_cache_create("IPA_RT",
4855 sizeof(struct ipa3_rt_entry), 0, 0, NULL);
4856 if (!ipa3_ctx->rt_rule_cache) {
4857 IPAERR(":ipa rt cache create failed\n");
4858 result = -ENOMEM;
4859 goto fail_rt_rule_cache;
4860 }
4861 ipa3_ctx->hdr_cache = kmem_cache_create("IPA_HDR",
4862 sizeof(struct ipa3_hdr_entry), 0, 0, NULL);
4863 if (!ipa3_ctx->hdr_cache) {
4864 IPAERR(":ipa hdr cache create failed\n");
4865 result = -ENOMEM;
4866 goto fail_hdr_cache;
4867 }
4868 ipa3_ctx->hdr_offset_cache =
4869 kmem_cache_create("IPA_HDR_OFFSET",
4870 sizeof(struct ipa_hdr_offset_entry), 0, 0, NULL);
4871 if (!ipa3_ctx->hdr_offset_cache) {
4872 IPAERR(":ipa hdr off cache create failed\n");
4873 result = -ENOMEM;
4874 goto fail_hdr_offset_cache;
4875 }
4876 ipa3_ctx->hdr_proc_ctx_cache = kmem_cache_create("IPA_HDR_PROC_CTX",
4877 sizeof(struct ipa3_hdr_proc_ctx_entry), 0, 0, NULL);
4878 if (!ipa3_ctx->hdr_proc_ctx_cache) {
4879 IPAERR(":ipa hdr proc ctx cache create failed\n");
4880 result = -ENOMEM;
4881 goto fail_hdr_proc_ctx_cache;
4882 }
4883 ipa3_ctx->hdr_proc_ctx_offset_cache =
4884 kmem_cache_create("IPA_HDR_PROC_CTX_OFFSET",
4885 sizeof(struct ipa3_hdr_proc_ctx_offset_entry), 0, 0, NULL);
4886 if (!ipa3_ctx->hdr_proc_ctx_offset_cache) {
4887 IPAERR(":ipa hdr proc ctx off cache create failed\n");
4888 result = -ENOMEM;
4889 goto fail_hdr_proc_ctx_offset_cache;
4890 }
4891 ipa3_ctx->rt_tbl_cache = kmem_cache_create("IPA_RT_TBL",
4892 sizeof(struct ipa3_rt_tbl), 0, 0, NULL);
4893 if (!ipa3_ctx->rt_tbl_cache) {
4894 IPAERR(":ipa rt tbl cache create failed\n");
4895 result = -ENOMEM;
4896 goto fail_rt_tbl_cache;
4897 }
4898 ipa3_ctx->tx_pkt_wrapper_cache =
4899 kmem_cache_create("IPA_TX_PKT_WRAPPER",
4900 sizeof(struct ipa3_tx_pkt_wrapper), 0, 0, NULL);
4901 if (!ipa3_ctx->tx_pkt_wrapper_cache) {
4902 IPAERR(":ipa tx pkt wrapper cache create failed\n");
4903 result = -ENOMEM;
4904 goto fail_tx_pkt_wrapper_cache;
4905 }
4906 ipa3_ctx->rx_pkt_wrapper_cache =
4907 kmem_cache_create("IPA_RX_PKT_WRAPPER",
4908 sizeof(struct ipa3_rx_pkt_wrapper), 0, 0, NULL);
4909 if (!ipa3_ctx->rx_pkt_wrapper_cache) {
4910 IPAERR(":ipa rx pkt wrapper cache create failed\n");
4911 result = -ENOMEM;
4912 goto fail_rx_pkt_wrapper_cache;
4913 }
4914
Skylar Chang6c4bec92017-04-21 16:10:14 -07004915 /* allocate memory for DMA_TASK workaround */
4916 result = ipa3_allocate_dma_task_for_gsi();
4917 if (result) {
4918 IPAERR("failed to allocate dma task\n");
4919 goto fail_dma_task;
4920 }
4921
Amir Levy9659e592016-10-27 18:08:27 +03004922 /* init the various list heads */
4923 INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_hdr_entry_list);
4924 for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
4925 INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_offset_list[i]);
4926 INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_free_offset_list[i]);
4927 }
4928 INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list);
4929 for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
4930 INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i]);
4931 INIT_LIST_HEAD(&ipa3_ctx->
4932 hdr_proc_ctx_tbl.head_free_offset_list[i]);
4933 }
4934 INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004935 idr_init(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].rule_ids);
Amir Levy9659e592016-10-27 18:08:27 +03004936 INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004937 idr_init(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].rule_ids);
Amir Levy9659e592016-10-27 18:08:27 +03004938
4939 rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
4940 INIT_LIST_HEAD(&rset->head_rt_tbl_list);
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004941 idr_init(&rset->rule_ids);
Amir Levy9659e592016-10-27 18:08:27 +03004942 rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
4943 INIT_LIST_HEAD(&rset->head_rt_tbl_list);
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004944 idr_init(&rset->rule_ids);
Amir Levy9659e592016-10-27 18:08:27 +03004945
4946 INIT_LIST_HEAD(&ipa3_ctx->intf_list);
4947 INIT_LIST_HEAD(&ipa3_ctx->msg_list);
4948 INIT_LIST_HEAD(&ipa3_ctx->pull_msg_list);
4949 init_waitqueue_head(&ipa3_ctx->msg_waitq);
4950 mutex_init(&ipa3_ctx->msg_lock);
4951
4952 mutex_init(&ipa3_ctx->lock);
4953 mutex_init(&ipa3_ctx->nat_mem.lock);
Skylar Changfb792c62017-08-17 12:53:23 -07004954 mutex_init(&ipa3_ctx->q6_proxy_clk_vote_mutex);
Mohammed Javidb4b5ef42017-08-29 01:05:46 +05304955 mutex_init(&ipa3_ctx->ipa_cne_evt_lock);
Amir Levy9659e592016-10-27 18:08:27 +03004956
4957 idr_init(&ipa3_ctx->ipa_idr);
4958 spin_lock_init(&ipa3_ctx->idr_lock);
4959
4960 /* wlan related member */
4961 memset(&ipa3_ctx->wc_memb, 0, sizeof(ipa3_ctx->wc_memb));
4962 spin_lock_init(&ipa3_ctx->wc_memb.wlan_spinlock);
4963 spin_lock_init(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
4964 INIT_LIST_HEAD(&ipa3_ctx->wc_memb.wlan_comm_desc_list);
4965
Amir Levy9659e592016-10-27 18:08:27 +03004966 ipa3_ctx->class = class_create(THIS_MODULE, DRV_NAME);
4967
4968 result = alloc_chrdev_region(&ipa3_ctx->dev_num, 0, 1, DRV_NAME);
4969 if (result) {
4970 IPAERR("alloc_chrdev_region err.\n");
4971 result = -ENODEV;
4972 goto fail_alloc_chrdev_region;
4973 }
4974
4975 ipa3_ctx->dev = device_create(ipa3_ctx->class, NULL, ipa3_ctx->dev_num,
4976 ipa3_ctx, DRV_NAME);
4977 if (IS_ERR(ipa3_ctx->dev)) {
4978 IPAERR(":device_create err.\n");
4979 result = -ENODEV;
4980 goto fail_device_create;
4981 }
4982
Amir Levy9659e592016-10-27 18:08:27 +03004983 if (ipa3_create_nat_device()) {
4984 IPAERR("unable to create nat device\n");
4985 result = -ENODEV;
4986 goto fail_nat_dev_add;
4987 }
4988
4989 /* Create a wakeup source. */
4990 wakeup_source_init(&ipa3_ctx->w_lock, "IPA_WS");
4991 spin_lock_init(&ipa3_ctx->wakelock_ref_cnt.spinlock);
4992
Michael Adisumarta3e350812017-09-18 14:54:36 -07004993 /* Initialize Power Management framework */
4994 if (ipa3_ctx->use_ipa_pm) {
4995 result = ipa_pm_init(&ipa3_res.pm_init);
4996 if (result) {
4997 IPAERR("IPA PM initialization failed (%d)\n", -result);
4998 result = -ENODEV;
4999 goto fail_ipa_rm_init;
5000 }
5001 IPADBG("IPA resource manager initialized");
5002 } else {
5003 result = ipa_rm_initialize();
5004 if (result) {
5005 IPAERR("RM initialization failed (%d)\n", -result);
5006 result = -ENODEV;
5007 goto fail_ipa_rm_init;
5008 }
5009 IPADBG("IPA resource manager initialized");
Amir Levy9659e592016-10-27 18:08:27 +03005010
Michael Adisumarta3e350812017-09-18 14:54:36 -07005011 result = ipa3_create_apps_resource();
5012 if (result) {
5013 IPAERR("Failed to create APPS_CONS resource\n");
5014 result = -ENODEV;
5015 goto fail_create_apps_resource;
5016 }
Amir Levy9659e592016-10-27 18:08:27 +03005017 }
5018
Skylar Changcd3902d2017-03-27 18:08:27 -07005019 result = ipa3_alloc_pkt_init();
5020 if (result) {
5021 IPAERR("Failed to alloc pkt_init payload\n");
5022 result = -ENODEV;
Ghanim Fodie6bb7a82017-10-02 17:59:58 +03005023		goto fail_alloc_pkt_init;
Skylar Changcd3902d2017-03-27 18:08:27 -07005024 }
5025
Amir Levy12ef0912016-08-30 09:27:34 +03005026 if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)
5027 ipa3_enable_dcd();
5028
Amir Levy9659e592016-10-27 18:08:27 +03005029 INIT_LIST_HEAD(&ipa3_ctx->ipa_ready_cb_list);
5030
5031 init_completion(&ipa3_ctx->init_completion_obj);
Skylar Chang0c17c7d2016-10-31 09:57:54 -07005032 init_completion(&ipa3_ctx->uc_loaded_completion_obj);
Amir Levy9659e592016-10-27 18:08:27 +03005033
Ghanim Fodie6bb7a82017-10-02 17:59:58 +03005034 result = ipa3_dma_setup();
5035 if (result) {
5036 IPAERR("Failed to setup IPA DMA\n");
5037 result = -ENODEV;
5038 goto fail_ipa_dma_setup;
5039 }
5040
Amir Levy9659e592016-10-27 18:08:27 +03005041 /*
Amir Levya59ed3f2017-03-05 17:30:55 +02005042 * We can't register the GSI driver yet, as it expects
Amir Levy9659e592016-10-27 18:08:27 +03005043 * the GSI FW to be up and running before the registration.
Amir Levya59ed3f2017-03-05 17:30:55 +02005044 *
5045 * For IPA3.0, the GSI configuration is done by the GSI driver.
5046 * For IPA3.1 (and on), the GSI configuration is done by TZ.
Amir Levy9659e592016-10-27 18:08:27 +03005047 */
Amir Levya59ed3f2017-03-05 17:30:55 +02005048 if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0) {
5049 result = ipa3_gsi_pre_fw_load_init();
5050 if (result) {
5051 IPAERR("gsi pre FW loading config failed\n");
5052 result = -ENODEV;
Ghanim Fodie6bb7a82017-10-02 17:59:58 +03005053 goto fail_gsi_pre_fw_load_init;
Amir Levy9659e592016-10-27 18:08:27 +03005054 }
5055 }
Amir Levy9659e592016-10-27 18:08:27 +03005056
Utkarsh Saxenaded78142017-05-03 14:04:30 +05305057 cdev_init(&ipa3_ctx->cdev, &ipa3_drv_fops);
5058 ipa3_ctx->cdev.owner = THIS_MODULE;
5059 ipa3_ctx->cdev.ops = &ipa3_drv_fops; /* from LDD3 */
5060
5061 result = cdev_add(&ipa3_ctx->cdev, ipa3_ctx->dev_num, 1);
5062 if (result) {
5063 IPAERR(":cdev_add err=%d\n", -result);
5064 result = -ENODEV;
5065 goto fail_cdev_add;
5066 }
5067 IPADBG("ipa cdev added successful. major:%d minor:%d\n",
5068 MAJOR(ipa3_ctx->dev_num),
5069 MINOR(ipa3_ctx->dev_num));
Amir Levy9659e592016-10-27 18:08:27 +03005070 return 0;
5071
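/*
 * Error unwinding: each failure label below releases only what was
 * successfully acquired before the failure point, in reverse order of
 * acquisition.
 */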
Utkarsh Saxenaded78142017-05-03 14:04:30 +05305072fail_cdev_add:
Ghanim Fodie6bb7a82017-10-02 17:59:58 +03005073fail_gsi_pre_fw_load_init:
5074 ipa3_dma_shutdown();
5075fail_ipa_dma_setup:
5076fail_alloc_pkt_init:
5077 if (ipa3_ctx->use_ipa_pm)
5078 ipa_pm_destroy();
5079 else
Michael Adisumarta3e350812017-09-18 14:54:36 -07005080 ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
Amir Levy9659e592016-10-27 18:08:27 +03005081fail_create_apps_resource:
Michael Adisumarta3e350812017-09-18 14:54:36 -07005082 if (!ipa3_ctx->use_ipa_pm)
5083 ipa_rm_exit();
Amir Levy9659e592016-10-27 18:08:27 +03005084fail_ipa_rm_init:
5085fail_nat_dev_add:
Amir Levy9659e592016-10-27 18:08:27 +03005086 device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
5087fail_device_create:
5088 unregister_chrdev_region(ipa3_ctx->dev_num, 1);
5089fail_alloc_chrdev_region:
Ghanim Fodie6bb7a82017-10-02 17:59:58 +03005090 idr_destroy(&ipa3_ctx->ipa_idr);
Skylar Chang0c37f5f2017-07-24 10:22:53 -07005091 rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
5092 idr_destroy(&rset->rule_ids);
5093 rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
5094 idr_destroy(&rset->rule_ids);
5095 idr_destroy(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].rule_ids);
5096 idr_destroy(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].rule_ids);
Skylar Chang6c4bec92017-04-21 16:10:14 -07005097 ipa3_free_dma_task_for_gsi();
5098fail_dma_task:
Amir Levy9659e592016-10-27 18:08:27 +03005099 kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
5100fail_rx_pkt_wrapper_cache:
5101 kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
5102fail_tx_pkt_wrapper_cache:
5103 kmem_cache_destroy(ipa3_ctx->rt_tbl_cache);
5104fail_rt_tbl_cache:
5105 kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache);
5106fail_hdr_proc_ctx_offset_cache:
5107 kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache);
5108fail_hdr_proc_ctx_cache:
5109 kmem_cache_destroy(ipa3_ctx->hdr_offset_cache);
5110fail_hdr_offset_cache:
5111 kmem_cache_destroy(ipa3_ctx->hdr_cache);
5112fail_hdr_cache:
5113 kmem_cache_destroy(ipa3_ctx->rt_rule_cache);
5114fail_rt_rule_cache:
5115 kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
5116fail_flt_rule_cache:
5117 destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
5118fail_create_transport_wq:
5119 destroy_workqueue(ipa3_ctx->power_mgmt_wq);
5120fail_init_hw:
5121 ipahal_destroy();
5122fail_ipahal:
5123 iounmap(ipa3_ctx->mmio);
5124fail_remap:
5125 ipa3_disable_clks();
5126 ipa3_active_clients_log_destroy();
5127fail_init_active_client:
Ghanim Fodi6a831342017-03-07 18:19:15 +02005128 if (ipa3_clk)
5129 clk_put(ipa3_clk);
5130 ipa3_clk = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03005131fail_clk:
5132 msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
5133fail_bus_reg:
Ghanim Fodi6a831342017-03-07 18:19:15 +02005134 if (ipa3_bus_scale_table) {
5135 msm_bus_cl_clear_pdata(ipa3_bus_scale_table);
5136 ipa3_bus_scale_table = NULL;
5137 }
Amir Levy9659e592016-10-27 18:08:27 +03005138fail_init_mem_partition:
5139fail_bind:
5140 kfree(ipa3_ctx->ctrl);
5141fail_mem_ctrl:
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005142 kfree(ipa3_ctx->ipa_tz_unlock_reg);
5143fail_tz_unlock_reg:
Skylar Chang841c1452017-04-03 16:07:22 -07005144 if (ipa3_ctx->logbuf)
5145 ipc_log_context_destroy(ipa3_ctx->logbuf);
Amir Levy9659e592016-10-27 18:08:27 +03005146 kfree(ipa3_ctx);
5147 ipa3_ctx = NULL;
5148fail_mem_ctx:
5149 return result;
5150}
5151
Michael Adisumarta3e350812017-09-18 14:54:36 -07005152bool ipa_pm_is_used(void)
5153{
5154 return (ipa3_ctx) ? ipa3_ctx->use_ipa_pm : false;
5155}
5156
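/*
 * get_ipa_dts_pm_info() - parse the IPA PM related device-tree properties.
 *
 * Illustrative device-tree fragment (the values below are hypothetical and
 * only show the expected shape of the properties):
 *
 *	qcom,use-ipa-pm;
 *	qcom,msm-bus,num-cases = <5>;
 *	qcom,throughput-threshold = <310 600 1000>;
 *	qcom,scaling-exceptions = "USB", "310", "600", "1000";
 *
 * qcom,throughput-threshold carries (num-cases - 2) entries, and each
 * qcom,scaling-exceptions record is a use-case name string followed by one
 * threshold string per entry of qcom,throughput-threshold.
 */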
5157static int get_ipa_dts_pm_info(struct platform_device *pdev,
5158 struct ipa3_plat_drv_res *ipa_drv_res)
5159{
5160 int result;
5161 int i, j;
5162
5163 ipa_drv_res->use_ipa_pm = of_property_read_bool(pdev->dev.of_node,
5164 "qcom,use-ipa-pm");
5165 IPADBG("use_ipa_pm=%d\n", ipa_drv_res->use_ipa_pm);
5166 if (!ipa_drv_res->use_ipa_pm)
5167 return 0;
5168
5169 result = of_property_read_u32(pdev->dev.of_node,
5170 "qcom,msm-bus,num-cases",
5171 &ipa_drv_res->pm_init.threshold_size);
5172	/* Bus cases that carry no vote are not counted as thresholds */
5173 ipa_drv_res->pm_init.threshold_size -= 2;
5174 if (result || ipa_drv_res->pm_init.threshold_size >
5175 IPA_PM_THRESHOLD_MAX) {
5176 IPAERR("invalid property qcom,msm-bus,num-cases %d\n",
5177 ipa_drv_res->pm_init.threshold_size);
5178 return -EFAULT;
5179 }
5180
5181 result = of_property_read_u32_array(pdev->dev.of_node,
5182 "qcom,throughput-threshold",
5183 ipa_drv_res->pm_init.default_threshold,
5184 ipa_drv_res->pm_init.threshold_size);
5185 if (result) {
5186 IPAERR("failed to read qcom,throughput-thresholds\n");
5187 return -EFAULT;
5188 }
5189
5190 result = of_property_count_strings(pdev->dev.of_node,
5191 "qcom,scaling-exceptions");
5192 if (result < 0) {
5193 IPADBG("no exception list for ipa pm\n");
5194 result = 0;
5195 }
5196
5197 if (result % (ipa_drv_res->pm_init.threshold_size + 1)) {
5198 IPAERR("failed to read qcom,scaling-exceptions\n");
5199 return -EFAULT;
5200 }
5201
5202 ipa_drv_res->pm_init.exception_size = result /
5203 (ipa_drv_res->pm_init.threshold_size + 1);
5204 if (ipa_drv_res->pm_init.exception_size >=
5205 IPA_PM_EXCEPTION_MAX) {
5206 IPAERR("exception list larger then max %d\n",
5207 ipa_drv_res->pm_init.exception_size);
5208 return -EFAULT;
5209 }
5210
5211 for (i = 0; i < ipa_drv_res->pm_init.exception_size; i++) {
5212 struct ipa_pm_exception *ex = ipa_drv_res->pm_init.exceptions;
5213
5214 result = of_property_read_string_index(pdev->dev.of_node,
5215 "qcom,scaling-exceptions",
5216 i * ipa_drv_res->pm_init.threshold_size,
5217 &ex[i].usecase);
5218 if (result) {
5219 IPAERR("failed to read qcom,scaling-exceptions");
5220 return -EFAULT;
5221 }
5222
5223 for (j = 0; j < ipa_drv_res->pm_init.threshold_size; j++) {
5224 const char *str;
5225
5226 result = of_property_read_string_index(
5227 pdev->dev.of_node,
5228 "qcom,scaling-exceptions",
5229 i * ipa_drv_res->pm_init.threshold_size + j + 1,
5230 &str);
5231 if (result) {
5232 IPAERR("failed to read qcom,scaling-exceptions"
5233 );
5234 return -EFAULT;
5235 }
5236
5237 if (kstrtou32(str, 0, &ex[i].threshold[j])) {
5238 IPAERR("error str=%s\n", str);
5239 return -EFAULT;
5240 }
5241 }
5242 }
5243
5244 return 0;
5245}
5246
Amir Levy9659e592016-10-27 18:08:27 +03005247static int get_ipa_dts_configuration(struct platform_device *pdev,
5248 struct ipa3_plat_drv_res *ipa_drv_res)
5249{
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005250 int i, result, pos;
Amir Levy9659e592016-10-27 18:08:27 +03005251 struct resource *resource;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005252 u32 *ipa_tz_unlock_reg;
5253 int elem_num;
Ghanim Fodic823bc62017-10-21 17:29:53 +03005254 u32 mhi_evid_limits[2];
Amir Levy9659e592016-10-27 18:08:27 +03005255
5256 /* initialize ipa3_res */
5257 ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
5258 ipa_drv_res->ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
5259 ipa_drv_res->ipa_hw_type = 0;
5260 ipa_drv_res->ipa3_hw_mode = 0;
Amir Levy9659e592016-10-27 18:08:27 +03005261 ipa_drv_res->modem_cfg_emb_pipe_flt = false;
5262 ipa_drv_res->ipa_wdi2 = false;
5263 ipa_drv_res->use_64_bit_dma_mask = false;
Ghanim Fodi6a831342017-03-07 18:19:15 +02005264 ipa_drv_res->use_bw_vote = false;
Amir Levy9659e592016-10-27 18:08:27 +03005265 ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
5266 ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
5267 ipa_drv_res->apply_rg10_wa = false;
5268 ipa_drv_res->gsi_ch20_wa = false;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005269 ipa_drv_res->ipa_tz_unlock_reg_num = 0;
5270 ipa_drv_res->ipa_tz_unlock_reg = NULL;
Ghanim Fodic823bc62017-10-21 17:29:53 +03005271 ipa_drv_res->mhi_evid_limits[0] = IPA_MHI_GSI_EVENT_RING_ID_START;
5272 ipa_drv_res->mhi_evid_limits[1] = IPA_MHI_GSI_EVENT_RING_ID_END;
Amir Levy9659e592016-10-27 18:08:27 +03005273
5274 /* Get IPA HW Version */
5275 result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
5276 &ipa_drv_res->ipa_hw_type);
5277 if ((result) || (ipa_drv_res->ipa_hw_type == 0)) {
5278 IPAERR(":get resource failed for ipa-hw-ver!\n");
5279 return -ENODEV;
5280 }
5281 IPADBG(": ipa_hw_type = %d", ipa_drv_res->ipa_hw_type);
5282
5283 if (ipa_drv_res->ipa_hw_type < IPA_HW_v3_0) {
5284 IPAERR(":IPA version below 3.0 not supported!\n");
5285 return -ENODEV;
5286 }
5287
5288 /* Get IPA HW mode */
5289 result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-mode",
5290 &ipa_drv_res->ipa3_hw_mode);
5291 if (result)
5292 IPADBG("using default (IPA_MODE_NORMAL) for ipa-hw-mode\n");
5293 else
5294 IPADBG(": found ipa_drv_res->ipa3_hw_mode = %d",
5295 ipa_drv_res->ipa3_hw_mode);
5296
5297 /* Get IPA WAN / LAN RX pool size */
5298 result = of_property_read_u32(pdev->dev.of_node,
5299 "qcom,wan-rx-ring-size",
5300 &ipa_drv_res->wan_rx_ring_size);
5301 if (result)
5302 IPADBG("using default for wan-rx-ring-size = %u\n",
5303 ipa_drv_res->wan_rx_ring_size);
5304 else
5305 IPADBG(": found ipa_drv_res->wan-rx-ring-size = %u",
5306 ipa_drv_res->wan_rx_ring_size);
5307
5308 result = of_property_read_u32(pdev->dev.of_node,
5309 "qcom,lan-rx-ring-size",
5310 &ipa_drv_res->lan_rx_ring_size);
5311 if (result)
5312 IPADBG("using default for lan-rx-ring-size = %u\n",
5313 ipa_drv_res->lan_rx_ring_size);
5314 else
5315 IPADBG(": found ipa_drv_res->lan-rx-ring-size = %u",
5316 ipa_drv_res->lan_rx_ring_size);
5317
5318 ipa_drv_res->use_ipa_teth_bridge =
5319 of_property_read_bool(pdev->dev.of_node,
5320 "qcom,use-ipa-tethering-bridge");
5321 IPADBG(": using TBDr = %s",
5322 ipa_drv_res->use_ipa_teth_bridge
5323 ? "True" : "False");
5324
Amir Levy9659e592016-10-27 18:08:27 +03005325 ipa_drv_res->modem_cfg_emb_pipe_flt =
5326 of_property_read_bool(pdev->dev.of_node,
5327 "qcom,modem-cfg-emb-pipe-flt");
5328 IPADBG(": modem configure embedded pipe filtering = %s\n",
5329 ipa_drv_res->modem_cfg_emb_pipe_flt
5330 ? "True" : "False");
5331
5332 ipa_drv_res->ipa_wdi2 =
5333 of_property_read_bool(pdev->dev.of_node,
5334 "qcom,ipa-wdi2");
5335 IPADBG(": WDI-2.0 = %s\n",
5336 ipa_drv_res->ipa_wdi2
5337 ? "True" : "False");
5338
5339 ipa_drv_res->use_64_bit_dma_mask =
5340 of_property_read_bool(pdev->dev.of_node,
5341 "qcom,use-64-bit-dma-mask");
5342 IPADBG(": use_64_bit_dma_mask = %s\n",
5343 ipa_drv_res->use_64_bit_dma_mask
5344 ? "True" : "False");
5345
Ghanim Fodi6a831342017-03-07 18:19:15 +02005346 ipa_drv_res->use_bw_vote =
5347 of_property_read_bool(pdev->dev.of_node,
5348 "qcom,bandwidth-vote-for-ipa");
5349 IPADBG(": use_bw_vote = %s\n",
5350 ipa_drv_res->use_bw_vote
5351 ? "True" : "False");
5352
Amir Levy9659e592016-10-27 18:08:27 +03005353 ipa_drv_res->skip_uc_pipe_reset =
5354 of_property_read_bool(pdev->dev.of_node,
5355 "qcom,skip-uc-pipe-reset");
5356 IPADBG(": skip uC pipe reset = %s\n",
5357 ipa_drv_res->skip_uc_pipe_reset
5358 ? "True" : "False");
5359
5360 ipa_drv_res->tethered_flow_control =
5361 of_property_read_bool(pdev->dev.of_node,
5362 "qcom,tethered-flow-control");
5363 IPADBG(": Use apps based flow control = %s\n",
5364 ipa_drv_res->tethered_flow_control
5365 ? "True" : "False");
5366
Amir Levy9659e592016-10-27 18:08:27 +03005367 /* Get IPA wrapper address */
5368 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
5369 "ipa-base");
5370 if (!resource) {
5371 IPAERR(":get resource failed for ipa-base!\n");
5372 return -ENODEV;
5373 }
5374 ipa_drv_res->ipa_mem_base = resource->start;
5375 ipa_drv_res->ipa_mem_size = resource_size(resource);
5376 IPADBG(": ipa-base = 0x%x, size = 0x%x\n",
5377 ipa_drv_res->ipa_mem_base,
5378 ipa_drv_res->ipa_mem_size);
5379
5380 smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
5381 smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
5382
Amir Levya59ed3f2017-03-05 17:30:55 +02005383 /* Get IPA GSI address */
5384 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
5385 "gsi-base");
5386 if (!resource) {
5387 IPAERR(":get resource failed for gsi-base!\n");
5388 return -ENODEV;
Amir Levy9659e592016-10-27 18:08:27 +03005389 }
Amir Levya59ed3f2017-03-05 17:30:55 +02005390 ipa_drv_res->transport_mem_base = resource->start;
5391 ipa_drv_res->transport_mem_size = resource_size(resource);
5392 IPADBG(": gsi-base = 0x%x, size = 0x%x\n",
5393 ipa_drv_res->transport_mem_base,
5394 ipa_drv_res->transport_mem_size);
5395
5396 /* Get IPA GSI IRQ number */
5397 resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
5398 "gsi-irq");
5399 if (!resource) {
5400 IPAERR(":get resource failed for gsi-irq!\n");
5401 return -ENODEV;
5402 }
5403 ipa_drv_res->transport_irq = resource->start;
5404 IPADBG(": gsi-irq = %d\n", ipa_drv_res->transport_irq);
Amir Levy9659e592016-10-27 18:08:27 +03005405
5406 /* Get IPA pipe mem start ofst */
5407 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
5408 "ipa-pipe-mem");
5409 if (!resource) {
5410 IPADBG(":not using pipe memory - resource nonexisting\n");
5411 } else {
5412 ipa_drv_res->ipa_pipe_mem_start_ofst = resource->start;
5413 ipa_drv_res->ipa_pipe_mem_size = resource_size(resource);
5414 IPADBG(":using pipe memory - at 0x%x of size 0x%x\n",
5415 ipa_drv_res->ipa_pipe_mem_start_ofst,
5416 ipa_drv_res->ipa_pipe_mem_size);
5417 }
5418
5419 /* Get IPA IRQ number */
5420 resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
5421 "ipa-irq");
5422 if (!resource) {
5423 IPAERR(":get resource failed for ipa-irq!\n");
5424 return -ENODEV;
5425 }
5426 ipa_drv_res->ipa_irq = resource->start;
5427 IPADBG(":ipa-irq = %d\n", ipa_drv_res->ipa_irq);
5428
5429 result = of_property_read_u32(pdev->dev.of_node, "qcom,ee",
5430 &ipa_drv_res->ee);
5431 if (result)
5432 ipa_drv_res->ee = 0;
5433
5434 ipa_drv_res->apply_rg10_wa =
5435 of_property_read_bool(pdev->dev.of_node,
5436 "qcom,use-rg10-limitation-mitigation");
5437 IPADBG(": Use Register Group 10 limitation mitigation = %s\n",
5438 ipa_drv_res->apply_rg10_wa
5439 ? "True" : "False");
5440
5441 ipa_drv_res->gsi_ch20_wa =
5442 of_property_read_bool(pdev->dev.of_node,
5443 "qcom,do-not-use-ch-gsi-20");
5444 IPADBG(": GSI CH 20 WA is = %s\n",
5445 ipa_drv_res->apply_rg10_wa
5446 ? "Needed" : "Not needed");
5447
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005448 elem_num = of_property_count_elems_of_size(pdev->dev.of_node,
Ghanim Fodic823bc62017-10-21 17:29:53 +03005449 "qcom,mhi-event-ring-id-limits", sizeof(u32));
5450
5451 if (elem_num == 2) {
5452 if (of_property_read_u32_array(pdev->dev.of_node,
5453 "qcom,mhi-event-ring-id-limits", mhi_evid_limits, 2)) {
5454 IPAERR("failed to read mhi event ring id limits\n");
5455 return -EFAULT;
5456 }
5457 if (mhi_evid_limits[0] > mhi_evid_limits[1]) {
5458 IPAERR("mhi event ring id low limit > high limit\n");
5459 return -EFAULT;
5460 }
5461 ipa_drv_res->mhi_evid_limits[0] = mhi_evid_limits[0];
5462 ipa_drv_res->mhi_evid_limits[1] = mhi_evid_limits[1];
5463 IPADBG(": mhi-event-ring-id-limits start=%u end=%u\n",
5464 mhi_evid_limits[0], mhi_evid_limits[1]);
5465 } else {
5466 if (elem_num > 0) {
5467 IPAERR("Invalid mhi event ring id limits number %d\n",
5468 elem_num);
5469 return -EINVAL;
5470 }
5471 IPADBG("use default mhi evt ring id limits start=%u end=%u\n",
5472 ipa_drv_res->mhi_evid_limits[0],
5473 ipa_drv_res->mhi_evid_limits[1]);
5474 }
5475
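	/*
	 * qcom,ipa-tz-unlock-reg is a flat list of <address size> pairs, e.g.
	 * (hypothetical values):
	 *	qcom,ipa-tz-unlock-reg = <0x1d0000 0x4000>;
	 * describing register ranges requested to be unlocked for IPA use.
	 */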
5476 elem_num = of_property_count_elems_of_size(pdev->dev.of_node,
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005477 "qcom,ipa-tz-unlock-reg", sizeof(u32));
5478
5479 if (elem_num > 0 && elem_num % 2 == 0) {
5480 ipa_drv_res->ipa_tz_unlock_reg_num = elem_num / 2;
5481
5482 ipa_tz_unlock_reg = kcalloc(elem_num, sizeof(u32), GFP_KERNEL);
5483 if (ipa_tz_unlock_reg == NULL)
5484 return -ENOMEM;
5485
5486 ipa_drv_res->ipa_tz_unlock_reg = kcalloc(
5487 ipa_drv_res->ipa_tz_unlock_reg_num,
5488 sizeof(*ipa_drv_res->ipa_tz_unlock_reg),
5489 GFP_KERNEL);
5490 if (ipa_drv_res->ipa_tz_unlock_reg == NULL) {
5491 kfree(ipa_tz_unlock_reg);
5492 return -ENOMEM;
5493 }
5494
5495 if (of_property_read_u32_array(pdev->dev.of_node,
5496 "qcom,ipa-tz-unlock-reg", ipa_tz_unlock_reg,
5497 elem_num)) {
5498 IPAERR("failed to read register addresses\n");
5499 kfree(ipa_tz_unlock_reg);
5500 kfree(ipa_drv_res->ipa_tz_unlock_reg);
5501 return -EFAULT;
5502 }
5503
5504 pos = 0;
5505 for (i = 0; i < ipa_drv_res->ipa_tz_unlock_reg_num; i++) {
5506 ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr =
5507 ipa_tz_unlock_reg[pos++];
5508 ipa_drv_res->ipa_tz_unlock_reg[i].size =
5509 ipa_tz_unlock_reg[pos++];
5510 IPADBG("tz unlock reg %d: addr 0x%pa size %d\n", i,
5511 &ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr,
5512 ipa_drv_res->ipa_tz_unlock_reg[i].size);
5513 }
5514 kfree(ipa_tz_unlock_reg);
5515 }
Michael Adisumarta3e350812017-09-18 14:54:36 -07005516
5517 /* get IPA PM related information */
5518 result = get_ipa_dts_pm_info(pdev, ipa_drv_res);
5519 if (result) {
5520 IPAERR("failed to get pm info from dts %d\n", result);
5521 return result;
5522 }
5523
Amir Levy9659e592016-10-27 18:08:27 +03005524 return 0;
5525}
5526
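/*
 * The WLAN context bank attaches a bare IOMMU domain (iommu_domain_alloc()
 * followed by iommu_attach_device()); unlike the uC and AP context banks
 * below, it does not go through arm_iommu_create_mapping(), so all of its
 * mappings are created explicitly via ipa3_iommu_map().
 */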
5527static int ipa_smmu_wlan_cb_probe(struct device *dev)
5528{
5529 struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx();
Amir Levy9659e592016-10-27 18:08:27 +03005530 int atomic_ctx = 1;
5531 int fast = 1;
5532 int bypass = 1;
5533 int ret;
5534 u32 add_map_size;
5535 const u32 *add_map;
5536 int i;
5537
5538 IPADBG("sub pdev=%p\n", dev);
5539
5540 cb->dev = dev;
Amir Levyf5625342016-12-25 10:21:02 +02005541 cb->iommu = iommu_domain_alloc(dev->bus);
Amir Levy9659e592016-10-27 18:08:27 +03005542 if (!cb->iommu) {
5543 IPAERR("could not alloc iommu domain\n");
5544 /* assume this failure is because iommu driver is not ready */
5545 return -EPROBE_DEFER;
5546 }
5547 cb->valid = true;
5548
Michael Adisumarta93e97522017-10-06 15:49:46 -07005549 if (of_property_read_bool(dev->of_node, "qcom,smmu-s1-bypass")) {
5550 smmu_info.s1_bypass_arr[IPA_SMMU_CB_WLAN] = true;
Michael Adisumarta972e33e2017-10-20 15:24:27 -07005551 ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN] = true;
5552
Amir Levy9659e592016-10-27 18:08:27 +03005553 if (iommu_domain_set_attr(cb->iommu,
5554 DOMAIN_ATTR_S1_BYPASS,
5555 &bypass)) {
5556 IPAERR("couldn't set bypass\n");
5557 cb->valid = false;
5558 return -EIO;
5559 }
Michael Adisumarta93e97522017-10-06 15:49:46 -07005560 IPADBG("WLAN SMMU S1 BYPASS\n");
Amir Levy9659e592016-10-27 18:08:27 +03005561 } else {
Michael Adisumarta93e97522017-10-06 15:49:46 -07005562 smmu_info.s1_bypass_arr[IPA_SMMU_CB_WLAN] = false;
Michael Adisumarta972e33e2017-10-20 15:24:27 -07005563 ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN] = false;
5564
Amir Levy9659e592016-10-27 18:08:27 +03005565 if (iommu_domain_set_attr(cb->iommu,
5566 DOMAIN_ATTR_ATOMIC,
5567 &atomic_ctx)) {
5568 IPAERR("couldn't disable coherent HTW\n");
5569 cb->valid = false;
5570 return -EIO;
5571 }
Michael Adisumarta93e97522017-10-06 15:49:46 -07005572		IPADBG("WLAN SMMU ATTR ATOMIC\n");
Amir Levy9659e592016-10-27 18:08:27 +03005573
5574 if (smmu_info.fast_map) {
5575 if (iommu_domain_set_attr(cb->iommu,
5576 DOMAIN_ATTR_FAST,
5577 &fast)) {
5578 IPAERR("couldn't set fast map\n");
5579 cb->valid = false;
5580 return -EIO;
5581 }
5582 IPADBG("SMMU fast map set\n");
5583 }
5584 }
5585
Michael Adisumarta93e97522017-10-06 15:49:46 -07005586 pr_info("IPA smmu_info.s1_bypass_arr[WLAN]=%d smmu_info.fast_map=%d\n",
5587 smmu_info.s1_bypass_arr[IPA_SMMU_CB_WLAN], smmu_info.fast_map);
5588
Amir Levy9659e592016-10-27 18:08:27 +03005589 ret = iommu_attach_device(cb->iommu, dev);
5590 if (ret) {
5591 IPAERR("could not attach device ret=%d\n", ret);
5592 cb->valid = false;
5593 return ret;
5594 }
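	/*
	 * qcom,additional-mapping is a flat list of <iova pa size> triples that
	 * are statically mapped into this context bank, e.g. (hypothetical
	 * values):
	 *	qcom,additional-mapping = <0x1e0000 0x1e0000 0x2000>;
	 */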
5595 /* MAP ipa-uc ram */
5596 add_map = of_get_property(dev->of_node,
5597 "qcom,additional-mapping", &add_map_size);
5598 if (add_map) {
5599		/* the additional-mapping property is an array of u32 3-tuples */
5600 if (add_map_size % (3 * sizeof(u32))) {
5601 IPAERR("wrong additional mapping format\n");
5602 cb->valid = false;
5603 return -EFAULT;
5604 }
5605
5606		/* iterate over each entry of the additional mapping array */
5607 for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
5608 u32 iova = be32_to_cpu(add_map[i]);
5609 u32 pa = be32_to_cpu(add_map[i + 1]);
5610 u32 size = be32_to_cpu(add_map[i + 2]);
5611 unsigned long iova_p;
5612 phys_addr_t pa_p;
5613 u32 size_p;
5614
5615 IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
5616 iova_p, pa_p, size_p);
5617 IPADBG("mapping 0x%lx to 0x%pa size %d\n",
5618 iova_p, &pa_p, size_p);
5619 ipa3_iommu_map(cb->iommu,
5620 iova_p, pa_p, size_p,
Amir Levyf5625342016-12-25 10:21:02 +02005621 IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
Amir Levy9659e592016-10-27 18:08:27 +03005622 }
5623 }
5624 return 0;
5625}
5626
5627static int ipa_smmu_uc_cb_probe(struct device *dev)
5628{
5629 struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
Amir Levy9659e592016-10-27 18:08:27 +03005630 int atomic_ctx = 1;
5631 int bypass = 1;
5632 int fast = 1;
5633 int ret;
5634 u32 iova_ap_mapping[2];
5635
5636 IPADBG("UC CB PROBE sub pdev=%p\n", dev);
5637
5638 ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
5639 iova_ap_mapping, 2);
5640 if (ret) {
5641 IPAERR("Fail to read UC start/size iova addresses\n");
5642 return ret;
5643 }
5644 cb->va_start = iova_ap_mapping[0];
5645 cb->va_size = iova_ap_mapping[1];
5646 cb->va_end = cb->va_start + cb->va_size;
5647 IPADBG("UC va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);
5648
5649 if (smmu_info.use_64_bit_dma_mask) {
5650 if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
5651 dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
5652 IPAERR("DMA set 64bit mask failed\n");
5653 return -EOPNOTSUPP;
5654 }
5655 } else {
5656 if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
5657 dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
5658 IPAERR("DMA set 32bit mask failed\n");
5659 return -EOPNOTSUPP;
5660 }
5661 }
5662 IPADBG("UC CB PROBE=%p create IOMMU mapping\n", dev);
5663
5664 cb->dev = dev;
Amir Levyf5625342016-12-25 10:21:02 +02005665 cb->mapping = arm_iommu_create_mapping(dev->bus,
Amir Levy9659e592016-10-27 18:08:27 +03005666 cb->va_start, cb->va_size);
5667 if (IS_ERR_OR_NULL(cb->mapping)) {
5668 IPADBG("Fail to create mapping\n");
5669 /* assume this failure is because iommu driver is not ready */
5670 return -EPROBE_DEFER;
5671 }
5672 IPADBG("SMMU mapping created\n");
5673 cb->valid = true;
5674
Amir Levy9659e592016-10-27 18:08:27 +03005675 IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
Michael Adisumarta93e97522017-10-06 15:49:46 -07005676
5677 if (of_property_read_bool(dev->of_node, "qcom,smmu-s1-bypass")) {
5678 smmu_info.s1_bypass_arr[IPA_SMMU_CB_UC] = true;
Michael Adisumarta972e33e2017-10-20 15:24:27 -07005679 ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] = true;
5680
Amir Levy9659e592016-10-27 18:08:27 +03005681 if (iommu_domain_set_attr(cb->mapping->domain,
Michael Adisumarta93e97522017-10-06 15:49:46 -07005682 DOMAIN_ATTR_S1_BYPASS,
5683 &bypass)) {
Amir Levy9659e592016-10-27 18:08:27 +03005684 IPAERR("couldn't set bypass\n");
5685 arm_iommu_release_mapping(cb->mapping);
5686 cb->valid = false;
5687 return -EIO;
5688 }
Michael Adisumarta93e97522017-10-06 15:49:46 -07005689 IPADBG("UC SMMU S1 BYPASS\n");
Amir Levy9659e592016-10-27 18:08:27 +03005690 } else {
Michael Adisumarta93e97522017-10-06 15:49:46 -07005691 smmu_info.s1_bypass_arr[IPA_SMMU_CB_UC] = false;
Michael Adisumarta972e33e2017-10-20 15:24:27 -07005692 ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] = false;
5693
Amir Levy9659e592016-10-27 18:08:27 +03005694 if (iommu_domain_set_attr(cb->mapping->domain,
Michael Adisumarta93e97522017-10-06 15:49:46 -07005695 DOMAIN_ATTR_ATOMIC,
5696 &atomic_ctx)) {
Amir Levy9659e592016-10-27 18:08:27 +03005697 IPAERR("couldn't set domain as atomic\n");
5698 arm_iommu_release_mapping(cb->mapping);
5699 cb->valid = false;
5700 return -EIO;
5701 }
5702 IPADBG("SMMU atomic set\n");
5703
5704 if (smmu_info.fast_map) {
5705 if (iommu_domain_set_attr(cb->mapping->domain,
Michael Adisumarta93e97522017-10-06 15:49:46 -07005706 DOMAIN_ATTR_FAST,
5707 &fast)) {
Amir Levy9659e592016-10-27 18:08:27 +03005708 IPAERR("couldn't set fast map\n");
5709 arm_iommu_release_mapping(cb->mapping);
5710 cb->valid = false;
5711 return -EIO;
5712 }
5713 IPADBG("SMMU fast map set\n");
5714 }
5715 }
5716
Michael Adisumarta93e97522017-10-06 15:49:46 -07005717 pr_info("IPA smmu_info.s1_bypass_arr[UC]=%d smmu_info.fast_map=%d\n",
5718 smmu_info.s1_bypass_arr[IPA_SMMU_CB_UC], smmu_info.fast_map);
5719
Amir Levy9659e592016-10-27 18:08:27 +03005720 IPADBG("UC CB PROBE sub pdev=%p attaching IOMMU device\n", dev);
5721 ret = arm_iommu_attach_device(cb->dev, cb->mapping);
5722 if (ret) {
5723 IPAERR("could not attach device ret=%d\n", ret);
5724 arm_iommu_release_mapping(cb->mapping);
5725 cb->valid = false;
5726 return ret;
5727 }
5728
5729 cb->next_addr = cb->va_end;
5730 ipa3_ctx->uc_pdev = dev;
5731
5732 return 0;
5733}
5734
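/*
 * The AP context bank probe also maps the modem SMEM filter-table region and,
 * once SMMU setup succeeds, kicks off the real driver initialization through
 * ipa3_pre_init().
 */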
5735static int ipa_smmu_ap_cb_probe(struct device *dev)
5736{
5737 struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
5738 int result;
Amir Levy9659e592016-10-27 18:08:27 +03005739 int atomic_ctx = 1;
5740 int fast = 1;
5741 int bypass = 1;
5742 u32 iova_ap_mapping[2];
5743 u32 add_map_size;
5744 const u32 *add_map;
5745 void *smem_addr;
5746 int i;
5747
5748 IPADBG("AP CB probe: sub pdev=%p\n", dev);
5749
5750 result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
5751 iova_ap_mapping, 2);
5752 if (result) {
5753 IPAERR("Fail to read AP start/size iova addresses\n");
5754 return result;
5755 }
5756 cb->va_start = iova_ap_mapping[0];
5757 cb->va_size = iova_ap_mapping[1];
5758 cb->va_end = cb->va_start + cb->va_size;
5759 IPADBG("AP va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);
5760
5761 if (smmu_info.use_64_bit_dma_mask) {
5762 if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
5763 dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
5764 IPAERR("DMA set 64bit mask failed\n");
5765 return -EOPNOTSUPP;
5766 }
5767 } else {
5768 if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
5769 dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
5770 IPAERR("DMA set 32bit mask failed\n");
5771 return -EOPNOTSUPP;
5772 }
5773 }
5774
5775 cb->dev = dev;
Amir Levyf5625342016-12-25 10:21:02 +02005776 cb->mapping = arm_iommu_create_mapping(dev->bus,
Amir Levy9659e592016-10-27 18:08:27 +03005777 cb->va_start, cb->va_size);
5778 if (IS_ERR_OR_NULL(cb->mapping)) {
5779 IPADBG("Fail to create mapping\n");
5780 /* assume this failure is because iommu driver is not ready */
5781 return -EPROBE_DEFER;
5782 }
5783 IPADBG("SMMU mapping created\n");
5784 cb->valid = true;
5785
Michael Adisumarta93e97522017-10-06 15:49:46 -07005786 if (of_property_read_bool(dev->of_node,
5787 "qcom,smmu-s1-bypass")) {
5788 smmu_info.s1_bypass_arr[IPA_SMMU_CB_AP] = true;
Amir Levy9659e592016-10-27 18:08:27 +03005789 if (iommu_domain_set_attr(cb->mapping->domain,
5790 DOMAIN_ATTR_S1_BYPASS,
5791 &bypass)) {
5792 IPAERR("couldn't set bypass\n");
5793 arm_iommu_release_mapping(cb->mapping);
5794 cb->valid = false;
5795 return -EIO;
5796 }
Michael Adisumarta93e97522017-10-06 15:49:46 -07005797 IPADBG("AP/USB SMMU S1 BYPASS\n");
Amir Levy9659e592016-10-27 18:08:27 +03005798 } else {
Michael Adisumarta93e97522017-10-06 15:49:46 -07005799 smmu_info.s1_bypass_arr[IPA_SMMU_CB_AP] = false;
Amir Levy9659e592016-10-27 18:08:27 +03005800 if (iommu_domain_set_attr(cb->mapping->domain,
5801 DOMAIN_ATTR_ATOMIC,
5802 &atomic_ctx)) {
5803 IPAERR("couldn't set domain as atomic\n");
5804 arm_iommu_release_mapping(cb->mapping);
5805 cb->valid = false;
5806 return -EIO;
5807 }
Michael Adisumarta93e97522017-10-06 15:49:46 -07005808 IPADBG("AP/USB SMMU atomic set\n");
Amir Levy9659e592016-10-27 18:08:27 +03005809
5810 if (iommu_domain_set_attr(cb->mapping->domain,
5811 DOMAIN_ATTR_FAST,
5812 &fast)) {
5813 IPAERR("couldn't set fast map\n");
5814 arm_iommu_release_mapping(cb->mapping);
5815 cb->valid = false;
5816 return -EIO;
5817 }
5818 IPADBG("SMMU fast map set\n");
5819 }
5820
Michael Adisumarta93e97522017-10-06 15:49:46 -07005821 pr_info("IPA smmu_info.s1_bypass_arr[AP]=%d smmu_info.fast_map=%d\n",
5822 smmu_info.s1_bypass_arr[IPA_SMMU_CB_AP], smmu_info.fast_map);
5823
Amir Levy9659e592016-10-27 18:08:27 +03005824 result = arm_iommu_attach_device(cb->dev, cb->mapping);
5825 if (result) {
5826 IPAERR("couldn't attach to IOMMU ret=%d\n", result);
5827 cb->valid = false;
5828 return result;
5829 }
5830
5831 add_map = of_get_property(dev->of_node,
5832 "qcom,additional-mapping", &add_map_size);
5833 if (add_map) {
5834		/* the additional-mapping property is an array of u32 3-tuples */
5835 if (add_map_size % (3 * sizeof(u32))) {
5836 IPAERR("wrong additional mapping format\n");
5837 cb->valid = false;
5838 return -EFAULT;
5839 }
5840
5841		/* iterate over each entry of the additional mapping array */
5842 for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
5843 u32 iova = be32_to_cpu(add_map[i]);
5844 u32 pa = be32_to_cpu(add_map[i + 1]);
5845 u32 size = be32_to_cpu(add_map[i + 2]);
5846 unsigned long iova_p;
5847 phys_addr_t pa_p;
5848 u32 size_p;
5849
5850 IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
5851 iova_p, pa_p, size_p);
5852 IPADBG("mapping 0x%lx to 0x%pa size %d\n",
5853 iova_p, &pa_p, size_p);
5854 ipa3_iommu_map(cb->mapping->domain,
5855 iova_p, pa_p, size_p,
Amir Levyf5625342016-12-25 10:21:02 +02005856 IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
Amir Levy9659e592016-10-27 18:08:27 +03005857 }
5858 }
5859
5860 /* map SMEM memory for IPA table accesses */
5861 smem_addr = smem_alloc(SMEM_IPA_FILTER_TABLE, IPA_SMEM_SIZE,
5862 SMEM_MODEM, 0);
5863 if (smem_addr) {
5864 phys_addr_t iova = smem_virt_to_phys(smem_addr);
5865 phys_addr_t pa = iova;
5866 unsigned long iova_p;
5867 phys_addr_t pa_p;
5868 u32 size_p;
5869
5870 IPA_SMMU_ROUND_TO_PAGE(iova, pa, IPA_SMEM_SIZE,
5871 iova_p, pa_p, size_p);
5872 IPADBG("mapping 0x%lx to 0x%pa size %d\n",
5873 iova_p, &pa_p, size_p);
5874 ipa3_iommu_map(cb->mapping->domain,
5875 iova_p, pa_p, size_p,
Amir Levyf5625342016-12-25 10:21:02 +02005876 IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
Amir Levy9659e592016-10-27 18:08:27 +03005877 }
5878
5879
5880 smmu_info.present = true;
5881
5882 if (!ipa3_bus_scale_table)
5883 ipa3_bus_scale_table = msm_bus_cl_get_pdata(ipa3_pdev);
5884
5885 /* Proceed to real initialization */
5886 result = ipa3_pre_init(&ipa3_res, dev);
5887 if (result) {
5888 IPAERR("ipa_init failed\n");
5889 arm_iommu_detach_device(cb->dev);
5890 arm_iommu_release_mapping(cb->mapping);
5891 cb->valid = false;
5892 return result;
5893 }
5894
5895 return result;
5896}
5897
5898static irqreturn_t ipa3_smp2p_modem_clk_query_isr(int irq, void *ctxt)
5899{
5900 ipa3_freeze_clock_vote_and_notify_modem();
5901
5902 return IRQ_HANDLED;
5903}
5904
5905static int ipa3_smp2p_probe(struct device *dev)
5906{
5907 struct device_node *node = dev->of_node;
5908 int res;
5909
Mohammed Javid7de12702017-07-21 15:22:58 +05305910 if (ipa3_ctx == NULL) {
5911 IPAERR("ipa3_ctx was not initialized\n");
5912 return -ENXIO;
5913 }
Amir Levy9659e592016-10-27 18:08:27 +03005914 IPADBG("node->name=%s\n", node->name);
5915 if (strcmp("qcom,smp2pgpio_map_ipa_1_out", node->name) == 0) {
5916 res = of_get_gpio(node, 0);
5917 if (res < 0) {
5918 IPADBG("of_get_gpio returned %d\n", res);
5919 return res;
5920 }
5921
5922 ipa3_ctx->smp2p_info.out_base_id = res;
5923 IPADBG("smp2p out_base_id=%d\n",
5924 ipa3_ctx->smp2p_info.out_base_id);
5925 } else if (strcmp("qcom,smp2pgpio_map_ipa_1_in", node->name) == 0) {
5926 int irq;
5927
5928 res = of_get_gpio(node, 0);
5929 if (res < 0) {
5930 IPADBG("of_get_gpio returned %d\n", res);
5931 return res;
5932 }
5933
5934 ipa3_ctx->smp2p_info.in_base_id = res;
5935 IPADBG("smp2p in_base_id=%d\n",
5936 ipa3_ctx->smp2p_info.in_base_id);
5937
5938 /* register for modem clk query */
5939 irq = gpio_to_irq(ipa3_ctx->smp2p_info.in_base_id +
5940 IPA_GPIO_IN_QUERY_CLK_IDX);
5941 if (irq < 0) {
5942 IPAERR("gpio_to_irq failed %d\n", irq);
5943 return -ENODEV;
5944 }
5945 IPADBG("smp2p irq#=%d\n", irq);
5946 res = request_irq(irq,
5947 (irq_handler_t)ipa3_smp2p_modem_clk_query_isr,
5948 IRQF_TRIGGER_RISING, "ipa_smp2p_clk_vote", dev);
5949 if (res) {
5950 IPAERR("fail to register smp2p irq=%d\n", irq);
5951 return -ENODEV;
5952 }
5953 res = enable_irq_wake(ipa3_ctx->smp2p_info.in_base_id +
5954 IPA_GPIO_IN_QUERY_CLK_IDX);
5955 if (res)
5956 IPAERR("failed to enable irq wake\n");
5957 }
5958
5959 return 0;
5960}
5961
5962int ipa3_plat_drv_probe(struct platform_device *pdev_p,
5963 struct ipa_api_controller *api_ctrl,
5964 const struct of_device_id *pdrv_match)
5965{
5966 int result;
5967 struct device *dev = &pdev_p->dev;
5968
5969 IPADBG("IPA driver probing started\n");
5970 IPADBG("dev->of_node->name = %s\n", dev->of_node->name);
5971
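	/*
	 * This probe entry point is shared by the main IPA node and its
	 * sub-devices (SMMU context banks and smp2p GPIO maps); dispatch on
	 * the compatible string before falling through to the main IPA probe.
	 */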
5972 if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-ap-cb"))
5973 return ipa_smmu_ap_cb_probe(dev);
5974
5975 if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-wlan-cb"))
5976 return ipa_smmu_wlan_cb_probe(dev);
5977
5978 if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-uc-cb"))
5979 return ipa_smmu_uc_cb_probe(dev);
5980
5981 if (of_device_is_compatible(dev->of_node,
5982 "qcom,smp2pgpio-map-ipa-1-in"))
5983 return ipa3_smp2p_probe(dev);
5984
5985 if (of_device_is_compatible(dev->of_node,
5986 "qcom,smp2pgpio-map-ipa-1-out"))
5987 return ipa3_smp2p_probe(dev);
5988
5989 master_dev = dev;
5990 if (!ipa3_pdev)
5991 ipa3_pdev = pdev_p;
5992
5993 result = get_ipa_dts_configuration(pdev_p, &ipa3_res);
5994 if (result) {
5995 IPAERR("IPA dts parsing failed\n");
5996 return result;
5997 }
5998
5999 result = ipa3_bind_api_controller(ipa3_res.ipa_hw_type, api_ctrl);
6000 if (result) {
6001 IPAERR("IPA API binding failed\n");
6002 return result;
6003 }
6004
Amir Levy9659e592016-10-27 18:08:27 +03006005 if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
6006 if (of_property_read_bool(pdev_p->dev.of_node,
Amir Levy9659e592016-10-27 18:08:27 +03006007 "qcom,smmu-fast-map"))
6008 smmu_info.fast_map = true;
6009 if (of_property_read_bool(pdev_p->dev.of_node,
6010 "qcom,use-64-bit-dma-mask"))
6011 smmu_info.use_64_bit_dma_mask = true;
6012 smmu_info.arm_smmu = true;
Amir Levy9659e592016-10-27 18:08:27 +03006013 } else if (of_property_read_bool(pdev_p->dev.of_node,
6014 "qcom,msm-smmu")) {
6015 IPAERR("Legacy IOMMU not supported\n");
6016 result = -EOPNOTSUPP;
6017 } else {
6018 if (of_property_read_bool(pdev_p->dev.of_node,
6019 "qcom,use-64-bit-dma-mask")) {
6020 if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(64)) ||
6021 dma_set_coherent_mask(&pdev_p->dev,
6022 DMA_BIT_MASK(64))) {
6023 IPAERR("DMA set 64bit mask failed\n");
6024 return -EOPNOTSUPP;
6025 }
6026 } else {
6027 if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(32)) ||
6028 dma_set_coherent_mask(&pdev_p->dev,
6029 DMA_BIT_MASK(32))) {
6030 IPAERR("DMA set 32bit mask failed\n");
6031 return -EOPNOTSUPP;
6032 }
6033 }
6034
6035 if (!ipa3_bus_scale_table)
6036 ipa3_bus_scale_table = msm_bus_cl_get_pdata(pdev_p);
6037 /* Proceed to real initialization */
6038 result = ipa3_pre_init(&ipa3_res, dev);
6039 if (result) {
6040 IPAERR("ipa3_init failed\n");
6041 return result;
6042 }
6043 }
6044
Ghanim Fodi115bf8a2017-04-21 01:36:06 -07006045 result = of_platform_populate(pdev_p->dev.of_node,
6046 pdrv_match, NULL, &pdev_p->dev);
6047 if (result) {
6048 IPAERR("failed to populate platform\n");
6049 return result;
6050 }
6051
Amir Levy9659e592016-10-27 18:08:27 +03006052 return result;
6053}
6054
6055/**
6056 * ipa3_ap_suspend() - suspend callback for runtime_pm
6057 * @dev: pointer to device
6058 *
6059 * This callback will be invoked by the runtime_pm framework when an AP suspend
6060 * operation is invoked, usually by pressing a suspend button.
6061 *
6062 * Returns -EAGAIN to runtime_pm framework in case IPA is in use by AP.
6063 * This will postpone the suspend operation until IPA is no longer used by AP.
6064 */
6065int ipa3_ap_suspend(struct device *dev)
6066{
6067 int i;
6068
6069 IPADBG("Enter...\n");
6070
6071 /* In case there is a tx/rx handler in polling mode fail to suspend */
6072 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
6073 if (ipa3_ctx->ep[i].sys &&
6074 atomic_read(&ipa3_ctx->ep[i].sys->curr_polling_state)) {
6075 IPAERR("EP %d is in polling state, do not suspend\n",
6076 i);
6077 return -EAGAIN;
6078 }
6079 }
6080
Michael Adisumarta3e350812017-09-18 14:54:36 -07006081 if (ipa3_ctx->use_ipa_pm) {
6082 ipa_pm_deactivate_all_deferred();
6083 } else {
6084 /*
6085 * Release transport IPA resource without waiting
6086 * for inactivity timer
6087 */
6088 atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
6089 ipa3_transport_release_resource(NULL);
6090 }
Amir Levy9659e592016-10-27 18:08:27 +03006091 IPADBG("Exit\n");
6092
6093 return 0;
6094}
6095
6096/**
6097 * ipa3_ap_resume() - resume callback for runtime_pm
6098 * @dev: pointer to device
6099 *
6100 * This callback will be invoked by the runtime_pm framework when an AP resume
6101 * operation is invoked.
6102 *
6103 * Always returns 0 since resume should always succeed.
6104 */
6105int ipa3_ap_resume(struct device *dev)
6106{
6107 return 0;
6108}
6109
6110struct ipa3_context *ipa3_get_ctx(void)
6111{
6112 return ipa3_ctx;
6113}
6114
Amir Levy9659e592016-10-27 18:08:27 +03006115static void ipa_gsi_notify_cb(struct gsi_per_notify *notify)
6116{
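	/*
	 * Most of these GSI events indicate an unrecoverable hardware or
	 * firmware state, so they intentionally BUG() to capture the state for
	 * post-mortem analysis; only the global error and general break point
	 * events are treated as non-fatal.
	 */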
6117 switch (notify->evt_id) {
6118 case GSI_PER_EVT_GLOB_ERROR:
6119 IPAERR("Got GSI_PER_EVT_GLOB_ERROR\n");
6120 IPAERR("Err_desc = 0x%04x\n", notify->data.err_desc);
6121 break;
6122 case GSI_PER_EVT_GLOB_GP1:
6123 IPAERR("Got GSI_PER_EVT_GLOB_GP1\n");
6124 BUG();
6125 break;
6126 case GSI_PER_EVT_GLOB_GP2:
6127 IPAERR("Got GSI_PER_EVT_GLOB_GP2\n");
6128 BUG();
6129 break;
6130 case GSI_PER_EVT_GLOB_GP3:
6131 IPAERR("Got GSI_PER_EVT_GLOB_GP3\n");
6132 BUG();
6133 break;
6134 case GSI_PER_EVT_GENERAL_BREAK_POINT:
6135 IPAERR("Got GSI_PER_EVT_GENERAL_BREAK_POINT\n");
6136 break;
6137 case GSI_PER_EVT_GENERAL_BUS_ERROR:
6138 IPAERR("Got GSI_PER_EVT_GENERAL_BUS_ERROR\n");
6139 BUG();
6140 break;
6141 case GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW:
6142 IPAERR("Got GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW\n");
6143 BUG();
6144 break;
6145 case GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW:
6146 IPAERR("Got GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW\n");
6147 BUG();
6148 break;
6149 default:
6150 IPAERR("Received unexpected evt: %d\n",
6151 notify->evt_id);
6152 BUG();
6153 }
6154}
6155
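/*
 * ipa3_register_ipa_ready_cb() - register a callback to be invoked once the
 * IPA driver completes initialization.
 *
 * Returns 0 on success, -EEXIST if initialization already completed (the
 * caller may proceed immediately), -ENXIO if the driver context does not
 * exist yet, or -ENOMEM on allocation failure.
 *
 * Minimal usage sketch (client function and context names are hypothetical):
 *
 *	static void my_ipa_ready_cb(void *user_data)
 *	{
 *		struct my_client_ctx *ctx = user_data;
 *
 *		... perform setup that requires IPA to be up ...
 *	}
 *
 *	ret = ipa3_register_ipa_ready_cb(my_ipa_ready_cb, ctx);
 *	if (ret == -EEXIST)
 *		my_ipa_ready_cb(ctx);
 */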
6156int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data)
6157{
6158 struct ipa3_ready_cb_info *cb_info = NULL;
6159
6160 /* check ipa3_ctx existed or not */
6161 if (!ipa3_ctx) {
6162 IPADBG("IPA driver haven't initialized\n");
6163 return -ENXIO;
6164 }
6165 mutex_lock(&ipa3_ctx->lock);
6166 if (ipa3_ctx->ipa_initialization_complete) {
6167 mutex_unlock(&ipa3_ctx->lock);
6168 IPADBG("IPA driver finished initialization already\n");
6169 return -EEXIST;
6170 }
6171
6172 cb_info = kmalloc(sizeof(struct ipa3_ready_cb_info), GFP_KERNEL);
6173 if (!cb_info) {
6174 mutex_unlock(&ipa3_ctx->lock);
6175 return -ENOMEM;
6176 }
6177
6178 cb_info->ready_cb = ipa_ready_cb;
6179 cb_info->user_data = user_data;
6180
6181 list_add_tail(&cb_info->link, &ipa3_ctx->ipa_ready_cb_list);
6182 mutex_unlock(&ipa3_ctx->lock);
6183
6184 return 0;
6185}
6186
6187int ipa3_iommu_map(struct iommu_domain *domain,
6188 unsigned long iova, phys_addr_t paddr, size_t size, int prot)
6189{
6190 struct ipa_smmu_cb_ctx *ap_cb = ipa3_get_smmu_ctx();
6191 struct ipa_smmu_cb_ctx *uc_cb = ipa3_get_uc_smmu_ctx();
6192
6193 IPADBG("domain =0x%p iova 0x%lx\n", domain, iova);
6194 IPADBG("paddr =0x%pa size 0x%x\n", &paddr, (u32)size);
6195
6196 /* make sure no overlapping */
6197 if (domain == ipa3_get_smmu_domain()) {
6198 if (iova >= ap_cb->va_start && iova < ap_cb->va_end) {
6199 IPAERR("iommu AP overlap addr 0x%lx\n", iova);
6200 ipa_assert();
6201 return -EFAULT;
6202 }
6203 } else if (domain == ipa3_get_wlan_smmu_domain()) {
6204		/* WLAN mappings are one-time; no overlap check needed */
6205 } else if (domain == ipa3_get_uc_smmu_domain()) {
6206 if (iova >= uc_cb->va_start && iova < uc_cb->va_end) {
6207 IPAERR("iommu uC overlap addr 0x%lx\n", iova);
6208 ipa_assert();
6209 return -EFAULT;
6210 }
6211 } else {
6212 IPAERR("Unexpected domain 0x%p\n", domain);
6213 ipa_assert();
6214 return -EFAULT;
6215 }
6216
6217 return iommu_map(domain, iova, paddr, size, prot);
6218}
6219
6220MODULE_LICENSE("GPL v2");
6221MODULE_DESCRIPTION("IPA HW device driver");