Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Amir Levy9659e592016-10-27 18:08:27 +03002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/clk.h>
14#include <linux/compat.h>
15#include <linux/device.h>
16#include <linux/dmapool.h>
17#include <linux/fs.h>
18#include <linux/genalloc.h>
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/mm.h>
22#include <linux/module.h>
23#include <linux/of.h>
24#include <linux/of_platform.h>
25#include <linux/platform_device.h>
26#include <linux/rbtree.h>
27#include <linux/of_gpio.h>
28#include <linux/uaccess.h>
29#include <linux/interrupt.h>
30#include <linux/msm-bus.h>
31#include <linux/msm-bus-board.h>
32#include <linux/netdevice.h>
33#include <linux/delay.h>
34#include <linux/msm_gsi.h>
Amir Levy9659e592016-10-27 18:08:27 +030035#include <linux/time.h>
36#include <linux/hashtable.h>
Amir Levyd9f51132016-11-14 16:55:35 +020037#include <linux/jhash.h>
Amir Levy9659e592016-10-27 18:08:27 +030038#include <soc/qcom/subsystem_restart.h>
39#include <soc/qcom/smem.h>
Gidon Studinski3021a6f2016-11-10 12:48:48 +020040#include <soc/qcom/scm.h>
Amir Levy635bced2016-12-19 09:20:42 +020041#include <asm/cacheflush.h>
Gidon Studinski3021a6f2016-11-10 12:48:48 +020042
43#ifdef CONFIG_ARM64
44
45/* Outer caches unsupported on ARM64 platforms */
46#define outer_flush_range(x, y)
47#define __cpuc_flush_dcache_area __flush_dcache_area
48
49#endif
50
Amir Levy9659e592016-10-27 18:08:27 +030051#define IPA_SUBSYSTEM_NAME "ipa_fws"
52#include "ipa_i.h"
53#include "../ipa_rm_i.h"
54#include "ipahal/ipahal.h"
55#include "ipahal/ipahal_fltrt.h"
56
57#define CREATE_TRACE_POINTS
58#include "ipa_trace.h"
59
60#define IPA_GPIO_IN_QUERY_CLK_IDX 0
61#define IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX 0
62#define IPA_GPIO_OUT_CLK_VOTE_IDX 1
63
64#define IPA_SUMMING_THRESHOLD (0x10)
65#define IPA_PIPE_MEM_START_OFST (0x0)
66#define IPA_PIPE_MEM_SIZE (0x0)
67#define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \
68 x == IPA_MODE_MOBILE_AP_WAN || \
69 x == IPA_MODE_MOBILE_AP_WLAN)
70#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
71#define IPA_A5_MUX_HEADER_LENGTH (8)
72
73#define IPA_AGGR_MAX_STR_LENGTH (10)
74
Gidon Studinski3021a6f2016-11-10 12:48:48 +020075#define CLEANUP_TAG_PROCESS_TIMEOUT 500
Amir Levy9659e592016-10-27 18:08:27 +030076
77#define IPA_AGGR_STR_IN_BYTES(str) \
78 (strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
79
80#define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100
81
82#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048
83
84#define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0
85#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
86#define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
87#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3
88
89#define IPA_SMEM_SIZE (8 * 1024)
90
Michael Adisumartaf01e9fd2017-08-31 12:23:51 -070091#define IPA_GSI_CHANNEL_HALT_MIN_SLEEP 5000
92#define IPA_GSI_CHANNEL_HALT_MAX_SLEEP 10000
93#define IPA_GSI_CHANNEL_HALT_MAX_TRY 10
94
Amir Levy9659e592016-10-27 18:08:27 +030095/* round addresses to the closest page boundaries per SMMU requirements */
96#define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \
97 do { \
98 (iova_p) = rounddown((iova), PAGE_SIZE); \
99 (pa_p) = rounddown((pa), PAGE_SIZE); \
100 (size_p) = roundup((size) + (pa) - (pa_p), PAGE_SIZE); \
101 } while (0)
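/*
 * For example, with 4 KB pages, iova=0x1003, pa=0x2003, size=0x10 becomes
 * iova_p=0x1000, pa_p=0x2000, size_p=0x1000: the mapping is widened to whole
 * pages that still cover the original [pa, pa + size) range.
 */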
102
103
104/* The relative location in /lib/firmware where the FWs will reside */
105#define IPA_FWS_PATH "ipa/ipa_fws.elf"
106
107#ifdef CONFIG_COMPAT
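/*
 * 32-bit variants of the IPA ioctl commands. Each carries its payload as a
 * compat_uptr_t so that a 32-bit user space process can issue these ioctls
 * to a 64-bit kernel through the compat ioctl handler.
 */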
108#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
109 IPA_IOCTL_ADD_HDR, \
110 compat_uptr_t)
111#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \
112 IPA_IOCTL_DEL_HDR, \
113 compat_uptr_t)
114#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
115 IPA_IOCTL_ADD_RT_RULE, \
116 compat_uptr_t)
117#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
118 IPA_IOCTL_DEL_RT_RULE, \
119 compat_uptr_t)
120#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
121 IPA_IOCTL_ADD_FLT_RULE, \
122 compat_uptr_t)
123#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
124 IPA_IOCTL_DEL_FLT_RULE, \
125 compat_uptr_t)
126#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \
127 IPA_IOCTL_GET_RT_TBL, \
128 compat_uptr_t)
129#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \
130 IPA_IOCTL_COPY_HDR, \
131 compat_uptr_t)
132#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \
133 IPA_IOCTL_QUERY_INTF, \
134 compat_uptr_t)
135#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
136 IPA_IOCTL_QUERY_INTF_TX_PROPS, \
137 compat_uptr_t)
138#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
139 IPA_IOCTL_QUERY_INTF_RX_PROPS, \
140 compat_uptr_t)
141#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \
142 IPA_IOCTL_QUERY_INTF_EXT_PROPS, \
143 compat_uptr_t)
144#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \
145 IPA_IOCTL_GET_HDR, \
146 compat_uptr_t)
147#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \
148 IPA_IOCTL_ALLOC_NAT_MEM, \
149 compat_uptr_t)
150#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \
151 IPA_IOCTL_V4_INIT_NAT, \
152 compat_uptr_t)
153#define IPA_IOC_NAT_DMA32 _IOWR(IPA_IOC_MAGIC, \
154 IPA_IOCTL_NAT_DMA, \
155 compat_uptr_t)
156#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \
157 IPA_IOCTL_V4_DEL_NAT, \
158 compat_uptr_t)
159#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \
160 IPA_IOCTL_GET_NAT_OFFSET, \
161 compat_uptr_t)
162#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \
163 IPA_IOCTL_PULL_MSG, \
164 compat_uptr_t)
165#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
166 IPA_IOCTL_RM_ADD_DEPENDENCY, \
167 compat_uptr_t)
168#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
169 IPA_IOCTL_RM_DEL_DEPENDENCY, \
170 compat_uptr_t)
171#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \
172 IPA_IOCTL_GENERATE_FLT_EQ, \
173 compat_uptr_t)
174#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \
175 IPA_IOCTL_QUERY_RT_TBL_INDEX, \
176 compat_uptr_t)
177#define IPA_IOC_WRITE_QMAPID32 _IOWR(IPA_IOC_MAGIC, \
178 IPA_IOCTL_WRITE_QMAPID, \
179 compat_uptr_t)
180#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
181 IPA_IOCTL_MDFY_FLT_RULE, \
182 compat_uptr_t)
183#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \
184 IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
185 compat_uptr_t)
186#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \
187 IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \
188 compat_uptr_t)
189#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \
190 IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \
191 compat_uptr_t)
192#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
193 IPA_IOCTL_ADD_HDR_PROC_CTX, \
194 compat_uptr_t)
195#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
196 IPA_IOCTL_DEL_HDR_PROC_CTX, \
197 compat_uptr_t)
198#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
199 IPA_IOCTL_MDFY_RT_RULE, \
200 compat_uptr_t)
201
202/**
203 * struct ipa3_ioc_nat_alloc_mem32 - nat table memory allocation
204 * properties
 205 * @dev_name: input parameter, the name of the table
 206 * @size: input parameter, size of the table in bytes
207 * @offset: output parameter, offset into page in case of system memory
208 */
209struct ipa3_ioc_nat_alloc_mem32 {
210 char dev_name[IPA_RESOURCE_NAME_MAX];
211 compat_size_t size;
212 compat_off_t offset;
213};
214#endif
215
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200216#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311
217#define TZ_MEM_PROTECT_REGION_ID 0x10
218
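/*
 * Payload handed to TZ (via an SCM call) when asking it to protect the IPA
 * SMMU region: an iovec describing the region to protect, and a wrapper
 * holding the physical address and size of that iovec buffer.
 */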
219struct tz_smmu_ipa_protect_region_iovec_s {
220 u64 input_addr;
221 u64 output_addr;
222 u64 size;
223 u32 attr;
224} __packed;
225
226struct tz_smmu_ipa_protect_region_s {
227 phys_addr_t iovec_buf;
228 u32 size_bytes;
229} __packed;
230
Amir Levy9659e592016-10-27 18:08:27 +0300231static void ipa3_start_tag_process(struct work_struct *work);
232static DECLARE_WORK(ipa3_tag_work, ipa3_start_tag_process);
233
Amir Levya59ed3f2017-03-05 17:30:55 +0200234static void ipa3_transport_release_resource(struct work_struct *work);
235static DECLARE_DELAYED_WORK(ipa3_transport_release_resource_work,
236 ipa3_transport_release_resource);
Amir Levy9659e592016-10-27 18:08:27 +0300237static void ipa_gsi_notify_cb(struct gsi_per_notify *notify);
238
Ghanim Fodia5f376a2017-10-17 18:14:53 +0300239static void ipa3_load_ipa_fw(struct work_struct *work);
240static DECLARE_WORK(ipa3_fw_loading_work, ipa3_load_ipa_fw);
Utkarsh Saxenaded78142017-05-03 14:04:30 +0530241
Skylar Chang242952b2017-07-20 15:04:05 -0700242static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work);
243static DECLARE_WORK(ipa_dec_clients_disable_clks_on_wq_work,
244 ipa_dec_clients_disable_clks_on_wq);
245
Amir Levy9659e592016-10-27 18:08:27 +0300246static struct ipa3_plat_drv_res ipa3_res = {0, };
247struct msm_bus_scale_pdata *ipa3_bus_scale_table;
248
249static struct clk *ipa3_clk;
250
251struct ipa3_context *ipa3_ctx;
252static struct device *master_dev;
253struct platform_device *ipa3_pdev;
254static struct {
255 bool present;
256 bool arm_smmu;
Amir Levy9659e592016-10-27 18:08:27 +0300257 bool fast_map;
Michael Adisumarta93e97522017-10-06 15:49:46 -0700258 bool s1_bypass_arr[IPA_SMMU_CB_MAX];
Amir Levy9659e592016-10-27 18:08:27 +0300259 bool use_64_bit_dma_mask;
260 u32 ipa_base;
261 u32 ipa_size;
262} smmu_info;
263
264static char *active_clients_table_buf;
265
266int ipa3_active_clients_log_print_buffer(char *buf, int size)
267{
268 int i;
269 int nbytes;
270 int cnt = 0;
271 int start_idx;
272 int end_idx;
Skylar Chang69ae50e2017-07-31 13:13:29 -0700273 unsigned long flags;
Amir Levy9659e592016-10-27 18:08:27 +0300274
Skylar Chang69ae50e2017-07-31 13:13:29 -0700275 spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
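	/* oldest entry sits just after the tail; newest is right before the head */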
Amir Levy9659e592016-10-27 18:08:27 +0300276 start_idx = (ipa3_ctx->ipa3_active_clients_logging.log_tail + 1) %
277 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
278 end_idx = ipa3_ctx->ipa3_active_clients_logging.log_head;
279 for (i = start_idx; i != end_idx;
280 i = (i + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) {
281 nbytes = scnprintf(buf + cnt, size - cnt, "%s\n",
282 ipa3_ctx->ipa3_active_clients_logging
283 .log_buffer[i]);
284 cnt += nbytes;
285 }
Skylar Chang69ae50e2017-07-31 13:13:29 -0700286 spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
287 flags);
Amir Levy9659e592016-10-27 18:08:27 +0300288
289 return cnt;
290}
291
292int ipa3_active_clients_log_print_table(char *buf, int size)
293{
294 int i;
295 struct ipa3_active_client_htable_entry *iterator;
296 int cnt = 0;
Skylar Chang69ae50e2017-07-31 13:13:29 -0700297 unsigned long flags;
Amir Levy9659e592016-10-27 18:08:27 +0300298
Skylar Chang69ae50e2017-07-31 13:13:29 -0700299 spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
Amir Levy9659e592016-10-27 18:08:27 +0300300 cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n");
301 hash_for_each(ipa3_ctx->ipa3_active_clients_logging.htable, i,
302 iterator, list) {
303 switch (iterator->type) {
304 case IPA3_ACTIVE_CLIENT_LOG_TYPE_EP:
305 cnt += scnprintf(buf + cnt, size - cnt,
306 "%-40s %-3d ENDPOINT\n",
307 iterator->id_string, iterator->count);
308 break;
309 case IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE:
310 cnt += scnprintf(buf + cnt, size - cnt,
311 "%-40s %-3d SIMPLE\n",
312 iterator->id_string, iterator->count);
313 break;
314 case IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE:
315 cnt += scnprintf(buf + cnt, size - cnt,
316 "%-40s %-3d RESOURCE\n",
317 iterator->id_string, iterator->count);
318 break;
319 case IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL:
320 cnt += scnprintf(buf + cnt, size - cnt,
321 "%-40s %-3d SPECIAL\n",
322 iterator->id_string, iterator->count);
323 break;
324 default:
325 IPAERR("Trying to print illegal active_clients type");
326 break;
327 }
328 }
329 cnt += scnprintf(buf + cnt, size - cnt,
330 "\nTotal active clients count: %d\n",
Skylar Chang242952b2017-07-20 15:04:05 -0700331 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
Skylar Chang69ae50e2017-07-31 13:13:29 -0700332 spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
333 flags);
Amir Levy9659e592016-10-27 18:08:27 +0300334
335 return cnt;
336}
337
338static int ipa3_active_clients_panic_notifier(struct notifier_block *this,
339 unsigned long event, void *ptr)
340{
Skylar Chang242952b2017-07-20 15:04:05 -0700341 mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
Amir Levy9659e592016-10-27 18:08:27 +0300342 ipa3_active_clients_log_print_table(active_clients_table_buf,
343 IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE);
344 IPAERR("%s", active_clients_table_buf);
Skylar Chang242952b2017-07-20 15:04:05 -0700345 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
Amir Levy9659e592016-10-27 18:08:27 +0300346
347 return NOTIFY_DONE;
348}
349
350static struct notifier_block ipa3_active_clients_panic_blk = {
351 .notifier_call = ipa3_active_clients_panic_notifier,
352};
353
354static int ipa3_active_clients_log_insert(const char *string)
355{
356 int head;
357 int tail;
358
359 if (!ipa3_ctx->ipa3_active_clients_logging.log_rdy)
360 return -EPERM;
361
362 head = ipa3_ctx->ipa3_active_clients_logging.log_head;
363 tail = ipa3_ctx->ipa3_active_clients_logging.log_tail;
364
365 memset(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], '_',
366 IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
367 strlcpy(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], string,
368 (size_t)IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
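	/* advance head; if the ring is now full, drop the oldest line by moving tail */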
369 head = (head + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
370 if (tail == head)
371 tail = (tail + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
372
373 ipa3_ctx->ipa3_active_clients_logging.log_tail = tail;
374 ipa3_ctx->ipa3_active_clients_logging.log_head = head;
375
376 return 0;
377}
378
379static int ipa3_active_clients_log_init(void)
380{
381 int i;
382
Skylar Chang69ae50e2017-07-31 13:13:29 -0700383 spin_lock_init(&ipa3_ctx->ipa3_active_clients_logging.lock);
Amir Levy9659e592016-10-27 18:08:27 +0300384 ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] = kzalloc(
385 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES *
386 sizeof(char[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN]),
387 GFP_KERNEL);
388 active_clients_table_buf = kzalloc(sizeof(
389 char[IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE]), GFP_KERNEL);
 390 if (ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] == NULL) {
391 pr_err("Active Clients Logging memory allocation failed");
392 goto bail;
393 }
394 for (i = 0; i < IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; i++) {
395 ipa3_ctx->ipa3_active_clients_logging.log_buffer[i] =
396 ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] +
397 (IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN * i);
398 }
399 ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
400 ipa3_ctx->ipa3_active_clients_logging.log_tail =
401 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
402 hash_init(ipa3_ctx->ipa3_active_clients_logging.htable);
403 atomic_notifier_chain_register(&panic_notifier_list,
404 &ipa3_active_clients_panic_blk);
405 ipa3_ctx->ipa3_active_clients_logging.log_rdy = 1;
406
407 return 0;
408
409bail:
410 return -ENOMEM;
411}
412
413void ipa3_active_clients_log_clear(void)
414{
Skylar Chang69ae50e2017-07-31 13:13:29 -0700415 unsigned long flags;
416
417 spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
Amir Levy9659e592016-10-27 18:08:27 +0300418 ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
419 ipa3_ctx->ipa3_active_clients_logging.log_tail =
420 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
Skylar Chang69ae50e2017-07-31 13:13:29 -0700421 spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
422 flags);
Amir Levy9659e592016-10-27 18:08:27 +0300423}
424
425static void ipa3_active_clients_log_destroy(void)
426{
Skylar Chang69ae50e2017-07-31 13:13:29 -0700427 unsigned long flags;
428
429 spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
Amir Levy9659e592016-10-27 18:08:27 +0300430 ipa3_ctx->ipa3_active_clients_logging.log_rdy = 0;
431 kfree(ipa3_ctx->ipa3_active_clients_logging.log_buffer[0]);
432 ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
433 ipa3_ctx->ipa3_active_clients_logging.log_tail =
434 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
Skylar Chang69ae50e2017-07-31 13:13:29 -0700435 spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
436 flags);
Amir Levy9659e592016-10-27 18:08:27 +0300437}
438
Amir Levy9659e592016-10-27 18:08:27 +0300439static struct ipa_smmu_cb_ctx smmu_cb[IPA_SMMU_CB_MAX];
440
441struct iommu_domain *ipa3_get_smmu_domain(void)
442{
443 if (smmu_cb[IPA_SMMU_CB_AP].valid)
444 return smmu_cb[IPA_SMMU_CB_AP].mapping->domain;
445
446 IPAERR("CB not valid\n");
447
448 return NULL;
449}
450
451struct iommu_domain *ipa3_get_uc_smmu_domain(void)
452{
453 if (smmu_cb[IPA_SMMU_CB_UC].valid)
454 return smmu_cb[IPA_SMMU_CB_UC].mapping->domain;
455
456 IPAERR("CB not valid\n");
457
458 return NULL;
459}
460
461struct iommu_domain *ipa3_get_wlan_smmu_domain(void)
462{
463 if (smmu_cb[IPA_SMMU_CB_WLAN].valid)
464 return smmu_cb[IPA_SMMU_CB_WLAN].iommu;
465
466 IPAERR("CB not valid\n");
467
468 return NULL;
469}
470
471
472struct device *ipa3_get_dma_dev(void)
473{
474 return ipa3_ctx->pdev;
475}
476
477/**
 478 * ipa3_get_smmu_ctx()- Return the AP smmu context
479 *
480 * Return value: pointer to smmu context address
481 */
482struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(void)
483{
484 return &smmu_cb[IPA_SMMU_CB_AP];
485}
486
487/**
488 * ipa3_get_wlan_smmu_ctx()- Return the wlan smmu context
489 *
490 * Return value: pointer to smmu context address
491 */
492struct ipa_smmu_cb_ctx *ipa3_get_wlan_smmu_ctx(void)
493{
494 return &smmu_cb[IPA_SMMU_CB_WLAN];
495}
496
497/**
498 * ipa3_get_uc_smmu_ctx()- Return the uc smmu context
499 *
500 * Return value: pointer to smmu context address
501 */
502struct ipa_smmu_cb_ctx *ipa3_get_uc_smmu_ctx(void)
503{
504 return &smmu_cb[IPA_SMMU_CB_UC];
505}
506
507static int ipa3_open(struct inode *inode, struct file *filp)
508{
509 struct ipa3_context *ctx = NULL;
510
511 IPADBG_LOW("ENTER\n");
512 ctx = container_of(inode->i_cdev, struct ipa3_context, cdev);
513 filp->private_data = ctx;
514
515 return 0;
516}
517
Amir Levy9659e592016-10-27 18:08:27 +0300518static void ipa3_wan_msg_free_cb(void *buff, u32 len, u32 type)
519{
520 if (!buff) {
521 IPAERR("Null buffer\n");
522 return;
523 }
524
525 if (type != WAN_UPSTREAM_ROUTE_ADD &&
526 type != WAN_UPSTREAM_ROUTE_DEL &&
527 type != WAN_EMBMS_CONNECT) {
528 IPAERR("Wrong type given. buff %p type %d\n", buff, type);
529 return;
530 }
531
532 kfree(buff);
533}
534
Mohammed Javidb4b5ef42017-08-29 01:05:46 +0530535static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_cache)
Amir Levy9659e592016-10-27 18:08:27 +0300536{
537 int retval;
538 struct ipa_wan_msg *wan_msg;
539 struct ipa_msg_meta msg_meta;
Mohammed Javid616bb992017-10-03 13:10:05 +0530540 struct ipa_wan_msg cache_wan_msg;
Amir Levy9659e592016-10-27 18:08:27 +0300541
542 wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
543 if (!wan_msg) {
544 IPAERR("no memory\n");
545 return -ENOMEM;
546 }
547
548 if (copy_from_user((u8 *)wan_msg, (u8 *)usr_param,
549 sizeof(struct ipa_wan_msg))) {
550 kfree(wan_msg);
551 return -EFAULT;
552 }
553
Mohammed Javid616bb992017-10-03 13:10:05 +0530554 memcpy(&cache_wan_msg, wan_msg, sizeof(cache_wan_msg));
555
Amir Levy9659e592016-10-27 18:08:27 +0300556 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
557 msg_meta.msg_type = msg_type;
558 msg_meta.msg_len = sizeof(struct ipa_wan_msg);
559 retval = ipa3_send_msg(&msg_meta, wan_msg, ipa3_wan_msg_free_cb);
560 if (retval) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +0530561 IPAERR_RL("ipa3_send_msg failed: %d\n", retval);
Amir Levy9659e592016-10-27 18:08:27 +0300562 kfree(wan_msg);
563 return retval;
564 }
565
Mohammed Javidb4b5ef42017-08-29 01:05:46 +0530566 if (is_cache) {
567 mutex_lock(&ipa3_ctx->ipa_cne_evt_lock);
568
569 /* cache the cne event */
570 memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[
571 ipa3_ctx->num_ipa_cne_evt_req].wan_msg,
Mohammed Javid616bb992017-10-03 13:10:05 +0530572 &cache_wan_msg,
573 sizeof(cache_wan_msg));
Mohammed Javidb4b5ef42017-08-29 01:05:46 +0530574
575 memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[
576 ipa3_ctx->num_ipa_cne_evt_req].msg_meta,
577 &msg_meta,
578 sizeof(struct ipa_msg_meta));
579
580 ipa3_ctx->num_ipa_cne_evt_req++;
581 ipa3_ctx->num_ipa_cne_evt_req %= IPA_MAX_NUM_REQ_CACHE;
582 mutex_unlock(&ipa3_ctx->ipa_cne_evt_lock);
583 }
584
Amir Levy9659e592016-10-27 18:08:27 +0300585 return 0;
586}
587
Shihuan Liuc3174f52017-05-04 15:59:13 -0700588static void ipa3_vlan_l2tp_msg_free_cb(void *buff, u32 len, u32 type)
589{
590 if (!buff) {
591 IPAERR("Null buffer\n");
592 return;
593 }
594
595 if (type != ADD_VLAN_IFACE &&
596 type != DEL_VLAN_IFACE &&
597 type != ADD_L2TP_VLAN_MAPPING &&
598 type != DEL_L2TP_VLAN_MAPPING) {
599 IPAERR("Wrong type given. buff %pK type %d\n", buff, type);
600 return;
601 }
602
603 kfree(buff);
604}
605
606static int ipa3_send_vlan_l2tp_msg(unsigned long usr_param, uint8_t msg_type)
607{
608 int retval;
609 struct ipa_ioc_vlan_iface_info *vlan_info;
610 struct ipa_ioc_l2tp_vlan_mapping_info *mapping_info;
611 struct ipa_msg_meta msg_meta;
612
613 if (msg_type == ADD_VLAN_IFACE ||
614 msg_type == DEL_VLAN_IFACE) {
615 vlan_info = kzalloc(sizeof(struct ipa_ioc_vlan_iface_info),
616 GFP_KERNEL);
617 if (!vlan_info) {
618 IPAERR("no memory\n");
619 return -ENOMEM;
620 }
621
622 if (copy_from_user((u8 *)vlan_info, (void __user *)usr_param,
623 sizeof(struct ipa_ioc_vlan_iface_info))) {
624 kfree(vlan_info);
625 return -EFAULT;
626 }
627
628 memset(&msg_meta, 0, sizeof(msg_meta));
629 msg_meta.msg_type = msg_type;
630 msg_meta.msg_len = sizeof(struct ipa_ioc_vlan_iface_info);
631 retval = ipa3_send_msg(&msg_meta, vlan_info,
632 ipa3_vlan_l2tp_msg_free_cb);
633 if (retval) {
634 IPAERR("ipa3_send_msg failed: %d\n", retval);
635 kfree(vlan_info);
636 return retval;
637 }
638 } else if (msg_type == ADD_L2TP_VLAN_MAPPING ||
639 msg_type == DEL_L2TP_VLAN_MAPPING) {
640 mapping_info = kzalloc(sizeof(struct
641 ipa_ioc_l2tp_vlan_mapping_info), GFP_KERNEL);
642 if (!mapping_info) {
643 IPAERR("no memory\n");
644 return -ENOMEM;
645 }
646
647 if (copy_from_user((u8 *)mapping_info,
648 (void __user *)usr_param,
649 sizeof(struct ipa_ioc_l2tp_vlan_mapping_info))) {
650 kfree(mapping_info);
651 return -EFAULT;
652 }
653
654 memset(&msg_meta, 0, sizeof(msg_meta));
655 msg_meta.msg_type = msg_type;
656 msg_meta.msg_len = sizeof(struct
657 ipa_ioc_l2tp_vlan_mapping_info);
658 retval = ipa3_send_msg(&msg_meta, mapping_info,
659 ipa3_vlan_l2tp_msg_free_cb);
660 if (retval) {
661 IPAERR("ipa3_send_msg failed: %d\n", retval);
662 kfree(mapping_info);
663 return retval;
664 }
665 } else {
666 IPAERR("Unexpected event\n");
667 return -EFAULT;
668 }
669
670 return 0;
671}
Amir Levy9659e592016-10-27 18:08:27 +0300672
673static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
674{
675 int retval = 0;
676 u32 pyld_sz;
677 u8 header[128] = { 0 };
678 u8 *param = NULL;
679 struct ipa_ioc_nat_alloc_mem nat_mem;
680 struct ipa_ioc_v4_nat_init nat_init;
681 struct ipa_ioc_v4_nat_del nat_del;
Amir Levy05fccd02017-06-13 16:25:45 +0300682 struct ipa_ioc_nat_pdn_entry mdfy_pdn;
Amir Levy9659e592016-10-27 18:08:27 +0300683 struct ipa_ioc_rm_dependency rm_depend;
684 size_t sz;
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200685 int pre_entry;
Amir Levy9659e592016-10-27 18:08:27 +0300686
687 IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
688
Amir Levy9659e592016-10-27 18:08:27 +0300689 if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
690 return -ENOTTY;
691 if (_IOC_NR(cmd) >= IPA_IOCTL_MAX)
692 return -ENOTTY;
693
Amir Levy05532622016-11-28 12:12:01 +0200694 if (!ipa3_is_ready()) {
695 IPAERR("IPA not ready, waiting for init completion\n");
696 wait_for_completion(&ipa3_ctx->init_completion_obj);
697 }
698
Amir Levy9659e592016-10-27 18:08:27 +0300699 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
700
701 switch (cmd) {
702 case IPA_IOC_ALLOC_NAT_MEM:
703 if (copy_from_user((u8 *)&nat_mem, (u8 *)arg,
704 sizeof(struct ipa_ioc_nat_alloc_mem))) {
705 retval = -EFAULT;
706 break;
707 }
708 /* null terminate the string */
709 nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
710
711 if (ipa3_allocate_nat_device(&nat_mem)) {
712 retval = -EFAULT;
713 break;
714 }
715 if (copy_to_user((u8 *)arg, (u8 *)&nat_mem,
716 sizeof(struct ipa_ioc_nat_alloc_mem))) {
717 retval = -EFAULT;
718 break;
719 }
720 break;
721 case IPA_IOC_V4_INIT_NAT:
722 if (copy_from_user((u8 *)&nat_init, (u8 *)arg,
723 sizeof(struct ipa_ioc_v4_nat_init))) {
724 retval = -EFAULT;
725 break;
726 }
727 if (ipa3_nat_init_cmd(&nat_init)) {
728 retval = -EFAULT;
729 break;
730 }
731 break;
732
733 case IPA_IOC_NAT_DMA:
734 if (copy_from_user(header, (u8 *)arg,
735 sizeof(struct ipa_ioc_nat_dma_cmd))) {
736 retval = -EFAULT;
737 break;
738 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200739 pre_entry =
740 ((struct ipa_ioc_nat_dma_cmd *)header)->entries;
Amir Levy9659e592016-10-27 18:08:27 +0300741 pyld_sz =
742 sizeof(struct ipa_ioc_nat_dma_cmd) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200743 pre_entry * sizeof(struct ipa_ioc_nat_dma_one);
Amir Levy9659e592016-10-27 18:08:27 +0300744 param = kzalloc(pyld_sz, GFP_KERNEL);
745 if (!param) {
746 retval = -ENOMEM;
747 break;
748 }
749
750 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
751 retval = -EFAULT;
752 break;
753 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200754 /* add check in case user-space module compromised */
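	/*
	 * the entry count was copied from user space twice (header, then full
	 * payload); a mismatch indicates a TOCTOU race or tampering, so reject
	 * the request. The same re-check is repeated for every variable-length
	 * ioctl below.
	 */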
755 if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
756 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +0530757 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200758 ((struct ipa_ioc_nat_dma_cmd *)param)->entries,
759 pre_entry);
760 retval = -EFAULT;
761 break;
762 }
Amir Levy9659e592016-10-27 18:08:27 +0300763 if (ipa3_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
764 retval = -EFAULT;
765 break;
766 }
767 break;
768
769 case IPA_IOC_V4_DEL_NAT:
770 if (copy_from_user((u8 *)&nat_del, (u8 *)arg,
771 sizeof(struct ipa_ioc_v4_nat_del))) {
772 retval = -EFAULT;
773 break;
774 }
775 if (ipa3_nat_del_cmd(&nat_del)) {
776 retval = -EFAULT;
777 break;
778 }
779 break;
780
Amir Levy05fccd02017-06-13 16:25:45 +0300781 case IPA_IOC_NAT_MODIFY_PDN:
782 if (copy_from_user((u8 *)&mdfy_pdn, (const void __user *)arg,
783 sizeof(struct ipa_ioc_nat_pdn_entry))) {
784 retval = -EFAULT;
785 break;
786 }
Amir Levydc65f4c2017-07-06 09:49:50 +0300787 if (ipa3_nat_mdfy_pdn(&mdfy_pdn)) {
Amir Levy05fccd02017-06-13 16:25:45 +0300788 retval = -EFAULT;
789 break;
790 }
791 break;
792
Amir Levy9659e592016-10-27 18:08:27 +0300793 case IPA_IOC_ADD_HDR:
794 if (copy_from_user(header, (u8 *)arg,
795 sizeof(struct ipa_ioc_add_hdr))) {
796 retval = -EFAULT;
797 break;
798 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200799 pre_entry =
800 ((struct ipa_ioc_add_hdr *)header)->num_hdrs;
Amir Levy9659e592016-10-27 18:08:27 +0300801 pyld_sz =
802 sizeof(struct ipa_ioc_add_hdr) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200803 pre_entry * sizeof(struct ipa_hdr_add);
Amir Levy9659e592016-10-27 18:08:27 +0300804 param = kzalloc(pyld_sz, GFP_KERNEL);
805 if (!param) {
806 retval = -ENOMEM;
807 break;
808 }
809 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
810 retval = -EFAULT;
811 break;
812 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200813 /* add check in case user-space module compromised */
814 if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
815 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +0530816 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200817 ((struct ipa_ioc_add_hdr *)param)->num_hdrs,
818 pre_entry);
819 retval = -EFAULT;
820 break;
821 }
Amir Levy9659e592016-10-27 18:08:27 +0300822 if (ipa3_add_hdr((struct ipa_ioc_add_hdr *)param)) {
823 retval = -EFAULT;
824 break;
825 }
826 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
827 retval = -EFAULT;
828 break;
829 }
830 break;
831
832 case IPA_IOC_DEL_HDR:
833 if (copy_from_user(header, (u8 *)arg,
834 sizeof(struct ipa_ioc_del_hdr))) {
835 retval = -EFAULT;
836 break;
837 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200838 pre_entry =
839 ((struct ipa_ioc_del_hdr *)header)->num_hdls;
Amir Levy9659e592016-10-27 18:08:27 +0300840 pyld_sz =
841 sizeof(struct ipa_ioc_del_hdr) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200842 pre_entry * sizeof(struct ipa_hdr_del);
Amir Levy9659e592016-10-27 18:08:27 +0300843 param = kzalloc(pyld_sz, GFP_KERNEL);
844 if (!param) {
845 retval = -ENOMEM;
846 break;
847 }
848 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
849 retval = -EFAULT;
850 break;
851 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200852 /* add check in case user-space module compromised */
853 if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
854 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +0530855 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200856 ((struct ipa_ioc_del_hdr *)param)->num_hdls,
857 pre_entry);
858 retval = -EFAULT;
859 break;
860 }
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200861 if (ipa3_del_hdr_by_user((struct ipa_ioc_del_hdr *)param,
862 true)) {
Amir Levy9659e592016-10-27 18:08:27 +0300863 retval = -EFAULT;
864 break;
865 }
866 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
867 retval = -EFAULT;
868 break;
869 }
870 break;
871
872 case IPA_IOC_ADD_RT_RULE:
873 if (copy_from_user(header, (u8 *)arg,
874 sizeof(struct ipa_ioc_add_rt_rule))) {
875 retval = -EFAULT;
876 break;
877 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200878 pre_entry =
879 ((struct ipa_ioc_add_rt_rule *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +0300880 pyld_sz =
881 sizeof(struct ipa_ioc_add_rt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200882 pre_entry * sizeof(struct ipa_rt_rule_add);
Amir Levy9659e592016-10-27 18:08:27 +0300883 param = kzalloc(pyld_sz, GFP_KERNEL);
884 if (!param) {
885 retval = -ENOMEM;
886 break;
887 }
888 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
889 retval = -EFAULT;
890 break;
891 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200892 /* add check in case user-space module compromised */
893 if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
894 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +0530895 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200896 ((struct ipa_ioc_add_rt_rule *)param)->
897 num_rules,
898 pre_entry);
899 retval = -EFAULT;
900 break;
901 }
Amir Levy9659e592016-10-27 18:08:27 +0300902 if (ipa3_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
903 retval = -EFAULT;
904 break;
905 }
906 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
907 retval = -EFAULT;
908 break;
909 }
910 break;
911 case IPA_IOC_ADD_RT_RULE_AFTER:
912 if (copy_from_user(header, (u8 *)arg,
913 sizeof(struct ipa_ioc_add_rt_rule_after))) {
914
915 retval = -EFAULT;
916 break;
917 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200918 pre_entry =
919 ((struct ipa_ioc_add_rt_rule_after *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +0300920 pyld_sz =
921 sizeof(struct ipa_ioc_add_rt_rule_after) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200922 pre_entry * sizeof(struct ipa_rt_rule_add);
Amir Levy9659e592016-10-27 18:08:27 +0300923 param = kzalloc(pyld_sz, GFP_KERNEL);
924 if (!param) {
925 retval = -ENOMEM;
926 break;
927 }
928 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
929 retval = -EFAULT;
930 break;
931 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200932 /* add check in case user-space module compromised */
933 if (unlikely(((struct ipa_ioc_add_rt_rule_after *)param)->
934 num_rules != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +0530935 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200936 ((struct ipa_ioc_add_rt_rule_after *)param)->
937 num_rules,
938 pre_entry);
939 retval = -EFAULT;
940 break;
941 }
Amir Levy9659e592016-10-27 18:08:27 +0300942 if (ipa3_add_rt_rule_after(
943 (struct ipa_ioc_add_rt_rule_after *)param)) {
944
945 retval = -EFAULT;
946 break;
947 }
948 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
949 retval = -EFAULT;
950 break;
951 }
952 break;
953
954 case IPA_IOC_MDFY_RT_RULE:
955 if (copy_from_user(header, (u8 *)arg,
956 sizeof(struct ipa_ioc_mdfy_rt_rule))) {
957 retval = -EFAULT;
958 break;
959 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200960 pre_entry =
961 ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +0300962 pyld_sz =
963 sizeof(struct ipa_ioc_mdfy_rt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200964 pre_entry * sizeof(struct ipa_rt_rule_mdfy);
Amir Levy9659e592016-10-27 18:08:27 +0300965 param = kzalloc(pyld_sz, GFP_KERNEL);
966 if (!param) {
967 retval = -ENOMEM;
968 break;
969 }
970 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
971 retval = -EFAULT;
972 break;
973 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200974 /* add check in case user-space module compromised */
975 if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
976 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +0530977 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200978 ((struct ipa_ioc_mdfy_rt_rule *)param)->
979 num_rules,
980 pre_entry);
981 retval = -EFAULT;
982 break;
983 }
Amir Levy9659e592016-10-27 18:08:27 +0300984 if (ipa3_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) {
985 retval = -EFAULT;
986 break;
987 }
988 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
989 retval = -EFAULT;
990 break;
991 }
992 break;
993
994 case IPA_IOC_DEL_RT_RULE:
995 if (copy_from_user(header, (u8 *)arg,
996 sizeof(struct ipa_ioc_del_rt_rule))) {
997 retval = -EFAULT;
998 break;
999 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001000 pre_entry =
1001 ((struct ipa_ioc_del_rt_rule *)header)->num_hdls;
Amir Levy9659e592016-10-27 18:08:27 +03001002 pyld_sz =
1003 sizeof(struct ipa_ioc_del_rt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001004 pre_entry * sizeof(struct ipa_rt_rule_del);
Amir Levy9659e592016-10-27 18:08:27 +03001005 param = kzalloc(pyld_sz, GFP_KERNEL);
1006 if (!param) {
1007 retval = -ENOMEM;
1008 break;
1009 }
1010 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1011 retval = -EFAULT;
1012 break;
1013 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001014 /* add check in case user-space module compromised */
1015 if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
1016 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301017 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001018 ((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
1019 pre_entry);
1020 retval = -EFAULT;
1021 break;
1022 }
Amir Levy9659e592016-10-27 18:08:27 +03001023 if (ipa3_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
1024 retval = -EFAULT;
1025 break;
1026 }
1027 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1028 retval = -EFAULT;
1029 break;
1030 }
1031 break;
1032
1033 case IPA_IOC_ADD_FLT_RULE:
1034 if (copy_from_user(header, (u8 *)arg,
1035 sizeof(struct ipa_ioc_add_flt_rule))) {
1036 retval = -EFAULT;
1037 break;
1038 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001039 pre_entry =
1040 ((struct ipa_ioc_add_flt_rule *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +03001041 pyld_sz =
1042 sizeof(struct ipa_ioc_add_flt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001043 pre_entry * sizeof(struct ipa_flt_rule_add);
Amir Levy9659e592016-10-27 18:08:27 +03001044 param = kzalloc(pyld_sz, GFP_KERNEL);
1045 if (!param) {
1046 retval = -ENOMEM;
1047 break;
1048 }
1049 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1050 retval = -EFAULT;
1051 break;
1052 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001053 /* add check in case user-space module compromised */
1054 if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
1055 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301056 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001057 ((struct ipa_ioc_add_flt_rule *)param)->
1058 num_rules,
1059 pre_entry);
1060 retval = -EFAULT;
1061 break;
1062 }
Amir Levy9659e592016-10-27 18:08:27 +03001063 if (ipa3_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
1064 retval = -EFAULT;
1065 break;
1066 }
1067 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1068 retval = -EFAULT;
1069 break;
1070 }
1071 break;
1072
1073 case IPA_IOC_ADD_FLT_RULE_AFTER:
1074 if (copy_from_user(header, (u8 *)arg,
1075 sizeof(struct ipa_ioc_add_flt_rule_after))) {
1076
1077 retval = -EFAULT;
1078 break;
1079 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001080 pre_entry =
1081 ((struct ipa_ioc_add_flt_rule_after *)header)->
1082 num_rules;
Amir Levy9659e592016-10-27 18:08:27 +03001083 pyld_sz =
1084 sizeof(struct ipa_ioc_add_flt_rule_after) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001085 pre_entry * sizeof(struct ipa_flt_rule_add);
Amir Levy9659e592016-10-27 18:08:27 +03001086 param = kzalloc(pyld_sz, GFP_KERNEL);
1087 if (!param) {
1088 retval = -ENOMEM;
1089 break;
1090 }
1091 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1092 retval = -EFAULT;
1093 break;
1094 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001095 /* add check in case user-space module compromised */
1096 if (unlikely(((struct ipa_ioc_add_flt_rule_after *)param)->
1097 num_rules != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301098 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001099 ((struct ipa_ioc_add_flt_rule_after *)param)->
1100 num_rules,
1101 pre_entry);
1102 retval = -EFAULT;
1103 break;
1104 }
Amir Levy9659e592016-10-27 18:08:27 +03001105 if (ipa3_add_flt_rule_after(
1106 (struct ipa_ioc_add_flt_rule_after *)param)) {
1107 retval = -EFAULT;
1108 break;
1109 }
1110 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1111 retval = -EFAULT;
1112 break;
1113 }
1114 break;
1115
1116 case IPA_IOC_DEL_FLT_RULE:
1117 if (copy_from_user(header, (u8 *)arg,
1118 sizeof(struct ipa_ioc_del_flt_rule))) {
1119 retval = -EFAULT;
1120 break;
1121 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001122 pre_entry =
1123 ((struct ipa_ioc_del_flt_rule *)header)->num_hdls;
Amir Levy9659e592016-10-27 18:08:27 +03001124 pyld_sz =
1125 sizeof(struct ipa_ioc_del_flt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001126 pre_entry * sizeof(struct ipa_flt_rule_del);
Amir Levy9659e592016-10-27 18:08:27 +03001127 param = kzalloc(pyld_sz, GFP_KERNEL);
1128 if (!param) {
1129 retval = -ENOMEM;
1130 break;
1131 }
1132 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1133 retval = -EFAULT;
1134 break;
1135 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001136 /* add check in case user-space module compromised */
1137 if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
1138 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301139 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001140 ((struct ipa_ioc_del_flt_rule *)param)->
1141 num_hdls,
1142 pre_entry);
1143 retval = -EFAULT;
1144 break;
1145 }
Amir Levy9659e592016-10-27 18:08:27 +03001146 if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
1147 retval = -EFAULT;
1148 break;
1149 }
1150 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1151 retval = -EFAULT;
1152 break;
1153 }
1154 break;
1155
1156 case IPA_IOC_MDFY_FLT_RULE:
1157 if (copy_from_user(header, (u8 *)arg,
1158 sizeof(struct ipa_ioc_mdfy_flt_rule))) {
1159 retval = -EFAULT;
1160 break;
1161 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001162 pre_entry =
1163 ((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +03001164 pyld_sz =
1165 sizeof(struct ipa_ioc_mdfy_flt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001166 pre_entry * sizeof(struct ipa_flt_rule_mdfy);
Amir Levy9659e592016-10-27 18:08:27 +03001167 param = kzalloc(pyld_sz, GFP_KERNEL);
1168 if (!param) {
1169 retval = -ENOMEM;
1170 break;
1171 }
1172 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1173 retval = -EFAULT;
1174 break;
1175 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001176 /* add check in case user-space module compromised */
1177 if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
1178 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301179 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001180 ((struct ipa_ioc_mdfy_flt_rule *)param)->
1181 num_rules,
1182 pre_entry);
1183 retval = -EFAULT;
1184 break;
1185 }
Amir Levy9659e592016-10-27 18:08:27 +03001186 if (ipa3_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) {
1187 retval = -EFAULT;
1188 break;
1189 }
1190 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1191 retval = -EFAULT;
1192 break;
1193 }
1194 break;
1195
1196 case IPA_IOC_COMMIT_HDR:
1197 retval = ipa3_commit_hdr();
1198 break;
1199 case IPA_IOC_RESET_HDR:
1200 retval = ipa3_reset_hdr();
1201 break;
1202 case IPA_IOC_COMMIT_RT:
1203 retval = ipa3_commit_rt(arg);
1204 break;
1205 case IPA_IOC_RESET_RT:
1206 retval = ipa3_reset_rt(arg);
1207 break;
1208 case IPA_IOC_COMMIT_FLT:
1209 retval = ipa3_commit_flt(arg);
1210 break;
1211 case IPA_IOC_RESET_FLT:
1212 retval = ipa3_reset_flt(arg);
1213 break;
1214 case IPA_IOC_GET_RT_TBL:
1215 if (copy_from_user(header, (u8 *)arg,
1216 sizeof(struct ipa_ioc_get_rt_tbl))) {
1217 retval = -EFAULT;
1218 break;
1219 }
1220 if (ipa3_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) {
1221 retval = -EFAULT;
1222 break;
1223 }
1224 if (copy_to_user((u8 *)arg, header,
1225 sizeof(struct ipa_ioc_get_rt_tbl))) {
1226 retval = -EFAULT;
1227 break;
1228 }
1229 break;
1230 case IPA_IOC_PUT_RT_TBL:
1231 retval = ipa3_put_rt_tbl(arg);
1232 break;
1233 case IPA_IOC_GET_HDR:
1234 if (copy_from_user(header, (u8 *)arg,
1235 sizeof(struct ipa_ioc_get_hdr))) {
1236 retval = -EFAULT;
1237 break;
1238 }
1239 if (ipa3_get_hdr((struct ipa_ioc_get_hdr *)header)) {
1240 retval = -EFAULT;
1241 break;
1242 }
1243 if (copy_to_user((u8 *)arg, header,
1244 sizeof(struct ipa_ioc_get_hdr))) {
1245 retval = -EFAULT;
1246 break;
1247 }
1248 break;
1249 case IPA_IOC_PUT_HDR:
1250 retval = ipa3_put_hdr(arg);
1251 break;
1252 case IPA_IOC_SET_FLT:
1253 retval = ipa3_cfg_filter(arg);
1254 break;
1255 case IPA_IOC_COPY_HDR:
1256 if (copy_from_user(header, (u8 *)arg,
1257 sizeof(struct ipa_ioc_copy_hdr))) {
1258 retval = -EFAULT;
1259 break;
1260 }
1261 if (ipa3_copy_hdr((struct ipa_ioc_copy_hdr *)header)) {
1262 retval = -EFAULT;
1263 break;
1264 }
1265 if (copy_to_user((u8 *)arg, header,
1266 sizeof(struct ipa_ioc_copy_hdr))) {
1267 retval = -EFAULT;
1268 break;
1269 }
1270 break;
1271 case IPA_IOC_QUERY_INTF:
1272 if (copy_from_user(header, (u8 *)arg,
1273 sizeof(struct ipa_ioc_query_intf))) {
1274 retval = -EFAULT;
1275 break;
1276 }
1277 if (ipa3_query_intf((struct ipa_ioc_query_intf *)header)) {
1278 retval = -1;
1279 break;
1280 }
1281 if (copy_to_user((u8 *)arg, header,
1282 sizeof(struct ipa_ioc_query_intf))) {
1283 retval = -EFAULT;
1284 break;
1285 }
1286 break;
1287 case IPA_IOC_QUERY_INTF_TX_PROPS:
1288 sz = sizeof(struct ipa_ioc_query_intf_tx_props);
1289 if (copy_from_user(header, (u8 *)arg, sz)) {
1290 retval = -EFAULT;
1291 break;
1292 }
1293
1294 if (((struct ipa_ioc_query_intf_tx_props *)header)->num_tx_props
1295 > IPA_NUM_PROPS_MAX) {
1296 retval = -EFAULT;
1297 break;
1298 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001299 pre_entry =
1300 ((struct ipa_ioc_query_intf_tx_props *)
1301 header)->num_tx_props;
1302 pyld_sz = sz + pre_entry *
Amir Levy9659e592016-10-27 18:08:27 +03001303 sizeof(struct ipa_ioc_tx_intf_prop);
1304 param = kzalloc(pyld_sz, GFP_KERNEL);
1305 if (!param) {
1306 retval = -ENOMEM;
1307 break;
1308 }
1309 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1310 retval = -EFAULT;
1311 break;
1312 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001313 /* add check in case user-space module compromised */
1314 if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
1315 param)->num_tx_props
1316 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301317 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001318 ((struct ipa_ioc_query_intf_tx_props *)
1319 param)->num_tx_props, pre_entry);
1320 retval = -EFAULT;
1321 break;
1322 }
Amir Levy9659e592016-10-27 18:08:27 +03001323 if (ipa3_query_intf_tx_props(
1324 (struct ipa_ioc_query_intf_tx_props *)param)) {
1325 retval = -1;
1326 break;
1327 }
1328 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1329 retval = -EFAULT;
1330 break;
1331 }
1332 break;
1333 case IPA_IOC_QUERY_INTF_RX_PROPS:
1334 sz = sizeof(struct ipa_ioc_query_intf_rx_props);
1335 if (copy_from_user(header, (u8 *)arg, sz)) {
1336 retval = -EFAULT;
1337 break;
1338 }
1339
1340 if (((struct ipa_ioc_query_intf_rx_props *)header)->num_rx_props
1341 > IPA_NUM_PROPS_MAX) {
1342 retval = -EFAULT;
1343 break;
1344 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001345 pre_entry =
1346 ((struct ipa_ioc_query_intf_rx_props *)
1347 header)->num_rx_props;
1348 pyld_sz = sz + pre_entry *
Amir Levy9659e592016-10-27 18:08:27 +03001349 sizeof(struct ipa_ioc_rx_intf_prop);
1350 param = kzalloc(pyld_sz, GFP_KERNEL);
1351 if (!param) {
1352 retval = -ENOMEM;
1353 break;
1354 }
1355 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1356 retval = -EFAULT;
1357 break;
1358 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001359 /* add check in case user-space module compromised */
1360 if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
1361 param)->num_rx_props != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301362 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001363 ((struct ipa_ioc_query_intf_rx_props *)
1364 param)->num_rx_props, pre_entry);
1365 retval = -EFAULT;
1366 break;
1367 }
Amir Levy9659e592016-10-27 18:08:27 +03001368 if (ipa3_query_intf_rx_props(
1369 (struct ipa_ioc_query_intf_rx_props *)param)) {
1370 retval = -1;
1371 break;
1372 }
1373 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1374 retval = -EFAULT;
1375 break;
1376 }
1377 break;
1378 case IPA_IOC_QUERY_INTF_EXT_PROPS:
1379 sz = sizeof(struct ipa_ioc_query_intf_ext_props);
1380 if (copy_from_user(header, (u8 *)arg, sz)) {
1381 retval = -EFAULT;
1382 break;
1383 }
1384
1385 if (((struct ipa_ioc_query_intf_ext_props *)
1386 header)->num_ext_props > IPA_NUM_PROPS_MAX) {
1387 retval = -EFAULT;
1388 break;
1389 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001390 pre_entry =
1391 ((struct ipa_ioc_query_intf_ext_props *)
1392 header)->num_ext_props;
1393 pyld_sz = sz + pre_entry *
Amir Levy9659e592016-10-27 18:08:27 +03001394 sizeof(struct ipa_ioc_ext_intf_prop);
1395 param = kzalloc(pyld_sz, GFP_KERNEL);
1396 if (!param) {
1397 retval = -ENOMEM;
1398 break;
1399 }
1400 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1401 retval = -EFAULT;
1402 break;
1403 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001404 /* add check in case user-space module compromised */
1405 if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
1406 param)->num_ext_props != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301407 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001408 ((struct ipa_ioc_query_intf_ext_props *)
1409 param)->num_ext_props, pre_entry);
1410 retval = -EFAULT;
1411 break;
1412 }
Amir Levy9659e592016-10-27 18:08:27 +03001413 if (ipa3_query_intf_ext_props(
1414 (struct ipa_ioc_query_intf_ext_props *)param)) {
1415 retval = -1;
1416 break;
1417 }
1418 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1419 retval = -EFAULT;
1420 break;
1421 }
1422 break;
1423 case IPA_IOC_PULL_MSG:
1424 if (copy_from_user(header, (u8 *)arg,
1425 sizeof(struct ipa_msg_meta))) {
1426 retval = -EFAULT;
1427 break;
1428 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001429 pre_entry =
Amir Levy9659e592016-10-27 18:08:27 +03001430 ((struct ipa_msg_meta *)header)->msg_len;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001431 pyld_sz = sizeof(struct ipa_msg_meta) +
1432 pre_entry;
Amir Levy9659e592016-10-27 18:08:27 +03001433 param = kzalloc(pyld_sz, GFP_KERNEL);
1434 if (!param) {
1435 retval = -ENOMEM;
1436 break;
1437 }
1438 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1439 retval = -EFAULT;
1440 break;
1441 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001442 /* add check in case user-space module compromised */
1443 if (unlikely(((struct ipa_msg_meta *)param)->msg_len
1444 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301445 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001446 ((struct ipa_msg_meta *)param)->msg_len,
1447 pre_entry);
1448 retval = -EFAULT;
1449 break;
1450 }
Amir Levy9659e592016-10-27 18:08:27 +03001451 if (ipa3_pull_msg((struct ipa_msg_meta *)param,
1452 (char *)param + sizeof(struct ipa_msg_meta),
1453 ((struct ipa_msg_meta *)param)->msg_len) !=
1454 ((struct ipa_msg_meta *)param)->msg_len) {
1455 retval = -1;
1456 break;
1457 }
1458 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1459 retval = -EFAULT;
1460 break;
1461 }
1462 break;
1463 case IPA_IOC_RM_ADD_DEPENDENCY:
Michael Adisumarta3e350812017-09-18 14:54:36 -07001464 /* deprecate if IPA PM is used */
1465 if (ipa3_ctx->use_ipa_pm)
 1466 break; /* no-op, but keep the active clients vote balanced */
1467
Amir Levy9659e592016-10-27 18:08:27 +03001468 if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
1469 sizeof(struct ipa_ioc_rm_dependency))) {
1470 retval = -EFAULT;
1471 break;
1472 }
1473 retval = ipa_rm_add_dependency_from_ioctl(
1474 rm_depend.resource_name, rm_depend.depends_on_name);
1475 break;
1476 case IPA_IOC_RM_DEL_DEPENDENCY:
Michael Adisumarta3e350812017-09-18 14:54:36 -07001477 /* deprecate if IPA PM is used */
1478 if (ipa3_ctx->use_ipa_pm)
 1479 break;
1480
Amir Levy9659e592016-10-27 18:08:27 +03001481 if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
1482 sizeof(struct ipa_ioc_rm_dependency))) {
1483 retval = -EFAULT;
1484 break;
1485 }
1486 retval = ipa_rm_delete_dependency_from_ioctl(
1487 rm_depend.resource_name, rm_depend.depends_on_name);
1488 break;
1489 case IPA_IOC_GENERATE_FLT_EQ:
1490 {
1491 struct ipa_ioc_generate_flt_eq flt_eq;
1492
1493 if (copy_from_user(&flt_eq, (u8 *)arg,
1494 sizeof(struct ipa_ioc_generate_flt_eq))) {
1495 retval = -EFAULT;
1496 break;
1497 }
1498 if (ipahal_flt_generate_equation(flt_eq.ip,
1499 &flt_eq.attrib, &flt_eq.eq_attrib)) {
1500 retval = -EFAULT;
1501 break;
1502 }
1503 if (copy_to_user((u8 *)arg, &flt_eq,
1504 sizeof(struct ipa_ioc_generate_flt_eq))) {
1505 retval = -EFAULT;
1506 break;
1507 }
1508 break;
1509 }
1510 case IPA_IOC_QUERY_EP_MAPPING:
1511 {
1512 retval = ipa3_get_ep_mapping(arg);
1513 break;
1514 }
1515 case IPA_IOC_QUERY_RT_TBL_INDEX:
1516 if (copy_from_user(header, (u8 *)arg,
1517 sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
1518 retval = -EFAULT;
1519 break;
1520 }
1521 if (ipa3_query_rt_index(
1522 (struct ipa_ioc_get_rt_tbl_indx *)header)) {
1523 retval = -EFAULT;
1524 break;
1525 }
1526 if (copy_to_user((u8 *)arg, header,
1527 sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
1528 retval = -EFAULT;
1529 break;
1530 }
1531 break;
1532 case IPA_IOC_WRITE_QMAPID:
1533 if (copy_from_user(header, (u8 *)arg,
1534 sizeof(struct ipa_ioc_write_qmapid))) {
1535 retval = -EFAULT;
1536 break;
1537 }
1538 if (ipa3_write_qmap_id((struct ipa_ioc_write_qmapid *)header)) {
1539 retval = -EFAULT;
1540 break;
1541 }
1542 if (copy_to_user((u8 *)arg, header,
1543 sizeof(struct ipa_ioc_write_qmapid))) {
1544 retval = -EFAULT;
1545 break;
1546 }
1547 break;
1548 case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD:
Mohammed Javidb4b5ef42017-08-29 01:05:46 +05301549 retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD, true);
Amir Levy9659e592016-10-27 18:08:27 +03001550 if (retval) {
1551 IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
1552 break;
1553 }
1554 break;
1555 case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL:
Mohammed Javidb4b5ef42017-08-29 01:05:46 +05301556 retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL, true);
Amir Levy9659e592016-10-27 18:08:27 +03001557 if (retval) {
1558 IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
1559 break;
1560 }
1561 break;
1562 case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED:
Mohammed Javidb4b5ef42017-08-29 01:05:46 +05301563 retval = ipa3_send_wan_msg(arg, WAN_EMBMS_CONNECT, false);
Amir Levy9659e592016-10-27 18:08:27 +03001564 if (retval) {
1565 IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
1566 break;
1567 }
1568 break;
1569 case IPA_IOC_ADD_HDR_PROC_CTX:
1570 if (copy_from_user(header, (u8 *)arg,
1571 sizeof(struct ipa_ioc_add_hdr_proc_ctx))) {
1572 retval = -EFAULT;
1573 break;
1574 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001575 pre_entry =
1576 ((struct ipa_ioc_add_hdr_proc_ctx *)
1577 header)->num_proc_ctxs;
Amir Levy9659e592016-10-27 18:08:27 +03001578 pyld_sz =
1579 sizeof(struct ipa_ioc_add_hdr_proc_ctx) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001580 pre_entry * sizeof(struct ipa_hdr_proc_ctx_add);
Amir Levy9659e592016-10-27 18:08:27 +03001581 param = kzalloc(pyld_sz, GFP_KERNEL);
1582 if (!param) {
1583 retval = -ENOMEM;
1584 break;
1585 }
1586 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1587 retval = -EFAULT;
1588 break;
1589 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001590 /* add check in case user-space module compromised */
1591 if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
1592 param)->num_proc_ctxs != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301593 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001594 ((struct ipa_ioc_add_hdr_proc_ctx *)
1595 param)->num_proc_ctxs, pre_entry);
1596 retval = -EFAULT;
1597 break;
1598 }
Amir Levy9659e592016-10-27 18:08:27 +03001599 if (ipa3_add_hdr_proc_ctx(
1600 (struct ipa_ioc_add_hdr_proc_ctx *)param)) {
1601 retval = -EFAULT;
1602 break;
1603 }
1604 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1605 retval = -EFAULT;
1606 break;
1607 }
1608 break;
1609 case IPA_IOC_DEL_HDR_PROC_CTX:
1610 if (copy_from_user(header, (u8 *)arg,
1611 sizeof(struct ipa_ioc_del_hdr_proc_ctx))) {
1612 retval = -EFAULT;
1613 break;
1614 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001615 pre_entry =
1616 ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls;
Amir Levy9659e592016-10-27 18:08:27 +03001617 pyld_sz =
1618 sizeof(struct ipa_ioc_del_hdr_proc_ctx) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001619 pre_entry * sizeof(struct ipa_hdr_proc_ctx_del);
Amir Levy9659e592016-10-27 18:08:27 +03001620 param = kzalloc(pyld_sz, GFP_KERNEL);
1621 if (!param) {
1622 retval = -ENOMEM;
1623 break;
1624 }
1625 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1626 retval = -EFAULT;
1627 break;
1628 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001629 /* add check in case user-space module compromised */
1630 if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
1631 param)->num_hdls != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301632 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001633 ((struct ipa_ioc_del_hdr_proc_ctx *)param)->
1634 num_hdls,
1635 pre_entry);
1636 retval = -EFAULT;
1637 break;
1638 }
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001639 if (ipa3_del_hdr_proc_ctx_by_user(
1640 (struct ipa_ioc_del_hdr_proc_ctx *)param, true)) {
Amir Levy9659e592016-10-27 18:08:27 +03001641 retval = -EFAULT;
1642 break;
1643 }
1644 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1645 retval = -EFAULT;
1646 break;
1647 }
1648 break;
1649
1650 case IPA_IOC_GET_HW_VERSION:
1651 pyld_sz = sizeof(enum ipa_hw_type);
1652 param = kzalloc(pyld_sz, GFP_KERNEL);
1653 if (!param) {
1654 retval = -ENOMEM;
1655 break;
1656 }
1657 memcpy(param, &ipa3_ctx->ipa_hw_type, pyld_sz);
1658 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1659 retval = -EFAULT;
1660 break;
1661 }
1662 break;
1663
Shihuan Liuc3174f52017-05-04 15:59:13 -07001664 case IPA_IOC_ADD_VLAN_IFACE:
1665 if (ipa3_send_vlan_l2tp_msg(arg, ADD_VLAN_IFACE)) {
1666 retval = -EFAULT;
1667 break;
1668 }
1669 break;
1670
1671 case IPA_IOC_DEL_VLAN_IFACE:
1672 if (ipa3_send_vlan_l2tp_msg(arg, DEL_VLAN_IFACE)) {
1673 retval = -EFAULT;
1674 break;
1675 }
1676 break;
1677
1678 case IPA_IOC_ADD_L2TP_VLAN_MAPPING:
1679 if (ipa3_send_vlan_l2tp_msg(arg, ADD_L2TP_VLAN_MAPPING)) {
1680 retval = -EFAULT;
1681 break;
1682 }
1683 break;
1684
1685 case IPA_IOC_DEL_L2TP_VLAN_MAPPING:
1686 if (ipa3_send_vlan_l2tp_msg(arg, DEL_L2TP_VLAN_MAPPING)) {
1687 retval = -EFAULT;
1688 break;
1689 }
1690 break;
1691
Amir Levy9659e592016-10-27 18:08:27 +03001692 default: /* redundant, as cmd was checked against MAXNR */
1693 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1694 return -ENOTTY;
1695 }
1696 kfree(param);
1697 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1698
1699 return retval;
1700}
1701
1702/**
1703* ipa3_setup_dflt_rt_tables() - Setup default routing tables
1704*
1705* Return codes:
1706* 0: success
1707* -ENOMEM: failed to allocate memory
1708* -EPERM: failed to add the tables
1709*/
1710int ipa3_setup_dflt_rt_tables(void)
1711{
1712 struct ipa_ioc_add_rt_rule *rt_rule;
1713 struct ipa_rt_rule_add *rt_rule_entry;
1714
1715 rt_rule =
1716 kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
1717 sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
1718 if (!rt_rule) {
1719 IPAERR("fail to alloc mem\n");
1720 return -ENOMEM;
1721 }
1722 /* setup a default v4 route to point to Apps */
1723 rt_rule->num_rules = 1;
1724 rt_rule->commit = 1;
1725 rt_rule->ip = IPA_IP_v4;
1726 strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
1727 IPA_RESOURCE_NAME_MAX);
1728
1729 rt_rule_entry = &rt_rule->rules[0];
1730 rt_rule_entry->at_rear = 1;
1731 rt_rule_entry->rule.dst = IPA_CLIENT_APPS_LAN_CONS;
1732 rt_rule_entry->rule.hdr_hdl = ipa3_ctx->excp_hdr_hdl;
1733 rt_rule_entry->rule.retain_hdr = 1;
1734
1735 if (ipa3_add_rt_rule(rt_rule)) {
1736 IPAERR("fail to add dflt v4 rule\n");
1737 kfree(rt_rule);
1738 return -EPERM;
1739 }
1740 IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
1741 ipa3_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
1742
1743 /* setup a default v6 route to point to A5 */
1744 rt_rule->ip = IPA_IP_v6;
1745 if (ipa3_add_rt_rule(rt_rule)) {
1746 IPAERR("fail to add dflt v6 rule\n");
1747 kfree(rt_rule);
1748 return -EPERM;
1749 }
1750 IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
1751 ipa3_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
1752
1753 /*
1754 * because these tables are the very first to be added, they will both
1755 * have the same index (0) which is essential for programming the
1756 * "route" end-point config
1757 */
1758
1759 kfree(rt_rule);
1760
1761 return 0;
1762}
1763
1764static int ipa3_setup_exception_path(void)
1765{
1766 struct ipa_ioc_add_hdr *hdr;
1767 struct ipa_hdr_add *hdr_entry;
1768 struct ipahal_reg_route route = { 0 };
1769 int ret;
1770
1771 /* install the basic exception header */
1772 hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
1773 sizeof(struct ipa_hdr_add), GFP_KERNEL);
1774 if (!hdr) {
1775 IPAERR("fail to alloc exception hdr\n");
1776 return -ENOMEM;
1777 }
1778 hdr->num_hdrs = 1;
1779 hdr->commit = 1;
1780 hdr_entry = &hdr->hdr[0];
1781
1782 strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, IPA_RESOURCE_NAME_MAX);
1783 hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH;
1784
1785 if (ipa3_add_hdr(hdr)) {
1786 IPAERR("fail to add exception hdr\n");
1787 ret = -EPERM;
1788 goto bail;
1789 }
1790
1791 if (hdr_entry->status) {
1792 IPAERR("fail to add exception hdr\n");
1793 ret = -EPERM;
1794 goto bail;
1795 }
1796
1797 ipa3_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
1798
1799 /* set the route register to pass exception packets to Apps */
1800 route.route_def_pipe = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
1801 route.route_frag_def_pipe = ipa3_get_ep_mapping(
1802 IPA_CLIENT_APPS_LAN_CONS);
1803 route.route_def_hdr_table = !ipa3_ctx->hdr_tbl_lcl;
1804 route.route_def_retain_hdr = 1;
1805
1806 if (ipa3_cfg_route(&route)) {
1807 IPAERR("fail to add exception hdr\n");
1808 ret = -EPERM;
1809 goto bail;
1810 }
1811
1812 ret = 0;
1813bail:
1814 kfree(hdr);
1815 return ret;
1816}
1817
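/*
 * ipa3_init_smem_region() - zero-initialize a region of IPA local memory
 * (SRAM) by DMA-ing a zero-filled system buffer into it with a
 * DMA_SHARED_MEM immediate command.
 */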
1818static int ipa3_init_smem_region(int memory_region_size,
1819 int memory_region_offset)
1820{
1821 struct ipahal_imm_cmd_dma_shared_mem cmd;
1822 struct ipahal_imm_cmd_pyld *cmd_pyld;
1823 struct ipa3_desc desc;
1824 struct ipa_mem_buffer mem;
1825 int rc;
1826
1827 if (memory_region_size == 0)
1828 return 0;
1829
1830 memset(&desc, 0, sizeof(desc));
1831 memset(&cmd, 0, sizeof(cmd));
1832 memset(&mem, 0, sizeof(mem));
1833
1834 mem.size = memory_region_size;
1835 mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
1836 &mem.phys_base, GFP_KERNEL);
1837 if (!mem.base) {
1838 IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
1839 return -ENOMEM;
1840 }
1841
1842 memset(mem.base, 0, mem.size);
1843 cmd.is_read = false;
1844 cmd.skip_pipeline_clear = false;
1845 cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
1846 cmd.size = mem.size;
1847 cmd.system_addr = mem.phys_base;
1848 cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
1849 memory_region_offset;
1850 cmd_pyld = ipahal_construct_imm_cmd(
1851 IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
1852 if (!cmd_pyld) {
1853 IPAERR("failed to construct dma_shared_mem imm cmd\n");
1854 return -ENOMEM;
1855 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -07001856 desc.opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03001857 desc.pyld = cmd_pyld->data;
1858 desc.len = cmd_pyld->len;
1859 desc.type = IPA_IMM_CMD_DESC;
1860
1861 rc = ipa3_send_cmd(1, &desc);
1862 if (rc) {
1863 IPAERR("failed to send immediate command (error %d)\n", rc);
1864 rc = -EFAULT;
1865 }
1866
1867 ipahal_destroy_imm_cmd(cmd_pyld);
1868 dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
1869 mem.phys_base);
1870
1871 return rc;
1872}
1873
1874/**
1875* ipa3_init_q6_smem() - Initialize Q6 general memory and
1876* header memory regions in IPA.
1877*
1878* Return codes:
1879* 0: success
1880* -ENOMEM: failed to allocate dma memory
1881* -EFAULT: failed to send IPA command to initialize the memory
1882*/
1883int ipa3_init_q6_smem(void)
1884{
1885 int rc;
1886
1887 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
1888
1889 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_size),
1890 IPA_MEM_PART(modem_ofst));
1891 if (rc) {
1892 IPAERR("failed to initialize Modem RAM memory\n");
1893 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1894 return rc;
1895 }
1896
1897 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_size),
1898 IPA_MEM_PART(modem_hdr_ofst));
1899 if (rc) {
1900 IPAERR("failed to initialize Modem HDRs RAM memory\n");
1901 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1902 return rc;
1903 }
1904
1905 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_proc_ctx_size),
1906 IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
1907 if (rc) {
1908 IPAERR("failed to initialize Modem proc ctx RAM memory\n");
1909 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1910 return rc;
1911 }
1912
1913 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_comp_decomp_size),
1914 IPA_MEM_PART(modem_comp_decomp_ofst));
1915 if (rc) {
1916 IPAERR("failed to initialize Modem Comp/Decomp RAM memory\n");
1917 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1918 return rc;
1919 }
1920 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1921
1922 return rc;
1923}
1924
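/*
 * ipa3_destroy_imm() - descriptor completion callback that releases the
 * immediate command payload passed in user1.
 */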
1925static void ipa3_destroy_imm(void *user1, int user2)
1926{
1927 ipahal_destroy_imm_cmd(user1);
1928}
1929
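/*
 * ipa3_q6_pipe_delay() - set or clear the endpoint delay bit on every Q6
 * producer pipe via the IPA_ENDP_INIT_CTRL_n register.
 */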
1930static void ipa3_q6_pipe_delay(bool delay)
1931{
1932 int client_idx;
1933 int ep_idx;
1934 struct ipa_ep_cfg_ctrl ep_ctrl;
1935
1936 memset(&ep_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
1937 ep_ctrl.ipa_ep_delay = delay;
1938
1939 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
1940 if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
1941 ep_idx = ipa3_get_ep_mapping(client_idx);
1942 if (ep_idx == -1)
1943 continue;
1944
1945 ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n,
1946 ep_idx, &ep_ctrl);
1947 }
1948 }
1949}
1950
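/*
 * ipa3_q6_avoid_holb() - enable head-of-line blocking drop with a zero
 * timer on every Q6 consumer pipe and, prior to IPA v4.0, also suspend
 * those pipes.
 */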
1951static void ipa3_q6_avoid_holb(void)
1952{
1953 int ep_idx;
1954 int client_idx;
1955 struct ipa_ep_cfg_ctrl ep_suspend;
1956 struct ipa_ep_cfg_holb ep_holb;
1957
1958 memset(&ep_suspend, 0, sizeof(ep_suspend));
1959 memset(&ep_holb, 0, sizeof(ep_holb));
1960
1961 ep_suspend.ipa_ep_suspend = true;
1962 ep_holb.tmr_val = 0;
1963 ep_holb.en = 1;
1964
1965 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
1966 if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
1967 ep_idx = ipa3_get_ep_mapping(client_idx);
1968 if (ep_idx == -1)
1969 continue;
1970
1971 /*
1972 * ipa3_cfg_ep_holb is not used here because we are
 1973			 * setting HOLB on Q6 pipes, and from the APPS perspective
 1974			 * they are not valid; therefore, that function
 1975			 * would fail.
1976 */
1977 ipahal_write_reg_n_fields(
1978 IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
1979 ep_idx, &ep_holb);
1980 ipahal_write_reg_n_fields(
1981 IPA_ENDP_INIT_HOL_BLOCK_EN_n,
1982 ep_idx, &ep_holb);
1983
Skylar Changa699afd2017-06-06 10:06:21 -07001984 /* from IPA 4.0 pipe suspend is not supported */
1985 if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
1986 ipahal_write_reg_n_fields(
1987 IPA_ENDP_INIT_CTRL_n,
1988 ep_idx, &ep_suspend);
Amir Levy9659e592016-10-27 18:08:27 +03001989 }
1990 }
1991}
1992
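/*
 * ipa3_halt_q6_cons_gsi_channels() - ask GSI to halt the channel behind
 * every Q6 consumer pipe, retrying a bounded number of times while GSI
 * reports the channel as busy (GSI_STATUS_AGAIN).
 */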
Skylar Chang94692c92017-03-01 09:07:11 -08001993static void ipa3_halt_q6_cons_gsi_channels(void)
1994{
1995 int ep_idx;
1996 int client_idx;
1997 const struct ipa_gsi_ep_config *gsi_ep_cfg;
Michael Adisumartaf01e9fd2017-08-31 12:23:51 -07001998 int i;
Skylar Chang94692c92017-03-01 09:07:11 -08001999 int ret;
2000 int code = 0;
2001
2002 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
2003 if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
2004 ep_idx = ipa3_get_ep_mapping(client_idx);
2005 if (ep_idx == -1)
2006 continue;
2007
Skylar Changc1f15312017-05-09 14:14:32 -07002008 gsi_ep_cfg = ipa3_get_gsi_ep_info(client_idx);
Skylar Chang94692c92017-03-01 09:07:11 -08002009 if (!gsi_ep_cfg) {
2010 IPAERR("failed to get GSI config\n");
2011 ipa_assert();
2012 return;
2013 }
2014
2015 ret = gsi_halt_channel_ee(
2016 gsi_ep_cfg->ipa_gsi_chan_num, gsi_ep_cfg->ee,
2017 &code);
Michael Adisumartaf01e9fd2017-08-31 12:23:51 -07002018 for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY &&
2019 ret == -GSI_STATUS_AGAIN; i++) {
2020 IPADBG(
 2021				"ch %d ee %d with code %d is busy, trying again\n",
2022 gsi_ep_cfg->ipa_gsi_chan_num,
2023 gsi_ep_cfg->ee,
2024 code);
2025 usleep_range(IPA_GSI_CHANNEL_HALT_MIN_SLEEP,
2026 IPA_GSI_CHANNEL_HALT_MAX_SLEEP);
2027 ret = gsi_halt_channel_ee(
2028 gsi_ep_cfg->ipa_gsi_chan_num,
2029 gsi_ep_cfg->ee, &code);
2030 }
Skylar Chang94692c92017-03-01 09:07:11 -08002031 if (ret == GSI_STATUS_SUCCESS)
2032 IPADBG("halted gsi ch %d ee %d with code %d\n",
2033 gsi_ep_cfg->ipa_gsi_chan_num,
2034 gsi_ep_cfg->ee,
2035 code);
2036 else
2037 IPAERR("failed to halt ch %d ee %d code %d\n",
2038 gsi_ep_cfg->ipa_gsi_chan_num,
2039 gsi_ep_cfg->ee,
2040 code);
2041 }
2042 }
2043}
2044
2045
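/*
 * ipa3_q6_clean_q6_flt_tbls() - overwrite the filter table headers of the
 * given IP family and rule type with an empty table image, using one
 * DMA_SHARED_MEM immediate command per filtering pipe that is not
 * configured by the AP.
 */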
Amir Levy9659e592016-10-27 18:08:27 +03002046static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
2047 enum ipa_rule_type rlt)
2048{
2049 struct ipa3_desc *desc;
2050 struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
2051 struct ipahal_imm_cmd_pyld **cmd_pyld;
2052 int retval = 0;
2053 int pipe_idx;
2054 int flt_idx = 0;
2055 int num_cmds = 0;
2056 int index;
2057 u32 lcl_addr_mem_part;
2058 u32 lcl_hdr_sz;
2059 struct ipa_mem_buffer mem;
2060
2061 IPADBG("Entry\n");
2062
2063 if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
2064 IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
2065 return -EINVAL;
2066 }
2067
 2068	/* There are as many filtering tables as there are filtering pipes */
2069 desc = kcalloc(ipa3_ctx->ep_flt_num, sizeof(struct ipa3_desc),
2070 GFP_KERNEL);
2071 if (!desc) {
2072 IPAERR("failed to allocate memory\n");
2073 return -ENOMEM;
2074 }
2075
2076 cmd_pyld = kcalloc(ipa3_ctx->ep_flt_num,
2077 sizeof(struct ipahal_imm_cmd_pyld *), GFP_KERNEL);
2078 if (!cmd_pyld) {
2079 IPAERR("failed to allocate memory\n");
2080 retval = -ENOMEM;
2081 goto free_desc;
2082 }
2083
2084 if (ip == IPA_IP_v4) {
2085 if (rlt == IPA_RULE_HASHABLE) {
2086 lcl_addr_mem_part = IPA_MEM_PART(v4_flt_hash_ofst);
2087 lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
2088 } else {
2089 lcl_addr_mem_part = IPA_MEM_PART(v4_flt_nhash_ofst);
2090 lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
2091 }
2092 } else {
2093 if (rlt == IPA_RULE_HASHABLE) {
2094 lcl_addr_mem_part = IPA_MEM_PART(v6_flt_hash_ofst);
2095 lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
2096 } else {
2097 lcl_addr_mem_part = IPA_MEM_PART(v6_flt_nhash_ofst);
2098 lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
2099 }
2100 }
2101
2102 retval = ipahal_flt_generate_empty_img(1, lcl_hdr_sz, lcl_hdr_sz,
Amir Levy4dc79be2017-02-01 19:18:35 +02002103 0, &mem, true);
Amir Levy9659e592016-10-27 18:08:27 +03002104 if (retval) {
2105 IPAERR("failed to generate flt single tbl empty img\n");
2106 goto free_cmd_pyld;
2107 }
2108
2109 for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes; pipe_idx++) {
2110 if (!ipa_is_ep_support_flt(pipe_idx))
2111 continue;
2112
2113 /*
2114 * Iterating over all the filtering pipes which are either
2115 * invalid but connected or connected but not configured by AP.
2116 */
2117 if (!ipa3_ctx->ep[pipe_idx].valid ||
2118 ipa3_ctx->ep[pipe_idx].skip_ep_cfg) {
2119
2120 cmd.is_read = false;
2121 cmd.skip_pipeline_clear = false;
2122 cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
2123 cmd.size = mem.size;
2124 cmd.system_addr = mem.phys_base;
2125 cmd.local_addr =
2126 ipa3_ctx->smem_restricted_bytes +
2127 lcl_addr_mem_part +
2128 ipahal_get_hw_tbl_hdr_width() +
2129 flt_idx * ipahal_get_hw_tbl_hdr_width();
2130 cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
2131 IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
2132 if (!cmd_pyld[num_cmds]) {
2133 IPAERR("fail construct dma_shared_mem cmd\n");
2134 retval = -ENOMEM;
2135 goto free_empty_img;
2136 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002137 desc[num_cmds].opcode = cmd_pyld[num_cmds]->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002138 desc[num_cmds].pyld = cmd_pyld[num_cmds]->data;
2139 desc[num_cmds].len = cmd_pyld[num_cmds]->len;
2140 desc[num_cmds].type = IPA_IMM_CMD_DESC;
2141 num_cmds++;
2142 }
2143
2144 flt_idx++;
2145 }
2146
2147 IPADBG("Sending %d descriptors for flt tbl clearing\n", num_cmds);
2148 retval = ipa3_send_cmd(num_cmds, desc);
2149 if (retval) {
2150 IPAERR("failed to send immediate command (err %d)\n", retval);
2151 retval = -EFAULT;
2152 }
2153
2154free_empty_img:
2155 ipahal_free_dma_mem(&mem);
2156free_cmd_pyld:
2157 for (index = 0; index < num_cmds; index++)
2158 ipahal_destroy_imm_cmd(cmd_pyld[index]);
2159 kfree(cmd_pyld);
2160free_desc:
2161 kfree(desc);
2162 return retval;
2163}
2164
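/*
 * ipa3_q6_clean_q6_rt_tbls() - overwrite the modem-owned routing table
 * headers (modem_rt_index_lo..modem_rt_index_hi) of the given IP family
 * and rule type with an empty table image via a single DMA_SHARED_MEM
 * immediate command.
 */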
2165static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip,
2166 enum ipa_rule_type rlt)
2167{
2168 struct ipa3_desc *desc;
2169 struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
2170 struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
2171 int retval = 0;
2172 u32 modem_rt_index_lo;
2173 u32 modem_rt_index_hi;
2174 u32 lcl_addr_mem_part;
2175 u32 lcl_hdr_sz;
2176 struct ipa_mem_buffer mem;
2177
2178 IPADBG("Entry\n");
2179
2180 if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
2181 IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
2182 return -EINVAL;
2183 }
2184
2185 if (ip == IPA_IP_v4) {
2186 modem_rt_index_lo = IPA_MEM_PART(v4_modem_rt_index_lo);
2187 modem_rt_index_hi = IPA_MEM_PART(v4_modem_rt_index_hi);
2188 if (rlt == IPA_RULE_HASHABLE) {
2189 lcl_addr_mem_part = IPA_MEM_PART(v4_rt_hash_ofst);
2190 lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
2191 } else {
2192 lcl_addr_mem_part = IPA_MEM_PART(v4_rt_nhash_ofst);
2193 lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
2194 }
2195 } else {
2196 modem_rt_index_lo = IPA_MEM_PART(v6_modem_rt_index_lo);
2197 modem_rt_index_hi = IPA_MEM_PART(v6_modem_rt_index_hi);
2198 if (rlt == IPA_RULE_HASHABLE) {
2199 lcl_addr_mem_part = IPA_MEM_PART(v6_rt_hash_ofst);
2200 lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
2201 } else {
2202 lcl_addr_mem_part = IPA_MEM_PART(v6_rt_nhash_ofst);
2203 lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
2204 }
2205 }
2206
2207 retval = ipahal_rt_generate_empty_img(
2208 modem_rt_index_hi - modem_rt_index_lo + 1,
Amir Levy4dc79be2017-02-01 19:18:35 +02002209 lcl_hdr_sz, lcl_hdr_sz, &mem, true);
Amir Levy9659e592016-10-27 18:08:27 +03002210 if (retval) {
2211 IPAERR("fail generate empty rt img\n");
2212 return -ENOMEM;
2213 }
2214
2215 desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
2216 if (!desc) {
 2217		IPAERR("failed to allocate memory\n");
		retval = -ENOMEM;
 2218		goto free_empty_img;
2219 }
2220
2221 cmd.is_read = false;
2222 cmd.skip_pipeline_clear = false;
2223 cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
2224 cmd.size = mem.size;
2225 cmd.system_addr = mem.phys_base;
2226 cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
2227 lcl_addr_mem_part +
2228 modem_rt_index_lo * ipahal_get_hw_tbl_hdr_width();
2229 cmd_pyld = ipahal_construct_imm_cmd(
2230 IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
2231 if (!cmd_pyld) {
2232 IPAERR("failed to construct dma_shared_mem imm cmd\n");
2233 retval = -ENOMEM;
2234 goto free_desc;
2235 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002236 desc->opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002237 desc->pyld = cmd_pyld->data;
2238 desc->len = cmd_pyld->len;
2239 desc->type = IPA_IMM_CMD_DESC;
2240
2241 IPADBG("Sending 1 descriptor for rt tbl clearing\n");
2242 retval = ipa3_send_cmd(1, desc);
2243 if (retval) {
2244 IPAERR("failed to send immediate command (err %d)\n", retval);
2245 retval = -EFAULT;
2246 }
2247
2248 ipahal_destroy_imm_cmd(cmd_pyld);
2249free_desc:
2250 kfree(desc);
2251free_empty_img:
2252 ipahal_free_dma_mem(&mem);
2253 return retval;
2254}
2255
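/*
 * ipa3_q6_clean_q6_tables() - clean all modem filter and routing tables
 * (v4/v6, hashable/non-hashable) and then flush the filter/route hash
 * caches with a REGISTER_WRITE immediate command.
 */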
2256static int ipa3_q6_clean_q6_tables(void)
2257{
2258 struct ipa3_desc *desc;
2259 struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
2260 struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
2261 int retval;
2262 struct ipahal_reg_fltrt_hash_flush flush;
2263 struct ipahal_reg_valmask valmask;
2264
2265 IPADBG("Entry\n");
2266
2267
2268 if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
2269 IPAERR("failed to clean q6 flt tbls (v4/hashable)\n");
2270 return -EFAULT;
2271 }
2272 if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
2273 IPAERR("failed to clean q6 flt tbls (v6/hashable)\n");
2274 return -EFAULT;
2275 }
2276 if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
2277 IPAERR("failed to clean q6 flt tbls (v4/non-hashable)\n");
2278 return -EFAULT;
2279 }
2280 if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
2281 IPAERR("failed to clean q6 flt tbls (v6/non-hashable)\n");
2282 return -EFAULT;
2283 }
2284
2285 if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
2286 IPAERR("failed to clean q6 rt tbls (v4/hashable)\n");
2287 return -EFAULT;
2288 }
2289 if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
2290 IPAERR("failed to clean q6 rt tbls (v6/hashable)\n");
2291 return -EFAULT;
2292 }
2293 if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
2294 IPAERR("failed to clean q6 rt tbls (v4/non-hashable)\n");
2295 return -EFAULT;
2296 }
2297 if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
2298 IPAERR("failed to clean q6 rt tbls (v6/non-hashable)\n");
2299 return -EFAULT;
2300 }
2301
2302 /* Flush rules cache */
2303 desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
2304 if (!desc) {
2305 IPAERR("failed to allocate memory\n");
2306 return -ENOMEM;
2307 }
2308
2309 flush.v4_flt = true;
2310 flush.v4_rt = true;
2311 flush.v6_flt = true;
2312 flush.v6_rt = true;
2313 ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
2314 reg_write_cmd.skip_pipeline_clear = false;
2315 reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
2316 reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
2317 reg_write_cmd.value = valmask.val;
2318 reg_write_cmd.value_mask = valmask.mask;
2319 cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
2320 &reg_write_cmd, false);
2321 if (!cmd_pyld) {
2322 IPAERR("fail construct register_write imm cmd\n");
2323 retval = -EFAULT;
2324 goto bail_desc;
2325 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002326 desc->opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002327 desc->pyld = cmd_pyld->data;
2328 desc->len = cmd_pyld->len;
2329 desc->type = IPA_IMM_CMD_DESC;
2330
2331 IPADBG("Sending 1 descriptor for tbls flush\n");
2332 retval = ipa3_send_cmd(1, desc);
2333 if (retval) {
2334 IPAERR("failed to send immediate command (err %d)\n", retval);
2335 retval = -EFAULT;
2336 }
2337
2338 ipahal_destroy_imm_cmd(cmd_pyld);
2339
2340bail_desc:
2341 kfree(desc);
2342 IPADBG("Done - retval = %d\n", retval);
2343 return retval;
2344}
2345
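/*
 * ipa3_q6_set_ex_path_to_apps() - build REGISTER_WRITE immediate commands
 * that clear IPA_ENDP_STATUS_n for modem-controlled pipes (and valid pipes
 * not configured by the AP) and submit them through the TAG process,
 * waiting up to CLEANUP_TAG_PROCESS_TIMEOUT msecs for completion.
 */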
2346static int ipa3_q6_set_ex_path_to_apps(void)
2347{
2348 int ep_idx;
2349 int client_idx;
2350 struct ipa3_desc *desc;
2351 int num_descs = 0;
2352 int index;
2353 struct ipahal_imm_cmd_register_write reg_write;
2354 struct ipahal_imm_cmd_pyld *cmd_pyld;
2355 int retval;
Amir Levy9659e592016-10-27 18:08:27 +03002356
2357 desc = kcalloc(ipa3_ctx->ipa_num_pipes, sizeof(struct ipa3_desc),
2358 GFP_KERNEL);
2359 if (!desc) {
2360 IPAERR("failed to allocate memory\n");
2361 return -ENOMEM;
2362 }
2363
2364 /* Set the exception path to AP */
2365 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
2366 ep_idx = ipa3_get_ep_mapping(client_idx);
2367 if (ep_idx == -1)
2368 continue;
2369
Skylar Chang53137112017-05-12 17:13:13 -07002370 /* disable statuses for all modem controlled prod pipes */
2371 if (IPA_CLIENT_IS_Q6_PROD(client_idx) ||
2372 (ipa3_ctx->ep[ep_idx].valid &&
2373 ipa3_ctx->ep[ep_idx].skip_ep_cfg)) {
Amir Levy5807be32017-04-19 14:35:12 +03002374 ipa_assert_on(num_descs >= ipa3_ctx->ipa_num_pipes);
2375
2376 reg_write.skip_pipeline_clear = false;
2377 reg_write.pipeline_clear_options =
2378 IPAHAL_HPS_CLEAR;
2379 reg_write.offset =
2380 ipahal_get_reg_n_ofst(IPA_ENDP_STATUS_n,
2381 ep_idx);
2382 reg_write.value = 0;
2383 reg_write.value_mask = ~0;
2384 cmd_pyld = ipahal_construct_imm_cmd(
2385 IPA_IMM_CMD_REGISTER_WRITE, &reg_write, false);
2386 if (!cmd_pyld) {
2387 IPAERR("fail construct register_write cmd\n");
2388 ipa_assert();
2389 return -EFAULT;
2390 }
2391
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002392 desc[num_descs].opcode = cmd_pyld->opcode;
Amir Levy5807be32017-04-19 14:35:12 +03002393 desc[num_descs].type = IPA_IMM_CMD_DESC;
2394 desc[num_descs].callback = ipa3_destroy_imm;
2395 desc[num_descs].user1 = cmd_pyld;
2396 desc[num_descs].pyld = cmd_pyld->data;
2397 desc[num_descs].len = cmd_pyld->len;
2398 num_descs++;
2399 }
Amir Levy9659e592016-10-27 18:08:27 +03002400 }
2401
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002402 /* Will wait 500msecs for IPA tag process completion */
Amir Levy9659e592016-10-27 18:08:27 +03002403 retval = ipa3_tag_process(desc, num_descs,
2404 msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT));
2405 if (retval) {
2406 IPAERR("TAG process failed! (error %d)\n", retval);
2407 /* For timeout error ipa3_destroy_imm cb will destroy user1 */
2408 if (retval != -ETIME) {
2409 for (index = 0; index < num_descs; index++)
2410 if (desc[index].callback)
2411 desc[index].callback(desc[index].user1,
2412 desc[index].user2);
2413 retval = -EINVAL;
2414 }
2415 }
2416
2417 kfree(desc);
2418
2419 return retval;
2420}
2421
2422/**
2423* ipa3_q6_pre_shutdown_cleanup() - A cleanup for all Q6 related configuration
2424* in IPA HW. This is performed in case of SSR.
2425*
2426* This is a mandatory procedure; if one of the steps fails, the
2427* AP needs to restart.
2428*/
2429void ipa3_q6_pre_shutdown_cleanup(void)
2430{
2431 IPADBG_LOW("ENTER\n");
2432
2433 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2434
2435 ipa3_q6_pipe_delay(true);
2436 ipa3_q6_avoid_holb();
2437 if (ipa3_q6_clean_q6_tables()) {
2438 IPAERR("Failed to clean Q6 tables\n");
2439 BUG();
2440 }
2441 if (ipa3_q6_set_ex_path_to_apps()) {
2442 IPAERR("Failed to redirect exceptions to APPS\n");
2443 BUG();
2444 }
2445 /* Remove delay from Q6 PRODs to avoid pending descriptors
2446 * on pipe reset procedure
2447 */
2448 ipa3_q6_pipe_delay(false);
2449
2450 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2451 IPADBG_LOW("Exit with success\n");
2452}
2453
2454/*
 2455 * ipa3_q6_post_shutdown_cleanup() - As part of this cleanup,
 2456 * check that the GSI channels related to Q6 producer clients are empty.
 2457 *
 2458 * Q6 GSI channel emptiness is needed to guarantee that no descriptors with
 2459 * invalid info are injected into IPA RX from IPA_IF while the modem restarts.
2460 */
2461void ipa3_q6_post_shutdown_cleanup(void)
2462{
2463 int client_idx;
Skylar Changc1f15312017-05-09 14:14:32 -07002464 int ep_idx;
Amir Levy9659e592016-10-27 18:08:27 +03002465
2466 IPADBG_LOW("ENTER\n");
Amir Levy9659e592016-10-27 18:08:27 +03002467
2468 if (!ipa3_ctx->uc_ctx.uc_loaded) {
2469 IPAERR("uC is not loaded. Skipping\n");
2470 return;
2471 }
2472
Skylar Chang94692c92017-03-01 09:07:11 -08002473 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2474
2475 /* Handle the issue where SUSPEND was removed for some reason */
2476 ipa3_q6_avoid_holb();
2477 ipa3_halt_q6_cons_gsi_channels();
2478
Amir Levy9659e592016-10-27 18:08:27 +03002479 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
2480 if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
Skylar Changc1f15312017-05-09 14:14:32 -07002481 ep_idx = ipa3_get_ep_mapping(client_idx);
2482 if (ep_idx == -1)
2483 continue;
2484
Amir Levy9659e592016-10-27 18:08:27 +03002485 if (ipa3_uc_is_gsi_channel_empty(client_idx)) {
2486 IPAERR("fail to validate Q6 ch emptiness %d\n",
2487 client_idx);
2488 BUG();
2489 return;
2490 }
2491 }
2492
2493 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2494 IPADBG_LOW("Exit with success\n");
2495}
2496
2497static inline void ipa3_sram_set_canary(u32 *sram_mmio, int offset)
2498{
2499 /* Set 4 bytes of CANARY before the offset */
2500 sram_mmio[(offset - 4) / 4] = IPA_MEM_CANARY_VAL;
2501}
2502
2503/**
Amir Levy9fadeca2017-04-25 10:18:32 +03002504 * _ipa_init_sram_v3() - Initialize IPA local SRAM.
Amir Levy9659e592016-10-27 18:08:27 +03002505 *
2506 * Return codes: 0 for success, negative value for failure
2507 */
Amir Levy9fadeca2017-04-25 10:18:32 +03002508int _ipa_init_sram_v3(void)
Amir Levy9659e592016-10-27 18:08:27 +03002509{
2510 u32 *ipa_sram_mmio;
2511 unsigned long phys_addr;
2512
2513 phys_addr = ipa3_ctx->ipa_wrapper_base +
2514 ipa3_ctx->ctrl->ipa_reg_base_ofst +
2515 ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
2516 ipa3_ctx->smem_restricted_bytes / 4);
2517
2518 ipa_sram_mmio = ioremap(phys_addr, ipa3_ctx->smem_sz);
2519 if (!ipa_sram_mmio) {
2520 IPAERR("fail to ioremap IPA SRAM\n");
2521 return -ENOMEM;
2522 }
2523
2524 /* Consult with ipa_i.h on the location of the CANARY values */
2525 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst) - 4);
2526 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst));
2527 ipa3_sram_set_canary(ipa_sram_mmio,
2528 IPA_MEM_PART(v4_flt_nhash_ofst) - 4);
2529 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_nhash_ofst));
2530 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst) - 4);
2531 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst));
2532 ipa3_sram_set_canary(ipa_sram_mmio,
2533 IPA_MEM_PART(v6_flt_nhash_ofst) - 4);
2534 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_nhash_ofst));
2535 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst) - 4);
2536 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst));
2537 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst) - 4);
2538 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst));
2539 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst) - 4);
2540 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst));
2541 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst) - 4);
2542 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst));
2543 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst) - 4);
2544 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst));
2545 ipa3_sram_set_canary(ipa_sram_mmio,
2546 IPA_MEM_PART(modem_hdr_proc_ctx_ofst) - 4);
2547 ipa3_sram_set_canary(ipa_sram_mmio,
2548 IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
2549 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst) - 4);
2550 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst));
Amir Levy9fadeca2017-04-25 10:18:32 +03002551 ipa3_sram_set_canary(ipa_sram_mmio,
2552 (ipa_get_hw_type() >= IPA_HW_v3_5) ?
2553 IPA_MEM_PART(uc_event_ring_ofst) :
2554 IPA_MEM_PART(end_ofst));
Amir Levy9659e592016-10-27 18:08:27 +03002555
2556 iounmap(ipa_sram_mmio);
2557
2558 return 0;
2559}
2560
2561/**
2562 * _ipa_init_hdr_v3_0() - Initialize IPA header block.
2563 *
2564 * Return codes: 0 for success, negative value for failure
2565 */
2566int _ipa_init_hdr_v3_0(void)
2567{
2568 struct ipa3_desc desc = { 0 };
2569 struct ipa_mem_buffer mem;
2570 struct ipahal_imm_cmd_hdr_init_local cmd = {0};
2571 struct ipahal_imm_cmd_pyld *cmd_pyld;
2572 struct ipahal_imm_cmd_dma_shared_mem dma_cmd = { 0 };
2573
2574 mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
2575 mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
2576 GFP_KERNEL);
2577 if (!mem.base) {
2578 IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
2579 return -ENOMEM;
2580 }
2581 memset(mem.base, 0, mem.size);
2582
2583 cmd.hdr_table_addr = mem.phys_base;
2584 cmd.size_hdr_table = mem.size;
2585 cmd.hdr_addr = ipa3_ctx->smem_restricted_bytes +
2586 IPA_MEM_PART(modem_hdr_ofst);
2587 cmd_pyld = ipahal_construct_imm_cmd(
2588 IPA_IMM_CMD_HDR_INIT_LOCAL, &cmd, false);
2589 if (!cmd_pyld) {
2590 IPAERR("fail to construct hdr_init_local imm cmd\n");
2591 dma_free_coherent(ipa3_ctx->pdev,
2592 mem.size, mem.base,
2593 mem.phys_base);
2594 return -EFAULT;
2595 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002596 desc.opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002597 desc.type = IPA_IMM_CMD_DESC;
2598 desc.pyld = cmd_pyld->data;
2599 desc.len = cmd_pyld->len;
2600 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2601
2602 if (ipa3_send_cmd(1, &desc)) {
2603 IPAERR("fail to send immediate command\n");
2604 ipahal_destroy_imm_cmd(cmd_pyld);
2605 dma_free_coherent(ipa3_ctx->pdev,
2606 mem.size, mem.base,
2607 mem.phys_base);
2608 return -EFAULT;
2609 }
2610
2611 ipahal_destroy_imm_cmd(cmd_pyld);
2612 dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
2613
2614 mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) +
2615 IPA_MEM_PART(apps_hdr_proc_ctx_size);
2616 mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
2617 GFP_KERNEL);
2618 if (!mem.base) {
2619 IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
2620 return -ENOMEM;
2621 }
2622 memset(mem.base, 0, mem.size);
2623 memset(&desc, 0, sizeof(desc));
2624
2625 dma_cmd.is_read = false;
2626 dma_cmd.skip_pipeline_clear = false;
2627 dma_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
2628 dma_cmd.system_addr = mem.phys_base;
2629 dma_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
2630 IPA_MEM_PART(modem_hdr_proc_ctx_ofst);
2631 dma_cmd.size = mem.size;
2632 cmd_pyld = ipahal_construct_imm_cmd(
2633 IPA_IMM_CMD_DMA_SHARED_MEM, &dma_cmd, false);
2634 if (!cmd_pyld) {
2635 IPAERR("fail to construct dma_shared_mem imm\n");
2636 dma_free_coherent(ipa3_ctx->pdev,
2637 mem.size, mem.base,
2638 mem.phys_base);
2639 return -EFAULT;
2640 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002641 desc.opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002642 desc.pyld = cmd_pyld->data;
2643 desc.len = cmd_pyld->len;
2644 desc.type = IPA_IMM_CMD_DESC;
2645 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2646
2647 if (ipa3_send_cmd(1, &desc)) {
2648 IPAERR("fail to send immediate command\n");
2649 ipahal_destroy_imm_cmd(cmd_pyld);
2650 dma_free_coherent(ipa3_ctx->pdev,
2651 mem.size,
2652 mem.base,
2653 mem.phys_base);
2654 return -EFAULT;
2655 }
2656 ipahal_destroy_imm_cmd(cmd_pyld);
2657
2658 ipahal_write_reg(IPA_LOCAL_PKT_PROC_CNTXT_BASE, dma_cmd.local_addr);
2659
2660 dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
2661
2662 return 0;
2663}
2664
2665/**
2666 * _ipa_init_rt4_v3() - Initialize IPA routing block for IPv4.
2667 *
2668 * Return codes: 0 for success, negative value for failure
2669 */
2670int _ipa_init_rt4_v3(void)
2671{
2672 struct ipa3_desc desc = { 0 };
2673 struct ipa_mem_buffer mem;
2674 struct ipahal_imm_cmd_ip_v4_routing_init v4_cmd;
2675 struct ipahal_imm_cmd_pyld *cmd_pyld;
2676 int i;
2677 int rc = 0;
2678
2679 for (i = IPA_MEM_PART(v4_modem_rt_index_lo);
2680 i <= IPA_MEM_PART(v4_modem_rt_index_hi);
2681 i++)
2682 ipa3_ctx->rt_idx_bitmap[IPA_IP_v4] |= (1 << i);
2683 IPADBG("v4 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v4]);
2684
2685 rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v4_rt_num_index),
2686 IPA_MEM_PART(v4_rt_hash_size), IPA_MEM_PART(v4_rt_nhash_size),
Amir Levy4dc79be2017-02-01 19:18:35 +02002687 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03002688 if (rc) {
2689 IPAERR("fail generate empty v4 rt img\n");
2690 return rc;
2691 }
2692
2693 v4_cmd.hash_rules_addr = mem.phys_base;
2694 v4_cmd.hash_rules_size = mem.size;
2695 v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
2696 IPA_MEM_PART(v4_rt_hash_ofst);
2697 v4_cmd.nhash_rules_addr = mem.phys_base;
2698 v4_cmd.nhash_rules_size = mem.size;
2699 v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
2700 IPA_MEM_PART(v4_rt_nhash_ofst);
2701 IPADBG("putting hashable routing IPv4 rules to phys 0x%x\n",
2702 v4_cmd.hash_local_addr);
2703 IPADBG("putting non-hashable routing IPv4 rules to phys 0x%x\n",
2704 v4_cmd.nhash_local_addr);
2705 cmd_pyld = ipahal_construct_imm_cmd(
2706 IPA_IMM_CMD_IP_V4_ROUTING_INIT, &v4_cmd, false);
2707 if (!cmd_pyld) {
2708 IPAERR("fail construct ip_v4_rt_init imm cmd\n");
2709 rc = -EPERM;
2710 goto free_mem;
2711 }
2712
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002713 desc.opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002714 desc.type = IPA_IMM_CMD_DESC;
2715 desc.pyld = cmd_pyld->data;
2716 desc.len = cmd_pyld->len;
2717 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2718
2719 if (ipa3_send_cmd(1, &desc)) {
2720 IPAERR("fail to send immediate command\n");
2721 rc = -EFAULT;
2722 }
2723
2724 ipahal_destroy_imm_cmd(cmd_pyld);
2725
2726free_mem:
2727 ipahal_free_dma_mem(&mem);
2728 return rc;
2729}
2730
2731/**
2732 * _ipa_init_rt6_v3() - Initialize IPA routing block for IPv6.
2733 *
2734 * Return codes: 0 for success, negative value for failure
2735 */
2736int _ipa_init_rt6_v3(void)
2737{
2738 struct ipa3_desc desc = { 0 };
2739 struct ipa_mem_buffer mem;
2740 struct ipahal_imm_cmd_ip_v6_routing_init v6_cmd;
2741 struct ipahal_imm_cmd_pyld *cmd_pyld;
2742 int i;
2743 int rc = 0;
2744
2745 for (i = IPA_MEM_PART(v6_modem_rt_index_lo);
2746 i <= IPA_MEM_PART(v6_modem_rt_index_hi);
2747 i++)
2748 ipa3_ctx->rt_idx_bitmap[IPA_IP_v6] |= (1 << i);
2749 IPADBG("v6 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v6]);
2750
2751 rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v6_rt_num_index),
2752 IPA_MEM_PART(v6_rt_hash_size), IPA_MEM_PART(v6_rt_nhash_size),
Amir Levy4dc79be2017-02-01 19:18:35 +02002753 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03002754 if (rc) {
2755 IPAERR("fail generate empty v6 rt img\n");
2756 return rc;
2757 }
2758
2759 v6_cmd.hash_rules_addr = mem.phys_base;
2760 v6_cmd.hash_rules_size = mem.size;
2761 v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
2762 IPA_MEM_PART(v6_rt_hash_ofst);
2763 v6_cmd.nhash_rules_addr = mem.phys_base;
2764 v6_cmd.nhash_rules_size = mem.size;
2765 v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
2766 IPA_MEM_PART(v6_rt_nhash_ofst);
2767 IPADBG("putting hashable routing IPv6 rules to phys 0x%x\n",
2768 v6_cmd.hash_local_addr);
2769 IPADBG("putting non-hashable routing IPv6 rules to phys 0x%x\n",
2770 v6_cmd.nhash_local_addr);
2771 cmd_pyld = ipahal_construct_imm_cmd(
2772 IPA_IMM_CMD_IP_V6_ROUTING_INIT, &v6_cmd, false);
2773 if (!cmd_pyld) {
2774 IPAERR("fail construct ip_v6_rt_init imm cmd\n");
2775 rc = -EPERM;
2776 goto free_mem;
2777 }
2778
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002779 desc.opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002780 desc.type = IPA_IMM_CMD_DESC;
2781 desc.pyld = cmd_pyld->data;
2782 desc.len = cmd_pyld->len;
2783 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2784
2785 if (ipa3_send_cmd(1, &desc)) {
2786 IPAERR("fail to send immediate command\n");
2787 rc = -EFAULT;
2788 }
2789
2790 ipahal_destroy_imm_cmd(cmd_pyld);
2791
2792free_mem:
2793 ipahal_free_dma_mem(&mem);
2794 return rc;
2795}
2796
2797/**
2798 * _ipa_init_flt4_v3() - Initialize IPA filtering block for IPv4.
2799 *
2800 * Return codes: 0 for success, negative value for failure
2801 */
2802int _ipa_init_flt4_v3(void)
2803{
2804 struct ipa3_desc desc = { 0 };
2805 struct ipa_mem_buffer mem;
2806 struct ipahal_imm_cmd_ip_v4_filter_init v4_cmd;
2807 struct ipahal_imm_cmd_pyld *cmd_pyld;
2808 int rc;
2809
2810 rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
2811 IPA_MEM_PART(v4_flt_hash_size),
2812 IPA_MEM_PART(v4_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
Amir Levy4dc79be2017-02-01 19:18:35 +02002813 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03002814 if (rc) {
2815 IPAERR("fail generate empty v4 flt img\n");
2816 return rc;
2817 }
2818
2819 v4_cmd.hash_rules_addr = mem.phys_base;
2820 v4_cmd.hash_rules_size = mem.size;
2821 v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
2822 IPA_MEM_PART(v4_flt_hash_ofst);
2823 v4_cmd.nhash_rules_addr = mem.phys_base;
2824 v4_cmd.nhash_rules_size = mem.size;
2825 v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
2826 IPA_MEM_PART(v4_flt_nhash_ofst);
2827 IPADBG("putting hashable filtering IPv4 rules to phys 0x%x\n",
2828 v4_cmd.hash_local_addr);
2829 IPADBG("putting non-hashable filtering IPv4 rules to phys 0x%x\n",
2830 v4_cmd.nhash_local_addr);
2831 cmd_pyld = ipahal_construct_imm_cmd(
2832 IPA_IMM_CMD_IP_V4_FILTER_INIT, &v4_cmd, false);
2833 if (!cmd_pyld) {
2834 IPAERR("fail construct ip_v4_flt_init imm cmd\n");
2835 rc = -EPERM;
2836 goto free_mem;
2837 }
2838
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002839 desc.opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002840 desc.type = IPA_IMM_CMD_DESC;
2841 desc.pyld = cmd_pyld->data;
2842 desc.len = cmd_pyld->len;
2843 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2844
2845 if (ipa3_send_cmd(1, &desc)) {
2846 IPAERR("fail to send immediate command\n");
2847 rc = -EFAULT;
2848 }
2849
2850 ipahal_destroy_imm_cmd(cmd_pyld);
2851
2852free_mem:
2853 ipahal_free_dma_mem(&mem);
2854 return rc;
2855}
2856
2857/**
2858 * _ipa_init_flt6_v3() - Initialize IPA filtering block for IPv6.
2859 *
2860 * Return codes: 0 for success, negative value for failure
2861 */
2862int _ipa_init_flt6_v3(void)
2863{
2864 struct ipa3_desc desc = { 0 };
2865 struct ipa_mem_buffer mem;
2866 struct ipahal_imm_cmd_ip_v6_filter_init v6_cmd;
2867 struct ipahal_imm_cmd_pyld *cmd_pyld;
2868 int rc;
2869
2870 rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
2871 IPA_MEM_PART(v6_flt_hash_size),
2872 IPA_MEM_PART(v6_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
Amir Levy4dc79be2017-02-01 19:18:35 +02002873 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03002874 if (rc) {
2875 IPAERR("fail generate empty v6 flt img\n");
2876 return rc;
2877 }
2878
2879 v6_cmd.hash_rules_addr = mem.phys_base;
2880 v6_cmd.hash_rules_size = mem.size;
2881 v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
2882 IPA_MEM_PART(v6_flt_hash_ofst);
2883 v6_cmd.nhash_rules_addr = mem.phys_base;
2884 v6_cmd.nhash_rules_size = mem.size;
2885 v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
2886 IPA_MEM_PART(v6_flt_nhash_ofst);
2887 IPADBG("putting hashable filtering IPv6 rules to phys 0x%x\n",
2888 v6_cmd.hash_local_addr);
2889 IPADBG("putting non-hashable filtering IPv6 rules to phys 0x%x\n",
2890 v6_cmd.nhash_local_addr);
2891
2892 cmd_pyld = ipahal_construct_imm_cmd(
2893 IPA_IMM_CMD_IP_V6_FILTER_INIT, &v6_cmd, false);
2894 if (!cmd_pyld) {
2895 IPAERR("fail construct ip_v6_flt_init imm cmd\n");
2896 rc = -EPERM;
2897 goto free_mem;
2898 }
2899
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002900 desc.opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002901 desc.type = IPA_IMM_CMD_DESC;
2902 desc.pyld = cmd_pyld->data;
2903 desc.len = cmd_pyld->len;
2904 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2905
2906 if (ipa3_send_cmd(1, &desc)) {
2907 IPAERR("fail to send immediate command\n");
2908 rc = -EFAULT;
2909 }
2910
2911 ipahal_destroy_imm_cmd(cmd_pyld);
2912
2913free_mem:
2914 ipahal_free_dma_mem(&mem);
2915 return rc;
2916}
2917
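/*
 * ipa3_setup_flt_hash_tuple() - clear the filter hash tuple mask of every
 * AP-owned pipe that supports filtering.
 */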
2918static int ipa3_setup_flt_hash_tuple(void)
2919{
2920 int pipe_idx;
2921 struct ipahal_reg_hash_tuple tuple;
2922
2923 memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
2924
2925 for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes ; pipe_idx++) {
2926 if (!ipa_is_ep_support_flt(pipe_idx))
2927 continue;
2928
2929 if (ipa_is_modem_pipe(pipe_idx))
2930 continue;
2931
2932 if (ipa3_set_flt_tuple_mask(pipe_idx, &tuple)) {
2933 IPAERR("failed to setup pipe %d flt tuple\n", pipe_idx);
2934 return -EFAULT;
2935 }
2936 }
2937
2938 return 0;
2939}
2940
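/*
 * ipa3_setup_rt_hash_tuple() - clear the routing hash tuple mask of every
 * routing table index that is not reserved for the modem.
 */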
2941static int ipa3_setup_rt_hash_tuple(void)
2942{
2943 int tbl_idx;
2944 struct ipahal_reg_hash_tuple tuple;
2945
2946 memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
2947
2948 for (tbl_idx = 0;
2949 tbl_idx < max(IPA_MEM_PART(v6_rt_num_index),
2950 IPA_MEM_PART(v4_rt_num_index));
2951 tbl_idx++) {
2952
2953 if (tbl_idx >= IPA_MEM_PART(v4_modem_rt_index_lo) &&
2954 tbl_idx <= IPA_MEM_PART(v4_modem_rt_index_hi))
2955 continue;
2956
2957 if (tbl_idx >= IPA_MEM_PART(v6_modem_rt_index_lo) &&
2958 tbl_idx <= IPA_MEM_PART(v6_modem_rt_index_hi))
2959 continue;
2960
2961 if (ipa3_set_rt_tuple_mask(tbl_idx, &tuple)) {
2962 IPAERR("failed to setup tbl %d rt tuple\n", tbl_idx);
2963 return -EFAULT;
2964 }
2965 }
2966
2967 return 0;
2968}
2969
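/*
 * ipa3_setup_apps_pipes() - bring up the AP pipes and tables: apply the GSI
 * channel 20 workaround when required, allocate the common PROD event ring,
 * connect the APPS_CMD_PROD pipe, initialize the SRAM, header, routing and
 * filtering blocks, configure the hash tuples, set up the exception path and
 * default routes, and finally connect the LAN_CONS and (when not in MHI
 * configuration) LAN_PROD pipes.
 */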
2970static int ipa3_setup_apps_pipes(void)
2971{
2972 struct ipa_sys_connect_params sys_in;
2973 int result = 0;
2974
2975 if (ipa3_ctx->gsi_ch20_wa) {
2976 IPADBG("Allocating GSI physical channel 20\n");
2977 result = ipa_gsi_ch20_wa();
2978 if (result) {
2979 IPAERR("ipa_gsi_ch20_wa failed %d\n", result);
Ghanim Fodic6b67492017-03-15 14:19:56 +02002980 goto fail_ch20_wa;
Amir Levy9659e592016-10-27 18:08:27 +03002981 }
2982 }
2983
Skylar Changd407e592017-03-30 11:25:30 -07002984 /* allocate the common PROD event ring */
2985 if (ipa3_alloc_common_event_ring()) {
2986 IPAERR("ipa3_alloc_common_event_ring failed.\n");
2987 result = -EPERM;
2988 goto fail_ch20_wa;
2989 }
2990
Amir Levy9659e592016-10-27 18:08:27 +03002991 /* CMD OUT (AP->IPA) */
2992 memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
2993 sys_in.client = IPA_CLIENT_APPS_CMD_PROD;
2994 sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
2995 sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
2996 sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS;
2997 if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_cmd)) {
Ghanim Fodic6b67492017-03-15 14:19:56 +02002998 IPAERR(":setup sys pipe (APPS_CMD_PROD) failed.\n");
Amir Levy9659e592016-10-27 18:08:27 +03002999 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003000 goto fail_ch20_wa;
Amir Levy9659e592016-10-27 18:08:27 +03003001 }
3002 IPADBG("Apps to IPA cmd pipe is connected\n");
3003
3004 ipa3_ctx->ctrl->ipa_init_sram();
3005 IPADBG("SRAM initialized\n");
3006
3007 ipa3_ctx->ctrl->ipa_init_hdr();
3008 IPADBG("HDR initialized\n");
3009
3010 ipa3_ctx->ctrl->ipa_init_rt4();
3011 IPADBG("V4 RT initialized\n");
3012
3013 ipa3_ctx->ctrl->ipa_init_rt6();
3014 IPADBG("V6 RT initialized\n");
3015
3016 ipa3_ctx->ctrl->ipa_init_flt4();
3017 IPADBG("V4 FLT initialized\n");
3018
3019 ipa3_ctx->ctrl->ipa_init_flt6();
3020 IPADBG("V6 FLT initialized\n");
3021
3022 if (ipa3_setup_flt_hash_tuple()) {
3023 IPAERR(":fail to configure flt hash tuple\n");
3024 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003025 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003026 }
3027 IPADBG("flt hash tuple is configured\n");
3028
3029 if (ipa3_setup_rt_hash_tuple()) {
3030 IPAERR(":fail to configure rt hash tuple\n");
3031 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003032 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003033 }
3034 IPADBG("rt hash tuple is configured\n");
3035
3036 if (ipa3_setup_exception_path()) {
3037 IPAERR(":fail to setup excp path\n");
3038 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003039 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003040 }
 3041	IPADBG("Exception path was successfully set\n");
3042
3043 if (ipa3_setup_dflt_rt_tables()) {
3044 IPAERR(":fail to setup dflt routes\n");
3045 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003046 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003047 }
3048 IPADBG("default routing was set\n");
3049
Ghanim Fodic6b67492017-03-15 14:19:56 +02003050 /* LAN IN (IPA->AP) */
Amir Levy9659e592016-10-27 18:08:27 +03003051 memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
3052 sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
3053 sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
3054 sys_in.notify = ipa3_lan_rx_cb;
3055 sys_in.priv = NULL;
3056 sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH;
3057 sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
3058 sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
3059 sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
3060 sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
3061 sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
3062 sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
3063 sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_ENABLE_CS_OFFLOAD_DL;
3064
3065 /**
 3066	 * ipa_lan_rx_cb() is intended to notify the source EP about a packet
 3067	 * being received on the LAN_CONS by calling the source EP call-back.
 3068	 * There could be a race condition with calling this call-back: another
 3069	 * thread may nullify it, e.g. on EP disconnect.
 3070	 * This lock is intended to protect the access to the source EP call-back.
3071 */
3072 spin_lock_init(&ipa3_ctx->disconnect_lock);
3073 if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
Ghanim Fodic6b67492017-03-15 14:19:56 +02003074 IPAERR(":setup sys pipe (LAN_CONS) failed.\n");
Amir Levy9659e592016-10-27 18:08:27 +03003075 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003076 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003077 }
3078
Ghanim Fodic6b67492017-03-15 14:19:56 +02003079 /* LAN OUT (AP->IPA) */
Amir Levy54fe4d32017-03-16 11:21:49 +02003080 if (!ipa3_ctx->ipa_config_is_mhi) {
3081 memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
3082 sys_in.client = IPA_CLIENT_APPS_LAN_PROD;
3083 sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ;
3084 sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
3085 if (ipa3_setup_sys_pipe(&sys_in,
3086 &ipa3_ctx->clnt_hdl_data_out)) {
3087 IPAERR(":setup sys pipe (LAN_PROD) failed.\n");
3088 result = -EPERM;
3089 goto fail_lan_data_out;
3090 }
Amir Levy9659e592016-10-27 18:08:27 +03003091 }
3092
3093 return 0;
3094
Ghanim Fodic6b67492017-03-15 14:19:56 +02003095fail_lan_data_out:
Amir Levy9659e592016-10-27 18:08:27 +03003096 ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
Ghanim Fodic6b67492017-03-15 14:19:56 +02003097fail_flt_hash_tuple:
Amir Levy9659e592016-10-27 18:08:27 +03003098 if (ipa3_ctx->dflt_v6_rt_rule_hdl)
3099 __ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
3100 if (ipa3_ctx->dflt_v4_rt_rule_hdl)
3101 __ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
3102 if (ipa3_ctx->excp_hdr_hdl)
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02003103 __ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
Amir Levy9659e592016-10-27 18:08:27 +03003104 ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
Ghanim Fodic6b67492017-03-15 14:19:56 +02003105fail_ch20_wa:
Amir Levy9659e592016-10-27 18:08:27 +03003106 return result;
3107}
3108
3109static void ipa3_teardown_apps_pipes(void)
3110{
Amir Levy54fe4d32017-03-16 11:21:49 +02003111 if (!ipa3_ctx->ipa_config_is_mhi)
3112 ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out);
Amir Levy9659e592016-10-27 18:08:27 +03003113 ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
3114 __ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
3115 __ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02003116 __ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
Amir Levy9659e592016-10-27 18:08:27 +03003117 ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
3118}
3119
3120#ifdef CONFIG_COMPAT
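/*
 * compat_ipa3_ioctl() - translate ioctl codes issued by 32-bit user space
 * to their native equivalents and forward them to ipa3_ioctl().
 * IPA_IOC_ALLOC_NAT_MEM32 is handled in place since its payload layout
 * differs between 32-bit and 64-bit user space.
 */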
3121long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3122{
3123 int retval = 0;
3124 struct ipa3_ioc_nat_alloc_mem32 nat_mem32;
3125 struct ipa_ioc_nat_alloc_mem nat_mem;
3126
3127 switch (cmd) {
3128 case IPA_IOC_ADD_HDR32:
3129 cmd = IPA_IOC_ADD_HDR;
3130 break;
3131 case IPA_IOC_DEL_HDR32:
3132 cmd = IPA_IOC_DEL_HDR;
3133 break;
3134 case IPA_IOC_ADD_RT_RULE32:
3135 cmd = IPA_IOC_ADD_RT_RULE;
3136 break;
3137 case IPA_IOC_DEL_RT_RULE32:
3138 cmd = IPA_IOC_DEL_RT_RULE;
3139 break;
3140 case IPA_IOC_ADD_FLT_RULE32:
3141 cmd = IPA_IOC_ADD_FLT_RULE;
3142 break;
3143 case IPA_IOC_DEL_FLT_RULE32:
3144 cmd = IPA_IOC_DEL_FLT_RULE;
3145 break;
3146 case IPA_IOC_GET_RT_TBL32:
3147 cmd = IPA_IOC_GET_RT_TBL;
3148 break;
3149 case IPA_IOC_COPY_HDR32:
3150 cmd = IPA_IOC_COPY_HDR;
3151 break;
3152 case IPA_IOC_QUERY_INTF32:
3153 cmd = IPA_IOC_QUERY_INTF;
3154 break;
3155 case IPA_IOC_QUERY_INTF_TX_PROPS32:
3156 cmd = IPA_IOC_QUERY_INTF_TX_PROPS;
3157 break;
3158 case IPA_IOC_QUERY_INTF_RX_PROPS32:
3159 cmd = IPA_IOC_QUERY_INTF_RX_PROPS;
3160 break;
3161 case IPA_IOC_QUERY_INTF_EXT_PROPS32:
3162 cmd = IPA_IOC_QUERY_INTF_EXT_PROPS;
3163 break;
3164 case IPA_IOC_GET_HDR32:
3165 cmd = IPA_IOC_GET_HDR;
3166 break;
3167 case IPA_IOC_ALLOC_NAT_MEM32:
3168 if (copy_from_user((u8 *)&nat_mem32, (u8 *)arg,
3169 sizeof(struct ipa3_ioc_nat_alloc_mem32))) {
3170 retval = -EFAULT;
3171 goto ret;
3172 }
3173 memcpy(nat_mem.dev_name, nat_mem32.dev_name,
3174 IPA_RESOURCE_NAME_MAX);
3175 nat_mem.size = (size_t)nat_mem32.size;
3176 nat_mem.offset = (off_t)nat_mem32.offset;
3177
3178 /* null terminate the string */
3179 nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
3180
3181 if (ipa3_allocate_nat_device(&nat_mem)) {
3182 retval = -EFAULT;
3183 goto ret;
3184 }
3185 nat_mem32.offset = (compat_off_t)nat_mem.offset;
3186 if (copy_to_user((u8 *)arg, (u8 *)&nat_mem32,
3187 sizeof(struct ipa3_ioc_nat_alloc_mem32))) {
3188 retval = -EFAULT;
3189 }
3190ret:
3191 return retval;
3192 case IPA_IOC_V4_INIT_NAT32:
3193 cmd = IPA_IOC_V4_INIT_NAT;
3194 break;
3195 case IPA_IOC_NAT_DMA32:
3196 cmd = IPA_IOC_NAT_DMA;
3197 break;
3198 case IPA_IOC_V4_DEL_NAT32:
3199 cmd = IPA_IOC_V4_DEL_NAT;
3200 break;
3201 case IPA_IOC_GET_NAT_OFFSET32:
3202 cmd = IPA_IOC_GET_NAT_OFFSET;
3203 break;
3204 case IPA_IOC_PULL_MSG32:
3205 cmd = IPA_IOC_PULL_MSG;
3206 break;
3207 case IPA_IOC_RM_ADD_DEPENDENCY32:
3208 cmd = IPA_IOC_RM_ADD_DEPENDENCY;
3209 break;
3210 case IPA_IOC_RM_DEL_DEPENDENCY32:
3211 cmd = IPA_IOC_RM_DEL_DEPENDENCY;
3212 break;
3213 case IPA_IOC_GENERATE_FLT_EQ32:
3214 cmd = IPA_IOC_GENERATE_FLT_EQ;
3215 break;
3216 case IPA_IOC_QUERY_RT_TBL_INDEX32:
3217 cmd = IPA_IOC_QUERY_RT_TBL_INDEX;
3218 break;
3219 case IPA_IOC_WRITE_QMAPID32:
3220 cmd = IPA_IOC_WRITE_QMAPID;
3221 break;
3222 case IPA_IOC_MDFY_FLT_RULE32:
3223 cmd = IPA_IOC_MDFY_FLT_RULE;
3224 break;
3225 case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32:
3226 cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD;
3227 break;
3228 case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32:
3229 cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL;
3230 break;
3231 case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32:
3232 cmd = IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED;
3233 break;
3234 case IPA_IOC_MDFY_RT_RULE32:
3235 cmd = IPA_IOC_MDFY_RT_RULE;
3236 break;
3237 case IPA_IOC_COMMIT_HDR:
3238 case IPA_IOC_RESET_HDR:
3239 case IPA_IOC_COMMIT_RT:
3240 case IPA_IOC_RESET_RT:
3241 case IPA_IOC_COMMIT_FLT:
3242 case IPA_IOC_RESET_FLT:
3243 case IPA_IOC_DUMP:
3244 case IPA_IOC_PUT_RT_TBL:
3245 case IPA_IOC_PUT_HDR:
3246 case IPA_IOC_SET_FLT:
3247 case IPA_IOC_QUERY_EP_MAPPING:
3248 break;
3249 default:
3250 return -ENOIOCTLCMD;
3251 }
3252 return ipa3_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
3253}
3254#endif
3255
3256static ssize_t ipa3_write(struct file *file, const char __user *buf,
3257 size_t count, loff_t *ppos);
3258
3259static const struct file_operations ipa3_drv_fops = {
3260 .owner = THIS_MODULE,
3261 .open = ipa3_open,
3262 .read = ipa3_read,
3263 .write = ipa3_write,
3264 .unlocked_ioctl = ipa3_ioctl,
3265#ifdef CONFIG_COMPAT
3266 .compat_ioctl = compat_ipa3_ioctl,
3267#endif
3268};
3269
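/*
 * ipa3_get_clks() - get the IPA core clock ("core_clk"). When the clock is
 * voted by bandwidth via the bus scaling driver, no clock handle is needed.
 */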
3270static int ipa3_get_clks(struct device *dev)
3271{
Ghanim Fodi6a831342017-03-07 18:19:15 +02003272 if (ipa3_res.use_bw_vote) {
3273 IPADBG("Vote IPA clock by bw voting via bus scaling driver\n");
3274 ipa3_clk = NULL;
3275 return 0;
3276 }
3277
Amir Levy9659e592016-10-27 18:08:27 +03003278 ipa3_clk = clk_get(dev, "core_clk");
3279 if (IS_ERR(ipa3_clk)) {
3280 if (ipa3_clk != ERR_PTR(-EPROBE_DEFER))
3281 IPAERR("fail to get ipa clk\n");
3282 return PTR_ERR(ipa3_clk);
3283 }
3284 return 0;
3285}
3286
3287/**
3288 * _ipa_enable_clks_v3_0() - Enable IPA clocks.
3289 */
3290void _ipa_enable_clks_v3_0(void)
3291{
Ghanim Fodi6a831342017-03-07 18:19:15 +02003292 IPADBG_LOW("curr_ipa_clk_rate=%d", ipa3_ctx->curr_ipa_clk_rate);
Amir Levy9659e592016-10-27 18:08:27 +03003293 if (ipa3_clk) {
Ghanim Fodi6a831342017-03-07 18:19:15 +02003294 IPADBG_LOW("enabling gcc_ipa_clk\n");
Amir Levy9659e592016-10-27 18:08:27 +03003295 clk_prepare(ipa3_clk);
3296 clk_enable(ipa3_clk);
Amir Levy9659e592016-10-27 18:08:27 +03003297 clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
Amir Levy9659e592016-10-27 18:08:27 +03003298 }
3299
Ghanim Fodi6a831342017-03-07 18:19:15 +02003300 ipa3_uc_notify_clk_state(true);
Amir Levy9659e592016-10-27 18:08:27 +03003301}
3302
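/*
 * ipa3_get_bus_vote() - map the current IPA clock rate to a bus scaling
 * usecase index: SVS2 -> 1, SVS -> 2, nominal -> 3, turbo -> last usecase.
 */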
3303static unsigned int ipa3_get_bus_vote(void)
3304{
3305 unsigned int idx = 1;
3306
Skylar Chang448d8b82017-08-08 17:30:32 -07003307 if (ipa3_ctx->curr_ipa_clk_rate == ipa3_ctx->ctrl->ipa_clk_rate_svs2) {
Amir Levy9659e592016-10-27 18:08:27 +03003308 idx = 1;
3309 } else if (ipa3_ctx->curr_ipa_clk_rate ==
Skylar Chang448d8b82017-08-08 17:30:32 -07003310 ipa3_ctx->ctrl->ipa_clk_rate_svs) {
3311 idx = 2;
3312 } else if (ipa3_ctx->curr_ipa_clk_rate ==
3313 ipa3_ctx->ctrl->ipa_clk_rate_nominal) {
3314 idx = 3;
Amir Levy9659e592016-10-27 18:08:27 +03003315 } else if (ipa3_ctx->curr_ipa_clk_rate ==
3316 ipa3_ctx->ctrl->ipa_clk_rate_turbo) {
3317 idx = ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases - 1;
3318 } else {
3319 WARN_ON(1);
3320 }
Amir Levy9659e592016-10-27 18:08:27 +03003321 IPADBG("curr %d idx %d\n", ipa3_ctx->curr_ipa_clk_rate, idx);
3322
3323 return idx;
3324}
3325
3326/**
3327* ipa3_enable_clks() - Turn on IPA clocks
3328*
3329* Return codes:
3330* None
3331*/
3332void ipa3_enable_clks(void)
3333{
3334 IPADBG("enabling IPA clocks and bus voting\n");
3335
Ghanim Fodi6a831342017-03-07 18:19:15 +02003336 if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
3337 ipa3_get_bus_vote()))
3338 WARN_ON(1);
Amir Levy9659e592016-10-27 18:08:27 +03003339
Ghanim Fodi6a831342017-03-07 18:19:15 +02003340 ipa3_ctx->ctrl->ipa3_enable_clks();
Amir Levy9659e592016-10-27 18:08:27 +03003341}
3342
3343
3344/**
3345 * _ipa_disable_clks_v3_0() - Disable IPA clocks.
3346 */
3347void _ipa_disable_clks_v3_0(void)
3348{
Amir Levy9659e592016-10-27 18:08:27 +03003349 ipa3_suspend_apps_pipes(true);
3350 ipa3_uc_notify_clk_state(false);
Ghanim Fodi6a831342017-03-07 18:19:15 +02003351 if (ipa3_clk) {
3352 IPADBG_LOW("disabling gcc_ipa_clk\n");
Amir Levy9659e592016-10-27 18:08:27 +03003353 clk_disable_unprepare(ipa3_clk);
Ghanim Fodi6a831342017-03-07 18:19:15 +02003354 }
Amir Levy9659e592016-10-27 18:08:27 +03003355}
3356
3357/**
3358* ipa3_disable_clks() - Turn off IPA clocks
3359*
3360* Return codes:
3361* None
3362*/
3363void ipa3_disable_clks(void)
3364{
3365 IPADBG("disabling IPA clocks and bus voting\n");
3366
3367 ipa3_ctx->ctrl->ipa3_disable_clks();
3368
Ghanim Fodi6a831342017-03-07 18:19:15 +02003369 if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl, 0))
3370 WARN_ON(1);
Amir Levy9659e592016-10-27 18:08:27 +03003371}
3372
3373/**
3374 * ipa3_start_tag_process() - Send TAG packet and wait for it to come back
3375 *
3376 * This function is called prior to clock gating when active client counter
3377 * is 1. TAG process ensures that there are no packets inside IPA HW that
Amir Levya59ed3f2017-03-05 17:30:55 +02003378 * were not submitted to the IPA client via the transport. During TAG process
3379 * all aggregation frames are (force) closed.
Amir Levy9659e592016-10-27 18:08:27 +03003380 *
3381 * Return codes:
3382 * None
3383 */
3384static void ipa3_start_tag_process(struct work_struct *work)
3385{
3386 int res;
3387
3388 IPADBG("starting TAG process\n");
3389 /* close aggregation frames on all pipes */
3390 res = ipa3_tag_aggr_force_close(-1);
3391 if (res)
3392 IPAERR("ipa3_tag_aggr_force_close failed %d\n", res);
3393 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS");
3394
3395 IPADBG("TAG process done\n");
3396}
3397
3398/**
3399* ipa3_active_clients_log_mod() - Log a modification in the active clients
3400* reference count
3401*
3402* This method logs any modification in the active clients reference count:
3403* It logs the modification in the circular history buffer
3404* It logs the modification in the hash table - looking for an entry,
3405* creating one if needed and deleting one if needed.
3406*
3407* @id: active client logging info struct that holds the log information
3408* @inc: a boolean variable to indicate whether the modification is an increase
3409* or decrease
3410* @int_ctx: a boolean variable to indicate whether this call is being made from
3411* an interrupt context and therefore should allocate GFP_ATOMIC memory
3412*
3413* Method process:
3414* - Hash the unique identifier string
3415* - Find the hash in the table
3416* 1)If found, increase or decrease the reference count
3417* 2)If not found, allocate a new hash table entry struct and initialize it
3418* - Remove and deallocate unneeded data structure
3419* - Log the call in the circular history buffer (unless it is a simple call)
3420*/
3421void ipa3_active_clients_log_mod(struct ipa_active_client_logging_info *id,
3422 bool inc, bool int_ctx)
3423{
3424 char temp_str[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN];
3425 unsigned long long t;
3426 unsigned long nanosec_rem;
3427 struct ipa3_active_client_htable_entry *hentry;
3428 struct ipa3_active_client_htable_entry *hfound;
3429 u32 hkey;
3430 char str_to_hash[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
Skylar Chang69ae50e2017-07-31 13:13:29 -07003431 unsigned long flags;
Amir Levy9659e592016-10-27 18:08:27 +03003432
Skylar Chang69ae50e2017-07-31 13:13:29 -07003433 spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
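	/* while the spinlock is held, any allocation below must be atomic */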
3434 int_ctx = true;
Amir Levy9659e592016-10-27 18:08:27 +03003435 hfound = NULL;
3436 memset(str_to_hash, 0, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
3437 strlcpy(str_to_hash, id->id_string, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
Amir Levyd9f51132016-11-14 16:55:35 +02003438 hkey = jhash(str_to_hash, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN,
Amir Levy9659e592016-10-27 18:08:27 +03003439 0);
3440 hash_for_each_possible(ipa3_ctx->ipa3_active_clients_logging.htable,
3441 hentry, list, hkey) {
3442 if (!strcmp(hentry->id_string, id->id_string)) {
3443 hentry->count = hentry->count + (inc ? 1 : -1);
3444 hfound = hentry;
3445 }
3446 }
3447 if (hfound == NULL) {
3448 hentry = NULL;
3449 hentry = kzalloc(sizeof(
3450 struct ipa3_active_client_htable_entry),
3451 int_ctx ? GFP_ATOMIC : GFP_KERNEL);
3452 if (hentry == NULL) {
3453 IPAERR("failed allocating active clients hash entry");
Skylar Chang69ae50e2017-07-31 13:13:29 -07003454 spin_unlock_irqrestore(
3455 &ipa3_ctx->ipa3_active_clients_logging.lock,
3456 flags);
Amir Levy9659e592016-10-27 18:08:27 +03003457 return;
3458 }
3459 hentry->type = id->type;
3460 strlcpy(hentry->id_string, id->id_string,
3461 IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
3462 INIT_HLIST_NODE(&hentry->list);
3463 hentry->count = inc ? 1 : -1;
3464 hash_add(ipa3_ctx->ipa3_active_clients_logging.htable,
3465 &hentry->list, hkey);
3466 } else if (hfound->count == 0) {
3467 hash_del(&hfound->list);
3468 kfree(hfound);
3469 }
3470
3471 if (id->type != SIMPLE) {
3472 t = local_clock();
3473 nanosec_rem = do_div(t, 1000000000) / 1000;
3474 snprintf(temp_str, IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN,
3475 inc ? "[%5lu.%06lu] ^ %s, %s: %d" :
3476 "[%5lu.%06lu] v %s, %s: %d",
3477 (unsigned long)t, nanosec_rem,
3478 id->id_string, id->file, id->line);
3479 ipa3_active_clients_log_insert(temp_str);
3480 }
Skylar Chang69ae50e2017-07-31 13:13:29 -07003481 spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
3482 flags);
Amir Levy9659e592016-10-27 18:08:27 +03003483}
3484
3485void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
3486 bool int_ctx)
3487{
3488 ipa3_active_clients_log_mod(id, false, int_ctx);
3489}
3490
3491void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id,
3492 bool int_ctx)
3493{
3494 ipa3_active_clients_log_mod(id, true, int_ctx);
3495}
3496
3497/**
3498* ipa3_inc_client_enable_clks() - Increase active clients counter, and
3499* enable ipa clocks if necessary
3500*
3501* Return codes:
3502* None
3503*/
3504void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
3505{
Skylar Chang242952b2017-07-20 15:04:05 -07003506 int ret;
3507
Amir Levy9659e592016-10-27 18:08:27 +03003508 ipa3_active_clients_log_inc(id, false);
Skylar Chang242952b2017-07-20 15:04:05 -07003509 ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
3510 if (ret) {
3511 IPADBG_LOW("active clients = %d\n",
3512 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
3513 return;
3514 }
3515
3516 mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
3517
3518 /* somebody might have voted for the clocks in the meantime */
3519 ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
3520 if (ret) {
3521 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
3522 IPADBG_LOW("active clients = %d\n",
3523 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
3524 return;
3525 }
3526
3527 ipa3_enable_clks();
3528 atomic_inc(&ipa3_ctx->ipa3_active_clients.cnt);
3529 IPADBG_LOW("active clients = %d\n",
3530 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
3531 ipa3_suspend_apps_pipes(false);
3532 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
Amir Levy9659e592016-10-27 18:08:27 +03003533}
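/*
 * Typical usage sketch (illustrative only): callers bracket direct IPA HW
 * access with an active-clients vote, e.g.
 *
 *	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 *	... access IPA HW ...
 *	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 */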
3534
3535/**
3536* ipa3_inc_client_enable_clks_no_block() - Only increment the number of active
3537* clients if doing so requires no asynchronous actions. Asynchronous actions are
3538* locking a mutex and waking up the IPA HW.
3539*
3540* Return codes: 0 for success
3541* -EPERM if an asynchronous action should have been done
3542*/
3543int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
3544 *id)
3545{
Skylar Chang242952b2017-07-20 15:04:05 -07003546 int ret;
Amir Levy9659e592016-10-27 18:08:27 +03003547
Skylar Chang242952b2017-07-20 15:04:05 -07003548 ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
3549 if (ret) {
3550 ipa3_active_clients_log_inc(id, true);
3551 IPADBG_LOW("active clients = %d\n",
3552 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
3553 return 0;
Amir Levy9659e592016-10-27 18:08:27 +03003554 }
Amir Levy9659e592016-10-27 18:08:27 +03003555
Skylar Chang242952b2017-07-20 15:04:05 -07003556 return -EPERM;
3557}
3558
3559static void __ipa3_dec_client_disable_clks(void)
3560{
3561 int ret;
3562
3563 if (!atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)) {
3564 IPAERR("trying to disable clocks while refcnt is 0!\n");
3565 ipa_assert();
3566 return;
3567 }
3568
3569 ret = atomic_add_unless(&ipa3_ctx->ipa3_active_clients.cnt, -1, 1);
3570 if (ret)
3571 goto bail;
3572
3573 /* seems like this is the only client holding the clocks */
3574 mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
3575 if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) == 1 &&
3576 ipa3_ctx->tag_process_before_gating) {
3577 ipa3_ctx->tag_process_before_gating = false;
3578 /*
3579 * When TAG process ends, active clients will be
3580 * decreased
3581 */
3582 queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_tag_work);
3583 goto unlock_mutex;
3584 }
3585
3586 /* a different context might increase the clock reference meanwhile */
3587 ret = atomic_sub_return(1, &ipa3_ctx->ipa3_active_clients.cnt);
3588 if (ret > 0)
3589 goto unlock_mutex;
3590 ipa3_disable_clks();
3591
3592unlock_mutex:
3593 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
3594bail:
3595 IPADBG_LOW("active clients = %d\n",
3596 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
Amir Levy9659e592016-10-27 18:08:27 +03003597}
3598
3599/**
3600 * ipa3_dec_client_disable_clks() - Decrease active clients counter
3601 *
3602 * If there are no more active clients, this function also starts the
3603 * TAG process. When the TAG process ends, the IPA clocks will be gated.
3604 * The tag_process_before_gating flag signals that the TAG process should
3605 * run before gating, so any packets still inside the IPA HW are flushed.
3606 *
3607 * Return codes:
3608 * None
3609 */
3610void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
3611{
Amir Levy9659e592016-10-27 18:08:27 +03003612 ipa3_active_clients_log_dec(id, false);
Skylar Chang242952b2017-07-20 15:04:05 -07003613 __ipa3_dec_client_disable_clks();
3614}
3615
3616static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work)
3617{
3618 __ipa3_dec_client_disable_clks();
3619}
3620
3621/**
3622 * ipa3_dec_client_disable_clks_no_block() - Decrease active clients counter
3623 * if possible without blocking. If this is the last client, the decrease
3624 * will happen from work queue context.
3625 *
3626 * Return codes:
3627 * None
3628 */
3629void ipa3_dec_client_disable_clks_no_block(
3630 struct ipa_active_client_logging_info *id)
3631{
3632 int ret;
3633
3634 ipa3_active_clients_log_dec(id, true);
3635 ret = atomic_add_unless(&ipa3_ctx->ipa3_active_clients.cnt, -1, 1);
3636 if (ret) {
3637 IPADBG_LOW("active clients = %d\n",
3638 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
3639 return;
Amir Levy9659e592016-10-27 18:08:27 +03003640 }
Skylar Chang242952b2017-07-20 15:04:05 -07003641
3642 /* seems like this is the only client holding the clocks */
3643 queue_work(ipa3_ctx->power_mgmt_wq,
3644 &ipa_dec_clients_disable_clks_on_wq_work);
Amir Levy9659e592016-10-27 18:08:27 +03003645}
3646
3647/**
3648* ipa3_inc_acquire_wakelock() - Increase the wakelock reference counter, and
3649* acquire the wakelock if necessary
3650*
3651* Return codes:
3652* None
3653*/
3654void ipa3_inc_acquire_wakelock(void)
3655{
3656 unsigned long flags;
3657
3658 spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
3659 ipa3_ctx->wakelock_ref_cnt.cnt++;
3660 if (ipa3_ctx->wakelock_ref_cnt.cnt == 1)
3661 __pm_stay_awake(&ipa3_ctx->w_lock);
3662 IPADBG_LOW("active wakelock ref cnt = %d\n",
3663 ipa3_ctx->wakelock_ref_cnt.cnt);
3664 spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
3665}
3666
3667/**
3668 * ipa3_dec_release_wakelock() - Decrease the wakelock reference counter
3669 *
3670 * If the ref count reaches 0, release the wakelock.
3671 *
3672 * Return codes:
3673 * None
3674 */
3675void ipa3_dec_release_wakelock(void)
3676{
3677 unsigned long flags;
3678
3679 spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
3680 ipa3_ctx->wakelock_ref_cnt.cnt--;
3681 IPADBG_LOW("active wakelock ref cnt = %d\n",
3682 ipa3_ctx->wakelock_ref_cnt.cnt);
3683 if (ipa3_ctx->wakelock_ref_cnt.cnt == 0)
3684 __pm_relax(&ipa3_ctx->w_lock);
3685 spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
3686}
3687
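/**
 * ipa3_set_clock_plan_from_pm() - Set the IPA clock rate according to a bus
 * vote index, as used by the IPA PM framework.
 * @idx: bus usecase index (1 = SVS, 2 = NOMINAL, 3 = TURBO)
 *
 * Return codes: 0 on success, negative on failure
 */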
Michael Adisumartac06df412017-09-19 10:10:35 -07003688int ipa3_set_clock_plan_from_pm(int idx)
3689{
3690 u32 clk_rate;
3691
3692 IPADBG_LOW("idx = %d\n", idx);
3693
3694 if (idx <= 0 || idx >= ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases) {
3695 IPAERR("bad voltage\n");
3696 return -EINVAL;
3697 }
3698
3699 if (idx == 1)
3700 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs;
3701 else if (idx == 2)
3702 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_nominal;
3703 else if (idx == 3)
3704 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
3705 else {
3706 IPAERR("bad voltage\n");
3707 WARN_ON(1);
3708 return -EFAULT;
3709 }
3710
3711 if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) {
3712 IPADBG_LOW("Same voltage\n");
3713 return 0;
3714 }
3715
3716 mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
3717 ipa3_ctx->curr_ipa_clk_rate = clk_rate;
3718 ipa3_ctx->ipa3_active_clients.bus_vote_idx = idx;
3719 IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
3720 if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) > 0) {
3721 if (ipa3_clk)
3722 clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
3723 if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
3724 ipa3_get_bus_vote()))
3725 WARN_ON(1);
3726 } else {
3727 IPADBG_LOW("clocks are gated, not setting rate\n");
3728 }
3729 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
3730 IPADBG_LOW("Done\n");
3731
3732 return 0;
3733}
3734
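/**
 * ipa3_set_required_perf_profile() - Pick the IPA clock rate and bus vote
 * that satisfy both the requested voltage floor and the required bandwidth.
 * @floor_voltage: minimal voltage level the caller can tolerate
 * @bandwidth_mbps: required bandwidth, used when clock scaling is enabled
 *
 * Return codes: 0 on success, negative on failure
 */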
Amir Levy9659e592016-10-27 18:08:27 +03003735int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
3736 u32 bandwidth_mbps)
3737{
3738 enum ipa_voltage_level needed_voltage;
3739 u32 clk_rate;
3740
3741 IPADBG_LOW("floor_voltage=%d, bandwidth_mbps=%u",
3742 floor_voltage, bandwidth_mbps);
3743
3744 if (floor_voltage < IPA_VOLTAGE_UNSPECIFIED ||
3745 floor_voltage >= IPA_VOLTAGE_MAX) {
3746 IPAERR("bad voltage\n");
3747 return -EINVAL;
3748 }
3749
3750 if (ipa3_ctx->enable_clock_scaling) {
3751 IPADBG_LOW("Clock scaling is enabled\n");
3752 if (bandwidth_mbps >=
3753 ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo)
3754 needed_voltage = IPA_VOLTAGE_TURBO;
3755 else if (bandwidth_mbps >=
3756 ipa3_ctx->ctrl->clock_scaling_bw_threshold_nominal)
3757 needed_voltage = IPA_VOLTAGE_NOMINAL;
Skylar Chang448d8b82017-08-08 17:30:32 -07003758 else if (bandwidth_mbps >=
3759 ipa3_ctx->ctrl->clock_scaling_bw_threshold_svs)
Amir Levy9659e592016-10-27 18:08:27 +03003760 needed_voltage = IPA_VOLTAGE_SVS;
Skylar Chang448d8b82017-08-08 17:30:32 -07003761 else
3762 needed_voltage = IPA_VOLTAGE_SVS2;
Amir Levy9659e592016-10-27 18:08:27 +03003763 } else {
3764 IPADBG_LOW("Clock scaling is disabled\n");
3765 needed_voltage = IPA_VOLTAGE_NOMINAL;
3766 }
3767
3768 needed_voltage = max(needed_voltage, floor_voltage);
3769 switch (needed_voltage) {
Skylar Chang448d8b82017-08-08 17:30:32 -07003770 case IPA_VOLTAGE_SVS2:
3771 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs2;
3772 break;
Amir Levy9659e592016-10-27 18:08:27 +03003773 case IPA_VOLTAGE_SVS:
3774 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs;
3775 break;
3776 case IPA_VOLTAGE_NOMINAL:
3777 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_nominal;
3778 break;
3779 case IPA_VOLTAGE_TURBO:
3780 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
3781 break;
3782 default:
3783 IPAERR("bad voltage\n");
3784 WARN_ON(1);
3785 return -EFAULT;
3786 }
3787
3788 if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) {
3789 IPADBG_LOW("Same voltage\n");
3790 return 0;
3791 }
3792
Skylar Chang242952b2017-07-20 15:04:05 -07003793 /* Hold the mutex to avoid race conditions with ipa3_enable_clks() */
3794 mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
Amir Levy9659e592016-10-27 18:08:27 +03003795 ipa3_ctx->curr_ipa_clk_rate = clk_rate;
3796 IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
Skylar Chang242952b2017-07-20 15:04:05 -07003797 if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) > 0) {
Ghanim Fodi6a831342017-03-07 18:19:15 +02003798 if (ipa3_clk)
3799 clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
3800 if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
Skylar Chang242952b2017-07-20 15:04:05 -07003801 ipa3_get_bus_vote()))
Ghanim Fodi6a831342017-03-07 18:19:15 +02003802 WARN_ON(1);
Amir Levy9659e592016-10-27 18:08:27 +03003803 } else {
3804 IPADBG_LOW("clocks are gated, not setting rate\n");
3805 }
Skylar Chang242952b2017-07-20 15:04:05 -07003806 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
Amir Levy9659e592016-10-27 18:08:27 +03003807 IPADBG_LOW("Done\n");
Skylar Chang1cbe99c2017-05-01 13:44:03 -07003808
Amir Levy9659e592016-10-27 18:08:27 +03003809 return 0;
3810}
3811
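/*
 * ipa3_process_irq_schedule_rel() - Schedule the delayed work that releases
 * the transport resource vote taken while handling a suspend interrupt.
 */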
Amir Levya59ed3f2017-03-05 17:30:55 +02003812static void ipa3_process_irq_schedule_rel(void)
Amir Levy9659e592016-10-27 18:08:27 +03003813{
3814 queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq,
Amir Levya59ed3f2017-03-05 17:30:55 +02003815 &ipa3_transport_release_resource_work,
Amir Levy9659e592016-10-27 18:08:27 +03003816 msecs_to_jiffies(IPA_TRANSPORT_PROD_TIMEOUT_MSEC));
3817}
3818
3819/**
3820* ipa3_suspend_handler() - Handles the suspend interrupt:
3821* wakes up the suspended peripheral by requesting its consumer
3822* @interrupt: Interrupt type
3823* @private_data: The client's private data
3824* @interrupt_data: Interrupt specific information data
3825*/
3826void ipa3_suspend_handler(enum ipa_irq_type interrupt,
3827 void *private_data,
3828 void *interrupt_data)
3829{
3830 enum ipa_rm_resource_name resource;
3831 u32 suspend_data =
3832 ((struct ipa_tx_suspend_irq_data *)interrupt_data)->endpoints;
3833 u32 bmsk = 1;
3834 u32 i = 0;
3835 int res;
3836 struct ipa_ep_cfg_holb holb_cfg;
Michael Adisumarta3e350812017-09-18 14:54:36 -07003837 u32 pipe_bitmask = 0;
Amir Levy9659e592016-10-27 18:08:27 +03003838
3839 IPADBG("interrupt=%d, interrupt_data=%u\n",
3840 interrupt, suspend_data);
3841 memset(&holb_cfg, 0, sizeof(holb_cfg));
3842 holb_cfg.tmr_val = 0;
3843
Michael Adisumarta3e350812017-09-18 14:54:36 -07003844 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++, bmsk = bmsk << 1) {
Amir Levy9659e592016-10-27 18:08:27 +03003845 if ((suspend_data & bmsk) && (ipa3_ctx->ep[i].valid)) {
Michael Adisumarta3e350812017-09-18 14:54:36 -07003846 if (ipa3_ctx->use_ipa_pm) {
3847 pipe_bitmask |= bmsk;
3848 continue;
3849 }
Amir Levy9659e592016-10-27 18:08:27 +03003850 if (IPA_CLIENT_IS_APPS_CONS(ipa3_ctx->ep[i].client)) {
3851 /*
3852 * pipe will be unsuspended as part of
3853 * enabling IPA clocks
3854 */
Skylar Chang0d06bb12017-02-24 11:22:03 -08003855 mutex_lock(&ipa3_ctx->transport_pm.
3856 transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03003857 if (!atomic_read(
3858 &ipa3_ctx->transport_pm.dec_clients)
3859 ) {
3860 IPA_ACTIVE_CLIENTS_INC_EP(
3861 ipa3_ctx->ep[i].client);
3862 IPADBG_LOW("Pipes un-suspended.\n");
3863 IPADBG_LOW("Enter poll mode.\n");
3864 atomic_set(
3865 &ipa3_ctx->transport_pm.dec_clients,
3866 1);
Amir Levya59ed3f2017-03-05 17:30:55 +02003867 ipa3_process_irq_schedule_rel();
Amir Levy9659e592016-10-27 18:08:27 +03003868 }
Skylar Chang0d06bb12017-02-24 11:22:03 -08003869 mutex_unlock(&ipa3_ctx->transport_pm.
3870 transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03003871 } else {
3872 resource = ipa3_get_rm_resource_from_ep(i);
3873 res =
3874 ipa_rm_request_resource_with_timer(resource);
3875 if (res == -EPERM &&
3876 IPA_CLIENT_IS_CONS(
3877 ipa3_ctx->ep[i].client)) {
3878 holb_cfg.en = 1;
3879 res = ipa3_cfg_ep_holb_by_client(
3880 ipa3_ctx->ep[i].client, &holb_cfg);
3881 if (res) {
3882 IPAERR("holb en fail, stall\n");
3883 BUG();
3884 }
3885 }
3886 }
3887 }
Michael Adisumarta3e350812017-09-18 14:54:36 -07003888 }
3889 if (ipa3_ctx->use_ipa_pm) {
3890 res = ipa_pm_handle_suspend(pipe_bitmask);
3891 if (res) {
3892 IPAERR("ipa_pm_handle_suspend failed %d\n", res);
3893 return;
3894 }
Amir Levy9659e592016-10-27 18:08:27 +03003895 }
3896}
3897
3898/**
3899* ipa3_restore_suspend_handler() - restores the original suspend IRQ handler
3900* as it was registered in the IPA init sequence.
3901* Return codes:
3902* 0: success
3903* -EPERM: failed to remove current handler or failed to add original handler
3904*/
3905int ipa3_restore_suspend_handler(void)
3906{
3907 int result = 0;
3908
3909 result = ipa3_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ);
3910 if (result) {
3911 IPAERR("remove handler for suspend interrupt failed\n");
3912 return -EPERM;
3913 }
3914
3915 result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
3916 ipa3_suspend_handler, false, NULL);
3917 if (result) {
3918 IPAERR("register handler for suspend interrupt failed\n");
3919 result = -EPERM;
3920 }
3921
3922 IPADBG("suspend handler successfully restored\n");
3923
3924 return result;
3925}
3926
3927static int ipa3_apps_cons_release_resource(void)
3928{
3929 return 0;
3930}
3931
3932static int ipa3_apps_cons_request_resource(void)
3933{
3934 return 0;
3935}
3936
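/*
 * ipa3_transport_release_resource() - Work function that drops the transport
 * resource vote once no EOT activity is pending; otherwise it re-schedules
 * itself.
 */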
Amir Levya59ed3f2017-03-05 17:30:55 +02003937static void ipa3_transport_release_resource(struct work_struct *work)
Amir Levy9659e592016-10-27 18:08:27 +03003938{
Sridhar Ancha99b505b2016-04-21 23:11:10 +05303939 mutex_lock(&ipa3_ctx->transport_pm.transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03003940 /* check whether still need to decrease client usage */
3941 if (atomic_read(&ipa3_ctx->transport_pm.dec_clients)) {
3942 if (atomic_read(&ipa3_ctx->transport_pm.eot_activity)) {
3943 IPADBG("EOT pending Re-scheduling\n");
Amir Levya59ed3f2017-03-05 17:30:55 +02003944 ipa3_process_irq_schedule_rel();
Amir Levy9659e592016-10-27 18:08:27 +03003945 } else {
3946 atomic_set(&ipa3_ctx->transport_pm.dec_clients, 0);
Amir Levya59ed3f2017-03-05 17:30:55 +02003947 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TRANSPORT_RESOURCE");
Amir Levy9659e592016-10-27 18:08:27 +03003948 }
3949 }
3950 atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
Sridhar Ancha99b505b2016-04-21 23:11:10 +05303951 mutex_unlock(&ipa3_ctx->transport_pm.transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03003952}
3953
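/**
 * ipa3_create_apps_resource() - Create the APPS_CONS IPA RM resource and set
 * its performance profile.
 *
 * Return codes: 0 on success, negative on failure
 */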
3954int ipa3_create_apps_resource(void)
3955{
3956 struct ipa_rm_create_params apps_cons_create_params;
3957 struct ipa_rm_perf_profile profile;
3958 int result = 0;
3959
3960 memset(&apps_cons_create_params, 0,
3961 sizeof(apps_cons_create_params));
3962 apps_cons_create_params.name = IPA_RM_RESOURCE_APPS_CONS;
3963 apps_cons_create_params.request_resource =
3964 ipa3_apps_cons_request_resource;
3965 apps_cons_create_params.release_resource =
3966 ipa3_apps_cons_release_resource;
3967 result = ipa_rm_create_resource(&apps_cons_create_params);
3968 if (result) {
3969 IPAERR("ipa_rm_create_resource failed\n");
3970 return result;
3971 }
3972
3973 profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
3974 ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile);
3975
3976 return result;
3977}
3978
3979/**
3980 * ipa3_init_interrupts() - Register to IPA IRQs
3981 *
3982 * Return codes: 0 on success, negative on failure
3983 *
3984 */
3985int ipa3_init_interrupts(void)
3986{
3987 int result;
3988
3989 /*register IPA IRQ handler*/
3990 result = ipa3_interrupts_init(ipa3_res.ipa_irq, 0,
3991 master_dev);
3992 if (result) {
3993 IPAERR("ipa interrupts initialization failed\n");
3994 return -ENODEV;
3995 }
3996
3997 /*add handler for suspend interrupt*/
3998 result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
3999 ipa3_suspend_handler, false, NULL);
4000 if (result) {
4001 IPAERR("register handler for suspend interrupt failed\n");
4002 result = -ENODEV;
4003 goto fail_add_interrupt_handler;
4004 }
4005
4006 return 0;
4007
4008fail_add_interrupt_handler:
4009 free_irq(ipa3_res.ipa_irq, master_dev);
4010 return result;
4011}
4012
4013/**
4014 * ipa3_destroy_flt_tbl_idrs() - destroy the idr structure for flt tables
4015 * The idr structure per filtering table is intended for rule id generation
4016 * per filtering rule.
4017 */
4018static void ipa3_destroy_flt_tbl_idrs(void)
4019{
4020 int i;
4021 struct ipa3_flt_tbl *flt_tbl;
4022
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004023 idr_destroy(&ipa3_ctx->flt_rule_ids[IPA_IP_v4]);
4024 idr_destroy(&ipa3_ctx->flt_rule_ids[IPA_IP_v6]);
4025
Amir Levy9659e592016-10-27 18:08:27 +03004026 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
4027 if (!ipa_is_ep_support_flt(i))
4028 continue;
4029
4030 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004031 flt_tbl->rule_ids = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03004032 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004033 flt_tbl->rule_ids = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03004034 }
4035}
4036
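/*
 * ipa3_freeze_clock_vote_and_notify_modem() - On panic, freeze the current
 * IPA clock vote and signal the clock state to the modem over the smp2p
 * output GPIOs.
 */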
4037static void ipa3_freeze_clock_vote_and_notify_modem(void)
4038{
4039 int res;
Amir Levy9659e592016-10-27 18:08:27 +03004040 struct ipa_active_client_logging_info log_info;
4041
4042 if (ipa3_ctx->smp2p_info.res_sent)
4043 return;
4044
Skylar Change1209942017-02-02 14:26:38 -08004045 if (ipa3_ctx->smp2p_info.out_base_id == 0) {
4046 IPAERR("smp2p out gpio not assigned\n");
4047 return;
4048 }
4049
Amir Levy9659e592016-10-27 18:08:27 +03004050 IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "FREEZE_VOTE");
4051 res = ipa3_inc_client_enable_clks_no_block(&log_info);
4052 if (res)
Skylar Change1209942017-02-02 14:26:38 -08004053 ipa3_ctx->smp2p_info.ipa_clk_on = false;
Amir Levy9659e592016-10-27 18:08:27 +03004054 else
Skylar Change1209942017-02-02 14:26:38 -08004055 ipa3_ctx->smp2p_info.ipa_clk_on = true;
Amir Levy9659e592016-10-27 18:08:27 +03004056
Skylar Change1209942017-02-02 14:26:38 -08004057 gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
4058 IPA_GPIO_OUT_CLK_VOTE_IDX,
4059 ipa3_ctx->smp2p_info.ipa_clk_on);
4060 gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
4061 IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX, 1);
Amir Levy9659e592016-10-27 18:08:27 +03004062
Skylar Change1209942017-02-02 14:26:38 -08004063 ipa3_ctx->smp2p_info.res_sent = true;
4064 IPADBG("IPA clocks are %s\n",
4065 ipa3_ctx->smp2p_info.ipa_clk_on ? "ON" : "OFF");
4066}
4067
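/*
 * ipa3_reset_freeze_vote() - Undo a previously sent freeze vote: release the
 * clock vote if it was taken and clear the smp2p GPIO indications.
 */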
4068void ipa3_reset_freeze_vote(void)
4069{
4070 if (ipa3_ctx->smp2p_info.res_sent == false)
4071 return;
4072
4073 if (ipa3_ctx->smp2p_info.ipa_clk_on)
4074 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("FREEZE_VOTE");
4075
4076 gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
4077 IPA_GPIO_OUT_CLK_VOTE_IDX, 0);
4078 gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
4079 IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX, 0);
4080
4081 ipa3_ctx->smp2p_info.res_sent = false;
4082 ipa3_ctx->smp2p_info.ipa_clk_on = false;
Amir Levy9659e592016-10-27 18:08:27 +03004083}
4084
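/* Panic notifier: freeze the clock vote and invoke the uC panic handler */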
4085static int ipa3_panic_notifier(struct notifier_block *this,
4086 unsigned long event, void *ptr)
4087{
4088 int res;
4089
4090 ipa3_freeze_clock_vote_and_notify_modem();
4091
4092 IPADBG("Calling uC panic handler\n");
4093 res = ipa3_uc_panic_notifier(this, event, ptr);
4094 if (res)
4095 IPAERR("uC panic handler failed %d\n", res);
4096
4097 return NOTIFY_DONE;
4098}
4099
4100static struct notifier_block ipa3_panic_blk = {
4101 .notifier_call = ipa3_panic_notifier,
4102 /* IPA panic handler needs to run before modem shuts down */
4103 .priority = INT_MAX,
4104};
4105
4106static void ipa3_register_panic_hdlr(void)
4107{
4108 atomic_notifier_chain_register(&panic_notifier_list,
4109 &ipa3_panic_blk);
4110}
4111
4112static void ipa3_trigger_ipa_ready_cbs(void)
4113{
4114 struct ipa3_ready_cb_info *info;
4115
4116 mutex_lock(&ipa3_ctx->lock);
4117
4118 /* Call all the CBs */
4119 list_for_each_entry(info, &ipa3_ctx->ipa_ready_cb_list, link)
4120 if (info->ready_cb)
4121 info->ready_cb(info->user_data);
4122
4123 mutex_unlock(&ipa3_ctx->lock);
4124}
4125
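/* Configure the GSI registers that must be set up before GSI FW is loaded */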
4126static int ipa3_gsi_pre_fw_load_init(void)
4127{
4128 int result;
4129
4130 result = gsi_configure_regs(ipa3_res.transport_mem_base,
4131 ipa3_res.transport_mem_size,
4132 ipa3_res.ipa_mem_base);
4133 if (result) {
4134 IPAERR("Failed to configure GSI registers\n");
4135 return -EINVAL;
4136 }
4137
4138 return 0;
4139}
4140
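/* uC loaded callback: wake up everyone waiting for the uC to be ready */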
Skylar Chang0c17c7d2016-10-31 09:57:54 -07004141static void ipa3_uc_is_loaded(void)
4142{
4143 IPADBG("\n");
4144 complete_all(&ipa3_ctx->uc_loaded_completion_obj);
4145}
4146
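/* Map the IPA HW version to the matching GSI core version */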
Amir Levy41644242016-11-03 15:38:09 +02004147static enum gsi_ver ipa3_get_gsi_ver(enum ipa_hw_type ipa_hw_type)
4148{
4149 enum gsi_ver gsi_ver;
4150
4151 switch (ipa_hw_type) {
4152 case IPA_HW_v3_0:
4153 case IPA_HW_v3_1:
4154 gsi_ver = GSI_VER_1_0;
4155 break;
4156 case IPA_HW_v3_5:
4157 gsi_ver = GSI_VER_1_2;
4158 break;
4159 case IPA_HW_v3_5_1:
4160 gsi_ver = GSI_VER_1_3;
4161 break;
Michael Adisumarta891a4ff2017-05-16 16:40:06 -07004162 case IPA_HW_v4_0:
4163 gsi_ver = GSI_VER_2_0;
4164 break;
Amir Levy41644242016-11-03 15:38:09 +02004165 default:
4166 IPAERR("No GSI version for ipa type %d\n", ipa_hw_type);
4167 WARN_ON(1);
4168 gsi_ver = GSI_VER_ERR;
4169 }
4170
4171 IPADBG("GSI version %d\n", gsi_ver);
4172
4173 return gsi_ver;
4174}
4175
Amir Levy9659e592016-10-27 18:08:27 +03004176/**
4177 * ipa3_post_init() - Initialize the IPA Driver (Part II).
4178 * This part contains all initialization which requires interaction with
Amir Levya59ed3f2017-03-05 17:30:55 +02004179 * IPA HW (via GSI).
Amir Levy9659e592016-10-27 18:08:27 +03004180 *
4181 * @resource_p: contains platform-specific values from the DTS file
4182 * @ipa_dev: the device structure representing the IPA driver
4183 *
4184 * Function initialization process:
Amir Levy54fe4d32017-03-16 11:21:49 +02004185 * - Initialize endpoints bitmaps
4186 * - Initialize resource groups min and max values
4187 * - Initialize filtering lists heads and idr
4188 * - Initialize interrupts
Amir Levya59ed3f2017-03-05 17:30:55 +02004189 * - Register GSI
Amir Levy9659e592016-10-27 18:08:27 +03004190 * - Setup APPS pipes
4191 * - Initialize tethering bridge
4192 * - Initialize IPA debugfs
4193 * - Initialize IPA uC interface
4194 * - Initialize WDI interface
4195 * - Initialize USB interface
4196 * - Register for panic handler
4197 * - Trigger IPA ready callbacks (to all subscribers)
4198 * - Trigger IPA completion object (to all who wait on it)
4199 */
4200static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
4201 struct device *ipa_dev)
4202{
4203 int result;
Amir Levy9659e592016-10-27 18:08:27 +03004204 struct gsi_per_props gsi_props;
Skylar Chang0c17c7d2016-10-31 09:57:54 -07004205 struct ipa3_uc_hdlrs uc_hdlrs = { 0 };
Amir Levy54fe4d32017-03-16 11:21:49 +02004206 struct ipa3_flt_tbl *flt_tbl;
4207 int i;
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004208 struct idr *idr;
Amir Levy54fe4d32017-03-16 11:21:49 +02004209
Utkarsh Saxenaded78142017-05-03 14:04:30 +05304210 if (ipa3_ctx == NULL) {
4211 IPADBG("IPA driver hasn't been initialized\n");
4212 return -ENXIO;
4213 }
4214
4215 /* Prevent subsequent calls from trying to load the FW again. */
4216 if (ipa3_ctx->ipa_initialization_complete)
4217 return 0;
4218
Amir Levy54fe4d32017-03-16 11:21:49 +02004219 /*
4220 * The indication of whether we are working in MHI or non-MHI config is
4221 * given in ipa3_write(), which runs before ipa3_post_init(). i.e. from
4222 * this point it is safe to use the ipa3_ep_mapping array and the correct
4223 * entry will be returned by ipa3_get_hw_type_index()
4224 */
4225 ipa_init_ep_flt_bitmap();
4226 IPADBG("EP with flt support bitmap 0x%x (%u pipes)\n",
4227 ipa3_ctx->ep_flt_bitmap, ipa3_ctx->ep_flt_num);
4228
4229 /* Assign resource limitation to each group */
4230 ipa3_set_resorce_groups_min_max_limits();
4231
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004232 idr = &(ipa3_ctx->flt_rule_ids[IPA_IP_v4]);
4233 idr_init(idr);
4234 idr = &(ipa3_ctx->flt_rule_ids[IPA_IP_v6]);
4235 idr_init(idr);
4236
Amir Levy54fe4d32017-03-16 11:21:49 +02004237 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
4238 if (!ipa_is_ep_support_flt(i))
4239 continue;
4240
4241 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
4242 INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
4243 flt_tbl->in_sys[IPA_RULE_HASHABLE] =
4244 !ipa3_ctx->ip4_flt_tbl_hash_lcl;
4245 flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
4246 !ipa3_ctx->ip4_flt_tbl_nhash_lcl;
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004247 flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[IPA_IP_v4];
Amir Levy54fe4d32017-03-16 11:21:49 +02004248
4249 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
4250 INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
4251 flt_tbl->in_sys[IPA_RULE_HASHABLE] =
4252 !ipa3_ctx->ip6_flt_tbl_hash_lcl;
4253 flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
4254 !ipa3_ctx->ip6_flt_tbl_nhash_lcl;
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004255 flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[IPA_IP_v6];
Amir Levy54fe4d32017-03-16 11:21:49 +02004256 }
4257
4258 if (!ipa3_ctx->apply_rg10_wa) {
4259 result = ipa3_init_interrupts();
4260 if (result) {
4261 IPAERR("ipa initialization of interrupts failed\n");
4262 result = -ENODEV;
4263 goto fail_register_device;
4264 }
4265 } else {
4266 IPADBG("Initialization of ipa interrupts skipped\n");
4267 }
Amir Levy9659e592016-10-27 18:08:27 +03004268
Amir Levy3afd94a2017-01-05 10:19:13 +02004269 /*
Amir Levy5cfbb322017-01-09 14:53:02 +02004270 * IPAv3.5 and above requires disabling prefetch for USB in order
4271 * to allow MBIM to work; currently MBIM is not needed in MHI mode.
Amir Levy3afd94a2017-01-05 10:19:13 +02004272 */
Michael Adisumartad68ab112017-06-14 11:40:06 -07004273 if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5
4274 && ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) &&
Amir Levy5cfbb322017-01-09 14:53:02 +02004275 (!ipa3_ctx->ipa_config_is_mhi))
Amir Levy3afd94a2017-01-05 10:19:13 +02004276 ipa3_disable_prefetch(IPA_CLIENT_USB_CONS);
4277
Amir Levya59ed3f2017-03-05 17:30:55 +02004278 memset(&gsi_props, 0, sizeof(gsi_props));
4279 gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type);
4280 gsi_props.ee = resource_p->ee;
4281 gsi_props.intr = GSI_INTR_IRQ;
4282 gsi_props.irq = resource_p->transport_irq;
4283 gsi_props.phys_addr = resource_p->transport_mem_base;
4284 gsi_props.size = resource_p->transport_mem_size;
4285 gsi_props.notify_cb = ipa_gsi_notify_cb;
4286 gsi_props.req_clk_cb = NULL;
4287 gsi_props.rel_clk_cb = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03004288
Amir Levya59ed3f2017-03-05 17:30:55 +02004289 result = gsi_register_device(&gsi_props,
4290 &ipa3_ctx->gsi_dev_hdl);
4291 if (result != GSI_STATUS_SUCCESS) {
4292 IPAERR(":gsi register error - %d\n", result);
4293 result = -ENODEV;
4294 goto fail_register_device;
Amir Levy9659e592016-10-27 18:08:27 +03004295 }
Amir Levya59ed3f2017-03-05 17:30:55 +02004296 IPADBG("IPA gsi is registered\n");
Amir Levy9659e592016-10-27 18:08:27 +03004297
4298 /* setup the AP-IPA pipes */
4299 if (ipa3_setup_apps_pipes()) {
4300 IPAERR(":failed to setup IPA-Apps pipes\n");
4301 result = -ENODEV;
4302 goto fail_setup_apps_pipes;
4303 }
Amir Levya59ed3f2017-03-05 17:30:55 +02004304 IPADBG("IPA GPI pipes were connected\n");
Amir Levy9659e592016-10-27 18:08:27 +03004305
4306 if (ipa3_ctx->use_ipa_teth_bridge) {
4307 /* Initialize the tethering bridge driver */
4308 result = ipa3_teth_bridge_driver_init();
4309 if (result) {
4310 IPAERR(":teth_bridge init failed (%d)\n", -result);
4311 result = -ENODEV;
4312 goto fail_teth_bridge_driver_init;
4313 }
4314 IPADBG("teth_bridge initialized");
4315 }
4316
4317 ipa3_debugfs_init();
4318
4319 result = ipa3_uc_interface_init();
4320 if (result)
4321 IPAERR(":ipa Uc interface init failed (%d)\n", -result);
4322 else
4323 IPADBG(":ipa Uc interface init ok\n");
4324
Skylar Chang0c17c7d2016-10-31 09:57:54 -07004325 uc_hdlrs.ipa_uc_loaded_hdlr = ipa3_uc_is_loaded;
4326 ipa3_uc_register_handlers(IPA_HW_FEATURE_COMMON, &uc_hdlrs);
4327
Amir Levy9659e592016-10-27 18:08:27 +03004328 result = ipa3_wdi_init();
4329 if (result)
4330 IPAERR(":wdi init failed (%d)\n", -result);
4331 else
4332 IPADBG(":wdi init ok\n");
4333
4334 result = ipa3_ntn_init();
4335 if (result)
4336 IPAERR(":ntn init failed (%d)\n", -result);
4337 else
4338 IPADBG(":ntn init ok\n");
4339
Skylar Chang6f6e3072017-07-28 10:03:47 -07004340 result = ipa_hw_stats_init();
4341 if (result)
4342 IPAERR("fail to init stats %d\n", result);
4343 else
4344 IPADBG(":stats init ok\n");
4345
Amir Levy9659e592016-10-27 18:08:27 +03004346 ipa3_register_panic_hdlr();
4347
4348 ipa3_ctx->q6_proxy_clk_vote_valid = true;
4349
4350 mutex_lock(&ipa3_ctx->lock);
4351 ipa3_ctx->ipa_initialization_complete = true;
4352 mutex_unlock(&ipa3_ctx->lock);
4353
4354 ipa3_trigger_ipa_ready_cbs();
4355 complete_all(&ipa3_ctx->init_completion_obj);
4356 pr_info("IPA driver initialization was successful.\n");
4357
4358 return 0;
4359
4360fail_teth_bridge_driver_init:
4361 ipa3_teardown_apps_pipes();
4362fail_setup_apps_pipes:
Amir Levya59ed3f2017-03-05 17:30:55 +02004363 gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
Amir Levy9659e592016-10-27 18:08:27 +03004364fail_register_device:
Amir Levy9659e592016-10-27 18:08:27 +03004365 ipa3_destroy_flt_tbl_idrs();
Amir Levy9659e592016-10-27 18:08:27 +03004366 return result;
4367}
4368
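/*
 * ipa3_manual_load_ipa_fws() - Load the IPA FWs from the file system and
 * enable GSI FW directly from the AP (used when PIL is not used).
 */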
Ghanim Fodi03dcc272017-08-08 18:13:25 +03004369static int ipa3_manual_load_ipa_fws(void)
Amir Levy9659e592016-10-27 18:08:27 +03004370{
4371 int result;
4372 const struct firmware *fw;
4373
Ghanim Fodi03dcc272017-08-08 18:13:25 +03004374 IPADBG("Manual FW loading process initiated\n");
Amir Levy9659e592016-10-27 18:08:27 +03004375
4376 result = request_firmware(&fw, IPA_FWS_PATH, ipa3_ctx->dev);
4377 if (result < 0) {
4378 IPAERR("request_firmware failed, error %d\n", result);
4379 return result;
4380 }
4381 if (fw == NULL) {
4382 IPAERR("Firmware is NULL!\n");
4383 return -EINVAL;
4384 }
4385
4386 IPADBG("FWs are available for loading\n");
4387
Ghanim Fodi37b64952017-01-24 15:42:30 +02004388 result = ipa3_load_fws(fw, ipa3_res.transport_mem_base);
Amir Levy9659e592016-10-27 18:08:27 +03004389 if (result) {
Ghanim Fodi03dcc272017-08-08 18:13:25 +03004390 IPAERR("Manual IPA FWs loading has failed\n");
Amir Levy9659e592016-10-27 18:08:27 +03004391 release_firmware(fw);
4392 return result;
4393 }
4394
4395 result = gsi_enable_fw(ipa3_res.transport_mem_base,
Amir Levy85dcd172016-12-06 17:47:39 +02004396 ipa3_res.transport_mem_size,
4397 ipa3_get_gsi_ver(ipa3_res.ipa_hw_type));
Amir Levy9659e592016-10-27 18:08:27 +03004398 if (result) {
4399 IPAERR("Failed to enable GSI FW\n");
4400 release_firmware(fw);
4401 return result;
4402 }
4403
4404 release_firmware(fw);
4405
Ghanim Fodi03dcc272017-08-08 18:13:25 +03004406 IPADBG("Manual FW loading process is complete\n");
Amir Levy9659e592016-10-27 18:08:27 +03004407 return 0;
4408}
4409
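/* Trigger loading of the IPA FWs through the PIL framework (subsystem_get) */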
Ghanim Fodi03dcc272017-08-08 18:13:25 +03004410static int ipa3_pil_load_ipa_fws(void)
Amir Levy9659e592016-10-27 18:08:27 +03004411{
4412 void *subsystem_get_retval = NULL;
4413
Ghanim Fodi03dcc272017-08-08 18:13:25 +03004414 IPADBG("PIL FW loading process initiated\n");
Amir Levy9659e592016-10-27 18:08:27 +03004415
4416 subsystem_get_retval = subsystem_get(IPA_SUBSYSTEM_NAME);
4417 if (IS_ERR_OR_NULL(subsystem_get_retval)) {
4418 IPAERR("Unable to trigger PIL process for FW loading\n");
4419 return -EINVAL;
4420 }
4421
Ghanim Fodi03dcc272017-08-08 18:13:25 +03004422 IPADBG("PIL FW loading process is complete\n");
Amir Levy9659e592016-10-27 18:08:27 +03004423 return 0;
4424}
4425
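/*
 * ipa3_load_ipa_fw() - Work function that loads the IPA FWs (via PIL or
 * manually, depending on the platform and HW version) and then runs
 * ipa3_post_init().
 */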
Ghanim Fodia5f376a2017-10-17 18:14:53 +03004426static void ipa3_load_ipa_fw(struct work_struct *work)
4427{
4428 int result;
4429
4430 IPADBG("Entry\n");
4431
4432 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
4433
4434 if (ipa3_is_msm_device() || (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5))
4435 result = ipa3_pil_load_ipa_fws();
4436 else
4437 result = ipa3_manual_load_ipa_fws();
4438
4439 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
4440
4441 if (result) {
4442 IPAERR("IPA FW loading process has failed\n");
4443 return;
4444 }
4445 pr_info("IPA FW loaded successfully\n");
4446
4447 result = ipa3_post_init(&ipa3_res, ipa3_ctx->dev);
4448 if (result)
4449 IPAERR("IPA post init failed %d\n", result);
4450}
4451
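/*
 * ipa3_write() - Write handler used to kick IPA FW loading; parses the user
 * string (e.g. "MHI" on MDM targets) and schedules the FW loading work.
 */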
Amir Levy9659e592016-10-27 18:08:27 +03004452static ssize_t ipa3_write(struct file *file, const char __user *buf,
4453 size_t count, loff_t *ppos)
4454{
4455 unsigned long missing;
Amir Levy9659e592016-10-27 18:08:27 +03004456
4457 char dbg_buff[16] = { 0 };
4458
4459 if (sizeof(dbg_buff) < count + 1)
4460 return -EFAULT;
4461
4462 missing = copy_from_user(dbg_buff, buf, count);
4463
4464 if (missing) {
4465 IPAERR("Unable to copy data from user\n");
4466 return -EFAULT;
4467 }
4468
Mohammed Javidbf4c8022017-08-07 23:15:48 +05304469 if (count > 0)
4470 dbg_buff[count - 1] = '\0';
4471
Amir Levy9659e592016-10-27 18:08:27 +03004472 /* Prevent subsequent calls from trying to load the FW again. */
4473 if (ipa3_is_ready())
4474 return count;
4475
Ghanim Fodi03dcc272017-08-08 18:13:25 +03004476 /* Check MHI configuration on MDM devices */
4477 if (!ipa3_is_msm_device()) {
Amir Levy54fe4d32017-03-16 11:21:49 +02004478 if (!strcasecmp(dbg_buff, "MHI")) {
4479 ipa3_ctx->ipa_config_is_mhi = true;
4480 pr_info(
4481 "IPA is loading with MHI configuration\n");
4482 } else {
4483 pr_info(
4484 "IPA is loading with non MHI configuration\n");
4485 }
Amir Levy54fe4d32017-03-16 11:21:49 +02004486 }
Ghanim Fodi03dcc272017-08-08 18:13:25 +03004487
Ghanim Fodi03dcc272017-08-08 18:13:25 +03004488 queue_work(ipa3_ctx->transport_power_mgmt_wq,
Ghanim Fodia5f376a2017-10-17 18:14:53 +03004489 &ipa3_fw_loading_work);
Ghanim Fodi03dcc272017-08-08 18:13:25 +03004490
Ghanim Fodia5f376a2017-10-17 18:14:53 +03004491 IPADBG("Scheduled a work to load IPA FW\n");
Amir Levy9659e592016-10-27 18:08:27 +03004492 return count;
4493}
4494
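/*
 * ipa3_tz_unlock_reg() - Ask TZ, via an SCM call, to unlock the configured
 * IPA register regions so that the uC can access them.
 */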
Gidon Studinski3021a6f2016-11-10 12:48:48 +02004495static int ipa3_tz_unlock_reg(struct ipa3_context *ipa3_ctx)
4496{
4497 int i, size, ret, resp;
4498 struct tz_smmu_ipa_protect_region_iovec_s *ipa_tz_unlock_vec;
4499 struct tz_smmu_ipa_protect_region_s cmd_buf;
4500
4501 if (ipa3_ctx && ipa3_ctx->ipa_tz_unlock_reg_num > 0) {
4502 size = ipa3_ctx->ipa_tz_unlock_reg_num *
4503 sizeof(struct tz_smmu_ipa_protect_region_iovec_s);
4504 ipa_tz_unlock_vec = kzalloc(PAGE_ALIGN(size), GFP_KERNEL);
4505 if (ipa_tz_unlock_vec == NULL)
4506 return -ENOMEM;
4507
4508 for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) {
4509 ipa_tz_unlock_vec[i].input_addr =
4510 ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr ^
4511 (ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr &
4512 0xFFF);
4513 ipa_tz_unlock_vec[i].output_addr =
4514 ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr ^
4515 (ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr &
4516 0xFFF);
4517 ipa_tz_unlock_vec[i].size =
4518 ipa3_ctx->ipa_tz_unlock_reg[i].size;
4519 ipa_tz_unlock_vec[i].attr = IPA_TZ_UNLOCK_ATTRIBUTE;
4520 }
4521
4522 /* pass physical address of command buffer */
4523 cmd_buf.iovec_buf = virt_to_phys((void *)ipa_tz_unlock_vec);
4524 cmd_buf.size_bytes = size;
4525
4526 /* flush cache to DDR */
4527 __cpuc_flush_dcache_area((void *)ipa_tz_unlock_vec, size);
4528 outer_flush_range(cmd_buf.iovec_buf, cmd_buf.iovec_buf + size);
4529
4530 ret = scm_call(SCM_SVC_MP, TZ_MEM_PROTECT_REGION_ID, &cmd_buf,
4531 sizeof(cmd_buf), &resp, sizeof(resp));
4532 if (ret) {
4533 IPAERR("scm call SCM_SVC_MP failed: %d\n", ret);
4534 kfree(ipa_tz_unlock_vec);
4535 return -EFAULT;
4536 }
4537 kfree(ipa_tz_unlock_vec);
4538 }
4539 return 0;
4540}
4541
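/*
 * ipa3_alloc_pkt_init() - Pre-build an IP_PACKET_INIT immediate command per
 * pipe in a DMA-coherent buffer so the data path can reuse them at runtime.
 */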
Skylar Changcd3902d2017-03-27 18:08:27 -07004542static int ipa3_alloc_pkt_init(void)
4543{
4544 struct ipa_mem_buffer mem;
4545 struct ipahal_imm_cmd_pyld *cmd_pyld;
4546 struct ipahal_imm_cmd_ip_packet_init cmd = {0};
4547 int i;
4548
4549 cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
4550 &cmd, false);
4551 if (!cmd_pyld) {
4552 IPAERR("failed to construct IMM cmd\n");
4553 return -ENOMEM;
4554 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -07004555 ipa3_ctx->pkt_init_imm_opcode = cmd_pyld->opcode;
Skylar Changcd3902d2017-03-27 18:08:27 -07004556
4557 mem.size = cmd_pyld->len * ipa3_ctx->ipa_num_pipes;
4558 mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
4559 &mem.phys_base, GFP_KERNEL);
4560 if (!mem.base) {
4561 IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
4562 ipahal_destroy_imm_cmd(cmd_pyld);
4563 return -ENOMEM;
4564 }
4565 ipahal_destroy_imm_cmd(cmd_pyld);
4566
4567 memset(mem.base, 0, mem.size);
4568 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
4569 cmd.destination_pipe_index = i;
4570 cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
4571 &cmd, false);
4572 if (!cmd_pyld) {
4573 IPAERR("failed to construct IMM cmd\n");
4574 dma_free_coherent(ipa3_ctx->pdev,
4575 mem.size,
4576 mem.base,
4577 mem.phys_base);
4578 return -ENOMEM;
4579 }
4580 memcpy(mem.base + i * cmd_pyld->len, cmd_pyld->data,
4581 cmd_pyld->len);
4582 ipa3_ctx->pkt_init_imm[i] = mem.phys_base + i * cmd_pyld->len;
4583 ipahal_destroy_imm_cmd(cmd_pyld);
4584 }
4585
4586 return 0;
4587}
4588
Amir Levy9659e592016-10-27 18:08:27 +03004589/**
4590* ipa3_pre_init() - Initialize the IPA Driver.
4591* This part contains all initialization which doesn't require IPA HW, such
4592* as structure allocations and initializations, register writes, etc.
4593*
4594* @resource_p: contains platform-specific values from the DTS file
4595* @ipa_dev: the device structure representing the IPA driver
4596*
4597* Function initialization process:
Amir Levy54fe4d32017-03-16 11:21:49 +02004598* Allocate memory for the driver context data struct
4599* Initializing the ipa3_ctx with :
Amir Levy9659e592016-10-27 18:08:27 +03004600* 1)parsed values from the dts file
4601* 2)parameters passed to the module initialization
4602* 3)read HW values(such as core memory size)
Amir Levy54fe4d32017-03-16 11:21:49 +02004603* Map IPA core registers to CPU memory
4604* Restart IPA core(HW reset)
4605* Initialize the look-aside caches(kmem_cache/slab) for filter,
Amir Levy9659e592016-10-27 18:08:27 +03004606* routing and IPA-tree
Amir Levy54fe4d32017-03-16 11:21:49 +02004607* Create memory pool with 4 objects for DMA operations(each object
Amir Levy9659e592016-10-27 18:08:27 +03004608* is 512 Bytes long); these objects will be used for tx(A5->IPA)
Amir Levy54fe4d32017-03-16 11:21:49 +02004609* Initialize lists head(routing, hdr, system pipes)
4610* Initialize mutexes (for ipa_ctx and NAT memory mutexes)
4611* Initialize spinlocks (for list related to A5<->IPA pipes)
4612* Initialize 2 single-threaded work-queue named "ipa rx wq" and "ipa tx wq"
4613* Initialize Red-Black-Tree(s) for handles of header, routing rule,
4614* routing table, filtering rule
4615* Initialize the filter block by committing IPV4 and IPV6 default rules
4616* Create empty routing table in system memory(no committing)
4617* Create a char-device for IPA
4618* Initialize IPA RM (resource manager)
4619* Configure GSI registers (in GSI case)
Amir Levy9659e592016-10-27 18:08:27 +03004620*/
4621static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
4622 struct device *ipa_dev)
4623{
4624 int result = 0;
4625 int i;
Amir Levy9659e592016-10-27 18:08:27 +03004626 struct ipa3_rt_tbl_set *rset;
4627 struct ipa_active_client_logging_info log_info;
4628
4629 IPADBG("IPA Driver initialization started\n");
4630
4631 ipa3_ctx = kzalloc(sizeof(*ipa3_ctx), GFP_KERNEL);
4632 if (!ipa3_ctx) {
4633 IPAERR(":kzalloc err.\n");
4634 result = -ENOMEM;
4635 goto fail_mem_ctx;
4636 }
4637
4638 ipa3_ctx->logbuf = ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa", 0);
Skylar Chang841c1452017-04-03 16:07:22 -07004639 if (ipa3_ctx->logbuf == NULL)
4640 IPAERR("failed to create IPC log, continue...\n");
Amir Levy9659e592016-10-27 18:08:27 +03004641
4642 ipa3_ctx->pdev = ipa_dev;
4643 ipa3_ctx->uc_pdev = ipa_dev;
4644 ipa3_ctx->smmu_present = smmu_info.present;
Michael Adisumarta93e97522017-10-06 15:49:46 -07004645 if (!ipa3_ctx->smmu_present) {
4646 for (i = 0; i < IPA_SMMU_CB_MAX; i++)
4647 ipa3_ctx->s1_bypass_arr[i] = true;
4648 } else {
4649 for (i = 0; i < IPA_SMMU_CB_MAX; i++)
4650 ipa3_ctx->s1_bypass_arr[i] = smmu_info.s1_bypass_arr[i];
4651 }
4652
Amir Levy9659e592016-10-27 18:08:27 +03004653 ipa3_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
4654 ipa3_ctx->ipa_wrapper_size = resource_p->ipa_mem_size;
4655 ipa3_ctx->ipa_hw_type = resource_p->ipa_hw_type;
4656 ipa3_ctx->ipa3_hw_mode = resource_p->ipa3_hw_mode;
4657 ipa3_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
Amir Levy9659e592016-10-27 18:08:27 +03004658 ipa3_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt;
4659 ipa3_ctx->ipa_wdi2 = resource_p->ipa_wdi2;
4660 ipa3_ctx->use_64_bit_dma_mask = resource_p->use_64_bit_dma_mask;
4661 ipa3_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size;
4662 ipa3_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size;
4663 ipa3_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset;
4664 ipa3_ctx->tethered_flow_control = resource_p->tethered_flow_control;
Amir Levy9659e592016-10-27 18:08:27 +03004665 ipa3_ctx->ee = resource_p->ee;
4666 ipa3_ctx->apply_rg10_wa = resource_p->apply_rg10_wa;
4667 ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa;
Michael Adisumarta3e350812017-09-18 14:54:36 -07004668 ipa3_ctx->use_ipa_pm = resource_p->use_ipa_pm;
Amir Levy9659e592016-10-27 18:08:27 +03004669 ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02004670 if (resource_p->ipa_tz_unlock_reg) {
4671 ipa3_ctx->ipa_tz_unlock_reg_num =
4672 resource_p->ipa_tz_unlock_reg_num;
4673 ipa3_ctx->ipa_tz_unlock_reg = kcalloc(
4674 ipa3_ctx->ipa_tz_unlock_reg_num,
4675 sizeof(*ipa3_ctx->ipa_tz_unlock_reg),
4676 GFP_KERNEL);
4677 if (ipa3_ctx->ipa_tz_unlock_reg == NULL) {
4678 result = -ENOMEM;
4679 goto fail_tz_unlock_reg;
4680 }
4681 for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) {
4682 ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr =
4683 resource_p->ipa_tz_unlock_reg[i].reg_addr;
4684 ipa3_ctx->ipa_tz_unlock_reg[i].size =
4685 resource_p->ipa_tz_unlock_reg[i].size;
4686 }
4687 }
4688
4689 /* unlock registers for uc */
4690 ipa3_tz_unlock_reg(ipa3_ctx);
Amir Levy9659e592016-10-27 18:08:27 +03004691
4692 /* default aggregation parameters */
4693 ipa3_ctx->aggregation_type = IPA_MBIM_16;
4694 ipa3_ctx->aggregation_byte_limit = 1;
4695 ipa3_ctx->aggregation_time_limit = 0;
4696
4697 ipa3_ctx->ctrl = kzalloc(sizeof(*ipa3_ctx->ctrl), GFP_KERNEL);
4698 if (!ipa3_ctx->ctrl) {
4699 IPAERR("memory allocation error for ctrl\n");
4700 result = -ENOMEM;
4701 goto fail_mem_ctrl;
4702 }
4703 result = ipa3_controller_static_bind(ipa3_ctx->ctrl,
4704 ipa3_ctx->ipa_hw_type);
4705 if (result) {
4706 IPAERR("fail to static bind IPA ctrl.\n");
4707 result = -EFAULT;
4708 goto fail_bind;
4709 }
4710
4711 result = ipa3_init_mem_partition(master_dev->of_node);
4712 if (result) {
4713 IPAERR(":ipa3_init_mem_partition failed!\n");
4714 result = -ENODEV;
4715 goto fail_init_mem_partition;
4716 }
4717
4718 if (ipa3_bus_scale_table) {
Ghanim Fodi6a831342017-03-07 18:19:15 +02004719 IPADBG("Use bus scaling info from device tree #usecases=%d\n",
4720 ipa3_bus_scale_table->num_usecases);
Amir Levy9659e592016-10-27 18:08:27 +03004721 ipa3_ctx->ctrl->msm_bus_data_ptr = ipa3_bus_scale_table;
4722 }
4723
Ghanim Fodi6a831342017-03-07 18:19:15 +02004724 /* get BUS handle */
4725 ipa3_ctx->ipa_bus_hdl =
4726 msm_bus_scale_register_client(
4727 ipa3_ctx->ctrl->msm_bus_data_ptr);
4728 if (!ipa3_ctx->ipa_bus_hdl) {
4729 IPAERR("fail to register with bus mgr!\n");
4730 result = -ENODEV;
4731 goto fail_bus_reg;
Amir Levy9659e592016-10-27 18:08:27 +03004732 }
4733
4734 /* get IPA clocks */
4735 result = ipa3_get_clks(master_dev);
4736 if (result)
4737 goto fail_clk;
4738
4739 /* init active_clients_log after getting ipa-clk */
4740 if (ipa3_active_clients_log_init())
4741 goto fail_init_active_client;
4742
4743 /* Enable ipa3_ctx->enable_clock_scaling */
4744 ipa3_ctx->enable_clock_scaling = 1;
4745 ipa3_ctx->curr_ipa_clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
4746
4747 /* enable IPA clocks explicitly to allow the initialization */
4748 ipa3_enable_clks();
4749
4750 /* setup IPA register access */
4751 IPADBG("Mapping 0x%x\n", resource_p->ipa_mem_base +
4752 ipa3_ctx->ctrl->ipa_reg_base_ofst);
4753 ipa3_ctx->mmio = ioremap(resource_p->ipa_mem_base +
4754 ipa3_ctx->ctrl->ipa_reg_base_ofst,
4755 resource_p->ipa_mem_size);
4756 if (!ipa3_ctx->mmio) {
4757 IPAERR(":ipa-base ioremap err.\n");
4758 result = -EFAULT;
4759 goto fail_remap;
4760 }
4761
4762 if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio,
4763 ipa3_ctx->pdev)) {
4764 IPAERR("fail to init ipahal\n");
4765 result = -EFAULT;
4766 goto fail_ipahal;
4767 }
4768
4769 result = ipa3_init_hw();
4770 if (result) {
4771 IPAERR(":error initializing HW.\n");
4772 result = -ENODEV;
4773 goto fail_init_hw;
4774 }
4775 IPADBG("IPA HW initialization sequence completed");
4776
4777 ipa3_ctx->ipa_num_pipes = ipa3_get_num_pipes();
4778 if (ipa3_ctx->ipa_num_pipes > IPA3_MAX_NUM_PIPES) {
4779 IPAERR("IPA has more pipes than supported! has %d, max %d\n",
4780 ipa3_ctx->ipa_num_pipes, IPA3_MAX_NUM_PIPES);
4781 result = -ENODEV;
4782 goto fail_init_hw;
4783 }
4784
Amir Levy9659e592016-10-27 18:08:27 +03004785 ipa3_ctx->ctrl->ipa_sram_read_settings();
4786 IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n",
4787 ipa3_ctx->smem_sz, ipa3_ctx->smem_restricted_bytes);
4788
4789 IPADBG("hdr_lcl=%u ip4_rt_hash=%u ip4_rt_nonhash=%u\n",
4790 ipa3_ctx->hdr_tbl_lcl, ipa3_ctx->ip4_rt_tbl_hash_lcl,
4791 ipa3_ctx->ip4_rt_tbl_nhash_lcl);
4792
4793 IPADBG("ip6_rt_hash=%u ip6_rt_nonhash=%u\n",
4794 ipa3_ctx->ip6_rt_tbl_hash_lcl, ipa3_ctx->ip6_rt_tbl_nhash_lcl);
4795
4796 IPADBG("ip4_flt_hash=%u ip4_flt_nonhash=%u\n",
4797 ipa3_ctx->ip4_flt_tbl_hash_lcl,
4798 ipa3_ctx->ip4_flt_tbl_nhash_lcl);
4799
4800 IPADBG("ip6_flt_hash=%u ip6_flt_nonhash=%u\n",
4801 ipa3_ctx->ip6_flt_tbl_hash_lcl,
4802 ipa3_ctx->ip6_flt_tbl_nhash_lcl);
4803
4804 if (ipa3_ctx->smem_reqd_sz > ipa3_ctx->smem_sz) {
4805 IPAERR("SW expects more core memory, needed %d, avail %d\n",
4806 ipa3_ctx->smem_reqd_sz, ipa3_ctx->smem_sz);
4807 result = -ENOMEM;
4808 goto fail_init_hw;
4809 }
4810
4811 mutex_init(&ipa3_ctx->ipa3_active_clients.mutex);
Amir Levy9659e592016-10-27 18:08:27 +03004812 IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
4813 ipa3_active_clients_log_inc(&log_info, false);
Skylar Chang242952b2017-07-20 15:04:05 -07004814 atomic_set(&ipa3_ctx->ipa3_active_clients.cnt, 1);
Amir Levy9659e592016-10-27 18:08:27 +03004815
Amir Levy9659e592016-10-27 18:08:27 +03004816 /* Create workqueues for power management */
4817 ipa3_ctx->power_mgmt_wq =
4818 create_singlethread_workqueue("ipa_power_mgmt");
4819 if (!ipa3_ctx->power_mgmt_wq) {
4820 IPAERR("failed to create power mgmt wq\n");
4821 result = -ENOMEM;
4822 goto fail_init_hw;
4823 }
4824
4825 ipa3_ctx->transport_power_mgmt_wq =
4826 create_singlethread_workqueue("transport_power_mgmt");
4827 if (!ipa3_ctx->transport_power_mgmt_wq) {
4828 IPAERR("failed to create transport power mgmt wq\n");
4829 result = -ENOMEM;
4830 goto fail_create_transport_wq;
4831 }
4832
Sridhar Ancha99b505b2016-04-21 23:11:10 +05304833 mutex_init(&ipa3_ctx->transport_pm.transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03004834
4835 /* init the lookaside cache */
4836 ipa3_ctx->flt_rule_cache = kmem_cache_create("IPA_FLT",
4837 sizeof(struct ipa3_flt_entry), 0, 0, NULL);
4838 if (!ipa3_ctx->flt_rule_cache) {
4839 IPAERR(":ipa flt cache create failed\n");
4840 result = -ENOMEM;
4841 goto fail_flt_rule_cache;
4842 }
4843 ipa3_ctx->rt_rule_cache = kmem_cache_create("IPA_RT",
4844 sizeof(struct ipa3_rt_entry), 0, 0, NULL);
4845 if (!ipa3_ctx->rt_rule_cache) {
4846 IPAERR(":ipa rt cache create failed\n");
4847 result = -ENOMEM;
4848 goto fail_rt_rule_cache;
4849 }
4850 ipa3_ctx->hdr_cache = kmem_cache_create("IPA_HDR",
4851 sizeof(struct ipa3_hdr_entry), 0, 0, NULL);
4852 if (!ipa3_ctx->hdr_cache) {
4853 IPAERR(":ipa hdr cache create failed\n");
4854 result = -ENOMEM;
4855 goto fail_hdr_cache;
4856 }
4857 ipa3_ctx->hdr_offset_cache =
4858 kmem_cache_create("IPA_HDR_OFFSET",
4859 sizeof(struct ipa_hdr_offset_entry), 0, 0, NULL);
4860 if (!ipa3_ctx->hdr_offset_cache) {
4861 IPAERR(":ipa hdr off cache create failed\n");
4862 result = -ENOMEM;
4863 goto fail_hdr_offset_cache;
4864 }
4865 ipa3_ctx->hdr_proc_ctx_cache = kmem_cache_create("IPA_HDR_PROC_CTX",
4866 sizeof(struct ipa3_hdr_proc_ctx_entry), 0, 0, NULL);
4867 if (!ipa3_ctx->hdr_proc_ctx_cache) {
4868 IPAERR(":ipa hdr proc ctx cache create failed\n");
4869 result = -ENOMEM;
4870 goto fail_hdr_proc_ctx_cache;
4871 }
4872 ipa3_ctx->hdr_proc_ctx_offset_cache =
4873 kmem_cache_create("IPA_HDR_PROC_CTX_OFFSET",
4874 sizeof(struct ipa3_hdr_proc_ctx_offset_entry), 0, 0, NULL);
4875 if (!ipa3_ctx->hdr_proc_ctx_offset_cache) {
4876 IPAERR(":ipa hdr proc ctx off cache create failed\n");
4877 result = -ENOMEM;
4878 goto fail_hdr_proc_ctx_offset_cache;
4879 }
4880 ipa3_ctx->rt_tbl_cache = kmem_cache_create("IPA_RT_TBL",
4881 sizeof(struct ipa3_rt_tbl), 0, 0, NULL);
4882 if (!ipa3_ctx->rt_tbl_cache) {
4883 IPAERR(":ipa rt tbl cache create failed\n");
4884 result = -ENOMEM;
4885 goto fail_rt_tbl_cache;
4886 }
4887 ipa3_ctx->tx_pkt_wrapper_cache =
4888 kmem_cache_create("IPA_TX_PKT_WRAPPER",
4889 sizeof(struct ipa3_tx_pkt_wrapper), 0, 0, NULL);
4890 if (!ipa3_ctx->tx_pkt_wrapper_cache) {
4891 IPAERR(":ipa tx pkt wrapper cache create failed\n");
4892 result = -ENOMEM;
4893 goto fail_tx_pkt_wrapper_cache;
4894 }
4895 ipa3_ctx->rx_pkt_wrapper_cache =
4896 kmem_cache_create("IPA_RX_PKT_WRAPPER",
4897 sizeof(struct ipa3_rx_pkt_wrapper), 0, 0, NULL);
4898 if (!ipa3_ctx->rx_pkt_wrapper_cache) {
4899 IPAERR(":ipa rx pkt wrapper cache create failed\n");
4900 result = -ENOMEM;
4901 goto fail_rx_pkt_wrapper_cache;
4902 }
4903
Skylar Chang6c4bec92017-04-21 16:10:14 -07004904 /* allocate memory for DMA_TASK workaround */
4905 result = ipa3_allocate_dma_task_for_gsi();
4906 if (result) {
4907 IPAERR("failed to allocate dma task\n");
4908 goto fail_dma_task;
4909 }
4910
Amir Levy9659e592016-10-27 18:08:27 +03004911 /* init the various list heads */
4912 INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_hdr_entry_list);
4913 for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
4914 INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_offset_list[i]);
4915 INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_free_offset_list[i]);
4916 }
4917 INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list);
4918 for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
4919 INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i]);
4920 INIT_LIST_HEAD(&ipa3_ctx->
4921 hdr_proc_ctx_tbl.head_free_offset_list[i]);
4922 }
4923 INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004924 idr_init(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].rule_ids);
Amir Levy9659e592016-10-27 18:08:27 +03004925 INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004926 idr_init(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].rule_ids);
Amir Levy9659e592016-10-27 18:08:27 +03004927
4928 rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
4929 INIT_LIST_HEAD(&rset->head_rt_tbl_list);
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004930 idr_init(&rset->rule_ids);
Amir Levy9659e592016-10-27 18:08:27 +03004931 rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
4932 INIT_LIST_HEAD(&rset->head_rt_tbl_list);
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004933 idr_init(&rset->rule_ids);
Amir Levy9659e592016-10-27 18:08:27 +03004934
4935 INIT_LIST_HEAD(&ipa3_ctx->intf_list);
4936 INIT_LIST_HEAD(&ipa3_ctx->msg_list);
4937 INIT_LIST_HEAD(&ipa3_ctx->pull_msg_list);
4938 init_waitqueue_head(&ipa3_ctx->msg_waitq);
4939 mutex_init(&ipa3_ctx->msg_lock);
4940
4941 mutex_init(&ipa3_ctx->lock);
4942 mutex_init(&ipa3_ctx->nat_mem.lock);
Skylar Changfb792c62017-08-17 12:53:23 -07004943 mutex_init(&ipa3_ctx->q6_proxy_clk_vote_mutex);
Mohammed Javidb4b5ef42017-08-29 01:05:46 +05304944 mutex_init(&ipa3_ctx->ipa_cne_evt_lock);
Amir Levy9659e592016-10-27 18:08:27 +03004945
4946 idr_init(&ipa3_ctx->ipa_idr);
4947 spin_lock_init(&ipa3_ctx->idr_lock);
4948
4949 /* wlan related member */
4950 memset(&ipa3_ctx->wc_memb, 0, sizeof(ipa3_ctx->wc_memb));
4951 spin_lock_init(&ipa3_ctx->wc_memb.wlan_spinlock);
4952 spin_lock_init(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
4953 INIT_LIST_HEAD(&ipa3_ctx->wc_memb.wlan_comm_desc_list);
4954
Amir Levy9659e592016-10-27 18:08:27 +03004955 ipa3_ctx->class = class_create(THIS_MODULE, DRV_NAME);
4956
4957 result = alloc_chrdev_region(&ipa3_ctx->dev_num, 0, 1, DRV_NAME);
4958 if (result) {
4959 IPAERR("alloc_chrdev_region err.\n");
4960 result = -ENODEV;
4961 goto fail_alloc_chrdev_region;
4962 }
4963
4964 ipa3_ctx->dev = device_create(ipa3_ctx->class, NULL, ipa3_ctx->dev_num,
4965 ipa3_ctx, DRV_NAME);
4966 if (IS_ERR(ipa3_ctx->dev)) {
4967 IPAERR(":device_create err.\n");
4968 result = -ENODEV;
4969 goto fail_device_create;
4970 }
4971
Amir Levy9659e592016-10-27 18:08:27 +03004972 if (ipa3_create_nat_device()) {
4973 IPAERR("unable to create nat device\n");
4974 result = -ENODEV;
4975 goto fail_nat_dev_add;
4976 }
4977
4978 /* Create a wakeup source. */
4979 wakeup_source_init(&ipa3_ctx->w_lock, "IPA_WS");
4980 spin_lock_init(&ipa3_ctx->wakelock_ref_cnt.spinlock);
4981
Michael Adisumarta3e350812017-09-18 14:54:36 -07004982 /* Initialize Power Management framework */
4983 if (ipa3_ctx->use_ipa_pm) {
4984 result = ipa_pm_init(&ipa3_res.pm_init);
4985 if (result) {
4986 IPAERR("IPA PM initialization failed (%d)\n", -result);
4987 result = -ENODEV;
4988 goto fail_ipa_rm_init;
4989 }
4990 IPADBG("IPA power management initialized");
4991 } else {
4992 result = ipa_rm_initialize();
4993 if (result) {
4994 IPAERR("RM initialization failed (%d)\n", -result);
4995 result = -ENODEV;
4996 goto fail_ipa_rm_init;
4997 }
4998 IPADBG("IPA resource manager initialized");
Amir Levy9659e592016-10-27 18:08:27 +03004999
Michael Adisumarta3e350812017-09-18 14:54:36 -07005000 result = ipa3_create_apps_resource();
5001 if (result) {
5002 IPAERR("Failed to create APPS_CONS resource\n");
5003 result = -ENODEV;
5004 goto fail_create_apps_resource;
5005 }
Amir Levy9659e592016-10-27 18:08:27 +03005006 }
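/*
 * From this point on, power handling goes either through the IPA PM
 * framework or through the legacy IPA RM + APPS_CONS resource, depending on
 * the qcom,use-ipa-pm DT flag parsed earlier. The fail_* labels below only
 * undo the RM objects when RM was actually initialized.
 */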
5007
Skylar Changcd3902d2017-03-27 18:08:27 -07005008 result = ipa3_alloc_pkt_init();
5009 if (result) {
5010 IPAERR("Failed to alloc pkt_init payload\n");
5011 result = -ENODEV;
5012 goto fail_create_apps_resource;
5013 }
5014
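/* DCD (presumably dynamic clock division) is only enabled on IPA v3.5+ */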
Amir Levy12ef0912016-08-30 09:27:34 +03005015 if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)
5016 ipa3_enable_dcd();
5017
Amir Levy9659e592016-10-27 18:08:27 +03005018 INIT_LIST_HEAD(&ipa3_ctx->ipa_ready_cb_list);
5019
5020 init_completion(&ipa3_ctx->init_completion_obj);
Skylar Chang0c17c7d2016-10-31 09:57:54 -07005021 init_completion(&ipa3_ctx->uc_loaded_completion_obj);
Amir Levy9659e592016-10-27 18:08:27 +03005022
5023 /*
Amir Levya59ed3f2017-03-05 17:30:55 +02005024 * We can't register the GSI driver yet, as it expects
Amir Levy9659e592016-10-27 18:08:27 +03005025 * the GSI FW to be up and running before the registration.
Amir Levya59ed3f2017-03-05 17:30:55 +02005026 *
5027 * For IPA3.0, the GSI configuration is done by the GSI driver.
5028 * For IPA3.1 (and on), the GSI configuration is done by TZ.
Amir Levy9659e592016-10-27 18:08:27 +03005029 */
Amir Levya59ed3f2017-03-05 17:30:55 +02005030 if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0) {
5031 result = ipa3_gsi_pre_fw_load_init();
5032 if (result) {
5033 IPAERR("gsi pre FW loading config failed\n");
5034 result = -ENODEV;
5035 goto fail_ipa_init_interrupts;
Amir Levy9659e592016-10-27 18:08:27 +03005036 }
5037 }
Amir Levy9659e592016-10-27 18:08:27 +03005038
Utkarsh Saxenaded78142017-05-03 14:04:30 +05305039 cdev_init(&ipa3_ctx->cdev, &ipa3_drv_fops);
5040 ipa3_ctx->cdev.owner = THIS_MODULE;
5041 ipa3_ctx->cdev.ops = &ipa3_drv_fops; /* from LDD3 */
5042
5043 result = cdev_add(&ipa3_ctx->cdev, ipa3_ctx->dev_num, 1);
5044 if (result) {
5045 IPAERR(":cdev_add err=%d\n", -result);
5046 result = -ENODEV;
5047 goto fail_cdev_add;
5048 }
5049 IPADBG("ipa cdev added successfully. major:%d minor:%d\n",
5050 MAJOR(ipa3_ctx->dev_num),
5051 MINOR(ipa3_ctx->dev_num));
Amir Levy9659e592016-10-27 18:08:27 +03005052 return 0;
5053
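/*
 * Error unwinding: each fail_* label releases only what was successfully
 * set up before the corresponding goto, in reverse order of acquisition,
 * and then falls through to the earlier labels.
 */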
Utkarsh Saxenaded78142017-05-03 14:04:30 +05305054fail_cdev_add:
Amir Levy9659e592016-10-27 18:08:27 +03005055fail_ipa_init_interrupts:
Michael Adisumarta3e350812017-09-18 14:54:36 -07005056 if (!ipa3_ctx->use_ipa_pm)
5057 ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
Amir Levy9659e592016-10-27 18:08:27 +03005058fail_create_apps_resource:
Michael Adisumarta3e350812017-09-18 14:54:36 -07005059 if (!ipa3_ctx->use_ipa_pm)
5060 ipa_rm_exit();
Amir Levy9659e592016-10-27 18:08:27 +03005061fail_ipa_rm_init:
5062fail_nat_dev_add:
Amir Levy9659e592016-10-27 18:08:27 +03005063 device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
5064fail_device_create:
5065 unregister_chrdev_region(ipa3_ctx->dev_num, 1);
5066fail_alloc_chrdev_region:
Skylar Chang0c37f5f2017-07-24 10:22:53 -07005067 rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
5068 idr_destroy(&rset->rule_ids);
5069 rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
5070 idr_destroy(&rset->rule_ids);
5071 idr_destroy(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].rule_ids);
5072 idr_destroy(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].rule_ids);
Skylar Chang6c4bec92017-04-21 16:10:14 -07005073 ipa3_free_dma_task_for_gsi();
5074fail_dma_task:
Amir Levy9659e592016-10-27 18:08:27 +03005075 idr_destroy(&ipa3_ctx->ipa_idr);
Amir Levy9659e592016-10-27 18:08:27 +03005076 kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
5077fail_rx_pkt_wrapper_cache:
5078 kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
5079fail_tx_pkt_wrapper_cache:
5080 kmem_cache_destroy(ipa3_ctx->rt_tbl_cache);
5081fail_rt_tbl_cache:
5082 kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache);
5083fail_hdr_proc_ctx_offset_cache:
5084 kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache);
5085fail_hdr_proc_ctx_cache:
5086 kmem_cache_destroy(ipa3_ctx->hdr_offset_cache);
5087fail_hdr_offset_cache:
5088 kmem_cache_destroy(ipa3_ctx->hdr_cache);
5089fail_hdr_cache:
5090 kmem_cache_destroy(ipa3_ctx->rt_rule_cache);
5091fail_rt_rule_cache:
5092 kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
5093fail_flt_rule_cache:
5094 destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
5095fail_create_transport_wq:
5096 destroy_workqueue(ipa3_ctx->power_mgmt_wq);
5097fail_init_hw:
5098 ipahal_destroy();
5099fail_ipahal:
5100 iounmap(ipa3_ctx->mmio);
5101fail_remap:
5102 ipa3_disable_clks();
5103 ipa3_active_clients_log_destroy();
5104fail_init_active_client:
Ghanim Fodi6a831342017-03-07 18:19:15 +02005105 if (ipa3_clk)
5106 clk_put(ipa3_clk);
5107 ipa3_clk = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03005108fail_clk:
5109 msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
5110fail_bus_reg:
Ghanim Fodi6a831342017-03-07 18:19:15 +02005111 if (ipa3_bus_scale_table) {
5112 msm_bus_cl_clear_pdata(ipa3_bus_scale_table);
5113 ipa3_bus_scale_table = NULL;
5114 }
Amir Levy9659e592016-10-27 18:08:27 +03005115fail_init_mem_partition:
5116fail_bind:
5117 kfree(ipa3_ctx->ctrl);
5118fail_mem_ctrl:
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005119 kfree(ipa3_ctx->ipa_tz_unlock_reg);
5120fail_tz_unlock_reg:
Skylar Chang841c1452017-04-03 16:07:22 -07005121 if (ipa3_ctx->logbuf)
5122 ipc_log_context_destroy(ipa3_ctx->logbuf);
Amir Levy9659e592016-10-27 18:08:27 +03005123 kfree(ipa3_ctx);
5124 ipa3_ctx = NULL;
5125fail_mem_ctx:
5126 return result;
5127}
5128
Michael Adisumarta3e350812017-09-18 14:54:36 -07005129bool ipa_pm_is_used(void)
5130{
5131 return (ipa3_ctx) ? ipa3_ctx->use_ipa_pm : false;
5132}
5133
5134static int get_ipa_dts_pm_info(struct platform_device *pdev,
5135 struct ipa3_plat_drv_res *ipa_drv_res)
5136{
5137 int result;
5138 int i, j;
5139
5140 ipa_drv_res->use_ipa_pm = of_property_read_bool(pdev->dev.of_node,
5141 "qcom,use-ipa-pm");
5142 IPADBG("use_ipa_pm=%d\n", ipa_drv_res->use_ipa_pm);
5143 if (!ipa_drv_res->use_ipa_pm)
5144 return 0;
5145
5146 result = of_property_read_u32(pdev->dev.of_node,
5147 "qcom,msm-bus,num-cases",
5148 &ipa_drv_res->pm_init.threshold_size);
5149 /* No vote is ignored */
5150 ipa_drv_res->pm_init.threshold_size -= 2;
5151 if (result || ipa_drv_res->pm_init.threshold_size >
5152 IPA_PM_THRESHOLD_MAX) {
5153 IPAERR("invalid property qcom,msm-bus,num-cases %d\n",
5154 ipa_drv_res->pm_init.threshold_size);
5155 return -EFAULT;
5156 }
5157
5158 result = of_property_read_u32_array(pdev->dev.of_node,
5159 "qcom,throughput-threshold",
5160 ipa_drv_res->pm_init.default_threshold,
5161 ipa_drv_res->pm_init.threshold_size);
5162 if (result) {
5163 IPAERR("failed to read qcom,throughput-threshold\n");
5164 return -EFAULT;
5165 }
5166
5167 result = of_property_count_strings(pdev->dev.of_node,
5168 "qcom,scaling-exceptions");
5169 if (result < 0) {
5170 IPADBG("no exception list for ipa pm\n");
5171 result = 0;
5172 }
5173
5174 if (result % (ipa_drv_res->pm_init.threshold_size + 1)) {
5175 IPAERR("failed to read qcom,scaling-exceptions\n");
5176 return -EFAULT;
5177 }
5178
5179 ipa_drv_res->pm_init.exception_size = result /
5180 (ipa_drv_res->pm_init.threshold_size + 1);
5181 if (ipa_drv_res->pm_init.exception_size >=
5182 IPA_PM_EXCEPTION_MAX) {
5183 IPAERR("exception list larger than max %d\n",
5184 ipa_drv_res->pm_init.exception_size);
5185 return -EFAULT;
5186 }
5187
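/*
 * Each exception entry is one usecase string followed by threshold_size
 * threshold strings. For example, with threshold_size == 2 a DT node could
 * carry (illustrative names and values only):
 *
 *   qcom,scaling-exceptions = "USB", "100", "1000",
 *                             "WLAN", "50", "500";
 */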
5188 for (i = 0; i < ipa_drv_res->pm_init.exception_size; i++) {
5189 struct ipa_pm_exception *ex = ipa_drv_res->pm_init.exceptions;
5190
5191 result = of_property_read_string_index(pdev->dev.of_node,
5192 "qcom,scaling-exceptions",
5193 i * (ipa_drv_res->pm_init.threshold_size + 1),
5194 &ex[i].usecase);
5195 if (result) {
5196 IPAERR("failed to read qcom,scaling-exceptions\n");
5197 return -EFAULT;
5198 }
5199
5200 for (j = 0; j < ipa_drv_res->pm_init.threshold_size; j++) {
5201 const char *str;
5202
5203 result = of_property_read_string_index(
5204 pdev->dev.of_node,
5205 "qcom,scaling-exceptions",
5206 i * (ipa_drv_res->pm_init.threshold_size + 1) + j + 1,
5207 &str);
5208 if (result) {
5209 IPAERR("failed to read qcom,scaling-exceptions\n"
5210 );
5211 return -EFAULT;
5212 }
5213
5214 if (kstrtou32(str, 0, &ex[i].threshold[j])) {
5215 IPAERR("error str=%s\n", str);
5216 return -EFAULT;
5217 }
5218 }
5219 }
5220
5221 return 0;
5222}
5223
Amir Levy9659e592016-10-27 18:08:27 +03005224static int get_ipa_dts_configuration(struct platform_device *pdev,
5225 struct ipa3_plat_drv_res *ipa_drv_res)
5226{
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005227 int i, result, pos;
Amir Levy9659e592016-10-27 18:08:27 +03005228 struct resource *resource;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005229 u32 *ipa_tz_unlock_reg;
5230 int elem_num;
Amir Levy9659e592016-10-27 18:08:27 +03005231
5232 /* initialize ipa3_res */
5233 ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
5234 ipa_drv_res->ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
5235 ipa_drv_res->ipa_hw_type = 0;
5236 ipa_drv_res->ipa3_hw_mode = 0;
Amir Levy9659e592016-10-27 18:08:27 +03005237 ipa_drv_res->modem_cfg_emb_pipe_flt = false;
5238 ipa_drv_res->ipa_wdi2 = false;
5239 ipa_drv_res->use_64_bit_dma_mask = false;
Ghanim Fodi6a831342017-03-07 18:19:15 +02005240 ipa_drv_res->use_bw_vote = false;
Amir Levy9659e592016-10-27 18:08:27 +03005241 ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
5242 ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
5243 ipa_drv_res->apply_rg10_wa = false;
5244 ipa_drv_res->gsi_ch20_wa = false;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005245 ipa_drv_res->ipa_tz_unlock_reg_num = 0;
5246 ipa_drv_res->ipa_tz_unlock_reg = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03005247
5248 /* Get IPA HW Version */
5249 result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
5250 &ipa_drv_res->ipa_hw_type);
5251 if ((result) || (ipa_drv_res->ipa_hw_type == 0)) {
5252 IPAERR(":get resource failed for ipa-hw-ver!\n");
5253 return -ENODEV;
5254 }
5255 IPADBG(": ipa_hw_type = %d", ipa_drv_res->ipa_hw_type);
5256
5257 if (ipa_drv_res->ipa_hw_type < IPA_HW_v3_0) {
5258 IPAERR(":IPA version below 3.0 not supported!\n");
5259 return -ENODEV;
5260 }
5261
5262 /* Get IPA HW mode */
5263 result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-mode",
5264 &ipa_drv_res->ipa3_hw_mode);
5265 if (result)
5266 IPADBG("using default (IPA_MODE_NORMAL) for ipa-hw-mode\n");
5267 else
5268 IPADBG(": found ipa_drv_res->ipa3_hw_mode = %d",
5269 ipa_drv_res->ipa3_hw_mode);
5270
5271 /* Get IPA WAN / LAN RX pool size */
5272 result = of_property_read_u32(pdev->dev.of_node,
5273 "qcom,wan-rx-ring-size",
5274 &ipa_drv_res->wan_rx_ring_size);
5275 if (result)
5276 IPADBG("using default for wan-rx-ring-size = %u\n",
5277 ipa_drv_res->wan_rx_ring_size);
5278 else
5279 IPADBG(": found ipa_drv_res->wan-rx-ring-size = %u",
5280 ipa_drv_res->wan_rx_ring_size);
5281
5282 result = of_property_read_u32(pdev->dev.of_node,
5283 "qcom,lan-rx-ring-size",
5284 &ipa_drv_res->lan_rx_ring_size);
5285 if (result)
5286 IPADBG("using default for lan-rx-ring-size = %u\n",
5287 ipa_drv_res->lan_rx_ring_size);
5288 else
5289 IPADBG(": found ipa_drv_res->lan-rx-ring-size = %u",
5290 ipa_drv_res->lan_rx_ring_size);
5291
5292 ipa_drv_res->use_ipa_teth_bridge =
5293 of_property_read_bool(pdev->dev.of_node,
5294 "qcom,use-ipa-tethering-bridge");
5295 IPADBG(": using TBDr (tethering bridge driver) = %s",
5296 ipa_drv_res->use_ipa_teth_bridge
5297 ? "True" : "False");
5298
Amir Levy9659e592016-10-27 18:08:27 +03005299 ipa_drv_res->modem_cfg_emb_pipe_flt =
5300 of_property_read_bool(pdev->dev.of_node,
5301 "qcom,modem-cfg-emb-pipe-flt");
5302 IPADBG(": modem configure embedded pipe filtering = %s\n",
5303 ipa_drv_res->modem_cfg_emb_pipe_flt
5304 ? "True" : "False");
5305
5306 ipa_drv_res->ipa_wdi2 =
5307 of_property_read_bool(pdev->dev.of_node,
5308 "qcom,ipa-wdi2");
5309 IPADBG(": WDI-2.0 = %s\n",
5310 ipa_drv_res->ipa_wdi2
5311 ? "True" : "False");
5312
5313 ipa_drv_res->use_64_bit_dma_mask =
5314 of_property_read_bool(pdev->dev.of_node,
5315 "qcom,use-64-bit-dma-mask");
5316 IPADBG(": use_64_bit_dma_mask = %s\n",
5317 ipa_drv_res->use_64_bit_dma_mask
5318 ? "True" : "False");
5319
Ghanim Fodi6a831342017-03-07 18:19:15 +02005320 ipa_drv_res->use_bw_vote =
5321 of_property_read_bool(pdev->dev.of_node,
5322 "qcom,bandwidth-vote-for-ipa");
5323 IPADBG(": use_bw_vote = %s\n",
5324 ipa_drv_res->use_bw_vote
5325 ? "True" : "False");
5326
Amir Levy9659e592016-10-27 18:08:27 +03005327 ipa_drv_res->skip_uc_pipe_reset =
5328 of_property_read_bool(pdev->dev.of_node,
5329 "qcom,skip-uc-pipe-reset");
5330 IPADBG(": skip uC pipe reset = %s\n",
5331 ipa_drv_res->skip_uc_pipe_reset
5332 ? "True" : "False");
5333
5334 ipa_drv_res->tethered_flow_control =
5335 of_property_read_bool(pdev->dev.of_node,
5336 "qcom,tethered-flow-control");
5337 IPADBG(": Use apps based flow control = %s\n",
5338 ipa_drv_res->tethered_flow_control
5339 ? "True" : "False");
5340
Amir Levy9659e592016-10-27 18:08:27 +03005341 /* Get IPA wrapper address */
5342 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
5343 "ipa-base");
5344 if (!resource) {
5345 IPAERR(":get resource failed for ipa-base!\n");
5346 return -ENODEV;
5347 }
5348 ipa_drv_res->ipa_mem_base = resource->start;
5349 ipa_drv_res->ipa_mem_size = resource_size(resource);
5350 IPADBG(": ipa-base = 0x%x, size = 0x%x\n",
5351 ipa_drv_res->ipa_mem_base,
5352 ipa_drv_res->ipa_mem_size);
5353
5354 smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
5355 smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
5356
Amir Levya59ed3f2017-03-05 17:30:55 +02005357 /* Get IPA GSI address */
5358 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
5359 "gsi-base");
5360 if (!resource) {
5361 IPAERR(":get resource failed for gsi-base!\n");
5362 return -ENODEV;
Amir Levy9659e592016-10-27 18:08:27 +03005363 }
Amir Levya59ed3f2017-03-05 17:30:55 +02005364 ipa_drv_res->transport_mem_base = resource->start;
5365 ipa_drv_res->transport_mem_size = resource_size(resource);
5366 IPADBG(": gsi-base = 0x%x, size = 0x%x\n",
5367 ipa_drv_res->transport_mem_base,
5368 ipa_drv_res->transport_mem_size);
5369
5370 /* Get IPA GSI IRQ number */
5371 resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
5372 "gsi-irq");
5373 if (!resource) {
5374 IPAERR(":get resource failed for gsi-irq!\n");
5375 return -ENODEV;
5376 }
5377 ipa_drv_res->transport_irq = resource->start;
5378 IPADBG(": gsi-irq = %d\n", ipa_drv_res->transport_irq);
Amir Levy9659e592016-10-27 18:08:27 +03005379
5380 /* Get IPA pipe mem start ofst */
5381 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
5382 "ipa-pipe-mem");
5383 if (!resource) {
5384 IPADBG(":not using pipe memory - resource does not exist\n");
5385 } else {
5386 ipa_drv_res->ipa_pipe_mem_start_ofst = resource->start;
5387 ipa_drv_res->ipa_pipe_mem_size = resource_size(resource);
5388 IPADBG(":using pipe memory - at 0x%x of size 0x%x\n",
5389 ipa_drv_res->ipa_pipe_mem_start_ofst,
5390 ipa_drv_res->ipa_pipe_mem_size);
5391 }
5392
5393 /* Get IPA IRQ number */
5394 resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
5395 "ipa-irq");
5396 if (!resource) {
5397 IPAERR(":get resource failed for ipa-irq!\n");
5398 return -ENODEV;
5399 }
5400 ipa_drv_res->ipa_irq = resource->start;
5401 IPADBG(":ipa-irq = %d\n", ipa_drv_res->ipa_irq);
5402
5403 result = of_property_read_u32(pdev->dev.of_node, "qcom,ee",
5404 &ipa_drv_res->ee);
5405 if (result)
5406 ipa_drv_res->ee = 0;
5407
5408 ipa_drv_res->apply_rg10_wa =
5409 of_property_read_bool(pdev->dev.of_node,
5410 "qcom,use-rg10-limitation-mitigation");
5411 IPADBG(": Use Register Group 10 limitation mitigation = %s\n",
5412 ipa_drv_res->apply_rg10_wa
5413 ? "True" : "False");
5414
5415 ipa_drv_res->gsi_ch20_wa =
5416 of_property_read_bool(pdev->dev.of_node,
5417 "qcom,do-not-use-ch-gsi-20");
5418 IPADBG(": GSI CH 20 WA is = %s\n",
5419 ipa_drv_res->gsi_ch20_wa
5420 ? "Needed" : "Not needed");
5421
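/*
 * qcom,ipa-tz-unlock-reg is a flat list of <addr size> pairs, each naming a
 * region the driver will ask TZ to unlock for IPA. An illustrative entry:
 *
 *   qcom,ipa-tz-unlock-reg = <0x04000000 0x1000>;
 */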
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005422 elem_num = of_property_count_elems_of_size(pdev->dev.of_node,
5423 "qcom,ipa-tz-unlock-reg", sizeof(u32));
5424
5425 if (elem_num > 0 && elem_num % 2 == 0) {
5426 ipa_drv_res->ipa_tz_unlock_reg_num = elem_num / 2;
5427
5428 ipa_tz_unlock_reg = kcalloc(elem_num, sizeof(u32), GFP_KERNEL);
5429 if (ipa_tz_unlock_reg == NULL)
5430 return -ENOMEM;
5431
5432 ipa_drv_res->ipa_tz_unlock_reg = kcalloc(
5433 ipa_drv_res->ipa_tz_unlock_reg_num,
5434 sizeof(*ipa_drv_res->ipa_tz_unlock_reg),
5435 GFP_KERNEL);
5436 if (ipa_drv_res->ipa_tz_unlock_reg == NULL) {
5437 kfree(ipa_tz_unlock_reg);
5438 return -ENOMEM;
5439 }
5440
5441 if (of_property_read_u32_array(pdev->dev.of_node,
5442 "qcom,ipa-tz-unlock-reg", ipa_tz_unlock_reg,
5443 elem_num)) {
5444 IPAERR("failed to read register addresses\n");
5445 kfree(ipa_tz_unlock_reg);
5446 kfree(ipa_drv_res->ipa_tz_unlock_reg);
5447 return -EFAULT;
5448 }
5449
5450 pos = 0;
5451 for (i = 0; i < ipa_drv_res->ipa_tz_unlock_reg_num; i++) {
5452 ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr =
5453 ipa_tz_unlock_reg[pos++];
5454 ipa_drv_res->ipa_tz_unlock_reg[i].size =
5455 ipa_tz_unlock_reg[pos++];
5456 IPADBG("tz unlock reg %d: addr 0x%pa size %d\n", i,
5457 &ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr,
5458 ipa_drv_res->ipa_tz_unlock_reg[i].size);
5459 }
5460 kfree(ipa_tz_unlock_reg);
5461 }
Michael Adisumarta3e350812017-09-18 14:54:36 -07005462
5463 /* get IPA PM related information */
5464 result = get_ipa_dts_pm_info(pdev, ipa_drv_res);
5465 if (result) {
5466 IPAERR("failed to get pm info from dts %d\n", result);
5467 return result;
5468 }
5469
Amir Levy9659e592016-10-27 18:08:27 +03005470 return 0;
5471}
5472
5473static int ipa_smmu_wlan_cb_probe(struct device *dev)
5474{
5475 struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx();
Amir Levy9659e592016-10-27 18:08:27 +03005476 int atomic_ctx = 1;
5477 int fast = 1;
5478 int bypass = 1;
5479 int ret;
5480 u32 add_map_size;
5481 const u32 *add_map;
5482 int i;
5483
5484 IPADBG("sub pdev=%p\n", dev);
5485
5486 cb->dev = dev;
Amir Levyf5625342016-12-25 10:21:02 +02005487 cb->iommu = iommu_domain_alloc(dev->bus);
Amir Levy9659e592016-10-27 18:08:27 +03005488 if (!cb->iommu) {
5489 IPAERR("could not alloc iommu domain\n");
5490 /* assume this failure is because iommu driver is not ready */
5491 return -EPROBE_DEFER;
5492 }
5493 cb->valid = true;
5494
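/*
 * The WLAN context bank either runs with SMMU stage-1 translation bypassed
 * (DT flag qcom,smmu-s1-bypass) or as a regular atomic domain, optionally
 * with the fast-map attribute when the top-level node set qcom,smmu-fast-map.
 */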
Michael Adisumarta93e97522017-10-06 15:49:46 -07005495 if (of_property_read_bool(dev->of_node, "qcom,smmu-s1-bypass")) {
5496 smmu_info.s1_bypass_arr[IPA_SMMU_CB_WLAN] = true;
Amir Levy9659e592016-10-27 18:08:27 +03005497 if (iommu_domain_set_attr(cb->iommu,
5498 DOMAIN_ATTR_S1_BYPASS,
5499 &bypass)) {
5500 IPAERR("couldn't set bypass\n");
5501 cb->valid = false;
5502 return -EIO;
5503 }
Michael Adisumarta93e97522017-10-06 15:49:46 -07005504 IPADBG("WLAN SMMU S1 BYPASS\n");
Amir Levy9659e592016-10-27 18:08:27 +03005505 } else {
Michael Adisumarta93e97522017-10-06 15:49:46 -07005506 smmu_info.s1_bypass_arr[IPA_SMMU_CB_WLAN] = false;
Amir Levy9659e592016-10-27 18:08:27 +03005507 if (iommu_domain_set_attr(cb->iommu,
5508 DOMAIN_ATTR_ATOMIC,
5509 &atomic_ctx)) {
5510 IPAERR("couldn't disable coherent HTW\n");
5511 cb->valid = false;
5512 return -EIO;
5513 }
Michael Adisumarta93e97522017-10-06 15:49:46 -07005514 IPADBG("WLAN SMMU ATTR ATOMIC\n");
Amir Levy9659e592016-10-27 18:08:27 +03005515
5516 if (smmu_info.fast_map) {
5517 if (iommu_domain_set_attr(cb->iommu,
5518 DOMAIN_ATTR_FAST,
5519 &fast)) {
5520 IPAERR("couldn't set fast map\n");
5521 cb->valid = false;
5522 return -EIO;
5523 }
5524 IPADBG("SMMU fast map set\n");
5525 }
5526 }
5527
Michael Adisumarta93e97522017-10-06 15:49:46 -07005528 pr_info("IPA smmu_info.s1_bypass_arr[WLAN]=%d smmu_info.fast_map=%d\n",
5529 smmu_info.s1_bypass_arr[IPA_SMMU_CB_WLAN], smmu_info.fast_map);
5530
Amir Levy9659e592016-10-27 18:08:27 +03005531 ret = iommu_attach_device(cb->iommu, dev);
5532 if (ret) {
5533 IPAERR("could not attach device ret=%d\n", ret);
5534 cb->valid = false;
5535 return ret;
5536 }
5537 /* MAP ipa-uc ram */
5538 add_map = of_get_property(dev->of_node,
5539 "qcom,additional-mapping", &add_map_size);
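/*
 * qcom,additional-mapping is a flat list of <iova pa size> 3-tuples; each
 * tuple is rounded out to page boundaries and mapped read/write as MMIO
 * into this context bank.
 */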
5540 if (add_map) {
5541 /* the list must be a whole number of <iova pa size> u32 3-tuples */
5542 if (add_map_size % (3 * sizeof(u32))) {
5543 IPAERR("wrong additional mapping format\n");
5544 cb->valid = false;
5545 return -EFAULT;
5546 }
5547
5548 /* iterate over each entry of the additional mapping array */
5549 for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
5550 u32 iova = be32_to_cpu(add_map[i]);
5551 u32 pa = be32_to_cpu(add_map[i + 1]);
5552 u32 size = be32_to_cpu(add_map[i + 2]);
5553 unsigned long iova_p;
5554 phys_addr_t pa_p;
5555 u32 size_p;
5556
5557 IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
5558 iova_p, pa_p, size_p);
5559 IPADBG("mapping 0x%lx to 0x%pa size %d\n",
5560 iova_p, &pa_p, size_p);
5561 ipa3_iommu_map(cb->iommu,
5562 iova_p, pa_p, size_p,
Amir Levyf5625342016-12-25 10:21:02 +02005563 IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
Amir Levy9659e592016-10-27 18:08:27 +03005564 }
5565 }
5566 return 0;
5567}
5568
5569static int ipa_smmu_uc_cb_probe(struct device *dev)
5570{
5571 struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
Amir Levy9659e592016-10-27 18:08:27 +03005572 int atomic_ctx = 1;
5573 int bypass = 1;
5574 int fast = 1;
5575 int ret;
5576 u32 iova_ap_mapping[2];
5577
5578 IPADBG("UC CB PROBE sub pdev=%p\n", dev);
5579
5580 ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
5581 iova_ap_mapping, 2);
5582 if (ret) {
5583 IPAERR("Failed to read UC start/size iova addresses\n");
5584 return ret;
5585 }
5586 cb->va_start = iova_ap_mapping[0];
5587 cb->va_size = iova_ap_mapping[1];
5588 cb->va_end = cb->va_start + cb->va_size;
5589 IPADBG("UC va_start=0x%x va_size=0x%x\n", cb->va_start, cb->va_size);
5590
5591 if (smmu_info.use_64_bit_dma_mask) {
5592 if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
5593 dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
5594 IPAERR("DMA set 64bit mask failed\n");
5595 return -EOPNOTSUPP;
5596 }
5597 } else {
5598 if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
5599 dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
5600 IPAERR("DMA set 32bit mask failed\n");
5601 return -EOPNOTSUPP;
5602 }
5603 }
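/*
 * The 32/64-bit DMA mask choice follows qcom,use-64-bit-dma-mask on the
 * top-level IPA node, cached in smmu_info by ipa3_plat_drv_probe().
 */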
5604 IPADBG("UC CB PROBE=%p create IOMMU mapping\n", dev);
5605
5606 cb->dev = dev;
Amir Levyf5625342016-12-25 10:21:02 +02005607 cb->mapping = arm_iommu_create_mapping(dev->bus,
Amir Levy9659e592016-10-27 18:08:27 +03005608 cb->va_start, cb->va_size);
5609 if (IS_ERR_OR_NULL(cb->mapping)) {
5610 IPADBG("Failed to create mapping\n");
5611 /* assume this failure is because iommu driver is not ready */
5612 return -EPROBE_DEFER;
5613 }
5614 IPADBG("SMMU mapping created\n");
5615 cb->valid = true;
5616
Amir Levy9659e592016-10-27 18:08:27 +03005617 IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
Michael Adisumarta93e97522017-10-06 15:49:46 -07005618
5619 if (of_property_read_bool(dev->of_node, "qcom,smmu-s1-bypass")) {
5620 smmu_info.s1_bypass_arr[IPA_SMMU_CB_UC] = true;
Amir Levy9659e592016-10-27 18:08:27 +03005621 if (iommu_domain_set_attr(cb->mapping->domain,
Michael Adisumarta93e97522017-10-06 15:49:46 -07005622 DOMAIN_ATTR_S1_BYPASS,
5623 &bypass)) {
Amir Levy9659e592016-10-27 18:08:27 +03005624 IPAERR("couldn't set bypass\n");
5625 arm_iommu_release_mapping(cb->mapping);
5626 cb->valid = false;
5627 return -EIO;
5628 }
Michael Adisumarta93e97522017-10-06 15:49:46 -07005629 IPADBG("UC SMMU S1 BYPASS\n");
Amir Levy9659e592016-10-27 18:08:27 +03005630 } else {
Michael Adisumarta93e97522017-10-06 15:49:46 -07005631 smmu_info.s1_bypass_arr[IPA_SMMU_CB_UC] = false;
Amir Levy9659e592016-10-27 18:08:27 +03005632 if (iommu_domain_set_attr(cb->mapping->domain,
Michael Adisumarta93e97522017-10-06 15:49:46 -07005633 DOMAIN_ATTR_ATOMIC,
5634 &atomic_ctx)) {
Amir Levy9659e592016-10-27 18:08:27 +03005635 IPAERR("couldn't set domain as atomic\n");
5636 arm_iommu_release_mapping(cb->mapping);
5637 cb->valid = false;
5638 return -EIO;
5639 }
5640 IPADBG("SMMU atomic set\n");
5641
5642 if (smmu_info.fast_map) {
5643 if (iommu_domain_set_attr(cb->mapping->domain,
Michael Adisumarta93e97522017-10-06 15:49:46 -07005644 DOMAIN_ATTR_FAST,
5645 &fast)) {
Amir Levy9659e592016-10-27 18:08:27 +03005646 IPAERR("couldn't set fast map\n");
5647 arm_iommu_release_mapping(cb->mapping);
5648 cb->valid = false;
5649 return -EIO;
5650 }
5651 IPADBG("SMMU fast map set\n");
5652 }
5653 }
5654
Michael Adisumarta93e97522017-10-06 15:49:46 -07005655 pr_info("IPA smmu_info.s1_bypass_arr[UC]=%d smmu_info.fast_map=%d\n",
5656 smmu_info.s1_bypass_arr[IPA_SMMU_CB_UC], smmu_info.fast_map);
5657
Amir Levy9659e592016-10-27 18:08:27 +03005658 IPADBG("UC CB PROBE sub pdev=%p attaching IOMMU device\n", dev);
5659 ret = arm_iommu_attach_device(cb->dev, cb->mapping);
5660 if (ret) {
5661 IPAERR("could not attach device ret=%d\n", ret);
5662 arm_iommu_release_mapping(cb->mapping);
5663 cb->valid = false;
5664 return ret;
5665 }
5666
5667 cb->next_addr = cb->va_end;
5668 ipa3_ctx->uc_pdev = dev;
5669
5670 return 0;
5671}
5672
5673static int ipa_smmu_ap_cb_probe(struct device *dev)
5674{
5675 struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
5676 int result;
Amir Levy9659e592016-10-27 18:08:27 +03005677 int atomic_ctx = 1;
5678 int fast = 1;
5679 int bypass = 1;
5680 u32 iova_ap_mapping[2];
5681 u32 add_map_size;
5682 const u32 *add_map;
5683 void *smem_addr;
5684 int i;
5685
5686 IPADBG("AP CB probe: sub pdev=%p\n", dev);
5687
5688 result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
5689 iova_ap_mapping, 2);
5690 if (result) {
5691 IPAERR("Failed to read AP start/size iova addresses\n");
5692 return result;
5693 }
5694 cb->va_start = iova_ap_mapping[0];
5695 cb->va_size = iova_ap_mapping[1];
5696 cb->va_end = cb->va_start + cb->va_size;
5697 IPADBG("AP va_start=0x%x va_size=0x%x\n", cb->va_start, cb->va_size);
5698
5699 if (smmu_info.use_64_bit_dma_mask) {
5700 if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
5701 dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
5702 IPAERR("DMA set 64bit mask failed\n");
5703 return -EOPNOTSUPP;
5704 }
5705 } else {
5706 if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
5707 dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
5708 IPAERR("DMA set 32bit mask failed\n");
5709 return -EOPNOTSUPP;
5710 }
5711 }
5712
5713 cb->dev = dev;
Amir Levyf5625342016-12-25 10:21:02 +02005714 cb->mapping = arm_iommu_create_mapping(dev->bus,
Amir Levy9659e592016-10-27 18:08:27 +03005715 cb->va_start, cb->va_size);
5716 if (IS_ERR_OR_NULL(cb->mapping)) {
5717 IPADBG("Failed to create mapping\n");
5718 /* assume this failure is because iommu driver is not ready */
5719 return -EPROBE_DEFER;
5720 }
5721 IPADBG("SMMU mapping created\n");
5722 cb->valid = true;
5723
Michael Adisumarta93e97522017-10-06 15:49:46 -07005724 if (of_property_read_bool(dev->of_node,
5725 "qcom,smmu-s1-bypass")) {
5726 smmu_info.s1_bypass_arr[IPA_SMMU_CB_AP] = true;
Amir Levy9659e592016-10-27 18:08:27 +03005727 if (iommu_domain_set_attr(cb->mapping->domain,
5728 DOMAIN_ATTR_S1_BYPASS,
5729 &bypass)) {
5730 IPAERR("couldn't set bypass\n");
5731 arm_iommu_release_mapping(cb->mapping);
5732 cb->valid = false;
5733 return -EIO;
5734 }
Michael Adisumarta93e97522017-10-06 15:49:46 -07005735 IPADBG("AP/USB SMMU S1 BYPASS\n");
Amir Levy9659e592016-10-27 18:08:27 +03005736 } else {
Michael Adisumarta93e97522017-10-06 15:49:46 -07005737 smmu_info.s1_bypass_arr[IPA_SMMU_CB_AP] = false;
Amir Levy9659e592016-10-27 18:08:27 +03005738 if (iommu_domain_set_attr(cb->mapping->domain,
5739 DOMAIN_ATTR_ATOMIC,
5740 &atomic_ctx)) {
5741 IPAERR("couldn't set domain as atomic\n");
5742 arm_iommu_release_mapping(cb->mapping);
5743 cb->valid = false;
5744 return -EIO;
5745 }
Michael Adisumarta93e97522017-10-06 15:49:46 -07005746 IPADBG("AP/USB SMMU atomic set\n");
Amir Levy9659e592016-10-27 18:08:27 +03005747
5748 if (iommu_domain_set_attr(cb->mapping->domain,
5749 DOMAIN_ATTR_FAST,
5750 &fast)) {
5751 IPAERR("couldn't set fast map\n");
5752 arm_iommu_release_mapping(cb->mapping);
5753 cb->valid = false;
5754 return -EIO;
5755 }
5756 IPADBG("SMMU fast map set\n");
5757 }
5758
Michael Adisumarta93e97522017-10-06 15:49:46 -07005759 pr_info("IPA smmu_info.s1_bypass_arr[AP]=%d smmu_info.fast_map=%d\n",
5760 smmu_info.s1_bypass_arr[IPA_SMMU_CB_AP], smmu_info.fast_map);
5761
Amir Levy9659e592016-10-27 18:08:27 +03005762 result = arm_iommu_attach_device(cb->dev, cb->mapping);
5763 if (result) {
5764 IPAERR("couldn't attach to IOMMU ret=%d\n", result);
5765 cb->valid = false;
5766 return result;
5767 }
5768
5769 add_map = of_get_property(dev->of_node,
5770 "qcom,additional-mapping", &add_map_size);
5771 if (add_map) {
5772 /* the list must be a whole number of <iova pa size> u32 3-tuples */
5773 if (add_map_size % (3 * sizeof(u32))) {
5774 IPAERR("wrong additional mapping format\n");
5775 cb->valid = false;
5776 return -EFAULT;
5777 }
5778
5779 /* iterate over each entry of the additional mapping array */
5780 for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
5781 u32 iova = be32_to_cpu(add_map[i]);
5782 u32 pa = be32_to_cpu(add_map[i + 1]);
5783 u32 size = be32_to_cpu(add_map[i + 2]);
5784 unsigned long iova_p;
5785 phys_addr_t pa_p;
5786 u32 size_p;
5787
5788 IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
5789 iova_p, pa_p, size_p);
5790 IPADBG("mapping 0x%lx to 0x%pa size %d\n",
5791 iova_p, &pa_p, size_p);
5792 ipa3_iommu_map(cb->mapping->domain,
5793 iova_p, pa_p, size_p,
Amir Levyf5625342016-12-25 10:21:02 +02005794 IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
Amir Levy9659e592016-10-27 18:08:27 +03005795 }
5796 }
5797
5798 /* map SMEM memory for IPA table accesses */
5799 smem_addr = smem_alloc(SMEM_IPA_FILTER_TABLE, IPA_SMEM_SIZE,
5800 SMEM_MODEM, 0);
5801 if (smem_addr) {
5802 phys_addr_t iova = smem_virt_to_phys(smem_addr);
5803 phys_addr_t pa = iova;
5804 unsigned long iova_p;
5805 phys_addr_t pa_p;
5806 u32 size_p;
5807
5808 IPA_SMMU_ROUND_TO_PAGE(iova, pa, IPA_SMEM_SIZE,
5809 iova_p, pa_p, size_p);
5810 IPADBG("mapping 0x%lx to 0x%pa size %d\n",
5811 iova_p, &pa_p, size_p);
5812 ipa3_iommu_map(cb->mapping->domain,
5813 iova_p, pa_p, size_p,
Amir Levyf5625342016-12-25 10:21:02 +02005814 IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
Amir Levy9659e592016-10-27 18:08:27 +03005815 }
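/*
 * The SMEM region holding the modem IPA filter table is mapped 1:1
 * (IOVA == physical address), presumably so that table addresses exchanged
 * with the modem stay valid under the AP SMMU.
 */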
5816
5817
5818 smmu_info.present = true;
5819
5820 if (!ipa3_bus_scale_table)
5821 ipa3_bus_scale_table = msm_bus_cl_get_pdata(ipa3_pdev);
5822
5823 /* Proceed to real initialization */
5824 result = ipa3_pre_init(&ipa3_res, dev);
5825 if (result) {
5826 IPAERR("ipa_init failed\n");
5827 arm_iommu_detach_device(cb->dev);
5828 arm_iommu_release_mapping(cb->mapping);
5829 cb->valid = false;
5830 return result;
5831 }
5832
5833 return result;
5834}
5835
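/*
 * Raised over smp2p when the modem queries whether the AP currently holds an
 * IPA clock vote; the handler freezes the current vote state and signals the
 * answer back to the modem.
 */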
5836static irqreturn_t ipa3_smp2p_modem_clk_query_isr(int irq, void *ctxt)
5837{
5838 ipa3_freeze_clock_vote_and_notify_modem();
5839
5840 return IRQ_HANDLED;
5841}
5842
5843static int ipa3_smp2p_probe(struct device *dev)
5844{
5845 struct device_node *node = dev->of_node;
5846 int res;
5847
Mohammed Javid7de12702017-07-21 15:22:58 +05305848 if (ipa3_ctx == NULL) {
5849 IPAERR("ipa3_ctx was not initialized\n");
5850 return -ENXIO;
5851 }
Amir Levy9659e592016-10-27 18:08:27 +03005852 IPADBG("node->name=%s\n", node->name);
5853 if (strcmp("qcom,smp2pgpio_map_ipa_1_out", node->name) == 0) {
5854 res = of_get_gpio(node, 0);
5855 if (res < 0) {
5856 IPADBG("of_get_gpio returned %d\n", res);
5857 return res;
5858 }
5859
5860 ipa3_ctx->smp2p_info.out_base_id = res;
5861 IPADBG("smp2p out_base_id=%d\n",
5862 ipa3_ctx->smp2p_info.out_base_id);
5863 } else if (strcmp("qcom,smp2pgpio_map_ipa_1_in", node->name) == 0) {
5864 int irq;
5865
5866 res = of_get_gpio(node, 0);
5867 if (res < 0) {
5868 IPADBG("of_get_gpio returned %d\n", res);
5869 return res;
5870 }
5871
5872 ipa3_ctx->smp2p_info.in_base_id = res;
5873 IPADBG("smp2p in_base_id=%d\n",
5874 ipa3_ctx->smp2p_info.in_base_id);
5875
5876 /* register for modem clk query */
5877 irq = gpio_to_irq(ipa3_ctx->smp2p_info.in_base_id +
5878 IPA_GPIO_IN_QUERY_CLK_IDX);
5879 if (irq < 0) {
5880 IPAERR("gpio_to_irq failed %d\n", irq);
5881 return -ENODEV;
5882 }
5883 IPADBG("smp2p irq#=%d\n", irq);
5884 res = request_irq(irq,
5885 (irq_handler_t)ipa3_smp2p_modem_clk_query_isr,
5886 IRQF_TRIGGER_RISING, "ipa_smp2p_clk_vote", dev);
5887 if (res) {
5888 IPAERR("fail to register smp2p irq=%d\n", irq);
5889 return -ENODEV;
5890 }
5891 res = enable_irq_wake(ipa3_ctx->smp2p_info.in_base_id +
5892 IPA_GPIO_IN_QUERY_CLK_IDX);
5893 if (res)
5894 IPAERR("failed to enable irq wake\n");
5895 }
5896
5897 return 0;
5898}
5899
5900int ipa3_plat_drv_probe(struct platform_device *pdev_p,
5901 struct ipa_api_controller *api_ctrl,
5902 const struct of_device_id *pdrv_match)
5903{
5904 int result;
5905 struct device *dev = &pdev_p->dev;
5906
5907 IPADBG("IPA driver probing started\n");
5908 IPADBG("dev->of_node->name = %s\n", dev->of_node->name);
5909
5910 if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-ap-cb"))
5911 return ipa_smmu_ap_cb_probe(dev);
5912
5913 if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-wlan-cb"))
5914 return ipa_smmu_wlan_cb_probe(dev);
5915
5916 if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-uc-cb"))
5917 return ipa_smmu_uc_cb_probe(dev);
5918
5919 if (of_device_is_compatible(dev->of_node,
5920 "qcom,smp2pgpio-map-ipa-1-in"))
5921 return ipa3_smp2p_probe(dev);
5922
5923 if (of_device_is_compatible(dev->of_node,
5924 "qcom,smp2pgpio-map-ipa-1-out"))
5925 return ipa3_smp2p_probe(dev);
5926
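/*
 * Everything above dispatches the child nodes (SMMU context banks and smp2p
 * GPIO maps) that are populated through of_platform_populate() below; from
 * here on we are probing the main IPA node itself.
 */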
5927 master_dev = dev;
5928 if (!ipa3_pdev)
5929 ipa3_pdev = pdev_p;
5930
5931 result = get_ipa_dts_configuration(pdev_p, &ipa3_res);
5932 if (result) {
5933 IPAERR("IPA dts parsing failed\n");
5934 return result;
5935 }
5936
5937 result = ipa3_bind_api_controller(ipa3_res.ipa_hw_type, api_ctrl);
5938 if (result) {
5939 IPAERR("IPA API binding failed\n");
5940 return result;
5941 }
5942
Amir Levy9659e592016-10-27 18:08:27 +03005943 if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
5944 if (of_property_read_bool(pdev_p->dev.of_node,
Amir Levy9659e592016-10-27 18:08:27 +03005945 "qcom,smmu-fast-map"))
5946 smmu_info.fast_map = true;
5947 if (of_property_read_bool(pdev_p->dev.of_node,
5948 "qcom,use-64-bit-dma-mask"))
5949 smmu_info.use_64_bit_dma_mask = true;
5950 smmu_info.arm_smmu = true;
Amir Levy9659e592016-10-27 18:08:27 +03005951 } else if (of_property_read_bool(pdev_p->dev.of_node,
5952 "qcom,msm-smmu")) {
5953 IPAERR("Legacy IOMMU not supported\n");
5954 result = -EOPNOTSUPP;
5955 } else {
5956 if (of_property_read_bool(pdev_p->dev.of_node,
5957 "qcom,use-64-bit-dma-mask")) {
5958 if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(64)) ||
5959 dma_set_coherent_mask(&pdev_p->dev,
5960 DMA_BIT_MASK(64))) {
5961 IPAERR("DMA set 64bit mask failed\n");
5962 return -EOPNOTSUPP;
5963 }
5964 } else {
5965 if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(32)) ||
5966 dma_set_coherent_mask(&pdev_p->dev,
5967 DMA_BIT_MASK(32))) {
5968 IPAERR("DMA set 32bit mask failed\n");
5969 return -EOPNOTSUPP;
5970 }
5971 }
5972
5973 if (!ipa3_bus_scale_table)
5974 ipa3_bus_scale_table = msm_bus_cl_get_pdata(pdev_p);
5975 /* Proceed to real initialization */
5976 result = ipa3_pre_init(&ipa3_res, dev);
5977 if (result) {
5978 IPAERR("ipa3_init failed\n");
5979 return result;
5980 }
5981 }
5982
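/*
 * In the arm-smmu case ipa3_pre_init() is deliberately not called here; it
 * runs from ipa_smmu_ap_cb_probe() once the AP context bank sub-device,
 * populated below, probes successfully.
 */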
Ghanim Fodi115bf8a2017-04-21 01:36:06 -07005983 result = of_platform_populate(pdev_p->dev.of_node,
5984 pdrv_match, NULL, &pdev_p->dev);
5985 if (result) {
5986 IPAERR("failed to populate platform\n");
5987 return result;
5988 }
5989
Amir Levy9659e592016-10-27 18:08:27 +03005990 return result;
5991}
5992
5993/**
5994 * ipa3_ap_suspend() - suspend callback for runtime_pm
5995 * @dev: pointer to device
5996 *
5997 * This callback will be invoked by the runtime_pm framework when an AP suspend
5998 * operation is invoked, usually by pressing a suspend button.
5999 *
6000 * Returns -EAGAIN to runtime_pm framework in case IPA is in use by AP.
6001 * This will postpone the suspend operation until IPA is no longer used by AP.
6002 */
6003int ipa3_ap_suspend(struct device *dev)
6004{
6005 int i;
6006
6007 IPADBG("Enter...\n");
6008
6009 /* If any tx/rx handler is still in polling mode, fail the suspend */
6010 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
6011 if (ipa3_ctx->ep[i].sys &&
6012 atomic_read(&ipa3_ctx->ep[i].sys->curr_polling_state)) {
6013 IPAERR("EP %d is in polling state, do not suspend\n",
6014 i);
6015 return -EAGAIN;
6016 }
6017 }
6018
Michael Adisumarta3e350812017-09-18 14:54:36 -07006019 if (ipa3_ctx->use_ipa_pm) {
6020 ipa_pm_deactivate_all_deferred();
6021 } else {
6022 /*
6023 * Release transport IPA resource without waiting
6024 * for inactivity timer
6025 */
6026 atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
6027 ipa3_transport_release_resource(NULL);
6028 }
Amir Levy9659e592016-10-27 18:08:27 +03006029 IPADBG("Exit\n");
6030
6031 return 0;
6032}
6033
6034/**
6035 * ipa3_ap_resume() - resume callback for runtime_pm
6036 * @dev: pointer to device
6037 *
6038 * This callback will be invoked by the runtime_pm framework when an AP resume
6039 * operation is invoked.
6040 *
6041 * Always returns 0 since resume should always succeed.
6042 */
6043int ipa3_ap_resume(struct device *dev)
6044{
6045 return 0;
6046}
6047
6048struct ipa3_context *ipa3_get_ctx(void)
6049{
6050 return ipa3_ctx;
6051}
6052
Amir Levy9659e592016-10-27 18:08:27 +03006053static void ipa_gsi_notify_cb(struct gsi_per_notify *notify)
6054{
6055 switch (notify->evt_id) {
6056 case GSI_PER_EVT_GLOB_ERROR:
6057 IPAERR("Got GSI_PER_EVT_GLOB_ERROR\n");
6058 IPAERR("Err_desc = 0x%04x\n", notify->data.err_desc);
6059 break;
6060 case GSI_PER_EVT_GLOB_GP1:
6061 IPAERR("Got GSI_PER_EVT_GLOB_GP1\n");
6062 BUG();
6063 break;
6064 case GSI_PER_EVT_GLOB_GP2:
6065 IPAERR("Got GSI_PER_EVT_GLOB_GP2\n");
6066 BUG();
6067 break;
6068 case GSI_PER_EVT_GLOB_GP3:
6069 IPAERR("Got GSI_PER_EVT_GLOB_GP3\n");
6070 BUG();
6071 break;
6072 case GSI_PER_EVT_GENERAL_BREAK_POINT:
6073 IPAERR("Got GSI_PER_EVT_GENERAL_BREAK_POINT\n");
6074 break;
6075 case GSI_PER_EVT_GENERAL_BUS_ERROR:
6076 IPAERR("Got GSI_PER_EVT_GENERAL_BUS_ERROR\n");
6077 BUG();
6078 break;
6079 case GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW:
6080 IPAERR("Got GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW\n");
6081 BUG();
6082 break;
6083 case GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW:
6084 IPAERR("Got GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW\n");
6085 BUG();
6086 break;
6087 default:
6088 IPAERR("Received unexpected evt: %d\n",
6089 notify->evt_id);
6090 BUG();
6091 }
6092}
6093
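/*
 * ipa3_register_ipa_ready_cb() - queue a callback to run once IPA
 * initialization completes.
 *
 * Returns 0 when the callback was queued, -EEXIST when initialization has
 * already finished (so the caller can typically proceed immediately), and
 * -ENXIO if the IPA context does not exist yet.
 */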
6094int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data)
6095{
6096 struct ipa3_ready_cb_info *cb_info = NULL;
6097
6098 /* check whether ipa3_ctx has been initialized */
6099 if (!ipa3_ctx) {
6100 IPADBG("IPA driver has not been initialized yet\n");
6101 return -ENXIO;
6102 }
6103 mutex_lock(&ipa3_ctx->lock);
6104 if (ipa3_ctx->ipa_initialization_complete) {
6105 mutex_unlock(&ipa3_ctx->lock);
6106 IPADBG("IPA driver has already finished initialization\n");
6107 return -EEXIST;
6108 }
6109
6110 cb_info = kmalloc(sizeof(struct ipa3_ready_cb_info), GFP_KERNEL);
6111 if (!cb_info) {
6112 mutex_unlock(&ipa3_ctx->lock);
6113 return -ENOMEM;
6114 }
6115
6116 cb_info->ready_cb = ipa_ready_cb;
6117 cb_info->user_data = user_data;
6118
6119 list_add_tail(&cb_info->link, &ipa3_ctx->ipa_ready_cb_list);
6120 mutex_unlock(&ipa3_ctx->lock);
6121
6122 return 0;
6123}
6124
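/*
 * ipa3_iommu_map() - thin wrapper around iommu_map() that first checks the
 * requested IOVA against the AP and uC carve-out ranges so a stray mapping
 * cannot overlap the driver-managed virtual address space.
 */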
6125int ipa3_iommu_map(struct iommu_domain *domain,
6126 unsigned long iova, phys_addr_t paddr, size_t size, int prot)
6127{
6128 struct ipa_smmu_cb_ctx *ap_cb = ipa3_get_smmu_ctx();
6129 struct ipa_smmu_cb_ctx *uc_cb = ipa3_get_uc_smmu_ctx();
6130
6131 IPADBG("domain =0x%p iova 0x%lx\n", domain, iova);
6132 IPADBG("paddr =0x%pa size 0x%x\n", &paddr, (u32)size);
6133
6134 /* make sure no overlapping */
6135 if (domain == ipa3_get_smmu_domain()) {
6136 if (iova >= ap_cb->va_start && iova < ap_cb->va_end) {
6137 IPAERR("iommu AP overlap addr 0x%lx\n", iova);
6138 ipa_assert();
6139 return -EFAULT;
6140 }
6141 } else if (domain == ipa3_get_wlan_smmu_domain()) {
6142 /* wlan is a one-time map, no overlap tracking is done for it */
6143 } else if (domain == ipa3_get_uc_smmu_domain()) {
6144 if (iova >= uc_cb->va_start && iova < uc_cb->va_end) {
6145 IPAERR("iommu uC overlap addr 0x%lx\n", iova);
6146 ipa_assert();
6147 return -EFAULT;
6148 }
6149 } else {
6150 IPAERR("Unexpected domain 0x%p\n", domain);
6151 ipa_assert();
6152 return -EFAULT;
6153 }
6154
6155 return iommu_map(domain, iova, paddr, size, prot);
6156}
6157
6158MODULE_LICENSE("GPL v2");
6159MODULE_DESCRIPTION("IPA HW device driver");