blob: 4b056f6cf23e7d435edd08e727a16026d31b0e21 [file] [log] [blame]
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Amir Levy9659e592016-10-27 18:08:27 +03002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/clk.h>
14#include <linux/compat.h>
15#include <linux/device.h>
16#include <linux/dmapool.h>
17#include <linux/fs.h>
18#include <linux/genalloc.h>
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/mm.h>
22#include <linux/module.h>
23#include <linux/of.h>
24#include <linux/of_platform.h>
25#include <linux/platform_device.h>
26#include <linux/rbtree.h>
27#include <linux/of_gpio.h>
28#include <linux/uaccess.h>
29#include <linux/interrupt.h>
30#include <linux/msm-bus.h>
31#include <linux/msm-bus-board.h>
32#include <linux/netdevice.h>
33#include <linux/delay.h>
34#include <linux/msm_gsi.h>
Amir Levy9659e592016-10-27 18:08:27 +030035#include <linux/time.h>
36#include <linux/hashtable.h>
Amir Levyd9f51132016-11-14 16:55:35 +020037#include <linux/jhash.h>
Amir Levy9659e592016-10-27 18:08:27 +030038#include <soc/qcom/subsystem_restart.h>
39#include <soc/qcom/smem.h>
Gidon Studinski3021a6f2016-11-10 12:48:48 +020040#include <soc/qcom/scm.h>
Amir Levy635bced2016-12-19 09:20:42 +020041#include <asm/cacheflush.h>
Gidon Studinski3021a6f2016-11-10 12:48:48 +020042
43#ifdef CONFIG_ARM64
44
45/* Outer caches unsupported on ARM64 platforms */
46#define outer_flush_range(x, y)
47#define __cpuc_flush_dcache_area __flush_dcache_area
48
49#endif
50
Amir Levy9659e592016-10-27 18:08:27 +030051#define IPA_SUBSYSTEM_NAME "ipa_fws"
52#include "ipa_i.h"
53#include "../ipa_rm_i.h"
54#include "ipahal/ipahal.h"
55#include "ipahal/ipahal_fltrt.h"
56
57#define CREATE_TRACE_POINTS
58#include "ipa_trace.h"
59
60#define IPA_GPIO_IN_QUERY_CLK_IDX 0
61#define IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX 0
62#define IPA_GPIO_OUT_CLK_VOTE_IDX 1
63
64#define IPA_SUMMING_THRESHOLD (0x10)
65#define IPA_PIPE_MEM_START_OFST (0x0)
66#define IPA_PIPE_MEM_SIZE (0x0)
67#define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \
68 x == IPA_MODE_MOBILE_AP_WAN || \
69 x == IPA_MODE_MOBILE_AP_WLAN)
70#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
71#define IPA_A5_MUX_HEADER_LENGTH (8)
72
73#define IPA_AGGR_MAX_STR_LENGTH (10)
74
Gidon Studinski3021a6f2016-11-10 12:48:48 +020075#define CLEANUP_TAG_PROCESS_TIMEOUT 500
Amir Levy9659e592016-10-27 18:08:27 +030076
77#define IPA_AGGR_STR_IN_BYTES(str) \
78 (strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
79
80#define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100
81
82#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048
83
84#define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0
85#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
86#define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
87#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3
88
89#define IPA_SMEM_SIZE (8 * 1024)
90
91/* round addresses for closes page per SMMU requirements */
92#define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \
93 do { \
94 (iova_p) = rounddown((iova), PAGE_SIZE); \
95 (pa_p) = rounddown((pa), PAGE_SIZE); \
96 (size_p) = roundup((size) + (pa) - (pa_p), PAGE_SIZE); \
97 } while (0)
98
99
100/* The relative location in /lib/firmware where the FWs will reside */
101#define IPA_FWS_PATH "ipa/ipa_fws.elf"
102
103#ifdef CONFIG_COMPAT
104#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
105 IPA_IOCTL_ADD_HDR, \
106 compat_uptr_t)
107#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \
108 IPA_IOCTL_DEL_HDR, \
109 compat_uptr_t)
110#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
111 IPA_IOCTL_ADD_RT_RULE, \
112 compat_uptr_t)
113#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
114 IPA_IOCTL_DEL_RT_RULE, \
115 compat_uptr_t)
116#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
117 IPA_IOCTL_ADD_FLT_RULE, \
118 compat_uptr_t)
119#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
120 IPA_IOCTL_DEL_FLT_RULE, \
121 compat_uptr_t)
122#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \
123 IPA_IOCTL_GET_RT_TBL, \
124 compat_uptr_t)
125#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \
126 IPA_IOCTL_COPY_HDR, \
127 compat_uptr_t)
128#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \
129 IPA_IOCTL_QUERY_INTF, \
130 compat_uptr_t)
131#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
132 IPA_IOCTL_QUERY_INTF_TX_PROPS, \
133 compat_uptr_t)
134#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
135 IPA_IOCTL_QUERY_INTF_RX_PROPS, \
136 compat_uptr_t)
137#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \
138 IPA_IOCTL_QUERY_INTF_EXT_PROPS, \
139 compat_uptr_t)
140#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \
141 IPA_IOCTL_GET_HDR, \
142 compat_uptr_t)
143#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \
144 IPA_IOCTL_ALLOC_NAT_MEM, \
145 compat_uptr_t)
146#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \
147 IPA_IOCTL_V4_INIT_NAT, \
148 compat_uptr_t)
149#define IPA_IOC_NAT_DMA32 _IOWR(IPA_IOC_MAGIC, \
150 IPA_IOCTL_NAT_DMA, \
151 compat_uptr_t)
152#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \
153 IPA_IOCTL_V4_DEL_NAT, \
154 compat_uptr_t)
155#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \
156 IPA_IOCTL_GET_NAT_OFFSET, \
157 compat_uptr_t)
158#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \
159 IPA_IOCTL_PULL_MSG, \
160 compat_uptr_t)
161#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
162 IPA_IOCTL_RM_ADD_DEPENDENCY, \
163 compat_uptr_t)
164#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
165 IPA_IOCTL_RM_DEL_DEPENDENCY, \
166 compat_uptr_t)
167#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \
168 IPA_IOCTL_GENERATE_FLT_EQ, \
169 compat_uptr_t)
170#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \
171 IPA_IOCTL_QUERY_RT_TBL_INDEX, \
172 compat_uptr_t)
173#define IPA_IOC_WRITE_QMAPID32 _IOWR(IPA_IOC_MAGIC, \
174 IPA_IOCTL_WRITE_QMAPID, \
175 compat_uptr_t)
176#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
177 IPA_IOCTL_MDFY_FLT_RULE, \
178 compat_uptr_t)
179#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \
180 IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
181 compat_uptr_t)
182#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \
183 IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \
184 compat_uptr_t)
185#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \
186 IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \
187 compat_uptr_t)
188#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
189 IPA_IOCTL_ADD_HDR_PROC_CTX, \
190 compat_uptr_t)
191#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
192 IPA_IOCTL_DEL_HDR_PROC_CTX, \
193 compat_uptr_t)
194#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
195 IPA_IOCTL_MDFY_RT_RULE, \
196 compat_uptr_t)
197
/**
 * struct ipa3_ioc_nat_alloc_mem32 - nat table memory allocation
 * properties
 * @dev_name: input parameter, the name of table
 * @size: input parameter, size of table in bytes
 * @offset: output parameter, offset into page in case of system memory
 *
 * 32-bit (compat) layout of struct ipa_ioc_nat_alloc_mem, used when a
 * 32-bit userspace issues IPA_IOC_ALLOC_NAT_MEM32 against a 64-bit kernel.
 */
struct ipa3_ioc_nat_alloc_mem32 {
	char dev_name[IPA_RESOURCE_NAME_MAX];	/* table name, NUL-terminated */
	compat_size_t size;			/* requested size in bytes */
	compat_off_t offset;			/* returned offset into page */
};
210#endif
211
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200212#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311
213#define TZ_MEM_PROTECT_REGION_ID 0x10
214
/*
 * I/O vector element passed to TrustZone for the SMMU IPA region-protect
 * SCM call. Layout is fixed by the TZ interface, hence __packed.
 */
struct tz_smmu_ipa_protect_region_iovec_s {
	u64 input_addr;		/* IOVA to protect */
	u64 output_addr;	/* physical address backing the region */
	u64 size;		/* region size in bytes */
	u32 attr;		/* protection attributes (IPA_TZ_UNLOCK_ATTRIBUTE) */
} __packed;
221
/*
 * Top-level descriptor for the TZ region-protect SCM call: points at a
 * buffer of tz_smmu_ipa_protect_region_iovec_s entries. __packed to match
 * the TZ-side layout.
 */
struct tz_smmu_ipa_protect_region_s {
	phys_addr_t iovec_buf;	/* physical address of the iovec array */
	u32 size_bytes;		/* total size of the iovec array in bytes */
} __packed;
226
Amir Levy9659e592016-10-27 18:08:27 +0300227static void ipa3_start_tag_process(struct work_struct *work);
228static DECLARE_WORK(ipa3_tag_work, ipa3_start_tag_process);
229
Amir Levya59ed3f2017-03-05 17:30:55 +0200230static void ipa3_transport_release_resource(struct work_struct *work);
231static DECLARE_DELAYED_WORK(ipa3_transport_release_resource_work,
232 ipa3_transport_release_resource);
Amir Levy9659e592016-10-27 18:08:27 +0300233static void ipa_gsi_notify_cb(struct gsi_per_notify *notify);
234
Utkarsh Saxenaded78142017-05-03 14:04:30 +0530235static void ipa3_post_init_wq(struct work_struct *work);
236static DECLARE_WORK(ipa3_post_init_work, ipa3_post_init_wq);
237
Skylar Chang242952b2017-07-20 15:04:05 -0700238static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work);
239static DECLARE_WORK(ipa_dec_clients_disable_clks_on_wq_work,
240 ipa_dec_clients_disable_clks_on_wq);
241
Amir Levy9659e592016-10-27 18:08:27 +0300242static struct ipa3_plat_drv_res ipa3_res = {0, };
243struct msm_bus_scale_pdata *ipa3_bus_scale_table;
244
245static struct clk *ipa3_clk;
246
247struct ipa3_context *ipa3_ctx;
248static struct device *master_dev;
249struct platform_device *ipa3_pdev;
250static struct {
251 bool present;
252 bool arm_smmu;
Amir Levy9659e592016-10-27 18:08:27 +0300253 bool fast_map;
254 bool s1_bypass;
255 bool use_64_bit_dma_mask;
256 u32 ipa_base;
257 u32 ipa_size;
258} smmu_info;
259
260static char *active_clients_table_buf;
261
262int ipa3_active_clients_log_print_buffer(char *buf, int size)
263{
264 int i;
265 int nbytes;
266 int cnt = 0;
267 int start_idx;
268 int end_idx;
269
270 start_idx = (ipa3_ctx->ipa3_active_clients_logging.log_tail + 1) %
271 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
272 end_idx = ipa3_ctx->ipa3_active_clients_logging.log_head;
273 for (i = start_idx; i != end_idx;
274 i = (i + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) {
275 nbytes = scnprintf(buf + cnt, size - cnt, "%s\n",
276 ipa3_ctx->ipa3_active_clients_logging
277 .log_buffer[i]);
278 cnt += nbytes;
279 }
280
281 return cnt;
282}
283
284int ipa3_active_clients_log_print_table(char *buf, int size)
285{
286 int i;
287 struct ipa3_active_client_htable_entry *iterator;
288 int cnt = 0;
289
290 cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n");
291 hash_for_each(ipa3_ctx->ipa3_active_clients_logging.htable, i,
292 iterator, list) {
293 switch (iterator->type) {
294 case IPA3_ACTIVE_CLIENT_LOG_TYPE_EP:
295 cnt += scnprintf(buf + cnt, size - cnt,
296 "%-40s %-3d ENDPOINT\n",
297 iterator->id_string, iterator->count);
298 break;
299 case IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE:
300 cnt += scnprintf(buf + cnt, size - cnt,
301 "%-40s %-3d SIMPLE\n",
302 iterator->id_string, iterator->count);
303 break;
304 case IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE:
305 cnt += scnprintf(buf + cnt, size - cnt,
306 "%-40s %-3d RESOURCE\n",
307 iterator->id_string, iterator->count);
308 break;
309 case IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL:
310 cnt += scnprintf(buf + cnt, size - cnt,
311 "%-40s %-3d SPECIAL\n",
312 iterator->id_string, iterator->count);
313 break;
314 default:
315 IPAERR("Trying to print illegal active_clients type");
316 break;
317 }
318 }
319 cnt += scnprintf(buf + cnt, size - cnt,
320 "\nTotal active clients count: %d\n",
Skylar Chang242952b2017-07-20 15:04:05 -0700321 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
Amir Levy9659e592016-10-27 18:08:27 +0300322
323 return cnt;
324}
325
/*
 * Panic-notifier callback: on kernel panic, print the active-clients table
 * so the crash log shows who was holding IPA clock votes.
 *
 * NOTE(review): taking a mutex on the panic path can deadlock if the
 * panicking context already holds it — presumably accepted best-effort
 * behavior here; confirm against the panic-notifier usage guidelines.
 */
static int ipa3_active_clients_panic_notifier(struct notifier_block *this,
		unsigned long event, void *ptr)
{
	mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
	ipa3_active_clients_log_print_table(active_clients_table_buf,
			IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE);
	IPAERR("%s", active_clients_table_buf);
	mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);

	return NOTIFY_DONE;
}
337
338static struct notifier_block ipa3_active_clients_panic_blk = {
339 .notifier_call = ipa3_active_clients_panic_notifier,
340};
341
342static int ipa3_active_clients_log_insert(const char *string)
343{
344 int head;
345 int tail;
346
347 if (!ipa3_ctx->ipa3_active_clients_logging.log_rdy)
348 return -EPERM;
349
350 head = ipa3_ctx->ipa3_active_clients_logging.log_head;
351 tail = ipa3_ctx->ipa3_active_clients_logging.log_tail;
352
353 memset(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], '_',
354 IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
355 strlcpy(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], string,
356 (size_t)IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
357 head = (head + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
358 if (tail == head)
359 tail = (tail + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
360
361 ipa3_ctx->ipa3_active_clients_logging.log_tail = tail;
362 ipa3_ctx->ipa3_active_clients_logging.log_head = head;
363
364 return 0;
365}
366
367static int ipa3_active_clients_log_init(void)
368{
369 int i;
370
371 ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] = kzalloc(
372 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES *
373 sizeof(char[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN]),
374 GFP_KERNEL);
375 active_clients_table_buf = kzalloc(sizeof(
376 char[IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE]), GFP_KERNEL);
377 if (ipa3_ctx->ipa3_active_clients_logging.log_buffer == NULL) {
378 pr_err("Active Clients Logging memory allocation failed");
379 goto bail;
380 }
381 for (i = 0; i < IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; i++) {
382 ipa3_ctx->ipa3_active_clients_logging.log_buffer[i] =
383 ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] +
384 (IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN * i);
385 }
386 ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
387 ipa3_ctx->ipa3_active_clients_logging.log_tail =
388 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
389 hash_init(ipa3_ctx->ipa3_active_clients_logging.htable);
390 atomic_notifier_chain_register(&panic_notifier_list,
391 &ipa3_active_clients_panic_blk);
392 ipa3_ctx->ipa3_active_clients_logging.log_rdy = 1;
393
394 return 0;
395
396bail:
397 return -ENOMEM;
398}
399
/*
 * Reset the circular activity log to empty (head == slot after tail)
 * under the active-clients mutex. Does not free or clear line contents;
 * old text is simply no longer reachable by the print loop.
 */
void ipa3_active_clients_log_clear(void)
{
	mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
	ipa3_ctx->ipa3_active_clients_logging.log_tail =
			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
	mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
}
408
409static void ipa3_active_clients_log_destroy(void)
410{
411 ipa3_ctx->ipa3_active_clients_logging.log_rdy = 0;
412 kfree(ipa3_ctx->ipa3_active_clients_logging.log_buffer[0]);
413 ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
414 ipa3_ctx->ipa3_active_clients_logging.log_tail =
415 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
416}
417
/* Indices into the smmu_cb[] context-bank array, one per IPA SMMU client */
enum ipa_smmu_cb_type {
	IPA_SMMU_CB_AP,		/* application-processor context bank */
	IPA_SMMU_CB_WLAN,	/* WLAN context bank */
	IPA_SMMU_CB_UC,		/* IPA microcontroller context bank */
	IPA_SMMU_CB_MAX		/* array size sentinel */
};
425
426static struct ipa_smmu_cb_ctx smmu_cb[IPA_SMMU_CB_MAX];
427
428struct iommu_domain *ipa3_get_smmu_domain(void)
429{
430 if (smmu_cb[IPA_SMMU_CB_AP].valid)
431 return smmu_cb[IPA_SMMU_CB_AP].mapping->domain;
432
433 IPAERR("CB not valid\n");
434
435 return NULL;
436}
437
438struct iommu_domain *ipa3_get_uc_smmu_domain(void)
439{
440 if (smmu_cb[IPA_SMMU_CB_UC].valid)
441 return smmu_cb[IPA_SMMU_CB_UC].mapping->domain;
442
443 IPAERR("CB not valid\n");
444
445 return NULL;
446}
447
448struct iommu_domain *ipa3_get_wlan_smmu_domain(void)
449{
450 if (smmu_cb[IPA_SMMU_CB_WLAN].valid)
451 return smmu_cb[IPA_SMMU_CB_WLAN].iommu;
452
453 IPAERR("CB not valid\n");
454
455 return NULL;
456}
457
458
/**
 * ipa3_get_dma_dev() - Return the device stored in the IPA context
 *
 * Return value: ipa3_ctx->pdev (the device handle IPA uses for DMA APIs,
 * judging by the name — confirm against callers)
 */
struct device *ipa3_get_dma_dev(void)
{
	return ipa3_ctx->pdev;
}
463
/**
 * ipa3_get_smmu_ctx()- Return the AP smmu context
 *
 * (Original comment said "wlan smmu context", but this returns the
 * IPA_SMMU_CB_AP entry.)
 *
 * Return value: pointer to smmu context address
 */
struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(void)
{
	return &smmu_cb[IPA_SMMU_CB_AP];
}
473
/**
 * ipa3_get_wlan_smmu_ctx()- Return the wlan smmu context
 *
 * Return value: pointer to smmu context address (IPA_SMMU_CB_WLAN entry)
 */
struct ipa_smmu_cb_ctx *ipa3_get_wlan_smmu_ctx(void)
{
	return &smmu_cb[IPA_SMMU_CB_WLAN];
}
483
/**
 * ipa3_get_uc_smmu_ctx()- Return the uc smmu context
 *
 * Return value: pointer to smmu context address (IPA_SMMU_CB_UC entry)
 */
struct ipa_smmu_cb_ctx *ipa3_get_uc_smmu_ctx(void)
{
	return &smmu_cb[IPA_SMMU_CB_UC];
}
493
494static int ipa3_open(struct inode *inode, struct file *filp)
495{
496 struct ipa3_context *ctx = NULL;
497
498 IPADBG_LOW("ENTER\n");
499 ctx = container_of(inode->i_cdev, struct ipa3_context, cdev);
500 filp->private_data = ctx;
501
502 return 0;
503}
504
Amir Levy9659e592016-10-27 18:08:27 +0300505static void ipa3_wan_msg_free_cb(void *buff, u32 len, u32 type)
506{
507 if (!buff) {
508 IPAERR("Null buffer\n");
509 return;
510 }
511
512 if (type != WAN_UPSTREAM_ROUTE_ADD &&
513 type != WAN_UPSTREAM_ROUTE_DEL &&
514 type != WAN_EMBMS_CONNECT) {
515 IPAERR("Wrong type given. buff %p type %d\n", buff, type);
516 return;
517 }
518
519 kfree(buff);
520}
521
522static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type)
523{
524 int retval;
525 struct ipa_wan_msg *wan_msg;
526 struct ipa_msg_meta msg_meta;
527
528 wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
529 if (!wan_msg) {
530 IPAERR("no memory\n");
531 return -ENOMEM;
532 }
533
534 if (copy_from_user((u8 *)wan_msg, (u8 *)usr_param,
535 sizeof(struct ipa_wan_msg))) {
536 kfree(wan_msg);
537 return -EFAULT;
538 }
539
540 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
541 msg_meta.msg_type = msg_type;
542 msg_meta.msg_len = sizeof(struct ipa_wan_msg);
543 retval = ipa3_send_msg(&msg_meta, wan_msg, ipa3_wan_msg_free_cb);
544 if (retval) {
545 IPAERR("ipa3_send_msg failed: %d\n", retval);
546 kfree(wan_msg);
547 return retval;
548 }
549
550 return 0;
551}
552
Shihuan Liuc3174f52017-05-04 15:59:13 -0700553static void ipa3_vlan_l2tp_msg_free_cb(void *buff, u32 len, u32 type)
554{
555 if (!buff) {
556 IPAERR("Null buffer\n");
557 return;
558 }
559
560 if (type != ADD_VLAN_IFACE &&
561 type != DEL_VLAN_IFACE &&
562 type != ADD_L2TP_VLAN_MAPPING &&
563 type != DEL_L2TP_VLAN_MAPPING) {
564 IPAERR("Wrong type given. buff %pK type %d\n", buff, type);
565 return;
566 }
567
568 kfree(buff);
569}
570
571static int ipa3_send_vlan_l2tp_msg(unsigned long usr_param, uint8_t msg_type)
572{
573 int retval;
574 struct ipa_ioc_vlan_iface_info *vlan_info;
575 struct ipa_ioc_l2tp_vlan_mapping_info *mapping_info;
576 struct ipa_msg_meta msg_meta;
577
578 if (msg_type == ADD_VLAN_IFACE ||
579 msg_type == DEL_VLAN_IFACE) {
580 vlan_info = kzalloc(sizeof(struct ipa_ioc_vlan_iface_info),
581 GFP_KERNEL);
582 if (!vlan_info) {
583 IPAERR("no memory\n");
584 return -ENOMEM;
585 }
586
587 if (copy_from_user((u8 *)vlan_info, (void __user *)usr_param,
588 sizeof(struct ipa_ioc_vlan_iface_info))) {
589 kfree(vlan_info);
590 return -EFAULT;
591 }
592
593 memset(&msg_meta, 0, sizeof(msg_meta));
594 msg_meta.msg_type = msg_type;
595 msg_meta.msg_len = sizeof(struct ipa_ioc_vlan_iface_info);
596 retval = ipa3_send_msg(&msg_meta, vlan_info,
597 ipa3_vlan_l2tp_msg_free_cb);
598 if (retval) {
599 IPAERR("ipa3_send_msg failed: %d\n", retval);
600 kfree(vlan_info);
601 return retval;
602 }
603 } else if (msg_type == ADD_L2TP_VLAN_MAPPING ||
604 msg_type == DEL_L2TP_VLAN_MAPPING) {
605 mapping_info = kzalloc(sizeof(struct
606 ipa_ioc_l2tp_vlan_mapping_info), GFP_KERNEL);
607 if (!mapping_info) {
608 IPAERR("no memory\n");
609 return -ENOMEM;
610 }
611
612 if (copy_from_user((u8 *)mapping_info,
613 (void __user *)usr_param,
614 sizeof(struct ipa_ioc_l2tp_vlan_mapping_info))) {
615 kfree(mapping_info);
616 return -EFAULT;
617 }
618
619 memset(&msg_meta, 0, sizeof(msg_meta));
620 msg_meta.msg_type = msg_type;
621 msg_meta.msg_len = sizeof(struct
622 ipa_ioc_l2tp_vlan_mapping_info);
623 retval = ipa3_send_msg(&msg_meta, mapping_info,
624 ipa3_vlan_l2tp_msg_free_cb);
625 if (retval) {
626 IPAERR("ipa3_send_msg failed: %d\n", retval);
627 kfree(mapping_info);
628 return retval;
629 }
630 } else {
631 IPAERR("Unexpected event\n");
632 return -EFAULT;
633 }
634
635 return 0;
636}
Amir Levy9659e592016-10-27 18:08:27 +0300637
638static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
639{
640 int retval = 0;
641 u32 pyld_sz;
642 u8 header[128] = { 0 };
643 u8 *param = NULL;
644 struct ipa_ioc_nat_alloc_mem nat_mem;
645 struct ipa_ioc_v4_nat_init nat_init;
646 struct ipa_ioc_v4_nat_del nat_del;
Amir Levy05fccd02017-06-13 16:25:45 +0300647 struct ipa_ioc_nat_pdn_entry mdfy_pdn;
Amir Levy9659e592016-10-27 18:08:27 +0300648 struct ipa_ioc_rm_dependency rm_depend;
649 size_t sz;
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200650 int pre_entry;
Amir Levy9659e592016-10-27 18:08:27 +0300651
652 IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
653
Amir Levy9659e592016-10-27 18:08:27 +0300654 if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
655 return -ENOTTY;
656 if (_IOC_NR(cmd) >= IPA_IOCTL_MAX)
657 return -ENOTTY;
658
Amir Levy05532622016-11-28 12:12:01 +0200659 if (!ipa3_is_ready()) {
660 IPAERR("IPA not ready, waiting for init completion\n");
661 wait_for_completion(&ipa3_ctx->init_completion_obj);
662 }
663
Amir Levy9659e592016-10-27 18:08:27 +0300664 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
665
666 switch (cmd) {
667 case IPA_IOC_ALLOC_NAT_MEM:
668 if (copy_from_user((u8 *)&nat_mem, (u8 *)arg,
669 sizeof(struct ipa_ioc_nat_alloc_mem))) {
670 retval = -EFAULT;
671 break;
672 }
673 /* null terminate the string */
674 nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
675
676 if (ipa3_allocate_nat_device(&nat_mem)) {
677 retval = -EFAULT;
678 break;
679 }
680 if (copy_to_user((u8 *)arg, (u8 *)&nat_mem,
681 sizeof(struct ipa_ioc_nat_alloc_mem))) {
682 retval = -EFAULT;
683 break;
684 }
685 break;
686 case IPA_IOC_V4_INIT_NAT:
687 if (copy_from_user((u8 *)&nat_init, (u8 *)arg,
688 sizeof(struct ipa_ioc_v4_nat_init))) {
689 retval = -EFAULT;
690 break;
691 }
692 if (ipa3_nat_init_cmd(&nat_init)) {
693 retval = -EFAULT;
694 break;
695 }
696 break;
697
698 case IPA_IOC_NAT_DMA:
699 if (copy_from_user(header, (u8 *)arg,
700 sizeof(struct ipa_ioc_nat_dma_cmd))) {
701 retval = -EFAULT;
702 break;
703 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200704 pre_entry =
705 ((struct ipa_ioc_nat_dma_cmd *)header)->entries;
Amir Levy9659e592016-10-27 18:08:27 +0300706 pyld_sz =
707 sizeof(struct ipa_ioc_nat_dma_cmd) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200708 pre_entry * sizeof(struct ipa_ioc_nat_dma_one);
Amir Levy9659e592016-10-27 18:08:27 +0300709 param = kzalloc(pyld_sz, GFP_KERNEL);
710 if (!param) {
711 retval = -ENOMEM;
712 break;
713 }
714
715 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
716 retval = -EFAULT;
717 break;
718 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200719 /* add check in case user-space module compromised */
720 if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
721 != pre_entry)) {
722 IPAERR("current %d pre %d\n",
723 ((struct ipa_ioc_nat_dma_cmd *)param)->entries,
724 pre_entry);
725 retval = -EFAULT;
726 break;
727 }
Amir Levy9659e592016-10-27 18:08:27 +0300728 if (ipa3_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
729 retval = -EFAULT;
730 break;
731 }
732 break;
733
734 case IPA_IOC_V4_DEL_NAT:
735 if (copy_from_user((u8 *)&nat_del, (u8 *)arg,
736 sizeof(struct ipa_ioc_v4_nat_del))) {
737 retval = -EFAULT;
738 break;
739 }
740 if (ipa3_nat_del_cmd(&nat_del)) {
741 retval = -EFAULT;
742 break;
743 }
744 break;
745
Amir Levy05fccd02017-06-13 16:25:45 +0300746 case IPA_IOC_NAT_MODIFY_PDN:
747 if (copy_from_user((u8 *)&mdfy_pdn, (const void __user *)arg,
748 sizeof(struct ipa_ioc_nat_pdn_entry))) {
749 retval = -EFAULT;
750 break;
751 }
Amir Levydc65f4c2017-07-06 09:49:50 +0300752 if (ipa3_nat_mdfy_pdn(&mdfy_pdn)) {
Amir Levy05fccd02017-06-13 16:25:45 +0300753 retval = -EFAULT;
754 break;
755 }
756 break;
757
Amir Levy9659e592016-10-27 18:08:27 +0300758 case IPA_IOC_ADD_HDR:
759 if (copy_from_user(header, (u8 *)arg,
760 sizeof(struct ipa_ioc_add_hdr))) {
761 retval = -EFAULT;
762 break;
763 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200764 pre_entry =
765 ((struct ipa_ioc_add_hdr *)header)->num_hdrs;
Amir Levy9659e592016-10-27 18:08:27 +0300766 pyld_sz =
767 sizeof(struct ipa_ioc_add_hdr) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200768 pre_entry * sizeof(struct ipa_hdr_add);
Amir Levy9659e592016-10-27 18:08:27 +0300769 param = kzalloc(pyld_sz, GFP_KERNEL);
770 if (!param) {
771 retval = -ENOMEM;
772 break;
773 }
774 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
775 retval = -EFAULT;
776 break;
777 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200778 /* add check in case user-space module compromised */
779 if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
780 != pre_entry)) {
781 IPAERR("current %d pre %d\n",
782 ((struct ipa_ioc_add_hdr *)param)->num_hdrs,
783 pre_entry);
784 retval = -EFAULT;
785 break;
786 }
Amir Levy9659e592016-10-27 18:08:27 +0300787 if (ipa3_add_hdr((struct ipa_ioc_add_hdr *)param)) {
788 retval = -EFAULT;
789 break;
790 }
791 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
792 retval = -EFAULT;
793 break;
794 }
795 break;
796
797 case IPA_IOC_DEL_HDR:
798 if (copy_from_user(header, (u8 *)arg,
799 sizeof(struct ipa_ioc_del_hdr))) {
800 retval = -EFAULT;
801 break;
802 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200803 pre_entry =
804 ((struct ipa_ioc_del_hdr *)header)->num_hdls;
Amir Levy9659e592016-10-27 18:08:27 +0300805 pyld_sz =
806 sizeof(struct ipa_ioc_del_hdr) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200807 pre_entry * sizeof(struct ipa_hdr_del);
Amir Levy9659e592016-10-27 18:08:27 +0300808 param = kzalloc(pyld_sz, GFP_KERNEL);
809 if (!param) {
810 retval = -ENOMEM;
811 break;
812 }
813 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
814 retval = -EFAULT;
815 break;
816 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200817 /* add check in case user-space module compromised */
818 if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
819 != pre_entry)) {
820 IPAERR("current %d pre %d\n",
821 ((struct ipa_ioc_del_hdr *)param)->num_hdls,
822 pre_entry);
823 retval = -EFAULT;
824 break;
825 }
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200826 if (ipa3_del_hdr_by_user((struct ipa_ioc_del_hdr *)param,
827 true)) {
Amir Levy9659e592016-10-27 18:08:27 +0300828 retval = -EFAULT;
829 break;
830 }
831 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
832 retval = -EFAULT;
833 break;
834 }
835 break;
836
837 case IPA_IOC_ADD_RT_RULE:
838 if (copy_from_user(header, (u8 *)arg,
839 sizeof(struct ipa_ioc_add_rt_rule))) {
840 retval = -EFAULT;
841 break;
842 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200843 pre_entry =
844 ((struct ipa_ioc_add_rt_rule *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +0300845 pyld_sz =
846 sizeof(struct ipa_ioc_add_rt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200847 pre_entry * sizeof(struct ipa_rt_rule_add);
Amir Levy9659e592016-10-27 18:08:27 +0300848 param = kzalloc(pyld_sz, GFP_KERNEL);
849 if (!param) {
850 retval = -ENOMEM;
851 break;
852 }
853 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
854 retval = -EFAULT;
855 break;
856 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200857 /* add check in case user-space module compromised */
858 if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
859 != pre_entry)) {
860 IPAERR("current %d pre %d\n",
861 ((struct ipa_ioc_add_rt_rule *)param)->
862 num_rules,
863 pre_entry);
864 retval = -EFAULT;
865 break;
866 }
Amir Levy9659e592016-10-27 18:08:27 +0300867 if (ipa3_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
868 retval = -EFAULT;
869 break;
870 }
871 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
872 retval = -EFAULT;
873 break;
874 }
875 break;
876 case IPA_IOC_ADD_RT_RULE_AFTER:
877 if (copy_from_user(header, (u8 *)arg,
878 sizeof(struct ipa_ioc_add_rt_rule_after))) {
879
880 retval = -EFAULT;
881 break;
882 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200883 pre_entry =
884 ((struct ipa_ioc_add_rt_rule_after *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +0300885 pyld_sz =
886 sizeof(struct ipa_ioc_add_rt_rule_after) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200887 pre_entry * sizeof(struct ipa_rt_rule_add);
Amir Levy9659e592016-10-27 18:08:27 +0300888 param = kzalloc(pyld_sz, GFP_KERNEL);
889 if (!param) {
890 retval = -ENOMEM;
891 break;
892 }
893 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
894 retval = -EFAULT;
895 break;
896 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200897 /* add check in case user-space module compromised */
898 if (unlikely(((struct ipa_ioc_add_rt_rule_after *)param)->
899 num_rules != pre_entry)) {
900 IPAERR("current %d pre %d\n",
901 ((struct ipa_ioc_add_rt_rule_after *)param)->
902 num_rules,
903 pre_entry);
904 retval = -EFAULT;
905 break;
906 }
Amir Levy9659e592016-10-27 18:08:27 +0300907 if (ipa3_add_rt_rule_after(
908 (struct ipa_ioc_add_rt_rule_after *)param)) {
909
910 retval = -EFAULT;
911 break;
912 }
913 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
914 retval = -EFAULT;
915 break;
916 }
917 break;
918
919 case IPA_IOC_MDFY_RT_RULE:
920 if (copy_from_user(header, (u8 *)arg,
921 sizeof(struct ipa_ioc_mdfy_rt_rule))) {
922 retval = -EFAULT;
923 break;
924 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200925 pre_entry =
926 ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +0300927 pyld_sz =
928 sizeof(struct ipa_ioc_mdfy_rt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200929 pre_entry * sizeof(struct ipa_rt_rule_mdfy);
Amir Levy9659e592016-10-27 18:08:27 +0300930 param = kzalloc(pyld_sz, GFP_KERNEL);
931 if (!param) {
932 retval = -ENOMEM;
933 break;
934 }
935 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
936 retval = -EFAULT;
937 break;
938 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200939 /* add check in case user-space module compromised */
940 if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
941 != pre_entry)) {
942 IPAERR("current %d pre %d\n",
943 ((struct ipa_ioc_mdfy_rt_rule *)param)->
944 num_rules,
945 pre_entry);
946 retval = -EFAULT;
947 break;
948 }
Amir Levy9659e592016-10-27 18:08:27 +0300949 if (ipa3_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) {
950 retval = -EFAULT;
951 break;
952 }
953 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
954 retval = -EFAULT;
955 break;
956 }
957 break;
958
959 case IPA_IOC_DEL_RT_RULE:
960 if (copy_from_user(header, (u8 *)arg,
961 sizeof(struct ipa_ioc_del_rt_rule))) {
962 retval = -EFAULT;
963 break;
964 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200965 pre_entry =
966 ((struct ipa_ioc_del_rt_rule *)header)->num_hdls;
Amir Levy9659e592016-10-27 18:08:27 +0300967 pyld_sz =
968 sizeof(struct ipa_ioc_del_rt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200969 pre_entry * sizeof(struct ipa_rt_rule_del);
Amir Levy9659e592016-10-27 18:08:27 +0300970 param = kzalloc(pyld_sz, GFP_KERNEL);
971 if (!param) {
972 retval = -ENOMEM;
973 break;
974 }
975 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
976 retval = -EFAULT;
977 break;
978 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200979 /* add check in case user-space module compromised */
980 if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
981 != pre_entry)) {
982 IPAERR("current %d pre %d\n",
983 ((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
984 pre_entry);
985 retval = -EFAULT;
986 break;
987 }
Amir Levy9659e592016-10-27 18:08:27 +0300988 if (ipa3_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
989 retval = -EFAULT;
990 break;
991 }
992 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
993 retval = -EFAULT;
994 break;
995 }
996 break;
997
998 case IPA_IOC_ADD_FLT_RULE:
999 if (copy_from_user(header, (u8 *)arg,
1000 sizeof(struct ipa_ioc_add_flt_rule))) {
1001 retval = -EFAULT;
1002 break;
1003 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001004 pre_entry =
1005 ((struct ipa_ioc_add_flt_rule *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +03001006 pyld_sz =
1007 sizeof(struct ipa_ioc_add_flt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001008 pre_entry * sizeof(struct ipa_flt_rule_add);
Amir Levy9659e592016-10-27 18:08:27 +03001009 param = kzalloc(pyld_sz, GFP_KERNEL);
1010 if (!param) {
1011 retval = -ENOMEM;
1012 break;
1013 }
1014 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1015 retval = -EFAULT;
1016 break;
1017 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001018 /* add check in case user-space module compromised */
1019 if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
1020 != pre_entry)) {
1021 IPAERR("current %d pre %d\n",
1022 ((struct ipa_ioc_add_flt_rule *)param)->
1023 num_rules,
1024 pre_entry);
1025 retval = -EFAULT;
1026 break;
1027 }
Amir Levy9659e592016-10-27 18:08:27 +03001028 if (ipa3_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
1029 retval = -EFAULT;
1030 break;
1031 }
1032 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1033 retval = -EFAULT;
1034 break;
1035 }
1036 break;
1037
1038 case IPA_IOC_ADD_FLT_RULE_AFTER:
1039 if (copy_from_user(header, (u8 *)arg,
1040 sizeof(struct ipa_ioc_add_flt_rule_after))) {
1041
1042 retval = -EFAULT;
1043 break;
1044 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001045 pre_entry =
1046 ((struct ipa_ioc_add_flt_rule_after *)header)->
1047 num_rules;
Amir Levy9659e592016-10-27 18:08:27 +03001048 pyld_sz =
1049 sizeof(struct ipa_ioc_add_flt_rule_after) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001050 pre_entry * sizeof(struct ipa_flt_rule_add);
Amir Levy9659e592016-10-27 18:08:27 +03001051 param = kzalloc(pyld_sz, GFP_KERNEL);
1052 if (!param) {
1053 retval = -ENOMEM;
1054 break;
1055 }
1056 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1057 retval = -EFAULT;
1058 break;
1059 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001060 /* add check in case user-space module compromised */
1061 if (unlikely(((struct ipa_ioc_add_flt_rule_after *)param)->
1062 num_rules != pre_entry)) {
1063 IPAERR("current %d pre %d\n",
1064 ((struct ipa_ioc_add_flt_rule_after *)param)->
1065 num_rules,
1066 pre_entry);
1067 retval = -EFAULT;
1068 break;
1069 }
Amir Levy9659e592016-10-27 18:08:27 +03001070 if (ipa3_add_flt_rule_after(
1071 (struct ipa_ioc_add_flt_rule_after *)param)) {
1072 retval = -EFAULT;
1073 break;
1074 }
1075 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1076 retval = -EFAULT;
1077 break;
1078 }
1079 break;
1080
1081 case IPA_IOC_DEL_FLT_RULE:
1082 if (copy_from_user(header, (u8 *)arg,
1083 sizeof(struct ipa_ioc_del_flt_rule))) {
1084 retval = -EFAULT;
1085 break;
1086 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001087 pre_entry =
1088 ((struct ipa_ioc_del_flt_rule *)header)->num_hdls;
Amir Levy9659e592016-10-27 18:08:27 +03001089 pyld_sz =
1090 sizeof(struct ipa_ioc_del_flt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001091 pre_entry * sizeof(struct ipa_flt_rule_del);
Amir Levy9659e592016-10-27 18:08:27 +03001092 param = kzalloc(pyld_sz, GFP_KERNEL);
1093 if (!param) {
1094 retval = -ENOMEM;
1095 break;
1096 }
1097 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1098 retval = -EFAULT;
1099 break;
1100 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001101 /* add check in case user-space module compromised */
1102 if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
1103 != pre_entry)) {
1104 IPAERR("current %d pre %d\n",
1105 ((struct ipa_ioc_del_flt_rule *)param)->
1106 num_hdls,
1107 pre_entry);
1108 retval = -EFAULT;
1109 break;
1110 }
Amir Levy9659e592016-10-27 18:08:27 +03001111 if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
1112 retval = -EFAULT;
1113 break;
1114 }
1115 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1116 retval = -EFAULT;
1117 break;
1118 }
1119 break;
1120
1121 case IPA_IOC_MDFY_FLT_RULE:
1122 if (copy_from_user(header, (u8 *)arg,
1123 sizeof(struct ipa_ioc_mdfy_flt_rule))) {
1124 retval = -EFAULT;
1125 break;
1126 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001127 pre_entry =
1128 ((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +03001129 pyld_sz =
1130 sizeof(struct ipa_ioc_mdfy_flt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001131 pre_entry * sizeof(struct ipa_flt_rule_mdfy);
Amir Levy9659e592016-10-27 18:08:27 +03001132 param = kzalloc(pyld_sz, GFP_KERNEL);
1133 if (!param) {
1134 retval = -ENOMEM;
1135 break;
1136 }
1137 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1138 retval = -EFAULT;
1139 break;
1140 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001141 /* add check in case user-space module compromised */
1142 if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
1143 != pre_entry)) {
1144 IPAERR("current %d pre %d\n",
1145 ((struct ipa_ioc_mdfy_flt_rule *)param)->
1146 num_rules,
1147 pre_entry);
1148 retval = -EFAULT;
1149 break;
1150 }
Amir Levy9659e592016-10-27 18:08:27 +03001151 if (ipa3_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) {
1152 retval = -EFAULT;
1153 break;
1154 }
1155 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1156 retval = -EFAULT;
1157 break;
1158 }
1159 break;
1160
1161 case IPA_IOC_COMMIT_HDR:
1162 retval = ipa3_commit_hdr();
1163 break;
1164 case IPA_IOC_RESET_HDR:
1165 retval = ipa3_reset_hdr();
1166 break;
1167 case IPA_IOC_COMMIT_RT:
1168 retval = ipa3_commit_rt(arg);
1169 break;
1170 case IPA_IOC_RESET_RT:
1171 retval = ipa3_reset_rt(arg);
1172 break;
1173 case IPA_IOC_COMMIT_FLT:
1174 retval = ipa3_commit_flt(arg);
1175 break;
1176 case IPA_IOC_RESET_FLT:
1177 retval = ipa3_reset_flt(arg);
1178 break;
1179 case IPA_IOC_GET_RT_TBL:
1180 if (copy_from_user(header, (u8 *)arg,
1181 sizeof(struct ipa_ioc_get_rt_tbl))) {
1182 retval = -EFAULT;
1183 break;
1184 }
1185 if (ipa3_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) {
1186 retval = -EFAULT;
1187 break;
1188 }
1189 if (copy_to_user((u8 *)arg, header,
1190 sizeof(struct ipa_ioc_get_rt_tbl))) {
1191 retval = -EFAULT;
1192 break;
1193 }
1194 break;
1195 case IPA_IOC_PUT_RT_TBL:
1196 retval = ipa3_put_rt_tbl(arg);
1197 break;
1198 case IPA_IOC_GET_HDR:
1199 if (copy_from_user(header, (u8 *)arg,
1200 sizeof(struct ipa_ioc_get_hdr))) {
1201 retval = -EFAULT;
1202 break;
1203 }
1204 if (ipa3_get_hdr((struct ipa_ioc_get_hdr *)header)) {
1205 retval = -EFAULT;
1206 break;
1207 }
1208 if (copy_to_user((u8 *)arg, header,
1209 sizeof(struct ipa_ioc_get_hdr))) {
1210 retval = -EFAULT;
1211 break;
1212 }
1213 break;
1214 case IPA_IOC_PUT_HDR:
1215 retval = ipa3_put_hdr(arg);
1216 break;
1217 case IPA_IOC_SET_FLT:
1218 retval = ipa3_cfg_filter(arg);
1219 break;
1220 case IPA_IOC_COPY_HDR:
1221 if (copy_from_user(header, (u8 *)arg,
1222 sizeof(struct ipa_ioc_copy_hdr))) {
1223 retval = -EFAULT;
1224 break;
1225 }
1226 if (ipa3_copy_hdr((struct ipa_ioc_copy_hdr *)header)) {
1227 retval = -EFAULT;
1228 break;
1229 }
1230 if (copy_to_user((u8 *)arg, header,
1231 sizeof(struct ipa_ioc_copy_hdr))) {
1232 retval = -EFAULT;
1233 break;
1234 }
1235 break;
1236 case IPA_IOC_QUERY_INTF:
1237 if (copy_from_user(header, (u8 *)arg,
1238 sizeof(struct ipa_ioc_query_intf))) {
1239 retval = -EFAULT;
1240 break;
1241 }
1242 if (ipa3_query_intf((struct ipa_ioc_query_intf *)header)) {
1243 retval = -1;
1244 break;
1245 }
1246 if (copy_to_user((u8 *)arg, header,
1247 sizeof(struct ipa_ioc_query_intf))) {
1248 retval = -EFAULT;
1249 break;
1250 }
1251 break;
1252 case IPA_IOC_QUERY_INTF_TX_PROPS:
1253 sz = sizeof(struct ipa_ioc_query_intf_tx_props);
1254 if (copy_from_user(header, (u8 *)arg, sz)) {
1255 retval = -EFAULT;
1256 break;
1257 }
1258
1259 if (((struct ipa_ioc_query_intf_tx_props *)header)->num_tx_props
1260 > IPA_NUM_PROPS_MAX) {
1261 retval = -EFAULT;
1262 break;
1263 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001264 pre_entry =
1265 ((struct ipa_ioc_query_intf_tx_props *)
1266 header)->num_tx_props;
1267 pyld_sz = sz + pre_entry *
Amir Levy9659e592016-10-27 18:08:27 +03001268 sizeof(struct ipa_ioc_tx_intf_prop);
1269 param = kzalloc(pyld_sz, GFP_KERNEL);
1270 if (!param) {
1271 retval = -ENOMEM;
1272 break;
1273 }
1274 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1275 retval = -EFAULT;
1276 break;
1277 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001278 /* add check in case user-space module compromised */
1279 if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
1280 param)->num_tx_props
1281 != pre_entry)) {
1282 IPAERR("current %d pre %d\n",
1283 ((struct ipa_ioc_query_intf_tx_props *)
1284 param)->num_tx_props, pre_entry);
1285 retval = -EFAULT;
1286 break;
1287 }
Amir Levy9659e592016-10-27 18:08:27 +03001288 if (ipa3_query_intf_tx_props(
1289 (struct ipa_ioc_query_intf_tx_props *)param)) {
1290 retval = -1;
1291 break;
1292 }
1293 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1294 retval = -EFAULT;
1295 break;
1296 }
1297 break;
1298 case IPA_IOC_QUERY_INTF_RX_PROPS:
1299 sz = sizeof(struct ipa_ioc_query_intf_rx_props);
1300 if (copy_from_user(header, (u8 *)arg, sz)) {
1301 retval = -EFAULT;
1302 break;
1303 }
1304
1305 if (((struct ipa_ioc_query_intf_rx_props *)header)->num_rx_props
1306 > IPA_NUM_PROPS_MAX) {
1307 retval = -EFAULT;
1308 break;
1309 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001310 pre_entry =
1311 ((struct ipa_ioc_query_intf_rx_props *)
1312 header)->num_rx_props;
1313 pyld_sz = sz + pre_entry *
Amir Levy9659e592016-10-27 18:08:27 +03001314 sizeof(struct ipa_ioc_rx_intf_prop);
1315 param = kzalloc(pyld_sz, GFP_KERNEL);
1316 if (!param) {
1317 retval = -ENOMEM;
1318 break;
1319 }
1320 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1321 retval = -EFAULT;
1322 break;
1323 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001324 /* add check in case user-space module compromised */
1325 if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
1326 param)->num_rx_props != pre_entry)) {
1327 IPAERR("current %d pre %d\n",
1328 ((struct ipa_ioc_query_intf_rx_props *)
1329 param)->num_rx_props, pre_entry);
1330 retval = -EFAULT;
1331 break;
1332 }
Amir Levy9659e592016-10-27 18:08:27 +03001333 if (ipa3_query_intf_rx_props(
1334 (struct ipa_ioc_query_intf_rx_props *)param)) {
1335 retval = -1;
1336 break;
1337 }
1338 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1339 retval = -EFAULT;
1340 break;
1341 }
1342 break;
1343 case IPA_IOC_QUERY_INTF_EXT_PROPS:
1344 sz = sizeof(struct ipa_ioc_query_intf_ext_props);
1345 if (copy_from_user(header, (u8 *)arg, sz)) {
1346 retval = -EFAULT;
1347 break;
1348 }
1349
1350 if (((struct ipa_ioc_query_intf_ext_props *)
1351 header)->num_ext_props > IPA_NUM_PROPS_MAX) {
1352 retval = -EFAULT;
1353 break;
1354 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001355 pre_entry =
1356 ((struct ipa_ioc_query_intf_ext_props *)
1357 header)->num_ext_props;
1358 pyld_sz = sz + pre_entry *
Amir Levy9659e592016-10-27 18:08:27 +03001359 sizeof(struct ipa_ioc_ext_intf_prop);
1360 param = kzalloc(pyld_sz, GFP_KERNEL);
1361 if (!param) {
1362 retval = -ENOMEM;
1363 break;
1364 }
1365 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1366 retval = -EFAULT;
1367 break;
1368 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001369 /* add check in case user-space module compromised */
1370 if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
1371 param)->num_ext_props != pre_entry)) {
1372 IPAERR("current %d pre %d\n",
1373 ((struct ipa_ioc_query_intf_ext_props *)
1374 param)->num_ext_props, pre_entry);
1375 retval = -EFAULT;
1376 break;
1377 }
Amir Levy9659e592016-10-27 18:08:27 +03001378 if (ipa3_query_intf_ext_props(
1379 (struct ipa_ioc_query_intf_ext_props *)param)) {
1380 retval = -1;
1381 break;
1382 }
1383 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1384 retval = -EFAULT;
1385 break;
1386 }
1387 break;
1388 case IPA_IOC_PULL_MSG:
1389 if (copy_from_user(header, (u8 *)arg,
1390 sizeof(struct ipa_msg_meta))) {
1391 retval = -EFAULT;
1392 break;
1393 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001394 pre_entry =
Amir Levy9659e592016-10-27 18:08:27 +03001395 ((struct ipa_msg_meta *)header)->msg_len;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001396 pyld_sz = sizeof(struct ipa_msg_meta) +
1397 pre_entry;
Amir Levy9659e592016-10-27 18:08:27 +03001398 param = kzalloc(pyld_sz, GFP_KERNEL);
1399 if (!param) {
1400 retval = -ENOMEM;
1401 break;
1402 }
1403 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1404 retval = -EFAULT;
1405 break;
1406 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001407 /* add check in case user-space module compromised */
1408 if (unlikely(((struct ipa_msg_meta *)param)->msg_len
1409 != pre_entry)) {
1410 IPAERR("current %d pre %d\n",
1411 ((struct ipa_msg_meta *)param)->msg_len,
1412 pre_entry);
1413 retval = -EFAULT;
1414 break;
1415 }
Amir Levy9659e592016-10-27 18:08:27 +03001416 if (ipa3_pull_msg((struct ipa_msg_meta *)param,
1417 (char *)param + sizeof(struct ipa_msg_meta),
1418 ((struct ipa_msg_meta *)param)->msg_len) !=
1419 ((struct ipa_msg_meta *)param)->msg_len) {
1420 retval = -1;
1421 break;
1422 }
1423 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1424 retval = -EFAULT;
1425 break;
1426 }
1427 break;
1428 case IPA_IOC_RM_ADD_DEPENDENCY:
1429 if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
1430 sizeof(struct ipa_ioc_rm_dependency))) {
1431 retval = -EFAULT;
1432 break;
1433 }
1434 retval = ipa_rm_add_dependency_from_ioctl(
1435 rm_depend.resource_name, rm_depend.depends_on_name);
1436 break;
1437 case IPA_IOC_RM_DEL_DEPENDENCY:
1438 if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
1439 sizeof(struct ipa_ioc_rm_dependency))) {
1440 retval = -EFAULT;
1441 break;
1442 }
1443 retval = ipa_rm_delete_dependency_from_ioctl(
1444 rm_depend.resource_name, rm_depend.depends_on_name);
1445 break;
1446 case IPA_IOC_GENERATE_FLT_EQ:
1447 {
1448 struct ipa_ioc_generate_flt_eq flt_eq;
1449
1450 if (copy_from_user(&flt_eq, (u8 *)arg,
1451 sizeof(struct ipa_ioc_generate_flt_eq))) {
1452 retval = -EFAULT;
1453 break;
1454 }
1455 if (ipahal_flt_generate_equation(flt_eq.ip,
1456 &flt_eq.attrib, &flt_eq.eq_attrib)) {
1457 retval = -EFAULT;
1458 break;
1459 }
1460 if (copy_to_user((u8 *)arg, &flt_eq,
1461 sizeof(struct ipa_ioc_generate_flt_eq))) {
1462 retval = -EFAULT;
1463 break;
1464 }
1465 break;
1466 }
1467 case IPA_IOC_QUERY_EP_MAPPING:
1468 {
1469 retval = ipa3_get_ep_mapping(arg);
1470 break;
1471 }
1472 case IPA_IOC_QUERY_RT_TBL_INDEX:
1473 if (copy_from_user(header, (u8 *)arg,
1474 sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
1475 retval = -EFAULT;
1476 break;
1477 }
1478 if (ipa3_query_rt_index(
1479 (struct ipa_ioc_get_rt_tbl_indx *)header)) {
1480 retval = -EFAULT;
1481 break;
1482 }
1483 if (copy_to_user((u8 *)arg, header,
1484 sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
1485 retval = -EFAULT;
1486 break;
1487 }
1488 break;
1489 case IPA_IOC_WRITE_QMAPID:
1490 if (copy_from_user(header, (u8 *)arg,
1491 sizeof(struct ipa_ioc_write_qmapid))) {
1492 retval = -EFAULT;
1493 break;
1494 }
1495 if (ipa3_write_qmap_id((struct ipa_ioc_write_qmapid *)header)) {
1496 retval = -EFAULT;
1497 break;
1498 }
1499 if (copy_to_user((u8 *)arg, header,
1500 sizeof(struct ipa_ioc_write_qmapid))) {
1501 retval = -EFAULT;
1502 break;
1503 }
1504 break;
1505 case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD:
1506 retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD);
1507 if (retval) {
1508 IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
1509 break;
1510 }
1511 break;
1512 case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL:
1513 retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL);
1514 if (retval) {
1515 IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
1516 break;
1517 }
1518 break;
1519 case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED:
1520 retval = ipa3_send_wan_msg(arg, WAN_EMBMS_CONNECT);
1521 if (retval) {
1522 IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
1523 break;
1524 }
1525 break;
1526 case IPA_IOC_ADD_HDR_PROC_CTX:
1527 if (copy_from_user(header, (u8 *)arg,
1528 sizeof(struct ipa_ioc_add_hdr_proc_ctx))) {
1529 retval = -EFAULT;
1530 break;
1531 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001532 pre_entry =
1533 ((struct ipa_ioc_add_hdr_proc_ctx *)
1534 header)->num_proc_ctxs;
Amir Levy9659e592016-10-27 18:08:27 +03001535 pyld_sz =
1536 sizeof(struct ipa_ioc_add_hdr_proc_ctx) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001537 pre_entry * sizeof(struct ipa_hdr_proc_ctx_add);
Amir Levy9659e592016-10-27 18:08:27 +03001538 param = kzalloc(pyld_sz, GFP_KERNEL);
1539 if (!param) {
1540 retval = -ENOMEM;
1541 break;
1542 }
1543 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1544 retval = -EFAULT;
1545 break;
1546 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001547 /* add check in case user-space module compromised */
1548 if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
1549 param)->num_proc_ctxs != pre_entry)) {
1550 IPAERR("current %d pre %d\n",
1551 ((struct ipa_ioc_add_hdr_proc_ctx *)
1552 param)->num_proc_ctxs, pre_entry);
1553 retval = -EFAULT;
1554 break;
1555 }
Amir Levy9659e592016-10-27 18:08:27 +03001556 if (ipa3_add_hdr_proc_ctx(
1557 (struct ipa_ioc_add_hdr_proc_ctx *)param)) {
1558 retval = -EFAULT;
1559 break;
1560 }
1561 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1562 retval = -EFAULT;
1563 break;
1564 }
1565 break;
1566 case IPA_IOC_DEL_HDR_PROC_CTX:
1567 if (copy_from_user(header, (u8 *)arg,
1568 sizeof(struct ipa_ioc_del_hdr_proc_ctx))) {
1569 retval = -EFAULT;
1570 break;
1571 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001572 pre_entry =
1573 ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls;
Amir Levy9659e592016-10-27 18:08:27 +03001574 pyld_sz =
1575 sizeof(struct ipa_ioc_del_hdr_proc_ctx) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001576 pre_entry * sizeof(struct ipa_hdr_proc_ctx_del);
Amir Levy9659e592016-10-27 18:08:27 +03001577 param = kzalloc(pyld_sz, GFP_KERNEL);
1578 if (!param) {
1579 retval = -ENOMEM;
1580 break;
1581 }
1582 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1583 retval = -EFAULT;
1584 break;
1585 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001586 /* add check in case user-space module compromised */
1587 if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
1588 param)->num_hdls != pre_entry)) {
1589 IPAERR("current %d pre %d\n",
1590 ((struct ipa_ioc_del_hdr_proc_ctx *)param)->
1591 num_hdls,
1592 pre_entry);
1593 retval = -EFAULT;
1594 break;
1595 }
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001596 if (ipa3_del_hdr_proc_ctx_by_user(
1597 (struct ipa_ioc_del_hdr_proc_ctx *)param, true)) {
Amir Levy9659e592016-10-27 18:08:27 +03001598 retval = -EFAULT;
1599 break;
1600 }
1601 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1602 retval = -EFAULT;
1603 break;
1604 }
1605 break;
1606
1607 case IPA_IOC_GET_HW_VERSION:
1608 pyld_sz = sizeof(enum ipa_hw_type);
1609 param = kzalloc(pyld_sz, GFP_KERNEL);
1610 if (!param) {
1611 retval = -ENOMEM;
1612 break;
1613 }
1614 memcpy(param, &ipa3_ctx->ipa_hw_type, pyld_sz);
1615 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1616 retval = -EFAULT;
1617 break;
1618 }
1619 break;
1620
Shihuan Liuc3174f52017-05-04 15:59:13 -07001621 case IPA_IOC_ADD_VLAN_IFACE:
1622 if (ipa3_send_vlan_l2tp_msg(arg, ADD_VLAN_IFACE)) {
1623 retval = -EFAULT;
1624 break;
1625 }
1626 break;
1627
1628 case IPA_IOC_DEL_VLAN_IFACE:
1629 if (ipa3_send_vlan_l2tp_msg(arg, DEL_VLAN_IFACE)) {
1630 retval = -EFAULT;
1631 break;
1632 }
1633 break;
1634
1635 case IPA_IOC_ADD_L2TP_VLAN_MAPPING:
1636 if (ipa3_send_vlan_l2tp_msg(arg, ADD_L2TP_VLAN_MAPPING)) {
1637 retval = -EFAULT;
1638 break;
1639 }
1640 break;
1641
1642 case IPA_IOC_DEL_L2TP_VLAN_MAPPING:
1643 if (ipa3_send_vlan_l2tp_msg(arg, DEL_L2TP_VLAN_MAPPING)) {
1644 retval = -EFAULT;
1645 break;
1646 }
1647 break;
1648
Amir Levy9659e592016-10-27 18:08:27 +03001649 default: /* redundant, as cmd was checked against MAXNR */
1650 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1651 return -ENOTTY;
1652 }
1653 kfree(param);
1654 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1655
1656 return retval;
1657}
1658
1659/**
1660* ipa3_setup_dflt_rt_tables() - Setup default routing tables
1661*
1662* Return codes:
1663* 0: success
1664* -ENOMEM: failed to allocate memory
1665* -EPERM: failed to add the tables
1666*/
1667int ipa3_setup_dflt_rt_tables(void)
1668{
1669 struct ipa_ioc_add_rt_rule *rt_rule;
1670 struct ipa_rt_rule_add *rt_rule_entry;
1671
1672 rt_rule =
1673 kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
1674 sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
1675 if (!rt_rule) {
1676 IPAERR("fail to alloc mem\n");
1677 return -ENOMEM;
1678 }
1679 /* setup a default v4 route to point to Apps */
1680 rt_rule->num_rules = 1;
1681 rt_rule->commit = 1;
1682 rt_rule->ip = IPA_IP_v4;
1683 strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
1684 IPA_RESOURCE_NAME_MAX);
1685
1686 rt_rule_entry = &rt_rule->rules[0];
1687 rt_rule_entry->at_rear = 1;
1688 rt_rule_entry->rule.dst = IPA_CLIENT_APPS_LAN_CONS;
1689 rt_rule_entry->rule.hdr_hdl = ipa3_ctx->excp_hdr_hdl;
1690 rt_rule_entry->rule.retain_hdr = 1;
1691
1692 if (ipa3_add_rt_rule(rt_rule)) {
1693 IPAERR("fail to add dflt v4 rule\n");
1694 kfree(rt_rule);
1695 return -EPERM;
1696 }
1697 IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
1698 ipa3_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
1699
1700 /* setup a default v6 route to point to A5 */
1701 rt_rule->ip = IPA_IP_v6;
1702 if (ipa3_add_rt_rule(rt_rule)) {
1703 IPAERR("fail to add dflt v6 rule\n");
1704 kfree(rt_rule);
1705 return -EPERM;
1706 }
1707 IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
1708 ipa3_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
1709
1710 /*
1711 * because these tables are the very first to be added, they will both
1712 * have the same index (0) which is essential for programming the
1713 * "route" end-point config
1714 */
1715
1716 kfree(rt_rule);
1717
1718 return 0;
1719}
1720
/*
 * ipa3_setup_exception_path() - install the LAN-RX exception header and
 * program the route register so exception packets are steered to Apps.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EPERM if the
 * header add or route configuration fails.
 */
static int ipa3_setup_exception_path(void)
{
	struct ipa_ioc_add_hdr *hdr;
	struct ipa_hdr_add *hdr_entry;
	struct ipahal_reg_route route = { 0 };
	int ret;

	/* install the basic exception header */
	hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
		sizeof(struct ipa_hdr_add), GFP_KERNEL);
	if (!hdr) {
		IPAERR("fail to alloc exception hdr\n");
		return -ENOMEM;
	}
	hdr->num_hdrs = 1;
	hdr->commit = 1;
	hdr_entry = &hdr->hdr[0];

	strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, IPA_RESOURCE_NAME_MAX);
	hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH;

	if (ipa3_add_hdr(hdr)) {
		IPAERR("fail to add exception hdr\n");
		ret = -EPERM;
		goto bail;
	}

	/* per-entry status must also be checked; the bulk call can succeed
	 * while this individual entry failed
	 */
	if (hdr_entry->status) {
		IPAERR("fail to add exception hdr\n");
		ret = -EPERM;
		goto bail;
	}

	/* remember the handle so the default route rules can reference it */
	ipa3_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;

	/* set the route register to pass exception packets to Apps */
	route.route_def_pipe = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
	route.route_frag_def_pipe = ipa3_get_ep_mapping(
		IPA_CLIENT_APPS_LAN_CONS);
	route.route_def_hdr_table = !ipa3_ctx->hdr_tbl_lcl;
	route.route_def_retain_hdr = 1;

	if (ipa3_cfg_route(&route)) {
		IPAERR("fail to add exception hdr\n");
		ret = -EPERM;
		goto bail;
	}

	ret = 0;
bail:
	kfree(hdr);
	return ret;
}
1774
1775static int ipa3_init_smem_region(int memory_region_size,
1776 int memory_region_offset)
1777{
1778 struct ipahal_imm_cmd_dma_shared_mem cmd;
1779 struct ipahal_imm_cmd_pyld *cmd_pyld;
1780 struct ipa3_desc desc;
1781 struct ipa_mem_buffer mem;
1782 int rc;
1783
1784 if (memory_region_size == 0)
1785 return 0;
1786
1787 memset(&desc, 0, sizeof(desc));
1788 memset(&cmd, 0, sizeof(cmd));
1789 memset(&mem, 0, sizeof(mem));
1790
1791 mem.size = memory_region_size;
1792 mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
1793 &mem.phys_base, GFP_KERNEL);
1794 if (!mem.base) {
1795 IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
1796 return -ENOMEM;
1797 }
1798
1799 memset(mem.base, 0, mem.size);
1800 cmd.is_read = false;
1801 cmd.skip_pipeline_clear = false;
1802 cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
1803 cmd.size = mem.size;
1804 cmd.system_addr = mem.phys_base;
1805 cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
1806 memory_region_offset;
1807 cmd_pyld = ipahal_construct_imm_cmd(
1808 IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
1809 if (!cmd_pyld) {
1810 IPAERR("failed to construct dma_shared_mem imm cmd\n");
1811 return -ENOMEM;
1812 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -07001813 desc.opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03001814 desc.pyld = cmd_pyld->data;
1815 desc.len = cmd_pyld->len;
1816 desc.type = IPA_IMM_CMD_DESC;
1817
1818 rc = ipa3_send_cmd(1, &desc);
1819 if (rc) {
1820 IPAERR("failed to send immediate command (error %d)\n", rc);
1821 rc = -EFAULT;
1822 }
1823
1824 ipahal_destroy_imm_cmd(cmd_pyld);
1825 dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
1826 mem.phys_base);
1827
1828 return rc;
1829}
1830
1831/**
1832* ipa3_init_q6_smem() - Initialize Q6 general memory and
1833* header memory regions in IPA.
1834*
1835* Return codes:
1836* 0: success
1837* -ENOMEM: failed to allocate dma memory
1838* -EFAULT: failed to send IPA command to initialize the memory
1839*/
1840int ipa3_init_q6_smem(void)
1841{
1842 int rc;
1843
1844 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
1845
1846 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_size),
1847 IPA_MEM_PART(modem_ofst));
1848 if (rc) {
1849 IPAERR("failed to initialize Modem RAM memory\n");
1850 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1851 return rc;
1852 }
1853
1854 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_size),
1855 IPA_MEM_PART(modem_hdr_ofst));
1856 if (rc) {
1857 IPAERR("failed to initialize Modem HDRs RAM memory\n");
1858 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1859 return rc;
1860 }
1861
1862 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_proc_ctx_size),
1863 IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
1864 if (rc) {
1865 IPAERR("failed to initialize Modem proc ctx RAM memory\n");
1866 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1867 return rc;
1868 }
1869
1870 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_comp_decomp_size),
1871 IPA_MEM_PART(modem_comp_decomp_ofst));
1872 if (rc) {
1873 IPAERR("failed to initialize Modem Comp/Decomp RAM memory\n");
1874 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1875 return rc;
1876 }
1877 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1878
1879 return rc;
1880}
1881
/*
 * ipa3_destroy_imm() - completion callback that releases an immediate-command
 * payload. @user1 carries the ipahal payload pointer; @user2 is unused but
 * required by the callback signature.
 */
static void ipa3_destroy_imm(void *user1, int user2)
{
	ipahal_destroy_imm_cmd(user1);
}
1886
1887static void ipa3_q6_pipe_delay(bool delay)
1888{
1889 int client_idx;
1890 int ep_idx;
1891 struct ipa_ep_cfg_ctrl ep_ctrl;
1892
1893 memset(&ep_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
1894 ep_ctrl.ipa_ep_delay = delay;
1895
1896 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
1897 if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
1898 ep_idx = ipa3_get_ep_mapping(client_idx);
1899 if (ep_idx == -1)
1900 continue;
1901
1902 ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n,
1903 ep_idx, &ep_ctrl);
1904 }
1905 }
1906}
1907
/*
 * ipa3_q6_avoid_holb() - arm head-of-line-blocking drop (timer 0, enabled)
 * on every Q6 consumer pipe, and additionally suspend those pipes on HW
 * older than IPA v4.0, so stalled Q6 pipes cannot back-pressure the IPA.
 */
static void ipa3_q6_avoid_holb(void)
{
	int ep_idx;
	int client_idx;
	struct ipa_ep_cfg_ctrl ep_suspend;
	struct ipa_ep_cfg_holb ep_holb;

	memset(&ep_suspend, 0, sizeof(ep_suspend));
	memset(&ep_holb, 0, sizeof(ep_holb));

	/* tmr_val 0 + en 1: drop HOLB packets immediately (no grace timer) */
	ep_suspend.ipa_ep_suspend = true;
	ep_holb.tmr_val = 0;
	ep_holb.en = 1;

	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
		if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
			ep_idx = ipa3_get_ep_mapping(client_idx);
			if (ep_idx == -1)
				continue;

			/*
			 * ipa3_cfg_ep_holb is not used here because we are
			 * setting HOLB on Q6 pipes, and from APPS perspective
			 * they are not valid, therefore, the above function
			 * will fail.
			 */
			ipahal_write_reg_n_fields(
				IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
				ep_idx, &ep_holb);
			ipahal_write_reg_n_fields(
				IPA_ENDP_INIT_HOL_BLOCK_EN_n,
				ep_idx, &ep_holb);

			/* from IPA 4.0 pipe suspend is not supported */
			if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
				ipahal_write_reg_n_fields(
					IPA_ENDP_INIT_CTRL_n,
					ep_idx, &ep_suspend);
		}
	}
}
1949
Skylar Chang94692c92017-03-01 09:07:11 -08001950static void ipa3_halt_q6_cons_gsi_channels(void)
1951{
1952 int ep_idx;
1953 int client_idx;
1954 const struct ipa_gsi_ep_config *gsi_ep_cfg;
1955 int ret;
1956 int code = 0;
1957
1958 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
1959 if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
1960 ep_idx = ipa3_get_ep_mapping(client_idx);
1961 if (ep_idx == -1)
1962 continue;
1963
Skylar Changc1f15312017-05-09 14:14:32 -07001964 gsi_ep_cfg = ipa3_get_gsi_ep_info(client_idx);
Skylar Chang94692c92017-03-01 09:07:11 -08001965 if (!gsi_ep_cfg) {
1966 IPAERR("failed to get GSI config\n");
1967 ipa_assert();
1968 return;
1969 }
1970
1971 ret = gsi_halt_channel_ee(
1972 gsi_ep_cfg->ipa_gsi_chan_num, gsi_ep_cfg->ee,
1973 &code);
1974 if (ret == GSI_STATUS_SUCCESS)
1975 IPADBG("halted gsi ch %d ee %d with code %d\n",
1976 gsi_ep_cfg->ipa_gsi_chan_num,
1977 gsi_ep_cfg->ee,
1978 code);
1979 else
1980 IPAERR("failed to halt ch %d ee %d code %d\n",
1981 gsi_ep_cfg->ipa_gsi_chan_num,
1982 gsi_ep_cfg->ee,
1983 code);
1984 }
1985 }
1986}
1987
1988
/*
 * ipa3_q6_clean_q6_flt_tbls() - overwrite the filter-table headers of every
 * filtering-capable pipe not configured by the AP with an empty table image,
 * for one (ip, rlt) combination, using batched DMA_SHARED_MEM immediate
 * commands.
 *
 * @ip:  IPA_IP_v4 or IPA_IP_v6 — selects which memory partition to clean
 * @rlt: IPA_RULE_HASHABLE or IPA_RULE_NON_HASHABLE — hash vs non-hash region
 *
 * Return: 0 on success, -EINVAL on bad arguments, -ENOMEM on allocation
 * failure, -EFAULT if sending the commands fails.
 */
static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
	enum ipa_rule_type rlt)
{
	struct ipa3_desc *desc;
	struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
	struct ipahal_imm_cmd_pyld **cmd_pyld;
	int retval = 0;
	int pipe_idx;
	int flt_idx = 0;	/* index within the filtering pipes only */
	int num_cmds = 0;	/* commands actually constructed */
	int index;
	u32 lcl_addr_mem_part;
	u32 lcl_hdr_sz;
	struct ipa_mem_buffer mem;

	IPADBG("Entry\n");

	if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
		IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
		return -EINVAL;
	}

	/* Up to filtering pipes we have filtering tables */
	desc = kcalloc(ipa3_ctx->ep_flt_num, sizeof(struct ipa3_desc),
		GFP_KERNEL);
	if (!desc) {
		IPAERR("failed to allocate memory\n");
		return -ENOMEM;
	}

	/* one payload slot per potential filtering pipe */
	cmd_pyld = kcalloc(ipa3_ctx->ep_flt_num,
		sizeof(struct ipahal_imm_cmd_pyld *), GFP_KERNEL);
	if (!cmd_pyld) {
		IPAERR("failed to allocate memory\n");
		retval = -ENOMEM;
		goto free_desc;
	}

	/* pick the partition offset/size for the (ip, rlt) combination */
	if (ip == IPA_IP_v4) {
		if (rlt == IPA_RULE_HASHABLE) {
			lcl_addr_mem_part = IPA_MEM_PART(v4_flt_hash_ofst);
			lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
		} else {
			lcl_addr_mem_part = IPA_MEM_PART(v4_flt_nhash_ofst);
			lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
		}
	} else {
		if (rlt == IPA_RULE_HASHABLE) {
			lcl_addr_mem_part = IPA_MEM_PART(v6_flt_hash_ofst);
			lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
		} else {
			lcl_addr_mem_part = IPA_MEM_PART(v6_flt_nhash_ofst);
			lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
		}
	}

	/* one empty single-table image, DMA-ed into each target slot below */
	retval = ipahal_flt_generate_empty_img(1, lcl_hdr_sz, lcl_hdr_sz,
		0, &mem, true);
	if (retval) {
		IPAERR("failed to generate flt single tbl empty img\n");
		goto free_cmd_pyld;
	}

	for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes; pipe_idx++) {
		if (!ipa_is_ep_support_flt(pipe_idx))
			continue;

		/*
		 * Iterating over all the filtering pipes which are either
		 * invalid but connected or connected but not configured by AP.
		 */
		if (!ipa3_ctx->ep[pipe_idx].valid ||
		    ipa3_ctx->ep[pipe_idx].skip_ep_cfg) {

			cmd.is_read = false;
			cmd.skip_pipeline_clear = false;
			cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
			cmd.size = mem.size;
			cmd.system_addr = mem.phys_base;
			/* target: this pipe's header slot within the region
			 * (skip the region-wide header, then flt_idx slots)
			 */
			cmd.local_addr =
				ipa3_ctx->smem_restricted_bytes +
				lcl_addr_mem_part +
				ipahal_get_hw_tbl_hdr_width() +
				flt_idx * ipahal_get_hw_tbl_hdr_width();
			cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
			if (!cmd_pyld[num_cmds]) {
				IPAERR("fail construct dma_shared_mem cmd\n");
				retval = -ENOMEM;
				goto free_empty_img;
			}
			desc[num_cmds].opcode = cmd_pyld[num_cmds]->opcode;
			desc[num_cmds].pyld = cmd_pyld[num_cmds]->data;
			desc[num_cmds].len = cmd_pyld[num_cmds]->len;
			desc[num_cmds].type = IPA_IMM_CMD_DESC;
			num_cmds++;
		}

		/* flt_idx advances for every filtering pipe, cleaned or not */
		flt_idx++;
	}

	IPADBG("Sending %d descriptors for flt tbl clearing\n", num_cmds);
	retval = ipa3_send_cmd(num_cmds, desc);
	if (retval) {
		IPAERR("failed to send immediate command (err %d)\n", retval);
		retval = -EFAULT;
	}

free_empty_img:
	ipahal_free_dma_mem(&mem);
free_cmd_pyld:
	/* only the first num_cmds payloads were constructed */
	for (index = 0; index < num_cmds; index++)
		ipahal_destroy_imm_cmd(cmd_pyld[index]);
	kfree(cmd_pyld);
free_desc:
	kfree(desc);
	return retval;
}
2107
2108static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip,
2109 enum ipa_rule_type rlt)
2110{
2111 struct ipa3_desc *desc;
2112 struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
2113 struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
2114 int retval = 0;
2115 u32 modem_rt_index_lo;
2116 u32 modem_rt_index_hi;
2117 u32 lcl_addr_mem_part;
2118 u32 lcl_hdr_sz;
2119 struct ipa_mem_buffer mem;
2120
2121 IPADBG("Entry\n");
2122
2123 if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
2124 IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
2125 return -EINVAL;
2126 }
2127
2128 if (ip == IPA_IP_v4) {
2129 modem_rt_index_lo = IPA_MEM_PART(v4_modem_rt_index_lo);
2130 modem_rt_index_hi = IPA_MEM_PART(v4_modem_rt_index_hi);
2131 if (rlt == IPA_RULE_HASHABLE) {
2132 lcl_addr_mem_part = IPA_MEM_PART(v4_rt_hash_ofst);
2133 lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
2134 } else {
2135 lcl_addr_mem_part = IPA_MEM_PART(v4_rt_nhash_ofst);
2136 lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
2137 }
2138 } else {
2139 modem_rt_index_lo = IPA_MEM_PART(v6_modem_rt_index_lo);
2140 modem_rt_index_hi = IPA_MEM_PART(v6_modem_rt_index_hi);
2141 if (rlt == IPA_RULE_HASHABLE) {
2142 lcl_addr_mem_part = IPA_MEM_PART(v6_rt_hash_ofst);
2143 lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
2144 } else {
2145 lcl_addr_mem_part = IPA_MEM_PART(v6_rt_nhash_ofst);
2146 lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
2147 }
2148 }
2149
2150 retval = ipahal_rt_generate_empty_img(
2151 modem_rt_index_hi - modem_rt_index_lo + 1,
Amir Levy4dc79be2017-02-01 19:18:35 +02002152 lcl_hdr_sz, lcl_hdr_sz, &mem, true);
Amir Levy9659e592016-10-27 18:08:27 +03002153 if (retval) {
2154 IPAERR("fail generate empty rt img\n");
2155 return -ENOMEM;
2156 }
2157
2158 desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
2159 if (!desc) {
2160 IPAERR("failed to allocate memory\n");
2161 goto free_empty_img;
2162 }
2163
2164 cmd.is_read = false;
2165 cmd.skip_pipeline_clear = false;
2166 cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
2167 cmd.size = mem.size;
2168 cmd.system_addr = mem.phys_base;
2169 cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
2170 lcl_addr_mem_part +
2171 modem_rt_index_lo * ipahal_get_hw_tbl_hdr_width();
2172 cmd_pyld = ipahal_construct_imm_cmd(
2173 IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
2174 if (!cmd_pyld) {
2175 IPAERR("failed to construct dma_shared_mem imm cmd\n");
2176 retval = -ENOMEM;
2177 goto free_desc;
2178 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002179 desc->opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002180 desc->pyld = cmd_pyld->data;
2181 desc->len = cmd_pyld->len;
2182 desc->type = IPA_IMM_CMD_DESC;
2183
2184 IPADBG("Sending 1 descriptor for rt tbl clearing\n");
2185 retval = ipa3_send_cmd(1, desc);
2186 if (retval) {
2187 IPAERR("failed to send immediate command (err %d)\n", retval);
2188 retval = -EFAULT;
2189 }
2190
2191 ipahal_destroy_imm_cmd(cmd_pyld);
2192free_desc:
2193 kfree(desc);
2194free_empty_img:
2195 ipahal_free_dma_mem(&mem);
2196 return retval;
2197}
2198
/*
 * ipa3_q6_clean_q6_tables() - Overwrite every modem-owned table group
 * (flt/rt x v4/v6 x hashable/non-hashable) with an empty image, then
 * flush the HW filter/route hash caches. Part of the SSR cleanup flow.
 *
 * Return: 0 on success, negative errno on failure
 */
static int ipa3_q6_clean_q6_tables(void)
{
	struct ipa3_desc *desc;
	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
	int retval;
	struct ipahal_reg_fltrt_hash_flush flush;
	struct ipahal_reg_valmask valmask;

	IPADBG("Entry\n");


	/* Clean all eight table groups; each failure is fatal for SSR */
	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
		IPAERR("failed to clean q6 flt tbls (v4/hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
		IPAERR("failed to clean q6 flt tbls (v6/hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
		IPAERR("failed to clean q6 flt tbls (v4/non-hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
		IPAERR("failed to clean q6 flt tbls (v6/non-hashable)\n");
		return -EFAULT;
	}

	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
		IPAERR("failed to clean q6 rt tbls (v4/hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
		IPAERR("failed to clean q6 rt tbls (v6/hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
		IPAERR("failed to clean q6 rt tbls (v4/non-hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
		IPAERR("failed to clean q6 rt tbls (v6/non-hashable)\n");
		return -EFAULT;
	}

	/* Flush rules cache */
	desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
	if (!desc) {
		IPAERR("failed to allocate memory\n");
		return -ENOMEM;
	}

	/* Request a flush of all four hash caches in one register write */
	flush.v4_flt = true;
	flush.v4_rt = true;
	flush.v6_flt = true;
	flush.v6_rt = true;
	ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
	reg_write_cmd.skip_pipeline_clear = false;
	reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
	reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
	reg_write_cmd.value = valmask.val;
	reg_write_cmd.value_mask = valmask.mask;
	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
		&reg_write_cmd, false);
	if (!cmd_pyld) {
		IPAERR("fail construct register_write imm cmd\n");
		retval = -EFAULT;
		goto bail_desc;
	}
	desc->opcode = cmd_pyld->opcode;
	desc->pyld = cmd_pyld->data;
	desc->len = cmd_pyld->len;
	desc->type = IPA_IMM_CMD_DESC;

	IPADBG("Sending 1 descriptor for tbls flush\n");
	retval = ipa3_send_cmd(1, desc);
	if (retval) {
		IPAERR("failed to send immediate command (err %d)\n", retval);
		retval = -EFAULT;
	}

	ipahal_destroy_imm_cmd(cmd_pyld);

bail_desc:
	kfree(desc);
	IPADBG("Done - retval = %d\n", retval);
	return retval;
}
2288
2289static int ipa3_q6_set_ex_path_to_apps(void)
2290{
2291 int ep_idx;
2292 int client_idx;
2293 struct ipa3_desc *desc;
2294 int num_descs = 0;
2295 int index;
2296 struct ipahal_imm_cmd_register_write reg_write;
2297 struct ipahal_imm_cmd_pyld *cmd_pyld;
2298 int retval;
2299 struct ipahal_reg_valmask valmask;
2300
2301 desc = kcalloc(ipa3_ctx->ipa_num_pipes, sizeof(struct ipa3_desc),
2302 GFP_KERNEL);
2303 if (!desc) {
2304 IPAERR("failed to allocate memory\n");
2305 return -ENOMEM;
2306 }
2307
2308 /* Set the exception path to AP */
2309 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
2310 ep_idx = ipa3_get_ep_mapping(client_idx);
2311 if (ep_idx == -1)
2312 continue;
2313
2314 if (ipa3_ctx->ep[ep_idx].valid &&
2315 ipa3_ctx->ep[ep_idx].skip_ep_cfg) {
2316 BUG_ON(num_descs >= ipa3_ctx->ipa_num_pipes);
2317
2318 reg_write.skip_pipeline_clear = false;
2319 reg_write.pipeline_clear_options =
2320 IPAHAL_HPS_CLEAR;
2321 reg_write.offset =
Amir Levy8c19dd42017-04-02 18:21:09 +03002322 ipahal_get_reg_n_ofst(IPA_ENDP_STATUS_n,
2323 ep_idx);
Amir Levy9659e592016-10-27 18:08:27 +03002324 ipahal_get_status_ep_valmask(
2325 ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS),
2326 &valmask);
2327 reg_write.value = valmask.val;
2328 reg_write.value_mask = valmask.mask;
2329 cmd_pyld = ipahal_construct_imm_cmd(
2330 IPA_IMM_CMD_REGISTER_WRITE, &reg_write, false);
2331 if (!cmd_pyld) {
2332 IPAERR("fail construct register_write cmd\n");
2333 BUG();
2334 }
2335
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002336 desc[num_descs].opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002337 desc[num_descs].type = IPA_IMM_CMD_DESC;
2338 desc[num_descs].callback = ipa3_destroy_imm;
2339 desc[num_descs].user1 = cmd_pyld;
2340 desc[num_descs].pyld = cmd_pyld->data;
2341 desc[num_descs].len = cmd_pyld->len;
2342 num_descs++;
2343 }
Amir Levy5807be32017-04-19 14:35:12 +03002344
2345 /* disable statuses for modem producers */
2346 if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
2347 ipa_assert_on(num_descs >= ipa3_ctx->ipa_num_pipes);
2348
2349 reg_write.skip_pipeline_clear = false;
2350 reg_write.pipeline_clear_options =
2351 IPAHAL_HPS_CLEAR;
2352 reg_write.offset =
2353 ipahal_get_reg_n_ofst(IPA_ENDP_STATUS_n,
2354 ep_idx);
2355 reg_write.value = 0;
2356 reg_write.value_mask = ~0;
2357 cmd_pyld = ipahal_construct_imm_cmd(
2358 IPA_IMM_CMD_REGISTER_WRITE, &reg_write, false);
2359 if (!cmd_pyld) {
2360 IPAERR("fail construct register_write cmd\n");
2361 ipa_assert();
2362 return -EFAULT;
2363 }
2364
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002365 desc[num_descs].opcode = cmd_pyld->opcode;
Amir Levy5807be32017-04-19 14:35:12 +03002366 desc[num_descs].type = IPA_IMM_CMD_DESC;
2367 desc[num_descs].callback = ipa3_destroy_imm;
2368 desc[num_descs].user1 = cmd_pyld;
2369 desc[num_descs].pyld = cmd_pyld->data;
2370 desc[num_descs].len = cmd_pyld->len;
2371 num_descs++;
2372 }
Amir Levy9659e592016-10-27 18:08:27 +03002373 }
2374
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002375 /* Will wait 500msecs for IPA tag process completion */
Amir Levy9659e592016-10-27 18:08:27 +03002376 retval = ipa3_tag_process(desc, num_descs,
2377 msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT));
2378 if (retval) {
2379 IPAERR("TAG process failed! (error %d)\n", retval);
2380 /* For timeout error ipa3_destroy_imm cb will destroy user1 */
2381 if (retval != -ETIME) {
2382 for (index = 0; index < num_descs; index++)
2383 if (desc[index].callback)
2384 desc[index].callback(desc[index].user1,
2385 desc[index].user2);
2386 retval = -EINVAL;
2387 }
2388 }
2389
2390 kfree(desc);
2391
2392 return retval;
2393}
2394
/**
 * ipa3_q6_pre_shutdown_cleanup() - A cleanup for all Q6 related configuration
 * in IPA HW. This is performed in case of SSR.
 *
 * This is a mandatory procedure, in case one of the steps fails, the
 * AP needs to restart.
 */
void ipa3_q6_pre_shutdown_cleanup(void)
{
	IPADBG_LOW("ENTER\n");

	/* Hold an active-clients vote for the duration of the cleanup */
	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	/* Delay Q6 producer pipes so no new traffic enters while cleaning */
	ipa3_q6_pipe_delay(true);
	ipa3_q6_avoid_holb();
	if (ipa3_q6_clean_q6_tables()) {
		IPAERR("Failed to clean Q6 tables\n");
		BUG();
	}
	if (ipa3_q6_set_ex_path_to_apps()) {
		IPAERR("Failed to redirect exceptions to APPS\n");
		BUG();
	}
	/* Remove delay from Q6 PRODs to avoid pending descriptors
	 * on pipe reset procedure
	 */
	ipa3_q6_pipe_delay(false);

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	IPADBG_LOW("Exit with success\n");
}
2426
/*
 * ipa3_q6_post_shutdown_cleanup() - As part of this cleanup
 * check if GSI channel related to Q6 producer client is empty.
 *
 * Q6 GSI channel emptiness is needed to guarantee no descriptors with invalid
 * info are injected into IPA RX from IPA_IF, while modem is restarting.
 */
void ipa3_q6_post_shutdown_cleanup(void)
{
	int client_idx;
	int ep_idx;

	IPADBG_LOW("ENTER\n");

	/* Channel-emptiness validation is done through the uC */
	if (!ipa3_ctx->uc_ctx.uc_loaded) {
		IPAERR("uC is not loaded. Skipping\n");
		return;
	}

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	/* Handle the issue where SUSPEND was removed for some reason */
	ipa3_q6_avoid_holb();
	ipa3_halt_q6_cons_gsi_channels();

	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
		if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
			/* Skip Q6 clients with no endpoint on this HW */
			ep_idx = ipa3_get_ep_mapping(client_idx);
			if (ep_idx == -1)
				continue;

			if (ipa3_uc_is_gsi_channel_empty(client_idx)) {
				IPAERR("fail to validate Q6 ch emptiness %d\n",
					client_idx);
				BUG();
				/* NOTE(review): this return is unreachable
				 * when BUG() panics; if CONFIG_BUG is off it
				 * skips IPA_ACTIVE_CLIENTS_DEC_SIMPLE() below,
				 * leaking the active-clients vote - confirm.
				 */
				return;
			}
		}

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	IPADBG_LOW("Exit with success\n");
}
2469
2470static inline void ipa3_sram_set_canary(u32 *sram_mmio, int offset)
2471{
2472 /* Set 4 bytes of CANARY before the offset */
2473 sram_mmio[(offset - 4) / 4] = IPA_MEM_CANARY_VAL;
2474}
2475
/**
 * _ipa_init_sram_v3() - Initialize IPA local SRAM.
 *
 * Maps the SRAM region and writes CANARY guard words around each memory
 * partition so corruption of a neighboring region can be detected.
 *
 * Return codes: 0 for success, negative value for failure
 */
int _ipa_init_sram_v3(void)
{
	u32 *ipa_sram_mmio;
	unsigned long phys_addr;

	/* Physical address of the first AP-visible SRAM word (word-indexed
	 * register, hence the /4 on the restricted-bytes offset) */
	phys_addr = ipa3_ctx->ipa_wrapper_base +
		ipa3_ctx->ctrl->ipa_reg_base_ofst +
		ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
			ipa3_ctx->smem_restricted_bytes / 4);

	ipa_sram_mmio = ioremap(phys_addr, ipa3_ctx->smem_sz);
	if (!ipa_sram_mmio) {
		IPAERR("fail to ioremap IPA SRAM\n");
		return -ENOMEM;
	}

	/* Consult with ipa_i.h on the location of the CANARY values.
	 * Most partitions get two guard words (at ofst-8 and ofst-4);
	 * the exact set below must match the partition map exactly. */
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio,
		IPA_MEM_PART(v4_flt_nhash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_nhash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio,
		IPA_MEM_PART(v6_flt_nhash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_nhash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio,
		IPA_MEM_PART(modem_hdr_proc_ctx_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio,
		IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst));
	/* On IPA >= 3.5 the uC event ring region exists; guard it instead
	 * of the end-of-partition marker */
	ipa3_sram_set_canary(ipa_sram_mmio,
		(ipa_get_hw_type() >= IPA_HW_v3_5) ?
			IPA_MEM_PART(uc_event_ring_ofst) :
			IPA_MEM_PART(end_ofst));

	iounmap(ipa_sram_mmio);

	return 0;
}
2533
/**
 * _ipa_init_hdr_v3_0() - Initialize IPA header block.
 *
 * Two stages: (1) zero the header table region (modem + apps) via the
 * HDR_INIT_LOCAL immediate command, (2) zero the header processing-context
 * region via DMA_SHARED_MEM and program its base address register.
 *
 * Return codes: 0 for success, negative value for failure
 */
int _ipa_init_hdr_v3_0(void)
{
	struct ipa3_desc desc = { 0 };
	struct ipa_mem_buffer mem;
	struct ipahal_imm_cmd_hdr_init_local cmd = {0};
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	struct ipahal_imm_cmd_dma_shared_mem dma_cmd = { 0 };

	/* Stage 1: zero-filled system buffer covering both header regions */
	mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
		GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
		return -ENOMEM;
	}
	memset(mem.base, 0, mem.size);

	cmd.hdr_table_addr = mem.phys_base;
	cmd.size_hdr_table = mem.size;
	/* Destination: start of the modem header partition in SRAM */
	cmd.hdr_addr = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(modem_hdr_ofst);
	cmd_pyld = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_HDR_INIT_LOCAL, &cmd, false);
	if (!cmd_pyld) {
		IPAERR("fail to construct hdr_init_local imm cmd\n");
		dma_free_coherent(ipa3_ctx->pdev,
			mem.size, mem.base,
			mem.phys_base);
		return -EFAULT;
	}
	desc.opcode = cmd_pyld->opcode;
	desc.type = IPA_IMM_CMD_DESC;
	desc.pyld = cmd_pyld->data;
	desc.len = cmd_pyld->len;
	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);

	if (ipa3_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		ipahal_destroy_imm_cmd(cmd_pyld);
		dma_free_coherent(ipa3_ctx->pdev,
			mem.size, mem.base,
			mem.phys_base);
		return -EFAULT;
	}

	ipahal_destroy_imm_cmd(cmd_pyld);
	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);

	/* Stage 2: zero the header processing-context regions */
	mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) +
		IPA_MEM_PART(apps_hdr_proc_ctx_size);
	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
		GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
		return -ENOMEM;
	}
	memset(mem.base, 0, mem.size);
	/* desc is reused; clear stage-1 contents */
	memset(&desc, 0, sizeof(desc));

	dma_cmd.is_read = false;
	dma_cmd.skip_pipeline_clear = false;
	dma_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
	dma_cmd.system_addr = mem.phys_base;
	dma_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(modem_hdr_proc_ctx_ofst);
	dma_cmd.size = mem.size;
	cmd_pyld = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_DMA_SHARED_MEM, &dma_cmd, false);
	if (!cmd_pyld) {
		IPAERR("fail to construct dma_shared_mem imm\n");
		dma_free_coherent(ipa3_ctx->pdev,
			mem.size, mem.base,
			mem.phys_base);
		return -EFAULT;
	}
	desc.opcode = cmd_pyld->opcode;
	desc.pyld = cmd_pyld->data;
	desc.len = cmd_pyld->len;
	desc.type = IPA_IMM_CMD_DESC;
	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);

	if (ipa3_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		ipahal_destroy_imm_cmd(cmd_pyld);
		dma_free_coherent(ipa3_ctx->pdev,
			mem.size,
			mem.base,
			mem.phys_base);
		return -EFAULT;
	}
	ipahal_destroy_imm_cmd(cmd_pyld);

	/* Tell HW where the processing-context area begins */
	ipahal_write_reg(IPA_LOCAL_PKT_PROC_CNTXT_BASE, dma_cmd.local_addr);

	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);

	return 0;
}
2637
2638/**
2639 * _ipa_init_rt4_v3() - Initialize IPA routing block for IPv4.
2640 *
2641 * Return codes: 0 for success, negative value for failure
2642 */
2643int _ipa_init_rt4_v3(void)
2644{
2645 struct ipa3_desc desc = { 0 };
2646 struct ipa_mem_buffer mem;
2647 struct ipahal_imm_cmd_ip_v4_routing_init v4_cmd;
2648 struct ipahal_imm_cmd_pyld *cmd_pyld;
2649 int i;
2650 int rc = 0;
2651
2652 for (i = IPA_MEM_PART(v4_modem_rt_index_lo);
2653 i <= IPA_MEM_PART(v4_modem_rt_index_hi);
2654 i++)
2655 ipa3_ctx->rt_idx_bitmap[IPA_IP_v4] |= (1 << i);
2656 IPADBG("v4 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v4]);
2657
2658 rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v4_rt_num_index),
2659 IPA_MEM_PART(v4_rt_hash_size), IPA_MEM_PART(v4_rt_nhash_size),
Amir Levy4dc79be2017-02-01 19:18:35 +02002660 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03002661 if (rc) {
2662 IPAERR("fail generate empty v4 rt img\n");
2663 return rc;
2664 }
2665
2666 v4_cmd.hash_rules_addr = mem.phys_base;
2667 v4_cmd.hash_rules_size = mem.size;
2668 v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
2669 IPA_MEM_PART(v4_rt_hash_ofst);
2670 v4_cmd.nhash_rules_addr = mem.phys_base;
2671 v4_cmd.nhash_rules_size = mem.size;
2672 v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
2673 IPA_MEM_PART(v4_rt_nhash_ofst);
2674 IPADBG("putting hashable routing IPv4 rules to phys 0x%x\n",
2675 v4_cmd.hash_local_addr);
2676 IPADBG("putting non-hashable routing IPv4 rules to phys 0x%x\n",
2677 v4_cmd.nhash_local_addr);
2678 cmd_pyld = ipahal_construct_imm_cmd(
2679 IPA_IMM_CMD_IP_V4_ROUTING_INIT, &v4_cmd, false);
2680 if (!cmd_pyld) {
2681 IPAERR("fail construct ip_v4_rt_init imm cmd\n");
2682 rc = -EPERM;
2683 goto free_mem;
2684 }
2685
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002686 desc.opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002687 desc.type = IPA_IMM_CMD_DESC;
2688 desc.pyld = cmd_pyld->data;
2689 desc.len = cmd_pyld->len;
2690 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2691
2692 if (ipa3_send_cmd(1, &desc)) {
2693 IPAERR("fail to send immediate command\n");
2694 rc = -EFAULT;
2695 }
2696
2697 ipahal_destroy_imm_cmd(cmd_pyld);
2698
2699free_mem:
2700 ipahal_free_dma_mem(&mem);
2701 return rc;
2702}
2703
2704/**
2705 * _ipa_init_rt6_v3() - Initialize IPA routing block for IPv6.
2706 *
2707 * Return codes: 0 for success, negative value for failure
2708 */
2709int _ipa_init_rt6_v3(void)
2710{
2711 struct ipa3_desc desc = { 0 };
2712 struct ipa_mem_buffer mem;
2713 struct ipahal_imm_cmd_ip_v6_routing_init v6_cmd;
2714 struct ipahal_imm_cmd_pyld *cmd_pyld;
2715 int i;
2716 int rc = 0;
2717
2718 for (i = IPA_MEM_PART(v6_modem_rt_index_lo);
2719 i <= IPA_MEM_PART(v6_modem_rt_index_hi);
2720 i++)
2721 ipa3_ctx->rt_idx_bitmap[IPA_IP_v6] |= (1 << i);
2722 IPADBG("v6 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v6]);
2723
2724 rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v6_rt_num_index),
2725 IPA_MEM_PART(v6_rt_hash_size), IPA_MEM_PART(v6_rt_nhash_size),
Amir Levy4dc79be2017-02-01 19:18:35 +02002726 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03002727 if (rc) {
2728 IPAERR("fail generate empty v6 rt img\n");
2729 return rc;
2730 }
2731
2732 v6_cmd.hash_rules_addr = mem.phys_base;
2733 v6_cmd.hash_rules_size = mem.size;
2734 v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
2735 IPA_MEM_PART(v6_rt_hash_ofst);
2736 v6_cmd.nhash_rules_addr = mem.phys_base;
2737 v6_cmd.nhash_rules_size = mem.size;
2738 v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
2739 IPA_MEM_PART(v6_rt_nhash_ofst);
2740 IPADBG("putting hashable routing IPv6 rules to phys 0x%x\n",
2741 v6_cmd.hash_local_addr);
2742 IPADBG("putting non-hashable routing IPv6 rules to phys 0x%x\n",
2743 v6_cmd.nhash_local_addr);
2744 cmd_pyld = ipahal_construct_imm_cmd(
2745 IPA_IMM_CMD_IP_V6_ROUTING_INIT, &v6_cmd, false);
2746 if (!cmd_pyld) {
2747 IPAERR("fail construct ip_v6_rt_init imm cmd\n");
2748 rc = -EPERM;
2749 goto free_mem;
2750 }
2751
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002752 desc.opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002753 desc.type = IPA_IMM_CMD_DESC;
2754 desc.pyld = cmd_pyld->data;
2755 desc.len = cmd_pyld->len;
2756 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2757
2758 if (ipa3_send_cmd(1, &desc)) {
2759 IPAERR("fail to send immediate command\n");
2760 rc = -EFAULT;
2761 }
2762
2763 ipahal_destroy_imm_cmd(cmd_pyld);
2764
2765free_mem:
2766 ipahal_free_dma_mem(&mem);
2767 return rc;
2768}
2769
2770/**
2771 * _ipa_init_flt4_v3() - Initialize IPA filtering block for IPv4.
2772 *
2773 * Return codes: 0 for success, negative value for failure
2774 */
2775int _ipa_init_flt4_v3(void)
2776{
2777 struct ipa3_desc desc = { 0 };
2778 struct ipa_mem_buffer mem;
2779 struct ipahal_imm_cmd_ip_v4_filter_init v4_cmd;
2780 struct ipahal_imm_cmd_pyld *cmd_pyld;
2781 int rc;
2782
2783 rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
2784 IPA_MEM_PART(v4_flt_hash_size),
2785 IPA_MEM_PART(v4_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
Amir Levy4dc79be2017-02-01 19:18:35 +02002786 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03002787 if (rc) {
2788 IPAERR("fail generate empty v4 flt img\n");
2789 return rc;
2790 }
2791
2792 v4_cmd.hash_rules_addr = mem.phys_base;
2793 v4_cmd.hash_rules_size = mem.size;
2794 v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
2795 IPA_MEM_PART(v4_flt_hash_ofst);
2796 v4_cmd.nhash_rules_addr = mem.phys_base;
2797 v4_cmd.nhash_rules_size = mem.size;
2798 v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
2799 IPA_MEM_PART(v4_flt_nhash_ofst);
2800 IPADBG("putting hashable filtering IPv4 rules to phys 0x%x\n",
2801 v4_cmd.hash_local_addr);
2802 IPADBG("putting non-hashable filtering IPv4 rules to phys 0x%x\n",
2803 v4_cmd.nhash_local_addr);
2804 cmd_pyld = ipahal_construct_imm_cmd(
2805 IPA_IMM_CMD_IP_V4_FILTER_INIT, &v4_cmd, false);
2806 if (!cmd_pyld) {
2807 IPAERR("fail construct ip_v4_flt_init imm cmd\n");
2808 rc = -EPERM;
2809 goto free_mem;
2810 }
2811
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002812 desc.opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002813 desc.type = IPA_IMM_CMD_DESC;
2814 desc.pyld = cmd_pyld->data;
2815 desc.len = cmd_pyld->len;
2816 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2817
2818 if (ipa3_send_cmd(1, &desc)) {
2819 IPAERR("fail to send immediate command\n");
2820 rc = -EFAULT;
2821 }
2822
2823 ipahal_destroy_imm_cmd(cmd_pyld);
2824
2825free_mem:
2826 ipahal_free_dma_mem(&mem);
2827 return rc;
2828}
2829
2830/**
2831 * _ipa_init_flt6_v3() - Initialize IPA filtering block for IPv6.
2832 *
2833 * Return codes: 0 for success, negative value for failure
2834 */
2835int _ipa_init_flt6_v3(void)
2836{
2837 struct ipa3_desc desc = { 0 };
2838 struct ipa_mem_buffer mem;
2839 struct ipahal_imm_cmd_ip_v6_filter_init v6_cmd;
2840 struct ipahal_imm_cmd_pyld *cmd_pyld;
2841 int rc;
2842
2843 rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
2844 IPA_MEM_PART(v6_flt_hash_size),
2845 IPA_MEM_PART(v6_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
Amir Levy4dc79be2017-02-01 19:18:35 +02002846 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03002847 if (rc) {
2848 IPAERR("fail generate empty v6 flt img\n");
2849 return rc;
2850 }
2851
2852 v6_cmd.hash_rules_addr = mem.phys_base;
2853 v6_cmd.hash_rules_size = mem.size;
2854 v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
2855 IPA_MEM_PART(v6_flt_hash_ofst);
2856 v6_cmd.nhash_rules_addr = mem.phys_base;
2857 v6_cmd.nhash_rules_size = mem.size;
2858 v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
2859 IPA_MEM_PART(v6_flt_nhash_ofst);
2860 IPADBG("putting hashable filtering IPv6 rules to phys 0x%x\n",
2861 v6_cmd.hash_local_addr);
2862 IPADBG("putting non-hashable filtering IPv6 rules to phys 0x%x\n",
2863 v6_cmd.nhash_local_addr);
2864
2865 cmd_pyld = ipahal_construct_imm_cmd(
2866 IPA_IMM_CMD_IP_V6_FILTER_INIT, &v6_cmd, false);
2867 if (!cmd_pyld) {
2868 IPAERR("fail construct ip_v6_flt_init imm cmd\n");
2869 rc = -EPERM;
2870 goto free_mem;
2871 }
2872
Michael Adisumartab5d170f2017-05-17 14:34:11 -07002873 desc.opcode = cmd_pyld->opcode;
Amir Levy9659e592016-10-27 18:08:27 +03002874 desc.type = IPA_IMM_CMD_DESC;
2875 desc.pyld = cmd_pyld->data;
2876 desc.len = cmd_pyld->len;
2877 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2878
2879 if (ipa3_send_cmd(1, &desc)) {
2880 IPAERR("fail to send immediate command\n");
2881 rc = -EFAULT;
2882 }
2883
2884 ipahal_destroy_imm_cmd(cmd_pyld);
2885
2886free_mem:
2887 ipahal_free_dma_mem(&mem);
2888 return rc;
2889}
2890
2891static int ipa3_setup_flt_hash_tuple(void)
2892{
2893 int pipe_idx;
2894 struct ipahal_reg_hash_tuple tuple;
2895
2896 memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
2897
2898 for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes ; pipe_idx++) {
2899 if (!ipa_is_ep_support_flt(pipe_idx))
2900 continue;
2901
2902 if (ipa_is_modem_pipe(pipe_idx))
2903 continue;
2904
2905 if (ipa3_set_flt_tuple_mask(pipe_idx, &tuple)) {
2906 IPAERR("failed to setup pipe %d flt tuple\n", pipe_idx);
2907 return -EFAULT;
2908 }
2909 }
2910
2911 return 0;
2912}
2913
2914static int ipa3_setup_rt_hash_tuple(void)
2915{
2916 int tbl_idx;
2917 struct ipahal_reg_hash_tuple tuple;
2918
2919 memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
2920
2921 for (tbl_idx = 0;
2922 tbl_idx < max(IPA_MEM_PART(v6_rt_num_index),
2923 IPA_MEM_PART(v4_rt_num_index));
2924 tbl_idx++) {
2925
2926 if (tbl_idx >= IPA_MEM_PART(v4_modem_rt_index_lo) &&
2927 tbl_idx <= IPA_MEM_PART(v4_modem_rt_index_hi))
2928 continue;
2929
2930 if (tbl_idx >= IPA_MEM_PART(v6_modem_rt_index_lo) &&
2931 tbl_idx <= IPA_MEM_PART(v6_modem_rt_index_hi))
2932 continue;
2933
2934 if (ipa3_set_rt_tuple_mask(tbl_idx, &tuple)) {
2935 IPAERR("failed to setup tbl %d rt tuple\n", tbl_idx);
2936 return -EFAULT;
2937 }
2938 }
2939
2940 return 0;
2941}
2942
2943static int ipa3_setup_apps_pipes(void)
2944{
2945 struct ipa_sys_connect_params sys_in;
2946 int result = 0;
2947
2948 if (ipa3_ctx->gsi_ch20_wa) {
2949 IPADBG("Allocating GSI physical channel 20\n");
2950 result = ipa_gsi_ch20_wa();
2951 if (result) {
2952 IPAERR("ipa_gsi_ch20_wa failed %d\n", result);
Ghanim Fodic6b67492017-03-15 14:19:56 +02002953 goto fail_ch20_wa;
Amir Levy9659e592016-10-27 18:08:27 +03002954 }
2955 }
2956
Skylar Changd407e592017-03-30 11:25:30 -07002957 /* allocate the common PROD event ring */
2958 if (ipa3_alloc_common_event_ring()) {
2959 IPAERR("ipa3_alloc_common_event_ring failed.\n");
2960 result = -EPERM;
2961 goto fail_ch20_wa;
2962 }
2963
Amir Levy9659e592016-10-27 18:08:27 +03002964 /* CMD OUT (AP->IPA) */
2965 memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
2966 sys_in.client = IPA_CLIENT_APPS_CMD_PROD;
2967 sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
2968 sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
2969 sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS;
2970 if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_cmd)) {
Ghanim Fodic6b67492017-03-15 14:19:56 +02002971 IPAERR(":setup sys pipe (APPS_CMD_PROD) failed.\n");
Amir Levy9659e592016-10-27 18:08:27 +03002972 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02002973 goto fail_ch20_wa;
Amir Levy9659e592016-10-27 18:08:27 +03002974 }
2975 IPADBG("Apps to IPA cmd pipe is connected\n");
2976
2977 ipa3_ctx->ctrl->ipa_init_sram();
2978 IPADBG("SRAM initialized\n");
2979
2980 ipa3_ctx->ctrl->ipa_init_hdr();
2981 IPADBG("HDR initialized\n");
2982
2983 ipa3_ctx->ctrl->ipa_init_rt4();
2984 IPADBG("V4 RT initialized\n");
2985
2986 ipa3_ctx->ctrl->ipa_init_rt6();
2987 IPADBG("V6 RT initialized\n");
2988
2989 ipa3_ctx->ctrl->ipa_init_flt4();
2990 IPADBG("V4 FLT initialized\n");
2991
2992 ipa3_ctx->ctrl->ipa_init_flt6();
2993 IPADBG("V6 FLT initialized\n");
2994
2995 if (ipa3_setup_flt_hash_tuple()) {
2996 IPAERR(":fail to configure flt hash tuple\n");
2997 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02002998 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03002999 }
3000 IPADBG("flt hash tuple is configured\n");
3001
3002 if (ipa3_setup_rt_hash_tuple()) {
3003 IPAERR(":fail to configure rt hash tuple\n");
3004 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003005 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003006 }
3007 IPADBG("rt hash tuple is configured\n");
3008
3009 if (ipa3_setup_exception_path()) {
3010 IPAERR(":fail to setup excp path\n");
3011 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003012 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003013 }
3014 IPADBG("Exception path was successfully set");
3015
3016 if (ipa3_setup_dflt_rt_tables()) {
3017 IPAERR(":fail to setup dflt routes\n");
3018 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003019 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003020 }
3021 IPADBG("default routing was set\n");
3022
Ghanim Fodic6b67492017-03-15 14:19:56 +02003023 /* LAN IN (IPA->AP) */
Amir Levy9659e592016-10-27 18:08:27 +03003024 memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
3025 sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
3026 sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
3027 sys_in.notify = ipa3_lan_rx_cb;
3028 sys_in.priv = NULL;
3029 sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH;
3030 sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
3031 sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
3032 sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
3033 sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
3034 sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
3035 sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
3036 sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_ENABLE_CS_OFFLOAD_DL;
3037
3038 /**
3039 * ipa_lan_rx_cb() intended to notify the source EP about packet
3040 * being received on the LAN_CONS via calling the source EP call-back.
3041 * There could be a race condition with calling this call-back. Other
3042 * thread may nullify it - e.g. on EP disconnect.
3043 * This lock intended to protect the access to the source EP call-back
3044 */
3045 spin_lock_init(&ipa3_ctx->disconnect_lock);
3046 if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
Ghanim Fodic6b67492017-03-15 14:19:56 +02003047 IPAERR(":setup sys pipe (LAN_CONS) failed.\n");
Amir Levy9659e592016-10-27 18:08:27 +03003048 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003049 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003050 }
3051
Ghanim Fodic6b67492017-03-15 14:19:56 +02003052 /* LAN OUT (AP->IPA) */
Amir Levy54fe4d32017-03-16 11:21:49 +02003053 if (!ipa3_ctx->ipa_config_is_mhi) {
3054 memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
3055 sys_in.client = IPA_CLIENT_APPS_LAN_PROD;
3056 sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ;
3057 sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
3058 if (ipa3_setup_sys_pipe(&sys_in,
3059 &ipa3_ctx->clnt_hdl_data_out)) {
3060 IPAERR(":setup sys pipe (LAN_PROD) failed.\n");
3061 result = -EPERM;
3062 goto fail_lan_data_out;
3063 }
Amir Levy9659e592016-10-27 18:08:27 +03003064 }
3065
3066 return 0;
3067
Ghanim Fodic6b67492017-03-15 14:19:56 +02003068fail_lan_data_out:
Amir Levy9659e592016-10-27 18:08:27 +03003069 ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
Ghanim Fodic6b67492017-03-15 14:19:56 +02003070fail_flt_hash_tuple:
Amir Levy9659e592016-10-27 18:08:27 +03003071 if (ipa3_ctx->dflt_v6_rt_rule_hdl)
3072 __ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
3073 if (ipa3_ctx->dflt_v4_rt_rule_hdl)
3074 __ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
3075 if (ipa3_ctx->excp_hdr_hdl)
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02003076 __ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
Amir Levy9659e592016-10-27 18:08:27 +03003077 ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
Ghanim Fodic6b67492017-03-15 14:19:56 +02003078fail_ch20_wa:
Amir Levy9659e592016-10-27 18:08:27 +03003079 return result;
3080}
3081
/**
 * ipa3_teardown_apps_pipes() - Tear down the apps system pipes and the
 * default rules installed during setup, in reverse order of creation.
 *
 * The LAN OUT (AP->IPA) pipe is only torn down when not in MHI
 * configuration, mirroring the conditional setup path.
 */
static void ipa3_teardown_apps_pipes(void)
{
	/* LAN OUT (AP->IPA) exists only in non-MHI configuration */
	if (!ipa3_ctx->ipa_config_is_mhi)
		ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out);
	/* LAN IN (IPA->AP) */
	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
	/* default v6/v4 routing rules and the exception header */
	__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
	__ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
	__ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
	/* CMD (AP->IPA) pipe goes last */
	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
}
3092
3093#ifdef CONFIG_COMPAT
/**
 * compat_ipa3_ioctl() - 32-bit compat ioctl entry point.
 * @file: the opened IPA device file
 * @cmd: 32-bit ioctl command code
 * @arg: 32-bit userspace pointer/value argument
 *
 * Translates each *32 command code to its native counterpart and then
 * forwards to ipa3_ioctl() with the compat pointer converted. The only
 * command handled fully in-place is IPA_IOC_ALLOC_NAT_MEM32, whose
 * argument struct layout differs between 32-bit and native userspace.
 *
 * Return: ipa3_ioctl() result, 0/-EFAULT for the NAT-alloc path, or
 * -ENOIOCTLCMD for unknown commands.
 */
long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int retval = 0;
	struct ipa3_ioc_nat_alloc_mem32 nat_mem32;
	struct ipa_ioc_nat_alloc_mem nat_mem;

	/* translate 32-bit command codes 1:1 to the native codes */
	switch (cmd) {
	case IPA_IOC_ADD_HDR32:
		cmd = IPA_IOC_ADD_HDR;
		break;
	case IPA_IOC_DEL_HDR32:
		cmd = IPA_IOC_DEL_HDR;
		break;
	case IPA_IOC_ADD_RT_RULE32:
		cmd = IPA_IOC_ADD_RT_RULE;
		break;
	case IPA_IOC_DEL_RT_RULE32:
		cmd = IPA_IOC_DEL_RT_RULE;
		break;
	case IPA_IOC_ADD_FLT_RULE32:
		cmd = IPA_IOC_ADD_FLT_RULE;
		break;
	case IPA_IOC_DEL_FLT_RULE32:
		cmd = IPA_IOC_DEL_FLT_RULE;
		break;
	case IPA_IOC_GET_RT_TBL32:
		cmd = IPA_IOC_GET_RT_TBL;
		break;
	case IPA_IOC_COPY_HDR32:
		cmd = IPA_IOC_COPY_HDR;
		break;
	case IPA_IOC_QUERY_INTF32:
		cmd = IPA_IOC_QUERY_INTF;
		break;
	case IPA_IOC_QUERY_INTF_TX_PROPS32:
		cmd = IPA_IOC_QUERY_INTF_TX_PROPS;
		break;
	case IPA_IOC_QUERY_INTF_RX_PROPS32:
		cmd = IPA_IOC_QUERY_INTF_RX_PROPS;
		break;
	case IPA_IOC_QUERY_INTF_EXT_PROPS32:
		cmd = IPA_IOC_QUERY_INTF_EXT_PROPS;
		break;
	case IPA_IOC_GET_HDR32:
		cmd = IPA_IOC_GET_HDR;
		break;
	case IPA_IOC_ALLOC_NAT_MEM32:
		/*
		 * Struct layout differs (compat_size_t/compat_off_t), so
		 * marshal the 32-bit struct into the native one by hand
		 * instead of forwarding to ipa3_ioctl().
		 */
		if (copy_from_user((u8 *)&nat_mem32, (u8 *)arg,
			sizeof(struct ipa3_ioc_nat_alloc_mem32))) {
			retval = -EFAULT;
			goto ret;
		}
		memcpy(nat_mem.dev_name, nat_mem32.dev_name,
			IPA_RESOURCE_NAME_MAX);
		nat_mem.size = (size_t)nat_mem32.size;
		nat_mem.offset = (off_t)nat_mem32.offset;

		/* null terminate the string */
		nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';

		if (ipa3_allocate_nat_device(&nat_mem)) {
			retval = -EFAULT;
			goto ret;
		}
		/* copy the resulting offset back in 32-bit form */
		nat_mem32.offset = (compat_off_t)nat_mem.offset;
		if (copy_to_user((u8 *)arg, (u8 *)&nat_mem32,
			sizeof(struct ipa3_ioc_nat_alloc_mem32))) {
			retval = -EFAULT;
		}
ret:
		return retval;
	case IPA_IOC_V4_INIT_NAT32:
		cmd = IPA_IOC_V4_INIT_NAT;
		break;
	case IPA_IOC_NAT_DMA32:
		cmd = IPA_IOC_NAT_DMA;
		break;
	case IPA_IOC_V4_DEL_NAT32:
		cmd = IPA_IOC_V4_DEL_NAT;
		break;
	case IPA_IOC_GET_NAT_OFFSET32:
		cmd = IPA_IOC_GET_NAT_OFFSET;
		break;
	case IPA_IOC_PULL_MSG32:
		cmd = IPA_IOC_PULL_MSG;
		break;
	case IPA_IOC_RM_ADD_DEPENDENCY32:
		cmd = IPA_IOC_RM_ADD_DEPENDENCY;
		break;
	case IPA_IOC_RM_DEL_DEPENDENCY32:
		cmd = IPA_IOC_RM_DEL_DEPENDENCY;
		break;
	case IPA_IOC_GENERATE_FLT_EQ32:
		cmd = IPA_IOC_GENERATE_FLT_EQ;
		break;
	case IPA_IOC_QUERY_RT_TBL_INDEX32:
		cmd = IPA_IOC_QUERY_RT_TBL_INDEX;
		break;
	case IPA_IOC_WRITE_QMAPID32:
		cmd = IPA_IOC_WRITE_QMAPID;
		break;
	case IPA_IOC_MDFY_FLT_RULE32:
		cmd = IPA_IOC_MDFY_FLT_RULE;
		break;
	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32:
		cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD;
		break;
	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32:
		cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL;
		break;
	case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32:
		cmd = IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED;
		break;
	case IPA_IOC_MDFY_RT_RULE32:
		cmd = IPA_IOC_MDFY_RT_RULE;
		break;
	/* these commands are identical in 32-bit and native form */
	case IPA_IOC_COMMIT_HDR:
	case IPA_IOC_RESET_HDR:
	case IPA_IOC_COMMIT_RT:
	case IPA_IOC_RESET_RT:
	case IPA_IOC_COMMIT_FLT:
	case IPA_IOC_RESET_FLT:
	case IPA_IOC_DUMP:
	case IPA_IOC_PUT_RT_TBL:
	case IPA_IOC_PUT_HDR:
	case IPA_IOC_SET_FLT:
	case IPA_IOC_QUERY_EP_MAPPING:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return ipa3_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
3227#endif
3228
/* forward declaration; ipa3_write() is defined later in this file */
static ssize_t ipa3_write(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos);

/* character-device entry points for the IPA driver node */
static const struct file_operations ipa3_drv_fops = {
	.owner = THIS_MODULE,
	.open = ipa3_open,
	.read = ipa3_read,
	.write = ipa3_write,
	.unlocked_ioctl = ipa3_ioctl,
#ifdef CONFIG_COMPAT
	/* 32-bit userspace on a 64-bit kernel */
	.compat_ioctl = compat_ipa3_ioctl,
#endif
};
3242
3243static int ipa3_get_clks(struct device *dev)
3244{
Ghanim Fodi6a831342017-03-07 18:19:15 +02003245 if (ipa3_res.use_bw_vote) {
3246 IPADBG("Vote IPA clock by bw voting via bus scaling driver\n");
3247 ipa3_clk = NULL;
3248 return 0;
3249 }
3250
Amir Levy9659e592016-10-27 18:08:27 +03003251 ipa3_clk = clk_get(dev, "core_clk");
3252 if (IS_ERR(ipa3_clk)) {
3253 if (ipa3_clk != ERR_PTR(-EPROBE_DEFER))
3254 IPAERR("fail to get ipa clk\n");
3255 return PTR_ERR(ipa3_clk);
3256 }
3257 return 0;
3258}
3259
3260/**
3261 * _ipa_enable_clks_v3_0() - Enable IPA clocks.
3262 */
3263void _ipa_enable_clks_v3_0(void)
3264{
Ghanim Fodi6a831342017-03-07 18:19:15 +02003265 IPADBG_LOW("curr_ipa_clk_rate=%d", ipa3_ctx->curr_ipa_clk_rate);
Amir Levy9659e592016-10-27 18:08:27 +03003266 if (ipa3_clk) {
Ghanim Fodi6a831342017-03-07 18:19:15 +02003267 IPADBG_LOW("enabling gcc_ipa_clk\n");
Amir Levy9659e592016-10-27 18:08:27 +03003268 clk_prepare(ipa3_clk);
3269 clk_enable(ipa3_clk);
Amir Levy9659e592016-10-27 18:08:27 +03003270 clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
Amir Levy9659e592016-10-27 18:08:27 +03003271 }
3272
Ghanim Fodi6a831342017-03-07 18:19:15 +02003273 ipa3_uc_notify_clk_state(true);
Amir Levy9659e592016-10-27 18:08:27 +03003274}
3275
3276static unsigned int ipa3_get_bus_vote(void)
3277{
3278 unsigned int idx = 1;
3279
3280 if (ipa3_ctx->curr_ipa_clk_rate == ipa3_ctx->ctrl->ipa_clk_rate_svs) {
3281 idx = 1;
3282 } else if (ipa3_ctx->curr_ipa_clk_rate ==
3283 ipa3_ctx->ctrl->ipa_clk_rate_nominal) {
3284 if (ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases <= 2)
3285 idx = 1;
3286 else
3287 idx = 2;
3288 } else if (ipa3_ctx->curr_ipa_clk_rate ==
3289 ipa3_ctx->ctrl->ipa_clk_rate_turbo) {
3290 idx = ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases - 1;
3291 } else {
3292 WARN_ON(1);
3293 }
3294
3295 IPADBG("curr %d idx %d\n", ipa3_ctx->curr_ipa_clk_rate, idx);
3296
3297 return idx;
3298}
3299
3300/**
3301* ipa3_enable_clks() - Turn on IPA clocks
3302*
3303* Return codes:
3304* None
3305*/
3306void ipa3_enable_clks(void)
3307{
3308 IPADBG("enabling IPA clocks and bus voting\n");
3309
Ghanim Fodi6a831342017-03-07 18:19:15 +02003310 if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
3311 ipa3_get_bus_vote()))
3312 WARN_ON(1);
Amir Levy9659e592016-10-27 18:08:27 +03003313
Ghanim Fodi6a831342017-03-07 18:19:15 +02003314 ipa3_ctx->ctrl->ipa3_enable_clks();
Amir Levy9659e592016-10-27 18:08:27 +03003315}
3316
3317
/**
 * _ipa_disable_clks_v3_0() - Disable IPA clocks.
 *
 * Suspends the apps pipes and notifies the uC before the core clock is
 * gated, so that no traffic is in flight when the clock goes away. The
 * clock itself is only gated when direct clock voting is in use
 * (ipa3_clk is NULL when bus bandwidth voting is used instead).
 */
void _ipa_disable_clks_v3_0(void)
{
	/* quiesce pipes before removing the clock */
	ipa3_suspend_apps_pipes(true);
	ipa3_uc_notify_clk_state(false);
	if (ipa3_clk) {
		IPADBG_LOW("disabling gcc_ipa_clk\n");
		clk_disable_unprepare(ipa3_clk);
	}
}
3330
3331/**
3332* ipa3_disable_clks() - Turn off IPA clocks
3333*
3334* Return codes:
3335* None
3336*/
3337void ipa3_disable_clks(void)
3338{
3339 IPADBG("disabling IPA clocks and bus voting\n");
3340
3341 ipa3_ctx->ctrl->ipa3_disable_clks();
3342
Ghanim Fodi6a831342017-03-07 18:19:15 +02003343 if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl, 0))
3344 WARN_ON(1);
Amir Levy9659e592016-10-27 18:08:27 +03003345}
3346
/**
 * ipa3_start_tag_process() - Send TAG packet and wait for it to come back
 * @work: work item (unused)
 *
 * Scheduled just before clock gating when the active client counter is
 * about to reach zero. The TAG process drains any packets still inside
 * IPA HW that were not yet handed to the transport, and force-closes all
 * aggregation frames. When it completes, the special active-clients
 * reference taken for it is dropped.
 */
static void ipa3_start_tag_process(struct work_struct *work)
{
	int rc;

	IPADBG("starting TAG process\n");
	/* force-close aggregation frames on every pipe (-1 = all) */
	rc = ipa3_tag_aggr_force_close(-1);
	if (rc)
		IPAERR("ipa3_tag_aggr_force_close failed %d\n", rc);
	IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS");

	IPADBG("TAG process done\n");
}
3371
3372/**
3373* ipa3_active_clients_log_mod() - Log a modification in the active clients
3374* reference count
3375*
3376* This method logs any modification in the active clients reference count:
3377* It logs the modification in the circular history buffer
3378* It logs the modification in the hash table - looking for an entry,
3379* creating one if needed and deleting one if needed.
3380*
3381* @id: ipa3_active client logging info struct to hold the log information
3382* @inc: a boolean variable to indicate whether the modification is an increase
3383* or decrease
3384* @int_ctx: a boolean variable to indicate whether this call is being made from
3385* an interrupt context and therefore should allocate GFP_ATOMIC memory
3386*
3387* Method process:
3388* - Hash the unique identifier string
3389* - Find the hash in the table
3390* 1)If found, increase or decrease the reference count
3391* 2)If not found, allocate a new hash table entry struct and initialize it
3392* - Remove and deallocate unneeded data structure
3393* - Log the call in the circular history buffer (unless it is a simple call)
3394*/
3395void ipa3_active_clients_log_mod(struct ipa_active_client_logging_info *id,
3396 bool inc, bool int_ctx)
3397{
3398 char temp_str[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN];
3399 unsigned long long t;
3400 unsigned long nanosec_rem;
3401 struct ipa3_active_client_htable_entry *hentry;
3402 struct ipa3_active_client_htable_entry *hfound;
3403 u32 hkey;
3404 char str_to_hash[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
3405
3406 hfound = NULL;
3407 memset(str_to_hash, 0, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
3408 strlcpy(str_to_hash, id->id_string, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
Amir Levyd9f51132016-11-14 16:55:35 +02003409 hkey = jhash(str_to_hash, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN,
Amir Levy9659e592016-10-27 18:08:27 +03003410 0);
3411 hash_for_each_possible(ipa3_ctx->ipa3_active_clients_logging.htable,
3412 hentry, list, hkey) {
3413 if (!strcmp(hentry->id_string, id->id_string)) {
3414 hentry->count = hentry->count + (inc ? 1 : -1);
3415 hfound = hentry;
3416 }
3417 }
3418 if (hfound == NULL) {
3419 hentry = NULL;
3420 hentry = kzalloc(sizeof(
3421 struct ipa3_active_client_htable_entry),
3422 int_ctx ? GFP_ATOMIC : GFP_KERNEL);
3423 if (hentry == NULL) {
3424 IPAERR("failed allocating active clients hash entry");
3425 return;
3426 }
3427 hentry->type = id->type;
3428 strlcpy(hentry->id_string, id->id_string,
3429 IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
3430 INIT_HLIST_NODE(&hentry->list);
3431 hentry->count = inc ? 1 : -1;
3432 hash_add(ipa3_ctx->ipa3_active_clients_logging.htable,
3433 &hentry->list, hkey);
3434 } else if (hfound->count == 0) {
3435 hash_del(&hfound->list);
3436 kfree(hfound);
3437 }
3438
3439 if (id->type != SIMPLE) {
3440 t = local_clock();
3441 nanosec_rem = do_div(t, 1000000000) / 1000;
3442 snprintf(temp_str, IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN,
3443 inc ? "[%5lu.%06lu] ^ %s, %s: %d" :
3444 "[%5lu.%06lu] v %s, %s: %d",
3445 (unsigned long)t, nanosec_rem,
3446 id->id_string, id->file, id->line);
3447 ipa3_active_clients_log_insert(temp_str);
3448 }
3449}
3450
/* Convenience wrapper: log a decrement of the active clients count. */
void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
		bool int_ctx)
{
	ipa3_active_clients_log_mod(id, false, int_ctx);
}
3456
/* Convenience wrapper: log an increment of the active clients count. */
void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id,
		bool int_ctx)
{
	ipa3_active_clients_log_mod(id, true, int_ctx);
}
3462
/**
 * ipa3_inc_client_enable_clks() - Increase active clients counter, and
 * enable ipa clocks if necessary
 * @id: logging info for the client taking the reference
 *
 * Fast path: if the counter is already non-zero, an atomic increment is
 * enough and no locking is needed. Otherwise the mutex is taken and the
 * counter re-checked (another thread may have enabled clocks while we
 * waited), and only then are the clocks actually enabled and the apps
 * pipes unsuspended. May sleep; not callable from atomic context.
 *
 * Return codes:
 * None
 */
void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
{
	int ret;

	ipa3_active_clients_log_inc(id, false);
	/* fast path: clocks already on, just take another reference */
	ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
	if (ret) {
		IPADBG_LOW("active clients = %d\n",
			atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
		return;
	}

	mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);

	/* somebody might have voted for clocks meanwhile */
	ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
	if (ret) {
		mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
		IPADBG_LOW("active clients = %d\n",
			atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
		return;
	}

	/* we are the first client: turn clocks on under the mutex */
	ipa3_enable_clks();
	atomic_inc(&ipa3_ctx->ipa3_active_clients.cnt);
	IPADBG_LOW("active clients = %d\n",
		atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
	ipa3_suspend_apps_pipes(false);
	mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
}
3500
3501/**
3502* ipa3_inc_client_enable_clks_no_block() - Only increment the number of active
3503* clients if no asynchronous actions should be done. Asynchronous actions are
3504* locking a mutex and waking up IPA HW.
3505*
3506* Return codes: 0 for success
3507* -EPERM if an asynchronous action should have been done
3508*/
3509int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
3510 *id)
3511{
Skylar Chang242952b2017-07-20 15:04:05 -07003512 int ret;
Amir Levy9659e592016-10-27 18:08:27 +03003513
Skylar Chang242952b2017-07-20 15:04:05 -07003514 ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
3515 if (ret) {
3516 ipa3_active_clients_log_inc(id, true);
3517 IPADBG_LOW("active clients = %d\n",
3518 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
3519 return 0;
Amir Levy9659e592016-10-27 18:08:27 +03003520 }
Amir Levy9659e592016-10-27 18:08:27 +03003521
Skylar Chang242952b2017-07-20 15:04:05 -07003522 return -EPERM;
3523}
3524
/**
 * __ipa3_dec_client_disable_clks() - Drop one active-clients reference
 * and gate the clocks when the last reference goes away.
 *
 * Fast path: atomically decrement as long as the counter stays above
 * zero. If this looks like the last reference, take the mutex and, when
 * a TAG process is pending, hand the final decrement over to the TAG
 * work item instead of gating immediately; otherwise re-check the
 * counter under the mutex (another context may have incremented it) and
 * gate the clocks only if it really reached zero.
 */
static void __ipa3_dec_client_disable_clks(void)
{
	int ret;

	if (!atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)) {
		IPAERR("trying to disable clocks with refcnt is 0!\n");
		ipa_assert();
		return;
	}

	/* fast path: decrement unless we would hit zero */
	ret = atomic_add_unless(&ipa3_ctx->ipa3_active_clients.cnt, -1, 1);
	if (ret)
		goto bail;

	/* seems like this is the only client holding the clocks */
	mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
	if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) == 1 &&
	    ipa3_ctx->tag_process_before_gating) {
		ipa3_ctx->tag_process_before_gating = false;
		/*
		 * When TAG process ends, active clients will be
		 * decreased
		 */
		queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_tag_work);
		goto unlock_mutex;
	}

	/* a different context might increase the clock reference meanwhile */
	ret = atomic_sub_return(1, &ipa3_ctx->ipa3_active_clients.cnt);
	if (ret > 0)
		goto unlock_mutex;
	ipa3_disable_clks();

unlock_mutex:
	mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
bail:
	IPADBG_LOW("active clients = %d\n",
		atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
}
3564
/**
 * ipa3_dec_client_disable_clks() - Decrease active clients counter
 * @id: logging info for the client dropping its reference
 *
 * In case that there are no active clients this function also starts
 * TAG process. When TAG progress ends ipa clocks will be gated.
 * start_tag_process_again flag is set during this function to signal TAG
 * process to start again as there was another client that may send data to ipa
 *
 * May sleep (takes a mutex on the last-reference path); use
 * ipa3_dec_client_disable_clks_no_block() from atomic context.
 *
 * Return codes:
 * None
 */
void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
{
	ipa3_active_clients_log_dec(id, false);
	__ipa3_dec_client_disable_clks();
}
3581
/* Workqueue wrapper used by the no-block variant to perform the final,
 * possibly-blocking decrement from process context.
 */
static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work)
{
	__ipa3_dec_client_disable_clks();
}
3586
/**
 * ipa3_dec_client_disable_clks_no_block() - Decrease active clients counter
 * if possible without blocking. If this is the last client then the decrease
 * will happen from work queue context.
 * @id: logging info for the client dropping its reference
 *
 * Safe to call from atomic context: the decrement happens inline only
 * when it cannot reach zero; the last-reference path (which may sleep)
 * is deferred to the power-management workqueue.
 *
 * Return codes:
 * None
 */
void ipa3_dec_client_disable_clks_no_block(
	struct ipa_active_client_logging_info *id)
{
	int ret;

	ipa3_active_clients_log_dec(id, true);
	/* decrement inline unless we would hit zero */
	ret = atomic_add_unless(&ipa3_ctx->ipa3_active_clients.cnt, -1, 1);
	if (ret) {
		IPADBG_LOW("active clients = %d\n",
			atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
		return;
	}

	/* seems like this is the only client holding the clocks */
	queue_work(ipa3_ctx->power_mgmt_wq,
		&ipa_dec_clients_disable_clks_on_wq_work);
}
3612
3613/**
3614* ipa3_inc_acquire_wakelock() - Increase active clients counter, and
3615* acquire wakelock if necessary
3616*
3617* Return codes:
3618* None
3619*/
3620void ipa3_inc_acquire_wakelock(void)
3621{
3622 unsigned long flags;
3623
3624 spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
3625 ipa3_ctx->wakelock_ref_cnt.cnt++;
3626 if (ipa3_ctx->wakelock_ref_cnt.cnt == 1)
3627 __pm_stay_awake(&ipa3_ctx->w_lock);
3628 IPADBG_LOW("active wakelock ref cnt = %d\n",
3629 ipa3_ctx->wakelock_ref_cnt.cnt);
3630 spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
3631}
3632
3633/**
3634 * ipa3_dec_release_wakelock() - Decrease active clients counter
3635 *
3636 * In case if the ref count is 0, release the wakelock.
3637 *
3638 * Return codes:
3639 * None
3640 */
3641void ipa3_dec_release_wakelock(void)
3642{
3643 unsigned long flags;
3644
3645 spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
3646 ipa3_ctx->wakelock_ref_cnt.cnt--;
3647 IPADBG_LOW("active wakelock ref cnt = %d\n",
3648 ipa3_ctx->wakelock_ref_cnt.cnt);
3649 if (ipa3_ctx->wakelock_ref_cnt.cnt == 0)
3650 __pm_relax(&ipa3_ctx->w_lock);
3651 spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
3652}
3653
3654int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
3655 u32 bandwidth_mbps)
3656{
3657 enum ipa_voltage_level needed_voltage;
3658 u32 clk_rate;
3659
3660 IPADBG_LOW("floor_voltage=%d, bandwidth_mbps=%u",
3661 floor_voltage, bandwidth_mbps);
3662
3663 if (floor_voltage < IPA_VOLTAGE_UNSPECIFIED ||
3664 floor_voltage >= IPA_VOLTAGE_MAX) {
3665 IPAERR("bad voltage\n");
3666 return -EINVAL;
3667 }
3668
3669 if (ipa3_ctx->enable_clock_scaling) {
3670 IPADBG_LOW("Clock scaling is enabled\n");
3671 if (bandwidth_mbps >=
3672 ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo)
3673 needed_voltage = IPA_VOLTAGE_TURBO;
3674 else if (bandwidth_mbps >=
3675 ipa3_ctx->ctrl->clock_scaling_bw_threshold_nominal)
3676 needed_voltage = IPA_VOLTAGE_NOMINAL;
3677 else
3678 needed_voltage = IPA_VOLTAGE_SVS;
3679 } else {
3680 IPADBG_LOW("Clock scaling is disabled\n");
3681 needed_voltage = IPA_VOLTAGE_NOMINAL;
3682 }
3683
3684 needed_voltage = max(needed_voltage, floor_voltage);
3685 switch (needed_voltage) {
3686 case IPA_VOLTAGE_SVS:
3687 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs;
3688 break;
3689 case IPA_VOLTAGE_NOMINAL:
3690 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_nominal;
3691 break;
3692 case IPA_VOLTAGE_TURBO:
3693 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
3694 break;
3695 default:
3696 IPAERR("bad voltage\n");
3697 WARN_ON(1);
3698 return -EFAULT;
3699 }
3700
3701 if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) {
3702 IPADBG_LOW("Same voltage\n");
3703 return 0;
3704 }
3705
Skylar Chang242952b2017-07-20 15:04:05 -07003706 /* Hold the mutex to avoid race conditions with ipa3_enable_clocks() */
3707 mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
Amir Levy9659e592016-10-27 18:08:27 +03003708 ipa3_ctx->curr_ipa_clk_rate = clk_rate;
3709 IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
Skylar Chang242952b2017-07-20 15:04:05 -07003710 if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) > 0) {
Ghanim Fodi6a831342017-03-07 18:19:15 +02003711 if (ipa3_clk)
3712 clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
3713 if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
Skylar Chang242952b2017-07-20 15:04:05 -07003714 ipa3_get_bus_vote()))
Ghanim Fodi6a831342017-03-07 18:19:15 +02003715 WARN_ON(1);
Amir Levy9659e592016-10-27 18:08:27 +03003716 } else {
3717 IPADBG_LOW("clocks are gated, not setting rate\n");
3718 }
Skylar Chang242952b2017-07-20 15:04:05 -07003719 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
Amir Levy9659e592016-10-27 18:08:27 +03003720 IPADBG_LOW("Done\n");
Skylar Chang1cbe99c2017-05-01 13:44:03 -07003721
Amir Levy9659e592016-10-27 18:08:27 +03003722 return 0;
3723}
3724
/* Schedule the delayed transport-resource release work; the delay gives
 * in-flight traffic a grace period before the clock vote is dropped.
 */
static void ipa3_process_irq_schedule_rel(void)
{
	queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq,
		&ipa3_transport_release_resource_work,
		msecs_to_jiffies(IPA_TRANSPORT_PROD_TIMEOUT_MSEC));
}
3731
/**
 * ipa3_suspend_handler() - Handles the suspend interrupt:
 * wakes up the suspended peripheral by requesting its consumer
 * @interrupt: Interrupt type
 * @private_data: The client's private data
 * @interrupt_data: Interrupt specific information data
 *
 * Walks the endpoint bitmap delivered with the TX_SUSPEND interrupt.
 * For apps consumer pipes, an active-clients reference is taken (which
 * unsuspends the pipes) and a delayed release is scheduled. For other
 * pipes, the matching RM resource is requested; if that is not possible
 * (-EPERM) on a consumer pipe, HOLB drop is enabled as a last resort -
 * and failure to do even that stalls the system via BUG().
 */
void ipa3_suspend_handler(enum ipa_irq_type interrupt,
				void *private_data,
				void *interrupt_data)
{
	enum ipa_rm_resource_name resource;
	u32 suspend_data =
		((struct ipa_tx_suspend_irq_data *)interrupt_data)->endpoints;
	u32 bmsk = 1;
	u32 i = 0;
	int res;
	struct ipa_ep_cfg_holb holb_cfg;

	IPADBG("interrupt=%d, interrupt_data=%u\n",
		interrupt, suspend_data);
	memset(&holb_cfg, 0, sizeof(holb_cfg));
	/* immediate drop when HOLB is enabled below */
	holb_cfg.tmr_val = 0;

	/* one bit per endpoint in the suspend bitmap */
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if ((suspend_data & bmsk) && (ipa3_ctx->ep[i].valid)) {
			if (IPA_CLIENT_IS_APPS_CONS(ipa3_ctx->ep[i].client)) {
				/*
				 * pipe will be unsuspended as part of
				 * enabling IPA clocks
				 */
				mutex_lock(&ipa3_ctx->transport_pm.
					transport_pm_mutex);
				/* take the clock vote only once */
				if (!atomic_read(
					&ipa3_ctx->transport_pm.dec_clients)
					) {
					IPA_ACTIVE_CLIENTS_INC_EP(
						ipa3_ctx->ep[i].client);
					IPADBG_LOW("Pipes un-suspended.\n");
					IPADBG_LOW("Enter poll mode.\n");
					atomic_set(
					&ipa3_ctx->transport_pm.dec_clients,
					1);
					ipa3_process_irq_schedule_rel();
				}
				mutex_unlock(&ipa3_ctx->transport_pm.
					transport_pm_mutex);
			} else {
				resource = ipa3_get_rm_resource_from_ep(i);
				res =
				ipa_rm_request_resource_with_timer(resource);
				if (res == -EPERM &&
					IPA_CLIENT_IS_CONS(
					ipa3_ctx->ep[i].client)) {
					/* fall back to HOLB drop */
					holb_cfg.en = 1;
					res = ipa3_cfg_ep_holb_by_client(
					ipa3_ctx->ep[i].client, &holb_cfg);
					if (res) {
						IPAERR("holb en fail, stall\n");
						BUG();
					}
				}
			}
		}
		bmsk = bmsk << 1;
	}
}
3799
3800/**
3801* ipa3_restore_suspend_handler() - restores the original suspend IRQ handler
3802* as it was registered in the IPA init sequence.
3803* Return codes:
3804* 0: success
3805* -EPERM: failed to remove current handler or failed to add original handler
3806*/
3807int ipa3_restore_suspend_handler(void)
3808{
3809 int result = 0;
3810
3811 result = ipa3_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ);
3812 if (result) {
3813 IPAERR("remove handler for suspend interrupt failed\n");
3814 return -EPERM;
3815 }
3816
3817 result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
3818 ipa3_suspend_handler, false, NULL);
3819 if (result) {
3820 IPAERR("register handler for suspend interrupt failed\n");
3821 result = -EPERM;
3822 }
3823
3824 IPADBG("suspend handler successfully restored\n");
3825
3826 return result;
3827}
3828
/* RM release callback for APPS_CONS; nothing to do, always succeeds. */
static int ipa3_apps_cons_release_resource(void)
{
	return 0;
}
3833
/* RM request callback for APPS_CONS; nothing to do, always succeeds. */
static int ipa3_apps_cons_request_resource(void)
{
	return 0;
}
3838
/**
 * ipa3_transport_release_resource() - Delayed work that drops the clock
 * vote taken by the suspend handler, unless EOT activity was seen in the
 * meantime (in which case the release is re-scheduled).
 */
static void ipa3_transport_release_resource(struct work_struct *work)
{
	mutex_lock(&ipa3_ctx->transport_pm.transport_pm_mutex);
	/* check whether still need to decrease client usage */
	if (atomic_read(&ipa3_ctx->transport_pm.dec_clients)) {
		if (atomic_read(&ipa3_ctx->transport_pm.eot_activity)) {
			IPADBG("EOT pending Re-scheduling\n");
			ipa3_process_irq_schedule_rel();
		} else {
			atomic_set(&ipa3_ctx->transport_pm.dec_clients, 0);
			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TRANSPORT_RESOURCE");
		}
	}
	atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
	mutex_unlock(&ipa3_ctx->transport_pm.transport_pm_mutex);
}
3855
3856int ipa3_create_apps_resource(void)
3857{
3858 struct ipa_rm_create_params apps_cons_create_params;
3859 struct ipa_rm_perf_profile profile;
3860 int result = 0;
3861
3862 memset(&apps_cons_create_params, 0,
3863 sizeof(apps_cons_create_params));
3864 apps_cons_create_params.name = IPA_RM_RESOURCE_APPS_CONS;
3865 apps_cons_create_params.request_resource =
3866 ipa3_apps_cons_request_resource;
3867 apps_cons_create_params.release_resource =
3868 ipa3_apps_cons_release_resource;
3869 result = ipa_rm_create_resource(&apps_cons_create_params);
3870 if (result) {
3871 IPAERR("ipa_rm_create_resource failed\n");
3872 return result;
3873 }
3874
3875 profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
3876 ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile);
3877
3878 return result;
3879}
3880
3881/**
3882 * ipa3_init_interrupts() - Register to IPA IRQs
3883 *
3884 * Return codes: 0 in success, negative in failure
3885 *
3886 */
3887int ipa3_init_interrupts(void)
3888{
3889 int result;
3890
3891 /*register IPA IRQ handler*/
3892 result = ipa3_interrupts_init(ipa3_res.ipa_irq, 0,
3893 master_dev);
3894 if (result) {
3895 IPAERR("ipa interrupts initialization failed\n");
3896 return -ENODEV;
3897 }
3898
3899 /*add handler for suspend interrupt*/
3900 result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
3901 ipa3_suspend_handler, false, NULL);
3902 if (result) {
3903 IPAERR("register handler for suspend interrupt failed\n");
3904 result = -ENODEV;
3905 goto fail_add_interrupt_handler;
3906 }
3907
3908 return 0;
3909
3910fail_add_interrupt_handler:
3911 free_irq(ipa3_res.ipa_irq, master_dev);
3912 return result;
3913}
3914
3915/**
3916 * ipa3_destroy_flt_tbl_idrs() - destroy the idr structure for flt tables
3917 * The idr strcuture per filtering table is intended for rule id generation
3918 * per filtering rule.
3919 */
3920static void ipa3_destroy_flt_tbl_idrs(void)
3921{
3922 int i;
3923 struct ipa3_flt_tbl *flt_tbl;
3924
3925 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
3926 if (!ipa_is_ep_support_flt(i))
3927 continue;
3928
3929 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
3930 idr_destroy(&flt_tbl->rule_ids);
3931 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
3932 idr_destroy(&flt_tbl->rule_ids);
3933 }
3934}
3935
3936static void ipa3_freeze_clock_vote_and_notify_modem(void)
3937{
3938 int res;
Amir Levy9659e592016-10-27 18:08:27 +03003939 struct ipa_active_client_logging_info log_info;
3940
3941 if (ipa3_ctx->smp2p_info.res_sent)
3942 return;
3943
Skylar Change1209942017-02-02 14:26:38 -08003944 if (ipa3_ctx->smp2p_info.out_base_id == 0) {
3945 IPAERR("smp2p out gpio not assigned\n");
3946 return;
3947 }
3948
Amir Levy9659e592016-10-27 18:08:27 +03003949 IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "FREEZE_VOTE");
3950 res = ipa3_inc_client_enable_clks_no_block(&log_info);
3951 if (res)
Skylar Change1209942017-02-02 14:26:38 -08003952 ipa3_ctx->smp2p_info.ipa_clk_on = false;
Amir Levy9659e592016-10-27 18:08:27 +03003953 else
Skylar Change1209942017-02-02 14:26:38 -08003954 ipa3_ctx->smp2p_info.ipa_clk_on = true;
Amir Levy9659e592016-10-27 18:08:27 +03003955
Skylar Change1209942017-02-02 14:26:38 -08003956 gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
3957 IPA_GPIO_OUT_CLK_VOTE_IDX,
3958 ipa3_ctx->smp2p_info.ipa_clk_on);
3959 gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
3960 IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX, 1);
Amir Levy9659e592016-10-27 18:08:27 +03003961
Skylar Change1209942017-02-02 14:26:38 -08003962 ipa3_ctx->smp2p_info.res_sent = true;
3963 IPADBG("IPA clocks are %s\n",
3964 ipa3_ctx->smp2p_info.ipa_clk_on ? "ON" : "OFF");
3965}
3966
3967void ipa3_reset_freeze_vote(void)
3968{
3969 if (ipa3_ctx->smp2p_info.res_sent == false)
3970 return;
3971
3972 if (ipa3_ctx->smp2p_info.ipa_clk_on)
3973 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("FREEZE_VOTE");
3974
3975 gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
3976 IPA_GPIO_OUT_CLK_VOTE_IDX, 0);
3977 gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
3978 IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX, 0);
3979
3980 ipa3_ctx->smp2p_info.res_sent = false;
3981 ipa3_ctx->smp2p_info.ipa_clk_on = false;
Amir Levy9659e592016-10-27 18:08:27 +03003982}
3983
3984static int ipa3_panic_notifier(struct notifier_block *this,
3985 unsigned long event, void *ptr)
3986{
3987 int res;
3988
3989 ipa3_freeze_clock_vote_and_notify_modem();
3990
3991 IPADBG("Calling uC panic handler\n");
3992 res = ipa3_uc_panic_notifier(this, event, ptr);
3993 if (res)
3994 IPAERR("uC panic handler failed %d\n", res);
3995
3996 return NOTIFY_DONE;
3997}
3998
/* Descriptor registered on the kernel panic notifier chain */
static struct notifier_block ipa3_panic_blk = {
	.notifier_call = ipa3_panic_notifier,
	/* IPA panic handler needs to run before modem shuts down */
	.priority = INT_MAX,
};
4004
/* Hook ipa3_panic_blk onto the kernel panic notifier chain */
static void ipa3_register_panic_hdlr(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
		&ipa3_panic_blk);
}
4010
4011static void ipa3_trigger_ipa_ready_cbs(void)
4012{
4013 struct ipa3_ready_cb_info *info;
4014
4015 mutex_lock(&ipa3_ctx->lock);
4016
4017 /* Call all the CBs */
4018 list_for_each_entry(info, &ipa3_ctx->ipa_ready_cb_list, link)
4019 if (info->ready_cb)
4020 info->ready_cb(info->user_data);
4021
4022 mutex_unlock(&ipa3_ctx->lock);
4023}
4024
4025static int ipa3_gsi_pre_fw_load_init(void)
4026{
4027 int result;
4028
4029 result = gsi_configure_regs(ipa3_res.transport_mem_base,
4030 ipa3_res.transport_mem_size,
4031 ipa3_res.ipa_mem_base);
4032 if (result) {
4033 IPAERR("Failed to configure GSI registers\n");
4034 return -EINVAL;
4035 }
4036
4037 return 0;
4038}
4039
/* uC "loaded" event handler: wake everyone waiting on the uC completion */
static void ipa3_uc_is_loaded(void)
{
	IPADBG("\n");
	complete_all(&ipa3_ctx->uc_loaded_completion_obj);
}
4045
Amir Levy41644242016-11-03 15:38:09 +02004046static enum gsi_ver ipa3_get_gsi_ver(enum ipa_hw_type ipa_hw_type)
4047{
4048 enum gsi_ver gsi_ver;
4049
4050 switch (ipa_hw_type) {
4051 case IPA_HW_v3_0:
4052 case IPA_HW_v3_1:
4053 gsi_ver = GSI_VER_1_0;
4054 break;
4055 case IPA_HW_v3_5:
4056 gsi_ver = GSI_VER_1_2;
4057 break;
4058 case IPA_HW_v3_5_1:
4059 gsi_ver = GSI_VER_1_3;
4060 break;
Michael Adisumarta891a4ff2017-05-16 16:40:06 -07004061 case IPA_HW_v4_0:
4062 gsi_ver = GSI_VER_2_0;
4063 break;
Amir Levy41644242016-11-03 15:38:09 +02004064 default:
4065 IPAERR("No GSI version for ipa type %d\n", ipa_hw_type);
4066 WARN_ON(1);
4067 gsi_ver = GSI_VER_ERR;
4068 }
4069
4070 IPADBG("GSI version %d\n", gsi_ver);
4071
4072 return gsi_ver;
4073}
4074
Amir Levy9659e592016-10-27 18:08:27 +03004075/**
4076 * ipa3_post_init() - Initialize the IPA Driver (Part II).
4077 * This part contains all initialization which requires interaction with
Amir Levya59ed3f2017-03-05 17:30:55 +02004078 * IPA HW (via GSI).
Amir Levy9659e592016-10-27 18:08:27 +03004079 *
 * @resource_p: contains platform-specific values from the DTS file
4081 * @pdev: The platform device structure representing the IPA driver
4082 *
4083 * Function initialization process:
Amir Levy54fe4d32017-03-16 11:21:49 +02004084 * - Initialize endpoints bitmaps
4085 * - Initialize resource groups min and max values
4086 * - Initialize filtering lists heads and idr
4087 * - Initialize interrupts
Amir Levya59ed3f2017-03-05 17:30:55 +02004088 * - Register GSI
Amir Levy9659e592016-10-27 18:08:27 +03004089 * - Setup APPS pipes
4090 * - Initialize tethering bridge
4091 * - Initialize IPA debugfs
4092 * - Initialize IPA uC interface
4093 * - Initialize WDI interface
4094 * - Initialize USB interface
4095 * - Register for panic handler
4096 * - Trigger IPA ready callbacks (to all subscribers)
4097 * - Trigger IPA completion object (to all who wait on it)
4098 */
static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
			  struct device *ipa_dev)
{
	int result;
	struct gsi_per_props gsi_props;
	struct ipa3_uc_hdlrs uc_hdlrs = { 0 };
	struct ipa3_flt_tbl *flt_tbl;
	int i;

	/* part I of init (ipa3_pre_init) must have allocated the context */
	if (ipa3_ctx == NULL) {
		IPADBG("IPA driver haven't initialized\n");
		return -ENXIO;
	}

	/* Prevent consequent calls from trying to load the FW again. */
	if (ipa3_ctx->ipa_initialization_complete)
		return 0;

	/*
	 * indication whether working in MHI config or non MHI config is given
	 * in ipa3_write which is launched before ipa3_post_init. i.e. from
	 * this point it is safe to use ipa3_ep_mapping array and the correct
	 * entry will be returned from ipa3_get_hw_type_index()
	 */
	ipa_init_ep_flt_bitmap();
	IPADBG("EP with flt support bitmap 0x%x (%u pipes)\n",
		ipa3_ctx->ep_flt_bitmap, ipa3_ctx->ep_flt_num);

	/* Assign resource limitation to each group */
	ipa3_set_resorce_groups_min_max_limits();

	/*
	 * Per filtering-capable pipe: init the IPv4 and IPv6 filter table
	 * heads, decide whether each (hashable/non-hashable) table lives in
	 * system memory or local SRAM, and init the rule-id idr.
	 */
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i))
			continue;

		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
		flt_tbl->in_sys[IPA_RULE_HASHABLE] =
			!ipa3_ctx->ip4_flt_tbl_hash_lcl;
		flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
			!ipa3_ctx->ip4_flt_tbl_nhash_lcl;
		idr_init(&flt_tbl->rule_ids);

		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
		flt_tbl->in_sys[IPA_RULE_HASHABLE] =
			!ipa3_ctx->ip6_flt_tbl_hash_lcl;
		flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
			!ipa3_ctx->ip6_flt_tbl_nhash_lcl;
		idr_init(&flt_tbl->rule_ids);
	}

	/* with the RG10 workaround active, IRQ registration is skipped */
	if (!ipa3_ctx->apply_rg10_wa) {
		result = ipa3_init_interrupts();
		if (result) {
			IPAERR("ipa initialization of interrupts failed\n");
			result = -ENODEV;
			goto fail_register_device;
		}
	} else {
		IPADBG("Initialization of ipa interrupts skipped\n");
	}

	/*
	 * IPAv3.5 and above requires to disable prefetch for USB in order
	 * to allow MBIM to work, currently MBIM is not needed in MHI mode.
	 */
	if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5
		&& ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) &&
		(!ipa3_ctx->ipa_config_is_mhi))
		ipa3_disable_prefetch(IPA_CLIENT_USB_CONS);

	/* describe this execution environment to the GSI layer */
	memset(&gsi_props, 0, sizeof(gsi_props));
	gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type);
	gsi_props.ee = resource_p->ee;
	gsi_props.intr = GSI_INTR_IRQ;
	gsi_props.irq = resource_p->transport_irq;
	gsi_props.phys_addr = resource_p->transport_mem_base;
	gsi_props.size = resource_p->transport_mem_size;
	gsi_props.notify_cb = ipa_gsi_notify_cb;
	gsi_props.req_clk_cb = NULL;
	gsi_props.rel_clk_cb = NULL;

	result = gsi_register_device(&gsi_props,
		&ipa3_ctx->gsi_dev_hdl);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR(":gsi register error - %d\n", result);
		result = -ENODEV;
		goto fail_register_device;
	}
	IPADBG("IPA gsi is registered\n");

	/* setup the AP-IPA pipes */
	if (ipa3_setup_apps_pipes()) {
		IPAERR(":failed to setup IPA-Apps pipes\n");
		result = -ENODEV;
		goto fail_setup_apps_pipes;
	}
	IPADBG("IPA GPI pipes were connected\n");

	if (ipa3_ctx->use_ipa_teth_bridge) {
		/* Initialize the tethering bridge driver */
		result = ipa3_teth_bridge_driver_init();
		if (result) {
			IPAERR(":teth_bridge init failed (%d)\n", -result);
			result = -ENODEV;
			goto fail_teth_bridge_driver_init;
		}
		IPADBG("teth_bridge initialized");
	}

	ipa3_debugfs_init();

	/* uC/WDI/NTN init failures are logged but deliberately non-fatal */
	result = ipa3_uc_interface_init();
	if (result)
		IPAERR(":ipa Uc interface init failed (%d)\n", -result);
	else
		IPADBG(":ipa Uc interface init ok\n");

	uc_hdlrs.ipa_uc_loaded_hdlr = ipa3_uc_is_loaded;
	ipa3_uc_register_handlers(IPA_HW_FEATURE_COMMON, &uc_hdlrs);

	result = ipa3_wdi_init();
	if (result)
		IPAERR(":wdi init failed (%d)\n", -result);
	else
		IPADBG(":wdi init ok\n");

	result = ipa3_ntn_init();
	if (result)
		IPAERR(":ntn init failed (%d)\n", -result);
	else
		IPADBG(":ntn init ok\n");

	ipa3_register_panic_hdlr();

	ipa3_ctx->q6_proxy_clk_vote_valid = true;

	/* publish completion under the lock, then wake all subscribers */
	mutex_lock(&ipa3_ctx->lock);
	ipa3_ctx->ipa_initialization_complete = true;
	mutex_unlock(&ipa3_ctx->lock);

	ipa3_trigger_ipa_ready_cbs();
	complete_all(&ipa3_ctx->init_completion_obj);
	pr_info("IPA driver initialization was successful.\n");

	return 0;

	/* unwind in reverse order of construction */
fail_teth_bridge_driver_init:
	ipa3_teardown_apps_pipes();
fail_setup_apps_pipes:
	gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
fail_register_device:
	ipa3_destroy_flt_tbl_idrs();
	return result;
}
4255
/* Work item: run part II of IPA init (ipa3_post_init) in workqueue context */
static void ipa3_post_init_wq(struct work_struct *work)
{
	ipa3_post_init(&ipa3_res, ipa3_ctx->dev);
}
4260
Amir Levy9659e592016-10-27 18:08:27 +03004261static int ipa3_trigger_fw_loading_mdms(void)
4262{
4263 int result;
4264 const struct firmware *fw;
4265
4266 IPADBG("FW loading process initiated\n");
4267
4268 result = request_firmware(&fw, IPA_FWS_PATH, ipa3_ctx->dev);
4269 if (result < 0) {
4270 IPAERR("request_firmware failed, error %d\n", result);
4271 return result;
4272 }
4273 if (fw == NULL) {
4274 IPAERR("Firmware is NULL!\n");
4275 return -EINVAL;
4276 }
4277
4278 IPADBG("FWs are available for loading\n");
4279
Ghanim Fodi37b64952017-01-24 15:42:30 +02004280 result = ipa3_load_fws(fw, ipa3_res.transport_mem_base);
Amir Levy9659e592016-10-27 18:08:27 +03004281 if (result) {
4282 IPAERR("IPA FWs loading has failed\n");
4283 release_firmware(fw);
4284 return result;
4285 }
4286
4287 result = gsi_enable_fw(ipa3_res.transport_mem_base,
Amir Levy85dcd172016-12-06 17:47:39 +02004288 ipa3_res.transport_mem_size,
4289 ipa3_get_gsi_ver(ipa3_res.ipa_hw_type));
Amir Levy9659e592016-10-27 18:08:27 +03004290 if (result) {
4291 IPAERR("Failed to enable GSI FW\n");
4292 release_firmware(fw);
4293 return result;
4294 }
4295
4296 release_firmware(fw);
4297
4298 IPADBG("FW loading process is complete\n");
4299 return 0;
4300}
4301
4302static int ipa3_trigger_fw_loading_msms(void)
4303{
4304 void *subsystem_get_retval = NULL;
4305
4306 IPADBG("FW loading process initiated\n");
4307
4308 subsystem_get_retval = subsystem_get(IPA_SUBSYSTEM_NAME);
4309 if (IS_ERR_OR_NULL(subsystem_get_retval)) {
4310 IPAERR("Unable to trigger PIL process for FW loading\n");
4311 return -EINVAL;
4312 }
4313
4314 IPADBG("FW loading process is complete\n");
4315 return 0;
4316}
4317
4318static ssize_t ipa3_write(struct file *file, const char __user *buf,
4319 size_t count, loff_t *ppos)
4320{
4321 unsigned long missing;
4322 int result = -EINVAL;
4323
4324 char dbg_buff[16] = { 0 };
4325
4326 if (sizeof(dbg_buff) < count + 1)
4327 return -EFAULT;
4328
4329 missing = copy_from_user(dbg_buff, buf, count);
4330
4331 if (missing) {
4332 IPAERR("Unable to copy data from user\n");
4333 return -EFAULT;
4334 }
4335
4336 /* Prevent consequent calls from trying to load the FW again. */
4337 if (ipa3_is_ready())
4338 return count;
4339
Amir Levya59ed3f2017-03-05 17:30:55 +02004340 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
Amir Levy9659e592016-10-27 18:08:27 +03004341
Amir Levy54fe4d32017-03-16 11:21:49 +02004342 if (ipa3_is_msm_device()) {
Amir Levya59ed3f2017-03-05 17:30:55 +02004343 result = ipa3_trigger_fw_loading_msms();
Amir Levy54fe4d32017-03-16 11:21:49 +02004344 } else {
4345 if (!strcasecmp(dbg_buff, "MHI")) {
4346 ipa3_ctx->ipa_config_is_mhi = true;
4347 pr_info(
4348 "IPA is loading with MHI configuration\n");
4349 } else {
4350 pr_info(
4351 "IPA is loading with non MHI configuration\n");
4352 }
Amir Levya59ed3f2017-03-05 17:30:55 +02004353 result = ipa3_trigger_fw_loading_mdms();
Amir Levy54fe4d32017-03-16 11:21:49 +02004354 }
Amir Levya59ed3f2017-03-05 17:30:55 +02004355 /* No IPAv3.x chipsets that don't support FW loading */
Amir Levy9659e592016-10-27 18:08:27 +03004356
Amir Levya59ed3f2017-03-05 17:30:55 +02004357 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
Amir Levy9659e592016-10-27 18:08:27 +03004358
Amir Levya59ed3f2017-03-05 17:30:55 +02004359 if (result) {
4360 IPAERR("FW loading process has failed\n");
Ghanim Fodi24fee1c2017-02-12 15:25:53 +02004361 return result;
Utkarsh Saxenaded78142017-05-03 14:04:30 +05304362 } else {
4363 queue_work(ipa3_ctx->transport_power_mgmt_wq,
4364 &ipa3_post_init_work);
4365 }
Amir Levy9659e592016-10-27 18:08:27 +03004366 return count;
4367}
4368
/*
 * ipa3_tz_unlock_reg() - ask TrustZone (via an SCM call) to unlock the IPA
 * register regions listed in ipa3_ctx->ipa_tz_unlock_reg so the uC can
 * access them. No-op when the list is empty.
 *
 * Return: 0 on success (or nothing to do), -ENOMEM on allocation failure,
 * -EFAULT when the SCM call fails.
 */
static int ipa3_tz_unlock_reg(struct ipa3_context *ipa3_ctx)
{
	int i, size, ret, resp;
	struct tz_smmu_ipa_protect_region_iovec_s *ipa_tz_unlock_vec;
	struct tz_smmu_ipa_protect_region_s cmd_buf;

	if (ipa3_ctx && ipa3_ctx->ipa_tz_unlock_reg_num > 0) {
		/* iovec array is handed to TZ, so allocate page-aligned */
		size = ipa3_ctx->ipa_tz_unlock_reg_num *
			sizeof(struct tz_smmu_ipa_protect_region_iovec_s);
		ipa_tz_unlock_vec = kzalloc(PAGE_ALIGN(size), GFP_KERNEL);
		if (ipa_tz_unlock_vec == NULL)
			return -ENOMEM;

		for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) {
			/* addr ^ (addr & 0xFFF) clears the low 12 bits,
			 * i.e. aligns the address down to a 4KB page
			 */
			ipa_tz_unlock_vec[i].input_addr =
				ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr ^
				(ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr &
				0xFFF);
			ipa_tz_unlock_vec[i].output_addr =
				ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr ^
				(ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr &
				0xFFF);
			ipa_tz_unlock_vec[i].size =
				ipa3_ctx->ipa_tz_unlock_reg[i].size;
			ipa_tz_unlock_vec[i].attr = IPA_TZ_UNLOCK_ATTRIBUTE;
		}

		/* pass physical address of command buffer */
		cmd_buf.iovec_buf = virt_to_phys((void *)ipa_tz_unlock_vec);
		cmd_buf.size_bytes = size;

		/* flush cache to DDR so TZ sees the iovec contents */
		__cpuc_flush_dcache_area((void *)ipa_tz_unlock_vec, size);
		outer_flush_range(cmd_buf.iovec_buf, cmd_buf.iovec_buf + size);

		ret = scm_call(SCM_SVC_MP, TZ_MEM_PROTECT_REGION_ID, &cmd_buf,
				sizeof(cmd_buf), &resp, sizeof(resp));
		if (ret) {
			IPAERR("scm call SCM_SVC_MP failed: %d\n", ret);
			kfree(ipa_tz_unlock_vec);
			return -EFAULT;
		}
		kfree(ipa_tz_unlock_vec);
	}
	return 0;
}
4415
/*
 * ipa3_alloc_pkt_init() - pre-build an IP_PACKET_INIT immediate command per
 * pipe in a single DMA-coherent buffer, caching each command's physical
 * address in ipa3_ctx->pkt_init_imm[] and the opcode in pkt_init_imm_opcode.
 *
 * NOTE(review): the buffer is sized from the first (throw-away) command's
 * len and the per-pipe copies assume every constructed command has the same
 * len — appears to hold since only destination_pipe_index varies.
 *
 * Return: 0 on success, -ENOMEM on construction/allocation failure.
 */
static int ipa3_alloc_pkt_init(void)
{
	struct ipa_mem_buffer mem;
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	struct ipahal_imm_cmd_ip_packet_init cmd = {0};
	int i;

	/* build a template command once, only to learn opcode and len */
	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
		&cmd, false);
	if (!cmd_pyld) {
		IPAERR("failed to construct IMM cmd\n");
		return -ENOMEM;
	}
	ipa3_ctx->pkt_init_imm_opcode = cmd_pyld->opcode;

	/* one command slot per pipe, contiguous in DMA-coherent memory */
	mem.size = cmd_pyld->len * ipa3_ctx->ipa_num_pipes;
	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
		&mem.phys_base, GFP_KERNEL);
	if (!mem.base) {
		IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
		ipahal_destroy_imm_cmd(cmd_pyld);
		return -ENOMEM;
	}
	ipahal_destroy_imm_cmd(cmd_pyld);

	memset(mem.base, 0, mem.size);
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		/* rebuild the command targeting pipe i, copy into slot i */
		cmd.destination_pipe_index = i;
		cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
			&cmd, false);
		if (!cmd_pyld) {
			IPAERR("failed to construct IMM cmd\n");
			dma_free_coherent(ipa3_ctx->pdev,
				mem.size,
				mem.base,
				mem.phys_base);
			return -ENOMEM;
		}
		memcpy(mem.base + i * cmd_pyld->len, cmd_pyld->data,
			cmd_pyld->len);
		ipa3_ctx->pkt_init_imm[i] = mem.phys_base + i * cmd_pyld->len;
		ipahal_destroy_imm_cmd(cmd_pyld);
	}

	return 0;
}
4462
Amir Levy9659e592016-10-27 18:08:27 +03004463/**
4464* ipa3_pre_init() - Initialize the IPA Driver.
4465* This part contains all initialization which doesn't require IPA HW, such
4466* as structure allocations and initializations, register writes, etc.
4467*
* @resource_p: contains platform-specific values from the DTS file
4469* @pdev: The platform device structure representing the IPA driver
4470*
4471* Function initialization process:
Amir Levy54fe4d32017-03-16 11:21:49 +02004472* Allocate memory for the driver context data struct
4473* Initializing the ipa3_ctx with :
Amir Levy9659e592016-10-27 18:08:27 +03004474* 1)parsed values from the dts file
4475* 2)parameters passed to the module initialization
4476* 3)read HW values(such as core memory size)
Amir Levy54fe4d32017-03-16 11:21:49 +02004477* Map IPA core registers to CPU memory
4478* Restart IPA core(HW reset)
4479* Initialize the look-aside caches(kmem_cache/slab) for filter,
Amir Levy9659e592016-10-27 18:08:27 +03004480* routing and IPA-tree
Amir Levy54fe4d32017-03-16 11:21:49 +02004481* Create memory pool with 4 objects for DMA operations(each object
Amir Levy9659e592016-10-27 18:08:27 +03004482* is 512Bytes long), this object will be use for tx(A5->IPA)
Amir Levy54fe4d32017-03-16 11:21:49 +02004483* Initialize lists head(routing, hdr, system pipes)
4484* Initialize mutexes (for ipa_ctx and NAT memory mutexes)
4485* Initialize spinlocks (for list related to A5<->IPA pipes)
4486* Initialize 2 single-threaded work-queue named "ipa rx wq" and "ipa tx wq"
4487* Initialize Red-Black-Tree(s) for handles of header,routing rule,
4488* routing table ,filtering rule
4489* Initialize the filter block by committing IPV4 and IPV6 default rules
4490* Create empty routing table in system memory(no committing)
4491* Create a char-device for IPA
4492* Initialize IPA RM (resource manager)
4493* Configure GSI registers (in GSI case)
Amir Levy9659e592016-10-27 18:08:27 +03004494*/
4495static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
4496 struct device *ipa_dev)
4497{
4498 int result = 0;
4499 int i;
Amir Levy9659e592016-10-27 18:08:27 +03004500 struct ipa3_rt_tbl_set *rset;
4501 struct ipa_active_client_logging_info log_info;
4502
4503 IPADBG("IPA Driver initialization started\n");
4504
4505 ipa3_ctx = kzalloc(sizeof(*ipa3_ctx), GFP_KERNEL);
4506 if (!ipa3_ctx) {
4507 IPAERR(":kzalloc err.\n");
4508 result = -ENOMEM;
4509 goto fail_mem_ctx;
4510 }
4511
4512 ipa3_ctx->logbuf = ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa", 0);
Skylar Chang841c1452017-04-03 16:07:22 -07004513 if (ipa3_ctx->logbuf == NULL)
4514 IPAERR("failed to create IPC log, continue...\n");
Amir Levy9659e592016-10-27 18:08:27 +03004515
4516 ipa3_ctx->pdev = ipa_dev;
4517 ipa3_ctx->uc_pdev = ipa_dev;
4518 ipa3_ctx->smmu_present = smmu_info.present;
4519 if (!ipa3_ctx->smmu_present)
4520 ipa3_ctx->smmu_s1_bypass = true;
4521 else
4522 ipa3_ctx->smmu_s1_bypass = smmu_info.s1_bypass;
4523 ipa3_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
4524 ipa3_ctx->ipa_wrapper_size = resource_p->ipa_mem_size;
4525 ipa3_ctx->ipa_hw_type = resource_p->ipa_hw_type;
4526 ipa3_ctx->ipa3_hw_mode = resource_p->ipa3_hw_mode;
4527 ipa3_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
Amir Levy9659e592016-10-27 18:08:27 +03004528 ipa3_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt;
4529 ipa3_ctx->ipa_wdi2 = resource_p->ipa_wdi2;
4530 ipa3_ctx->use_64_bit_dma_mask = resource_p->use_64_bit_dma_mask;
4531 ipa3_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size;
4532 ipa3_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size;
4533 ipa3_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset;
4534 ipa3_ctx->tethered_flow_control = resource_p->tethered_flow_control;
Amir Levy9659e592016-10-27 18:08:27 +03004535 ipa3_ctx->ee = resource_p->ee;
4536 ipa3_ctx->apply_rg10_wa = resource_p->apply_rg10_wa;
4537 ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa;
4538 ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02004539 if (resource_p->ipa_tz_unlock_reg) {
4540 ipa3_ctx->ipa_tz_unlock_reg_num =
4541 resource_p->ipa_tz_unlock_reg_num;
4542 ipa3_ctx->ipa_tz_unlock_reg = kcalloc(
4543 ipa3_ctx->ipa_tz_unlock_reg_num,
4544 sizeof(*ipa3_ctx->ipa_tz_unlock_reg),
4545 GFP_KERNEL);
4546 if (ipa3_ctx->ipa_tz_unlock_reg == NULL) {
4547 result = -ENOMEM;
4548 goto fail_tz_unlock_reg;
4549 }
4550 for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) {
4551 ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr =
4552 resource_p->ipa_tz_unlock_reg[i].reg_addr;
4553 ipa3_ctx->ipa_tz_unlock_reg[i].size =
4554 resource_p->ipa_tz_unlock_reg[i].size;
4555 }
4556 }
4557
4558 /* unlock registers for uc */
4559 ipa3_tz_unlock_reg(ipa3_ctx);
Amir Levy9659e592016-10-27 18:08:27 +03004560
4561 /* default aggregation parameters */
4562 ipa3_ctx->aggregation_type = IPA_MBIM_16;
4563 ipa3_ctx->aggregation_byte_limit = 1;
4564 ipa3_ctx->aggregation_time_limit = 0;
4565
4566 ipa3_ctx->ctrl = kzalloc(sizeof(*ipa3_ctx->ctrl), GFP_KERNEL);
4567 if (!ipa3_ctx->ctrl) {
4568 IPAERR("memory allocation error for ctrl\n");
4569 result = -ENOMEM;
4570 goto fail_mem_ctrl;
4571 }
4572 result = ipa3_controller_static_bind(ipa3_ctx->ctrl,
4573 ipa3_ctx->ipa_hw_type);
4574 if (result) {
4575 IPAERR("fail to static bind IPA ctrl.\n");
4576 result = -EFAULT;
4577 goto fail_bind;
4578 }
4579
4580 result = ipa3_init_mem_partition(master_dev->of_node);
4581 if (result) {
4582 IPAERR(":ipa3_init_mem_partition failed!\n");
4583 result = -ENODEV;
4584 goto fail_init_mem_partition;
4585 }
4586
4587 if (ipa3_bus_scale_table) {
Ghanim Fodi6a831342017-03-07 18:19:15 +02004588 IPADBG("Use bus scaling info from device tree #usecases=%d\n",
4589 ipa3_bus_scale_table->num_usecases);
Amir Levy9659e592016-10-27 18:08:27 +03004590 ipa3_ctx->ctrl->msm_bus_data_ptr = ipa3_bus_scale_table;
4591 }
4592
Ghanim Fodi6a831342017-03-07 18:19:15 +02004593 /* get BUS handle */
4594 ipa3_ctx->ipa_bus_hdl =
4595 msm_bus_scale_register_client(
4596 ipa3_ctx->ctrl->msm_bus_data_ptr);
4597 if (!ipa3_ctx->ipa_bus_hdl) {
4598 IPAERR("fail to register with bus mgr!\n");
4599 result = -ENODEV;
4600 goto fail_bus_reg;
Amir Levy9659e592016-10-27 18:08:27 +03004601 }
4602
4603 /* get IPA clocks */
4604 result = ipa3_get_clks(master_dev);
4605 if (result)
4606 goto fail_clk;
4607
4608 /* init active_clients_log after getting ipa-clk */
4609 if (ipa3_active_clients_log_init())
4610 goto fail_init_active_client;
4611
4612 /* Enable ipa3_ctx->enable_clock_scaling */
4613 ipa3_ctx->enable_clock_scaling = 1;
4614 ipa3_ctx->curr_ipa_clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
4615
4616 /* enable IPA clocks explicitly to allow the initialization */
4617 ipa3_enable_clks();
4618
4619 /* setup IPA register access */
4620 IPADBG("Mapping 0x%x\n", resource_p->ipa_mem_base +
4621 ipa3_ctx->ctrl->ipa_reg_base_ofst);
4622 ipa3_ctx->mmio = ioremap(resource_p->ipa_mem_base +
4623 ipa3_ctx->ctrl->ipa_reg_base_ofst,
4624 resource_p->ipa_mem_size);
4625 if (!ipa3_ctx->mmio) {
4626 IPAERR(":ipa-base ioremap err.\n");
4627 result = -EFAULT;
4628 goto fail_remap;
4629 }
4630
4631 if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio,
4632 ipa3_ctx->pdev)) {
4633 IPAERR("fail to init ipahal\n");
4634 result = -EFAULT;
4635 goto fail_ipahal;
4636 }
4637
4638 result = ipa3_init_hw();
4639 if (result) {
4640 IPAERR(":error initializing HW.\n");
4641 result = -ENODEV;
4642 goto fail_init_hw;
4643 }
4644 IPADBG("IPA HW initialization sequence completed");
4645
4646 ipa3_ctx->ipa_num_pipes = ipa3_get_num_pipes();
4647 if (ipa3_ctx->ipa_num_pipes > IPA3_MAX_NUM_PIPES) {
4648 IPAERR("IPA has more pipes then supported! has %d, max %d\n",
4649 ipa3_ctx->ipa_num_pipes, IPA3_MAX_NUM_PIPES);
4650 result = -ENODEV;
4651 goto fail_init_hw;
4652 }
4653
Amir Levy9659e592016-10-27 18:08:27 +03004654 ipa3_ctx->ctrl->ipa_sram_read_settings();
4655 IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n",
4656 ipa3_ctx->smem_sz, ipa3_ctx->smem_restricted_bytes);
4657
4658 IPADBG("hdr_lcl=%u ip4_rt_hash=%u ip4_rt_nonhash=%u\n",
4659 ipa3_ctx->hdr_tbl_lcl, ipa3_ctx->ip4_rt_tbl_hash_lcl,
4660 ipa3_ctx->ip4_rt_tbl_nhash_lcl);
4661
4662 IPADBG("ip6_rt_hash=%u ip6_rt_nonhash=%u\n",
4663 ipa3_ctx->ip6_rt_tbl_hash_lcl, ipa3_ctx->ip6_rt_tbl_nhash_lcl);
4664
4665 IPADBG("ip4_flt_hash=%u ip4_flt_nonhash=%u\n",
4666 ipa3_ctx->ip4_flt_tbl_hash_lcl,
4667 ipa3_ctx->ip4_flt_tbl_nhash_lcl);
4668
4669 IPADBG("ip6_flt_hash=%u ip6_flt_nonhash=%u\n",
4670 ipa3_ctx->ip6_flt_tbl_hash_lcl,
4671 ipa3_ctx->ip6_flt_tbl_nhash_lcl);
4672
4673 if (ipa3_ctx->smem_reqd_sz > ipa3_ctx->smem_sz) {
4674 IPAERR("SW expect more core memory, needed %d, avail %d\n",
4675 ipa3_ctx->smem_reqd_sz, ipa3_ctx->smem_sz);
4676 result = -ENOMEM;
4677 goto fail_init_hw;
4678 }
4679
4680 mutex_init(&ipa3_ctx->ipa3_active_clients.mutex);
Amir Levy9659e592016-10-27 18:08:27 +03004681 IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
4682 ipa3_active_clients_log_inc(&log_info, false);
Skylar Chang242952b2017-07-20 15:04:05 -07004683 atomic_set(&ipa3_ctx->ipa3_active_clients.cnt, 1);
Amir Levy9659e592016-10-27 18:08:27 +03004684
Amir Levy9659e592016-10-27 18:08:27 +03004685 /* Create workqueues for power management */
4686 ipa3_ctx->power_mgmt_wq =
4687 create_singlethread_workqueue("ipa_power_mgmt");
4688 if (!ipa3_ctx->power_mgmt_wq) {
4689 IPAERR("failed to create power mgmt wq\n");
4690 result = -ENOMEM;
4691 goto fail_init_hw;
4692 }
4693
4694 ipa3_ctx->transport_power_mgmt_wq =
4695 create_singlethread_workqueue("transport_power_mgmt");
4696 if (!ipa3_ctx->transport_power_mgmt_wq) {
4697 IPAERR("failed to create transport power mgmt wq\n");
4698 result = -ENOMEM;
4699 goto fail_create_transport_wq;
4700 }
4701
Sridhar Ancha99b505b2016-04-21 23:11:10 +05304702 mutex_init(&ipa3_ctx->transport_pm.transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03004703
4704 /* init the lookaside cache */
4705 ipa3_ctx->flt_rule_cache = kmem_cache_create("IPA_FLT",
4706 sizeof(struct ipa3_flt_entry), 0, 0, NULL);
4707 if (!ipa3_ctx->flt_rule_cache) {
4708 IPAERR(":ipa flt cache create failed\n");
4709 result = -ENOMEM;
4710 goto fail_flt_rule_cache;
4711 }
4712 ipa3_ctx->rt_rule_cache = kmem_cache_create("IPA_RT",
4713 sizeof(struct ipa3_rt_entry), 0, 0, NULL);
4714 if (!ipa3_ctx->rt_rule_cache) {
4715 IPAERR(":ipa rt cache create failed\n");
4716 result = -ENOMEM;
4717 goto fail_rt_rule_cache;
4718 }
4719 ipa3_ctx->hdr_cache = kmem_cache_create("IPA_HDR",
4720 sizeof(struct ipa3_hdr_entry), 0, 0, NULL);
4721 if (!ipa3_ctx->hdr_cache) {
4722 IPAERR(":ipa hdr cache create failed\n");
4723 result = -ENOMEM;
4724 goto fail_hdr_cache;
4725 }
4726 ipa3_ctx->hdr_offset_cache =
4727 kmem_cache_create("IPA_HDR_OFFSET",
4728 sizeof(struct ipa_hdr_offset_entry), 0, 0, NULL);
4729 if (!ipa3_ctx->hdr_offset_cache) {
4730 IPAERR(":ipa hdr off cache create failed\n");
4731 result = -ENOMEM;
4732 goto fail_hdr_offset_cache;
4733 }
4734 ipa3_ctx->hdr_proc_ctx_cache = kmem_cache_create("IPA_HDR_PROC_CTX",
4735 sizeof(struct ipa3_hdr_proc_ctx_entry), 0, 0, NULL);
4736 if (!ipa3_ctx->hdr_proc_ctx_cache) {
4737 IPAERR(":ipa hdr proc ctx cache create failed\n");
4738 result = -ENOMEM;
4739 goto fail_hdr_proc_ctx_cache;
4740 }
4741 ipa3_ctx->hdr_proc_ctx_offset_cache =
4742 kmem_cache_create("IPA_HDR_PROC_CTX_OFFSET",
4743 sizeof(struct ipa3_hdr_proc_ctx_offset_entry), 0, 0, NULL);
4744 if (!ipa3_ctx->hdr_proc_ctx_offset_cache) {
4745 IPAERR(":ipa hdr proc ctx off cache create failed\n");
4746 result = -ENOMEM;
4747 goto fail_hdr_proc_ctx_offset_cache;
4748 }
4749 ipa3_ctx->rt_tbl_cache = kmem_cache_create("IPA_RT_TBL",
4750 sizeof(struct ipa3_rt_tbl), 0, 0, NULL);
4751 if (!ipa3_ctx->rt_tbl_cache) {
4752 IPAERR(":ipa rt tbl cache create failed\n");
4753 result = -ENOMEM;
4754 goto fail_rt_tbl_cache;
4755 }
4756 ipa3_ctx->tx_pkt_wrapper_cache =
4757 kmem_cache_create("IPA_TX_PKT_WRAPPER",
4758 sizeof(struct ipa3_tx_pkt_wrapper), 0, 0, NULL);
4759 if (!ipa3_ctx->tx_pkt_wrapper_cache) {
4760 IPAERR(":ipa tx pkt wrapper cache create failed\n");
4761 result = -ENOMEM;
4762 goto fail_tx_pkt_wrapper_cache;
4763 }
4764 ipa3_ctx->rx_pkt_wrapper_cache =
4765 kmem_cache_create("IPA_RX_PKT_WRAPPER",
4766 sizeof(struct ipa3_rx_pkt_wrapper), 0, 0, NULL);
4767 if (!ipa3_ctx->rx_pkt_wrapper_cache) {
4768 IPAERR(":ipa rx pkt wrapper cache create failed\n");
4769 result = -ENOMEM;
4770 goto fail_rx_pkt_wrapper_cache;
4771 }
4772
Skylar Chang6c4bec92017-04-21 16:10:14 -07004773 /* allocate memory for DMA_TASK workaround */
4774 result = ipa3_allocate_dma_task_for_gsi();
4775 if (result) {
4776 IPAERR("failed to allocate dma task\n");
4777 goto fail_dma_task;
4778 }
4779
Amir Levy9659e592016-10-27 18:08:27 +03004780 /* init the various list heads */
4781 INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_hdr_entry_list);
4782 for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
4783 INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_offset_list[i]);
4784 INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_free_offset_list[i]);
4785 }
4786 INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list);
4787 for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
4788 INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i]);
4789 INIT_LIST_HEAD(&ipa3_ctx->
4790 hdr_proc_ctx_tbl.head_free_offset_list[i]);
4791 }
4792 INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
4793 INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
Amir Levy9659e592016-10-27 18:08:27 +03004794
4795 rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
4796 INIT_LIST_HEAD(&rset->head_rt_tbl_list);
4797 rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
4798 INIT_LIST_HEAD(&rset->head_rt_tbl_list);
4799
4800 INIT_LIST_HEAD(&ipa3_ctx->intf_list);
4801 INIT_LIST_HEAD(&ipa3_ctx->msg_list);
4802 INIT_LIST_HEAD(&ipa3_ctx->pull_msg_list);
4803 init_waitqueue_head(&ipa3_ctx->msg_waitq);
4804 mutex_init(&ipa3_ctx->msg_lock);
4805
4806 mutex_init(&ipa3_ctx->lock);
4807 mutex_init(&ipa3_ctx->nat_mem.lock);
4808
4809 idr_init(&ipa3_ctx->ipa_idr);
4810 spin_lock_init(&ipa3_ctx->idr_lock);
4811
4812 /* wlan related member */
4813 memset(&ipa3_ctx->wc_memb, 0, sizeof(ipa3_ctx->wc_memb));
4814 spin_lock_init(&ipa3_ctx->wc_memb.wlan_spinlock);
4815 spin_lock_init(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
4816 INIT_LIST_HEAD(&ipa3_ctx->wc_memb.wlan_comm_desc_list);
4817
Amir Levy9659e592016-10-27 18:08:27 +03004818 ipa3_ctx->class = class_create(THIS_MODULE, DRV_NAME);
4819
4820 result = alloc_chrdev_region(&ipa3_ctx->dev_num, 0, 1, DRV_NAME);
4821 if (result) {
4822 IPAERR("alloc_chrdev_region err.\n");
4823 result = -ENODEV;
4824 goto fail_alloc_chrdev_region;
4825 }
4826
4827 ipa3_ctx->dev = device_create(ipa3_ctx->class, NULL, ipa3_ctx->dev_num,
4828 ipa3_ctx, DRV_NAME);
4829 if (IS_ERR(ipa3_ctx->dev)) {
4830 IPAERR(":device_create err.\n");
4831 result = -ENODEV;
4832 goto fail_device_create;
4833 }
4834
Amir Levy9659e592016-10-27 18:08:27 +03004835 if (ipa3_create_nat_device()) {
4836 IPAERR("unable to create nat device\n");
4837 result = -ENODEV;
4838 goto fail_nat_dev_add;
4839 }
4840
4841 /* Create a wakeup source. */
4842 wakeup_source_init(&ipa3_ctx->w_lock, "IPA_WS");
4843 spin_lock_init(&ipa3_ctx->wakelock_ref_cnt.spinlock);
4844
4845 /* Initialize IPA RM (resource manager) */
4846 result = ipa_rm_initialize();
4847 if (result) {
4848 IPAERR("RM initialization failed (%d)\n", -result);
4849 result = -ENODEV;
4850 goto fail_ipa_rm_init;
4851 }
4852 IPADBG("IPA resource manager initialized");
4853
4854 result = ipa3_create_apps_resource();
4855 if (result) {
4856 IPAERR("Failed to create APPS_CONS resource\n");
4857 result = -ENODEV;
4858 goto fail_create_apps_resource;
4859 }
4860
Skylar Changcd3902d2017-03-27 18:08:27 -07004861 result = ipa3_alloc_pkt_init();
4862 if (result) {
4863 IPAERR("Failed to alloc pkt_init payload\n");
4864 result = -ENODEV;
4865 goto fail_create_apps_resource;
4866 }
4867
Amir Levy12ef0912016-08-30 09:27:34 +03004868 if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)
4869 ipa3_enable_dcd();
4870
Amir Levy9659e592016-10-27 18:08:27 +03004871 INIT_LIST_HEAD(&ipa3_ctx->ipa_ready_cb_list);
4872
4873 init_completion(&ipa3_ctx->init_completion_obj);
Skylar Chang0c17c7d2016-10-31 09:57:54 -07004874 init_completion(&ipa3_ctx->uc_loaded_completion_obj);
Amir Levy9659e592016-10-27 18:08:27 +03004875
4876 /*
Amir Levya59ed3f2017-03-05 17:30:55 +02004877 * We can't register the GSI driver yet, as it expects
Amir Levy9659e592016-10-27 18:08:27 +03004878 * the GSI FW to be up and running before the registration.
Amir Levya59ed3f2017-03-05 17:30:55 +02004879 *
4880 * For IPA3.0, the GSI configuration is done by the GSI driver.
4881 * For IPA3.1 (and on), the GSI configuration is done by TZ.
Amir Levy9659e592016-10-27 18:08:27 +03004882 */
Amir Levya59ed3f2017-03-05 17:30:55 +02004883 if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0) {
4884 result = ipa3_gsi_pre_fw_load_init();
4885 if (result) {
4886 IPAERR("gsi pre FW loading config failed\n");
4887 result = -ENODEV;
4888 goto fail_ipa_init_interrupts;
Amir Levy9659e592016-10-27 18:08:27 +03004889 }
4890 }
Amir Levy9659e592016-10-27 18:08:27 +03004891
Utkarsh Saxenaded78142017-05-03 14:04:30 +05304892 cdev_init(&ipa3_ctx->cdev, &ipa3_drv_fops);
4893 ipa3_ctx->cdev.owner = THIS_MODULE;
4894 ipa3_ctx->cdev.ops = &ipa3_drv_fops; /* from LDD3 */
4895
4896 result = cdev_add(&ipa3_ctx->cdev, ipa3_ctx->dev_num, 1);
4897 if (result) {
4898 IPAERR(":cdev_add err=%d\n", -result);
4899 result = -ENODEV;
4900 goto fail_cdev_add;
4901 }
4902 IPADBG("ipa cdev added successful. major:%d minor:%d\n",
4903 MAJOR(ipa3_ctx->dev_num),
4904 MINOR(ipa3_ctx->dev_num));
Amir Levy9659e592016-10-27 18:08:27 +03004905 return 0;
4906
Utkarsh Saxenaded78142017-05-03 14:04:30 +05304907fail_cdev_add:
Amir Levy9659e592016-10-27 18:08:27 +03004908fail_ipa_init_interrupts:
4909 ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
4910fail_create_apps_resource:
4911 ipa_rm_exit();
4912fail_ipa_rm_init:
4913fail_nat_dev_add:
Amir Levy9659e592016-10-27 18:08:27 +03004914 device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
4915fail_device_create:
4916 unregister_chrdev_region(ipa3_ctx->dev_num, 1);
4917fail_alloc_chrdev_region:
Skylar Chang6c4bec92017-04-21 16:10:14 -07004918 ipa3_free_dma_task_for_gsi();
4919fail_dma_task:
Amir Levy9659e592016-10-27 18:08:27 +03004920 idr_destroy(&ipa3_ctx->ipa_idr);
Amir Levy9659e592016-10-27 18:08:27 +03004921 kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
4922fail_rx_pkt_wrapper_cache:
4923 kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
4924fail_tx_pkt_wrapper_cache:
4925 kmem_cache_destroy(ipa3_ctx->rt_tbl_cache);
4926fail_rt_tbl_cache:
4927 kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache);
4928fail_hdr_proc_ctx_offset_cache:
4929 kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache);
4930fail_hdr_proc_ctx_cache:
4931 kmem_cache_destroy(ipa3_ctx->hdr_offset_cache);
4932fail_hdr_offset_cache:
4933 kmem_cache_destroy(ipa3_ctx->hdr_cache);
4934fail_hdr_cache:
4935 kmem_cache_destroy(ipa3_ctx->rt_rule_cache);
4936fail_rt_rule_cache:
4937 kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
4938fail_flt_rule_cache:
4939 destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
4940fail_create_transport_wq:
4941 destroy_workqueue(ipa3_ctx->power_mgmt_wq);
4942fail_init_hw:
4943 ipahal_destroy();
4944fail_ipahal:
4945 iounmap(ipa3_ctx->mmio);
4946fail_remap:
4947 ipa3_disable_clks();
4948 ipa3_active_clients_log_destroy();
4949fail_init_active_client:
Ghanim Fodi6a831342017-03-07 18:19:15 +02004950 if (ipa3_clk)
4951 clk_put(ipa3_clk);
4952 ipa3_clk = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03004953fail_clk:
4954 msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
4955fail_bus_reg:
Ghanim Fodi6a831342017-03-07 18:19:15 +02004956 if (ipa3_bus_scale_table) {
4957 msm_bus_cl_clear_pdata(ipa3_bus_scale_table);
4958 ipa3_bus_scale_table = NULL;
4959 }
Amir Levy9659e592016-10-27 18:08:27 +03004960fail_init_mem_partition:
4961fail_bind:
4962 kfree(ipa3_ctx->ctrl);
4963fail_mem_ctrl:
Gidon Studinski3021a6f2016-11-10 12:48:48 +02004964 kfree(ipa3_ctx->ipa_tz_unlock_reg);
4965fail_tz_unlock_reg:
Skylar Chang841c1452017-04-03 16:07:22 -07004966 if (ipa3_ctx->logbuf)
4967 ipc_log_context_destroy(ipa3_ctx->logbuf);
Amir Levy9659e592016-10-27 18:08:27 +03004968 kfree(ipa3_ctx);
4969 ipa3_ctx = NULL;
4970fail_mem_ctx:
4971 return result;
4972}
4973
4974static int get_ipa_dts_configuration(struct platform_device *pdev,
4975 struct ipa3_plat_drv_res *ipa_drv_res)
4976{
Gidon Studinski3021a6f2016-11-10 12:48:48 +02004977 int i, result, pos;
Amir Levy9659e592016-10-27 18:08:27 +03004978 struct resource *resource;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02004979 u32 *ipa_tz_unlock_reg;
4980 int elem_num;
Amir Levy9659e592016-10-27 18:08:27 +03004981
4982 /* initialize ipa3_res */
4983 ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
4984 ipa_drv_res->ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
4985 ipa_drv_res->ipa_hw_type = 0;
4986 ipa_drv_res->ipa3_hw_mode = 0;
Amir Levy9659e592016-10-27 18:08:27 +03004987 ipa_drv_res->modem_cfg_emb_pipe_flt = false;
4988 ipa_drv_res->ipa_wdi2 = false;
4989 ipa_drv_res->use_64_bit_dma_mask = false;
Ghanim Fodi6a831342017-03-07 18:19:15 +02004990 ipa_drv_res->use_bw_vote = false;
Amir Levy9659e592016-10-27 18:08:27 +03004991 ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
4992 ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
4993 ipa_drv_res->apply_rg10_wa = false;
4994 ipa_drv_res->gsi_ch20_wa = false;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02004995 ipa_drv_res->ipa_tz_unlock_reg_num = 0;
4996 ipa_drv_res->ipa_tz_unlock_reg = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03004997
4998 /* Get IPA HW Version */
4999 result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
5000 &ipa_drv_res->ipa_hw_type);
5001 if ((result) || (ipa_drv_res->ipa_hw_type == 0)) {
5002 IPAERR(":get resource failed for ipa-hw-ver!\n");
5003 return -ENODEV;
5004 }
5005 IPADBG(": ipa_hw_type = %d", ipa_drv_res->ipa_hw_type);
5006
5007 if (ipa_drv_res->ipa_hw_type < IPA_HW_v3_0) {
5008 IPAERR(":IPA version below 3.0 not supported!\n");
5009 return -ENODEV;
5010 }
5011
5012 /* Get IPA HW mode */
5013 result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-mode",
5014 &ipa_drv_res->ipa3_hw_mode);
5015 if (result)
5016 IPADBG("using default (IPA_MODE_NORMAL) for ipa-hw-mode\n");
5017 else
5018 IPADBG(": found ipa_drv_res->ipa3_hw_mode = %d",
5019 ipa_drv_res->ipa3_hw_mode);
5020
5021 /* Get IPA WAN / LAN RX pool size */
5022 result = of_property_read_u32(pdev->dev.of_node,
5023 "qcom,wan-rx-ring-size",
5024 &ipa_drv_res->wan_rx_ring_size);
5025 if (result)
5026 IPADBG("using default for wan-rx-ring-size = %u\n",
5027 ipa_drv_res->wan_rx_ring_size);
5028 else
5029 IPADBG(": found ipa_drv_res->wan-rx-ring-size = %u",
5030 ipa_drv_res->wan_rx_ring_size);
5031
5032 result = of_property_read_u32(pdev->dev.of_node,
5033 "qcom,lan-rx-ring-size",
5034 &ipa_drv_res->lan_rx_ring_size);
5035 if (result)
5036 IPADBG("using default for lan-rx-ring-size = %u\n",
5037 ipa_drv_res->lan_rx_ring_size);
5038 else
5039 IPADBG(": found ipa_drv_res->lan-rx-ring-size = %u",
5040 ipa_drv_res->lan_rx_ring_size);
5041
5042 ipa_drv_res->use_ipa_teth_bridge =
5043 of_property_read_bool(pdev->dev.of_node,
5044 "qcom,use-ipa-tethering-bridge");
5045 IPADBG(": using TBDr = %s",
5046 ipa_drv_res->use_ipa_teth_bridge
5047 ? "True" : "False");
5048
Amir Levy9659e592016-10-27 18:08:27 +03005049 ipa_drv_res->modem_cfg_emb_pipe_flt =
5050 of_property_read_bool(pdev->dev.of_node,
5051 "qcom,modem-cfg-emb-pipe-flt");
5052 IPADBG(": modem configure embedded pipe filtering = %s\n",
5053 ipa_drv_res->modem_cfg_emb_pipe_flt
5054 ? "True" : "False");
5055
5056 ipa_drv_res->ipa_wdi2 =
5057 of_property_read_bool(pdev->dev.of_node,
5058 "qcom,ipa-wdi2");
5059 IPADBG(": WDI-2.0 = %s\n",
5060 ipa_drv_res->ipa_wdi2
5061 ? "True" : "False");
5062
5063 ipa_drv_res->use_64_bit_dma_mask =
5064 of_property_read_bool(pdev->dev.of_node,
5065 "qcom,use-64-bit-dma-mask");
5066 IPADBG(": use_64_bit_dma_mask = %s\n",
5067 ipa_drv_res->use_64_bit_dma_mask
5068 ? "True" : "False");
5069
Ghanim Fodi6a831342017-03-07 18:19:15 +02005070 ipa_drv_res->use_bw_vote =
5071 of_property_read_bool(pdev->dev.of_node,
5072 "qcom,bandwidth-vote-for-ipa");
5073 IPADBG(": use_bw_vote = %s\n",
5074 ipa_drv_res->use_bw_vote
5075 ? "True" : "False");
5076
Amir Levy9659e592016-10-27 18:08:27 +03005077 ipa_drv_res->skip_uc_pipe_reset =
5078 of_property_read_bool(pdev->dev.of_node,
5079 "qcom,skip-uc-pipe-reset");
5080 IPADBG(": skip uC pipe reset = %s\n",
5081 ipa_drv_res->skip_uc_pipe_reset
5082 ? "True" : "False");
5083
5084 ipa_drv_res->tethered_flow_control =
5085 of_property_read_bool(pdev->dev.of_node,
5086 "qcom,tethered-flow-control");
5087 IPADBG(": Use apps based flow control = %s\n",
5088 ipa_drv_res->tethered_flow_control
5089 ? "True" : "False");
5090
Amir Levy9659e592016-10-27 18:08:27 +03005091 /* Get IPA wrapper address */
5092 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
5093 "ipa-base");
5094 if (!resource) {
5095 IPAERR(":get resource failed for ipa-base!\n");
5096 return -ENODEV;
5097 }
5098 ipa_drv_res->ipa_mem_base = resource->start;
5099 ipa_drv_res->ipa_mem_size = resource_size(resource);
5100 IPADBG(": ipa-base = 0x%x, size = 0x%x\n",
5101 ipa_drv_res->ipa_mem_base,
5102 ipa_drv_res->ipa_mem_size);
5103
5104 smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
5105 smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
5106
Amir Levya59ed3f2017-03-05 17:30:55 +02005107 /* Get IPA GSI address */
5108 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
5109 "gsi-base");
5110 if (!resource) {
5111 IPAERR(":get resource failed for gsi-base!\n");
5112 return -ENODEV;
Amir Levy9659e592016-10-27 18:08:27 +03005113 }
Amir Levya59ed3f2017-03-05 17:30:55 +02005114 ipa_drv_res->transport_mem_base = resource->start;
5115 ipa_drv_res->transport_mem_size = resource_size(resource);
5116 IPADBG(": gsi-base = 0x%x, size = 0x%x\n",
5117 ipa_drv_res->transport_mem_base,
5118 ipa_drv_res->transport_mem_size);
5119
5120 /* Get IPA GSI IRQ number */
5121 resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
5122 "gsi-irq");
5123 if (!resource) {
5124 IPAERR(":get resource failed for gsi-irq!\n");
5125 return -ENODEV;
5126 }
5127 ipa_drv_res->transport_irq = resource->start;
5128 IPADBG(": gsi-irq = %d\n", ipa_drv_res->transport_irq);
Amir Levy9659e592016-10-27 18:08:27 +03005129
5130 /* Get IPA pipe mem start ofst */
5131 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
5132 "ipa-pipe-mem");
5133 if (!resource) {
5134 IPADBG(":not using pipe memory - resource nonexisting\n");
5135 } else {
5136 ipa_drv_res->ipa_pipe_mem_start_ofst = resource->start;
5137 ipa_drv_res->ipa_pipe_mem_size = resource_size(resource);
5138 IPADBG(":using pipe memory - at 0x%x of size 0x%x\n",
5139 ipa_drv_res->ipa_pipe_mem_start_ofst,
5140 ipa_drv_res->ipa_pipe_mem_size);
5141 }
5142
5143 /* Get IPA IRQ number */
5144 resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
5145 "ipa-irq");
5146 if (!resource) {
5147 IPAERR(":get resource failed for ipa-irq!\n");
5148 return -ENODEV;
5149 }
5150 ipa_drv_res->ipa_irq = resource->start;
5151 IPADBG(":ipa-irq = %d\n", ipa_drv_res->ipa_irq);
5152
5153 result = of_property_read_u32(pdev->dev.of_node, "qcom,ee",
5154 &ipa_drv_res->ee);
5155 if (result)
5156 ipa_drv_res->ee = 0;
5157
5158 ipa_drv_res->apply_rg10_wa =
5159 of_property_read_bool(pdev->dev.of_node,
5160 "qcom,use-rg10-limitation-mitigation");
5161 IPADBG(": Use Register Group 10 limitation mitigation = %s\n",
5162 ipa_drv_res->apply_rg10_wa
5163 ? "True" : "False");
5164
5165 ipa_drv_res->gsi_ch20_wa =
5166 of_property_read_bool(pdev->dev.of_node,
5167 "qcom,do-not-use-ch-gsi-20");
5168 IPADBG(": GSI CH 20 WA is = %s\n",
5169 ipa_drv_res->apply_rg10_wa
5170 ? "Needed" : "Not needed");
5171
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005172 elem_num = of_property_count_elems_of_size(pdev->dev.of_node,
5173 "qcom,ipa-tz-unlock-reg", sizeof(u32));
5174
5175 if (elem_num > 0 && elem_num % 2 == 0) {
5176 ipa_drv_res->ipa_tz_unlock_reg_num = elem_num / 2;
5177
5178 ipa_tz_unlock_reg = kcalloc(elem_num, sizeof(u32), GFP_KERNEL);
5179 if (ipa_tz_unlock_reg == NULL)
5180 return -ENOMEM;
5181
5182 ipa_drv_res->ipa_tz_unlock_reg = kcalloc(
5183 ipa_drv_res->ipa_tz_unlock_reg_num,
5184 sizeof(*ipa_drv_res->ipa_tz_unlock_reg),
5185 GFP_KERNEL);
5186 if (ipa_drv_res->ipa_tz_unlock_reg == NULL) {
5187 kfree(ipa_tz_unlock_reg);
5188 return -ENOMEM;
5189 }
5190
5191 if (of_property_read_u32_array(pdev->dev.of_node,
5192 "qcom,ipa-tz-unlock-reg", ipa_tz_unlock_reg,
5193 elem_num)) {
5194 IPAERR("failed to read register addresses\n");
5195 kfree(ipa_tz_unlock_reg);
5196 kfree(ipa_drv_res->ipa_tz_unlock_reg);
5197 return -EFAULT;
5198 }
5199
5200 pos = 0;
5201 for (i = 0; i < ipa_drv_res->ipa_tz_unlock_reg_num; i++) {
5202 ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr =
5203 ipa_tz_unlock_reg[pos++];
5204 ipa_drv_res->ipa_tz_unlock_reg[i].size =
5205 ipa_tz_unlock_reg[pos++];
5206 IPADBG("tz unlock reg %d: addr 0x%pa size %d\n", i,
5207 &ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr,
5208 ipa_drv_res->ipa_tz_unlock_reg[i].size);
5209 }
5210 kfree(ipa_tz_unlock_reg);
5211 }
Amir Levy9659e592016-10-27 18:08:27 +03005212 return 0;
5213}
5214
5215static int ipa_smmu_wlan_cb_probe(struct device *dev)
5216{
5217 struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx();
Amir Levy9659e592016-10-27 18:08:27 +03005218 int atomic_ctx = 1;
5219 int fast = 1;
5220 int bypass = 1;
5221 int ret;
5222 u32 add_map_size;
5223 const u32 *add_map;
5224 int i;
5225
5226 IPADBG("sub pdev=%p\n", dev);
5227
5228 cb->dev = dev;
Amir Levyf5625342016-12-25 10:21:02 +02005229 cb->iommu = iommu_domain_alloc(dev->bus);
Amir Levy9659e592016-10-27 18:08:27 +03005230 if (!cb->iommu) {
5231 IPAERR("could not alloc iommu domain\n");
5232 /* assume this failure is because iommu driver is not ready */
5233 return -EPROBE_DEFER;
5234 }
5235 cb->valid = true;
5236
Amir Levy9659e592016-10-27 18:08:27 +03005237 if (smmu_info.s1_bypass) {
5238 if (iommu_domain_set_attr(cb->iommu,
5239 DOMAIN_ATTR_S1_BYPASS,
5240 &bypass)) {
5241 IPAERR("couldn't set bypass\n");
5242 cb->valid = false;
5243 return -EIO;
5244 }
5245 IPADBG("SMMU S1 BYPASS\n");
5246 } else {
5247 if (iommu_domain_set_attr(cb->iommu,
5248 DOMAIN_ATTR_ATOMIC,
5249 &atomic_ctx)) {
5250 IPAERR("couldn't disable coherent HTW\n");
5251 cb->valid = false;
5252 return -EIO;
5253 }
5254 IPADBG("SMMU ATTR ATOMIC\n");
5255
5256 if (smmu_info.fast_map) {
5257 if (iommu_domain_set_attr(cb->iommu,
5258 DOMAIN_ATTR_FAST,
5259 &fast)) {
5260 IPAERR("couldn't set fast map\n");
5261 cb->valid = false;
5262 return -EIO;
5263 }
5264 IPADBG("SMMU fast map set\n");
5265 }
5266 }
5267
5268 ret = iommu_attach_device(cb->iommu, dev);
5269 if (ret) {
5270 IPAERR("could not attach device ret=%d\n", ret);
5271 cb->valid = false;
5272 return ret;
5273 }
5274 /* MAP ipa-uc ram */
5275 add_map = of_get_property(dev->of_node,
5276 "qcom,additional-mapping", &add_map_size);
5277 if (add_map) {
5278 /* mapping size is an array of 3-tuple of u32 */
5279 if (add_map_size % (3 * sizeof(u32))) {
5280 IPAERR("wrong additional mapping format\n");
5281 cb->valid = false;
5282 return -EFAULT;
5283 }
5284
5285 /* iterate of each entry of the additional mapping array */
5286 for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
5287 u32 iova = be32_to_cpu(add_map[i]);
5288 u32 pa = be32_to_cpu(add_map[i + 1]);
5289 u32 size = be32_to_cpu(add_map[i + 2]);
5290 unsigned long iova_p;
5291 phys_addr_t pa_p;
5292 u32 size_p;
5293
5294 IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
5295 iova_p, pa_p, size_p);
5296 IPADBG("mapping 0x%lx to 0x%pa size %d\n",
5297 iova_p, &pa_p, size_p);
5298 ipa3_iommu_map(cb->iommu,
5299 iova_p, pa_p, size_p,
Amir Levyf5625342016-12-25 10:21:02 +02005300 IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
Amir Levy9659e592016-10-27 18:08:27 +03005301 }
5302 }
5303 return 0;
5304}
5305
/**
 * ipa_smmu_uc_cb_probe() - probe the IPA uC SMMU context bank
 * @dev: uC context-bank device
 *
 * Reads the uC IOVA window from "qcom,iova-mapping", sets the device DMA
 * mask (64- or 32-bit per smmu_info), creates an ARM IOMMU mapping over
 * the window, applies the S1-bypass / atomic / fast-map attributes per
 * smmu_info, and attaches the device. On success records the device as
 * ipa3_ctx->uc_pdev and initializes cb->next_addr to the top of the
 * window.
 *
 * Return: 0 on success, -EPROBE_DEFER if the iommu driver is not ready,
 * or a negative errno on failure. Every failure after mapping creation
 * releases the mapping and marks the context bank invalid.
 */
static int ipa_smmu_uc_cb_probe(struct device *dev)
{
	struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
	int atomic_ctx = 1;
	int bypass = 1;
	int fast = 1;
	int ret;
	u32 iova_ap_mapping[2];

	IPADBG("UC CB PROBE sub pdev=%p\n", dev);

	/* IOVA window: [0] = start address, [1] = size */
	ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
			iova_ap_mapping, 2);
	if (ret) {
		IPAERR("Fail to read UC start/size iova addresses\n");
		return ret;
	}
	cb->va_start = iova_ap_mapping[0];
	cb->va_size = iova_ap_mapping[1];
	cb->va_end = cb->va_start + cb->va_size;
	IPADBG("UC va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);

	/* DMA mask must be set before any mapping is created */
	if (smmu_info.use_64_bit_dma_mask) {
		if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
				dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
			IPAERR("DMA set 64bit mask failed\n");
			return -EOPNOTSUPP;
		}
	} else {
		if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
				dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
			IPAERR("DMA set 32bit mask failed\n");
			return -EOPNOTSUPP;
		}
	}
	IPADBG("UC CB PROBE=%p create IOMMU mapping\n", dev);

	cb->dev = dev;
	cb->mapping = arm_iommu_create_mapping(dev->bus,
			cb->va_start, cb->va_size);
	if (IS_ERR_OR_NULL(cb->mapping)) {
		IPADBG("Fail to create mapping\n");
		/* assume this failure is because iommu driver is not ready */
		return -EPROBE_DEFER;
	}
	IPADBG("SMMU mapping created\n");
	cb->valid = true;

	/* domain attributes must be set before attaching the device */
	IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
	if (smmu_info.s1_bypass) {
		if (iommu_domain_set_attr(cb->mapping->domain,
				DOMAIN_ATTR_S1_BYPASS,
				&bypass)) {
			IPAERR("couldn't set bypass\n");
			arm_iommu_release_mapping(cb->mapping);
			cb->valid = false;
			return -EIO;
		}
		IPADBG("SMMU S1 BYPASS\n");
	} else {
		if (iommu_domain_set_attr(cb->mapping->domain,
				DOMAIN_ATTR_ATOMIC,
				&atomic_ctx)) {
			IPAERR("couldn't set domain as atomic\n");
			arm_iommu_release_mapping(cb->mapping);
			cb->valid = false;
			return -EIO;
		}
		IPADBG("SMMU atomic set\n");

		/* fast map is only applied when requested via smmu_info */
		if (smmu_info.fast_map) {
			if (iommu_domain_set_attr(cb->mapping->domain,
					DOMAIN_ATTR_FAST,
					&fast)) {
				IPAERR("couldn't set fast map\n");
				arm_iommu_release_mapping(cb->mapping);
				cb->valid = false;
				return -EIO;
			}
			IPADBG("SMMU fast map set\n");
		}
	}

	IPADBG("UC CB PROBE sub pdev=%p attaching IOMMU device\n", dev);
	ret = arm_iommu_attach_device(cb->dev, cb->mapping);
	if (ret) {
		IPAERR("could not attach device ret=%d\n", ret);
		arm_iommu_release_mapping(cb->mapping);
		cb->valid = false;
		return ret;
	}

	/* uC allocations grow downward from the end of the window */
	cb->next_addr = cb->va_end;
	ipa3_ctx->uc_pdev = dev;

	return 0;
}
5403
5404static int ipa_smmu_ap_cb_probe(struct device *dev)
5405{
5406 struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
5407 int result;
Amir Levy9659e592016-10-27 18:08:27 +03005408 int atomic_ctx = 1;
5409 int fast = 1;
5410 int bypass = 1;
5411 u32 iova_ap_mapping[2];
5412 u32 add_map_size;
5413 const u32 *add_map;
5414 void *smem_addr;
5415 int i;
5416
5417 IPADBG("AP CB probe: sub pdev=%p\n", dev);
5418
5419 result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
5420 iova_ap_mapping, 2);
5421 if (result) {
5422 IPAERR("Fail to read AP start/size iova addresses\n");
5423 return result;
5424 }
5425 cb->va_start = iova_ap_mapping[0];
5426 cb->va_size = iova_ap_mapping[1];
5427 cb->va_end = cb->va_start + cb->va_size;
5428 IPADBG("AP va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);
5429
5430 if (smmu_info.use_64_bit_dma_mask) {
5431 if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
5432 dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
5433 IPAERR("DMA set 64bit mask failed\n");
5434 return -EOPNOTSUPP;
5435 }
5436 } else {
5437 if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
5438 dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
5439 IPAERR("DMA set 32bit mask failed\n");
5440 return -EOPNOTSUPP;
5441 }
5442 }
5443
5444 cb->dev = dev;
Amir Levyf5625342016-12-25 10:21:02 +02005445 cb->mapping = arm_iommu_create_mapping(dev->bus,
Amir Levy9659e592016-10-27 18:08:27 +03005446 cb->va_start, cb->va_size);
5447 if (IS_ERR_OR_NULL(cb->mapping)) {
5448 IPADBG("Fail to create mapping\n");
5449 /* assume this failure is because iommu driver is not ready */
5450 return -EPROBE_DEFER;
5451 }
5452 IPADBG("SMMU mapping created\n");
5453 cb->valid = true;
5454
Amir Levy9659e592016-10-27 18:08:27 +03005455 if (smmu_info.s1_bypass) {
5456 if (iommu_domain_set_attr(cb->mapping->domain,
5457 DOMAIN_ATTR_S1_BYPASS,
5458 &bypass)) {
5459 IPAERR("couldn't set bypass\n");
5460 arm_iommu_release_mapping(cb->mapping);
5461 cb->valid = false;
5462 return -EIO;
5463 }
5464 IPADBG("SMMU S1 BYPASS\n");
5465 } else {
5466 if (iommu_domain_set_attr(cb->mapping->domain,
5467 DOMAIN_ATTR_ATOMIC,
5468 &atomic_ctx)) {
5469 IPAERR("couldn't set domain as atomic\n");
5470 arm_iommu_release_mapping(cb->mapping);
5471 cb->valid = false;
5472 return -EIO;
5473 }
5474 IPADBG("SMMU atomic set\n");
5475
5476 if (iommu_domain_set_attr(cb->mapping->domain,
5477 DOMAIN_ATTR_FAST,
5478 &fast)) {
5479 IPAERR("couldn't set fast map\n");
5480 arm_iommu_release_mapping(cb->mapping);
5481 cb->valid = false;
5482 return -EIO;
5483 }
5484 IPADBG("SMMU fast map set\n");
5485 }
5486
5487 result = arm_iommu_attach_device(cb->dev, cb->mapping);
5488 if (result) {
5489 IPAERR("couldn't attach to IOMMU ret=%d\n", result);
5490 cb->valid = false;
5491 return result;
5492 }
5493
5494 add_map = of_get_property(dev->of_node,
5495 "qcom,additional-mapping", &add_map_size);
5496 if (add_map) {
5497 /* mapping size is an array of 3-tuple of u32 */
5498 if (add_map_size % (3 * sizeof(u32))) {
5499 IPAERR("wrong additional mapping format\n");
5500 cb->valid = false;
5501 return -EFAULT;
5502 }
5503
5504 /* iterate of each entry of the additional mapping array */
5505 for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
5506 u32 iova = be32_to_cpu(add_map[i]);
5507 u32 pa = be32_to_cpu(add_map[i + 1]);
5508 u32 size = be32_to_cpu(add_map[i + 2]);
5509 unsigned long iova_p;
5510 phys_addr_t pa_p;
5511 u32 size_p;
5512
5513 IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
5514 iova_p, pa_p, size_p);
5515 IPADBG("mapping 0x%lx to 0x%pa size %d\n",
5516 iova_p, &pa_p, size_p);
5517 ipa3_iommu_map(cb->mapping->domain,
5518 iova_p, pa_p, size_p,
Amir Levyf5625342016-12-25 10:21:02 +02005519 IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
Amir Levy9659e592016-10-27 18:08:27 +03005520 }
5521 }
5522
5523 /* map SMEM memory for IPA table accesses */
5524 smem_addr = smem_alloc(SMEM_IPA_FILTER_TABLE, IPA_SMEM_SIZE,
5525 SMEM_MODEM, 0);
5526 if (smem_addr) {
5527 phys_addr_t iova = smem_virt_to_phys(smem_addr);
5528 phys_addr_t pa = iova;
5529 unsigned long iova_p;
5530 phys_addr_t pa_p;
5531 u32 size_p;
5532
5533 IPA_SMMU_ROUND_TO_PAGE(iova, pa, IPA_SMEM_SIZE,
5534 iova_p, pa_p, size_p);
5535 IPADBG("mapping 0x%lx to 0x%pa size %d\n",
5536 iova_p, &pa_p, size_p);
5537 ipa3_iommu_map(cb->mapping->domain,
5538 iova_p, pa_p, size_p,
Amir Levyf5625342016-12-25 10:21:02 +02005539 IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
Amir Levy9659e592016-10-27 18:08:27 +03005540 }
5541
5542
5543 smmu_info.present = true;
5544
5545 if (!ipa3_bus_scale_table)
5546 ipa3_bus_scale_table = msm_bus_cl_get_pdata(ipa3_pdev);
5547
5548 /* Proceed to real initialization */
5549 result = ipa3_pre_init(&ipa3_res, dev);
5550 if (result) {
5551 IPAERR("ipa_init failed\n");
5552 arm_iommu_detach_device(cb->dev);
5553 arm_iommu_release_mapping(cb->mapping);
5554 cb->valid = false;
5555 return result;
5556 }
5557
5558 return result;
5559}
5560
/*
 * ISR for the modem clock-query smp2p GPIO interrupt (registered in
 * ipa3_smp2p_probe()): freezes the current clock vote state and
 * notifies the modem.
 */
static irqreturn_t ipa3_smp2p_modem_clk_query_isr(int irq, void *ctxt)
{
	ipa3_freeze_clock_vote_and_notify_modem();

	return IRQ_HANDLED;
}
5567
5568static int ipa3_smp2p_probe(struct device *dev)
5569{
5570 struct device_node *node = dev->of_node;
5571 int res;
5572
5573 IPADBG("node->name=%s\n", node->name);
5574 if (strcmp("qcom,smp2pgpio_map_ipa_1_out", node->name) == 0) {
5575 res = of_get_gpio(node, 0);
5576 if (res < 0) {
5577 IPADBG("of_get_gpio returned %d\n", res);
5578 return res;
5579 }
5580
5581 ipa3_ctx->smp2p_info.out_base_id = res;
5582 IPADBG("smp2p out_base_id=%d\n",
5583 ipa3_ctx->smp2p_info.out_base_id);
5584 } else if (strcmp("qcom,smp2pgpio_map_ipa_1_in", node->name) == 0) {
5585 int irq;
5586
5587 res = of_get_gpio(node, 0);
5588 if (res < 0) {
5589 IPADBG("of_get_gpio returned %d\n", res);
5590 return res;
5591 }
5592
5593 ipa3_ctx->smp2p_info.in_base_id = res;
5594 IPADBG("smp2p in_base_id=%d\n",
5595 ipa3_ctx->smp2p_info.in_base_id);
5596
5597 /* register for modem clk query */
5598 irq = gpio_to_irq(ipa3_ctx->smp2p_info.in_base_id +
5599 IPA_GPIO_IN_QUERY_CLK_IDX);
5600 if (irq < 0) {
5601 IPAERR("gpio_to_irq failed %d\n", irq);
5602 return -ENODEV;
5603 }
5604 IPADBG("smp2p irq#=%d\n", irq);
5605 res = request_irq(irq,
5606 (irq_handler_t)ipa3_smp2p_modem_clk_query_isr,
5607 IRQF_TRIGGER_RISING, "ipa_smp2p_clk_vote", dev);
5608 if (res) {
5609 IPAERR("fail to register smp2p irq=%d\n", irq);
5610 return -ENODEV;
5611 }
5612 res = enable_irq_wake(ipa3_ctx->smp2p_info.in_base_id +
5613 IPA_GPIO_IN_QUERY_CLK_IDX);
5614 if (res)
5615 IPAERR("failed to enable irq wake\n");
5616 }
5617
5618 return 0;
5619}
5620
5621int ipa3_plat_drv_probe(struct platform_device *pdev_p,
5622 struct ipa_api_controller *api_ctrl,
5623 const struct of_device_id *pdrv_match)
5624{
5625 int result;
5626 struct device *dev = &pdev_p->dev;
5627
5628 IPADBG("IPA driver probing started\n");
5629 IPADBG("dev->of_node->name = %s\n", dev->of_node->name);
5630
5631 if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-ap-cb"))
5632 return ipa_smmu_ap_cb_probe(dev);
5633
5634 if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-wlan-cb"))
5635 return ipa_smmu_wlan_cb_probe(dev);
5636
5637 if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-uc-cb"))
5638 return ipa_smmu_uc_cb_probe(dev);
5639
5640 if (of_device_is_compatible(dev->of_node,
5641 "qcom,smp2pgpio-map-ipa-1-in"))
5642 return ipa3_smp2p_probe(dev);
5643
5644 if (of_device_is_compatible(dev->of_node,
5645 "qcom,smp2pgpio-map-ipa-1-out"))
5646 return ipa3_smp2p_probe(dev);
5647
5648 master_dev = dev;
5649 if (!ipa3_pdev)
5650 ipa3_pdev = pdev_p;
5651
5652 result = get_ipa_dts_configuration(pdev_p, &ipa3_res);
5653 if (result) {
5654 IPAERR("IPA dts parsing failed\n");
5655 return result;
5656 }
5657
5658 result = ipa3_bind_api_controller(ipa3_res.ipa_hw_type, api_ctrl);
5659 if (result) {
5660 IPAERR("IPA API binding failed\n");
5661 return result;
5662 }
5663
Amir Levy9659e592016-10-27 18:08:27 +03005664 if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
5665 if (of_property_read_bool(pdev_p->dev.of_node,
5666 "qcom,smmu-s1-bypass"))
5667 smmu_info.s1_bypass = true;
5668 if (of_property_read_bool(pdev_p->dev.of_node,
5669 "qcom,smmu-fast-map"))
5670 smmu_info.fast_map = true;
5671 if (of_property_read_bool(pdev_p->dev.of_node,
5672 "qcom,use-64-bit-dma-mask"))
5673 smmu_info.use_64_bit_dma_mask = true;
5674 smmu_info.arm_smmu = true;
5675 pr_info("IPA smmu_info.s1_bypass=%d smmu_info.fast_map=%d\n",
5676 smmu_info.s1_bypass, smmu_info.fast_map);
5677 } else if (of_property_read_bool(pdev_p->dev.of_node,
5678 "qcom,msm-smmu")) {
5679 IPAERR("Legacy IOMMU not supported\n");
5680 result = -EOPNOTSUPP;
5681 } else {
5682 if (of_property_read_bool(pdev_p->dev.of_node,
5683 "qcom,use-64-bit-dma-mask")) {
5684 if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(64)) ||
5685 dma_set_coherent_mask(&pdev_p->dev,
5686 DMA_BIT_MASK(64))) {
5687 IPAERR("DMA set 64bit mask failed\n");
5688 return -EOPNOTSUPP;
5689 }
5690 } else {
5691 if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(32)) ||
5692 dma_set_coherent_mask(&pdev_p->dev,
5693 DMA_BIT_MASK(32))) {
5694 IPAERR("DMA set 32bit mask failed\n");
5695 return -EOPNOTSUPP;
5696 }
5697 }
5698
5699 if (!ipa3_bus_scale_table)
5700 ipa3_bus_scale_table = msm_bus_cl_get_pdata(pdev_p);
5701 /* Proceed to real initialization */
5702 result = ipa3_pre_init(&ipa3_res, dev);
5703 if (result) {
5704 IPAERR("ipa3_init failed\n");
5705 return result;
5706 }
5707 }
5708
Ghanim Fodi115bf8a2017-04-21 01:36:06 -07005709 result = of_platform_populate(pdev_p->dev.of_node,
5710 pdrv_match, NULL, &pdev_p->dev);
5711 if (result) {
5712 IPAERR("failed to populate platform\n");
5713 return result;
5714 }
5715
Amir Levy9659e592016-10-27 18:08:27 +03005716 return result;
5717}
5718
5719/**
5720 * ipa3_ap_suspend() - suspend callback for runtime_pm
5721 * @dev: pointer to device
5722 *
5723 * This callback will be invoked by the runtime_pm framework when an AP suspend
5724 * operation is invoked, usually by pressing a suspend button.
5725 *
5726 * Returns -EAGAIN to runtime_pm framework in case IPA is in use by AP.
5727 * This will postpone the suspend operation until IPA is no longer used by AP.
5728*/
5729int ipa3_ap_suspend(struct device *dev)
5730{
5731 int i;
5732
5733 IPADBG("Enter...\n");
5734
5735 /* In case there is a tx/rx handler in polling mode fail to suspend */
5736 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
5737 if (ipa3_ctx->ep[i].sys &&
5738 atomic_read(&ipa3_ctx->ep[i].sys->curr_polling_state)) {
5739 IPAERR("EP %d is in polling state, do not suspend\n",
5740 i);
5741 return -EAGAIN;
5742 }
5743 }
5744
Amir Levya59ed3f2017-03-05 17:30:55 +02005745 /*
5746 * Release transport IPA resource without waiting for inactivity timer
5747 */
Amir Levy9659e592016-10-27 18:08:27 +03005748 atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
Amir Levya59ed3f2017-03-05 17:30:55 +02005749 ipa3_transport_release_resource(NULL);
Amir Levy9659e592016-10-27 18:08:27 +03005750 IPADBG("Exit\n");
5751
5752 return 0;
5753}
5754
5755/**
5756* ipa3_ap_resume() - resume callback for runtime_pm
5757* @dev: pointer to device
5758*
5759* This callback will be invoked by the runtime_pm framework when an AP resume
5760* operation is invoked.
5761*
5762* Always returns 0 since resume should always succeed.
5763*/
5764int ipa3_ap_resume(struct device *dev)
5765{
5766 return 0;
5767}
5768
/**
 * ipa3_get_ctx() - return the global IPA driver context
 *
 * Return: pointer to the driver-wide &struct ipa3_context singleton.
 * May be NULL before the driver has initialized (callers elsewhere in
 * this file NULL-check it, e.g. ipa3_register_ipa_ready_cb()).
 */
struct ipa3_context *ipa3_get_ctx(void)
{
	return ipa3_ctx;
}
5773
Amir Levy9659e592016-10-27 18:08:27 +03005774static void ipa_gsi_notify_cb(struct gsi_per_notify *notify)
5775{
5776 switch (notify->evt_id) {
5777 case GSI_PER_EVT_GLOB_ERROR:
5778 IPAERR("Got GSI_PER_EVT_GLOB_ERROR\n");
5779 IPAERR("Err_desc = 0x%04x\n", notify->data.err_desc);
5780 break;
5781 case GSI_PER_EVT_GLOB_GP1:
5782 IPAERR("Got GSI_PER_EVT_GLOB_GP1\n");
5783 BUG();
5784 break;
5785 case GSI_PER_EVT_GLOB_GP2:
5786 IPAERR("Got GSI_PER_EVT_GLOB_GP2\n");
5787 BUG();
5788 break;
5789 case GSI_PER_EVT_GLOB_GP3:
5790 IPAERR("Got GSI_PER_EVT_GLOB_GP3\n");
5791 BUG();
5792 break;
5793 case GSI_PER_EVT_GENERAL_BREAK_POINT:
5794 IPAERR("Got GSI_PER_EVT_GENERAL_BREAK_POINT\n");
5795 break;
5796 case GSI_PER_EVT_GENERAL_BUS_ERROR:
5797 IPAERR("Got GSI_PER_EVT_GENERAL_BUS_ERROR\n");
5798 BUG();
5799 break;
5800 case GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW:
5801 IPAERR("Got GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW\n");
5802 BUG();
5803 break;
5804 case GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW:
5805 IPAERR("Got GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW\n");
5806 BUG();
5807 break;
5808 default:
5809 IPAERR("Received unexpected evt: %d\n",
5810 notify->evt_id);
5811 BUG();
5812 }
5813}
5814
5815int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data)
5816{
5817 struct ipa3_ready_cb_info *cb_info = NULL;
5818
5819 /* check ipa3_ctx existed or not */
5820 if (!ipa3_ctx) {
5821 IPADBG("IPA driver haven't initialized\n");
5822 return -ENXIO;
5823 }
5824 mutex_lock(&ipa3_ctx->lock);
5825 if (ipa3_ctx->ipa_initialization_complete) {
5826 mutex_unlock(&ipa3_ctx->lock);
5827 IPADBG("IPA driver finished initialization already\n");
5828 return -EEXIST;
5829 }
5830
5831 cb_info = kmalloc(sizeof(struct ipa3_ready_cb_info), GFP_KERNEL);
5832 if (!cb_info) {
5833 mutex_unlock(&ipa3_ctx->lock);
5834 return -ENOMEM;
5835 }
5836
5837 cb_info->ready_cb = ipa_ready_cb;
5838 cb_info->user_data = user_data;
5839
5840 list_add_tail(&cb_info->link, &ipa3_ctx->ipa_ready_cb_list);
5841 mutex_unlock(&ipa3_ctx->lock);
5842
5843 return 0;
5844}
5845
/*
 * ipa3_iommu_map() - map an IOVA range into one of the IPA IOMMU domains,
 * after rejecting mappings that fall inside the owning context bank's own
 * VA window (va_start..va_end).
 *
 * The domain comparison order (AP, then WLAN, then uC) is deliberate;
 * do not reorder without confirming the three getters always return
 * distinct domains.
 *
 * Returns the iommu_map() result, or -EFAULT on an overlap or an
 * unrecognized domain (after ipa_assert()).
 *
 * NOTE(review): IPADBG/IPAERR use plain %p for kernel pointers here;
 * %pK may be preferable to avoid leaking addresses — confirm against
 * the target kernel's printk hardening policy.
 */
int ipa3_iommu_map(struct iommu_domain *domain,
	unsigned long iova, phys_addr_t paddr, size_t size, int prot)
{
	struct ipa_smmu_cb_ctx *ap_cb = ipa3_get_smmu_ctx();
	struct ipa_smmu_cb_ctx *uc_cb = ipa3_get_uc_smmu_ctx();

	IPADBG("domain =0x%p iova 0x%lx\n", domain, iova);
	IPADBG("paddr =0x%pa size 0x%x\n", &paddr, (u32)size);

	/* make sure no overlapping */
	if (domain == ipa3_get_smmu_domain()) {
		if (iova >= ap_cb->va_start && iova < ap_cb->va_end) {
			IPAERR("iommu AP overlap addr 0x%lx\n", iova);
			ipa_assert();
			return -EFAULT;
		}
	} else if (domain == ipa3_get_wlan_smmu_domain()) {
		/* wlan is one time map; no overlap tracking needed */
	} else if (domain == ipa3_get_uc_smmu_domain()) {
		if (iova >= uc_cb->va_start && iova < uc_cb->va_end) {
			IPAERR("iommu uC overlap addr 0x%lx\n", iova);
			ipa_assert();
			return -EFAULT;
		}
	} else {
		IPAERR("Unexpected domain 0x%p\n", domain);
		ipa_assert();
		return -EFAULT;
	}

	return iommu_map(domain, iova, paddr, size, prot);
}
5878
/* Module metadata: GPL v2 license matches the header notice above */
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPA HW device driver");