blob: 23e0bf70c9d8f402ae373358205b242d40d70e55 [file] [log] [blame]
Akshay Pandit0e2e68a2020-02-05 17:58:06 +05301/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
Amir Levy9659e592016-10-27 18:08:27 +03002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/clk.h>
14#include <linux/compat.h>
15#include <linux/device.h>
16#include <linux/dmapool.h>
17#include <linux/fs.h>
18#include <linux/genalloc.h>
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/mm.h>
22#include <linux/module.h>
23#include <linux/of.h>
24#include <linux/of_platform.h>
25#include <linux/platform_device.h>
26#include <linux/rbtree.h>
27#include <linux/of_gpio.h>
28#include <linux/uaccess.h>
29#include <linux/interrupt.h>
30#include <linux/msm-bus.h>
31#include <linux/msm-bus-board.h>
32#include <linux/netdevice.h>
33#include <linux/delay.h>
34#include <linux/msm_gsi.h>
Amir Levy9659e592016-10-27 18:08:27 +030035#include <linux/time.h>
36#include <linux/hashtable.h>
Amir Levyd9f51132016-11-14 16:55:35 +020037#include <linux/jhash.h>
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -040038#include <linux/pci.h>
Amir Levy9659e592016-10-27 18:08:27 +030039#include <soc/qcom/subsystem_restart.h>
40#include <soc/qcom/smem.h>
Gidon Studinski3021a6f2016-11-10 12:48:48 +020041#include <soc/qcom/scm.h>
Amir Levy635bced2016-12-19 09:20:42 +020042#include <asm/cacheflush.h>
Gidon Studinski3021a6f2016-11-10 12:48:48 +020043
44#ifdef CONFIG_ARM64
Gidon Studinski3021a6f2016-11-10 12:48:48 +020045/* Outer caches unsupported on ARM64 platforms */
46#define outer_flush_range(x, y)
47#define __cpuc_flush_dcache_area __flush_dcache_area
48
49#endif
50
Amir Levy9659e592016-10-27 18:08:27 +030051#define IPA_SUBSYSTEM_NAME "ipa_fws"
52#include "ipa_i.h"
53#include "../ipa_rm_i.h"
54#include "ipahal/ipahal.h"
55#include "ipahal/ipahal_fltrt.h"
56
57#define CREATE_TRACE_POINTS
58#include "ipa_trace.h"
59
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -040060/*
61 * The following for adding code (ie. for EMULATION) not found on x86.
62 */
63#if IPA_EMULATION_COMPILE == 1
64# include "ipa_emulation_stubs.h"
65#endif
Amir Levy9659e592016-10-27 18:08:27 +030066
67#ifdef CONFIG_COMPAT
Amir Levy9659e592016-10-27 18:08:27 +030068/**
69 * struct ipa3_ioc_nat_alloc_mem32 - nat table memory allocation
70 * properties
71 * @dev_name: input parameter, the name of table
72 * @size: input parameter, size of table in bytes
73 * @offset: output parameter, offset into page in case of system memory
74 */
75struct ipa3_ioc_nat_alloc_mem32 {
76 char dev_name[IPA_RESOURCE_NAME_MAX];
77 compat_size_t size;
78 compat_off_t offset;
79};
Amir Levy479cfdd2017-10-26 12:23:14 +030080
81/**
82 * struct ipa_ioc_nat_ipv6ct_table_alloc32 - table memory allocation
83 * properties
84 * @size: input parameter, size of table in bytes
85 * @offset: output parameter, offset into page in case of system memory
86 */
87struct ipa_ioc_nat_ipv6ct_table_alloc32 {
88 compat_size_t size;
89 compat_off_t offset;
90};
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -040091#endif /* #ifdef CONFIG_COMPAT */
Amir Levy9659e592016-10-27 18:08:27 +030092
Gidon Studinski3021a6f2016-11-10 12:48:48 +020093#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311
94#define TZ_MEM_PROTECT_REGION_ID 0x10
95
96struct tz_smmu_ipa_protect_region_iovec_s {
97 u64 input_addr;
98 u64 output_addr;
99 u64 size;
100 u32 attr;
101} __packed;
102
103struct tz_smmu_ipa_protect_region_s {
104 phys_addr_t iovec_buf;
105 u32 size_bytes;
106} __packed;
107
Amir Levy9659e592016-10-27 18:08:27 +0300108static void ipa3_start_tag_process(struct work_struct *work);
109static DECLARE_WORK(ipa3_tag_work, ipa3_start_tag_process);
110
Amir Levya59ed3f2017-03-05 17:30:55 +0200111static void ipa3_transport_release_resource(struct work_struct *work);
112static DECLARE_DELAYED_WORK(ipa3_transport_release_resource_work,
113 ipa3_transport_release_resource);
Amir Levy9659e592016-10-27 18:08:27 +0300114static void ipa_gsi_notify_cb(struct gsi_per_notify *notify);
115
Skylar Changefc0a0f2018-03-29 11:17:40 -0700116static int ipa3_attach_to_smmu(void);
117static int ipa3_alloc_pkt_init(void);
118
Ghanim Fodia5f376a2017-10-17 18:14:53 +0300119static void ipa3_load_ipa_fw(struct work_struct *work);
120static DECLARE_WORK(ipa3_fw_loading_work, ipa3_load_ipa_fw);
Utkarsh Saxenaded78142017-05-03 14:04:30 +0530121
Skylar Chang242952b2017-07-20 15:04:05 -0700122static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work);
123static DECLARE_WORK(ipa_dec_clients_disable_clks_on_wq_work,
124 ipa_dec_clients_disable_clks_on_wq);
125
Amir Levy9659e592016-10-27 18:08:27 +0300126static struct ipa3_plat_drv_res ipa3_res = {0, };
Amir Levy9659e592016-10-27 18:08:27 +0300127
128static struct clk *ipa3_clk;
129
130struct ipa3_context *ipa3_ctx;
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -0400131
Amir Levy9659e592016-10-27 18:08:27 +0300132static struct {
Skylar Changefc0a0f2018-03-29 11:17:40 -0700133 bool present[IPA_SMMU_CB_MAX];
Amir Levy9659e592016-10-27 18:08:27 +0300134 bool arm_smmu;
Amir Levy9659e592016-10-27 18:08:27 +0300135 bool fast_map;
Michael Adisumarta93e97522017-10-06 15:49:46 -0700136 bool s1_bypass_arr[IPA_SMMU_CB_MAX];
Amir Levy9659e592016-10-27 18:08:27 +0300137 bool use_64_bit_dma_mask;
138 u32 ipa_base;
139 u32 ipa_size;
140} smmu_info;
141
142static char *active_clients_table_buf;
143
144int ipa3_active_clients_log_print_buffer(char *buf, int size)
145{
146 int i;
147 int nbytes;
148 int cnt = 0;
149 int start_idx;
150 int end_idx;
Skylar Chang69ae50e2017-07-31 13:13:29 -0700151 unsigned long flags;
Amir Levy9659e592016-10-27 18:08:27 +0300152
Skylar Chang69ae50e2017-07-31 13:13:29 -0700153 spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
Amir Levy9659e592016-10-27 18:08:27 +0300154 start_idx = (ipa3_ctx->ipa3_active_clients_logging.log_tail + 1) %
155 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
156 end_idx = ipa3_ctx->ipa3_active_clients_logging.log_head;
157 for (i = start_idx; i != end_idx;
158 i = (i + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) {
159 nbytes = scnprintf(buf + cnt, size - cnt, "%s\n",
160 ipa3_ctx->ipa3_active_clients_logging
161 .log_buffer[i]);
162 cnt += nbytes;
163 }
Skylar Chang69ae50e2017-07-31 13:13:29 -0700164 spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
165 flags);
Amir Levy9659e592016-10-27 18:08:27 +0300166
167 return cnt;
168}
169
170int ipa3_active_clients_log_print_table(char *buf, int size)
171{
172 int i;
173 struct ipa3_active_client_htable_entry *iterator;
174 int cnt = 0;
Skylar Chang69ae50e2017-07-31 13:13:29 -0700175 unsigned long flags;
Amir Levy9659e592016-10-27 18:08:27 +0300176
Skylar Chang69ae50e2017-07-31 13:13:29 -0700177 spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
Amir Levy9659e592016-10-27 18:08:27 +0300178 cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n");
179 hash_for_each(ipa3_ctx->ipa3_active_clients_logging.htable, i,
180 iterator, list) {
181 switch (iterator->type) {
182 case IPA3_ACTIVE_CLIENT_LOG_TYPE_EP:
183 cnt += scnprintf(buf + cnt, size - cnt,
184 "%-40s %-3d ENDPOINT\n",
185 iterator->id_string, iterator->count);
186 break;
187 case IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE:
188 cnt += scnprintf(buf + cnt, size - cnt,
189 "%-40s %-3d SIMPLE\n",
190 iterator->id_string, iterator->count);
191 break;
192 case IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE:
193 cnt += scnprintf(buf + cnt, size - cnt,
194 "%-40s %-3d RESOURCE\n",
195 iterator->id_string, iterator->count);
196 break;
197 case IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL:
198 cnt += scnprintf(buf + cnt, size - cnt,
199 "%-40s %-3d SPECIAL\n",
200 iterator->id_string, iterator->count);
201 break;
202 default:
203 IPAERR("Trying to print illegal active_clients type");
204 break;
205 }
206 }
207 cnt += scnprintf(buf + cnt, size - cnt,
208 "\nTotal active clients count: %d\n",
Skylar Chang242952b2017-07-20 15:04:05 -0700209 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
Skylar Chang69ae50e2017-07-31 13:13:29 -0700210 spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
211 flags);
Amir Levy9659e592016-10-27 18:08:27 +0300212
213 return cnt;
214}
215
Skylar Chang68c37d82018-04-07 16:42:36 -0700216static int ipa3_clean_modem_rule(void)
217{
218 struct ipa_install_fltr_rule_req_msg_v01 *req;
219 struct ipa_install_fltr_rule_req_ex_msg_v01 *req_ex;
220 int val = 0;
221
222 if (ipa3_ctx->ipa_hw_type < IPA_HW_v3_0) {
223 req = kzalloc(
224 sizeof(struct ipa_install_fltr_rule_req_msg_v01),
225 GFP_KERNEL);
226 if (!req) {
227 IPAERR("mem allocated failed!\n");
228 return -ENOMEM;
229 }
230 req->filter_spec_list_valid = false;
231 req->filter_spec_list_len = 0;
232 req->source_pipe_index_valid = 0;
233 val = ipa3_qmi_filter_request_send(req);
234 kfree(req);
235 } else {
236 req_ex = kzalloc(
237 sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01),
238 GFP_KERNEL);
239 if (!req_ex) {
240 IPAERR("mem allocated failed!\n");
241 return -ENOMEM;
242 }
243 req_ex->filter_spec_ex_list_valid = false;
244 req_ex->filter_spec_ex_list_len = 0;
245 req_ex->source_pipe_index_valid = 0;
246 val = ipa3_qmi_filter_request_ex_send(req_ex);
247 kfree(req_ex);
248 }
249
250 return val;
251}
252
Amir Levy9659e592016-10-27 18:08:27 +0300253static int ipa3_active_clients_panic_notifier(struct notifier_block *this,
254 unsigned long event, void *ptr)
255{
Amir Levy9659e592016-10-27 18:08:27 +0300256 ipa3_active_clients_log_print_table(active_clients_table_buf,
257 IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE);
Michael Adisumartaedba22d2018-04-19 12:28:33 -0700258 IPAERR("%s\n", active_clients_table_buf);
Amir Levy9659e592016-10-27 18:08:27 +0300259
260 return NOTIFY_DONE;
261}
262
263static struct notifier_block ipa3_active_clients_panic_blk = {
264 .notifier_call = ipa3_active_clients_panic_notifier,
265};
266
267static int ipa3_active_clients_log_insert(const char *string)
268{
269 int head;
270 int tail;
271
272 if (!ipa3_ctx->ipa3_active_clients_logging.log_rdy)
273 return -EPERM;
274
275 head = ipa3_ctx->ipa3_active_clients_logging.log_head;
276 tail = ipa3_ctx->ipa3_active_clients_logging.log_tail;
277
278 memset(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], '_',
279 IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
280 strlcpy(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], string,
281 (size_t)IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
282 head = (head + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
283 if (tail == head)
284 tail = (tail + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
285
286 ipa3_ctx->ipa3_active_clients_logging.log_tail = tail;
287 ipa3_ctx->ipa3_active_clients_logging.log_head = head;
288
289 return 0;
290}
291
292static int ipa3_active_clients_log_init(void)
293{
294 int i;
295
Skylar Chang69ae50e2017-07-31 13:13:29 -0700296 spin_lock_init(&ipa3_ctx->ipa3_active_clients_logging.lock);
Amir Levy9659e592016-10-27 18:08:27 +0300297 ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] = kzalloc(
298 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES *
299 sizeof(char[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN]),
300 GFP_KERNEL);
301 active_clients_table_buf = kzalloc(sizeof(
302 char[IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE]), GFP_KERNEL);
303 if (ipa3_ctx->ipa3_active_clients_logging.log_buffer == NULL) {
304 pr_err("Active Clients Logging memory allocation failed");
305 goto bail;
306 }
307 for (i = 0; i < IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; i++) {
308 ipa3_ctx->ipa3_active_clients_logging.log_buffer[i] =
309 ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] +
310 (IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN * i);
311 }
312 ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
313 ipa3_ctx->ipa3_active_clients_logging.log_tail =
314 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
315 hash_init(ipa3_ctx->ipa3_active_clients_logging.htable);
316 atomic_notifier_chain_register(&panic_notifier_list,
317 &ipa3_active_clients_panic_blk);
318 ipa3_ctx->ipa3_active_clients_logging.log_rdy = 1;
319
320 return 0;
321
322bail:
323 return -ENOMEM;
324}
325
326void ipa3_active_clients_log_clear(void)
327{
Skylar Chang69ae50e2017-07-31 13:13:29 -0700328 unsigned long flags;
329
330 spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
Amir Levy9659e592016-10-27 18:08:27 +0300331 ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
332 ipa3_ctx->ipa3_active_clients_logging.log_tail =
333 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
Skylar Chang69ae50e2017-07-31 13:13:29 -0700334 spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
335 flags);
Amir Levy9659e592016-10-27 18:08:27 +0300336}
337
338static void ipa3_active_clients_log_destroy(void)
339{
Skylar Chang69ae50e2017-07-31 13:13:29 -0700340 unsigned long flags;
341
342 spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
Amir Levy9659e592016-10-27 18:08:27 +0300343 ipa3_ctx->ipa3_active_clients_logging.log_rdy = 0;
Ghanim Fodic48ba992017-12-24 19:28:38 +0200344 kfree(active_clients_table_buf);
345 active_clients_table_buf = NULL;
Amir Levy9659e592016-10-27 18:08:27 +0300346 kfree(ipa3_ctx->ipa3_active_clients_logging.log_buffer[0]);
347 ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
348 ipa3_ctx->ipa3_active_clients_logging.log_tail =
349 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
Skylar Chang69ae50e2017-07-31 13:13:29 -0700350 spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
351 flags);
Amir Levy9659e592016-10-27 18:08:27 +0300352}
353
Amir Levy9659e592016-10-27 18:08:27 +0300354static struct ipa_smmu_cb_ctx smmu_cb[IPA_SMMU_CB_MAX];
355
356struct iommu_domain *ipa3_get_smmu_domain(void)
357{
358 if (smmu_cb[IPA_SMMU_CB_AP].valid)
359 return smmu_cb[IPA_SMMU_CB_AP].mapping->domain;
360
361 IPAERR("CB not valid\n");
362
363 return NULL;
364}
365
366struct iommu_domain *ipa3_get_uc_smmu_domain(void)
367{
368 if (smmu_cb[IPA_SMMU_CB_UC].valid)
369 return smmu_cb[IPA_SMMU_CB_UC].mapping->domain;
370
371 IPAERR("CB not valid\n");
372
373 return NULL;
374}
375
376struct iommu_domain *ipa3_get_wlan_smmu_domain(void)
377{
378 if (smmu_cb[IPA_SMMU_CB_WLAN].valid)
379 return smmu_cb[IPA_SMMU_CB_WLAN].iommu;
380
381 IPAERR("CB not valid\n");
382
383 return NULL;
384}
385
Michael Adisumartab1bafa42018-04-16 16:48:10 -0700386struct iommu_domain *ipa3_get_smmu_domain_by_type(enum ipa_smmu_cb_type cb_type)
387{
388
389 if (cb_type == IPA_SMMU_CB_WLAN && smmu_cb[IPA_SMMU_CB_WLAN].valid)
390 return smmu_cb[IPA_SMMU_CB_WLAN].iommu;
391
392 if (smmu_cb[cb_type].valid)
393 return smmu_cb[cb_type].mapping->domain;
394
395 IPAERR("CB#%d not valid\n", cb_type);
396
397 return NULL;
398}
Amir Levy9659e592016-10-27 18:08:27 +0300399
400struct device *ipa3_get_dma_dev(void)
401{
402 return ipa3_ctx->pdev;
403}
404
405/**
Skylar Changefc0a0f2018-03-29 11:17:40 -0700406 * ipa3_get_smmu_ctx()- Return smmu context for the given cb_type
Amir Levy9659e592016-10-27 18:08:27 +0300407 *
408 * Return value: pointer to smmu context address
409 */
Skylar Changefc0a0f2018-03-29 11:17:40 -0700410struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(enum ipa_smmu_cb_type cb_type)
Amir Levy9659e592016-10-27 18:08:27 +0300411{
Skylar Changefc0a0f2018-03-29 11:17:40 -0700412 return &smmu_cb[cb_type];
Amir Levy9659e592016-10-27 18:08:27 +0300413}
414
415static int ipa3_open(struct inode *inode, struct file *filp)
416{
Amir Levy9659e592016-10-27 18:08:27 +0300417 IPADBG_LOW("ENTER\n");
Skylar Changefc0a0f2018-03-29 11:17:40 -0700418 filp->private_data = ipa3_ctx;
Amir Levy9659e592016-10-27 18:08:27 +0300419
420 return 0;
421}
422
Amir Levy9659e592016-10-27 18:08:27 +0300423static void ipa3_wan_msg_free_cb(void *buff, u32 len, u32 type)
424{
425 if (!buff) {
426 IPAERR("Null buffer\n");
427 return;
428 }
429
430 if (type != WAN_UPSTREAM_ROUTE_ADD &&
431 type != WAN_UPSTREAM_ROUTE_DEL &&
432 type != WAN_EMBMS_CONNECT) {
433 IPAERR("Wrong type given. buff %p type %d\n", buff, type);
434 return;
435 }
436
437 kfree(buff);
438}
439
Skylar Chang68c37d82018-04-07 16:42:36 -0700440static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type,
441 bool is_cache)
Amir Levy9659e592016-10-27 18:08:27 +0300442{
443 int retval;
444 struct ipa_wan_msg *wan_msg;
445 struct ipa_msg_meta msg_meta;
Mohammed Javid616bb992017-10-03 13:10:05 +0530446 struct ipa_wan_msg cache_wan_msg;
Amir Levy9659e592016-10-27 18:08:27 +0300447
448 wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
449 if (!wan_msg) {
450 IPAERR("no memory\n");
451 return -ENOMEM;
452 }
453
Amir Levy479cfdd2017-10-26 12:23:14 +0300454 if (copy_from_user(wan_msg, (const void __user *)usr_param,
Amir Levy9659e592016-10-27 18:08:27 +0300455 sizeof(struct ipa_wan_msg))) {
456 kfree(wan_msg);
457 return -EFAULT;
458 }
459
Mohammed Javid616bb992017-10-03 13:10:05 +0530460 memcpy(&cache_wan_msg, wan_msg, sizeof(cache_wan_msg));
461
Amir Levy9659e592016-10-27 18:08:27 +0300462 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
463 msg_meta.msg_type = msg_type;
464 msg_meta.msg_len = sizeof(struct ipa_wan_msg);
465 retval = ipa3_send_msg(&msg_meta, wan_msg, ipa3_wan_msg_free_cb);
466 if (retval) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +0530467 IPAERR_RL("ipa3_send_msg failed: %d\n", retval);
Amir Levy9659e592016-10-27 18:08:27 +0300468 kfree(wan_msg);
469 return retval;
470 }
471
Mohammed Javidb4b5ef42017-08-29 01:05:46 +0530472 if (is_cache) {
473 mutex_lock(&ipa3_ctx->ipa_cne_evt_lock);
474
475 /* cache the cne event */
476 memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[
477 ipa3_ctx->num_ipa_cne_evt_req].wan_msg,
Mohammed Javid616bb992017-10-03 13:10:05 +0530478 &cache_wan_msg,
479 sizeof(cache_wan_msg));
Mohammed Javidb4b5ef42017-08-29 01:05:46 +0530480
481 memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[
482 ipa3_ctx->num_ipa_cne_evt_req].msg_meta,
483 &msg_meta,
484 sizeof(struct ipa_msg_meta));
485
486 ipa3_ctx->num_ipa_cne_evt_req++;
487 ipa3_ctx->num_ipa_cne_evt_req %= IPA_MAX_NUM_REQ_CACHE;
488 mutex_unlock(&ipa3_ctx->ipa_cne_evt_lock);
489 }
490
Amir Levy9659e592016-10-27 18:08:27 +0300491 return 0;
492}
493
Shihuan Liuc3174f52017-05-04 15:59:13 -0700494static void ipa3_vlan_l2tp_msg_free_cb(void *buff, u32 len, u32 type)
495{
496 if (!buff) {
497 IPAERR("Null buffer\n");
498 return;
499 }
500
Amir Levy4f8b4832018-06-05 15:48:03 +0300501 switch (type) {
502 case ADD_VLAN_IFACE:
503 case DEL_VLAN_IFACE:
504 case ADD_L2TP_VLAN_MAPPING:
505 case DEL_L2TP_VLAN_MAPPING:
506 case ADD_BRIDGE_VLAN_MAPPING:
507 case DEL_BRIDGE_VLAN_MAPPING:
508 break;
509 default:
Shihuan Liuc3174f52017-05-04 15:59:13 -0700510 IPAERR("Wrong type given. buff %pK type %d\n", buff, type);
511 return;
512 }
513
514 kfree(buff);
515}
516
517static int ipa3_send_vlan_l2tp_msg(unsigned long usr_param, uint8_t msg_type)
518{
519 int retval;
520 struct ipa_ioc_vlan_iface_info *vlan_info;
521 struct ipa_ioc_l2tp_vlan_mapping_info *mapping_info;
Amir Levy4f8b4832018-06-05 15:48:03 +0300522 struct ipa_ioc_bridge_vlan_mapping_info *bridge_vlan_info;
Shihuan Liuc3174f52017-05-04 15:59:13 -0700523 struct ipa_msg_meta msg_meta;
Amir Levy4f8b4832018-06-05 15:48:03 +0300524 void *buff;
Shihuan Liuc3174f52017-05-04 15:59:13 -0700525
Amir Levy4f8b4832018-06-05 15:48:03 +0300526 IPADBG("type %d\n", msg_type);
527
528 memset(&msg_meta, 0, sizeof(msg_meta));
529 msg_meta.msg_type = msg_type;
530
531 if ((msg_type == ADD_VLAN_IFACE) ||
532 (msg_type == DEL_VLAN_IFACE)) {
Shihuan Liuc3174f52017-05-04 15:59:13 -0700533 vlan_info = kzalloc(sizeof(struct ipa_ioc_vlan_iface_info),
534 GFP_KERNEL);
535 if (!vlan_info) {
536 IPAERR("no memory\n");
537 return -ENOMEM;
538 }
539
540 if (copy_from_user((u8 *)vlan_info, (void __user *)usr_param,
541 sizeof(struct ipa_ioc_vlan_iface_info))) {
542 kfree(vlan_info);
543 return -EFAULT;
544 }
545
Shihuan Liuc3174f52017-05-04 15:59:13 -0700546 msg_meta.msg_len = sizeof(struct ipa_ioc_vlan_iface_info);
Amir Levy4f8b4832018-06-05 15:48:03 +0300547 buff = vlan_info;
548 } else if ((msg_type == ADD_L2TP_VLAN_MAPPING) ||
549 (msg_type == DEL_L2TP_VLAN_MAPPING)) {
Shihuan Liuc3174f52017-05-04 15:59:13 -0700550 mapping_info = kzalloc(sizeof(struct
551 ipa_ioc_l2tp_vlan_mapping_info), GFP_KERNEL);
552 if (!mapping_info) {
553 IPAERR("no memory\n");
554 return -ENOMEM;
555 }
556
557 if (copy_from_user((u8 *)mapping_info,
558 (void __user *)usr_param,
559 sizeof(struct ipa_ioc_l2tp_vlan_mapping_info))) {
560 kfree(mapping_info);
561 return -EFAULT;
562 }
563
Shihuan Liuc3174f52017-05-04 15:59:13 -0700564 msg_meta.msg_len = sizeof(struct
565 ipa_ioc_l2tp_vlan_mapping_info);
Amir Levy4f8b4832018-06-05 15:48:03 +0300566 buff = mapping_info;
567 } else if ((msg_type == ADD_BRIDGE_VLAN_MAPPING) ||
568 (msg_type == DEL_BRIDGE_VLAN_MAPPING)) {
569 bridge_vlan_info = kzalloc(
570 sizeof(struct ipa_ioc_bridge_vlan_mapping_info),
571 GFP_KERNEL);
572 if (!bridge_vlan_info) {
573 IPAERR("no memory\n");
574 return -ENOMEM;
Shihuan Liuc3174f52017-05-04 15:59:13 -0700575 }
Amir Levy4f8b4832018-06-05 15:48:03 +0300576
577 if (copy_from_user((u8 *)bridge_vlan_info,
578 (void __user *)usr_param,
579 sizeof(struct ipa_ioc_bridge_vlan_mapping_info))) {
580 kfree(bridge_vlan_info);
581 IPAERR("copy from user failed\n");
582 return -EFAULT;
583 }
584
585 msg_meta.msg_len = sizeof(struct
586 ipa_ioc_bridge_vlan_mapping_info);
587 buff = bridge_vlan_info;
Shihuan Liuc3174f52017-05-04 15:59:13 -0700588 } else {
589 IPAERR("Unexpected event\n");
590 return -EFAULT;
591 }
592
Amir Levy4f8b4832018-06-05 15:48:03 +0300593 retval = ipa3_send_msg(&msg_meta, buff,
594 ipa3_vlan_l2tp_msg_free_cb);
595 if (retval) {
596 IPAERR("ipa3_send_msg failed: %d, msg_type %d\n",
597 retval,
598 msg_type);
599 kfree(buff);
600 return retval;
601 }
602 IPADBG("exit\n");
603
Shihuan Liuc3174f52017-05-04 15:59:13 -0700604 return 0;
605}
Amir Levy9659e592016-10-27 18:08:27 +0300606
Mohammed Javida0f23d92018-09-11 10:50:28 +0530607static void ipa3_gsb_msg_free_cb(void *buff, u32 len, u32 type)
608{
609 if (!buff) {
610 IPAERR("Null buffer\n");
611 return;
612 }
613
614 switch (type) {
615 case IPA_GSB_CONNECT:
616 case IPA_GSB_DISCONNECT:
617 break;
618 default:
619 IPAERR("Wrong type given. buff %pK type %d\n", buff, type);
620 return;
621 }
622
623 kfree(buff);
624}
625
Mohammed Javidd636e0c2019-06-13 16:16:59 +0530626static void ipa3_get_usb_ep_info(
627 struct ipa_ioc_get_ep_info *ep_info,
628 struct ipa_ep_pair_info *pair_info
629 )
630{
631 int ep_index = -1, i;
632
633 ep_info->num_ep_pairs = 0;
634 for (i = 0; i < ep_info->max_ep_pairs; i++) {
635 pair_info[i].consumer_pipe_num = -1;
636 pair_info[i].producer_pipe_num = -1;
637 pair_info[i].ep_id = -1;
638 }
Akshay Pandit0e2e68a2020-02-05 17:58:06 +0530639 ep_index = ipa3_get_ep_mapping(IPA_CLIENT_USB2_PROD);
640
641 if ((ep_index != -1) && ipa3_ctx->ep[ep_index].valid) {
642 pair_info[ep_info->num_ep_pairs].consumer_pipe_num = ep_index;
643 ep_index = ipa3_get_ep_mapping(IPA_CLIENT_USB2_CONS);
644 if ((ep_index != -1) && (ipa3_ctx->ep[ep_index].valid)) {
645 pair_info[ep_info->num_ep_pairs].producer_pipe_num =
646 ep_index;
647 pair_info[ep_info->num_ep_pairs].ep_id =
648 IPA_USB1_EP_ID;
649
650 IPADBG("ep_pair_info consumer_pipe_num %d",
651 pair_info[ep_info->num_ep_pairs].
652 consumer_pipe_num);
653 IPADBG(" producer_pipe_num %d ep_id %d\n",
654 pair_info[ep_info->num_ep_pairs].
655 producer_pipe_num,
656 pair_info[ep_info->num_ep_pairs].ep_id);
657 ep_info->num_ep_pairs++;
658 } else {
659 pair_info[ep_info->num_ep_pairs].consumer_pipe_num = -1;
660 IPADBG("ep_pair_info consumer_pipe_num %d",
661 pair_info[ep_info->num_ep_pairs].
662 consumer_pipe_num);
663 IPADBG(" producer_pipe_num %d ep_id %d\n",
664 pair_info[ep_info->num_ep_pairs].
665 producer_pipe_num,
666 pair_info[ep_info->num_ep_pairs].ep_id);
667 }
668 }
669
Mohammed Javidd636e0c2019-06-13 16:16:59 +0530670
671 ep_index = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
672
673 if ((ep_index != -1) && ipa3_ctx->ep[ep_index].valid) {
674 pair_info[ep_info->num_ep_pairs].consumer_pipe_num = ep_index;
675 ep_index = ipa3_get_ep_mapping(IPA_CLIENT_USB_CONS);
676 if ((ep_index != -1) && (ipa3_ctx->ep[ep_index].valid)) {
677 pair_info[ep_info->num_ep_pairs].producer_pipe_num =
678 ep_index;
679 pair_info[ep_info->num_ep_pairs].ep_id =
680 IPA_USB0_EP_ID;
681
682 IPADBG("ep_pair_info consumer_pipe_num %d",
683 pair_info[ep_info->num_ep_pairs].
684 consumer_pipe_num);
685 IPADBG(" producer_pipe_num %d ep_id %d\n",
686 pair_info[ep_info->num_ep_pairs].
687 producer_pipe_num,
688 pair_info[ep_info->num_ep_pairs].ep_id);
689 ep_info->num_ep_pairs++;
690 } else {
691 pair_info[ep_info->num_ep_pairs].consumer_pipe_num = -1;
692 IPADBG("ep_pair_info consumer_pipe_num %d",
693 pair_info[ep_info->num_ep_pairs].
694 consumer_pipe_num);
695 IPADBG(" producer_pipe_num %d ep_id %d\n",
696 pair_info[ep_info->num_ep_pairs].
697 producer_pipe_num,
698 pair_info[ep_info->num_ep_pairs].ep_id);
699 }
700 }
701
Mohammed Javidd636e0c2019-06-13 16:16:59 +0530702}
703
704static void ipa3_get_pcie_ep_info(
705 struct ipa_ioc_get_ep_info *ep_info,
706 struct ipa_ep_pair_info *pair_info
707 )
708{
709 int ep_index = -1, i;
710
711 ep_info->num_ep_pairs = 0;
712 for (i = 0; i < ep_info->max_ep_pairs; i++) {
713 pair_info[i].consumer_pipe_num = -1;
714 pair_info[i].producer_pipe_num = -1;
715 pair_info[i].ep_id = -1;
716 }
717
Akshay Pandit0e2e68a2020-02-05 17:58:06 +0530718 ep_index = ipa3_get_ep_mapping(IPA_CLIENT_MHI2_PROD);
Mohammed Javidd636e0c2019-06-13 16:16:59 +0530719
720 if ((ep_index != -1) && ipa3_ctx->ep[ep_index].valid) {
721 pair_info[ep_info->num_ep_pairs].consumer_pipe_num = ep_index;
Akshay Pandit0e2e68a2020-02-05 17:58:06 +0530722 ep_index = ipa3_get_ep_mapping(IPA_CLIENT_MHI2_CONS);
Mohammed Javidd636e0c2019-06-13 16:16:59 +0530723 if ((ep_index != -1) && (ipa3_ctx->ep[ep_index].valid)) {
724 pair_info[ep_info->num_ep_pairs].producer_pipe_num =
725 ep_index;
726 pair_info[ep_info->num_ep_pairs].ep_id =
Akshay Pandit0e2e68a2020-02-05 17:58:06 +0530727 IPA_PCIE1_EP_ID;
Mohammed Javidd636e0c2019-06-13 16:16:59 +0530728
729 IPADBG("ep_pair_info consumer_pipe_num %d",
730 pair_info[ep_info->num_ep_pairs].
731 consumer_pipe_num);
732 IPADBG(" producer_pipe_num %d ep_id %d\n",
733 pair_info[ep_info->num_ep_pairs].
734 producer_pipe_num,
735 pair_info[ep_info->num_ep_pairs].ep_id);
736 ep_info->num_ep_pairs++;
737 } else {
738 pair_info[ep_info->num_ep_pairs].consumer_pipe_num = -1;
739 IPADBG("ep_pair_info consumer_pipe_num %d",
740 pair_info[ep_info->num_ep_pairs].
741 consumer_pipe_num);
742 IPADBG(" producer_pipe_num %d ep_id %d\n",
743 pair_info[ep_info->num_ep_pairs].
744 producer_pipe_num,
745 pair_info[ep_info->num_ep_pairs].ep_id);
746 }
747 }
748
Akshay Pandit0e2e68a2020-02-05 17:58:06 +0530749 ep_index = ipa3_get_ep_mapping(IPA_CLIENT_MHI_PROD);
Mohammed Javidd636e0c2019-06-13 16:16:59 +0530750
751 if ((ep_index != -1) && ipa3_ctx->ep[ep_index].valid) {
752 pair_info[ep_info->num_ep_pairs].consumer_pipe_num = ep_index;
Akshay Pandit0e2e68a2020-02-05 17:58:06 +0530753 ep_index = ipa3_get_ep_mapping(IPA_CLIENT_MHI_CONS);
Mohammed Javidd636e0c2019-06-13 16:16:59 +0530754 if ((ep_index != -1) && (ipa3_ctx->ep[ep_index].valid)) {
755 pair_info[ep_info->num_ep_pairs].producer_pipe_num =
756 ep_index;
757 pair_info[ep_info->num_ep_pairs].ep_id =
Akshay Pandit0e2e68a2020-02-05 17:58:06 +0530758 IPA_PCIE0_EP_ID;
Mohammed Javidd636e0c2019-06-13 16:16:59 +0530759
760 IPADBG("ep_pair_info consumer_pipe_num %d",
761 pair_info[ep_info->num_ep_pairs].
762 consumer_pipe_num);
763 IPADBG(" producer_pipe_num %d ep_id %d\n",
764 pair_info[ep_info->num_ep_pairs].
765 producer_pipe_num,
766 pair_info[ep_info->num_ep_pairs].ep_id);
767 ep_info->num_ep_pairs++;
768 } else {
769 pair_info[ep_info->num_ep_pairs].consumer_pipe_num = -1;
770 IPADBG("ep_pair_info consumer_pipe_num %d",
771 pair_info[ep_info->num_ep_pairs].
772 consumer_pipe_num);
773 IPADBG(" producer_pipe_num %d ep_id %d\n",
774 pair_info[ep_info->num_ep_pairs].
775 producer_pipe_num,
776 pair_info[ep_info->num_ep_pairs].ep_id);
777 }
778 }
779}
780
781
782static int ipa3_get_ep_info(struct ipa_ioc_get_ep_info *ep_info,
783 u8 *param)
784{
785 int ret = 0;
786 struct ipa_ep_pair_info *pair_info = (struct ipa_ep_pair_info *)param;
787
788 switch (ep_info->ep_type) {
789 case IPA_DATA_EP_TYP_HSUSB:
790 ipa3_get_usb_ep_info(ep_info, pair_info);
791 break;
792
793 case IPA_DATA_EP_TYP_PCIE:
794 ipa3_get_pcie_ep_info(ep_info, pair_info);
795 break;
796
797 default:
798 IPAERR_RL("Undefined ep_type %d\n", ep_info->ep_type);
799 ret = -EFAULT;
800 break;
801 }
802
803 return ret;
804}
805
Mohammed Javida0f23d92018-09-11 10:50:28 +0530806static int ipa3_send_gsb_msg(unsigned long usr_param, uint8_t msg_type)
807{
808 int retval;
809 struct ipa_ioc_gsb_info *gsb_info;
810 struct ipa_msg_meta msg_meta;
811 void *buff;
812
813 IPADBG("type %d\n", msg_type);
814
815 memset(&msg_meta, 0, sizeof(msg_meta));
816 msg_meta.msg_type = msg_type;
817
818 if ((msg_type == IPA_GSB_CONNECT) ||
819 (msg_type == IPA_GSB_DISCONNECT)) {
820 gsb_info = kzalloc(sizeof(struct ipa_ioc_gsb_info),
821 GFP_KERNEL);
822 if (!gsb_info) {
823 IPAERR("no memory\n");
824 return -ENOMEM;
825 }
826
827 if (copy_from_user((u8 *)gsb_info, (void __user *)usr_param,
828 sizeof(struct ipa_ioc_gsb_info))) {
829 kfree(gsb_info);
830 return -EFAULT;
831 }
832
833 msg_meta.msg_len = sizeof(struct ipa_ioc_gsb_info);
834 buff = gsb_info;
835 } else {
836 IPAERR("Unexpected event\n");
837 return -EFAULT;
838 }
839
840 retval = ipa3_send_msg(&msg_meta, buff,
841 ipa3_gsb_msg_free_cb);
842 if (retval) {
843 IPAERR("ipa3_send_msg failed: %d, msg_type %d\n",
844 retval,
845 msg_type);
846 kfree(buff);
847 return retval;
848 }
849 IPADBG("exit\n");
850
851 return 0;
852}
853
Amir Levy9659e592016-10-27 18:08:27 +0300854static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
855{
856 int retval = 0;
857 u32 pyld_sz;
858 u8 header[128] = { 0 };
859 u8 *param = NULL;
Amir Levya5361ab2018-05-01 13:25:37 +0300860 bool is_vlan_mode;
Amir Levy9659e592016-10-27 18:08:27 +0300861 struct ipa_ioc_nat_alloc_mem nat_mem;
Amir Levy479cfdd2017-10-26 12:23:14 +0300862 struct ipa_ioc_nat_ipv6ct_table_alloc table_alloc;
Amir Levy9659e592016-10-27 18:08:27 +0300863 struct ipa_ioc_v4_nat_init nat_init;
Amir Levy479cfdd2017-10-26 12:23:14 +0300864 struct ipa_ioc_ipv6ct_init ipv6ct_init;
Amir Levy9659e592016-10-27 18:08:27 +0300865 struct ipa_ioc_v4_nat_del nat_del;
Amir Levy479cfdd2017-10-26 12:23:14 +0300866 struct ipa_ioc_nat_ipv6ct_table_del table_del;
Amir Levy05fccd02017-06-13 16:25:45 +0300867 struct ipa_ioc_nat_pdn_entry mdfy_pdn;
Amir Levy9659e592016-10-27 18:08:27 +0300868 struct ipa_ioc_rm_dependency rm_depend;
Amir Levy479cfdd2017-10-26 12:23:14 +0300869 struct ipa_ioc_nat_dma_cmd *table_dma_cmd;
Amir Levya5361ab2018-05-01 13:25:37 +0300870 struct ipa_ioc_get_vlan_mode vlan_mode;
Mohammed Javidd636e0c2019-06-13 16:16:59 +0530871 struct ipa_ioc_get_ep_info ep_info;
Amir Levy9659e592016-10-27 18:08:27 +0300872 size_t sz;
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200873 int pre_entry;
Mohammed Javidd636e0c2019-06-13 16:16:59 +0530874 unsigned long uptr = 0;
Amir Levy9659e592016-10-27 18:08:27 +0300875
876 IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
877
Amir Levy9659e592016-10-27 18:08:27 +0300878 if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
879 return -ENOTTY;
Amir Levy9659e592016-10-27 18:08:27 +0300880
Amir Levy05532622016-11-28 12:12:01 +0200881 if (!ipa3_is_ready()) {
882 IPAERR("IPA not ready, waiting for init completion\n");
883 wait_for_completion(&ipa3_ctx->init_completion_obj);
884 }
885
Amir Levy9659e592016-10-27 18:08:27 +0300886 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
887
888 switch (cmd) {
889 case IPA_IOC_ALLOC_NAT_MEM:
Amir Levy479cfdd2017-10-26 12:23:14 +0300890 if (copy_from_user(&nat_mem, (const void __user *)arg,
891 sizeof(struct ipa_ioc_nat_alloc_mem))) {
Amir Levy9659e592016-10-27 18:08:27 +0300892 retval = -EFAULT;
893 break;
894 }
895 /* null terminate the string */
896 nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
897
898 if (ipa3_allocate_nat_device(&nat_mem)) {
899 retval = -EFAULT;
900 break;
901 }
Amir Levy479cfdd2017-10-26 12:23:14 +0300902 if (copy_to_user((void __user *)arg, &nat_mem,
903 sizeof(struct ipa_ioc_nat_alloc_mem))) {
Amir Levy9659e592016-10-27 18:08:27 +0300904 retval = -EFAULT;
905 break;
906 }
907 break;
Amir Levy479cfdd2017-10-26 12:23:14 +0300908 case IPA_IOC_ALLOC_NAT_TABLE:
909 if (copy_from_user(&table_alloc, (const void __user *)arg,
910 sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc))) {
911 retval = -EFAULT;
912 break;
913 }
914
915 if (ipa3_allocate_nat_table(&table_alloc)) {
916 retval = -EFAULT;
917 break;
918 }
919 if (table_alloc.offset &&
920 copy_to_user((void __user *)arg, &table_alloc, sizeof(
921 struct ipa_ioc_nat_ipv6ct_table_alloc))) {
922 retval = -EFAULT;
923 break;
924 }
925 break;
926
927 case IPA_IOC_ALLOC_IPV6CT_TABLE:
928 if (copy_from_user(&table_alloc, (const void __user *)arg,
929 sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc))) {
930 retval = -EFAULT;
931 break;
932 }
933
934 if (ipa3_allocate_ipv6ct_table(&table_alloc)) {
935 retval = -EFAULT;
936 break;
937 }
938 if (table_alloc.offset &&
939 copy_to_user((void __user *)arg, &table_alloc, sizeof(
940 struct ipa_ioc_nat_ipv6ct_table_alloc))) {
941 retval = -EFAULT;
942 break;
943 }
944 break;
945
Amir Levy9659e592016-10-27 18:08:27 +0300946 case IPA_IOC_V4_INIT_NAT:
Amir Levy479cfdd2017-10-26 12:23:14 +0300947 if (copy_from_user(&nat_init, (const void __user *)arg,
948 sizeof(struct ipa_ioc_v4_nat_init))) {
Amir Levy9659e592016-10-27 18:08:27 +0300949 retval = -EFAULT;
950 break;
951 }
952 if (ipa3_nat_init_cmd(&nat_init)) {
953 retval = -EFAULT;
954 break;
955 }
956 break;
957
Amir Levy479cfdd2017-10-26 12:23:14 +0300958 case IPA_IOC_INIT_IPV6CT_TABLE:
959 if (copy_from_user(&ipv6ct_init, (const void __user *)arg,
960 sizeof(struct ipa_ioc_ipv6ct_init))) {
Amir Levy9659e592016-10-27 18:08:27 +0300961 retval = -EFAULT;
962 break;
963 }
Amir Levy479cfdd2017-10-26 12:23:14 +0300964 if (ipa3_ipv6ct_init_cmd(&ipv6ct_init)) {
965 retval = -EFAULT;
966 break;
967 }
968 break;
969
970 case IPA_IOC_TABLE_DMA_CMD:
971 table_dma_cmd = (struct ipa_ioc_nat_dma_cmd *)header;
972 if (copy_from_user(header, (const void __user *)arg,
973 sizeof(struct ipa_ioc_nat_dma_cmd))) {
974 retval = -EFAULT;
975 break;
976 }
977 pre_entry = table_dma_cmd->entries;
978 pyld_sz = sizeof(struct ipa_ioc_nat_dma_cmd) +
979 pre_entry * sizeof(struct ipa_ioc_nat_dma_one);
Amir Levy9659e592016-10-27 18:08:27 +0300980 param = kzalloc(pyld_sz, GFP_KERNEL);
981 if (!param) {
982 retval = -ENOMEM;
983 break;
984 }
985
Amir Levy479cfdd2017-10-26 12:23:14 +0300986 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +0300987 retval = -EFAULT;
988 break;
989 }
Amir Levy479cfdd2017-10-26 12:23:14 +0300990 table_dma_cmd = (struct ipa_ioc_nat_dma_cmd *)param;
991
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200992 /* add check in case user-space module compromised */
Amir Levy479cfdd2017-10-26 12:23:14 +0300993 if (unlikely(table_dma_cmd->entries != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +0530994 IPAERR_RL("current %d pre %d\n",
Amir Levy479cfdd2017-10-26 12:23:14 +0300995 table_dma_cmd->entries, pre_entry);
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200996 retval = -EFAULT;
997 break;
998 }
Amir Levy479cfdd2017-10-26 12:23:14 +0300999 if (ipa3_table_dma_cmd(table_dma_cmd)) {
Amir Levy9659e592016-10-27 18:08:27 +03001000 retval = -EFAULT;
1001 break;
1002 }
1003 break;
1004
1005 case IPA_IOC_V4_DEL_NAT:
Amir Levy479cfdd2017-10-26 12:23:14 +03001006 if (copy_from_user(&nat_del, (const void __user *)arg,
1007 sizeof(struct ipa_ioc_v4_nat_del))) {
Amir Levy9659e592016-10-27 18:08:27 +03001008 retval = -EFAULT;
1009 break;
1010 }
1011 if (ipa3_nat_del_cmd(&nat_del)) {
1012 retval = -EFAULT;
1013 break;
1014 }
1015 break;
1016
Amir Levy479cfdd2017-10-26 12:23:14 +03001017 case IPA_IOC_DEL_NAT_TABLE:
1018 if (copy_from_user(&table_del, (const void __user *)arg,
1019 sizeof(struct ipa_ioc_nat_ipv6ct_table_del))) {
1020 retval = -EFAULT;
1021 break;
1022 }
1023 if (ipa3_del_nat_table(&table_del)) {
1024 retval = -EFAULT;
1025 break;
1026 }
1027 break;
1028
1029 case IPA_IOC_DEL_IPV6CT_TABLE:
1030 if (copy_from_user(&table_del, (const void __user *)arg,
1031 sizeof(struct ipa_ioc_nat_ipv6ct_table_del))) {
1032 retval = -EFAULT;
1033 break;
1034 }
1035 if (ipa3_del_ipv6ct_table(&table_del)) {
1036 retval = -EFAULT;
1037 break;
1038 }
1039 break;
1040
Amir Levy05fccd02017-06-13 16:25:45 +03001041 case IPA_IOC_NAT_MODIFY_PDN:
Amir Levy479cfdd2017-10-26 12:23:14 +03001042 if (copy_from_user(&mdfy_pdn, (const void __user *)arg,
Amir Levy05fccd02017-06-13 16:25:45 +03001043 sizeof(struct ipa_ioc_nat_pdn_entry))) {
1044 retval = -EFAULT;
1045 break;
1046 }
Amir Levydc65f4c2017-07-06 09:49:50 +03001047 if (ipa3_nat_mdfy_pdn(&mdfy_pdn)) {
Amir Levy05fccd02017-06-13 16:25:45 +03001048 retval = -EFAULT;
1049 break;
1050 }
1051 break;
1052
Amir Levy9659e592016-10-27 18:08:27 +03001053 case IPA_IOC_ADD_HDR:
Amir Levy479cfdd2017-10-26 12:23:14 +03001054 if (copy_from_user(header, (const void __user *)arg,
1055 sizeof(struct ipa_ioc_add_hdr))) {
Amir Levy9659e592016-10-27 18:08:27 +03001056 retval = -EFAULT;
1057 break;
1058 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001059 pre_entry =
1060 ((struct ipa_ioc_add_hdr *)header)->num_hdrs;
Amir Levy9659e592016-10-27 18:08:27 +03001061 pyld_sz =
1062 sizeof(struct ipa_ioc_add_hdr) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001063 pre_entry * sizeof(struct ipa_hdr_add);
Amir Levy9659e592016-10-27 18:08:27 +03001064 param = kzalloc(pyld_sz, GFP_KERNEL);
1065 if (!param) {
1066 retval = -ENOMEM;
1067 break;
1068 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001069 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001070 retval = -EFAULT;
1071 break;
1072 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001073 /* add check in case user-space module compromised */
1074 if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
1075 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301076 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001077 ((struct ipa_ioc_add_hdr *)param)->num_hdrs,
1078 pre_entry);
1079 retval = -EFAULT;
1080 break;
1081 }
Skylar Chang68c37d82018-04-07 16:42:36 -07001082 if (ipa3_add_hdr_usr((struct ipa_ioc_add_hdr *)param,
1083 true)) {
Amir Levy9659e592016-10-27 18:08:27 +03001084 retval = -EFAULT;
1085 break;
1086 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001087 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001088 retval = -EFAULT;
1089 break;
1090 }
1091 break;
1092
1093 case IPA_IOC_DEL_HDR:
Amir Levy479cfdd2017-10-26 12:23:14 +03001094 if (copy_from_user(header, (const void __user *)arg,
1095 sizeof(struct ipa_ioc_del_hdr))) {
Amir Levy9659e592016-10-27 18:08:27 +03001096 retval = -EFAULT;
1097 break;
1098 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001099 pre_entry =
1100 ((struct ipa_ioc_del_hdr *)header)->num_hdls;
Amir Levy9659e592016-10-27 18:08:27 +03001101 pyld_sz =
1102 sizeof(struct ipa_ioc_del_hdr) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001103 pre_entry * sizeof(struct ipa_hdr_del);
Amir Levy9659e592016-10-27 18:08:27 +03001104 param = kzalloc(pyld_sz, GFP_KERNEL);
1105 if (!param) {
1106 retval = -ENOMEM;
1107 break;
1108 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001109 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001110 retval = -EFAULT;
1111 break;
1112 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001113 /* add check in case user-space module compromised */
1114 if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
1115 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301116 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001117 ((struct ipa_ioc_del_hdr *)param)->num_hdls,
1118 pre_entry);
1119 retval = -EFAULT;
1120 break;
1121 }
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001122 if (ipa3_del_hdr_by_user((struct ipa_ioc_del_hdr *)param,
1123 true)) {
Amir Levy9659e592016-10-27 18:08:27 +03001124 retval = -EFAULT;
1125 break;
1126 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001127 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001128 retval = -EFAULT;
1129 break;
1130 }
1131 break;
1132
1133 case IPA_IOC_ADD_RT_RULE:
Amir Levy479cfdd2017-10-26 12:23:14 +03001134 if (copy_from_user(header, (const void __user *)arg,
1135 sizeof(struct ipa_ioc_add_rt_rule))) {
Amir Levy9659e592016-10-27 18:08:27 +03001136 retval = -EFAULT;
1137 break;
1138 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001139 pre_entry =
1140 ((struct ipa_ioc_add_rt_rule *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +03001141 pyld_sz =
1142 sizeof(struct ipa_ioc_add_rt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001143 pre_entry * sizeof(struct ipa_rt_rule_add);
Amir Levy9659e592016-10-27 18:08:27 +03001144 param = kzalloc(pyld_sz, GFP_KERNEL);
1145 if (!param) {
1146 retval = -ENOMEM;
1147 break;
1148 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001149 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001150 retval = -EFAULT;
1151 break;
1152 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001153 /* add check in case user-space module compromised */
1154 if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
1155 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301156 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001157 ((struct ipa_ioc_add_rt_rule *)param)->
1158 num_rules,
1159 pre_entry);
1160 retval = -EFAULT;
1161 break;
1162 }
Skylar Chang68c37d82018-04-07 16:42:36 -07001163 if (ipa3_add_rt_rule_usr((struct ipa_ioc_add_rt_rule *)param,
1164 true)) {
Amir Levy9659e592016-10-27 18:08:27 +03001165 retval = -EFAULT;
1166 break;
1167 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001168 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001169 retval = -EFAULT;
1170 break;
1171 }
1172 break;
Mohammed Javidd0c2a1e2017-10-30 15:34:22 +05301173
1174 case IPA_IOC_ADD_RT_RULE_EXT:
1175 if (copy_from_user(header,
1176 (const void __user *)arg,
1177 sizeof(struct ipa_ioc_add_rt_rule_ext))) {
1178 retval = -EFAULT;
1179 break;
1180 }
1181 pre_entry =
1182 ((struct ipa_ioc_add_rt_rule_ext *)header)->num_rules;
1183 pyld_sz =
1184 sizeof(struct ipa_ioc_add_rt_rule_ext) +
1185 pre_entry * sizeof(struct ipa_rt_rule_add_ext);
1186 param = kzalloc(pyld_sz, GFP_KERNEL);
1187 if (!param) {
1188 retval = -ENOMEM;
1189 break;
1190 }
1191 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
1192 retval = -EFAULT;
1193 break;
1194 }
1195 /* add check in case user-space module compromised */
1196 if (unlikely(
1197 ((struct ipa_ioc_add_rt_rule_ext *)param)->num_rules
1198 != pre_entry)) {
1199 IPAERR(" prevent memory corruption(%d not match %d)\n",
1200 ((struct ipa_ioc_add_rt_rule_ext *)param)->
1201 num_rules,
1202 pre_entry);
1203 retval = -EINVAL;
1204 break;
1205 }
1206 if (ipa3_add_rt_rule_ext(
1207 (struct ipa_ioc_add_rt_rule_ext *)param)) {
1208 retval = -EFAULT;
1209 break;
1210 }
1211 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
1212 retval = -EFAULT;
1213 break;
1214 }
1215 break;
Amir Levy9659e592016-10-27 18:08:27 +03001216 case IPA_IOC_ADD_RT_RULE_AFTER:
Amir Levy479cfdd2017-10-26 12:23:14 +03001217 if (copy_from_user(header, (const void __user *)arg,
Amir Levy9659e592016-10-27 18:08:27 +03001218 sizeof(struct ipa_ioc_add_rt_rule_after))) {
1219
1220 retval = -EFAULT;
1221 break;
1222 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001223 pre_entry =
1224 ((struct ipa_ioc_add_rt_rule_after *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +03001225 pyld_sz =
1226 sizeof(struct ipa_ioc_add_rt_rule_after) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001227 pre_entry * sizeof(struct ipa_rt_rule_add);
Amir Levy9659e592016-10-27 18:08:27 +03001228 param = kzalloc(pyld_sz, GFP_KERNEL);
1229 if (!param) {
1230 retval = -ENOMEM;
1231 break;
1232 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001233 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001234 retval = -EFAULT;
1235 break;
1236 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001237 /* add check in case user-space module compromised */
1238 if (unlikely(((struct ipa_ioc_add_rt_rule_after *)param)->
1239 num_rules != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301240 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001241 ((struct ipa_ioc_add_rt_rule_after *)param)->
1242 num_rules,
1243 pre_entry);
1244 retval = -EFAULT;
1245 break;
1246 }
Amir Levy9659e592016-10-27 18:08:27 +03001247 if (ipa3_add_rt_rule_after(
1248 (struct ipa_ioc_add_rt_rule_after *)param)) {
1249
1250 retval = -EFAULT;
1251 break;
1252 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001253 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001254 retval = -EFAULT;
1255 break;
1256 }
1257 break;
1258
1259 case IPA_IOC_MDFY_RT_RULE:
Amir Levy479cfdd2017-10-26 12:23:14 +03001260 if (copy_from_user(header, (const void __user *)arg,
1261 sizeof(struct ipa_ioc_mdfy_rt_rule))) {
Amir Levy9659e592016-10-27 18:08:27 +03001262 retval = -EFAULT;
1263 break;
1264 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001265 pre_entry =
1266 ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +03001267 pyld_sz =
1268 sizeof(struct ipa_ioc_mdfy_rt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001269 pre_entry * sizeof(struct ipa_rt_rule_mdfy);
Amir Levy9659e592016-10-27 18:08:27 +03001270 param = kzalloc(pyld_sz, GFP_KERNEL);
1271 if (!param) {
1272 retval = -ENOMEM;
1273 break;
1274 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001275 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001276 retval = -EFAULT;
1277 break;
1278 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001279 /* add check in case user-space module compromised */
1280 if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
1281 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301282 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001283 ((struct ipa_ioc_mdfy_rt_rule *)param)->
1284 num_rules,
1285 pre_entry);
1286 retval = -EFAULT;
1287 break;
1288 }
Amir Levy9659e592016-10-27 18:08:27 +03001289 if (ipa3_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) {
1290 retval = -EFAULT;
1291 break;
1292 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001293 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001294 retval = -EFAULT;
1295 break;
1296 }
1297 break;
1298
1299 case IPA_IOC_DEL_RT_RULE:
Amir Levy479cfdd2017-10-26 12:23:14 +03001300 if (copy_from_user(header, (const void __user *)arg,
1301 sizeof(struct ipa_ioc_del_rt_rule))) {
Amir Levy9659e592016-10-27 18:08:27 +03001302 retval = -EFAULT;
1303 break;
1304 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001305 pre_entry =
1306 ((struct ipa_ioc_del_rt_rule *)header)->num_hdls;
Amir Levy9659e592016-10-27 18:08:27 +03001307 pyld_sz =
1308 sizeof(struct ipa_ioc_del_rt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001309 pre_entry * sizeof(struct ipa_rt_rule_del);
Amir Levy9659e592016-10-27 18:08:27 +03001310 param = kzalloc(pyld_sz, GFP_KERNEL);
1311 if (!param) {
1312 retval = -ENOMEM;
1313 break;
1314 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001315 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001316 retval = -EFAULT;
1317 break;
1318 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001319 /* add check in case user-space module compromised */
1320 if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
1321 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301322 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001323 ((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
1324 pre_entry);
1325 retval = -EFAULT;
1326 break;
1327 }
Amir Levy9659e592016-10-27 18:08:27 +03001328 if (ipa3_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
1329 retval = -EFAULT;
1330 break;
1331 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001332 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001333 retval = -EFAULT;
1334 break;
1335 }
1336 break;
1337
1338 case IPA_IOC_ADD_FLT_RULE:
Amir Levy479cfdd2017-10-26 12:23:14 +03001339 if (copy_from_user(header, (const void __user *)arg,
1340 sizeof(struct ipa_ioc_add_flt_rule))) {
Amir Levy9659e592016-10-27 18:08:27 +03001341 retval = -EFAULT;
1342 break;
1343 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001344 pre_entry =
1345 ((struct ipa_ioc_add_flt_rule *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +03001346 pyld_sz =
1347 sizeof(struct ipa_ioc_add_flt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001348 pre_entry * sizeof(struct ipa_flt_rule_add);
Amir Levy9659e592016-10-27 18:08:27 +03001349 param = kzalloc(pyld_sz, GFP_KERNEL);
1350 if (!param) {
1351 retval = -ENOMEM;
1352 break;
1353 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001354 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001355 retval = -EFAULT;
1356 break;
1357 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001358 /* add check in case user-space module compromised */
1359 if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
1360 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301361 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001362 ((struct ipa_ioc_add_flt_rule *)param)->
1363 num_rules,
1364 pre_entry);
1365 retval = -EFAULT;
1366 break;
1367 }
Skylar Chang68c37d82018-04-07 16:42:36 -07001368 if (ipa3_add_flt_rule_usr((struct ipa_ioc_add_flt_rule *)param,
1369 true)) {
Amir Levy9659e592016-10-27 18:08:27 +03001370 retval = -EFAULT;
1371 break;
1372 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001373 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001374 retval = -EFAULT;
1375 break;
1376 }
1377 break;
1378
1379 case IPA_IOC_ADD_FLT_RULE_AFTER:
Amir Levy479cfdd2017-10-26 12:23:14 +03001380 if (copy_from_user(header, (const void __user *)arg,
1381 sizeof(struct ipa_ioc_add_flt_rule_after))) {
Amir Levy9659e592016-10-27 18:08:27 +03001382
1383 retval = -EFAULT;
1384 break;
1385 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001386 pre_entry =
1387 ((struct ipa_ioc_add_flt_rule_after *)header)->
1388 num_rules;
Amir Levy9659e592016-10-27 18:08:27 +03001389 pyld_sz =
1390 sizeof(struct ipa_ioc_add_flt_rule_after) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001391 pre_entry * sizeof(struct ipa_flt_rule_add);
Amir Levy9659e592016-10-27 18:08:27 +03001392 param = kzalloc(pyld_sz, GFP_KERNEL);
1393 if (!param) {
1394 retval = -ENOMEM;
1395 break;
1396 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001397 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001398 retval = -EFAULT;
1399 break;
1400 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001401 /* add check in case user-space module compromised */
1402 if (unlikely(((struct ipa_ioc_add_flt_rule_after *)param)->
1403 num_rules != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301404 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001405 ((struct ipa_ioc_add_flt_rule_after *)param)->
1406 num_rules,
1407 pre_entry);
1408 retval = -EFAULT;
1409 break;
1410 }
Amir Levy9659e592016-10-27 18:08:27 +03001411 if (ipa3_add_flt_rule_after(
1412 (struct ipa_ioc_add_flt_rule_after *)param)) {
1413 retval = -EFAULT;
1414 break;
1415 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001416 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001417 retval = -EFAULT;
1418 break;
1419 }
1420 break;
1421
1422 case IPA_IOC_DEL_FLT_RULE:
Amir Levy479cfdd2017-10-26 12:23:14 +03001423 if (copy_from_user(header, (const void __user *)arg,
1424 sizeof(struct ipa_ioc_del_flt_rule))) {
Amir Levy9659e592016-10-27 18:08:27 +03001425 retval = -EFAULT;
1426 break;
1427 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001428 pre_entry =
1429 ((struct ipa_ioc_del_flt_rule *)header)->num_hdls;
Amir Levy9659e592016-10-27 18:08:27 +03001430 pyld_sz =
1431 sizeof(struct ipa_ioc_del_flt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001432 pre_entry * sizeof(struct ipa_flt_rule_del);
Amir Levy9659e592016-10-27 18:08:27 +03001433 param = kzalloc(pyld_sz, GFP_KERNEL);
1434 if (!param) {
1435 retval = -ENOMEM;
1436 break;
1437 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001438 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001439 retval = -EFAULT;
1440 break;
1441 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001442 /* add check in case user-space module compromised */
1443 if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
1444 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301445 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001446 ((struct ipa_ioc_del_flt_rule *)param)->
1447 num_hdls,
1448 pre_entry);
1449 retval = -EFAULT;
1450 break;
1451 }
Amir Levy9659e592016-10-27 18:08:27 +03001452 if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
1453 retval = -EFAULT;
1454 break;
1455 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001456 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001457 retval = -EFAULT;
1458 break;
1459 }
1460 break;
1461
1462 case IPA_IOC_MDFY_FLT_RULE:
Amir Levy479cfdd2017-10-26 12:23:14 +03001463 if (copy_from_user(header, (const void __user *)arg,
1464 sizeof(struct ipa_ioc_mdfy_flt_rule))) {
Amir Levy9659e592016-10-27 18:08:27 +03001465 retval = -EFAULT;
1466 break;
1467 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001468 pre_entry =
1469 ((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +03001470 pyld_sz =
1471 sizeof(struct ipa_ioc_mdfy_flt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001472 pre_entry * sizeof(struct ipa_flt_rule_mdfy);
Amir Levy9659e592016-10-27 18:08:27 +03001473 param = kzalloc(pyld_sz, GFP_KERNEL);
1474 if (!param) {
1475 retval = -ENOMEM;
1476 break;
1477 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001478 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001479 retval = -EFAULT;
1480 break;
1481 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001482 /* add check in case user-space module compromised */
1483 if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
1484 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301485 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001486 ((struct ipa_ioc_mdfy_flt_rule *)param)->
1487 num_rules,
1488 pre_entry);
1489 retval = -EFAULT;
1490 break;
1491 }
Amir Levy9659e592016-10-27 18:08:27 +03001492 if (ipa3_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) {
1493 retval = -EFAULT;
1494 break;
1495 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001496 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001497 retval = -EFAULT;
1498 break;
1499 }
1500 break;
1501
1502 case IPA_IOC_COMMIT_HDR:
1503 retval = ipa3_commit_hdr();
1504 break;
1505 case IPA_IOC_RESET_HDR:
Skylar Chang68c37d82018-04-07 16:42:36 -07001506 retval = ipa3_reset_hdr(false);
Amir Levy9659e592016-10-27 18:08:27 +03001507 break;
1508 case IPA_IOC_COMMIT_RT:
1509 retval = ipa3_commit_rt(arg);
1510 break;
1511 case IPA_IOC_RESET_RT:
Skylar Chang68c37d82018-04-07 16:42:36 -07001512 retval = ipa3_reset_rt(arg, false);
Amir Levy9659e592016-10-27 18:08:27 +03001513 break;
1514 case IPA_IOC_COMMIT_FLT:
1515 retval = ipa3_commit_flt(arg);
1516 break;
1517 case IPA_IOC_RESET_FLT:
Skylar Chang68c37d82018-04-07 16:42:36 -07001518 retval = ipa3_reset_flt(arg, false);
Amir Levy9659e592016-10-27 18:08:27 +03001519 break;
1520 case IPA_IOC_GET_RT_TBL:
Amir Levy479cfdd2017-10-26 12:23:14 +03001521 if (copy_from_user(header, (const void __user *)arg,
1522 sizeof(struct ipa_ioc_get_rt_tbl))) {
Amir Levy9659e592016-10-27 18:08:27 +03001523 retval = -EFAULT;
1524 break;
1525 }
1526 if (ipa3_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) {
1527 retval = -EFAULT;
1528 break;
1529 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001530 if (copy_to_user((void __user *)arg, header,
Amir Levy9659e592016-10-27 18:08:27 +03001531 sizeof(struct ipa_ioc_get_rt_tbl))) {
1532 retval = -EFAULT;
1533 break;
1534 }
1535 break;
1536 case IPA_IOC_PUT_RT_TBL:
1537 retval = ipa3_put_rt_tbl(arg);
1538 break;
1539 case IPA_IOC_GET_HDR:
Amir Levy479cfdd2017-10-26 12:23:14 +03001540 if (copy_from_user(header, (const void __user *)arg,
1541 sizeof(struct ipa_ioc_get_hdr))) {
Amir Levy9659e592016-10-27 18:08:27 +03001542 retval = -EFAULT;
1543 break;
1544 }
1545 if (ipa3_get_hdr((struct ipa_ioc_get_hdr *)header)) {
1546 retval = -EFAULT;
1547 break;
1548 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001549 if (copy_to_user((void __user *)arg, header,
1550 sizeof(struct ipa_ioc_get_hdr))) {
Amir Levy9659e592016-10-27 18:08:27 +03001551 retval = -EFAULT;
1552 break;
1553 }
1554 break;
1555 case IPA_IOC_PUT_HDR:
1556 retval = ipa3_put_hdr(arg);
1557 break;
1558 case IPA_IOC_SET_FLT:
1559 retval = ipa3_cfg_filter(arg);
1560 break;
1561 case IPA_IOC_COPY_HDR:
Amir Levy479cfdd2017-10-26 12:23:14 +03001562 if (copy_from_user(header, (const void __user *)arg,
1563 sizeof(struct ipa_ioc_copy_hdr))) {
Amir Levy9659e592016-10-27 18:08:27 +03001564 retval = -EFAULT;
1565 break;
1566 }
1567 if (ipa3_copy_hdr((struct ipa_ioc_copy_hdr *)header)) {
1568 retval = -EFAULT;
1569 break;
1570 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001571 if (copy_to_user((void __user *)arg, header,
1572 sizeof(struct ipa_ioc_copy_hdr))) {
Amir Levy9659e592016-10-27 18:08:27 +03001573 retval = -EFAULT;
1574 break;
1575 }
1576 break;
1577 case IPA_IOC_QUERY_INTF:
Amir Levy479cfdd2017-10-26 12:23:14 +03001578 if (copy_from_user(header, (const void __user *)arg,
1579 sizeof(struct ipa_ioc_query_intf))) {
Amir Levy9659e592016-10-27 18:08:27 +03001580 retval = -EFAULT;
1581 break;
1582 }
1583 if (ipa3_query_intf((struct ipa_ioc_query_intf *)header)) {
1584 retval = -1;
1585 break;
1586 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001587 if (copy_to_user((void __user *)arg, header,
1588 sizeof(struct ipa_ioc_query_intf))) {
Amir Levy9659e592016-10-27 18:08:27 +03001589 retval = -EFAULT;
1590 break;
1591 }
1592 break;
1593 case IPA_IOC_QUERY_INTF_TX_PROPS:
1594 sz = sizeof(struct ipa_ioc_query_intf_tx_props);
Amir Levy479cfdd2017-10-26 12:23:14 +03001595 if (copy_from_user(header, (const void __user *)arg, sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001596 retval = -EFAULT;
1597 break;
1598 }
1599
1600 if (((struct ipa_ioc_query_intf_tx_props *)header)->num_tx_props
Amir Levy479cfdd2017-10-26 12:23:14 +03001601 > IPA_NUM_PROPS_MAX) {
Amir Levy9659e592016-10-27 18:08:27 +03001602 retval = -EFAULT;
1603 break;
1604 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001605 pre_entry =
1606 ((struct ipa_ioc_query_intf_tx_props *)
1607 header)->num_tx_props;
1608 pyld_sz = sz + pre_entry *
Amir Levy9659e592016-10-27 18:08:27 +03001609 sizeof(struct ipa_ioc_tx_intf_prop);
1610 param = kzalloc(pyld_sz, GFP_KERNEL);
1611 if (!param) {
1612 retval = -ENOMEM;
1613 break;
1614 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001615 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001616 retval = -EFAULT;
1617 break;
1618 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001619 /* re-check in case the user-space module was compromised */
1620 if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
1621 param)->num_tx_props
1622 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301623 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001624 ((struct ipa_ioc_query_intf_tx_props *)
1625 param)->num_tx_props, pre_entry);
1626 retval = -EFAULT;
1627 break;
1628 }
Amir Levy9659e592016-10-27 18:08:27 +03001629 if (ipa3_query_intf_tx_props(
Amir Levy479cfdd2017-10-26 12:23:14 +03001630 (struct ipa_ioc_query_intf_tx_props *)param)) {
Amir Levy9659e592016-10-27 18:08:27 +03001631 retval = -1;
1632 break;
1633 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001634 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001635 retval = -EFAULT;
1636 break;
1637 }
1638 break;
1639 case IPA_IOC_QUERY_INTF_RX_PROPS:
1640 sz = sizeof(struct ipa_ioc_query_intf_rx_props);
Amir Levy479cfdd2017-10-26 12:23:14 +03001641 if (copy_from_user(header, (const void __user *)arg, sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001642 retval = -EFAULT;
1643 break;
1644 }
1645
1646 if (((struct ipa_ioc_query_intf_rx_props *)header)->num_rx_props
Amir Levy479cfdd2017-10-26 12:23:14 +03001647 > IPA_NUM_PROPS_MAX) {
Amir Levy9659e592016-10-27 18:08:27 +03001648 retval = -EFAULT;
1649 break;
1650 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001651 pre_entry =
1652 ((struct ipa_ioc_query_intf_rx_props *)
1653 header)->num_rx_props;
1654 pyld_sz = sz + pre_entry *
Amir Levy9659e592016-10-27 18:08:27 +03001655 sizeof(struct ipa_ioc_rx_intf_prop);
1656 param = kzalloc(pyld_sz, GFP_KERNEL);
1657 if (!param) {
1658 retval = -ENOMEM;
1659 break;
1660 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001661 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001662 retval = -EFAULT;
1663 break;
1664 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001665 /* re-check in case the user-space module was compromised */
1666 if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
1667 param)->num_rx_props != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301668 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001669 ((struct ipa_ioc_query_intf_rx_props *)
1670 param)->num_rx_props, pre_entry);
1671 retval = -EFAULT;
1672 break;
1673 }
Amir Levy9659e592016-10-27 18:08:27 +03001674 if (ipa3_query_intf_rx_props(
Amir Levy479cfdd2017-10-26 12:23:14 +03001675 (struct ipa_ioc_query_intf_rx_props *)param)) {
Amir Levy9659e592016-10-27 18:08:27 +03001676 retval = -1;
1677 break;
1678 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001679 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001680 retval = -EFAULT;
1681 break;
1682 }
1683 break;
1684 case IPA_IOC_QUERY_INTF_EXT_PROPS:
1685 sz = sizeof(struct ipa_ioc_query_intf_ext_props);
Amir Levy479cfdd2017-10-26 12:23:14 +03001686 if (copy_from_user(header, (const void __user *)arg, sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001687 retval = -EFAULT;
1688 break;
1689 }
1690
1691 if (((struct ipa_ioc_query_intf_ext_props *)
Amir Levy479cfdd2017-10-26 12:23:14 +03001692 header)->num_ext_props > IPA_NUM_PROPS_MAX) {
Amir Levy9659e592016-10-27 18:08:27 +03001693 retval = -EFAULT;
1694 break;
1695 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001696 pre_entry =
1697 ((struct ipa_ioc_query_intf_ext_props *)
1698 header)->num_ext_props;
1699 pyld_sz = sz + pre_entry *
Amir Levy9659e592016-10-27 18:08:27 +03001700 sizeof(struct ipa_ioc_ext_intf_prop);
1701 param = kzalloc(pyld_sz, GFP_KERNEL);
1702 if (!param) {
1703 retval = -ENOMEM;
1704 break;
1705 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001706 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001707 retval = -EFAULT;
1708 break;
1709 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001710 /* re-check in case the user-space module was compromised */
1711 if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
1712 param)->num_ext_props != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301713 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001714 ((struct ipa_ioc_query_intf_ext_props *)
1715 param)->num_ext_props, pre_entry);
1716 retval = -EFAULT;
1717 break;
1718 }
Amir Levy9659e592016-10-27 18:08:27 +03001719 if (ipa3_query_intf_ext_props(
Amir Levy479cfdd2017-10-26 12:23:14 +03001720 (struct ipa_ioc_query_intf_ext_props *)param)) {
Amir Levy9659e592016-10-27 18:08:27 +03001721 retval = -1;
1722 break;
1723 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001724 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001725 retval = -EFAULT;
1726 break;
1727 }
1728 break;
1729 case IPA_IOC_PULL_MSG:
Amir Levy479cfdd2017-10-26 12:23:14 +03001730 if (copy_from_user(header, (const void __user *)arg,
1731 sizeof(struct ipa_msg_meta))) {
Amir Levy9659e592016-10-27 18:08:27 +03001732 retval = -EFAULT;
1733 break;
1734 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001735 pre_entry =
Amir Levy9659e592016-10-27 18:08:27 +03001736 ((struct ipa_msg_meta *)header)->msg_len;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001737 pyld_sz = sizeof(struct ipa_msg_meta) +
1738 pre_entry;
Amir Levy9659e592016-10-27 18:08:27 +03001739 param = kzalloc(pyld_sz, GFP_KERNEL);
1740 if (!param) {
1741 retval = -ENOMEM;
1742 break;
1743 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001744 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001745 retval = -EFAULT;
1746 break;
1747 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001748 /* re-check in case the user-space module was compromised */
1749 if (unlikely(((struct ipa_msg_meta *)param)->msg_len
1750 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301751 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001752 ((struct ipa_msg_meta *)param)->msg_len,
1753 pre_entry);
1754 retval = -EFAULT;
1755 break;
1756 }
Amir Levy9659e592016-10-27 18:08:27 +03001757 if (ipa3_pull_msg((struct ipa_msg_meta *)param,
Amir Levy479cfdd2017-10-26 12:23:14 +03001758 (char *)param + sizeof(struct ipa_msg_meta),
1759 ((struct ipa_msg_meta *)param)->msg_len) !=
1760 ((struct ipa_msg_meta *)param)->msg_len) {
Amir Levy9659e592016-10-27 18:08:27 +03001761 retval = -1;
1762 break;
1763 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001764 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001765 retval = -EFAULT;
1766 break;
1767 }
1768 break;
1769 case IPA_IOC_RM_ADD_DEPENDENCY:
Michael Adisumarta3e350812017-09-18 14:54:36 -07001770 /* deprecated: no-op when IPA PM is used */
1771 if (ipa3_ctx->use_ipa_pm)
1772 return 0;
1773
Amir Levy479cfdd2017-10-26 12:23:14 +03001774 if (copy_from_user(&rm_depend, (const void __user *)arg,
1775 sizeof(struct ipa_ioc_rm_dependency))) {
Amir Levy9659e592016-10-27 18:08:27 +03001776 retval = -EFAULT;
1777 break;
1778 }
1779 retval = ipa_rm_add_dependency_from_ioctl(
1780 rm_depend.resource_name, rm_depend.depends_on_name);
1781 break;
1782 case IPA_IOC_RM_DEL_DEPENDENCY:
Michael Adisumarta3e350812017-09-18 14:54:36 -07001783 /* deprecated: no-op when IPA PM is used */
1784 if (ipa3_ctx->use_ipa_pm)
1785 return 0;
1786
Amir Levy479cfdd2017-10-26 12:23:14 +03001787 if (copy_from_user(&rm_depend, (const void __user *)arg,
1788 sizeof(struct ipa_ioc_rm_dependency))) {
Amir Levy9659e592016-10-27 18:08:27 +03001789 retval = -EFAULT;
1790 break;
1791 }
1792 retval = ipa_rm_delete_dependency_from_ioctl(
1793 rm_depend.resource_name, rm_depend.depends_on_name);
1794 break;
1795 case IPA_IOC_GENERATE_FLT_EQ:
1796 {
1797 struct ipa_ioc_generate_flt_eq flt_eq;
1798
Amir Levy479cfdd2017-10-26 12:23:14 +03001799 if (copy_from_user(&flt_eq, (const void __user *)arg,
Amir Levy9659e592016-10-27 18:08:27 +03001800 sizeof(struct ipa_ioc_generate_flt_eq))) {
1801 retval = -EFAULT;
1802 break;
1803 }
1804 if (ipahal_flt_generate_equation(flt_eq.ip,
1805 &flt_eq.attrib, &flt_eq.eq_attrib)) {
1806 retval = -EFAULT;
1807 break;
1808 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001809 if (copy_to_user((void __user *)arg, &flt_eq,
Amir Levy9659e592016-10-27 18:08:27 +03001810 sizeof(struct ipa_ioc_generate_flt_eq))) {
1811 retval = -EFAULT;
1812 break;
1813 }
1814 break;
1815 }
1816 case IPA_IOC_QUERY_EP_MAPPING:
1817 {
1818 retval = ipa3_get_ep_mapping(arg);
1819 break;
1820 }
1821 case IPA_IOC_QUERY_RT_TBL_INDEX:
Amir Levy479cfdd2017-10-26 12:23:14 +03001822 if (copy_from_user(header, (const void __user *)arg,
1823 sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
Amir Levy9659e592016-10-27 18:08:27 +03001824 retval = -EFAULT;
1825 break;
1826 }
1827 if (ipa3_query_rt_index(
Amir Levy479cfdd2017-10-26 12:23:14 +03001828 (struct ipa_ioc_get_rt_tbl_indx *)header)) {
Amir Levy9659e592016-10-27 18:08:27 +03001829 retval = -EFAULT;
1830 break;
1831 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001832 if (copy_to_user((void __user *)arg, header,
1833 sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
Amir Levy9659e592016-10-27 18:08:27 +03001834 retval = -EFAULT;
1835 break;
1836 }
1837 break;
1838 case IPA_IOC_WRITE_QMAPID:
Amir Levy479cfdd2017-10-26 12:23:14 +03001839 if (copy_from_user(header, (const void __user *)arg,
1840 sizeof(struct ipa_ioc_write_qmapid))) {
Amir Levy9659e592016-10-27 18:08:27 +03001841 retval = -EFAULT;
1842 break;
1843 }
1844 if (ipa3_write_qmap_id((struct ipa_ioc_write_qmapid *)header)) {
1845 retval = -EFAULT;
1846 break;
1847 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001848 if (copy_to_user((void __user *)arg, header,
1849 sizeof(struct ipa_ioc_write_qmapid))) {
Amir Levy9659e592016-10-27 18:08:27 +03001850 retval = -EFAULT;
1851 break;
1852 }
1853 break;
1854 case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD:
Mohammed Javidb4b5ef42017-08-29 01:05:46 +05301855 retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD, true);
Amir Levy9659e592016-10-27 18:08:27 +03001856 if (retval) {
1857 IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
1858 break;
1859 }
1860 break;
1861 case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL:
Mohammed Javidb4b5ef42017-08-29 01:05:46 +05301862 retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL, true);
Amir Levy9659e592016-10-27 18:08:27 +03001863 if (retval) {
1864 IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
1865 break;
1866 }
1867 break;
1868 case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED:
Mohammed Javidb4b5ef42017-08-29 01:05:46 +05301869 retval = ipa3_send_wan_msg(arg, WAN_EMBMS_CONNECT, false);
Amir Levy9659e592016-10-27 18:08:27 +03001870 if (retval) {
1871 IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
1872 break;
1873 }
1874 break;
1875 case IPA_IOC_ADD_HDR_PROC_CTX:
Amir Levy479cfdd2017-10-26 12:23:14 +03001876 if (copy_from_user(header, (const void __user *)arg,
Amir Levy9659e592016-10-27 18:08:27 +03001877 sizeof(struct ipa_ioc_add_hdr_proc_ctx))) {
1878 retval = -EFAULT;
1879 break;
1880 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001881 pre_entry =
1882 ((struct ipa_ioc_add_hdr_proc_ctx *)
1883 header)->num_proc_ctxs;
Amir Levy9659e592016-10-27 18:08:27 +03001884 pyld_sz =
1885 sizeof(struct ipa_ioc_add_hdr_proc_ctx) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001886 pre_entry * sizeof(struct ipa_hdr_proc_ctx_add);
Amir Levy9659e592016-10-27 18:08:27 +03001887 param = kzalloc(pyld_sz, GFP_KERNEL);
1888 if (!param) {
1889 retval = -ENOMEM;
1890 break;
1891 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001892 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001893 retval = -EFAULT;
1894 break;
1895 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001896 /* re-check in case the user-space module was compromised */
1897 if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
1898 param)->num_proc_ctxs != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301899 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001900 ((struct ipa_ioc_add_hdr_proc_ctx *)
1901 param)->num_proc_ctxs, pre_entry);
1902 retval = -EFAULT;
1903 break;
1904 }
Amir Levy9659e592016-10-27 18:08:27 +03001905 if (ipa3_add_hdr_proc_ctx(
Skylar Chang68c37d82018-04-07 16:42:36 -07001906 (struct ipa_ioc_add_hdr_proc_ctx *)param, true)) {
Amir Levy9659e592016-10-27 18:08:27 +03001907 retval = -EFAULT;
1908 break;
1909 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001910 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001911 retval = -EFAULT;
1912 break;
1913 }
1914 break;
1915 case IPA_IOC_DEL_HDR_PROC_CTX:
Amir Levy479cfdd2017-10-26 12:23:14 +03001916 if (copy_from_user(header, (const void __user *)arg,
Amir Levy9659e592016-10-27 18:08:27 +03001917 sizeof(struct ipa_ioc_del_hdr_proc_ctx))) {
1918 retval = -EFAULT;
1919 break;
1920 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001921 pre_entry =
1922 ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls;
Amir Levy9659e592016-10-27 18:08:27 +03001923 pyld_sz =
1924 sizeof(struct ipa_ioc_del_hdr_proc_ctx) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001925 pre_entry * sizeof(struct ipa_hdr_proc_ctx_del);
Amir Levy9659e592016-10-27 18:08:27 +03001926 param = kzalloc(pyld_sz, GFP_KERNEL);
1927 if (!param) {
1928 retval = -ENOMEM;
1929 break;
1930 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001931 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001932 retval = -EFAULT;
1933 break;
1934 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001935 /* re-check in case the user-space module was compromised */
1936 if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
1937 param)->num_hdls != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301938 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001939 ((struct ipa_ioc_del_hdr_proc_ctx *)param)->
1940 num_hdls,
1941 pre_entry);
1942 retval = -EFAULT;
1943 break;
1944 }
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001945 if (ipa3_del_hdr_proc_ctx_by_user(
1946 (struct ipa_ioc_del_hdr_proc_ctx *)param, true)) {
Amir Levy9659e592016-10-27 18:08:27 +03001947 retval = -EFAULT;
1948 break;
1949 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001950 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001951 retval = -EFAULT;
1952 break;
1953 }
1954 break;
1955
1956 case IPA_IOC_GET_HW_VERSION:
1957 pyld_sz = sizeof(enum ipa_hw_type);
1958 param = kzalloc(pyld_sz, GFP_KERNEL);
1959 if (!param) {
1960 retval = -ENOMEM;
1961 break;
1962 }
1963 memcpy(param, &ipa3_ctx->ipa_hw_type, pyld_sz);
Amir Levy479cfdd2017-10-26 12:23:14 +03001964 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001965 retval = -EFAULT;
1966 break;
1967 }
1968 break;
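	/*
	 * Illustrative user-space usage (an assumption, not part of this
	 * file): the ioctl argument is a pointer to an enum ipa_hw_type
	 * that receives the cached HW type, e.g.
	 *
	 *	enum ipa_hw_type hw;
	 *	if (ioctl(fd, IPA_IOC_GET_HW_VERSION, &hw) == 0)
	 *		printf("IPA HW type %d\n", hw);
	 *
	 * The handler above only copies ipa3_ctx->ipa_hw_type back to the
	 * caller-supplied buffer.
	 */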
1969
Amir Levya5361ab2018-05-01 13:25:37 +03001970 case IPA_IOC_GET_VLAN_MODE:
1971 if (copy_from_user(&vlan_mode, (const void __user *)arg,
1972 sizeof(struct ipa_ioc_get_vlan_mode))) {
1973 retval = -EFAULT;
1974 break;
1975 }
1976 retval = ipa3_is_vlan_mode(
1977 vlan_mode.iface,
1978 &is_vlan_mode);
1979 if (retval)
1980 break;
1981
1982 vlan_mode.is_vlan_mode = is_vlan_mode;
1983
1984 if (copy_to_user((void __user *)arg,
1985 &vlan_mode,
1986 sizeof(struct ipa_ioc_get_vlan_mode))) {
1987 retval = -EFAULT;
1988 break;
1989 }
1990 break;
1991
Shihuan Liuc3174f52017-05-04 15:59:13 -07001992 case IPA_IOC_ADD_VLAN_IFACE:
1993 if (ipa3_send_vlan_l2tp_msg(arg, ADD_VLAN_IFACE)) {
1994 retval = -EFAULT;
1995 break;
1996 }
1997 break;
1998
1999 case IPA_IOC_DEL_VLAN_IFACE:
2000 if (ipa3_send_vlan_l2tp_msg(arg, DEL_VLAN_IFACE)) {
2001 retval = -EFAULT;
2002 break;
2003 }
2004 break;
Amir Levy4f8b4832018-06-05 15:48:03 +03002005 case IPA_IOC_ADD_BRIDGE_VLAN_MAPPING:
2006 if (ipa3_send_vlan_l2tp_msg(arg, ADD_BRIDGE_VLAN_MAPPING)) {
2007 retval = -EFAULT;
2008 break;
2009 }
2010 break;
2011 case IPA_IOC_DEL_BRIDGE_VLAN_MAPPING:
2012 if (ipa3_send_vlan_l2tp_msg(arg, DEL_BRIDGE_VLAN_MAPPING)) {
2013 retval = -EFAULT;
2014 break;
2015 }
2016 break;
Shihuan Liuc3174f52017-05-04 15:59:13 -07002017 case IPA_IOC_ADD_L2TP_VLAN_MAPPING:
2018 if (ipa3_send_vlan_l2tp_msg(arg, ADD_L2TP_VLAN_MAPPING)) {
2019 retval = -EFAULT;
2020 break;
2021 }
2022 break;
2023
2024 case IPA_IOC_DEL_L2TP_VLAN_MAPPING:
2025 if (ipa3_send_vlan_l2tp_msg(arg, DEL_L2TP_VLAN_MAPPING)) {
2026 retval = -EFAULT;
2027 break;
2028 }
2029 break;
2030
Skylar Chang68c37d82018-04-07 16:42:36 -07002031 case IPA_IOC_CLEANUP:
2032 /* Route and filter rules will also be cleaned */
2033 IPADBG("Got IPA_IOC_CLEANUP\n");
2034 retval = ipa3_reset_hdr(true);
2035 memset(&nat_del, 0, sizeof(nat_del));
2036 nat_del.table_index = 0;
2037 retval = ipa3_nat_del_cmd(&nat_del);
2038 retval = ipa3_clean_modem_rule();
2039 break;
2040
2041 case IPA_IOC_QUERY_WLAN_CLIENT:
2042 IPADBG("Got IPA_IOC_QUERY_WLAN_CLIENT\n");
2043 retval = ipa3_resend_wlan_msg();
2044 break;
2045
Mohammed Javida0f23d92018-09-11 10:50:28 +05302046 case IPA_IOC_GSB_CONNECT:
2047 IPADBG("Got IPA_IOC_GSB_CONNECT\n");
2048 if (ipa3_send_gsb_msg(arg, IPA_GSB_CONNECT)) {
2049 retval = -EFAULT;
2050 break;
2051 }
2052 break;
2053
2054 case IPA_IOC_GSB_DISCONNECT:
2055 IPADBG("Got IPA_IOC_GSB_DISCONNECT\n");
2056 if (ipa3_send_gsb_msg(arg, IPA_GSB_DISCONNECT)) {
2057 retval = -EFAULT;
2058 break;
2059 }
2060 break;
2061
Mohammed Javidd636e0c2019-06-13 16:16:59 +05302062 case IPA_IOC_GET_PHERIPHERAL_EP_INFO:
2063 IPADBG("Got IPA_IOC_GET_PHERIPHERAL_EP_INFO\n");
sivakanth reddy vaka2a5a5ee2019-12-08 13:50:56 +05302064 if (!ipa3_ctx->ipa_config_is_auto) {
2065 IPADBG("not an auto config: returning error\n");
2066 retval = -ENOTTY;
2067 break;
2068 }
Mohammed Javidd636e0c2019-06-13 16:16:59 +05302069 if (copy_from_user(&ep_info, (const void __user *)arg,
2070 sizeof(struct ipa_ioc_get_ep_info))) {
2071 IPAERR_RL("copy_from_user fails\n");
2072 retval = -EFAULT;
2073 break;
2074 }
2075
2076 if (ep_info.max_ep_pairs != QUERY_MAX_EP_PAIRS)
2077 IPAERR_RL("unexpected max_ep_pairs %d\n",
2078 ep_info.max_ep_pairs);
2079
2080 if (ep_info.ep_pair_size !=
2081 (QUERY_MAX_EP_PAIRS * sizeof(struct ipa_ep_pair_info)))
2082 IPAERR_RL("unexpected ep_pair_size %d\n",
2083 ep_info.ep_pair_size);
2084
2085 uptr = ep_info.info;
2086 if (unlikely(!uptr)) {
2087 IPAERR_RL("unexpected NULL info\n");
2088 retval = -EFAULT;
2089 break;
2090 }
2091
2092 param = kzalloc(ep_info.ep_pair_size, GFP_KERNEL);
2093 if (!param) {
2094 IPAERR_RL("kzalloc fails\n");
2095 retval = -ENOMEM;
2096 break;
2097 }
2098
2099 retval = ipa3_get_ep_info(&ep_info, param);
2100 if (retval < 0) {
2101 IPAERR("ipa3_get_ep_info failed\n");
2102 retval = -EFAULT;
2103 break;
2104 }
2105
2106 if (copy_to_user((void __user *)uptr, param,
2107 ep_info.ep_pair_size)) {
2108 IPAERR_RL("copy_to_user fails\n");
2109 retval = -EFAULT;
2110 break;
2111 }
2112
2113 if (copy_to_user((void __user *)arg, &ep_info,
2114 sizeof(struct ipa_ioc_get_ep_info))) {
2115 IPAERR_RL("copy_to_user fails\n");
2116 retval = -EFAULT;
2117 break;
2118 }
2119 break;
2120
Amir Levy479cfdd2017-10-26 12:23:14 +03002121 default:
Amir Levy9659e592016-10-27 18:08:27 +03002122 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2123 return -ENOTTY;
2124 }
2125 kfree(param);
2126 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2127
2128 return retval;
2129}
2130
2131/**
Skylar Chang68c37d82018-04-07 16:42:36 -07002132 * ipa3_setup_dflt_rt_tables() - Setup default routing tables
2133 *
2134 * Return codes:
2135 * 0: success
2136 * -ENOMEM: failed to allocate memory
2137 * -EPERM: failed to add the tables
2138 */
Amir Levy9659e592016-10-27 18:08:27 +03002139int ipa3_setup_dflt_rt_tables(void)
2140{
2141 struct ipa_ioc_add_rt_rule *rt_rule;
2142 struct ipa_rt_rule_add *rt_rule_entry;
2143
2144 rt_rule =
Amir Levy479cfdd2017-10-26 12:23:14 +03002145 kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
2146 sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
Amir Levy9659e592016-10-27 18:08:27 +03002147 if (!rt_rule) {
2148 IPAERR("fail to alloc mem\n");
2149 return -ENOMEM;
2150 }
2151 /* setup a default v4 route to point to Apps */
2152 rt_rule->num_rules = 1;
2153 rt_rule->commit = 1;
2154 rt_rule->ip = IPA_IP_v4;
2155 strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
Amir Levy479cfdd2017-10-26 12:23:14 +03002156 IPA_RESOURCE_NAME_MAX);
Amir Levy9659e592016-10-27 18:08:27 +03002157
2158 rt_rule_entry = &rt_rule->rules[0];
2159 rt_rule_entry->at_rear = 1;
2160 rt_rule_entry->rule.dst = IPA_CLIENT_APPS_LAN_CONS;
2161 rt_rule_entry->rule.hdr_hdl = ipa3_ctx->excp_hdr_hdl;
2162 rt_rule_entry->rule.retain_hdr = 1;
2163
2164 if (ipa3_add_rt_rule(rt_rule)) {
2165 IPAERR("fail to add dflt v4 rule\n");
2166 kfree(rt_rule);
2167 return -EPERM;
2168 }
2169 IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
2170 ipa3_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
2171
2172 /* setup a default v6 route to point to A5 */
2173 rt_rule->ip = IPA_IP_v6;
2174 if (ipa3_add_rt_rule(rt_rule)) {
2175 IPAERR("fail to add dflt v6 rule\n");
2176 kfree(rt_rule);
2177 return -EPERM;
2178 }
2179 IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
2180 ipa3_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
2181
2182 /*
2183 * because these tables are the very first to be added, they will both
2184 * have the same index (0) which is essential for programming the
2185 * "route" end-point config
2186 */
2187
2188 kfree(rt_rule);
2189
2190 return 0;
2191}
2192
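/*
 * ipa3_setup_exception_path() - install the default LAN RX exception header
 * and program the route register so that exception and fragmented packets
 * are steered to the APPS LAN consumer pipe.
 */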
2193static int ipa3_setup_exception_path(void)
2194{
2195 struct ipa_ioc_add_hdr *hdr;
2196 struct ipa_hdr_add *hdr_entry;
2197 struct ipahal_reg_route route = { 0 };
2198 int ret;
2199
2200 /* install the basic exception header */
2201 hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
2202 sizeof(struct ipa_hdr_add), GFP_KERNEL);
2203 if (!hdr) {
2204 IPAERR("fail to alloc exception hdr\n");
2205 return -ENOMEM;
2206 }
2207 hdr->num_hdrs = 1;
2208 hdr->commit = 1;
2209 hdr_entry = &hdr->hdr[0];
2210
2211 strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, IPA_RESOURCE_NAME_MAX);
2212 hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH;
2213
2214 if (ipa3_add_hdr(hdr)) {
2215 IPAERR("fail to add exception hdr\n");
2216 ret = -EPERM;
2217 goto bail;
2218 }
2219
2220 if (hdr_entry->status) {
2221 IPAERR("fail to add exception hdr\n");
2222 ret = -EPERM;
2223 goto bail;
2224 }
2225
2226 ipa3_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
2227
2228 /* set the route register to pass exception packets to Apps */
2229 route.route_def_pipe = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
2230 route.route_frag_def_pipe = ipa3_get_ep_mapping(
2231 IPA_CLIENT_APPS_LAN_CONS);
2232 route.route_def_hdr_table = !ipa3_ctx->hdr_tbl_lcl;
2233 route.route_def_retain_hdr = 1;
2234
2235 if (ipa3_cfg_route(&route)) {
2236 IPAERR("fail to add exception hdr\n");
2237 ret = -EPERM;
2238 goto bail;
2239 }
2240
2241 ret = 0;
2242bail:
2243 kfree(hdr);
2244 return ret;
2245}
2246
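/*
 * ipa3_init_smem_region() - zero-initialize a region of IPA local memory by
 * DMA'ing a zeroed system buffer into it with a DMA_SHARED_MEM immediate
 * command. A zero-sized region is silently skipped.
 */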
2247static int ipa3_init_smem_region(int memory_region_size,
2248 int memory_region_offset)
2249{
2250 struct ipahal_imm_cmd_dma_shared_mem cmd;
2251 struct ipahal_imm_cmd_pyld *cmd_pyld;
2252 struct ipa3_desc desc;
2253 struct ipa_mem_buffer mem;
2254 int rc;
2255
2256 if (memory_region_size == 0)
2257 return 0;
2258
2259 memset(&desc, 0, sizeof(desc));
2260 memset(&cmd, 0, sizeof(cmd));
2261 memset(&mem, 0, sizeof(mem));
2262
2263 mem.size = memory_region_size;
2264 mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
2265 &mem.phys_base, GFP_KERNEL);
2266 if (!mem.base) {
2267 IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
2268 return -ENOMEM;
2269 }
2270
2271 memset(mem.base, 0, mem.size);
2272 cmd.is_read = false;
2273 cmd.skip_pipeline_clear = false;
2274 cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
2275 cmd.size = mem.size;
2276 cmd.system_addr = mem.phys_base;
2277 cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
2278 memory_region_offset;
2279 cmd_pyld = ipahal_construct_imm_cmd(
2280 IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
2281 if (!cmd_pyld) {
2282 IPAERR("failed to construct dma_shared_mem imm cmd\n");
2283 return -ENOMEM;
2284 }
Amir Levy479cfdd2017-10-26 12:23:14 +03002285 ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
Amir Levy9659e592016-10-27 18:08:27 +03002286
2287 rc = ipa3_send_cmd(1, &desc);
2288 if (rc) {
2289 IPAERR("failed to send immediate command (error %d)\n", rc);
2290 rc = -EFAULT;
2291 }
2292
2293 ipahal_destroy_imm_cmd(cmd_pyld);
2294 dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
2295 mem.phys_base);
2296
2297 return rc;
2298}
2299
2300/**
Skylar Chang68c37d82018-04-07 16:42:36 -07002301 * ipa3_init_q6_smem() - Initialize Q6 general memory and
2302 * header memory regions in IPA.
2303 *
2304 * Return codes:
2305 * 0: success
2306 * -ENOMEM: failed to allocate dma memory
2307 * -EFAULT: failed to send IPA command to initialize the memory
2308 */
Amir Levy9659e592016-10-27 18:08:27 +03002309int ipa3_init_q6_smem(void)
2310{
2311 int rc;
2312
2313 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2314
2315 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_size),
2316 IPA_MEM_PART(modem_ofst));
2317 if (rc) {
2318 IPAERR("failed to initialize Modem RAM memory\n");
2319 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2320 return rc;
2321 }
2322
2323 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_size),
2324 IPA_MEM_PART(modem_hdr_ofst));
2325 if (rc) {
2326 IPAERR("failed to initialize Modem HDRs RAM memory\n");
2327 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2328 return rc;
2329 }
2330
2331 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_proc_ctx_size),
2332 IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
2333 if (rc) {
2334 IPAERR("failed to initialize Modem proc ctx RAM memory\n");
2335 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2336 return rc;
2337 }
2338
2339 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_comp_decomp_size),
2340 IPA_MEM_PART(modem_comp_decomp_ofst));
2341 if (rc) {
2342 IPAERR("failed to initialize Modem Comp/Decomp RAM memory\n");
2343 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2344 return rc;
2345 }
2346 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2347
2348 return rc;
2349}
2350
2351static void ipa3_destroy_imm(void *user1, int user2)
2352{
2353 ipahal_destroy_imm_cmd(user1);
2354}
2355
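/*
 * ipa3_q6_pipe_delay() - set (@delay == true) or clear the endpoint delay
 * bit in IPA_ENDP_INIT_CTRL_n for every Q6 producer pipe.
 */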
2356static void ipa3_q6_pipe_delay(bool delay)
2357{
2358 int client_idx;
2359 int ep_idx;
2360 struct ipa_ep_cfg_ctrl ep_ctrl;
2361
2362 memset(&ep_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
2363 ep_ctrl.ipa_ep_delay = delay;
2364
2365 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
2366 if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
2367 ep_idx = ipa3_get_ep_mapping(client_idx);
2368 if (ep_idx == -1)
2369 continue;
2370
2371 ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n,
2372 ep_idx, &ep_ctrl);
2373 }
2374 }
2375}
2376
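/*
 * ipa3_q6_avoid_holb() - suspend Q6 consumer pipes (on IPA versions earlier
 * than 4.0, where pipe suspend is supported) and enable head-of-line-blocking
 * drop with a zero timer, so traffic destined to Q6 is dropped instead of
 * stalling the IPA while the modem is down.
 */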
2377static void ipa3_q6_avoid_holb(void)
2378{
2379 int ep_idx;
2380 int client_idx;
2381 struct ipa_ep_cfg_ctrl ep_suspend;
2382 struct ipa_ep_cfg_holb ep_holb;
2383
2384 memset(&ep_suspend, 0, sizeof(ep_suspend));
2385 memset(&ep_holb, 0, sizeof(ep_holb));
2386
2387 ep_suspend.ipa_ep_suspend = true;
2388 ep_holb.tmr_val = 0;
2389 ep_holb.en = 1;
2390
2391 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
2392 if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
2393 ep_idx = ipa3_get_ep_mapping(client_idx);
2394 if (ep_idx == -1)
2395 continue;
2396
Skylar Changde679dc2017-11-21 10:11:34 -08002397 /* from IPA 4.0 pipe suspend is not supported */
2398 if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
2399 ipahal_write_reg_n_fields(
2400 IPA_ENDP_INIT_CTRL_n,
2401 ep_idx, &ep_suspend);
2402
Amir Levy9659e592016-10-27 18:08:27 +03002403 /*
2404 * ipa3_cfg_ep_holb is not used here because we are
2405 * setting HOLB on Q6 pipes, and from APPS perspective
2406 * they are not valid, therefore, the above function
2407 * will fail.
2408 */
2409 ipahal_write_reg_n_fields(
2410 IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
2411 ep_idx, &ep_holb);
2412 ipahal_write_reg_n_fields(
2413 IPA_ENDP_INIT_HOL_BLOCK_EN_n,
2414 ep_idx, &ep_holb);
Amir Levy9659e592016-10-27 18:08:27 +03002415 }
2416 }
2417}
2418
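/*
 * ipa3_halt_q6_gsi_channels() - issue gsi_halt_channel_ee() for every Q6
 * consumer channel (and also every Q6 producer channel when @prod is true),
 * retrying while GSI reports the channel as busy (-GSI_STATUS_AGAIN).
 */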
Michael Adisumarta0090e542018-03-14 10:44:53 -07002419static void ipa3_halt_q6_gsi_channels(bool prod)
Skylar Chang94692c92017-03-01 09:07:11 -08002420{
2421 int ep_idx;
2422 int client_idx;
2423 const struct ipa_gsi_ep_config *gsi_ep_cfg;
Michael Adisumartaf01e9fd2017-08-31 12:23:51 -07002424 int i;
Skylar Chang94692c92017-03-01 09:07:11 -08002425 int ret;
2426 int code = 0;
2427
Michael Adisumarta0090e542018-03-14 10:44:53 -07002428 /* if prod flag is true, then we halt the producer channels also */
Skylar Chang94692c92017-03-01 09:07:11 -08002429 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
Michael Adisumarta0090e542018-03-14 10:44:53 -07002430 if (IPA_CLIENT_IS_Q6_CONS(client_idx)
2431 || (IPA_CLIENT_IS_Q6_PROD(client_idx) && prod)) {
Skylar Chang94692c92017-03-01 09:07:11 -08002432 ep_idx = ipa3_get_ep_mapping(client_idx);
2433 if (ep_idx == -1)
2434 continue;
2435
Skylar Changc1f15312017-05-09 14:14:32 -07002436 gsi_ep_cfg = ipa3_get_gsi_ep_info(client_idx);
Skylar Chang94692c92017-03-01 09:07:11 -08002437 if (!gsi_ep_cfg) {
2438 IPAERR("failed to get GSI config\n");
2439 ipa_assert();
2440 return;
2441 }
2442
2443 ret = gsi_halt_channel_ee(
2444 gsi_ep_cfg->ipa_gsi_chan_num, gsi_ep_cfg->ee,
2445 &code);
Michael Adisumartaf01e9fd2017-08-31 12:23:51 -07002446 for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY &&
2447 ret == -GSI_STATUS_AGAIN; i++) {
2448 IPADBG(
2449 "ch %d ee %d with code %d\n is busy try again",
2450 gsi_ep_cfg->ipa_gsi_chan_num,
2451 gsi_ep_cfg->ee,
2452 code);
2453 usleep_range(IPA_GSI_CHANNEL_HALT_MIN_SLEEP,
2454 IPA_GSI_CHANNEL_HALT_MAX_SLEEP);
2455 ret = gsi_halt_channel_ee(
2456 gsi_ep_cfg->ipa_gsi_chan_num,
2457 gsi_ep_cfg->ee, &code);
2458 }
Skylar Chang94692c92017-03-01 09:07:11 -08002459 if (ret == GSI_STATUS_SUCCESS)
2460 IPADBG("halted gsi ch %d ee %d with code %d\n",
2461 gsi_ep_cfg->ipa_gsi_chan_num,
2462 gsi_ep_cfg->ee,
2463 code);
2464 else
2465 IPAERR("failed to halt ch %d ee %d code %d\n",
2466 gsi_ep_cfg->ipa_gsi_chan_num,
2467 gsi_ep_cfg->ee,
2468 code);
2469 }
2470 }
2471}
2472
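/*
 * ipa3_q6_clean_q6_flt_tbls() - point the filter table header entries of
 * filtering pipes that the AP did not configure at an empty table image,
 * using DMA_SHARED_MEM immediate commands, for the given IP family and rule
 * type (hashable / non-hashable).
 */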
Amir Levy9659e592016-10-27 18:08:27 +03002473static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
2474 enum ipa_rule_type rlt)
2475{
2476 struct ipa3_desc *desc;
2477 struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
2478 struct ipahal_imm_cmd_pyld **cmd_pyld;
2479 int retval = 0;
2480 int pipe_idx;
2481 int flt_idx = 0;
2482 int num_cmds = 0;
2483 int index;
2484 u32 lcl_addr_mem_part;
2485 u32 lcl_hdr_sz;
2486 struct ipa_mem_buffer mem;
2487
2488 IPADBG("Entry\n");
2489
2490 if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
2491 IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
2492 return -EINVAL;
2493 }
2494
2495 /* At most one descriptor per filtering pipe (ep_flt_num tables) */
2496 desc = kcalloc(ipa3_ctx->ep_flt_num, sizeof(struct ipa3_desc),
2497 GFP_KERNEL);
2498 if (!desc) {
2499 IPAERR("failed to allocate memory\n");
2500 return -ENOMEM;
2501 }
2502
2503 cmd_pyld = kcalloc(ipa3_ctx->ep_flt_num,
2504 sizeof(struct ipahal_imm_cmd_pyld *), GFP_KERNEL);
2505 if (!cmd_pyld) {
2506 IPAERR("failed to allocate memory\n");
2507 retval = -ENOMEM;
2508 goto free_desc;
2509 }
2510
2511 if (ip == IPA_IP_v4) {
2512 if (rlt == IPA_RULE_HASHABLE) {
2513 lcl_addr_mem_part = IPA_MEM_PART(v4_flt_hash_ofst);
2514 lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
2515 } else {
2516 lcl_addr_mem_part = IPA_MEM_PART(v4_flt_nhash_ofst);
2517 lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
2518 }
2519 } else {
2520 if (rlt == IPA_RULE_HASHABLE) {
2521 lcl_addr_mem_part = IPA_MEM_PART(v6_flt_hash_ofst);
2522 lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
2523 } else {
2524 lcl_addr_mem_part = IPA_MEM_PART(v6_flt_nhash_ofst);
2525 lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
2526 }
2527 }
2528
2529 retval = ipahal_flt_generate_empty_img(1, lcl_hdr_sz, lcl_hdr_sz,
Amir Levy4dc79be2017-02-01 19:18:35 +02002530 0, &mem, true);
Amir Levy9659e592016-10-27 18:08:27 +03002531 if (retval) {
2532 IPAERR("failed to generate flt single tbl empty img\n");
2533 goto free_cmd_pyld;
2534 }
2535
2536 for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes; pipe_idx++) {
2537 if (!ipa_is_ep_support_flt(pipe_idx))
2538 continue;
2539
2540 /*
2541 * Overwrite the entry of every filtering pipe that the AP did not
2542 * configure: either not valid here or marked skip_ep_cfg.
2543 */
2544 if (!ipa3_ctx->ep[pipe_idx].valid ||
2545 ipa3_ctx->ep[pipe_idx].skip_ep_cfg) {
2546
Amir Levy479cfdd2017-10-26 12:23:14 +03002547 if (num_cmds >= ipa3_ctx->ep_flt_num) {
2548 IPAERR("number of commands is out of range\n");
2549 retval = -ENOBUFS;
2550 goto free_empty_img;
2551 }
2552
Amir Levy9659e592016-10-27 18:08:27 +03002553 cmd.is_read = false;
2554 cmd.skip_pipeline_clear = false;
2555 cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
2556 cmd.size = mem.size;
2557 cmd.system_addr = mem.phys_base;
2558 cmd.local_addr =
2559 ipa3_ctx->smem_restricted_bytes +
2560 lcl_addr_mem_part +
2561 ipahal_get_hw_tbl_hdr_width() +
2562 flt_idx * ipahal_get_hw_tbl_hdr_width();
2563 cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
2564 IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
2565 if (!cmd_pyld[num_cmds]) {
2566 IPAERR("fail construct dma_shared_mem cmd\n");
2567 retval = -ENOMEM;
2568 goto free_empty_img;
2569 }
Amir Levy479cfdd2017-10-26 12:23:14 +03002570 ipa3_init_imm_cmd_desc(&desc[num_cmds],
2571 cmd_pyld[num_cmds]);
2572 ++num_cmds;
Amir Levy9659e592016-10-27 18:08:27 +03002573 }
2574
Amir Levy479cfdd2017-10-26 12:23:14 +03002575 ++flt_idx;
Amir Levy9659e592016-10-27 18:08:27 +03002576 }
2577
2578 IPADBG("Sending %d descriptors for flt tbl clearing\n", num_cmds);
2579 retval = ipa3_send_cmd(num_cmds, desc);
2580 if (retval) {
2581 IPAERR("failed to send immediate command (err %d)\n", retval);
2582 retval = -EFAULT;
2583 }
2584
2585free_empty_img:
2586 ipahal_free_dma_mem(&mem);
2587free_cmd_pyld:
2588 for (index = 0; index < num_cmds; index++)
2589 ipahal_destroy_imm_cmd(cmd_pyld[index]);
2590 kfree(cmd_pyld);
2591free_desc:
2592 kfree(desc);
2593 return retval;
2594}
2595
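/*
 * ipa3_q6_clean_q6_rt_tbls() - overwrite the modem-owned routing table
 * header entries (indices modem_rt_index_lo..modem_rt_index_hi) with an
 * empty table image for the given IP family and rule type.
 */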
2596static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip,
2597 enum ipa_rule_type rlt)
2598{
2599 struct ipa3_desc *desc;
2600 struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
2601 struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
2602 int retval = 0;
2603 u32 modem_rt_index_lo;
2604 u32 modem_rt_index_hi;
2605 u32 lcl_addr_mem_part;
2606 u32 lcl_hdr_sz;
2607 struct ipa_mem_buffer mem;
2608
2609 IPADBG("Entry\n");
2610
2611 if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
2612 IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
2613 return -EINVAL;
2614 }
2615
2616 if (ip == IPA_IP_v4) {
2617 modem_rt_index_lo = IPA_MEM_PART(v4_modem_rt_index_lo);
2618 modem_rt_index_hi = IPA_MEM_PART(v4_modem_rt_index_hi);
2619 if (rlt == IPA_RULE_HASHABLE) {
2620 lcl_addr_mem_part = IPA_MEM_PART(v4_rt_hash_ofst);
2621 lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
2622 } else {
2623 lcl_addr_mem_part = IPA_MEM_PART(v4_rt_nhash_ofst);
2624 lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
2625 }
2626 } else {
2627 modem_rt_index_lo = IPA_MEM_PART(v6_modem_rt_index_lo);
2628 modem_rt_index_hi = IPA_MEM_PART(v6_modem_rt_index_hi);
2629 if (rlt == IPA_RULE_HASHABLE) {
2630 lcl_addr_mem_part = IPA_MEM_PART(v6_rt_hash_ofst);
2631 lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
2632 } else {
2633 lcl_addr_mem_part = IPA_MEM_PART(v6_rt_nhash_ofst);
2634 lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
2635 }
2636 }
2637
2638 retval = ipahal_rt_generate_empty_img(
2639 modem_rt_index_hi - modem_rt_index_lo + 1,
Amir Levy4dc79be2017-02-01 19:18:35 +02002640 lcl_hdr_sz, lcl_hdr_sz, &mem, true);
Amir Levy9659e592016-10-27 18:08:27 +03002641 if (retval) {
2642 IPAERR("fail generate empty rt img\n");
2643 return -ENOMEM;
2644 }
2645
2646 desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
2647 if (!desc) {
2648 IPAERR("failed to allocate memory\n");
retval = -ENOMEM;
2649 goto free_empty_img;
2650 }
2651
2652 cmd.is_read = false;
2653 cmd.skip_pipeline_clear = false;
2654 cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
2655 cmd.size = mem.size;
2656 cmd.system_addr = mem.phys_base;
2657 cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
2658 lcl_addr_mem_part +
2659 modem_rt_index_lo * ipahal_get_hw_tbl_hdr_width();
2660 cmd_pyld = ipahal_construct_imm_cmd(
2661 IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
2662 if (!cmd_pyld) {
2663 IPAERR("failed to construct dma_shared_mem imm cmd\n");
2664 retval = -ENOMEM;
2665 goto free_desc;
2666 }
Amir Levy479cfdd2017-10-26 12:23:14 +03002667 ipa3_init_imm_cmd_desc(desc, cmd_pyld);
Amir Levy9659e592016-10-27 18:08:27 +03002668
2669 IPADBG("Sending 1 descriptor for rt tbl clearing\n");
2670 retval = ipa3_send_cmd(1, desc);
2671 if (retval) {
2672 IPAERR("failed to send immediate command (err %d)\n", retval);
2673 retval = -EFAULT;
2674 }
2675
2676 ipahal_destroy_imm_cmd(cmd_pyld);
2677free_desc:
2678 kfree(desc);
2679free_empty_img:
2680 ipahal_free_dma_mem(&mem);
2681 return retval;
2682}
2683
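/*
 * ipa3_q6_clean_q6_tables() - clean all modem filter and routing tables
 * (v4/v6, hashable and non-hashable) and then flush the H/W rule caches.
 */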
2684static int ipa3_q6_clean_q6_tables(void)
2685{
2686 struct ipa3_desc *desc;
2687 struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
2688 struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
2689 int retval;
2690 struct ipahal_reg_fltrt_hash_flush flush;
2691 struct ipahal_reg_valmask valmask;
2692
2693 IPADBG("Entry\n");
2694
2695
2696 if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
2697 IPAERR("failed to clean q6 flt tbls (v4/hashable)\n");
2698 return -EFAULT;
2699 }
2700 if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
2701 IPAERR("failed to clean q6 flt tbls (v6/hashable)\n");
2702 return -EFAULT;
2703 }
2704 if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
2705 IPAERR("failed to clean q6 flt tbls (v4/non-hashable)\n");
2706 return -EFAULT;
2707 }
2708 if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
2709 IPAERR("failed to clean q6 flt tbls (v6/non-hashable)\n");
2710 return -EFAULT;
2711 }
2712
2713 if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
2714 IPAERR("failed to clean q6 rt tbls (v4/hashable)\n");
2715 return -EFAULT;
2716 }
2717 if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
2718 IPAERR("failed to clean q6 rt tbls (v6/hashable)\n");
2719 return -EFAULT;
2720 }
2721 if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
2722 IPAERR("failed to clean q6 rt tbls (v4/non-hashable)\n");
2723 return -EFAULT;
2724 }
2725 if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
2726 IPAERR("failed to clean q6 rt tbls (v6/non-hashable)\n");
2727 return -EFAULT;
2728 }
2729
2730 /* Flush rules cache */
2731 desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
2732 if (!desc) {
2733 IPAERR("failed to allocate memory\n");
2734 return -ENOMEM;
2735 }
2736
2737 flush.v4_flt = true;
2738 flush.v4_rt = true;
2739 flush.v6_flt = true;
2740 flush.v6_rt = true;
2741 ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
2742 reg_write_cmd.skip_pipeline_clear = false;
2743 reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
2744 reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
2745 reg_write_cmd.value = valmask.val;
2746 reg_write_cmd.value_mask = valmask.mask;
2747 cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
2748 &reg_write_cmd, false);
2749 if (!cmd_pyld) {
2750 IPAERR("fail construct register_write imm cmd\n");
2751 retval = -EFAULT;
2752 goto bail_desc;
2753 }
Amir Levy479cfdd2017-10-26 12:23:14 +03002754 ipa3_init_imm_cmd_desc(desc, cmd_pyld);
Amir Levy9659e592016-10-27 18:08:27 +03002755
2756 IPADBG("Sending 1 descriptor for tbls flush\n");
2757 retval = ipa3_send_cmd(1, desc);
2758 if (retval) {
2759 IPAERR("failed to send immediate command (err %d)\n", retval);
2760 retval = -EFAULT;
2761 }
2762
2763 ipahal_destroy_imm_cmd(cmd_pyld);
2764
2765bail_desc:
2766 kfree(desc);
2767 IPADBG("Done - retval = %d\n", retval);
2768 return retval;
2769}
2770
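/*
 * ipa3_q6_set_ex_path_to_apps() - disable per-endpoint status generation
 * (IPA_ENDP_STATUS_n = 0) on modem-controlled and unconfigured pipes via
 * REGISTER_WRITE immediate commands, executed through an IPA TAG process so
 * that completion can be waited on.
 */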
2771static int ipa3_q6_set_ex_path_to_apps(void)
2772{
2773 int ep_idx;
2774 int client_idx;
2775 struct ipa3_desc *desc;
2776 int num_descs = 0;
2777 int index;
2778 struct ipahal_imm_cmd_register_write reg_write;
2779 struct ipahal_imm_cmd_pyld *cmd_pyld;
2780 int retval;
Amir Levy9659e592016-10-27 18:08:27 +03002781
2782 desc = kcalloc(ipa3_ctx->ipa_num_pipes, sizeof(struct ipa3_desc),
2783 GFP_KERNEL);
2784 if (!desc) {
2785 IPAERR("failed to allocate memory\n");
2786 return -ENOMEM;
2787 }
2788
2789 /* Set the exception path to AP */
2790 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
2791 ep_idx = ipa3_get_ep_mapping(client_idx);
Michael Adisumarta74b05d92019-11-29 01:10:52 -08002792 if (ep_idx == -1 || (ep_idx >= IPA3_MAX_NUM_PIPES))
Amir Levy9659e592016-10-27 18:08:27 +03002793 continue;
2794
Skylar Chang53137112017-05-12 17:13:13 -07002795 /* disable statuses for all modem controlled prod pipes */
2796 if (IPA_CLIENT_IS_Q6_PROD(client_idx) ||
2797 (ipa3_ctx->ep[ep_idx].valid &&
Skylar Changd8d8b432018-06-15 10:39:10 -07002798 ipa3_ctx->ep[ep_idx].skip_ep_cfg) ||
2799 (ipa3_ctx->ep[ep_idx].client == IPA_CLIENT_APPS_WAN_PROD
2800 && ipa3_ctx->modem_cfg_emb_pipe_flt)) {
Amir Levy5807be32017-04-19 14:35:12 +03002801 ipa_assert_on(num_descs >= ipa3_ctx->ipa_num_pipes);
2802
Skylar Changd8d8b432018-06-15 10:39:10 -07002803 ipa3_ctx->ep[ep_idx].status.status_en = false;
Amir Levy5807be32017-04-19 14:35:12 +03002804 reg_write.skip_pipeline_clear = false;
2805 reg_write.pipeline_clear_options =
2806 IPAHAL_HPS_CLEAR;
2807 reg_write.offset =
2808 ipahal_get_reg_n_ofst(IPA_ENDP_STATUS_n,
2809 ep_idx);
2810 reg_write.value = 0;
2811 reg_write.value_mask = ~0;
2812 cmd_pyld = ipahal_construct_imm_cmd(
2813 IPA_IMM_CMD_REGISTER_WRITE, &reg_write, false);
2814 if (!cmd_pyld) {
2815 IPAERR("fail construct register_write cmd\n");
2816 ipa_assert();
2817 return -EFAULT;
2818 }
2819
Amir Levy479cfdd2017-10-26 12:23:14 +03002820 ipa3_init_imm_cmd_desc(&desc[num_descs], cmd_pyld);
Amir Levy5807be32017-04-19 14:35:12 +03002821 desc[num_descs].callback = ipa3_destroy_imm;
2822 desc[num_descs].user1 = cmd_pyld;
Amir Levy479cfdd2017-10-26 12:23:14 +03002823 ++num_descs;
Amir Levy5807be32017-04-19 14:35:12 +03002824 }
Amir Levy9659e592016-10-27 18:08:27 +03002825 }
2826
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002827 /* Will wait 500msecs for IPA tag process completion */
Amir Levy9659e592016-10-27 18:08:27 +03002828 retval = ipa3_tag_process(desc, num_descs,
2829 msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT));
2830 if (retval) {
2831 IPAERR("TAG process failed! (error %d)\n", retval);
2832 /* For timeout error ipa3_destroy_imm cb will destroy user1 */
2833 if (retval != -ETIME) {
2834 for (index = 0; index < num_descs; index++)
2835 if (desc[index].callback)
2836 desc[index].callback(desc[index].user1,
2837 desc[index].user2);
2838 retval = -EINVAL;
2839 }
2840 }
2841
2842 kfree(desc);
2843
2844 return retval;
2845}
2846
2847/**
Skylar Chang68c37d82018-04-07 16:42:36 -07002848 * ipa3_q6_pre_shutdown_cleanup() - A cleanup for all Q6 related configuration
2849 * in IPA HW. This is performed in case of SSR.
2850 *
2851 * This is a mandatory procedure, in case one of the steps fails, the
2852 * AP needs to restart.
2853 */
Amir Levy9659e592016-10-27 18:08:27 +03002854void ipa3_q6_pre_shutdown_cleanup(void)
2855{
2856 IPADBG_LOW("ENTER\n");
2857
2858 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2859
2860 ipa3_q6_pipe_delay(true);
2861 ipa3_q6_avoid_holb();
Mohammed Javidf109cf62019-07-02 13:16:54 +05302862 if (ipa3_ctx->ipa_config_is_mhi) {
Mohammed Javidd53feb82018-07-19 20:16:39 +05302863 ipa3_set_reset_client_cons_pipe_sus_holb(true,
2864 IPA_CLIENT_MHI_CONS);
Mohammed Javidf109cf62019-07-02 13:16:54 +05302865 if (ipa3_ctx->ipa_config_is_auto)
2866 ipa3_set_reset_client_cons_pipe_sus_holb(true,
2867 IPA_CLIENT_MHI2_CONS);
2868 }
2869
Amir Levy9659e592016-10-27 18:08:27 +03002870 if (ipa3_q6_clean_q6_tables()) {
2871 IPAERR("Failed to clean Q6 tables\n");
2872 BUG();
2873 }
2874 if (ipa3_q6_set_ex_path_to_apps()) {
2875 IPAERR("Failed to redirect exceptions to APPS\n");
2876 BUG();
2877 }
2878 /* Remove delay from Q6 PRODs to avoid pending descriptors
Skylar Chang68c37d82018-04-07 16:42:36 -07002879 * during the pipe reset procedure
2880 */
Amir Levy9659e592016-10-27 18:08:27 +03002881 ipa3_q6_pipe_delay(false);
Mohammed Javidd53feb82018-07-19 20:16:39 +05302882 ipa3_set_reset_client_prod_pipe_delay(true,
2883 IPA_CLIENT_USB_PROD);
Mohammed Javida617b262018-03-19 16:55:00 +05302884
Amir Levy9659e592016-10-27 18:08:27 +03002885 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2886 IPADBG_LOW("Exit with success\n");
2887}
2888
2889/*
2890 * ipa3_q6_post_shutdown_cleanup() - As part of this cleanup
2891 * check if GSI channel related to Q6 producer client is empty.
2892 *
2893 * Q6 GSI channel emptiness is needed to guarantee no descriptors with invalid
2894 * info are injected into IPA RX from IPA_IF, while modem is restarting.
2895 */
2896void ipa3_q6_post_shutdown_cleanup(void)
2897{
2898 int client_idx;
Skylar Changc1f15312017-05-09 14:14:32 -07002899 int ep_idx;
Michael Adisumarta0090e542018-03-14 10:44:53 -07002900 bool prod = false;
Amir Levy9659e592016-10-27 18:08:27 +03002901
2902 IPADBG_LOW("ENTER\n");
Amir Levy9659e592016-10-27 18:08:27 +03002903
2904 if (!ipa3_ctx->uc_ctx.uc_loaded) {
2905 IPAERR("uC is not loaded. Skipping\n");
2906 return;
2907 }
2908
Skylar Chang94692c92017-03-01 09:07:11 -08002909 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2910
2911 /* Handle the issue where SUSPEND was removed for some reason */
2912 ipa3_q6_avoid_holb();
Michael Adisumarta0090e542018-03-14 10:44:53 -07002913
2914 /* halt both prod and cons channels starting at IPAv4 */
2915 if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
2916 prod = true;
2917 ipa3_halt_q6_gsi_channels(prod);
2918 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2919 IPADBG("Exit without consumer check\n");
2920 return;
2921 }
2922
2923 ipa3_halt_q6_gsi_channels(prod);
Skylar Chang94692c92017-03-01 09:07:11 -08002924
Amir Levy9659e592016-10-27 18:08:27 +03002925 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
2926 if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
Skylar Changc1f15312017-05-09 14:14:32 -07002927 ep_idx = ipa3_get_ep_mapping(client_idx);
2928 if (ep_idx == -1)
2929 continue;
2930
Amir Levy9659e592016-10-27 18:08:27 +03002931 if (ipa3_uc_is_gsi_channel_empty(client_idx)) {
2932 IPAERR("fail to validate Q6 ch emptiness %d\n",
2933 client_idx);
2934 BUG();
2935 return;
2936 }
2937 }
2938
2939 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2940 IPADBG_LOW("Exit with success\n");
2941}
2942
Ashok Vuyyuru27ede172019-01-28 15:35:55 +05302943/**
2944 * ipa3_q6_pre_powerup_cleanup() - A cleanup routine for peripheral
2945 * configuration in IPA HW. This is performed in case of SSR.
2946 *
2947 * This is a mandatory procedure, in case one of the steps fails, the
2948 * AP needs to restart.
2949 */
2950void ipa3_q6_pre_powerup_cleanup(void)
2951{
2952 IPADBG_LOW("ENTER\n");
2953
2954 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2955
2956 if (ipa3_ctx->ipa_config_is_auto)
2957 ipa3_set_reset_client_prod_pipe_delay(true,
2958 IPA_CLIENT_USB2_PROD);
2959 if (ipa3_ctx->ipa_config_is_mhi) {
2960 ipa3_set_reset_client_prod_pipe_delay(true,
2961 IPA_CLIENT_MHI_PROD);
2962 if (ipa3_ctx->ipa_config_is_auto)
2963 ipa3_set_reset_client_prod_pipe_delay(true,
2964 IPA_CLIENT_MHI2_PROD);
2965 }
2966
2967 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2968 IPADBG_LOW("Exit with success\n");
2969}
2970
Amir Levy9659e592016-10-27 18:08:27 +03002971static inline void ipa3_sram_set_canary(u32 *sram_mmio, int offset)
2972{
2973 /* Set 4 bytes of CANARY before the offset */
2974 sram_mmio[(offset - 4) / 4] = IPA_MEM_CANARY_VAL;
2975}
2976
2977/**
Amir Levy9fadeca2017-04-25 10:18:32 +03002978 * _ipa_init_sram_v3() - Initialize IPA local SRAM.
Amir Levy9659e592016-10-27 18:08:27 +03002979 *
2980 * Return codes: 0 for success, negative value for failure
2981 */
Amir Levy9fadeca2017-04-25 10:18:32 +03002982int _ipa_init_sram_v3(void)
Amir Levy9659e592016-10-27 18:08:27 +03002983{
2984 u32 *ipa_sram_mmio;
2985 unsigned long phys_addr;
2986
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04002987 IPADBG(
2988 "ipa_wrapper_base(0x%08X) ipa_reg_base_ofst(0x%08X) IPA_SRAM_DIRECT_ACCESS_n(0x%08X) smem_restricted_bytes(0x%08X) smem_sz(0x%08X)\n",
2989 ipa3_ctx->ipa_wrapper_base,
2990 ipa3_ctx->ctrl->ipa_reg_base_ofst,
2991 ipahal_get_reg_n_ofst(
2992 IPA_SRAM_DIRECT_ACCESS_n,
2993 ipa3_ctx->smem_restricted_bytes / 4),
2994 ipa3_ctx->smem_restricted_bytes,
2995 ipa3_ctx->smem_sz);
2996
Amir Levy9659e592016-10-27 18:08:27 +03002997 phys_addr = ipa3_ctx->ipa_wrapper_base +
2998 ipa3_ctx->ctrl->ipa_reg_base_ofst +
2999 ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
3000 ipa3_ctx->smem_restricted_bytes / 4);
3001
3002 ipa_sram_mmio = ioremap(phys_addr, ipa3_ctx->smem_sz);
3003 if (!ipa_sram_mmio) {
3004 IPAERR("fail to ioremap IPA SRAM\n");
3005 return -ENOMEM;
3006 }
3007
3008 /* Consult with ipa_i.h on the location of the CANARY values */
3009 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst) - 4);
3010 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst));
3011 ipa3_sram_set_canary(ipa_sram_mmio,
3012 IPA_MEM_PART(v4_flt_nhash_ofst) - 4);
3013 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_nhash_ofst));
3014 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst) - 4);
3015 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst));
3016 ipa3_sram_set_canary(ipa_sram_mmio,
3017 IPA_MEM_PART(v6_flt_nhash_ofst) - 4);
3018 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_nhash_ofst));
3019 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst) - 4);
3020 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst));
3021 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst) - 4);
3022 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst));
3023 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst) - 4);
3024 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst));
3025 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst) - 4);
3026 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst));
3027 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst) - 4);
3028 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst));
3029 ipa3_sram_set_canary(ipa_sram_mmio,
3030 IPA_MEM_PART(modem_hdr_proc_ctx_ofst) - 4);
3031 ipa3_sram_set_canary(ipa_sram_mmio,
3032 IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
3033 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst) - 4);
3034 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst));
Amir Levy9fadeca2017-04-25 10:18:32 +03003035 ipa3_sram_set_canary(ipa_sram_mmio,
3036 (ipa_get_hw_type() >= IPA_HW_v3_5) ?
3037 IPA_MEM_PART(uc_event_ring_ofst) :
3038 IPA_MEM_PART(end_ofst));
Amir Levy9659e592016-10-27 18:08:27 +03003039
3040 iounmap(ipa_sram_mmio);
3041
3042 return 0;
3043}
3044
3045/**
3046 * _ipa_init_hdr_v3_0() - Initialize IPA header block.
3047 *
3048 * Return codes: 0 for success, negative value for failure
3049 */
3050int _ipa_init_hdr_v3_0(void)
3051{
Amir Levy479cfdd2017-10-26 12:23:14 +03003052 struct ipa3_desc desc;
Amir Levy9659e592016-10-27 18:08:27 +03003053 struct ipa_mem_buffer mem;
3054 struct ipahal_imm_cmd_hdr_init_local cmd = {0};
3055 struct ipahal_imm_cmd_pyld *cmd_pyld;
3056 struct ipahal_imm_cmd_dma_shared_mem dma_cmd = { 0 };
3057
3058 mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
3059 mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
3060 GFP_KERNEL);
3061 if (!mem.base) {
3062 IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
3063 return -ENOMEM;
3064 }
3065 memset(mem.base, 0, mem.size);
3066
3067 cmd.hdr_table_addr = mem.phys_base;
3068 cmd.size_hdr_table = mem.size;
3069 cmd.hdr_addr = ipa3_ctx->smem_restricted_bytes +
3070 IPA_MEM_PART(modem_hdr_ofst);
3071 cmd_pyld = ipahal_construct_imm_cmd(
3072 IPA_IMM_CMD_HDR_INIT_LOCAL, &cmd, false);
3073 if (!cmd_pyld) {
3074 IPAERR("fail to construct hdr_init_local imm cmd\n");
3075 dma_free_coherent(ipa3_ctx->pdev,
3076 mem.size, mem.base,
3077 mem.phys_base);
3078 return -EFAULT;
3079 }
Amir Levy479cfdd2017-10-26 12:23:14 +03003080 ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
Amir Levy9659e592016-10-27 18:08:27 +03003081 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
3082
3083 if (ipa3_send_cmd(1, &desc)) {
3084 IPAERR("fail to send immediate command\n");
3085 ipahal_destroy_imm_cmd(cmd_pyld);
3086 dma_free_coherent(ipa3_ctx->pdev,
3087 mem.size, mem.base,
3088 mem.phys_base);
3089 return -EFAULT;
3090 }
3091
3092 ipahal_destroy_imm_cmd(cmd_pyld);
3093 dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
3094
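	/*
	 * Next, zero the header processing-context area via a DMA_SHARED_MEM
	 * command and program IPA_LOCAL_PKT_PROC_CNTXT_BASE with its local
	 * offset.
	 */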
3095 mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) +
3096 IPA_MEM_PART(apps_hdr_proc_ctx_size);
3097 mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
3098 GFP_KERNEL);
3099 if (!mem.base) {
3100 IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
3101 return -ENOMEM;
3102 }
3103 memset(mem.base, 0, mem.size);
Amir Levy9659e592016-10-27 18:08:27 +03003104
3105 dma_cmd.is_read = false;
3106 dma_cmd.skip_pipeline_clear = false;
3107 dma_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
3108 dma_cmd.system_addr = mem.phys_base;
3109 dma_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
3110 IPA_MEM_PART(modem_hdr_proc_ctx_ofst);
3111 dma_cmd.size = mem.size;
3112 cmd_pyld = ipahal_construct_imm_cmd(
3113 IPA_IMM_CMD_DMA_SHARED_MEM, &dma_cmd, false);
3114 if (!cmd_pyld) {
3115 IPAERR("fail to construct dma_shared_mem imm\n");
3116 dma_free_coherent(ipa3_ctx->pdev,
3117 mem.size, mem.base,
3118 mem.phys_base);
3119 return -EFAULT;
3120 }
Amir Levy479cfdd2017-10-26 12:23:14 +03003121 ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
Amir Levy9659e592016-10-27 18:08:27 +03003122 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
3123
3124 if (ipa3_send_cmd(1, &desc)) {
3125 IPAERR("fail to send immediate command\n");
3126 ipahal_destroy_imm_cmd(cmd_pyld);
3127 dma_free_coherent(ipa3_ctx->pdev,
3128 mem.size,
3129 mem.base,
3130 mem.phys_base);
3131 return -EFAULT;
3132 }
3133 ipahal_destroy_imm_cmd(cmd_pyld);
3134
3135 ipahal_write_reg(IPA_LOCAL_PKT_PROC_CNTXT_BASE, dma_cmd.local_addr);
3136
3137 dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
3138
3139 return 0;
3140}
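/*
 * The table-init helpers below (_ipa_init_rt4_v3/_ipa_init_rt6_v3 and
 * _ipa_init_flt4_v3/_ipa_init_flt6_v3) follow the same immediate-command
 * flow used above for the header block:
 *
 *   1. Stage an empty table image in a DMA-able buffer
 *      (ipahal_rt_generate_empty_img() / ipahal_flt_generate_empty_img()).
 *   2. Build the matching ipahal immediate command, pointing the hashable
 *      and non-hashable copies at their SRAM offsets.
 *   3. Wrap the payload with ipa3_init_imm_cmd_desc() and push it via
 *      ipa3_send_cmd().
 *   4. Destroy the command payload and free the staging buffer.
 */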
3141
3142/**
3143 * _ipa_init_rt4_v3() - Initialize IPA routing block for IPv4.
3144 *
3145 * Return codes: 0 for success, negative value for failure
3146 */
3147int _ipa_init_rt4_v3(void)
3148{
Amir Levy479cfdd2017-10-26 12:23:14 +03003149 struct ipa3_desc desc;
Amir Levy9659e592016-10-27 18:08:27 +03003150 struct ipa_mem_buffer mem;
3151 struct ipahal_imm_cmd_ip_v4_routing_init v4_cmd;
3152 struct ipahal_imm_cmd_pyld *cmd_pyld;
3153 int i;
3154 int rc = 0;
3155
3156 for (i = IPA_MEM_PART(v4_modem_rt_index_lo);
3157 i <= IPA_MEM_PART(v4_modem_rt_index_hi);
3158 i++)
3159 ipa3_ctx->rt_idx_bitmap[IPA_IP_v4] |= (1 << i);
3160 IPADBG("v4 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v4]);
3161
3162 rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v4_rt_num_index),
3163 IPA_MEM_PART(v4_rt_hash_size), IPA_MEM_PART(v4_rt_nhash_size),
Amir Levy4dc79be2017-02-01 19:18:35 +02003164 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03003165 if (rc) {
3166 IPAERR("fail generate empty v4 rt img\n");
3167 return rc;
3168 }
3169
3170 v4_cmd.hash_rules_addr = mem.phys_base;
3171 v4_cmd.hash_rules_size = mem.size;
3172 v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
3173 IPA_MEM_PART(v4_rt_hash_ofst);
3174 v4_cmd.nhash_rules_addr = mem.phys_base;
3175 v4_cmd.nhash_rules_size = mem.size;
3176 v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
3177 IPA_MEM_PART(v4_rt_nhash_ofst);
3178 IPADBG("putting hashable routing IPv4 rules to phys 0x%x\n",
3179 v4_cmd.hash_local_addr);
3180 IPADBG("putting non-hashable routing IPv4 rules to phys 0x%x\n",
3181 v4_cmd.nhash_local_addr);
3182 cmd_pyld = ipahal_construct_imm_cmd(
3183 IPA_IMM_CMD_IP_V4_ROUTING_INIT, &v4_cmd, false);
3184 if (!cmd_pyld) {
3185 IPAERR("fail construct ip_v4_rt_init imm cmd\n");
3186 rc = -EPERM;
3187 goto free_mem;
3188 }
3189
Amir Levy479cfdd2017-10-26 12:23:14 +03003190 ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
Amir Levy9659e592016-10-27 18:08:27 +03003191 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
3192
3193 if (ipa3_send_cmd(1, &desc)) {
3194 IPAERR("fail to send immediate command\n");
3195 rc = -EFAULT;
3196 }
3197
3198 ipahal_destroy_imm_cmd(cmd_pyld);
3199
3200free_mem:
3201 ipahal_free_dma_mem(&mem);
3202 return rc;
3203}
3204
3205/**
3206 * _ipa_init_rt6_v3() - Initialize IPA routing block for IPv6.
3207 *
3208 * Return codes: 0 for success, negative value for failure
3209 */
3210int _ipa_init_rt6_v3(void)
3211{
Amir Levy479cfdd2017-10-26 12:23:14 +03003212 struct ipa3_desc desc;
Amir Levy9659e592016-10-27 18:08:27 +03003213 struct ipa_mem_buffer mem;
3214 struct ipahal_imm_cmd_ip_v6_routing_init v6_cmd;
3215 struct ipahal_imm_cmd_pyld *cmd_pyld;
3216 int i;
3217 int rc = 0;
3218
3219 for (i = IPA_MEM_PART(v6_modem_rt_index_lo);
3220 i <= IPA_MEM_PART(v6_modem_rt_index_hi);
3221 i++)
3222 ipa3_ctx->rt_idx_bitmap[IPA_IP_v6] |= (1 << i);
3223 IPADBG("v6 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v6]);
3224
3225 rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v6_rt_num_index),
3226 IPA_MEM_PART(v6_rt_hash_size), IPA_MEM_PART(v6_rt_nhash_size),
Amir Levy4dc79be2017-02-01 19:18:35 +02003227 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03003228 if (rc) {
3229 IPAERR("fail generate empty v6 rt img\n");
3230 return rc;
3231 }
3232
3233 v6_cmd.hash_rules_addr = mem.phys_base;
3234 v6_cmd.hash_rules_size = mem.size;
3235 v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
3236 IPA_MEM_PART(v6_rt_hash_ofst);
3237 v6_cmd.nhash_rules_addr = mem.phys_base;
3238 v6_cmd.nhash_rules_size = mem.size;
3239 v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
3240 IPA_MEM_PART(v6_rt_nhash_ofst);
3241 IPADBG("putting hashable routing IPv6 rules to phys 0x%x\n",
3242 v6_cmd.hash_local_addr);
3243 IPADBG("putting non-hashable routing IPv6 rules to phys 0x%x\n",
3244 v6_cmd.nhash_local_addr);
3245 cmd_pyld = ipahal_construct_imm_cmd(
3246 IPA_IMM_CMD_IP_V6_ROUTING_INIT, &v6_cmd, false);
3247 if (!cmd_pyld) {
3248 IPAERR("fail construct ip_v6_rt_init imm cmd\n");
3249 rc = -EPERM;
3250 goto free_mem;
3251 }
3252
Amir Levy479cfdd2017-10-26 12:23:14 +03003253 ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
Amir Levy9659e592016-10-27 18:08:27 +03003254 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
3255
3256 if (ipa3_send_cmd(1, &desc)) {
3257 IPAERR("fail to send immediate command\n");
3258 rc = -EFAULT;
3259 }
3260
3261 ipahal_destroy_imm_cmd(cmd_pyld);
3262
3263free_mem:
3264 ipahal_free_dma_mem(&mem);
3265 return rc;
3266}
3267
3268/**
3269 * _ipa_init_flt4_v3() - Initialize IPA filtering block for IPv4.
3270 *
3271 * Return codes: 0 for success, negative value for failure
3272 */
3273int _ipa_init_flt4_v3(void)
3274{
Amir Levy479cfdd2017-10-26 12:23:14 +03003275 struct ipa3_desc desc;
Amir Levy9659e592016-10-27 18:08:27 +03003276 struct ipa_mem_buffer mem;
3277 struct ipahal_imm_cmd_ip_v4_filter_init v4_cmd;
3278 struct ipahal_imm_cmd_pyld *cmd_pyld;
3279 int rc;
3280
3281 rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
3282 IPA_MEM_PART(v4_flt_hash_size),
3283 IPA_MEM_PART(v4_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
Amir Levy4dc79be2017-02-01 19:18:35 +02003284 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03003285 if (rc) {
3286 IPAERR("fail generate empty v4 flt img\n");
3287 return rc;
3288 }
3289
3290 v4_cmd.hash_rules_addr = mem.phys_base;
3291 v4_cmd.hash_rules_size = mem.size;
3292 v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
3293 IPA_MEM_PART(v4_flt_hash_ofst);
3294 v4_cmd.nhash_rules_addr = mem.phys_base;
3295 v4_cmd.nhash_rules_size = mem.size;
3296 v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
3297 IPA_MEM_PART(v4_flt_nhash_ofst);
3298 IPADBG("putting hashable filtering IPv4 rules to phys 0x%x\n",
3299 v4_cmd.hash_local_addr);
3300 IPADBG("putting non-hashable filtering IPv4 rules to phys 0x%x\n",
3301 v4_cmd.nhash_local_addr);
3302 cmd_pyld = ipahal_construct_imm_cmd(
3303 IPA_IMM_CMD_IP_V4_FILTER_INIT, &v4_cmd, false);
3304 if (!cmd_pyld) {
3305 IPAERR("fail construct ip_v4_flt_init imm cmd\n");
3306 rc = -EPERM;
3307 goto free_mem;
3308 }
3309
Amir Levy479cfdd2017-10-26 12:23:14 +03003310 ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
Amir Levy9659e592016-10-27 18:08:27 +03003311 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
3312
3313 if (ipa3_send_cmd(1, &desc)) {
3314 IPAERR("fail to send immediate command\n");
3315 rc = -EFAULT;
3316 }
3317
3318 ipahal_destroy_imm_cmd(cmd_pyld);
3319
3320free_mem:
3321 ipahal_free_dma_mem(&mem);
3322 return rc;
3323}
3324
3325/**
3326 * _ipa_init_flt6_v3() - Initialize IPA filtering block for IPv6.
3327 *
3328 * Return codes: 0 for success, negative value for failure
3329 */
3330int _ipa_init_flt6_v3(void)
3331{
Amir Levy479cfdd2017-10-26 12:23:14 +03003332 struct ipa3_desc desc;
Amir Levy9659e592016-10-27 18:08:27 +03003333 struct ipa_mem_buffer mem;
3334 struct ipahal_imm_cmd_ip_v6_filter_init v6_cmd;
3335 struct ipahal_imm_cmd_pyld *cmd_pyld;
3336 int rc;
3337
3338 rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
3339 IPA_MEM_PART(v6_flt_hash_size),
3340 IPA_MEM_PART(v6_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
Amir Levy4dc79be2017-02-01 19:18:35 +02003341 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03003342 if (rc) {
3343 IPAERR("fail generate empty v6 flt img\n");
3344 return rc;
3345 }
3346
3347 v6_cmd.hash_rules_addr = mem.phys_base;
3348 v6_cmd.hash_rules_size = mem.size;
3349 v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
3350 IPA_MEM_PART(v6_flt_hash_ofst);
3351 v6_cmd.nhash_rules_addr = mem.phys_base;
3352 v6_cmd.nhash_rules_size = mem.size;
3353 v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
3354 IPA_MEM_PART(v6_flt_nhash_ofst);
3355 IPADBG("putting hashable filtering IPv6 rules to phys 0x%x\n",
3356 v6_cmd.hash_local_addr);
3357 IPADBG("putting non-hashable filtering IPv6 rules to phys 0x%x\n",
3358 v6_cmd.nhash_local_addr);
3359
3360 cmd_pyld = ipahal_construct_imm_cmd(
3361 IPA_IMM_CMD_IP_V6_FILTER_INIT, &v6_cmd, false);
3362 if (!cmd_pyld) {
3363 IPAERR("fail construct ip_v6_flt_init imm cmd\n");
3364 rc = -EPERM;
3365 goto free_mem;
3366 }
3367
Amir Levy479cfdd2017-10-26 12:23:14 +03003368 ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
Amir Levy9659e592016-10-27 18:08:27 +03003369 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
3370
3371 if (ipa3_send_cmd(1, &desc)) {
3372 IPAERR("fail to send immediate command\n");
3373 rc = -EFAULT;
3374 }
3375
3376 ipahal_destroy_imm_cmd(cmd_pyld);
3377
3378free_mem:
3379 ipahal_free_dma_mem(&mem);
3380 return rc;
3381}
3382
3383static int ipa3_setup_flt_hash_tuple(void)
3384{
3385 int pipe_idx;
3386 struct ipahal_reg_hash_tuple tuple;
3387
3388 memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
3389
3390 for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes ; pipe_idx++) {
3391 if (!ipa_is_ep_support_flt(pipe_idx))
3392 continue;
3393
3394 if (ipa_is_modem_pipe(pipe_idx))
3395 continue;
3396
3397 if (ipa3_set_flt_tuple_mask(pipe_idx, &tuple)) {
3398 IPAERR("failed to setup pipe %d flt tuple\n", pipe_idx);
3399 return -EFAULT;
3400 }
3401 }
3402
3403 return 0;
3404}
3405
3406static int ipa3_setup_rt_hash_tuple(void)
3407{
3408 int tbl_idx;
3409 struct ipahal_reg_hash_tuple tuple;
3410
3411 memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
3412
3413 for (tbl_idx = 0;
3414 tbl_idx < max(IPA_MEM_PART(v6_rt_num_index),
3415 IPA_MEM_PART(v4_rt_num_index));
3416 tbl_idx++) {
3417
3418 if (tbl_idx >= IPA_MEM_PART(v4_modem_rt_index_lo) &&
3419 tbl_idx <= IPA_MEM_PART(v4_modem_rt_index_hi))
3420 continue;
3421
3422 if (tbl_idx >= IPA_MEM_PART(v6_modem_rt_index_lo) &&
3423 tbl_idx <= IPA_MEM_PART(v6_modem_rt_index_hi))
3424 continue;
3425
3426 if (ipa3_set_rt_tuple_mask(tbl_idx, &tuple)) {
3427 IPAERR("failed to setup tbl %d rt tuple\n", tbl_idx);
3428 return -EFAULT;
3429 }
3430 }
3431
3432 return 0;
3433}
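/*
 * Both helpers above program an all-zero hash tuple mask: filtering on
 * every pipe that supports filtering and is not a modem pipe, and routing
 * on every table index outside the modem-owned ranges. Modem pipes and
 * modem routing tables are skipped, presumably so the modem can configure
 * its own hashing tuples.
 */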
3434
3435static int ipa3_setup_apps_pipes(void)
3436{
3437 struct ipa_sys_connect_params sys_in;
3438 int result = 0;
3439
3440 if (ipa3_ctx->gsi_ch20_wa) {
3441 IPADBG("Allocating GSI physical channel 20\n");
3442 result = ipa_gsi_ch20_wa();
3443 if (result) {
3444 IPAERR("ipa_gsi_ch20_wa failed %d\n", result);
Ghanim Fodic6b67492017-03-15 14:19:56 +02003445 goto fail_ch20_wa;
Amir Levy9659e592016-10-27 18:08:27 +03003446 }
3447 }
3448
Skylar Changd407e592017-03-30 11:25:30 -07003449 /* allocate the common PROD event ring */
3450 if (ipa3_alloc_common_event_ring()) {
3451 IPAERR("ipa3_alloc_common_event_ring failed.\n");
3452 result = -EPERM;
3453 goto fail_ch20_wa;
3454 }
3455
Amir Levy9659e592016-10-27 18:08:27 +03003456 /* CMD OUT (AP->IPA) */
3457 memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
3458 sys_in.client = IPA_CLIENT_APPS_CMD_PROD;
3459 sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
3460 sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
3461 sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS;
3462 if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_cmd)) {
Ghanim Fodic6b67492017-03-15 14:19:56 +02003463 IPAERR(":setup sys pipe (APPS_CMD_PROD) failed.\n");
Amir Levy9659e592016-10-27 18:08:27 +03003464 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003465 goto fail_ch20_wa;
Amir Levy9659e592016-10-27 18:08:27 +03003466 }
3467 IPADBG("Apps to IPA cmd pipe is connected\n");
3468
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04003469 IPADBG("Will initialize SRAM\n");
Amir Levy9659e592016-10-27 18:08:27 +03003470 ipa3_ctx->ctrl->ipa_init_sram();
3471 IPADBG("SRAM initialized\n");
3472
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04003473 IPADBG("Will initialize HDR\n");
Amir Levy9659e592016-10-27 18:08:27 +03003474 ipa3_ctx->ctrl->ipa_init_hdr();
3475 IPADBG("HDR initialized\n");
3476
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04003477 IPADBG("Will initialize V4 RT\n");
Amir Levy9659e592016-10-27 18:08:27 +03003478 ipa3_ctx->ctrl->ipa_init_rt4();
3479 IPADBG("V4 RT initialized\n");
3480
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04003481 IPADBG("Will initialize V6 RT\n");
Amir Levy9659e592016-10-27 18:08:27 +03003482 ipa3_ctx->ctrl->ipa_init_rt6();
3483 IPADBG("V6 RT initialized\n");
3484
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04003485 IPADBG("Will initialize V4 FLT\n");
Amir Levy9659e592016-10-27 18:08:27 +03003486 ipa3_ctx->ctrl->ipa_init_flt4();
3487 IPADBG("V4 FLT initialized\n");
3488
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04003489 IPADBG("Will initialize V6 FLT\n");
Amir Levy9659e592016-10-27 18:08:27 +03003490 ipa3_ctx->ctrl->ipa_init_flt6();
3491 IPADBG("V6 FLT initialized\n");
3492
3493 if (ipa3_setup_flt_hash_tuple()) {
3494 IPAERR(":fail to configure flt hash tuple\n");
3495 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003496 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003497 }
3498 IPADBG("flt hash tuple is configured\n");
3499
3500 if (ipa3_setup_rt_hash_tuple()) {
3501 IPAERR(":fail to configure rt hash tuple\n");
3502 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003503 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003504 }
3505 IPADBG("rt hash tuple is configured\n");
3506
3507 if (ipa3_setup_exception_path()) {
3508 IPAERR(":fail to setup excp path\n");
3509 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003510 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003511 }
3512	IPADBG("Exception path was successfully set\n");
3513
3514 if (ipa3_setup_dflt_rt_tables()) {
3515 IPAERR(":fail to setup dflt routes\n");
3516 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003517 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003518 }
3519 IPADBG("default routing was set\n");
3520
Ghanim Fodic6b67492017-03-15 14:19:56 +02003521 /* LAN IN (IPA->AP) */
Amir Levy9659e592016-10-27 18:08:27 +03003522 memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
3523 sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
3524 sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
3525 sys_in.notify = ipa3_lan_rx_cb;
3526 sys_in.priv = NULL;
3527 sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH;
3528 sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
3529 sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
3530 sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
3531 sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
3532 sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
3533 sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
3534 sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_ENABLE_CS_OFFLOAD_DL;
3535
3536 /**
3537	 * ipa_lan_rx_cb() is intended to notify the source EP about a packet
3538	 * being received on the LAN_CONS, by calling the source EP's callback.
3539	 * There could be a race condition when calling this callback: another
3540	 * thread may nullify it, e.g. on EP disconnect.
3541	 * This lock is intended to protect access to the source EP callback.
3542 */
3543 spin_lock_init(&ipa3_ctx->disconnect_lock);
3544 if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
Ghanim Fodic6b67492017-03-15 14:19:56 +02003545 IPAERR(":setup sys pipe (LAN_CONS) failed.\n");
Amir Levy9659e592016-10-27 18:08:27 +03003546 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003547 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003548 }
3549
Ghanim Fodic6b67492017-03-15 14:19:56 +02003550 /* LAN OUT (AP->IPA) */
Amir Levy54fe4d32017-03-16 11:21:49 +02003551 if (!ipa3_ctx->ipa_config_is_mhi) {
3552 memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
3553 sys_in.client = IPA_CLIENT_APPS_LAN_PROD;
3554 sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ;
3555 sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
3556 if (ipa3_setup_sys_pipe(&sys_in,
3557 &ipa3_ctx->clnt_hdl_data_out)) {
3558 IPAERR(":setup sys pipe (LAN_PROD) failed.\n");
3559 result = -EPERM;
3560 goto fail_lan_data_out;
3561 }
Amir Levy9659e592016-10-27 18:08:27 +03003562 }
3563
3564 return 0;
3565
Ghanim Fodic6b67492017-03-15 14:19:56 +02003566fail_lan_data_out:
Amir Levy9659e592016-10-27 18:08:27 +03003567 ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
Ghanim Fodic6b67492017-03-15 14:19:56 +02003568fail_flt_hash_tuple:
Amir Levy9659e592016-10-27 18:08:27 +03003569 if (ipa3_ctx->dflt_v6_rt_rule_hdl)
3570 __ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
3571 if (ipa3_ctx->dflt_v4_rt_rule_hdl)
3572 __ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
3573 if (ipa3_ctx->excp_hdr_hdl)
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02003574 __ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
Amir Levy9659e592016-10-27 18:08:27 +03003575 ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
Ghanim Fodic6b67492017-03-15 14:19:56 +02003576fail_ch20_wa:
Amir Levy9659e592016-10-27 18:08:27 +03003577 return result;
3578}
3579
3580static void ipa3_teardown_apps_pipes(void)
3581{
Amir Levy54fe4d32017-03-16 11:21:49 +02003582 if (!ipa3_ctx->ipa_config_is_mhi)
3583 ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out);
Amir Levy9659e592016-10-27 18:08:27 +03003584 ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
3585 __ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
3586 __ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02003587 __ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
Amir Levy9659e592016-10-27 18:08:27 +03003588 ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
3589}
3590
3591#ifdef CONFIG_COMPAT
Amir Levy479cfdd2017-10-26 12:23:14 +03003592
3593static long compat_ipa3_nat_ipv6ct_alloc_table(unsigned long arg,
3594 int (alloc_func)(struct ipa_ioc_nat_ipv6ct_table_alloc *))
3595{
3596 long retval;
3597 struct ipa_ioc_nat_ipv6ct_table_alloc32 table_alloc32;
3598 struct ipa_ioc_nat_ipv6ct_table_alloc table_alloc;
3599
3600 retval = copy_from_user(&table_alloc32, (const void __user *)arg,
3601 sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc32));
3602 if (retval)
3603 return retval;
3604
3605 table_alloc.size = (size_t)table_alloc32.size;
3606 table_alloc.offset = (off_t)table_alloc32.offset;
3607
3608 retval = alloc_func(&table_alloc);
3609 if (retval)
3610 return retval;
3611
3612 if (table_alloc.offset) {
3613 table_alloc32.offset = (compat_off_t)table_alloc.offset;
3614 retval = copy_to_user((void __user *)arg, &table_alloc32,
3615 sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc32));
3616 }
3617
3618 return retval;
3619}
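/*
 * Helper used below by compat_ipa3_ioctl() for IPA_IOC_ALLOC_NAT_TABLE32
 * and IPA_IOC_ALLOC_IPV6CT_TABLE32: it unpacks the 32-bit ioctl layout,
 * invokes the supplied native allocator, and copies the resulting offset
 * back to user space only when one was produced.
 */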
3620
Amir Levy9659e592016-10-27 18:08:27 +03003621long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3622{
Amir Levy479cfdd2017-10-26 12:23:14 +03003623 long retval = 0;
Amir Levy9659e592016-10-27 18:08:27 +03003624 struct ipa3_ioc_nat_alloc_mem32 nat_mem32;
3625 struct ipa_ioc_nat_alloc_mem nat_mem;
3626
3627 switch (cmd) {
3628 case IPA_IOC_ADD_HDR32:
3629 cmd = IPA_IOC_ADD_HDR;
3630 break;
3631 case IPA_IOC_DEL_HDR32:
3632 cmd = IPA_IOC_DEL_HDR;
3633 break;
3634 case IPA_IOC_ADD_RT_RULE32:
3635 cmd = IPA_IOC_ADD_RT_RULE;
3636 break;
3637 case IPA_IOC_DEL_RT_RULE32:
3638 cmd = IPA_IOC_DEL_RT_RULE;
3639 break;
3640 case IPA_IOC_ADD_FLT_RULE32:
3641 cmd = IPA_IOC_ADD_FLT_RULE;
3642 break;
3643 case IPA_IOC_DEL_FLT_RULE32:
3644 cmd = IPA_IOC_DEL_FLT_RULE;
3645 break;
3646 case IPA_IOC_GET_RT_TBL32:
3647 cmd = IPA_IOC_GET_RT_TBL;
3648 break;
3649 case IPA_IOC_COPY_HDR32:
3650 cmd = IPA_IOC_COPY_HDR;
3651 break;
3652 case IPA_IOC_QUERY_INTF32:
3653 cmd = IPA_IOC_QUERY_INTF;
3654 break;
3655 case IPA_IOC_QUERY_INTF_TX_PROPS32:
3656 cmd = IPA_IOC_QUERY_INTF_TX_PROPS;
3657 break;
3658 case IPA_IOC_QUERY_INTF_RX_PROPS32:
3659 cmd = IPA_IOC_QUERY_INTF_RX_PROPS;
3660 break;
3661 case IPA_IOC_QUERY_INTF_EXT_PROPS32:
3662 cmd = IPA_IOC_QUERY_INTF_EXT_PROPS;
3663 break;
3664 case IPA_IOC_GET_HDR32:
3665 cmd = IPA_IOC_GET_HDR;
3666 break;
3667 case IPA_IOC_ALLOC_NAT_MEM32:
Amir Levy479cfdd2017-10-26 12:23:14 +03003668 retval = copy_from_user(&nat_mem32, (const void __user *)arg,
3669 sizeof(struct ipa3_ioc_nat_alloc_mem32));
3670 if (retval)
3671 return retval;
Amir Levy9659e592016-10-27 18:08:27 +03003672 memcpy(nat_mem.dev_name, nat_mem32.dev_name,
3673 IPA_RESOURCE_NAME_MAX);
3674 nat_mem.size = (size_t)nat_mem32.size;
3675 nat_mem.offset = (off_t)nat_mem32.offset;
3676
3677 /* null terminate the string */
3678 nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
3679
Amir Levy479cfdd2017-10-26 12:23:14 +03003680 retval = ipa3_allocate_nat_device(&nat_mem);
3681 if (retval)
3682 return retval;
Amir Levy9659e592016-10-27 18:08:27 +03003683 nat_mem32.offset = (compat_off_t)nat_mem.offset;
Amir Levy479cfdd2017-10-26 12:23:14 +03003684 retval = copy_to_user((void __user *)arg, &nat_mem32,
3685 sizeof(struct ipa3_ioc_nat_alloc_mem32));
Amir Levy9659e592016-10-27 18:08:27 +03003686 return retval;
Amir Levy479cfdd2017-10-26 12:23:14 +03003687 case IPA_IOC_ALLOC_NAT_TABLE32:
3688 return compat_ipa3_nat_ipv6ct_alloc_table(arg,
3689 ipa3_allocate_nat_table);
3690 case IPA_IOC_ALLOC_IPV6CT_TABLE32:
3691 return compat_ipa3_nat_ipv6ct_alloc_table(arg,
3692 ipa3_allocate_ipv6ct_table);
Amir Levy9659e592016-10-27 18:08:27 +03003693 case IPA_IOC_V4_INIT_NAT32:
3694 cmd = IPA_IOC_V4_INIT_NAT;
3695 break;
Amir Levy479cfdd2017-10-26 12:23:14 +03003696 case IPA_IOC_INIT_IPV6CT_TABLE32:
3697 cmd = IPA_IOC_INIT_IPV6CT_TABLE;
3698 break;
3699 case IPA_IOC_TABLE_DMA_CMD32:
3700 cmd = IPA_IOC_TABLE_DMA_CMD;
Amir Levy9659e592016-10-27 18:08:27 +03003701 break;
3702 case IPA_IOC_V4_DEL_NAT32:
3703 cmd = IPA_IOC_V4_DEL_NAT;
3704 break;
Amir Levy479cfdd2017-10-26 12:23:14 +03003705 case IPA_IOC_DEL_NAT_TABLE32:
3706 cmd = IPA_IOC_DEL_NAT_TABLE;
3707 break;
3708 case IPA_IOC_DEL_IPV6CT_TABLE32:
3709 cmd = IPA_IOC_DEL_IPV6CT_TABLE;
3710 break;
3711 case IPA_IOC_NAT_MODIFY_PDN32:
3712 cmd = IPA_IOC_NAT_MODIFY_PDN;
3713 break;
Amir Levy9659e592016-10-27 18:08:27 +03003714 case IPA_IOC_GET_NAT_OFFSET32:
3715 cmd = IPA_IOC_GET_NAT_OFFSET;
3716 break;
3717 case IPA_IOC_PULL_MSG32:
3718 cmd = IPA_IOC_PULL_MSG;
3719 break;
3720 case IPA_IOC_RM_ADD_DEPENDENCY32:
3721 cmd = IPA_IOC_RM_ADD_DEPENDENCY;
3722 break;
3723 case IPA_IOC_RM_DEL_DEPENDENCY32:
3724 cmd = IPA_IOC_RM_DEL_DEPENDENCY;
3725 break;
3726 case IPA_IOC_GENERATE_FLT_EQ32:
3727 cmd = IPA_IOC_GENERATE_FLT_EQ;
3728 break;
3729 case IPA_IOC_QUERY_RT_TBL_INDEX32:
3730 cmd = IPA_IOC_QUERY_RT_TBL_INDEX;
3731 break;
3732 case IPA_IOC_WRITE_QMAPID32:
3733 cmd = IPA_IOC_WRITE_QMAPID;
3734 break;
3735 case IPA_IOC_MDFY_FLT_RULE32:
3736 cmd = IPA_IOC_MDFY_FLT_RULE;
3737 break;
3738 case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32:
3739 cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD;
3740 break;
3741 case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32:
3742 cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL;
3743 break;
3744 case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32:
3745 cmd = IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED;
3746 break;
3747 case IPA_IOC_MDFY_RT_RULE32:
3748 cmd = IPA_IOC_MDFY_RT_RULE;
3749 break;
3750 case IPA_IOC_COMMIT_HDR:
3751 case IPA_IOC_RESET_HDR:
3752 case IPA_IOC_COMMIT_RT:
3753 case IPA_IOC_RESET_RT:
3754 case IPA_IOC_COMMIT_FLT:
3755 case IPA_IOC_RESET_FLT:
3756 case IPA_IOC_DUMP:
3757 case IPA_IOC_PUT_RT_TBL:
3758 case IPA_IOC_PUT_HDR:
3759 case IPA_IOC_SET_FLT:
3760 case IPA_IOC_QUERY_EP_MAPPING:
3761 break;
3762 default:
3763 return -ENOIOCTLCMD;
3764 }
3765 return ipa3_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
3766}
3767#endif
3768
3769static ssize_t ipa3_write(struct file *file, const char __user *buf,
3770 size_t count, loff_t *ppos);
3771
3772static const struct file_operations ipa3_drv_fops = {
3773 .owner = THIS_MODULE,
3774 .open = ipa3_open,
3775 .read = ipa3_read,
3776 .write = ipa3_write,
3777 .unlocked_ioctl = ipa3_ioctl,
3778#ifdef CONFIG_COMPAT
3779 .compat_ioctl = compat_ipa3_ioctl,
3780#endif
3781};
3782
3783static int ipa3_get_clks(struct device *dev)
3784{
Ghanim Fodi6a831342017-03-07 18:19:15 +02003785 if (ipa3_res.use_bw_vote) {
3786 IPADBG("Vote IPA clock by bw voting via bus scaling driver\n");
3787 ipa3_clk = NULL;
3788 return 0;
3789 }
3790
Amir Levy9659e592016-10-27 18:08:27 +03003791 ipa3_clk = clk_get(dev, "core_clk");
3792 if (IS_ERR(ipa3_clk)) {
3793 if (ipa3_clk != ERR_PTR(-EPROBE_DEFER))
3794 IPAERR("fail to get ipa clk\n");
3795 return PTR_ERR(ipa3_clk);
3796 }
3797 return 0;
3798}
3799
3800/**
3801 * _ipa_enable_clks_v3_0() - Enable IPA clocks.
3802 */
3803void _ipa_enable_clks_v3_0(void)
3804{
Ghanim Fodi6a831342017-03-07 18:19:15 +02003805 IPADBG_LOW("curr_ipa_clk_rate=%d", ipa3_ctx->curr_ipa_clk_rate);
Amir Levy9659e592016-10-27 18:08:27 +03003806 if (ipa3_clk) {
Ghanim Fodi6a831342017-03-07 18:19:15 +02003807 IPADBG_LOW("enabling gcc_ipa_clk\n");
Amir Levy9659e592016-10-27 18:08:27 +03003808 clk_prepare(ipa3_clk);
3809 clk_enable(ipa3_clk);
Amir Levy9659e592016-10-27 18:08:27 +03003810 clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
Amir Levy9659e592016-10-27 18:08:27 +03003811 }
3812
Ghanim Fodi6a831342017-03-07 18:19:15 +02003813 ipa3_uc_notify_clk_state(true);
Amir Levy9659e592016-10-27 18:08:27 +03003814}
3815
3816static unsigned int ipa3_get_bus_vote(void)
3817{
3818 unsigned int idx = 1;
3819
Skylar Chang448d8b82017-08-08 17:30:32 -07003820 if (ipa3_ctx->curr_ipa_clk_rate == ipa3_ctx->ctrl->ipa_clk_rate_svs2) {
Amir Levy9659e592016-10-27 18:08:27 +03003821 idx = 1;
3822 } else if (ipa3_ctx->curr_ipa_clk_rate ==
Skylar Chang448d8b82017-08-08 17:30:32 -07003823 ipa3_ctx->ctrl->ipa_clk_rate_svs) {
3824 idx = 2;
3825 } else if (ipa3_ctx->curr_ipa_clk_rate ==
3826 ipa3_ctx->ctrl->ipa_clk_rate_nominal) {
3827 idx = 3;
Amir Levy9659e592016-10-27 18:08:27 +03003828 } else if (ipa3_ctx->curr_ipa_clk_rate ==
3829 ipa3_ctx->ctrl->ipa_clk_rate_turbo) {
3830 idx = ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases - 1;
3831 } else {
3832 WARN_ON(1);
3833 }
Michael Adisumartad8c88e52018-01-05 10:22:38 -08003834 IPADBG_LOW("curr %d idx %d\n", ipa3_ctx->curr_ipa_clk_rate, idx);
Amir Levy9659e592016-10-27 18:08:27 +03003835
3836 return idx;
3837}
3838
3839/**
Skylar Chang68c37d82018-04-07 16:42:36 -07003840 * ipa3_enable_clks() - Turn on IPA clocks
3841 *
3842 * Return codes:
3843 * None
3844 */
Amir Levy9659e592016-10-27 18:08:27 +03003845void ipa3_enable_clks(void)
3846{
Skylar Changefc0a0f2018-03-29 11:17:40 -07003847 if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) {
3848 IPAERR("not supported in this mode\n");
3849 return;
3850 }
3851
Amir Levy9659e592016-10-27 18:08:27 +03003852 IPADBG("enabling IPA clocks and bus voting\n");
3853
Ghanim Fodi6a831342017-03-07 18:19:15 +02003854 if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
3855 ipa3_get_bus_vote()))
3856 WARN_ON(1);
Amir Levy9659e592016-10-27 18:08:27 +03003857
Ghanim Fodi6a831342017-03-07 18:19:15 +02003858 ipa3_ctx->ctrl->ipa3_enable_clks();
Amir Levy9659e592016-10-27 18:08:27 +03003859}
3860
3861
3862/**
3863 * _ipa_disable_clks_v3_0() - Disable IPA clocks.
3864 */
3865void _ipa_disable_clks_v3_0(void)
3866{
Amir Levy9659e592016-10-27 18:08:27 +03003867 ipa3_suspend_apps_pipes(true);
3868 ipa3_uc_notify_clk_state(false);
Ghanim Fodi6a831342017-03-07 18:19:15 +02003869 if (ipa3_clk) {
3870 IPADBG_LOW("disabling gcc_ipa_clk\n");
Amir Levy9659e592016-10-27 18:08:27 +03003871 clk_disable_unprepare(ipa3_clk);
Ghanim Fodi6a831342017-03-07 18:19:15 +02003872 }
Amir Levy9659e592016-10-27 18:08:27 +03003873}
3874
3875/**
Skylar Chang68c37d82018-04-07 16:42:36 -07003876 * ipa3_disable_clks() - Turn off IPA clocks
3877 *
3878 * Return codes:
3879 * None
3880 */
Amir Levy9659e592016-10-27 18:08:27 +03003881void ipa3_disable_clks(void)
3882{
Skylar Changefc0a0f2018-03-29 11:17:40 -07003883 if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) {
3884 IPAERR("not supported in this mode\n");
3885 return;
3886 }
3887
Amir Levy9659e592016-10-27 18:08:27 +03003888 IPADBG("disabling IPA clocks and bus voting\n");
3889
3890 ipa3_ctx->ctrl->ipa3_disable_clks();
3891
Ghanim Fodi6a831342017-03-07 18:19:15 +02003892 if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl, 0))
3893 WARN_ON(1);
Amir Levy9659e592016-10-27 18:08:27 +03003894}
3895
3896/**
3897 * ipa3_start_tag_process() - Send TAG packet and wait for it to come back
3898 *
3899 * This function is called prior to clock gating when the active client counter
3900 * is 1. The TAG process ensures that there are no packets inside IPA HW that
Amir Levya59ed3f2017-03-05 17:30:55 +02003901 * were not submitted to the IPA client via the transport. During TAG process
3902 * all aggregation frames are (force) closed.
Amir Levy9659e592016-10-27 18:08:27 +03003903 *
3904 * Return codes:
3905 * None
3906 */
3907static void ipa3_start_tag_process(struct work_struct *work)
3908{
3909 int res;
3910
3911 IPADBG("starting TAG process\n");
3912 /* close aggregation frames on all pipes */
3913 res = ipa3_tag_aggr_force_close(-1);
3914 if (res)
3915 IPAERR("ipa3_tag_aggr_force_close failed %d\n", res);
3916 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS");
3917
3918 IPADBG("TAG process done\n");
3919}
3920
3921/**
Skylar Chang68c37d82018-04-07 16:42:36 -07003922 * ipa3_active_clients_log_mod() - Log a modification in the active clients
3923 * reference count
3924 *
3925 * This method logs any modification in the active clients reference count:
3926 * It logs the modification in the circular history buffer
3927 * It logs the modification in the hash table - looking for an entry,
3928 * creating one if needed and deleting one if needed.
3929 *
3930 * @id: ipa3_active client logging info struct to hold the log information
3931 * @inc: a boolean variable to indicate whether the modification is an increase
3932 * or decrease
3933 * @int_ctx: a boolean variable to indicate whether this call is being made from
3934 * an interrupt context and therefore should allocate GFP_ATOMIC memory
3935 *
3936 * Method process:
3937 * - Hash the unique identifier string
3938 * - Find the hash in the table
3939 * 1)If found, increase or decrease the reference count
3940 * 2)If not found, allocate a new hash table entry struct and initialize it
3941 * - Remove and deallocate unneeded data structure
3942 * - Log the call in the circular history buffer (unless it is a simple call)
3943 */
Amir Levy9659e592016-10-27 18:08:27 +03003944void ipa3_active_clients_log_mod(struct ipa_active_client_logging_info *id,
3945 bool inc, bool int_ctx)
3946{
3947 char temp_str[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN];
3948 unsigned long long t;
3949 unsigned long nanosec_rem;
3950 struct ipa3_active_client_htable_entry *hentry;
3951 struct ipa3_active_client_htable_entry *hfound;
3952 u32 hkey;
3953 char str_to_hash[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
Skylar Chang69ae50e2017-07-31 13:13:29 -07003954 unsigned long flags;
Amir Levy9659e592016-10-27 18:08:27 +03003955
Skylar Chang69ae50e2017-07-31 13:13:29 -07003956 spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
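	/*
	 * Force an atomic allocation below: the logging spinlock is held
	 * with IRQs disabled at this point, so GFP_KERNEL must not be used
	 * even if the caller passed int_ctx == false.
	 */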
3957 int_ctx = true;
Amir Levy9659e592016-10-27 18:08:27 +03003958 hfound = NULL;
3959 memset(str_to_hash, 0, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
3960 strlcpy(str_to_hash, id->id_string, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
Amir Levyd9f51132016-11-14 16:55:35 +02003961 hkey = jhash(str_to_hash, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN,
Amir Levy9659e592016-10-27 18:08:27 +03003962 0);
3963 hash_for_each_possible(ipa3_ctx->ipa3_active_clients_logging.htable,
3964 hentry, list, hkey) {
3965 if (!strcmp(hentry->id_string, id->id_string)) {
3966 hentry->count = hentry->count + (inc ? 1 : -1);
3967 hfound = hentry;
3968 }
3969 }
3970 if (hfound == NULL) {
3971 hentry = NULL;
3972 hentry = kzalloc(sizeof(
3973 struct ipa3_active_client_htable_entry),
3974 int_ctx ? GFP_ATOMIC : GFP_KERNEL);
3975 if (hentry == NULL) {
3976 IPAERR("failed allocating active clients hash entry");
Skylar Chang69ae50e2017-07-31 13:13:29 -07003977 spin_unlock_irqrestore(
3978 &ipa3_ctx->ipa3_active_clients_logging.lock,
3979 flags);
Amir Levy9659e592016-10-27 18:08:27 +03003980 return;
3981 }
3982 hentry->type = id->type;
3983 strlcpy(hentry->id_string, id->id_string,
3984 IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
3985 INIT_HLIST_NODE(&hentry->list);
3986 hentry->count = inc ? 1 : -1;
3987 hash_add(ipa3_ctx->ipa3_active_clients_logging.htable,
3988 &hentry->list, hkey);
3989 } else if (hfound->count == 0) {
3990 hash_del(&hfound->list);
3991 kfree(hfound);
3992 }
3993
3994 if (id->type != SIMPLE) {
3995 t = local_clock();
3996 nanosec_rem = do_div(t, 1000000000) / 1000;
3997 snprintf(temp_str, IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN,
3998 inc ? "[%5lu.%06lu] ^ %s, %s: %d" :
3999 "[%5lu.%06lu] v %s, %s: %d",
4000 (unsigned long)t, nanosec_rem,
4001 id->id_string, id->file, id->line);
4002 ipa3_active_clients_log_insert(temp_str);
4003 }
Skylar Chang69ae50e2017-07-31 13:13:29 -07004004 spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
4005 flags);
Amir Levy9659e592016-10-27 18:08:27 +03004006}
4007
4008void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
4009 bool int_ctx)
4010{
4011 ipa3_active_clients_log_mod(id, false, int_ctx);
4012}
4013
4014void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id,
4015 bool int_ctx)
4016{
4017 ipa3_active_clients_log_mod(id, true, int_ctx);
4018}
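/*
 * Illustrative caller flow for the active-clients voting API below (a
 * sketch only; "MY_CLIENT" is a placeholder identifier - the macros and
 * helpers named here are the ones already used elsewhere in this file):
 *
 *	struct ipa_active_client_logging_info log_info;
 *
 *	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "MY_CLIENT");
 *	ipa3_inc_client_enable_clks(&log_info);
 *	... access IPA HW with clocks guaranteed on ...
 *	ipa3_dec_client_disable_clks(&log_info);
 *
 * The final decrement may kick off the TAG process before the clocks are
 * actually gated (see ipa3_dec_client_disable_clks() below).
 */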
4019
4020/**
Skylar Chang68c37d82018-04-07 16:42:36 -07004021 * ipa3_inc_client_enable_clks() - Increase active clients counter, and
4022 * enable ipa clocks if necessary
4023 *
4024 * Return codes:
4025 * None
4026 */
Amir Levy9659e592016-10-27 18:08:27 +03004027void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
4028{
Skylar Chang242952b2017-07-20 15:04:05 -07004029 int ret;
4030
Amir Levy9659e592016-10-27 18:08:27 +03004031 ipa3_active_clients_log_inc(id, false);
Skylar Chang242952b2017-07-20 15:04:05 -07004032 ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
4033 if (ret) {
4034 IPADBG_LOW("active clients = %d\n",
4035 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
4036 return;
4037 }
4038
4039 mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
4040
4041	/* somebody might have voted for clocks meanwhile */
4042 ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
4043 if (ret) {
4044 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
4045 IPADBG_LOW("active clients = %d\n",
4046 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
4047 return;
4048 }
4049
4050 ipa3_enable_clks();
4051 atomic_inc(&ipa3_ctx->ipa3_active_clients.cnt);
4052 IPADBG_LOW("active clients = %d\n",
4053 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
4054 ipa3_suspend_apps_pipes(false);
4055 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
Amir Levy9659e592016-10-27 18:08:27 +03004056}
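/*
 * Note on the locking scheme above: the fast path uses
 * atomic_inc_not_zero() and avoids the mutex entirely whenever clocks are
 * already voted on. Otherwise the mutex is taken and the counter is
 * re-checked (another context may have voted meanwhile) before clocks are
 * enabled and the apps pipes are unsuspended.
 */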
4057
4058/**
Skylar Chang68c37d82018-04-07 16:42:36 -07004059 * ipa3_inc_client_enable_clks_no_block() - Only increment the number of active
4060 * clients if no asynchronous actions should be done. Asynchronous actions are
4061 * locking a mutex and waking up IPA HW.
4062 *
4063 * Return codes: 0 for success
4064 * -EPERM if an asynchronous action should have been done
4065 */
Amir Levy9659e592016-10-27 18:08:27 +03004066int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
4067 *id)
4068{
Skylar Chang242952b2017-07-20 15:04:05 -07004069 int ret;
Amir Levy9659e592016-10-27 18:08:27 +03004070
Skylar Chang242952b2017-07-20 15:04:05 -07004071 ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
4072 if (ret) {
4073 ipa3_active_clients_log_inc(id, true);
4074 IPADBG_LOW("active clients = %d\n",
4075 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
4076 return 0;
Amir Levy9659e592016-10-27 18:08:27 +03004077 }
Amir Levy9659e592016-10-27 18:08:27 +03004078
Skylar Chang242952b2017-07-20 15:04:05 -07004079 return -EPERM;
4080}
4081
4082static void __ipa3_dec_client_disable_clks(void)
4083{
4084 int ret;
4085
4086 if (!atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)) {
4087 IPAERR("trying to disable clocks with refcnt is 0!\n");
4088 ipa_assert();
4089 return;
4090 }
4091
4092 ret = atomic_add_unless(&ipa3_ctx->ipa3_active_clients.cnt, -1, 1);
4093 if (ret)
4094 goto bail;
4095
4096 /* seems like this is the only client holding the clocks */
4097 mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
4098 if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) == 1 &&
4099 ipa3_ctx->tag_process_before_gating) {
4100 ipa3_ctx->tag_process_before_gating = false;
4101 /*
4102 * When TAG process ends, active clients will be
4103 * decreased
4104 */
4105 queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_tag_work);
4106 goto unlock_mutex;
4107 }
4108
4109 /* a different context might increase the clock reference meanwhile */
4110 ret = atomic_sub_return(1, &ipa3_ctx->ipa3_active_clients.cnt);
4111 if (ret > 0)
4112 goto unlock_mutex;
4113 ipa3_disable_clks();
4114
4115unlock_mutex:
4116 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
4117bail:
4118 IPADBG_LOW("active clients = %d\n",
4119 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
Amir Levy9659e592016-10-27 18:08:27 +03004120}
4121
4122/**
4123 * ipa3_dec_client_disable_clks() - Decrease active clients counter
4124 *
4125 * In case there are no active clients, this function also starts the
4126 * TAG process. When the TAG process ends, IPA clocks will be gated.
4127 * The start_tag_process_again flag is set during this function to signal
4128 * the TAG process to start again, as another client may still send data to IPA.
4129 *
4130 * Return codes:
4131 * None
4132 */
4133void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
4134{
Amir Levy9659e592016-10-27 18:08:27 +03004135 ipa3_active_clients_log_dec(id, false);
Skylar Chang242952b2017-07-20 15:04:05 -07004136 __ipa3_dec_client_disable_clks();
4137}
4138
4139static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work)
4140{
4141 __ipa3_dec_client_disable_clks();
4142}
4143
4144/**
4145 * ipa3_dec_client_disable_clks_no_block() - Decrease active clients counter
4146 * if possible without blocking. If this is the last client then the decrease
4147 * will happen from work queue context.
4148 *
4149 * Return codes:
4150 * None
4151 */
4152void ipa3_dec_client_disable_clks_no_block(
4153 struct ipa_active_client_logging_info *id)
4154{
4155 int ret;
4156
4157 ipa3_active_clients_log_dec(id, true);
4158 ret = atomic_add_unless(&ipa3_ctx->ipa3_active_clients.cnt, -1, 1);
4159 if (ret) {
4160 IPADBG_LOW("active clients = %d\n",
4161 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
4162 return;
Amir Levy9659e592016-10-27 18:08:27 +03004163 }
Skylar Chang242952b2017-07-20 15:04:05 -07004164
4165 /* seems like this is the only client holding the clocks */
4166 queue_work(ipa3_ctx->power_mgmt_wq,
4167 &ipa_dec_clients_disable_clks_on_wq_work);
Amir Levy9659e592016-10-27 18:08:27 +03004168}
4169
4170/**
Skylar Chang68c37d82018-04-07 16:42:36 -07004171 * ipa3_inc_acquire_wakelock() - Increase active clients counter, and
4172 * acquire wakelock if necessary
4173 *
4174 * Return codes:
4175 * None
4176 */
Amir Levy9659e592016-10-27 18:08:27 +03004177void ipa3_inc_acquire_wakelock(void)
4178{
4179 unsigned long flags;
4180
4181 spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
4182 ipa3_ctx->wakelock_ref_cnt.cnt++;
4183 if (ipa3_ctx->wakelock_ref_cnt.cnt == 1)
4184 __pm_stay_awake(&ipa3_ctx->w_lock);
4185 IPADBG_LOW("active wakelock ref cnt = %d\n",
4186 ipa3_ctx->wakelock_ref_cnt.cnt);
4187 spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
4188}
4189
4190/**
4191 * ipa3_dec_release_wakelock() - Decrease active clients counter
4192 *
4193 * If the reference count reaches 0, release the wakelock.
4194 *
4195 * Return codes:
4196 * None
4197 */
4198void ipa3_dec_release_wakelock(void)
4199{
4200 unsigned long flags;
4201
4202 spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
4203 ipa3_ctx->wakelock_ref_cnt.cnt--;
4204 IPADBG_LOW("active wakelock ref cnt = %d\n",
4205 ipa3_ctx->wakelock_ref_cnt.cnt);
4206 if (ipa3_ctx->wakelock_ref_cnt.cnt == 0)
4207 __pm_relax(&ipa3_ctx->w_lock);
4208 spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
4209}
4210
Michael Adisumartac06df412017-09-19 10:10:35 -07004211int ipa3_set_clock_plan_from_pm(int idx)
4212{
4213 u32 clk_rate;
4214
Michael Adisumarta9cb4d212018-05-14 18:35:41 -07004215 IPADBG_LOW("idx = %d\n", idx);
4216
4217 if (!ipa3_ctx->enable_clock_scaling) {
4218 ipa3_ctx->ipa3_active_clients.bus_vote_idx = idx;
Michael Adisumartafd2d2fc92017-12-11 11:34:02 -08004219 return 0;
Michael Adisumarta9cb4d212018-05-14 18:35:41 -07004220 }
Michael Adisumartafd2d2fc92017-12-11 11:34:02 -08004221
Skylar Changefc0a0f2018-03-29 11:17:40 -07004222 if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) {
4223 IPAERR("not supported in this mode\n");
4224 return 0;
4225 }
4226
Michael Adisumartac06df412017-09-19 10:10:35 -07004227 if (idx <= 0 || idx >= ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases) {
4228 IPAERR("bad voltage\n");
4229 return -EINVAL;
4230 }
4231
4232 if (idx == 1)
Michael Adisumartafd2d2fc92017-12-11 11:34:02 -08004233 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs2;
Michael Adisumartac06df412017-09-19 10:10:35 -07004234 else if (idx == 2)
Michael Adisumartafd2d2fc92017-12-11 11:34:02 -08004235 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs;
Michael Adisumartac06df412017-09-19 10:10:35 -07004236 else if (idx == 3)
Michael Adisumartafd2d2fc92017-12-11 11:34:02 -08004237 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_nominal;
4238 else if (idx == 4)
Michael Adisumartac06df412017-09-19 10:10:35 -07004239 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
4240 else {
4241 IPAERR("bad voltage\n");
4242 WARN_ON(1);
4243 return -EFAULT;
4244 }
4245
4246 if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) {
4247 IPADBG_LOW("Same voltage\n");
4248 return 0;
4249 }
4250
4251 mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
4252 ipa3_ctx->curr_ipa_clk_rate = clk_rate;
4253 ipa3_ctx->ipa3_active_clients.bus_vote_idx = idx;
4254 IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
4255 if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) > 0) {
4256 if (ipa3_clk)
4257 clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
4258 if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
4259 ipa3_get_bus_vote()))
4260 WARN_ON(1);
4261 } else {
4262 IPADBG_LOW("clocks are gated, not setting rate\n");
4263 }
4264 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
4265 IPADBG_LOW("Done\n");
4266
4267 return 0;
4268}
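/*
 * For reference, the bus-vote index handled above maps to clock rates as
 * follows (mirroring ipa3_get_bus_vote()):
 *	idx 1 -> ipa_clk_rate_svs2
 *	idx 2 -> ipa_clk_rate_svs
 *	idx 3 -> ipa_clk_rate_nominal
 *	idx 4 -> ipa_clk_rate_turbo (the last bus-scaling use case)
 */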
4269
Amir Levy9659e592016-10-27 18:08:27 +03004270int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
4271 u32 bandwidth_mbps)
4272{
4273 enum ipa_voltage_level needed_voltage;
4274 u32 clk_rate;
4275
Skylar Changefc0a0f2018-03-29 11:17:40 -07004276 if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) {
4277 IPAERR("not supported in this mode\n");
4278 return 0;
4279 }
4280
Amir Levy9659e592016-10-27 18:08:27 +03004281 IPADBG_LOW("floor_voltage=%d, bandwidth_mbps=%u",
4282 floor_voltage, bandwidth_mbps);
4283
4284 if (floor_voltage < IPA_VOLTAGE_UNSPECIFIED ||
4285 floor_voltage >= IPA_VOLTAGE_MAX) {
4286 IPAERR("bad voltage\n");
4287 return -EINVAL;
4288 }
4289
4290 if (ipa3_ctx->enable_clock_scaling) {
4291 IPADBG_LOW("Clock scaling is enabled\n");
4292 if (bandwidth_mbps >=
4293 ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo)
4294 needed_voltage = IPA_VOLTAGE_TURBO;
4295 else if (bandwidth_mbps >=
4296 ipa3_ctx->ctrl->clock_scaling_bw_threshold_nominal)
4297 needed_voltage = IPA_VOLTAGE_NOMINAL;
Skylar Chang448d8b82017-08-08 17:30:32 -07004298 else if (bandwidth_mbps >=
4299 ipa3_ctx->ctrl->clock_scaling_bw_threshold_svs)
Amir Levy9659e592016-10-27 18:08:27 +03004300 needed_voltage = IPA_VOLTAGE_SVS;
Skylar Chang448d8b82017-08-08 17:30:32 -07004301 else
4302 needed_voltage = IPA_VOLTAGE_SVS2;
Amir Levy9659e592016-10-27 18:08:27 +03004303 } else {
4304 IPADBG_LOW("Clock scaling is disabled\n");
4305 needed_voltage = IPA_VOLTAGE_NOMINAL;
4306 }
4307
4308 needed_voltage = max(needed_voltage, floor_voltage);
4309 switch (needed_voltage) {
Skylar Chang448d8b82017-08-08 17:30:32 -07004310 case IPA_VOLTAGE_SVS2:
4311 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs2;
4312 break;
Amir Levy9659e592016-10-27 18:08:27 +03004313 case IPA_VOLTAGE_SVS:
4314 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs;
4315 break;
4316 case IPA_VOLTAGE_NOMINAL:
4317 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_nominal;
4318 break;
4319 case IPA_VOLTAGE_TURBO:
4320 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
4321 break;
4322 default:
4323 IPAERR("bad voltage\n");
4324 WARN_ON(1);
4325 return -EFAULT;
4326 }
4327
4328 if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) {
4329 IPADBG_LOW("Same voltage\n");
4330 return 0;
4331 }
4332
Skylar Chang242952b2017-07-20 15:04:05 -07004333 /* Hold the mutex to avoid race conditions with ipa3_enable_clocks() */
4334 mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
Amir Levy9659e592016-10-27 18:08:27 +03004335 ipa3_ctx->curr_ipa_clk_rate = clk_rate;
4336 IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
Skylar Chang242952b2017-07-20 15:04:05 -07004337 if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) > 0) {
Ghanim Fodi6a831342017-03-07 18:19:15 +02004338 if (ipa3_clk)
4339 clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
4340 if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
Skylar Chang242952b2017-07-20 15:04:05 -07004341 ipa3_get_bus_vote()))
Ghanim Fodi6a831342017-03-07 18:19:15 +02004342 WARN_ON(1);
Amir Levy9659e592016-10-27 18:08:27 +03004343 } else {
4344 IPADBG_LOW("clocks are gated, not setting rate\n");
4345 }
Skylar Chang242952b2017-07-20 15:04:05 -07004346 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
Amir Levy9659e592016-10-27 18:08:27 +03004347 IPADBG_LOW("Done\n");
Skylar Chang1cbe99c2017-05-01 13:44:03 -07004348
Amir Levy9659e592016-10-27 18:08:27 +03004349 return 0;
4350}
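/*
 * Bandwidth-to-voltage selection used above, when clock scaling is enabled:
 *	bandwidth_mbps >= clock_scaling_bw_threshold_turbo   -> TURBO
 *	bandwidth_mbps >= clock_scaling_bw_threshold_nominal  -> NOMINAL
 *	bandwidth_mbps >= clock_scaling_bw_threshold_svs      -> SVS
 *	otherwise                                             -> SVS2
 * The result is then raised to at least floor_voltage and translated to
 * the matching ipa_clk_rate_* value.
 */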
4351
Amir Levya59ed3f2017-03-05 17:30:55 +02004352static void ipa3_process_irq_schedule_rel(void)
Amir Levy9659e592016-10-27 18:08:27 +03004353{
4354 queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq,
Amir Levya59ed3f2017-03-05 17:30:55 +02004355 &ipa3_transport_release_resource_work,
Amir Levy9659e592016-10-27 18:08:27 +03004356 msecs_to_jiffies(IPA_TRANSPORT_PROD_TIMEOUT_MSEC));
4357}
4358
4359/**
Skylar Chang68c37d82018-04-07 16:42:36 -07004360 * ipa3_suspend_handler() - Handles the suspend interrupt:
4361 * wakes up the suspended peripheral by requesting its consumer
4362 * @interrupt: Interrupt type
4363 * @private_data: The client's private data
4364 * @interrupt_data: Interrupt specific information data
4365 */
Amir Levy9659e592016-10-27 18:08:27 +03004366void ipa3_suspend_handler(enum ipa_irq_type interrupt,
4367 void *private_data,
4368 void *interrupt_data)
4369{
4370 enum ipa_rm_resource_name resource;
4371 u32 suspend_data =
4372 ((struct ipa_tx_suspend_irq_data *)interrupt_data)->endpoints;
4373 u32 bmsk = 1;
4374 u32 i = 0;
4375 int res;
4376 struct ipa_ep_cfg_holb holb_cfg;
Michael Adisumarta3e350812017-09-18 14:54:36 -07004377 u32 pipe_bitmask = 0;
Amir Levy9659e592016-10-27 18:08:27 +03004378
4379 IPADBG("interrupt=%d, interrupt_data=%u\n",
4380 interrupt, suspend_data);
4381 memset(&holb_cfg, 0, sizeof(holb_cfg));
4382 holb_cfg.tmr_val = 0;
4383
Michael Adisumarta3e350812017-09-18 14:54:36 -07004384 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++, bmsk = bmsk << 1) {
Amir Levy9659e592016-10-27 18:08:27 +03004385 if ((suspend_data & bmsk) && (ipa3_ctx->ep[i].valid)) {
Michael Adisumarta3e350812017-09-18 14:54:36 -07004386 if (ipa3_ctx->use_ipa_pm) {
4387 pipe_bitmask |= bmsk;
4388 continue;
4389 }
Amir Levy9659e592016-10-27 18:08:27 +03004390 if (IPA_CLIENT_IS_APPS_CONS(ipa3_ctx->ep[i].client)) {
4391 /*
4392 * pipe will be unsuspended as part of
4393 * enabling IPA clocks
4394 */
Skylar Chang0d06bb12017-02-24 11:22:03 -08004395 mutex_lock(&ipa3_ctx->transport_pm.
4396 transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03004397 if (!atomic_read(
4398 &ipa3_ctx->transport_pm.dec_clients)
4399 ) {
4400 IPA_ACTIVE_CLIENTS_INC_EP(
4401 ipa3_ctx->ep[i].client);
4402 IPADBG_LOW("Pipes un-suspended.\n");
4403 IPADBG_LOW("Enter poll mode.\n");
4404 atomic_set(
4405 &ipa3_ctx->transport_pm.dec_clients,
4406 1);
Skylar Chang9e3b6492017-11-07 09:49:48 -08004407 /*
4408 * acquire wake lock as long as suspend
4409 * vote is held
4410 */
4411 ipa3_inc_acquire_wakelock();
Amir Levya59ed3f2017-03-05 17:30:55 +02004412 ipa3_process_irq_schedule_rel();
Amir Levy9659e592016-10-27 18:08:27 +03004413 }
Skylar Chang0d06bb12017-02-24 11:22:03 -08004414 mutex_unlock(&ipa3_ctx->transport_pm.
4415 transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03004416 } else {
4417 resource = ipa3_get_rm_resource_from_ep(i);
4418 res =
4419 ipa_rm_request_resource_with_timer(resource);
4420 if (res == -EPERM &&
4421 IPA_CLIENT_IS_CONS(
4422 ipa3_ctx->ep[i].client)) {
4423 holb_cfg.en = 1;
4424 res = ipa3_cfg_ep_holb_by_client(
4425 ipa3_ctx->ep[i].client, &holb_cfg);
4426 if (res) {
4427 IPAERR("holb en fail, stall\n");
4428 BUG();
4429 }
4430 }
4431 }
4432 }
Michael Adisumarta3e350812017-09-18 14:54:36 -07004433 }
4434 if (ipa3_ctx->use_ipa_pm) {
4435 res = ipa_pm_handle_suspend(pipe_bitmask);
4436 if (res) {
4437 IPAERR("ipa_pm_handle_suspend failed %d\n", res);
4438 return;
4439 }
Amir Levy9659e592016-10-27 18:08:27 +03004440 }
4441}
4442
4443/**
Skylar Chang68c37d82018-04-07 16:42:36 -07004444 * ipa3_restore_suspend_handler() - restores the original suspend IRQ handler
4445 * as it was registered in the IPA init sequence.
4446 * Return codes:
4447 * 0: success
4448 * -EPERM: failed to remove current handler or failed to add original handler
4449 */
Amir Levy9659e592016-10-27 18:08:27 +03004450int ipa3_restore_suspend_handler(void)
4451{
4452 int result = 0;
4453
4454 result = ipa3_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ);
4455 if (result) {
4456 IPAERR("remove handler for suspend interrupt failed\n");
4457 return -EPERM;
4458 }
4459
4460 result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
4461 ipa3_suspend_handler, false, NULL);
4462 if (result) {
4463 IPAERR("register handler for suspend interrupt failed\n");
4464 result = -EPERM;
4465 }
4466
4467 IPADBG("suspend handler successfully restored\n");
4468
4469 return result;
4470}
4471
4472static int ipa3_apps_cons_release_resource(void)
4473{
4474 return 0;
4475}
4476
4477static int ipa3_apps_cons_request_resource(void)
4478{
4479 return 0;
4480}
4481
Amir Levya59ed3f2017-03-05 17:30:55 +02004482static void ipa3_transport_release_resource(struct work_struct *work)
Amir Levy9659e592016-10-27 18:08:27 +03004483{
Sridhar Ancha99b505b2016-04-21 23:11:10 +05304484 mutex_lock(&ipa3_ctx->transport_pm.transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03004485 /* check whether still need to decrease client usage */
4486 if (atomic_read(&ipa3_ctx->transport_pm.dec_clients)) {
4487 if (atomic_read(&ipa3_ctx->transport_pm.eot_activity)) {
4488 IPADBG("EOT pending Re-scheduling\n");
Amir Levya59ed3f2017-03-05 17:30:55 +02004489 ipa3_process_irq_schedule_rel();
Amir Levy9659e592016-10-27 18:08:27 +03004490 } else {
4491 atomic_set(&ipa3_ctx->transport_pm.dec_clients, 0);
Skylar Chang9e3b6492017-11-07 09:49:48 -08004492 ipa3_dec_release_wakelock();
Amir Levya59ed3f2017-03-05 17:30:55 +02004493 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TRANSPORT_RESOURCE");
Amir Levy9659e592016-10-27 18:08:27 +03004494 }
4495 }
4496 atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
Sridhar Ancha99b505b2016-04-21 23:11:10 +05304497 mutex_unlock(&ipa3_ctx->transport_pm.transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03004498}
4499
4500int ipa3_create_apps_resource(void)
4501{
4502 struct ipa_rm_create_params apps_cons_create_params;
4503 struct ipa_rm_perf_profile profile;
4504 int result = 0;
4505
4506 memset(&apps_cons_create_params, 0,
4507 sizeof(apps_cons_create_params));
4508 apps_cons_create_params.name = IPA_RM_RESOURCE_APPS_CONS;
4509 apps_cons_create_params.request_resource =
4510 ipa3_apps_cons_request_resource;
4511 apps_cons_create_params.release_resource =
4512 ipa3_apps_cons_release_resource;
4513 result = ipa_rm_create_resource(&apps_cons_create_params);
4514 if (result) {
4515 IPAERR("ipa_rm_create_resource failed\n");
4516 return result;
4517 }
4518
4519 profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
4520 ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile);
4521
4522 return result;
4523}
4524
4525/**
4526 * ipa3_init_interrupts() - Register to IPA IRQs
4527 *
4528 * Return codes: 0 in success, negative in failure
4529 *
4530 */
4531int ipa3_init_interrupts(void)
4532{
4533 int result;
4534
4535 /*register IPA IRQ handler*/
4536 result = ipa3_interrupts_init(ipa3_res.ipa_irq, 0,
Skylar Changefc0a0f2018-03-29 11:17:40 -07004537 &ipa3_ctx->master_pdev->dev);
Amir Levy9659e592016-10-27 18:08:27 +03004538 if (result) {
4539 IPAERR("ipa interrupts initialization failed\n");
4540 return -ENODEV;
4541 }
4542
4543 /*add handler for suspend interrupt*/
4544 result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
4545 ipa3_suspend_handler, false, NULL);
4546 if (result) {
4547 IPAERR("register handler for suspend interrupt failed\n");
4548 result = -ENODEV;
4549 goto fail_add_interrupt_handler;
4550 }
4551
4552 return 0;
4553
4554fail_add_interrupt_handler:
Skylar Changefc0a0f2018-03-29 11:17:40 -07004555 free_irq(ipa3_res.ipa_irq, &ipa3_ctx->master_pdev->dev);
Amir Levy9659e592016-10-27 18:08:27 +03004556 return result;
4557}
4558
4559/**
4560 * ipa3_destroy_flt_tbl_idrs() - destroy the idr structure for flt tables
4561 * The idr structure per filtering table is intended for rule id generation
4562 * per filtering rule.
4563 */
4564static void ipa3_destroy_flt_tbl_idrs(void)
4565{
4566 int i;
4567 struct ipa3_flt_tbl *flt_tbl;
4568
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004569 idr_destroy(&ipa3_ctx->flt_rule_ids[IPA_IP_v4]);
4570 idr_destroy(&ipa3_ctx->flt_rule_ids[IPA_IP_v6]);
4571
Amir Levy9659e592016-10-27 18:08:27 +03004572 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
4573 if (!ipa_is_ep_support_flt(i))
4574 continue;
4575
4576 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004577 flt_tbl->rule_ids = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03004578 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004579 flt_tbl->rule_ids = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03004580 }
4581}
4582
4583static void ipa3_freeze_clock_vote_and_notify_modem(void)
4584{
4585 int res;
Amir Levy9659e592016-10-27 18:08:27 +03004586 struct ipa_active_client_logging_info log_info;
4587
4588 if (ipa3_ctx->smp2p_info.res_sent)
4589 return;
4590
Skylar Change1209942017-02-02 14:26:38 -08004591 if (ipa3_ctx->smp2p_info.out_base_id == 0) {
4592 IPAERR("smp2p out gpio not assigned\n");
4593 return;
4594 }
4595
Amir Levy9659e592016-10-27 18:08:27 +03004596 IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "FREEZE_VOTE");
4597 res = ipa3_inc_client_enable_clks_no_block(&log_info);
4598 if (res)
Skylar Change1209942017-02-02 14:26:38 -08004599 ipa3_ctx->smp2p_info.ipa_clk_on = false;
Amir Levy9659e592016-10-27 18:08:27 +03004600 else
Skylar Change1209942017-02-02 14:26:38 -08004601 ipa3_ctx->smp2p_info.ipa_clk_on = true;
Amir Levy9659e592016-10-27 18:08:27 +03004602
Skylar Change1209942017-02-02 14:26:38 -08004603 gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
4604 IPA_GPIO_OUT_CLK_VOTE_IDX,
4605 ipa3_ctx->smp2p_info.ipa_clk_on);
4606 gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
4607 IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX, 1);
Amir Levy9659e592016-10-27 18:08:27 +03004608
Skylar Change1209942017-02-02 14:26:38 -08004609 ipa3_ctx->smp2p_info.res_sent = true;
4610 IPADBG("IPA clocks are %s\n",
4611 ipa3_ctx->smp2p_info.ipa_clk_on ? "ON" : "OFF");
4612}
4613
4614void ipa3_reset_freeze_vote(void)
4615{
4616 if (ipa3_ctx->smp2p_info.res_sent == false)
4617 return;
4618
4619 if (ipa3_ctx->smp2p_info.ipa_clk_on)
4620 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("FREEZE_VOTE");
4621
4622 gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
4623 IPA_GPIO_OUT_CLK_VOTE_IDX, 0);
4624 gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
4625 IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX, 0);
4626
4627 ipa3_ctx->smp2p_info.res_sent = false;
4628 ipa3_ctx->smp2p_info.ipa_clk_on = false;
Amir Levy9659e592016-10-27 18:08:27 +03004629}
4630
4631static int ipa3_panic_notifier(struct notifier_block *this,
4632 unsigned long event, void *ptr)
4633{
4634 int res;
4635
4636 ipa3_freeze_clock_vote_and_notify_modem();
4637
4638 IPADBG("Calling uC panic handler\n");
4639 res = ipa3_uc_panic_notifier(this, event, ptr);
4640 if (res)
4641 IPAERR("uC panic handler failed %d\n", res);
4642
Michael Adisumartaedba22d2018-04-19 12:28:33 -07004643 if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) != 0)
Michael Adisumartac50b8002018-06-13 15:21:07 -07004644 ipahal_print_all_regs(false);
Michael Adisumartaedba22d2018-04-19 12:28:33 -07004645
Amir Levy9659e592016-10-27 18:08:27 +03004646 return NOTIFY_DONE;
4647}
4648
4649static struct notifier_block ipa3_panic_blk = {
4650 .notifier_call = ipa3_panic_notifier,
4651 /* IPA panic handler needs to run before modem shuts down */
4652 .priority = INT_MAX,
4653};
4654
4655static void ipa3_register_panic_hdlr(void)
4656{
4657 atomic_notifier_chain_register(&panic_notifier_list,
4658 &ipa3_panic_blk);
4659}
4660
4661static void ipa3_trigger_ipa_ready_cbs(void)
4662{
4663 struct ipa3_ready_cb_info *info;
4664
4665 mutex_lock(&ipa3_ctx->lock);
4666
4667 /* Call all the CBs */
4668 list_for_each_entry(info, &ipa3_ctx->ipa_ready_cb_list, link)
4669 if (info->ready_cb)
4670 info->ready_cb(info->user_data);
4671
4672 mutex_unlock(&ipa3_ctx->lock);
4673}
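
/*
 * Illustrative sketch (an assumption, not code from this file): entries on
 * ipa_ready_cb_list are normally added by clients through the exported
 * ipa_register_ipa_ready_cb() wrapper; the callback name below is
 * hypothetical and the -EEXIST convention (returned when IPA is already up)
 * is assumed:
 *
 *	static void my_ipa_ready_cb(void *user_data)
 *	{
 *		pr_info("IPA is ready\n");
 *	}
 *
 *	ret = ipa_register_ipa_ready_cb(my_ipa_ready_cb, NULL);
 *	if (ret == -EEXIST)
 *		my_ipa_ready_cb(NULL);
 *	else if (ret)
 *		pr_err("could not register IPA ready cb\n");
 */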
4674
4675static int ipa3_gsi_pre_fw_load_init(void)
4676{
4677 int result;
4678
4679 result = gsi_configure_regs(ipa3_res.transport_mem_base,
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04004680 ipa3_res.transport_mem_size,
4681 ipa3_res.ipa_mem_base);
Amir Levy9659e592016-10-27 18:08:27 +03004682 if (result) {
4683 IPAERR("Failed to configure GSI registers\n");
4684 return -EINVAL;
4685 }
4686
4687 return 0;
4688}
4689
Skylar Chang0c17c7d2016-10-31 09:57:54 -07004690static void ipa3_uc_is_loaded(void)
4691{
4692 IPADBG("\n");
4693 complete_all(&ipa3_ctx->uc_loaded_completion_obj);
4694}
4695
Amir Levy41644242016-11-03 15:38:09 +02004696static enum gsi_ver ipa3_get_gsi_ver(enum ipa_hw_type ipa_hw_type)
4697{
4698 enum gsi_ver gsi_ver;
4699
4700 switch (ipa_hw_type) {
4701 case IPA_HW_v3_0:
4702 case IPA_HW_v3_1:
4703 gsi_ver = GSI_VER_1_0;
4704 break;
4705 case IPA_HW_v3_5:
4706 gsi_ver = GSI_VER_1_2;
4707 break;
4708 case IPA_HW_v3_5_1:
4709 gsi_ver = GSI_VER_1_3;
4710 break;
Michael Adisumarta891a4ff2017-05-16 16:40:06 -07004711 case IPA_HW_v4_0:
4712 gsi_ver = GSI_VER_2_0;
4713 break;
Amir Levy41644242016-11-03 15:38:09 +02004714 default:
4715 IPAERR("No GSI version for ipa type %d\n", ipa_hw_type);
4716 WARN_ON(1);
4717 gsi_ver = GSI_VER_ERR;
4718 }
4719
4720 IPADBG("GSI version %d\n", gsi_ver);
4721
4722 return gsi_ver;
4723}
4724
Amir Levy9659e592016-10-27 18:08:27 +03004725/**
4726 * ipa3_post_init() - Initialize the IPA Driver (Part II).
4727 * This part contains all initialization which requires interaction with
Amir Levya59ed3f2017-03-05 17:30:55 +02004728 * IPA HW (via GSI).
Amir Levy9659e592016-10-27 18:08:27 +03004729 *
4730 * @resource_p: contains platform-specific values from the DTS file
4731 * @ipa_dev: the device structure representing the IPA driver
4732 *
4733 * Function initialization process:
Amir Levy54fe4d32017-03-16 11:21:49 +02004734 * - Initialize endpoints bitmaps
4735 * - Initialize resource groups min and max values
4736 * - Initialize filtering lists heads and idr
4737 * - Initialize interrupts
Amir Levya59ed3f2017-03-05 17:30:55 +02004738 * - Register GSI
Amir Levy9659e592016-10-27 18:08:27 +03004739 * - Setup APPS pipes
4740 * - Initialize tethering bridge
4741 * - Initialize IPA debugfs
4742 * - Initialize IPA uC interface
4743 * - Initialize WDI interface
4744 * - Initialize USB interface
4745 * - Register for panic handler
4746 * - Trigger IPA ready callbacks (to all subscribers)
4747 * - Trigger IPA completion object (to all who wait on it)
4748 */
4749static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
4750 struct device *ipa_dev)
4751{
4752 int result;
Amir Levy9659e592016-10-27 18:08:27 +03004753 struct gsi_per_props gsi_props;
Skylar Chang0c17c7d2016-10-31 09:57:54 -07004754 struct ipa3_uc_hdlrs uc_hdlrs = { 0 };
Amir Levy54fe4d32017-03-16 11:21:49 +02004755 struct ipa3_flt_tbl *flt_tbl;
4756 int i;
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004757 struct idr *idr;
Amir Levy54fe4d32017-03-16 11:21:49 +02004758
Utkarsh Saxenaded78142017-05-03 14:04:30 +05304759 if (ipa3_ctx == NULL) {
4760 IPADBG("IPA driver haven't initialized\n");
4761 return -ENXIO;
4762 }
4763
4764	/* Prevent subsequent calls from trying to load the FW again. */
4765 if (ipa3_ctx->ipa_initialization_complete)
4766 return 0;
Mohammed Javidc6db3362018-02-13 13:41:38 +05304767
4768 IPADBG("active clients = %d\n",
4769 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
Skylar Chang40430532017-07-06 14:31:57 -07004770	/* the proxy clock vote for the modem is taken here, in ipa3_post_init */
Mohammed Javidc6db3362018-02-13 13:41:38 +05304771 if (ipa3_ctx->ipa_hw_type != IPA_HW_v4_0)
4772 ipa3_proxy_clk_vote();
Utkarsh Saxenaded78142017-05-03 14:04:30 +05304773
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04004774 /*
4775 * SMMU was already attached if used, safe to do allocations
4776 *
4777 * NOTE WELL: On an emulation system, this allocation is done
4778 * in ipa3_pre_init()
4779 */
4780 if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
4781 if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio,
4782 ipa3_ctx->pdev)) {
4783 IPAERR("fail to init ipahal\n");
4784 result = -EFAULT;
4785 goto fail_ipahal;
4786 }
Skylar Changefc0a0f2018-03-29 11:17:40 -07004787 }
4788
4789 result = ipa3_init_hw();
4790 if (result) {
4791 IPAERR(":error initializing HW\n");
4792 result = -ENODEV;
4793 goto fail_init_hw;
4794 }
4795 IPADBG("IPA HW initialization sequence completed");
4796
4797 ipa3_ctx->ipa_num_pipes = ipa3_get_num_pipes();
4798 if (ipa3_ctx->ipa_num_pipes > IPA3_MAX_NUM_PIPES) {
4799 IPAERR("IPA has more pipes then supported has %d, max %d\n",
4800 ipa3_ctx->ipa_num_pipes, IPA3_MAX_NUM_PIPES);
4801 result = -ENODEV;
4802 goto fail_init_hw;
4803 }
4804
4805 ipa3_ctx->ctrl->ipa_sram_read_settings();
4806 IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n",
4807 ipa3_ctx->smem_sz, ipa3_ctx->smem_restricted_bytes);
4808
4809 IPADBG("hdr_lcl=%u ip4_rt_hash=%u ip4_rt_nonhash=%u\n",
4810 ipa3_ctx->hdr_tbl_lcl, ipa3_ctx->ip4_rt_tbl_hash_lcl,
4811 ipa3_ctx->ip4_rt_tbl_nhash_lcl);
4812
4813 IPADBG("ip6_rt_hash=%u ip6_rt_nonhash=%u\n",
4814 ipa3_ctx->ip6_rt_tbl_hash_lcl, ipa3_ctx->ip6_rt_tbl_nhash_lcl);
4815
4816 IPADBG("ip4_flt_hash=%u ip4_flt_nonhash=%u\n",
4817 ipa3_ctx->ip4_flt_tbl_hash_lcl,
4818 ipa3_ctx->ip4_flt_tbl_nhash_lcl);
4819
4820 IPADBG("ip6_flt_hash=%u ip6_flt_nonhash=%u\n",
4821 ipa3_ctx->ip6_flt_tbl_hash_lcl,
4822 ipa3_ctx->ip6_flt_tbl_nhash_lcl);
4823
4824 if (ipa3_ctx->smem_reqd_sz > ipa3_ctx->smem_sz) {
4825 IPAERR("SW expect more core memory, needed %d, avail %d\n",
4826 ipa3_ctx->smem_reqd_sz, ipa3_ctx->smem_sz);
4827 result = -ENOMEM;
4828 goto fail_init_hw;
4829 }
4830
4831 result = ipa3_allocate_dma_task_for_gsi();
4832 if (result) {
4833 IPAERR("failed to allocate dma task\n");
4834 goto fail_dma_task;
4835 }
4836
4837 if (ipa3_nat_ipv6ct_init_devices()) {
4838 IPAERR("unable to init NAT and IPv6CT devices\n");
4839 result = -ENODEV;
4840 goto fail_nat_ipv6ct_init_dev;
4841 }
4842
4843 result = ipa3_alloc_pkt_init();
4844 if (result) {
4845 IPAERR("Failed to alloc pkt_init payload\n");
4846 result = -ENODEV;
4847 goto fail_allok_pkt_init;
4848 }
4849
4850 if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)
4851 ipa3_enable_dcd();
4852
Amir Levy54fe4d32017-03-16 11:21:49 +02004853 /*
4854	 * The indication of whether we are working in MHI or non-MHI config is
4855	 * given in ipa3_write(), which runs before ipa3_post_init(). From this
4856	 * point on it is therefore safe to use the ipa3_ep_mapping array; the
4857	 * correct entry will be returned by ipa3_get_hw_type_index().
4858 */
4859 ipa_init_ep_flt_bitmap();
4860 IPADBG("EP with flt support bitmap 0x%x (%u pipes)\n",
4861 ipa3_ctx->ep_flt_bitmap, ipa3_ctx->ep_flt_num);
4862
4863 /* Assign resource limitation to each group */
4864 ipa3_set_resorce_groups_min_max_limits();
4865
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004866 idr = &(ipa3_ctx->flt_rule_ids[IPA_IP_v4]);
4867 idr_init(idr);
4868 idr = &(ipa3_ctx->flt_rule_ids[IPA_IP_v6]);
4869 idr_init(idr);
4870
Amir Levy54fe4d32017-03-16 11:21:49 +02004871 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
4872 if (!ipa_is_ep_support_flt(i))
4873 continue;
4874
4875 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
4876 INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
4877 flt_tbl->in_sys[IPA_RULE_HASHABLE] =
4878 !ipa3_ctx->ip4_flt_tbl_hash_lcl;
4879 flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
4880 !ipa3_ctx->ip4_flt_tbl_nhash_lcl;
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004881 flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[IPA_IP_v4];
Amir Levy54fe4d32017-03-16 11:21:49 +02004882
4883 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
4884 INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
4885 flt_tbl->in_sys[IPA_RULE_HASHABLE] =
4886 !ipa3_ctx->ip6_flt_tbl_hash_lcl;
4887 flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
4888 !ipa3_ctx->ip6_flt_tbl_nhash_lcl;
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004889 flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[IPA_IP_v6];
Amir Levy54fe4d32017-03-16 11:21:49 +02004890 }
4891
4892 if (!ipa3_ctx->apply_rg10_wa) {
4893 result = ipa3_init_interrupts();
4894 if (result) {
4895 IPAERR("ipa initialization of interrupts failed\n");
4896 result = -ENODEV;
4897 goto fail_register_device;
4898 }
4899 } else {
4900 IPADBG("Initialization of ipa interrupts skipped\n");
4901 }
Amir Levy9659e592016-10-27 18:08:27 +03004902
Amir Levy3afd94a2017-01-05 10:19:13 +02004903 /*
Amir Levy5cfbb322017-01-09 14:53:02 +02004904	 * On IPA v3.5 up to (but not including) v4.0, prefetch must be
Skylar Chang84099692018-04-24 14:43:03 -07004905	 * disabled for USB in order to allow MBIM to work.
Amir Levy3afd94a2017-01-05 10:19:13 +02004906 */
Michael Adisumartad68ab112017-06-14 11:40:06 -07004907 if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5
4908 && ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) &&
Amir Levy5cfbb322017-01-09 14:53:02 +02004909 (!ipa3_ctx->ipa_config_is_mhi))
Amir Levy3afd94a2017-01-05 10:19:13 +02004910 ipa3_disable_prefetch(IPA_CLIENT_USB_CONS);
4911
Skylar Chang84099692018-04-24 14:43:03 -07004912 if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5
4913 && ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) &&
4914 (ipa3_ctx->ipa_config_is_mhi))
4915 ipa3_disable_prefetch(IPA_CLIENT_MHI_CONS);
4916
Amir Levya59ed3f2017-03-05 17:30:55 +02004917 memset(&gsi_props, 0, sizeof(gsi_props));
4918 gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type);
4919 gsi_props.ee = resource_p->ee;
4920 gsi_props.intr = GSI_INTR_IRQ;
Amir Levya59ed3f2017-03-05 17:30:55 +02004921 gsi_props.phys_addr = resource_p->transport_mem_base;
4922 gsi_props.size = resource_p->transport_mem_size;
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04004923 if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
4924 gsi_props.irq = resource_p->emulator_irq;
4925 gsi_props.emulator_intcntrlr_client_isr = ipa3_get_isr();
4926 gsi_props.emulator_intcntrlr_addr =
4927 resource_p->emulator_intcntrlr_mem_base;
4928 gsi_props.emulator_intcntrlr_size =
4929 resource_p->emulator_intcntrlr_mem_size;
4930 } else {
4931 gsi_props.irq = resource_p->transport_irq;
4932 }
Amir Levya59ed3f2017-03-05 17:30:55 +02004933 gsi_props.notify_cb = ipa_gsi_notify_cb;
4934 gsi_props.req_clk_cb = NULL;
4935 gsi_props.rel_clk_cb = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03004936
Ghanim Fodic823bc62017-10-21 17:29:53 +03004937 if (ipa3_ctx->ipa_config_is_mhi) {
4938 gsi_props.mhi_er_id_limits_valid = true;
4939 gsi_props.mhi_er_id_limits[0] = resource_p->mhi_evid_limits[0];
4940 gsi_props.mhi_er_id_limits[1] = resource_p->mhi_evid_limits[1];
4941 }
4942
Amir Levya59ed3f2017-03-05 17:30:55 +02004943 result = gsi_register_device(&gsi_props,
4944 &ipa3_ctx->gsi_dev_hdl);
4945 if (result != GSI_STATUS_SUCCESS) {
4946 IPAERR(":gsi register error - %d\n", result);
4947 result = -ENODEV;
4948 goto fail_register_device;
Amir Levy9659e592016-10-27 18:08:27 +03004949 }
Amir Levya59ed3f2017-03-05 17:30:55 +02004950 IPADBG("IPA gsi is registered\n");
Amir Levy9659e592016-10-27 18:08:27 +03004951
4952 /* setup the AP-IPA pipes */
4953 if (ipa3_setup_apps_pipes()) {
4954 IPAERR(":failed to setup IPA-Apps pipes\n");
4955 result = -ENODEV;
4956 goto fail_setup_apps_pipes;
4957 }
Amir Levya59ed3f2017-03-05 17:30:55 +02004958 IPADBG("IPA GPI pipes were connected\n");
Amir Levy9659e592016-10-27 18:08:27 +03004959
4960 if (ipa3_ctx->use_ipa_teth_bridge) {
4961 /* Initialize the tethering bridge driver */
4962 result = ipa3_teth_bridge_driver_init();
4963 if (result) {
4964 IPAERR(":teth_bridge init failed (%d)\n", -result);
4965 result = -ENODEV;
4966 goto fail_teth_bridge_driver_init;
4967 }
4968 IPADBG("teth_bridge initialized");
4969 }
4970
Amir Levy9659e592016-10-27 18:08:27 +03004971 result = ipa3_uc_interface_init();
4972 if (result)
4973 IPAERR(":ipa Uc interface init failed (%d)\n", -result);
4974 else
4975 IPADBG(":ipa Uc interface init ok\n");
4976
Skylar Chang0c17c7d2016-10-31 09:57:54 -07004977 uc_hdlrs.ipa_uc_loaded_hdlr = ipa3_uc_is_loaded;
4978 ipa3_uc_register_handlers(IPA_HW_FEATURE_COMMON, &uc_hdlrs);
4979
Amir Levy9659e592016-10-27 18:08:27 +03004980 result = ipa3_wdi_init();
4981 if (result)
4982 IPAERR(":wdi init failed (%d)\n", -result);
4983 else
4984 IPADBG(":wdi init ok\n");
4985
4986 result = ipa3_ntn_init();
4987 if (result)
4988 IPAERR(":ntn init failed (%d)\n", -result);
4989 else
4990 IPADBG(":ntn init ok\n");
4991
Skylar Chang6f6e3072017-07-28 10:03:47 -07004992 result = ipa_hw_stats_init();
4993 if (result)
4994 IPAERR("fail to init stats %d\n", result);
4995 else
4996 IPADBG(":stats init ok\n");
4997
Amir Levy9659e592016-10-27 18:08:27 +03004998 ipa3_register_panic_hdlr();
4999
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005000 ipa3_debugfs_init();
5001
Amir Levy9659e592016-10-27 18:08:27 +03005002 mutex_lock(&ipa3_ctx->lock);
5003 ipa3_ctx->ipa_initialization_complete = true;
5004 mutex_unlock(&ipa3_ctx->lock);
5005
5006 ipa3_trigger_ipa_ready_cbs();
5007 complete_all(&ipa3_ctx->init_completion_obj);
5008 pr_info("IPA driver initialization was successful.\n");
5009
5010 return 0;
5011
5012fail_teth_bridge_driver_init:
5013 ipa3_teardown_apps_pipes();
5014fail_setup_apps_pipes:
Amir Levya59ed3f2017-03-05 17:30:55 +02005015 gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
Amir Levy9659e592016-10-27 18:08:27 +03005016fail_register_device:
Amir Levy9659e592016-10-27 18:08:27 +03005017 ipa3_destroy_flt_tbl_idrs();
Skylar Changefc0a0f2018-03-29 11:17:40 -07005018fail_allok_pkt_init:
5019 ipa3_nat_ipv6ct_destroy_devices();
5020fail_nat_ipv6ct_init_dev:
5021 ipa3_free_dma_task_for_gsi();
5022fail_dma_task:
5023fail_init_hw:
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005024 if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION)
5025 ipahal_destroy();
Skylar Changefc0a0f2018-03-29 11:17:40 -07005026fail_ipahal:
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005027 ipa3_proxy_clk_unvote();
Skylar Changefc0a0f2018-03-29 11:17:40 -07005028
Amir Levy9659e592016-10-27 18:08:27 +03005029 return result;
5030}
5031
Ghanim Fodi03dcc272017-08-08 18:13:25 +03005032static int ipa3_manual_load_ipa_fws(void)
Amir Levy9659e592016-10-27 18:08:27 +03005033{
5034 int result;
5035 const struct firmware *fw;
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005036 const char *path = IPA_FWS_PATH;
Amir Levy9659e592016-10-27 18:08:27 +03005037
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005038 if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
5039 switch (ipa3_get_emulation_type()) {
5040 case IPA_HW_v3_5_1:
5041 path = IPA_FWS_PATH_3_5_1;
5042 break;
5043 case IPA_HW_v4_0:
5044 path = IPA_FWS_PATH_4_0;
5045 break;
5046 default:
5047 break;
5048 }
5049 }
Amir Levy9659e592016-10-27 18:08:27 +03005050
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005051 IPADBG("Manual FW loading (%s) process initiated\n", path);
5052
5053 result = request_firmware(&fw, path, ipa3_ctx->cdev.dev);
Amir Levy9659e592016-10-27 18:08:27 +03005054 if (result < 0) {
5055 IPAERR("request_firmware failed, error %d\n", result);
5056 return result;
5057 }
5058 if (fw == NULL) {
5059 IPAERR("Firmware is NULL!\n");
5060 return -EINVAL;
5061 }
5062
5063 IPADBG("FWs are available for loading\n");
5064
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005065 if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
5066 result = emulator_load_fws(fw,
5067 ipa3_res.transport_mem_base,
5068 ipa3_res.transport_mem_size);
5069 } else {
5070 result = ipa3_load_fws(fw, ipa3_res.transport_mem_base);
5071 }
Amir Levy9659e592016-10-27 18:08:27 +03005072 if (result) {
Ghanim Fodi03dcc272017-08-08 18:13:25 +03005073 IPAERR("Manual IPA FWs loading has failed\n");
Amir Levy9659e592016-10-27 18:08:27 +03005074 release_firmware(fw);
5075 return result;
5076 }
5077
5078 result = gsi_enable_fw(ipa3_res.transport_mem_base,
Amir Levy85dcd172016-12-06 17:47:39 +02005079 ipa3_res.transport_mem_size,
5080 ipa3_get_gsi_ver(ipa3_res.ipa_hw_type));
Amir Levy9659e592016-10-27 18:08:27 +03005081 if (result) {
5082 IPAERR("Failed to enable GSI FW\n");
5083 release_firmware(fw);
5084 return result;
5085 }
5086
5087 release_firmware(fw);
5088
Ghanim Fodi03dcc272017-08-08 18:13:25 +03005089 IPADBG("Manual FW loading process is complete\n");
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005090
Amir Levy9659e592016-10-27 18:08:27 +03005091 return 0;
5092}
5093
Ghanim Fodi03dcc272017-08-08 18:13:25 +03005094static int ipa3_pil_load_ipa_fws(void)
Amir Levy9659e592016-10-27 18:08:27 +03005095{
5096 void *subsystem_get_retval = NULL;
5097
Ghanim Fodi03dcc272017-08-08 18:13:25 +03005098 IPADBG("PIL FW loading process initiated\n");
Amir Levy9659e592016-10-27 18:08:27 +03005099
5100 subsystem_get_retval = subsystem_get(IPA_SUBSYSTEM_NAME);
5101 if (IS_ERR_OR_NULL(subsystem_get_retval)) {
5102 IPAERR("Unable to trigger PIL process for FW loading\n");
5103 return -EINVAL;
5104 }
5105
Ghanim Fodi03dcc272017-08-08 18:13:25 +03005106 IPADBG("PIL FW loading process is complete\n");
Amir Levy9659e592016-10-27 18:08:27 +03005107 return 0;
5108}
5109
Ghanim Fodia5f376a2017-10-17 18:14:53 +03005110static void ipa3_load_ipa_fw(struct work_struct *work)
5111{
5112 int result;
5113
5114 IPADBG("Entry\n");
5115
Skylar Changefc0a0f2018-03-29 11:17:40 -07005116 result = ipa3_attach_to_smmu();
5117 if (result) {
5118 IPAERR("IPA attach to smmu failed %d\n", result);
5119 return;
5120 }
5121
Ghanim Fodia5f376a2017-10-17 18:14:53 +03005122 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
5123
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005124 if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION &&
5125 (ipa3_is_msm_device() || (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)))
Ghanim Fodia5f376a2017-10-17 18:14:53 +03005126 result = ipa3_pil_load_ipa_fws();
5127 else
5128 result = ipa3_manual_load_ipa_fws();
5129
5130 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
5131
5132 if (result) {
5133 IPAERR("IPA FW loading process has failed\n");
5134 return;
5135 }
5136 pr_info("IPA FW loaded successfully\n");
5137
Skylar Changefc0a0f2018-03-29 11:17:40 -07005138 result = ipa3_post_init(&ipa3_res, ipa3_ctx->cdev.dev);
Ghanim Fodia5f376a2017-10-17 18:14:53 +03005139 if (result)
5140 IPAERR("IPA post init failed %d\n", result);
5141}
5142
Amir Levy9659e592016-10-27 18:08:27 +03005143static ssize_t ipa3_write(struct file *file, const char __user *buf,
5144 size_t count, loff_t *ppos)
5145{
5146 unsigned long missing;
Amir Levy9659e592016-10-27 18:08:27 +03005147
Amir Levy2da9d452017-12-12 10:09:46 +02005148 char dbg_buff[32] = { 0 };
Amir Levy9659e592016-10-27 18:08:27 +03005149
5150 if (sizeof(dbg_buff) < count + 1)
5151 return -EFAULT;
5152
5153 missing = copy_from_user(dbg_buff, buf, count);
5154
5155 if (missing) {
5156 IPAERR("Unable to copy data from user\n");
5157 return -EFAULT;
5158 }
5159
Amir Levya5774c42017-12-14 22:15:54 +02005160 dbg_buff[count] = '\0';
Mohammed Javidbf4c8022017-08-07 23:15:48 +05305161
Amir Levy2da9d452017-12-12 10:09:46 +02005162 IPADBG("user input string %s\n", dbg_buff);
5163
Ghanim Fodi03dcc272017-08-08 18:13:25 +03005164 /* Check MHI configuration on MDM devices */
5165 if (!ipa3_is_msm_device()) {
Amir Levy2da9d452017-12-12 10:09:46 +02005166
5167 if (strnstr(dbg_buff, "vlan", strlen(dbg_buff))) {
5168 if (strnstr(dbg_buff, "eth", strlen(dbg_buff)))
5169 ipa3_ctx->vlan_mode_iface[IPA_VLAN_IF_EMAC] =
5170 true;
5171 if (strnstr(dbg_buff, "rndis", strlen(dbg_buff)))
5172 ipa3_ctx->vlan_mode_iface[IPA_VLAN_IF_RNDIS] =
5173 true;
5174 if (strnstr(dbg_buff, "ecm", strlen(dbg_buff)))
5175 ipa3_ctx->vlan_mode_iface[IPA_VLAN_IF_ECM] =
5176 true;
5177
5178 /*
5179 * when vlan mode is passed to our dev we expect
5180 * another write
5181 */
5182 return count;
5183 }
5184
Amir Levya5774c42017-12-14 22:15:54 +02005185 /* trim ending newline character if any */
5186 if (count && (dbg_buff[count - 1] == '\n'))
5187 dbg_buff[count - 1] = '\0';
5188
Amir Levy54fe4d32017-03-16 11:21:49 +02005189 if (!strcasecmp(dbg_buff, "MHI")) {
5190 ipa3_ctx->ipa_config_is_mhi = true;
5191 pr_info(
Amir Levy2da9d452017-12-12 10:09:46 +02005192 "IPA is loading with MHI configuration\n");
Amir Levya5774c42017-12-14 22:15:54 +02005193 } else if (!strcmp(dbg_buff, "1")) {
Amir Levy54fe4d32017-03-16 11:21:49 +02005194 pr_info(
Amir Levy2da9d452017-12-12 10:09:46 +02005195 "IPA is loading with non MHI configuration\n");
5196 } else {
5197 IPAERR("got invalid string %s not loading FW\n",
5198 dbg_buff);
5199 return count;
Amir Levy54fe4d32017-03-16 11:21:49 +02005200 }
Amir Levy54fe4d32017-03-16 11:21:49 +02005201 }
Ghanim Fodi03dcc272017-08-08 18:13:25 +03005202
Chaitanya Pratapa31bf6432019-12-11 22:18:17 -08005203	/* Prevent subsequent calls from trying to load the FW again. */
5204 if (ipa3_is_ready())
5205 return count;
5206
Skylar Changafc22fe2019-04-25 14:10:52 -07005207 /* Prevent multiple calls from trying to load the FW again. */
5208 if (ipa3_ctx->fw_loaded) {
5209 IPAERR("not load FW again\n");
5210 return count;
5211 }
5212
5213 /* Schedule WQ to load ipa-fws */
5214 ipa3_ctx->fw_loaded = true;
5215
Ghanim Fodi03dcc272017-08-08 18:13:25 +03005216 queue_work(ipa3_ctx->transport_power_mgmt_wq,
Ghanim Fodia5f376a2017-10-17 18:14:53 +03005217 &ipa3_fw_loading_work);
Ghanim Fodi03dcc272017-08-08 18:13:25 +03005218
Ghanim Fodia5f376a2017-10-17 18:14:53 +03005219 IPADBG("Scheduled a work to load IPA FW\n");
Amir Levy9659e592016-10-27 18:08:27 +03005220 return count;
5221}
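
/*
 * Usage sketch (an assumption about the init scripts, not part of this
 * file): user space triggers FW loading by writing one of the strings parsed
 * above to the IPA character device, e.g. from a shell (the /dev node name
 * follows DRV_NAME/udev and is illustrative):
 *
 *	echo 1 > /dev/ipa            - load FW, non-MHI configuration
 *	echo MHI > /dev/ipa          - load FW, MHI configuration (MDM only)
 *	echo "vlan eth" > /dev/ipa   - MDM only: enable VLAN mode for the EMAC
 *	                               interface, then write one of the above
 */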
5222
Skylar Chang48afa052017-10-25 09:32:57 -07005223/**
5224 * ipa3_tz_unlock_reg - Unlocks memory regions so that they become accessible
5225 * from AP.
5226 * @reg_info - Pointer to array of memory regions to unlock
5227 * @num_regs - Number of elements in the array
5228 *
5229 * Converts the input array of regions to a struct that TZ understands and
5230 * issues an SCM call.
5231 * Also flushes the memory cache to DDR in order to make sure that TZ sees the
5232 * correct data structure.
5233 *
5234 * Returns: 0 on success, negative on failure
5235 */
5236int ipa3_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs)
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005237{
5238 int i, size, ret, resp;
5239 struct tz_smmu_ipa_protect_region_iovec_s *ipa_tz_unlock_vec;
5240 struct tz_smmu_ipa_protect_region_s cmd_buf;
Skylar Chang3a696ba2017-10-25 09:35:07 -07005241 struct scm_desc desc = {0};
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005242
Skylar Chang48afa052017-10-25 09:32:57 -07005243 if (reg_info == NULL || num_regs == 0) {
5244 IPAERR("Bad parameters\n");
5245 return -EFAULT;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005246 }
Skylar Chang48afa052017-10-25 09:32:57 -07005247
5248 size = num_regs * sizeof(struct tz_smmu_ipa_protect_region_iovec_s);
5249 ipa_tz_unlock_vec = kzalloc(PAGE_ALIGN(size), GFP_KERNEL);
5250 if (ipa_tz_unlock_vec == NULL)
5251 return -ENOMEM;
5252
5253 for (i = 0; i < num_regs; i++) {
5254 ipa_tz_unlock_vec[i].input_addr = reg_info[i].reg_addr ^
5255 (reg_info[i].reg_addr & 0xFFF);
5256 ipa_tz_unlock_vec[i].output_addr = reg_info[i].reg_addr ^
5257 (reg_info[i].reg_addr & 0xFFF);
5258 ipa_tz_unlock_vec[i].size = reg_info[i].size;
5259 ipa_tz_unlock_vec[i].attr = IPA_TZ_UNLOCK_ATTRIBUTE;
5260 }
5261
5262 /* pass physical address of command buffer */
5263 cmd_buf.iovec_buf = virt_to_phys((void *)ipa_tz_unlock_vec);
5264 cmd_buf.size_bytes = size;
5265
5266 /* flush cache to DDR */
5267 __cpuc_flush_dcache_area((void *)ipa_tz_unlock_vec, size);
5268 outer_flush_range(cmd_buf.iovec_buf, cmd_buf.iovec_buf + size);
Skylar Chang3a696ba2017-10-25 09:35:07 -07005269 if (!is_scm_armv8())
5270 ret = scm_call(SCM_SVC_MP, TZ_MEM_PROTECT_REGION_ID,
5271 &cmd_buf, sizeof(cmd_buf), &resp, sizeof(resp));
5272 else {
5273 desc.args[0] = virt_to_phys((void *)ipa_tz_unlock_vec);
5274 desc.args[1] = size;
5275 desc.arginfo = SCM_ARGS(2, SCM_RO, SCM_VAL);
5276 ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
5277 TZ_MEM_PROTECT_REGION_ID), &desc);
5278 }
Skylar Chang48afa052017-10-25 09:32:57 -07005279
Skylar Chang48afa052017-10-25 09:32:57 -07005280 if (ret) {
5281 IPAERR("scm call SCM_SVC_MP failed: %d\n", ret);
5282 kfree(ipa_tz_unlock_vec);
5283 return -EFAULT;
5284 }
5285 kfree(ipa_tz_unlock_vec);
5286
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005287 return 0;
5288}
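
/*
 * Illustrative call sketch (the register address below is hypothetical; only
 * the ipa3_tz_unlock_reg() signature and struct ipa_tz_unlock_reg_info come
 * from this driver):
 *
 *	struct ipa_tz_unlock_reg_info regs[] = {
 *		{ .reg_addr = 0x01e40000, .size = 0x1000 },
 *	};
 *
 *	if (ipa3_tz_unlock_reg(regs, ARRAY_SIZE(regs)))
 *		IPAERR("TZ unlock of the region failed\n");
 */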
5289
Skylar Changcd3902d2017-03-27 18:08:27 -07005290static int ipa3_alloc_pkt_init(void)
5291{
5292 struct ipa_mem_buffer mem;
5293 struct ipahal_imm_cmd_pyld *cmd_pyld;
5294 struct ipahal_imm_cmd_ip_packet_init cmd = {0};
5295 int i;
5296
5297 cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
5298 &cmd, false);
5299 if (!cmd_pyld) {
5300 IPAERR("failed to construct IMM cmd\n");
5301 return -ENOMEM;
5302 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -07005303 ipa3_ctx->pkt_init_imm_opcode = cmd_pyld->opcode;
Skylar Changcd3902d2017-03-27 18:08:27 -07005304
5305 mem.size = cmd_pyld->len * ipa3_ctx->ipa_num_pipes;
5306 mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
5307 &mem.phys_base, GFP_KERNEL);
5308 if (!mem.base) {
5309 IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
5310 ipahal_destroy_imm_cmd(cmd_pyld);
5311 return -ENOMEM;
5312 }
5313 ipahal_destroy_imm_cmd(cmd_pyld);
5314
5315 memset(mem.base, 0, mem.size);
5316 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
5317 cmd.destination_pipe_index = i;
5318 cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
5319 &cmd, false);
5320 if (!cmd_pyld) {
5321 IPAERR("failed to construct IMM cmd\n");
5322 dma_free_coherent(ipa3_ctx->pdev,
5323 mem.size,
5324 mem.base,
5325 mem.phys_base);
5326 return -ENOMEM;
5327 }
5328 memcpy(mem.base + i * cmd_pyld->len, cmd_pyld->data,
5329 cmd_pyld->len);
5330 ipa3_ctx->pkt_init_imm[i] = mem.phys_base + i * cmd_pyld->len;
5331 ipahal_destroy_imm_cmd(cmd_pyld);
5332 }
5333
5334 return 0;
5335}
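
/*
 * Note (summarized from the code above): ipa3_ctx->pkt_init_imm[i] ends up
 * holding the physical address of a pre-built IP_PACKET_INIT immediate
 * command whose destination_pipe_index is i, so the data path can later
 * reference a ready-made command per destination pipe instead of
 * constructing one for every packet. How the data path consumes these
 * entries is outside this file.
 */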
5336
Amir Levy9659e592016-10-27 18:08:27 +03005337/**
Skylar Chang68c37d82018-04-07 16:42:36 -07005338 * ipa3_pre_init() - Initialize the IPA Driver.
5339 * This part contains all initialization which doesn't require IPA HW, such
5340 * as structure allocations and initializations, register writes, etc.
5341 *
5342 * @resource_p: contains platform-specific values from the DTS file
5343 * @ipa_pdev: The platform device structure representing the IPA driver
5344 *
5345 * Function initialization process:
5346 * Allocate memory for the driver context data struct
5347 * Initializing the ipa3_ctx with:
5348 * 1) parsed values from the DTS file
5349 * 2) parameters passed to the module initialization
5350 * 3) read HW values (such as core memory size)
5351 * Map IPA core registers to CPU memory
5352 * Restart IPA core (HW reset)
5353 * Initialize the look-aside caches (kmem_cache/slab) for filter,
5354 * routing and IPA-tree
5355 * Create memory pool with 4 objects for DMA operations (each object
5356 * is 512 bytes long); these objects will be used for tx (A5->IPA)
5357 * Initialize list heads (routing, hdr, system pipes)
5358 * Initialize mutexes (for ipa_ctx and NAT memory mutexes)
5359 * Initialize spinlocks (for lists related to A5<->IPA pipes)
5360 * Initialize 2 single-threaded work-queues named "ipa rx wq" and "ipa tx wq"
5361 * Initialize Red-Black-Tree(s) for handles of header, routing rule,
5362 * routing table, filtering rule
5363 * Initialize the filter block by committing IPV4 and IPV6 default rules
5364 * Create empty routing table in system memory (no committing)
5365 * Create a char-device for IPA
5366 * Initialize IPA RM (resource manager)
5367 * Configure GSI registers (in GSI case)
5368 */
Amir Levy9659e592016-10-27 18:08:27 +03005369static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
Skylar Changefc0a0f2018-03-29 11:17:40 -07005370 struct platform_device *ipa_pdev)
Amir Levy9659e592016-10-27 18:08:27 +03005371{
5372 int result = 0;
5373 int i;
Amir Levy9659e592016-10-27 18:08:27 +03005374 struct ipa3_rt_tbl_set *rset;
Mohammed Javidc6db3362018-02-13 13:41:38 +05305375 struct ipa_active_client_logging_info log_info;
Skylar Changefc0a0f2018-03-29 11:17:40 -07005376 struct cdev *cdev;
Amir Levy9659e592016-10-27 18:08:27 +03005377
5378 IPADBG("IPA Driver initialization started\n");
5379
5380 ipa3_ctx = kzalloc(sizeof(*ipa3_ctx), GFP_KERNEL);
5381 if (!ipa3_ctx) {
5382 IPAERR(":kzalloc err.\n");
5383 result = -ENOMEM;
5384 goto fail_mem_ctx;
5385 }
5386
5387 ipa3_ctx->logbuf = ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa", 0);
Skylar Chang841c1452017-04-03 16:07:22 -07005388 if (ipa3_ctx->logbuf == NULL)
Mohammed Javid0af3c662018-06-29 15:06:00 +05305389 IPADBG("failed to create IPC log, continue...\n");
Amir Levy9659e592016-10-27 18:08:27 +03005390
Skylar Changefc0a0f2018-03-29 11:17:40 -07005391 /* ipa3_ctx->pdev and ipa3_ctx->uc_pdev will be set in the smmu probes*/
5392 ipa3_ctx->master_pdev = ipa_pdev;
Michael Adisumartac8c404a2018-04-05 18:01:45 -07005393 for (i = 0; i < IPA_SMMU_CB_MAX; i++)
5394 ipa3_ctx->s1_bypass_arr[i] = true;
Michael Adisumarta93e97522017-10-06 15:49:46 -07005395
Amir Levy9659e592016-10-27 18:08:27 +03005396 ipa3_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
5397 ipa3_ctx->ipa_wrapper_size = resource_p->ipa_mem_size;
5398 ipa3_ctx->ipa_hw_type = resource_p->ipa_hw_type;
5399 ipa3_ctx->ipa3_hw_mode = resource_p->ipa3_hw_mode;
5400 ipa3_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
Amir Levy9659e592016-10-27 18:08:27 +03005401 ipa3_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt;
5402 ipa3_ctx->ipa_wdi2 = resource_p->ipa_wdi2;
Mohammed Javid80d0e2a2019-06-10 14:11:42 +05305403 ipa3_ctx->ipa_config_is_auto = resource_p->ipa_config_is_auto;
Chaitanya Pratapa3f54f3b2019-11-10 20:24:38 -08005404 ipa3_ctx->use_xbl_boot = resource_p->use_xbl_boot;
Amir Levy9659e592016-10-27 18:08:27 +03005405 ipa3_ctx->use_64_bit_dma_mask = resource_p->use_64_bit_dma_mask;
5406 ipa3_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size;
5407 ipa3_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size;
5408 ipa3_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset;
5409 ipa3_ctx->tethered_flow_control = resource_p->tethered_flow_control;
Amir Levy9659e592016-10-27 18:08:27 +03005410 ipa3_ctx->ee = resource_p->ee;
5411 ipa3_ctx->apply_rg10_wa = resource_p->apply_rg10_wa;
5412 ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa;
Michael Adisumarta3e350812017-09-18 14:54:36 -07005413 ipa3_ctx->use_ipa_pm = resource_p->use_ipa_pm;
Mohammed Javid03854df2018-06-20 18:36:57 +05305414 ipa3_ctx->wdi_over_pcie = resource_p->wdi_over_pcie;
Amir Levy9659e592016-10-27 18:08:27 +03005415 ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
Mohammed Javid73cd4d22018-04-03 17:15:49 +05305416 ipa3_ctx->ipa_config_is_mhi = resource_p->ipa_mhi_dynamic_config;
Ghanim Fodic823bc62017-10-21 17:29:53 +03005417 ipa3_ctx->mhi_evid_limits[0] = resource_p->mhi_evid_limits[0];
5418 ipa3_ctx->mhi_evid_limits[1] = resource_p->mhi_evid_limits[1];
Skylar Changefc0a0f2018-03-29 11:17:40 -07005419
5420 WARN(ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL,
5421 "Non NORMAL IPA HW mode, is this emulation platform ?");
5422
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005423 if (resource_p->ipa_tz_unlock_reg) {
5424 ipa3_ctx->ipa_tz_unlock_reg_num =
5425 resource_p->ipa_tz_unlock_reg_num;
5426 ipa3_ctx->ipa_tz_unlock_reg = kcalloc(
5427 ipa3_ctx->ipa_tz_unlock_reg_num,
5428 sizeof(*ipa3_ctx->ipa_tz_unlock_reg),
5429 GFP_KERNEL);
5430 if (ipa3_ctx->ipa_tz_unlock_reg == NULL) {
5431 result = -ENOMEM;
5432 goto fail_tz_unlock_reg;
5433 }
5434 for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) {
5435 ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr =
5436 resource_p->ipa_tz_unlock_reg[i].reg_addr;
5437 ipa3_ctx->ipa_tz_unlock_reg[i].size =
5438 resource_p->ipa_tz_unlock_reg[i].size;
5439 }
5440 }
5441
5442 /* unlock registers for uc */
Skylar Chang48afa052017-10-25 09:32:57 -07005443 result = ipa3_tz_unlock_reg(ipa3_ctx->ipa_tz_unlock_reg,
5444 ipa3_ctx->ipa_tz_unlock_reg_num);
5445 if (result)
5446 IPAERR("Failed to unlock memory region using TZ\n");
Amir Levy9659e592016-10-27 18:08:27 +03005447
5448 /* default aggregation parameters */
5449 ipa3_ctx->aggregation_type = IPA_MBIM_16;
5450 ipa3_ctx->aggregation_byte_limit = 1;
5451 ipa3_ctx->aggregation_time_limit = 0;
5452
5453 ipa3_ctx->ctrl = kzalloc(sizeof(*ipa3_ctx->ctrl), GFP_KERNEL);
5454 if (!ipa3_ctx->ctrl) {
5455 IPAERR("memory allocation error for ctrl\n");
5456 result = -ENOMEM;
5457 goto fail_mem_ctrl;
5458 }
5459 result = ipa3_controller_static_bind(ipa3_ctx->ctrl,
5460 ipa3_ctx->ipa_hw_type);
5461 if (result) {
5462 IPAERR("fail to static bind IPA ctrl.\n");
5463 result = -EFAULT;
5464 goto fail_bind;
5465 }
5466
Skylar Changefc0a0f2018-03-29 11:17:40 -07005467 result = ipa3_init_mem_partition(ipa3_ctx->master_pdev->dev.of_node);
Amir Levy9659e592016-10-27 18:08:27 +03005468 if (result) {
5469 IPAERR(":ipa3_init_mem_partition failed!\n");
5470 result = -ENODEV;
5471 goto fail_init_mem_partition;
5472 }
5473
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005474 if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL &&
5475 ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
Skylar Changefc0a0f2018-03-29 11:17:40 -07005476 ipa3_ctx->ctrl->msm_bus_data_ptr =
5477 msm_bus_cl_get_pdata(ipa3_ctx->master_pdev);
5478 if (ipa3_ctx->ctrl->msm_bus_data_ptr == NULL) {
5479 IPAERR("failed to get bus scaling\n");
5480 goto fail_bus_reg;
5481 }
Ghanim Fodi6a831342017-03-07 18:19:15 +02005482 IPADBG("Use bus scaling info from device tree #usecases=%d\n",
Skylar Changefc0a0f2018-03-29 11:17:40 -07005483 ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases);
Amir Levy9659e592016-10-27 18:08:27 +03005484
Skylar Changefc0a0f2018-03-29 11:17:40 -07005485 /* get BUS handle */
5486 ipa3_ctx->ipa_bus_hdl =
5487 msm_bus_scale_register_client(
5488 ipa3_ctx->ctrl->msm_bus_data_ptr);
5489 if (!ipa3_ctx->ipa_bus_hdl) {
5490 IPAERR("fail to register with bus mgr!\n");
5491 result = -ENODEV;
5492 goto fail_bus_reg;
5493 }
Amir Levy9659e592016-10-27 18:08:27 +03005494 }
5495
5496 /* get IPA clocks */
Skylar Changefc0a0f2018-03-29 11:17:40 -07005497 result = ipa3_get_clks(&ipa3_ctx->master_pdev->dev);
Amir Levy9659e592016-10-27 18:08:27 +03005498 if (result)
5499 goto fail_clk;
5500
5501 /* init active_clients_log after getting ipa-clk */
Ghanim Fodic48ba992017-12-24 19:28:38 +02005502 result = ipa3_active_clients_log_init();
5503 if (result)
Amir Levy9659e592016-10-27 18:08:27 +03005504 goto fail_init_active_client;
5505
5506 /* Enable ipa3_ctx->enable_clock_scaling */
5507 ipa3_ctx->enable_clock_scaling = 1;
5508 ipa3_ctx->curr_ipa_clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
5509
5510 /* enable IPA clocks explicitly to allow the initialization */
5511 ipa3_enable_clks();
5512
5513 /* setup IPA register access */
5514 IPADBG("Mapping 0x%x\n", resource_p->ipa_mem_base +
5515 ipa3_ctx->ctrl->ipa_reg_base_ofst);
5516 ipa3_ctx->mmio = ioremap(resource_p->ipa_mem_base +
5517 ipa3_ctx->ctrl->ipa_reg_base_ofst,
5518 resource_p->ipa_mem_size);
5519 if (!ipa3_ctx->mmio) {
5520 IPAERR(":ipa-base ioremap err.\n");
5521 result = -EFAULT;
5522 goto fail_remap;
5523 }
5524
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005525 IPADBG(
5526 "base(0x%x)+offset(0x%x)=(0x%x) mapped to (%pK) with len (0x%x)\n",
5527 resource_p->ipa_mem_base,
5528 ipa3_ctx->ctrl->ipa_reg_base_ofst,
5529 resource_p->ipa_mem_base + ipa3_ctx->ctrl->ipa_reg_base_ofst,
5530 ipa3_ctx->mmio,
5531 resource_p->ipa_mem_size);
5532
5533 /*
5534	 * Emulation requires ipahal to be initialized this early because it is
5535	 * needed for FW download, hence the call here.
5536 */
5537 if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
5538 if (ipahal_init(ipa3_ctx->ipa_hw_type,
5539 ipa3_ctx->mmio,
5540 &(ipa3_ctx->master_pdev->dev))) {
5541 IPAERR("fail to init ipahal\n");
5542 result = -EFAULT;
5543 goto fail_ipahal_init;
5544 }
5545 }
5546
Amir Levy9659e592016-10-27 18:08:27 +03005547 mutex_init(&ipa3_ctx->ipa3_active_clients.mutex);
Mohammed Javidc6db3362018-02-13 13:41:38 +05305548
5549 IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
5550 ipa3_active_clients_log_inc(&log_info, false);
5551 ipa3_ctx->q6_proxy_clk_vote_valid = true;
5552 ipa3_ctx->q6_proxy_clk_vote_cnt = 1;
5553
5554	/* Update the proxy vote count to 1 */
Skylar Chang242952b2017-07-20 15:04:05 -07005555 atomic_set(&ipa3_ctx->ipa3_active_clients.cnt, 1);
Amir Levy9659e592016-10-27 18:08:27 +03005556
Amir Levy9659e592016-10-27 18:08:27 +03005557 /* Create workqueues for power management */
5558 ipa3_ctx->power_mgmt_wq =
5559 create_singlethread_workqueue("ipa_power_mgmt");
5560 if (!ipa3_ctx->power_mgmt_wq) {
5561 IPAERR("failed to create power mgmt wq\n");
5562 result = -ENOMEM;
5563 goto fail_init_hw;
5564 }
5565
5566 ipa3_ctx->transport_power_mgmt_wq =
5567 create_singlethread_workqueue("transport_power_mgmt");
5568 if (!ipa3_ctx->transport_power_mgmt_wq) {
5569 IPAERR("failed to create transport power mgmt wq\n");
5570 result = -ENOMEM;
5571 goto fail_create_transport_wq;
5572 }
5573
Sridhar Ancha99b505b2016-04-21 23:11:10 +05305574 mutex_init(&ipa3_ctx->transport_pm.transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03005575
5576 /* init the lookaside cache */
5577 ipa3_ctx->flt_rule_cache = kmem_cache_create("IPA_FLT",
5578 sizeof(struct ipa3_flt_entry), 0, 0, NULL);
5579 if (!ipa3_ctx->flt_rule_cache) {
5580 IPAERR(":ipa flt cache create failed\n");
5581 result = -ENOMEM;
5582 goto fail_flt_rule_cache;
5583 }
5584 ipa3_ctx->rt_rule_cache = kmem_cache_create("IPA_RT",
5585 sizeof(struct ipa3_rt_entry), 0, 0, NULL);
5586 if (!ipa3_ctx->rt_rule_cache) {
5587 IPAERR(":ipa rt cache create failed\n");
5588 result = -ENOMEM;
5589 goto fail_rt_rule_cache;
5590 }
5591 ipa3_ctx->hdr_cache = kmem_cache_create("IPA_HDR",
5592 sizeof(struct ipa3_hdr_entry), 0, 0, NULL);
5593 if (!ipa3_ctx->hdr_cache) {
5594 IPAERR(":ipa hdr cache create failed\n");
5595 result = -ENOMEM;
5596 goto fail_hdr_cache;
5597 }
5598 ipa3_ctx->hdr_offset_cache =
5599 kmem_cache_create("IPA_HDR_OFFSET",
5600 sizeof(struct ipa_hdr_offset_entry), 0, 0, NULL);
5601 if (!ipa3_ctx->hdr_offset_cache) {
5602 IPAERR(":ipa hdr off cache create failed\n");
5603 result = -ENOMEM;
5604 goto fail_hdr_offset_cache;
5605 }
5606 ipa3_ctx->hdr_proc_ctx_cache = kmem_cache_create("IPA_HDR_PROC_CTX",
5607 sizeof(struct ipa3_hdr_proc_ctx_entry), 0, 0, NULL);
5608 if (!ipa3_ctx->hdr_proc_ctx_cache) {
5609 IPAERR(":ipa hdr proc ctx cache create failed\n");
5610 result = -ENOMEM;
5611 goto fail_hdr_proc_ctx_cache;
5612 }
5613 ipa3_ctx->hdr_proc_ctx_offset_cache =
5614 kmem_cache_create("IPA_HDR_PROC_CTX_OFFSET",
5615 sizeof(struct ipa3_hdr_proc_ctx_offset_entry), 0, 0, NULL);
5616 if (!ipa3_ctx->hdr_proc_ctx_offset_cache) {
5617 IPAERR(":ipa hdr proc ctx off cache create failed\n");
5618 result = -ENOMEM;
5619 goto fail_hdr_proc_ctx_offset_cache;
5620 }
5621 ipa3_ctx->rt_tbl_cache = kmem_cache_create("IPA_RT_TBL",
5622 sizeof(struct ipa3_rt_tbl), 0, 0, NULL);
5623 if (!ipa3_ctx->rt_tbl_cache) {
5624 IPAERR(":ipa rt tbl cache create failed\n");
5625 result = -ENOMEM;
5626 goto fail_rt_tbl_cache;
5627 }
5628 ipa3_ctx->tx_pkt_wrapper_cache =
5629 kmem_cache_create("IPA_TX_PKT_WRAPPER",
5630 sizeof(struct ipa3_tx_pkt_wrapper), 0, 0, NULL);
5631 if (!ipa3_ctx->tx_pkt_wrapper_cache) {
5632 IPAERR(":ipa tx pkt wrapper cache create failed\n");
5633 result = -ENOMEM;
5634 goto fail_tx_pkt_wrapper_cache;
5635 }
5636 ipa3_ctx->rx_pkt_wrapper_cache =
5637 kmem_cache_create("IPA_RX_PKT_WRAPPER",
5638 sizeof(struct ipa3_rx_pkt_wrapper), 0, 0, NULL);
5639 if (!ipa3_ctx->rx_pkt_wrapper_cache) {
5640 IPAERR(":ipa rx pkt wrapper cache create failed\n");
5641 result = -ENOMEM;
5642 goto fail_rx_pkt_wrapper_cache;
5643 }
5644
Amir Levy9659e592016-10-27 18:08:27 +03005645 /* init the various list heads */
5646 INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_hdr_entry_list);
5647 for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
5648 INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_offset_list[i]);
5649 INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_free_offset_list[i]);
5650 }
5651 INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list);
5652 for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
5653 INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i]);
5654 INIT_LIST_HEAD(&ipa3_ctx->
5655 hdr_proc_ctx_tbl.head_free_offset_list[i]);
5656 }
5657 INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
Skylar Chang0c37f5f2017-07-24 10:22:53 -07005658 idr_init(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].rule_ids);
Amir Levy9659e592016-10-27 18:08:27 +03005659 INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
Skylar Chang0c37f5f2017-07-24 10:22:53 -07005660 idr_init(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].rule_ids);
Amir Levy9659e592016-10-27 18:08:27 +03005661
5662 rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
5663 INIT_LIST_HEAD(&rset->head_rt_tbl_list);
Skylar Chang0c37f5f2017-07-24 10:22:53 -07005664 idr_init(&rset->rule_ids);
Amir Levy9659e592016-10-27 18:08:27 +03005665 rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
5666 INIT_LIST_HEAD(&rset->head_rt_tbl_list);
Skylar Chang0c37f5f2017-07-24 10:22:53 -07005667 idr_init(&rset->rule_ids);
Amir Levy9659e592016-10-27 18:08:27 +03005668
5669 INIT_LIST_HEAD(&ipa3_ctx->intf_list);
5670 INIT_LIST_HEAD(&ipa3_ctx->msg_list);
5671 INIT_LIST_HEAD(&ipa3_ctx->pull_msg_list);
5672 init_waitqueue_head(&ipa3_ctx->msg_waitq);
5673 mutex_init(&ipa3_ctx->msg_lock);
5674
Skylar Chang68c37d82018-04-07 16:42:36 -07005675 /* store wlan client-connect-msg-list */
5676 INIT_LIST_HEAD(&ipa3_ctx->msg_wlan_client_list);
5677 mutex_init(&ipa3_ctx->msg_wlan_client_lock);
5678
Amir Levy9659e592016-10-27 18:08:27 +03005679 mutex_init(&ipa3_ctx->lock);
Skylar Changfb792c62017-08-17 12:53:23 -07005680 mutex_init(&ipa3_ctx->q6_proxy_clk_vote_mutex);
Mohammed Javidb4b5ef42017-08-29 01:05:46 +05305681 mutex_init(&ipa3_ctx->ipa_cne_evt_lock);
Amir Levy9659e592016-10-27 18:08:27 +03005682
5683 idr_init(&ipa3_ctx->ipa_idr);
5684 spin_lock_init(&ipa3_ctx->idr_lock);
5685
5686 /* wlan related member */
5687 memset(&ipa3_ctx->wc_memb, 0, sizeof(ipa3_ctx->wc_memb));
5688 spin_lock_init(&ipa3_ctx->wc_memb.wlan_spinlock);
5689 spin_lock_init(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
5690 INIT_LIST_HEAD(&ipa3_ctx->wc_memb.wlan_comm_desc_list);
5691
Skylar Changefc0a0f2018-03-29 11:17:40 -07005692 ipa3_ctx->cdev.class = class_create(THIS_MODULE, DRV_NAME);
Amir Levy9659e592016-10-27 18:08:27 +03005693
Skylar Changefc0a0f2018-03-29 11:17:40 -07005694 result = alloc_chrdev_region(&ipa3_ctx->cdev.dev_num, 0, 1, DRV_NAME);
Amir Levy9659e592016-10-27 18:08:27 +03005695 if (result) {
5696 IPAERR("alloc_chrdev_region err.\n");
5697 result = -ENODEV;
5698 goto fail_alloc_chrdev_region;
5699 }
5700
Skylar Changefc0a0f2018-03-29 11:17:40 -07005701 ipa3_ctx->cdev.dev = device_create(ipa3_ctx->cdev.class, NULL,
5702 ipa3_ctx->cdev.dev_num, ipa3_ctx, DRV_NAME);
5703 if (IS_ERR(ipa3_ctx->cdev.dev)) {
Amir Levy9659e592016-10-27 18:08:27 +03005704 IPAERR(":device_create err.\n");
5705 result = -ENODEV;
5706 goto fail_device_create;
5707 }
5708
Amir Levy9659e592016-10-27 18:08:27 +03005709 /* Create a wakeup source. */
5710 wakeup_source_init(&ipa3_ctx->w_lock, "IPA_WS");
5711 spin_lock_init(&ipa3_ctx->wakelock_ref_cnt.spinlock);
5712
Michael Adisumarta3e350812017-09-18 14:54:36 -07005713 /* Initialize Power Management framework */
5714 if (ipa3_ctx->use_ipa_pm) {
5715 result = ipa_pm_init(&ipa3_res.pm_init);
5716 if (result) {
5717 IPAERR("IPA PM initialization failed (%d)\n", -result);
5718 result = -ENODEV;
5719 goto fail_ipa_rm_init;
5720 }
5721 IPADBG("IPA resource manager initialized");
5722 } else {
5723 result = ipa_rm_initialize();
5724 if (result) {
5725 IPAERR("RM initialization failed (%d)\n", -result);
5726 result = -ENODEV;
5727 goto fail_ipa_rm_init;
5728 }
5729 IPADBG("IPA resource manager initialized");
Amir Levy9659e592016-10-27 18:08:27 +03005730
Michael Adisumarta3e350812017-09-18 14:54:36 -07005731 result = ipa3_create_apps_resource();
5732 if (result) {
5733 IPAERR("Failed to create APPS_CONS resource\n");
5734 result = -ENODEV;
5735 goto fail_create_apps_resource;
5736 }
Amir Levy9659e592016-10-27 18:08:27 +03005737 }
5738
Amir Levy9659e592016-10-27 18:08:27 +03005739 INIT_LIST_HEAD(&ipa3_ctx->ipa_ready_cb_list);
5740
5741 init_completion(&ipa3_ctx->init_completion_obj);
Skylar Chang0c17c7d2016-10-31 09:57:54 -07005742 init_completion(&ipa3_ctx->uc_loaded_completion_obj);
Amir Levy9659e592016-10-27 18:08:27 +03005743
Ghanim Fodie6bb7a82017-10-02 17:59:58 +03005744 result = ipa3_dma_setup();
5745 if (result) {
5746 IPAERR("Failed to setup IPA DMA\n");
5747 result = -ENODEV;
5748 goto fail_ipa_dma_setup;
5749 }
5750
Amir Levy9659e592016-10-27 18:08:27 +03005751 /*
Amir Levya59ed3f2017-03-05 17:30:55 +02005752 * We can't register the GSI driver yet, as it expects
Amir Levy9659e592016-10-27 18:08:27 +03005753 * the GSI FW to be up and running before the registration.
Amir Levya59ed3f2017-03-05 17:30:55 +02005754 *
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005755 * For IPA3.0 and the emulation system, the GSI configuration
5756 * is done by the GSI driver.
5757 *
Amir Levya59ed3f2017-03-05 17:30:55 +02005758 * For IPA3.1 (and on), the GSI configuration is done by TZ.
Amir Levy9659e592016-10-27 18:08:27 +03005759 */
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005760 if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0 ||
5761 ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
Amir Levya59ed3f2017-03-05 17:30:55 +02005762 result = ipa3_gsi_pre_fw_load_init();
5763 if (result) {
5764 IPAERR("gsi pre FW loading config failed\n");
5765 result = -ENODEV;
Ghanim Fodie6bb7a82017-10-02 17:59:58 +03005766 goto fail_gsi_pre_fw_load_init;
Amir Levy9659e592016-10-27 18:08:27 +03005767 }
5768 }
Amir Levy9659e592016-10-27 18:08:27 +03005769
Skylar Changefc0a0f2018-03-29 11:17:40 -07005770 cdev = &ipa3_ctx->cdev.cdev;
5771 cdev_init(cdev, &ipa3_drv_fops);
5772 cdev->owner = THIS_MODULE;
5773 cdev->ops = &ipa3_drv_fops; /* from LDD3 */
Utkarsh Saxenaded78142017-05-03 14:04:30 +05305774
Skylar Changefc0a0f2018-03-29 11:17:40 -07005775 result = cdev_add(cdev, ipa3_ctx->cdev.dev_num, 1);
Utkarsh Saxenaded78142017-05-03 14:04:30 +05305776 if (result) {
5777 IPAERR(":cdev_add err=%d\n", -result);
5778 result = -ENODEV;
5779 goto fail_cdev_add;
5780 }
5781 IPADBG("ipa cdev added successful. major:%d minor:%d\n",
Skylar Changefc0a0f2018-03-29 11:17:40 -07005782 MAJOR(ipa3_ctx->cdev.dev_num),
5783 MINOR(ipa3_ctx->cdev.dev_num));
Mohammed Javidc6db3362018-02-13 13:41:38 +05305784 /*
5785	 * For IPA 4.0 offline charging is not needed, and power collapse must
5786	 * be prevented until the IPA uC is loaded.
5787 */
5788
Skylar Chang40430532017-07-06 14:31:57 -07005789 /* proxy vote for modem is added in ipa3_post_init() phase */
Mohammed Javidc6db3362018-02-13 13:41:38 +05305790 if (ipa3_ctx->ipa_hw_type != IPA_HW_v4_0)
5791 ipa3_proxy_clk_unvote();
Amir Levy9659e592016-10-27 18:08:27 +03005792 return 0;
5793
Utkarsh Saxenaded78142017-05-03 14:04:30 +05305794fail_cdev_add:
Ghanim Fodie6bb7a82017-10-02 17:59:58 +03005795fail_gsi_pre_fw_load_init:
5796 ipa3_dma_shutdown();
5797fail_ipa_dma_setup:
Ghanim Fodie6bb7a82017-10-02 17:59:58 +03005798 if (ipa3_ctx->use_ipa_pm)
5799 ipa_pm_destroy();
5800 else
Michael Adisumarta3e350812017-09-18 14:54:36 -07005801 ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
Amir Levy9659e592016-10-27 18:08:27 +03005802fail_create_apps_resource:
Michael Adisumarta3e350812017-09-18 14:54:36 -07005803 if (!ipa3_ctx->use_ipa_pm)
5804 ipa_rm_exit();
Amir Levy9659e592016-10-27 18:08:27 +03005805fail_ipa_rm_init:
Skylar Changefc0a0f2018-03-29 11:17:40 -07005806 device_destroy(ipa3_ctx->cdev.class, ipa3_ctx->cdev.dev_num);
Amir Levy9659e592016-10-27 18:08:27 +03005807fail_device_create:
Skylar Changefc0a0f2018-03-29 11:17:40 -07005808 unregister_chrdev_region(ipa3_ctx->cdev.dev_num, 1);
Amir Levy9659e592016-10-27 18:08:27 +03005809fail_alloc_chrdev_region:
Ghanim Fodie6bb7a82017-10-02 17:59:58 +03005810 idr_destroy(&ipa3_ctx->ipa_idr);
Skylar Chang0c37f5f2017-07-24 10:22:53 -07005811 rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
5812 idr_destroy(&rset->rule_ids);
5813 rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
5814 idr_destroy(&rset->rule_ids);
5815 idr_destroy(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].rule_ids);
5816 idr_destroy(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].rule_ids);
Amir Levy9659e592016-10-27 18:08:27 +03005817 kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
5818fail_rx_pkt_wrapper_cache:
5819 kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
5820fail_tx_pkt_wrapper_cache:
5821 kmem_cache_destroy(ipa3_ctx->rt_tbl_cache);
5822fail_rt_tbl_cache:
5823 kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache);
5824fail_hdr_proc_ctx_offset_cache:
5825 kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache);
5826fail_hdr_proc_ctx_cache:
5827 kmem_cache_destroy(ipa3_ctx->hdr_offset_cache);
5828fail_hdr_offset_cache:
5829 kmem_cache_destroy(ipa3_ctx->hdr_cache);
5830fail_hdr_cache:
5831 kmem_cache_destroy(ipa3_ctx->rt_rule_cache);
5832fail_rt_rule_cache:
5833 kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
5834fail_flt_rule_cache:
5835 destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
5836fail_create_transport_wq:
5837 destroy_workqueue(ipa3_ctx->power_mgmt_wq);
5838fail_init_hw:
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005839 if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION)
5840 ipahal_destroy();
5841fail_ipahal_init:
Amir Levy9659e592016-10-27 18:08:27 +03005842 iounmap(ipa3_ctx->mmio);
5843fail_remap:
5844 ipa3_disable_clks();
5845 ipa3_active_clients_log_destroy();
5846fail_init_active_client:
Ghanim Fodi6a831342017-03-07 18:19:15 +02005847 if (ipa3_clk)
5848 clk_put(ipa3_clk);
5849 ipa3_clk = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03005850fail_clk:
Skylar Changefc0a0f2018-03-29 11:17:40 -07005851 if (ipa3_ctx->ipa_bus_hdl)
5852 msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
Amir Levy9659e592016-10-27 18:08:27 +03005853fail_bus_reg:
Skylar Changefc0a0f2018-03-29 11:17:40 -07005854 if (ipa3_ctx->ctrl->msm_bus_data_ptr)
5855 msm_bus_cl_clear_pdata(ipa3_ctx->ctrl->msm_bus_data_ptr);
Amir Levy9659e592016-10-27 18:08:27 +03005856fail_init_mem_partition:
5857fail_bind:
5858 kfree(ipa3_ctx->ctrl);
5859fail_mem_ctrl:
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005860 kfree(ipa3_ctx->ipa_tz_unlock_reg);
5861fail_tz_unlock_reg:
Skylar Chang841c1452017-04-03 16:07:22 -07005862 if (ipa3_ctx->logbuf)
5863 ipc_log_context_destroy(ipa3_ctx->logbuf);
Amir Levy9659e592016-10-27 18:08:27 +03005864 kfree(ipa3_ctx);
5865 ipa3_ctx = NULL;
5866fail_mem_ctx:
5867 return result;
5868}
5869
Michael Adisumarta3e350812017-09-18 14:54:36 -07005870static int get_ipa_dts_pm_info(struct platform_device *pdev,
5871 struct ipa3_plat_drv_res *ipa_drv_res)
5872{
5873 int result;
5874 int i, j;
5875
5876 ipa_drv_res->use_ipa_pm = of_property_read_bool(pdev->dev.of_node,
5877 "qcom,use-ipa-pm");
5878 IPADBG("use_ipa_pm=%d\n", ipa_drv_res->use_ipa_pm);
5879 if (!ipa_drv_res->use_ipa_pm)
5880 return 0;
5881
5882 result = of_property_read_u32(pdev->dev.of_node,
5883 "qcom,msm-bus,num-cases",
5884 &ipa_drv_res->pm_init.threshold_size);
5885 /* No vote is ignored */
5886 ipa_drv_res->pm_init.threshold_size -= 2;
5887 if (result || ipa_drv_res->pm_init.threshold_size >
5888 IPA_PM_THRESHOLD_MAX) {
5889 IPAERR("invalid property qcom,msm-bus,num-cases %d\n",
5890 ipa_drv_res->pm_init.threshold_size);
5891 return -EFAULT;
5892 }
5893
5894 result = of_property_read_u32_array(pdev->dev.of_node,
5895 "qcom,throughput-threshold",
5896 ipa_drv_res->pm_init.default_threshold,
5897 ipa_drv_res->pm_init.threshold_size);
5898 if (result) {
5899 IPAERR("failed to read qcom,throughput-thresholds\n");
5900 return -EFAULT;
5901 }
5902
5903 result = of_property_count_strings(pdev->dev.of_node,
5904 "qcom,scaling-exceptions");
5905 if (result < 0) {
5906 IPADBG("no exception list for ipa pm\n");
5907 result = 0;
5908 }
5909
5910 if (result % (ipa_drv_res->pm_init.threshold_size + 1)) {
5911 IPAERR("failed to read qcom,scaling-exceptions\n");
5912 return -EFAULT;
5913 }
5914
5915 ipa_drv_res->pm_init.exception_size = result /
5916 (ipa_drv_res->pm_init.threshold_size + 1);
5917 if (ipa_drv_res->pm_init.exception_size >=
5918 IPA_PM_EXCEPTION_MAX) {
5919 IPAERR("exception list larger then max %d\n",
5920 ipa_drv_res->pm_init.exception_size);
5921 return -EFAULT;
5922 }
5923
5924 for (i = 0; i < ipa_drv_res->pm_init.exception_size; i++) {
5925 struct ipa_pm_exception *ex = ipa_drv_res->pm_init.exceptions;
5926
5927 result = of_property_read_string_index(pdev->dev.of_node,
5928 "qcom,scaling-exceptions",
5929 i * ipa_drv_res->pm_init.threshold_size,
5930 &ex[i].usecase);
5931 if (result) {
5932 IPAERR("failed to read qcom,scaling-exceptions");
5933 return -EFAULT;
5934 }
5935
5936 for (j = 0; j < ipa_drv_res->pm_init.threshold_size; j++) {
5937 const char *str;
5938
5939 result = of_property_read_string_index(
5940 pdev->dev.of_node,
5941 "qcom,scaling-exceptions",
5942 i * ipa_drv_res->pm_init.threshold_size + j + 1,
5943 &str);
5944 if (result) {
5945 IPAERR("failed to read qcom,scaling-exceptions"
5946 );
5947 return -EFAULT;
5948 }
5949
5950 if (kstrtou32(str, 0, &ex[i].threshold[j])) {
5951 IPAERR("error str=%s\n", str);
5952 return -EFAULT;
5953 }
5954 }
5955 }
5956
5957 return 0;
5958}
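
/*
 * Illustrative device-tree fragment for the properties parsed above (the
 * values are made up; only the property names come from this function). With
 * qcom,msm-bus,num-cases = <4>, threshold_size becomes 2, so each scaling
 * exception is a use-case string followed by two threshold strings:
 *
 *	qcom,use-ipa-pm;
 *	qcom,msm-bus,num-cases = <4>;
 *	qcom,throughput-threshold = <310 600>;
 *	qcom,scaling-exceptions = "USB", "310", "1000";
 */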
5959
Amir Levy9659e592016-10-27 18:08:27 +03005960static int get_ipa_dts_configuration(struct platform_device *pdev,
5961 struct ipa3_plat_drv_res *ipa_drv_res)
5962{
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005963 int i, result, pos;
Amir Levy9659e592016-10-27 18:08:27 +03005964 struct resource *resource;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005965 u32 *ipa_tz_unlock_reg;
5966 int elem_num;
Ghanim Fodic823bc62017-10-21 17:29:53 +03005967 u32 mhi_evid_limits[2];
Amir Levy9659e592016-10-27 18:08:27 +03005968
5969 /* initialize ipa3_res */
5970 ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
5971 ipa_drv_res->ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
5972 ipa_drv_res->ipa_hw_type = 0;
5973 ipa_drv_res->ipa3_hw_mode = 0;
Amir Levy9659e592016-10-27 18:08:27 +03005974 ipa_drv_res->modem_cfg_emb_pipe_flt = false;
5975 ipa_drv_res->ipa_wdi2 = false;
Mohammed Javid80d0e2a2019-06-10 14:11:42 +05305976 ipa_drv_res->ipa_config_is_auto = false;
Chaitanya Pratapa3f54f3b2019-11-10 20:24:38 -08005977 ipa_drv_res->use_xbl_boot = false;
Mohammed Javid73cd4d22018-04-03 17:15:49 +05305978 ipa_drv_res->ipa_mhi_dynamic_config = false;
Amir Levy9659e592016-10-27 18:08:27 +03005979 ipa_drv_res->use_64_bit_dma_mask = false;
Ghanim Fodi6a831342017-03-07 18:19:15 +02005980 ipa_drv_res->use_bw_vote = false;
Amir Levy9659e592016-10-27 18:08:27 +03005981 ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
5982 ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
5983 ipa_drv_res->apply_rg10_wa = false;
5984 ipa_drv_res->gsi_ch20_wa = false;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005985 ipa_drv_res->ipa_tz_unlock_reg_num = 0;
5986 ipa_drv_res->ipa_tz_unlock_reg = NULL;
Ghanim Fodic823bc62017-10-21 17:29:53 +03005987 ipa_drv_res->mhi_evid_limits[0] = IPA_MHI_GSI_EVENT_RING_ID_START;
5988 ipa_drv_res->mhi_evid_limits[1] = IPA_MHI_GSI_EVENT_RING_ID_END;
Amir Levy9659e592016-10-27 18:08:27 +03005989
5990 /* Get IPA HW Version */
5991 result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
5992 &ipa_drv_res->ipa_hw_type);
5993 if ((result) || (ipa_drv_res->ipa_hw_type == 0)) {
5994 IPAERR(":get resource failed for ipa-hw-ver!\n");
5995 return -ENODEV;
5996 }
5997 IPADBG(": ipa_hw_type = %d", ipa_drv_res->ipa_hw_type);
5998
5999 if (ipa_drv_res->ipa_hw_type < IPA_HW_v3_0) {
6000 IPAERR(":IPA version below 3.0 not supported!\n");
6001 return -ENODEV;
6002 }
6003
6004 /* Get IPA HW mode */
6005 result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-mode",
6006 &ipa_drv_res->ipa3_hw_mode);
6007 if (result)
6008 IPADBG("using default (IPA_MODE_NORMAL) for ipa-hw-mode\n");
6009 else
6010 IPADBG(": found ipa_drv_res->ipa3_hw_mode = %d",
6011 ipa_drv_res->ipa3_hw_mode);
6012
6013 /* Get IPA WAN / LAN RX pool size */
6014 result = of_property_read_u32(pdev->dev.of_node,
6015 "qcom,wan-rx-ring-size",
6016 &ipa_drv_res->wan_rx_ring_size);
6017 if (result)
6018 IPADBG("using default for wan-rx-ring-size = %u\n",
6019 ipa_drv_res->wan_rx_ring_size);
6020 else
6021 IPADBG(": found ipa_drv_res->wan-rx-ring-size = %u",
6022 ipa_drv_res->wan_rx_ring_size);
6023
6024 result = of_property_read_u32(pdev->dev.of_node,
6025 "qcom,lan-rx-ring-size",
6026 &ipa_drv_res->lan_rx_ring_size);
6027 if (result)
6028 IPADBG("using default for lan-rx-ring-size = %u\n",
6029 ipa_drv_res->lan_rx_ring_size);
6030 else
6031 IPADBG(": found ipa_drv_res->lan-rx-ring-size = %u",
6032 ipa_drv_res->lan_rx_ring_size);
6033
6034 ipa_drv_res->use_ipa_teth_bridge =
6035 of_property_read_bool(pdev->dev.of_node,
6036 "qcom,use-ipa-tethering-bridge");
6037 IPADBG(": using TBDr = %s",
6038 ipa_drv_res->use_ipa_teth_bridge
6039 ? "True" : "False");
6040
Mohammed Javid73cd4d22018-04-03 17:15:49 +05306041 ipa_drv_res->ipa_mhi_dynamic_config =
6042 of_property_read_bool(pdev->dev.of_node,
6043 "qcom,use-ipa-in-mhi-mode");
6044 IPADBG(": ipa_mhi_dynamic_config (%s)\n",
6045 ipa_drv_res->ipa_mhi_dynamic_config
6046 ? "True" : "False");
6047
Amir Levy9659e592016-10-27 18:08:27 +03006048 ipa_drv_res->modem_cfg_emb_pipe_flt =
6049 of_property_read_bool(pdev->dev.of_node,
6050 "qcom,modem-cfg-emb-pipe-flt");
6051 IPADBG(": modem configure embedded pipe filtering = %s\n",
6052 ipa_drv_res->modem_cfg_emb_pipe_flt
6053 ? "True" : "False");
6054
6055 ipa_drv_res->ipa_wdi2 =
6056 of_property_read_bool(pdev->dev.of_node,
6057 "qcom,ipa-wdi2");
6058 IPADBG(": WDI-2.0 = %s\n",
6059 ipa_drv_res->ipa_wdi2
6060 ? "True" : "False");
6061
Mohammed Javid80d0e2a2019-06-10 14:11:42 +05306062 ipa_drv_res->ipa_config_is_auto =
6063 of_property_read_bool(pdev->dev.of_node,
6064 "qcom,ipa-config-is-auto");
6065 IPADBG(": ipa-config-is-auto = %s\n",
6066 ipa_drv_res->ipa_config_is_auto
6067 ? "True" : "False");
6068
Chaitanya Pratapa3f54f3b2019-11-10 20:24:38 -08006069 ipa_drv_res->use_xbl_boot =
6070 of_property_read_bool(pdev->dev.of_node,
6071 "qcom,use-xbl-boot");
6072 IPADBG("Is xbl loading used ? (%s)\n",
6073 ipa_drv_res->use_xbl_boot ? "Yes":"No");
6074
Amir Levy9659e592016-10-27 18:08:27 +03006075 ipa_drv_res->use_64_bit_dma_mask =
6076 of_property_read_bool(pdev->dev.of_node,
6077 "qcom,use-64-bit-dma-mask");
6078 IPADBG(": use_64_bit_dma_mask = %s\n",
6079 ipa_drv_res->use_64_bit_dma_mask
6080 ? "True" : "False");
6081
Ghanim Fodi6a831342017-03-07 18:19:15 +02006082 ipa_drv_res->use_bw_vote =
6083 of_property_read_bool(pdev->dev.of_node,
6084 "qcom,bandwidth-vote-for-ipa");
6085 IPADBG(": use_bw_vote = %s\n",
6086 ipa_drv_res->use_bw_vote
6087 ? "True" : "False");
6088
Amir Levy9659e592016-10-27 18:08:27 +03006089 ipa_drv_res->skip_uc_pipe_reset =
6090 of_property_read_bool(pdev->dev.of_node,
6091 "qcom,skip-uc-pipe-reset");
6092 IPADBG(": skip uC pipe reset = %s\n",
6093 ipa_drv_res->skip_uc_pipe_reset
6094 ? "True" : "False");
6095
6096 ipa_drv_res->tethered_flow_control =
6097 of_property_read_bool(pdev->dev.of_node,
6098 "qcom,tethered-flow-control");
6099 IPADBG(": Use apps based flow control = %s\n",
6100 ipa_drv_res->tethered_flow_control
6101 ? "True" : "False");
6102
Amir Levy9659e592016-10-27 18:08:27 +03006103 /* Get IPA wrapper address */
6104 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
6105 "ipa-base");
6106 if (!resource) {
6107 IPAERR(":get resource failed for ipa-base!\n");
6108 return -ENODEV;
6109 }
6110 ipa_drv_res->ipa_mem_base = resource->start;
6111 ipa_drv_res->ipa_mem_size = resource_size(resource);
6112 IPADBG(": ipa-base = 0x%x, size = 0x%x\n",
6113 ipa_drv_res->ipa_mem_base,
6114 ipa_drv_res->ipa_mem_size);
6115
6116 smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
6117 smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
6118
Amir Levya59ed3f2017-03-05 17:30:55 +02006119 /* Get IPA GSI address */
6120 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
6121 "gsi-base");
6122 if (!resource) {
6123 IPAERR(":get resource failed for gsi-base!\n");
6124 return -ENODEV;
Amir Levy9659e592016-10-27 18:08:27 +03006125 }
Amir Levya59ed3f2017-03-05 17:30:55 +02006126 ipa_drv_res->transport_mem_base = resource->start;
6127 ipa_drv_res->transport_mem_size = resource_size(resource);
6128 IPADBG(": gsi-base = 0x%x, size = 0x%x\n",
6129 ipa_drv_res->transport_mem_base,
6130 ipa_drv_res->transport_mem_size);
6131
6132 /* Get IPA GSI IRQ number */
6133 resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
6134 "gsi-irq");
6135 if (!resource) {
6136 IPAERR(":get resource failed for gsi-irq!\n");
6137 return -ENODEV;
6138 }
6139 ipa_drv_res->transport_irq = resource->start;
6140 IPADBG(": gsi-irq = %d\n", ipa_drv_res->transport_irq);
Amir Levy9659e592016-10-27 18:08:27 +03006141
6142 /* Get IPA pipe mem start ofst */
6143 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
6144 "ipa-pipe-mem");
6145 if (!resource) {
6146 IPADBG(":not using pipe memory - resource nonexisting\n");
6147 } else {
6148 ipa_drv_res->ipa_pipe_mem_start_ofst = resource->start;
6149 ipa_drv_res->ipa_pipe_mem_size = resource_size(resource);
6150 IPADBG(":using pipe memory - at 0x%x of size 0x%x\n",
6151 ipa_drv_res->ipa_pipe_mem_start_ofst,
6152 ipa_drv_res->ipa_pipe_mem_size);
6153 }
6154
6155 /* Get IPA IRQ number */
6156 resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
6157 "ipa-irq");
6158 if (!resource) {
6159 IPAERR(":get resource failed for ipa-irq!\n");
6160 return -ENODEV;
6161 }
6162 ipa_drv_res->ipa_irq = resource->start;
6163 IPADBG(":ipa-irq = %d\n", ipa_drv_res->ipa_irq);
6164
6165 result = of_property_read_u32(pdev->dev.of_node, "qcom,ee",
6166 &ipa_drv_res->ee);
6167 if (result)
6168 ipa_drv_res->ee = 0;
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04006169 IPADBG(":ee = %u\n", ipa_drv_res->ee);
Amir Levy9659e592016-10-27 18:08:27 +03006170
6171 ipa_drv_res->apply_rg10_wa =
6172 of_property_read_bool(pdev->dev.of_node,
6173 "qcom,use-rg10-limitation-mitigation");
6174 IPADBG(": Use Register Group 10 limitation mitigation = %s\n",
6175 ipa_drv_res->apply_rg10_wa
6176 ? "True" : "False");
6177
6178 ipa_drv_res->gsi_ch20_wa =
6179 of_property_read_bool(pdev->dev.of_node,
6180 "qcom,do-not-use-ch-gsi-20");
6181 IPADBG(": GSI CH 20 WA is = %s\n",
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04006182 ipa_drv_res->gsi_ch20_wa
Amir Levy9659e592016-10-27 18:08:27 +03006183 ? "Needed" : "Not needed");
6184
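/*
 * qcom,mhi-event-ring-id-limits, when present, must be exactly two u32
 * cells: the first and last GSI event ring IDs reserved for MHI, e.g.
 * <1 7> (hypothetical values). When absent, the defaults set above are
 * kept.
 */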
Gidon Studinski3021a6f2016-11-10 12:48:48 +02006185 elem_num = of_property_count_elems_of_size(pdev->dev.of_node,
Ghanim Fodic823bc62017-10-21 17:29:53 +03006186 "qcom,mhi-event-ring-id-limits", sizeof(u32));
6187
6188 if (elem_num == 2) {
6189 if (of_property_read_u32_array(pdev->dev.of_node,
6190 "qcom,mhi-event-ring-id-limits", mhi_evid_limits, 2)) {
6191 IPAERR("failed to read mhi event ring id limits\n");
6192 return -EFAULT;
6193 }
6194 if (mhi_evid_limits[0] > mhi_evid_limits[1]) {
6195 IPAERR("mhi event ring id low limit > high limit\n");
6196 return -EFAULT;
6197 }
6198 ipa_drv_res->mhi_evid_limits[0] = mhi_evid_limits[0];
6199 ipa_drv_res->mhi_evid_limits[1] = mhi_evid_limits[1];
6200 IPADBG(": mhi-event-ring-id-limits start=%u end=%u\n",
6201 mhi_evid_limits[0], mhi_evid_limits[1]);
6202 } else {
6203 if (elem_num > 0) {
6204 IPAERR("Invalid mhi event ring id limits number %d\n",
6205 elem_num);
6206 return -EINVAL;
6207 }
6208 IPADBG("use default mhi evt ring id limits start=%u end=%u\n",
6209 ipa_drv_res->mhi_evid_limits[0],
6210 ipa_drv_res->mhi_evid_limits[1]);
6211 }
6212
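/*
 * qcom,ipa-tz-unlock-reg is an even-sized array of u32 cells parsed
 * below as <addr size> pairs of register regions that the driver
 * records for TZ unlocking, e.g. (hypothetical values):
 *
 *	qcom,ipa-tz-unlock-reg = <0x1e40000 0x1000>;
 */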
6213 elem_num = of_property_count_elems_of_size(pdev->dev.of_node,
Gidon Studinski3021a6f2016-11-10 12:48:48 +02006214 "qcom,ipa-tz-unlock-reg", sizeof(u32));
6215
6216 if (elem_num > 0 && elem_num % 2 == 0) {
6217 ipa_drv_res->ipa_tz_unlock_reg_num = elem_num / 2;
6218
6219 ipa_tz_unlock_reg = kcalloc(elem_num, sizeof(u32), GFP_KERNEL);
6220 if (ipa_tz_unlock_reg == NULL)
6221 return -ENOMEM;
6222
6223 ipa_drv_res->ipa_tz_unlock_reg = kcalloc(
6224 ipa_drv_res->ipa_tz_unlock_reg_num,
6225 sizeof(*ipa_drv_res->ipa_tz_unlock_reg),
6226 GFP_KERNEL);
6227 if (ipa_drv_res->ipa_tz_unlock_reg == NULL) {
6228 kfree(ipa_tz_unlock_reg);
6229 return -ENOMEM;
6230 }
6231
6232 if (of_property_read_u32_array(pdev->dev.of_node,
6233 "qcom,ipa-tz-unlock-reg", ipa_tz_unlock_reg,
6234 elem_num)) {
6235 IPAERR("failed to read register addresses\n");
6236 kfree(ipa_tz_unlock_reg);
6237 kfree(ipa_drv_res->ipa_tz_unlock_reg);
6238 return -EFAULT;
6239 }
6240
6241 pos = 0;
6242 for (i = 0; i < ipa_drv_res->ipa_tz_unlock_reg_num; i++) {
6243 ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr =
6244 ipa_tz_unlock_reg[pos++];
6245 ipa_drv_res->ipa_tz_unlock_reg[i].size =
6246 ipa_tz_unlock_reg[pos++];
Skylar Chang48afa052017-10-25 09:32:57 -07006247 IPADBG("tz unlock reg %d: addr 0x%pa size %llu\n", i,
Gidon Studinski3021a6f2016-11-10 12:48:48 +02006248 &ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr,
6249 ipa_drv_res->ipa_tz_unlock_reg[i].size);
6250 }
6251 kfree(ipa_tz_unlock_reg);
6252 }
Michael Adisumarta3e350812017-09-18 14:54:36 -07006253
6254 /* get IPA PM related information */
6255 result = get_ipa_dts_pm_info(pdev, ipa_drv_res);
6256 if (result) {
6257 IPAERR("failed to get pm info from dts %d\n", result);
6258 return result;
6259 }
6260
Mohammed Javid03854df2018-06-20 18:36:57 +05306261 ipa_drv_res->wdi_over_pcie =
Ghanim Fodi0ef92fc2018-07-08 11:21:31 +03006262 of_property_read_bool(pdev->dev.of_node,
6263 "qcom,wlan-ce-db-over-pcie");
Mohammed Javid03854df2018-06-20 18:36:57 +05306264 IPADBG("Is wdi_over_pcie used? (%s)\n",
Ghanim Fodi0ef92fc2018-07-08 11:21:31 +03006265 ipa_drv_res->wdi_over_pcie ? "Yes" : "No");
Mohammed Javid03854df2018-06-20 18:36:57 +05306266
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04006267 /*
6268 * If we're on emulator, get its interrupt controller's mem
6269 * start and size
6270 */
6271 if (ipa_drv_res->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
6272 resource = platform_get_resource_byname(
6273 pdev, IORESOURCE_MEM, "intctrl-base");
6274 if (!resource) {
6275 IPAERR(":Can't find intctrl-base resource\n");
6276 return -ENODEV;
6277 }
6278 ipa_drv_res->emulator_intcntrlr_mem_base =
6279 resource->start;
6280 ipa_drv_res->emulator_intcntrlr_mem_size =
6281 resource_size(resource);
6282 IPADBG(":using intctrl-base at 0x%x of size 0x%x\n",
6283 ipa_drv_res->emulator_intcntrlr_mem_base,
6284 ipa_drv_res->emulator_intcntrlr_mem_size);
6285 }
6286
Amir Levy9659e592016-10-27 18:08:27 +03006287 return 0;
6288}
6289
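/**
 * ipa_smmu_wlan_cb_probe() - configure the WLAN SMMU context bank
 * @dev: the context-bank child device
 *
 * Allocates an IOMMU domain, programs it either for S1 bypass or for
 * atomic (and optionally fast) mapping, attaches @dev to it, and maps
 * any qcom,additional-mapping regions (iova/pa/size triples).
 *
 * Returns 0 on success (including when the WLAN CB is not present),
 * -EPROBE_DEFER if the IOMMU driver is not ready, or a negative errno.
 */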
6290static int ipa_smmu_wlan_cb_probe(struct device *dev)
6291{
Skylar Changefc0a0f2018-03-29 11:17:40 -07006292 struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN);
Amir Levy9659e592016-10-27 18:08:27 +03006293 int atomic_ctx = 1;
6294 int fast = 1;
6295 int bypass = 1;
6296 int ret;
6297 u32 add_map_size;
6298 const u32 *add_map;
6299 int i;
6300
6301 IPADBG("sub pdev=%p\n", dev);
6302
Skylar Changefc0a0f2018-03-29 11:17:40 -07006303 if (!smmu_info.present[IPA_SMMU_CB_WLAN]) {
6304 IPAERR("WLAN SMMU is disabled\n");
6305 return 0;
6306 }
6307
Amir Levy9659e592016-10-27 18:08:27 +03006308 cb->dev = dev;
Amir Levyf5625342016-12-25 10:21:02 +02006309 cb->iommu = iommu_domain_alloc(dev->bus);
Amir Levy9659e592016-10-27 18:08:27 +03006310 if (!cb->iommu) {
6311 IPAERR("could not alloc iommu domain\n");
6312 /* assume this failure is because iommu driver is not ready */
6313 return -EPROBE_DEFER;
6314 }
6315 cb->valid = true;
6316
Skylar Changefc0a0f2018-03-29 11:17:40 -07006317 if (of_property_read_bool(dev->of_node, "qcom,smmu-s1-bypass") ||
6318 ipa3_ctx->ipa_config_is_mhi) {
Michael Adisumarta93e97522017-10-06 15:49:46 -07006319 smmu_info.s1_bypass_arr[IPA_SMMU_CB_WLAN] = true;
Michael Adisumarta972e33e2017-10-20 15:24:27 -07006320 ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN] = true;
6321
Amir Levy9659e592016-10-27 18:08:27 +03006322 if (iommu_domain_set_attr(cb->iommu,
6323 DOMAIN_ATTR_S1_BYPASS,
6324 &bypass)) {
6325 IPAERR("couldn't set bypass\n");
6326 cb->valid = false;
6327 return -EIO;
6328 }
Michael Adisumarta93e97522017-10-06 15:49:46 -07006329 IPADBG("WLAN SMMU S1 BYPASS\n");
Amir Levy9659e592016-10-27 18:08:27 +03006330 } else {
Michael Adisumarta93e97522017-10-06 15:49:46 -07006331 smmu_info.s1_bypass_arr[IPA_SMMU_CB_WLAN] = false;
Michael Adisumarta972e33e2017-10-20 15:24:27 -07006332 ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN] = false;
6333
Amir Levy9659e592016-10-27 18:08:27 +03006334 if (iommu_domain_set_attr(cb->iommu,
6335 DOMAIN_ATTR_ATOMIC,
6336 &atomic_ctx)) {
6337 IPAERR("couldn't disable coherent HTW\n");
6338 cb->valid = false;
6339 return -EIO;
6340 }
Michael Adisumarta93e97522017-10-06 15:49:46 -07006341 IPADBG(" WLAN SMMU ATTR ATOMIC\n");
Amir Levy9659e592016-10-27 18:08:27 +03006342
6343 if (smmu_info.fast_map) {
6344 if (iommu_domain_set_attr(cb->iommu,
6345 DOMAIN_ATTR_FAST,
6346 &fast)) {
6347 IPAERR("couldn't set fast map\n");
6348 cb->valid = false;
6349 return -EIO;
6350 }
6351 IPADBG("SMMU fast map set\n");
6352 }
6353 }
6354
Michael Adisumarta93e97522017-10-06 15:49:46 -07006355 pr_info("IPA smmu_info.s1_bypass_arr[WLAN]=%d smmu_info.fast_map=%d\n",
6356 smmu_info.s1_bypass_arr[IPA_SMMU_CB_WLAN], smmu_info.fast_map);
6357
Amir Levy9659e592016-10-27 18:08:27 +03006358 ret = iommu_attach_device(cb->iommu, dev);
6359 if (ret) {
6360 IPAERR("could not attach device ret=%d\n", ret);
6361 cb->valid = false;
6362 return ret;
6363 }
6364 /* MAP ipa-uc ram */
6365 add_map = of_get_property(dev->of_node,
6366 "qcom,additional-mapping", &add_map_size);
6367 if (add_map) {
6368 /* the additional mapping is an array of u32 3-tuples: <iova pa size> */
6369 if (add_map_size % (3 * sizeof(u32))) {
6370 IPAERR("wrong additional mapping format\n");
6371 cb->valid = false;
6372 return -EFAULT;
6373 }
6374
6375 /* iterate over each entry of the additional mapping array */
6376 for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
6377 u32 iova = be32_to_cpu(add_map[i]);
6378 u32 pa = be32_to_cpu(add_map[i + 1]);
6379 u32 size = be32_to_cpu(add_map[i + 2]);
6380 unsigned long iova_p;
6381 phys_addr_t pa_p;
6382 u32 size_p;
6383
6384 IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
6385 iova_p, pa_p, size_p);
6386 IPADBG("mapping 0x%lx to 0x%pa size %d\n",
6387 iova_p, &pa_p, size_p);
6388 ipa3_iommu_map(cb->iommu,
6389 iova_p, pa_p, size_p,
Amir Levyf5625342016-12-25 10:21:02 +02006390 IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
Amir Levy9659e592016-10-27 18:08:27 +03006391 }
6392 }
6393 return 0;
6394}
6395
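/**
 * ipa_smmu_uc_cb_probe() - configure the IPA uC SMMU context bank
 * @dev: the context-bank child device
 *
 * Reads the qcom,iova-mapping <start size> window, sets the DMA mask,
 * creates an ARM IOMMU mapping, programs S1 bypass or atomic/fast
 * attributes and attaches @dev. On success @dev becomes uc_pdev.
 *
 * Returns 0 on success, -EPROBE_DEFER if the IOMMU driver is not
 * ready, or a negative errno.
 */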
6396static int ipa_smmu_uc_cb_probe(struct device *dev)
6397{
Skylar Changefc0a0f2018-03-29 11:17:40 -07006398 struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
Amir Levy9659e592016-10-27 18:08:27 +03006399 int atomic_ctx = 1;
6400 int bypass = 1;
6401 int fast = 1;
6402 int ret;
6403 u32 iova_ap_mapping[2];
6404
6405 IPADBG("UC CB PROBE sub pdev=%p\n", dev);
6406
Skylar Changefc0a0f2018-03-29 11:17:40 -07006407 if (!smmu_info.present[IPA_SMMU_CB_UC]) {
6408 IPAERR("UC SMMU is disabled\n");
6409 return 0;
6410 }
6411
Amir Levy9659e592016-10-27 18:08:27 +03006412 ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
6413 iova_ap_mapping, 2);
6414 if (ret) {
6415 IPAERR("Fail to read UC start/size iova addresses\n");
6416 return ret;
6417 }
6418 cb->va_start = iova_ap_mapping[0];
6419 cb->va_size = iova_ap_mapping[1];
6420 cb->va_end = cb->va_start + cb->va_size;
6421 IPADBG("UC va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);
6422
6423 if (smmu_info.use_64_bit_dma_mask) {
6424 if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
6425 dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
6426 IPAERR("DMA set 64bit mask failed\n");
6427 return -EOPNOTSUPP;
6428 }
6429 } else {
6430 if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
6431 dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
6432 IPAERR("DMA set 32bit mask failed\n");
6433 return -EOPNOTSUPP;
6434 }
6435 }
6436 IPADBG("UC CB PROBE=%p create IOMMU mapping\n", dev);
6437
6438 cb->dev = dev;
Amir Levyf5625342016-12-25 10:21:02 +02006439 cb->mapping = arm_iommu_create_mapping(dev->bus,
Amir Levy9659e592016-10-27 18:08:27 +03006440 cb->va_start, cb->va_size);
6441 if (IS_ERR_OR_NULL(cb->mapping)) {
6442 IPADBG("Fail to create mapping\n");
6443 /* assume this failure is because iommu driver is not ready */
6444 return -EPROBE_DEFER;
6445 }
6446 IPADBG("SMMU mapping created\n");
6447 cb->valid = true;
6448
Amir Levy9659e592016-10-27 18:08:27 +03006449 IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
Michael Adisumarta93e97522017-10-06 15:49:46 -07006450
Skylar Changefc0a0f2018-03-29 11:17:40 -07006451 if (of_property_read_bool(dev->of_node, "qcom,smmu-s1-bypass") ||
6452 ipa3_ctx->ipa_config_is_mhi) {
Michael Adisumarta93e97522017-10-06 15:49:46 -07006453 smmu_info.s1_bypass_arr[IPA_SMMU_CB_UC] = true;
Michael Adisumarta972e33e2017-10-20 15:24:27 -07006454 ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] = true;
6455
Amir Levy9659e592016-10-27 18:08:27 +03006456 if (iommu_domain_set_attr(cb->mapping->domain,
Michael Adisumarta93e97522017-10-06 15:49:46 -07006457 DOMAIN_ATTR_S1_BYPASS,
6458 &bypass)) {
Amir Levy9659e592016-10-27 18:08:27 +03006459 IPAERR("couldn't set bypass\n");
6460 arm_iommu_release_mapping(cb->mapping);
6461 cb->valid = false;
6462 return -EIO;
6463 }
Michael Adisumarta93e97522017-10-06 15:49:46 -07006464 IPADBG("UC SMMU S1 BYPASS\n");
Amir Levy9659e592016-10-27 18:08:27 +03006465 } else {
Michael Adisumarta93e97522017-10-06 15:49:46 -07006466 smmu_info.s1_bypass_arr[IPA_SMMU_CB_UC] = false;
Michael Adisumarta972e33e2017-10-20 15:24:27 -07006467 ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] = false;
6468
Amir Levy9659e592016-10-27 18:08:27 +03006469 if (iommu_domain_set_attr(cb->mapping->domain,
Michael Adisumarta93e97522017-10-06 15:49:46 -07006470 DOMAIN_ATTR_ATOMIC,
6471 &atomic_ctx)) {
Amir Levy9659e592016-10-27 18:08:27 +03006472 IPAERR("couldn't set domain as atomic\n");
6473 arm_iommu_release_mapping(cb->mapping);
6474 cb->valid = false;
6475 return -EIO;
6476 }
6477 IPADBG("SMMU atomic set\n");
6478
6479 if (smmu_info.fast_map) {
6480 if (iommu_domain_set_attr(cb->mapping->domain,
Michael Adisumarta93e97522017-10-06 15:49:46 -07006481 DOMAIN_ATTR_FAST,
6482 &fast)) {
Amir Levy9659e592016-10-27 18:08:27 +03006483 IPAERR("couldn't set fast map\n");
6484 arm_iommu_release_mapping(cb->mapping);
6485 cb->valid = false;
6486 return -EIO;
6487 }
6488 IPADBG("SMMU fast map set\n");
6489 }
6490 }
6491
Michael Adisumarta93e97522017-10-06 15:49:46 -07006492 pr_info("IPA smmu_info.s1_bypass_arr[UC]=%d smmu_info.fast_map=%d\n",
6493 smmu_info.s1_bypass_arr[IPA_SMMU_CB_UC], smmu_info.fast_map);
6494
Amir Levy9659e592016-10-27 18:08:27 +03006495 IPADBG("UC CB PROBE sub pdev=%p attaching IOMMU device\n", dev);
6496 ret = arm_iommu_attach_device(cb->dev, cb->mapping);
6497 if (ret) {
6498 IPAERR("could not attach device ret=%d\n", ret);
6499 arm_iommu_release_mapping(cb->mapping);
6500 cb->valid = false;
6501 return ret;
6502 }
6503
6504 cb->next_addr = cb->va_end;
6505 ipa3_ctx->uc_pdev = dev;
6506
6507 return 0;
6508}
6509
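/**
 * ipa_smmu_ap_cb_probe() - configure the AP (apps) SMMU context bank
 * @dev: the context-bank child device
 *
 * Same flow as the uC context bank, plus mapping of the
 * qcom,additional-mapping triples and of the Q6 SMEM region used for
 * IPA filter tables. On success @dev becomes ipa3_ctx->pdev.
 *
 * Returns 0 on success, -EPROBE_DEFER if the IOMMU driver is not
 * ready, or a negative errno.
 */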
6510static int ipa_smmu_ap_cb_probe(struct device *dev)
6511{
Skylar Changefc0a0f2018-03-29 11:17:40 -07006512 struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
Amir Levy9659e592016-10-27 18:08:27 +03006513 int result;
Amir Levy9659e592016-10-27 18:08:27 +03006514 int atomic_ctx = 1;
6515 int fast = 1;
6516 int bypass = 1;
6517 u32 iova_ap_mapping[2];
6518 u32 add_map_size;
Mohammed Javid36d13cf2018-01-26 22:49:03 +05306519 u32 q6_smem_size;
Amir Levy9659e592016-10-27 18:08:27 +03006520 const u32 *add_map;
6521 void *smem_addr;
6522 int i;
6523
6524 IPADBG("AP CB probe: sub pdev=%p\n", dev);
6525
Skylar Changefc0a0f2018-03-29 11:17:40 -07006526 if (!smmu_info.present[IPA_SMMU_CB_AP]) {
6527 IPAERR("AP SMMU is disabled");
6528 return 0;
6529 }
6530
Amir Levy9659e592016-10-27 18:08:27 +03006531 result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
6532 iova_ap_mapping, 2);
6533 if (result) {
6534 IPAERR("Fail to read AP start/size iova addresses\n");
6535 return result;
6536 }
6537 cb->va_start = iova_ap_mapping[0];
6538 cb->va_size = iova_ap_mapping[1];
6539 cb->va_end = cb->va_start + cb->va_size;
6540 IPADBG("AP va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);
6541
6542 if (smmu_info.use_64_bit_dma_mask) {
6543 if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
6544 dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
6545 IPAERR("DMA set 64bit mask failed\n");
6546 return -EOPNOTSUPP;
6547 }
6548 } else {
6549 if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
6550 dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
6551 IPAERR("DMA set 32bit mask failed\n");
6552 return -EOPNOTSUPP;
6553 }
6554 }
6555
6556 cb->dev = dev;
Amir Levyf5625342016-12-25 10:21:02 +02006557 cb->mapping = arm_iommu_create_mapping(dev->bus,
Amir Levy9659e592016-10-27 18:08:27 +03006558 cb->va_start, cb->va_size);
6559 if (IS_ERR_OR_NULL(cb->mapping)) {
6560 IPADBG("Fail to create mapping\n");
6561 /* assume this failure is because iommu driver is not ready */
6562 return -EPROBE_DEFER;
6563 }
6564 IPADBG("SMMU mapping created\n");
6565 cb->valid = true;
6566
Michael Adisumarta93e97522017-10-06 15:49:46 -07006567 if (of_property_read_bool(dev->of_node,
Skylar Changefc0a0f2018-03-29 11:17:40 -07006568 "qcom,smmu-s1-bypass") || ipa3_ctx->ipa_config_is_mhi) {
Michael Adisumarta93e97522017-10-06 15:49:46 -07006569 smmu_info.s1_bypass_arr[IPA_SMMU_CB_AP] = true;
Skylar Change87894f2018-04-02 15:49:12 -07006570 ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP] = true;
Amir Levy9659e592016-10-27 18:08:27 +03006571 if (iommu_domain_set_attr(cb->mapping->domain,
6572 DOMAIN_ATTR_S1_BYPASS,
6573 &bypass)) {
6574 IPAERR("couldn't set bypass\n");
6575 arm_iommu_release_mapping(cb->mapping);
6576 cb->valid = false;
6577 return -EIO;
6578 }
Michael Adisumarta93e97522017-10-06 15:49:46 -07006579 IPADBG("AP/USB SMMU S1 BYPASS\n");
Amir Levy9659e592016-10-27 18:08:27 +03006580 } else {
Michael Adisumarta93e97522017-10-06 15:49:46 -07006581 smmu_info.s1_bypass_arr[IPA_SMMU_CB_AP] = false;
Skylar Change87894f2018-04-02 15:49:12 -07006582 ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP] = false;
Amir Levy9659e592016-10-27 18:08:27 +03006583 if (iommu_domain_set_attr(cb->mapping->domain,
6584 DOMAIN_ATTR_ATOMIC,
6585 &atomic_ctx)) {
6586 IPAERR("couldn't set domain as atomic\n");
6587 arm_iommu_release_mapping(cb->mapping);
6588 cb->valid = false;
6589 return -EIO;
6590 }
Michael Adisumarta93e97522017-10-06 15:49:46 -07006591 IPADBG("AP/USB SMMU atomic set\n");
Amir Levy9659e592016-10-27 18:08:27 +03006592
Skylar Chang578e1a42018-06-15 10:33:26 -07006593 if (smmu_info.fast_map) {
6594 if (iommu_domain_set_attr(cb->mapping->domain,
Amir Levy9659e592016-10-27 18:08:27 +03006595 DOMAIN_ATTR_FAST,
6596 &fast)) {
Skylar Chang578e1a42018-06-15 10:33:26 -07006597 IPAERR("couldn't set fast map\n");
6598 arm_iommu_release_mapping(cb->mapping);
6599 cb->valid = false;
6600 return -EIO;
6601 }
6602 IPADBG("SMMU fast map set\n");
Amir Levy9659e592016-10-27 18:08:27 +03006603 }
Amir Levy9659e592016-10-27 18:08:27 +03006604 }
6605
Michael Adisumarta93e97522017-10-06 15:49:46 -07006606 pr_info("IPA smmu_info.s1_bypass_arr[AP]=%d smmu_info.fast_map=%d\n",
6607 smmu_info.s1_bypass_arr[IPA_SMMU_CB_AP], smmu_info.fast_map);
6608
Amir Levy9659e592016-10-27 18:08:27 +03006609 result = arm_iommu_attach_device(cb->dev, cb->mapping);
6610 if (result) {
6611 IPAERR("couldn't attach to IOMMU ret=%d\n", result);
6612 cb->valid = false;
6613 return result;
6614 }
6615
6616 add_map = of_get_property(dev->of_node,
6617 "qcom,additional-mapping", &add_map_size);
6618 if (add_map) {
6619 /* the additional mapping is an array of u32 3-tuples: <iova pa size> */
6620 if (add_map_size % (3 * sizeof(u32))) {
6621 IPAERR("wrong additional mapping format\n");
6622 cb->valid = false;
6623 return -EFAULT;
6624 }
6625
6626 /* iterate over each entry of the additional mapping array */
6627 for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
6628 u32 iova = be32_to_cpu(add_map[i]);
6629 u32 pa = be32_to_cpu(add_map[i + 1]);
6630 u32 size = be32_to_cpu(add_map[i + 2]);
6631 unsigned long iova_p;
6632 phys_addr_t pa_p;
6633 u32 size_p;
6634
6635 IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
6636 iova_p, pa_p, size_p);
6637 IPADBG("mapping 0x%lx to 0x%pa size %d\n",
6638 iova_p, &pa_p, size_p);
6639 ipa3_iommu_map(cb->mapping->domain,
6640 iova_p, pa_p, size_p,
Amir Levyf5625342016-12-25 10:21:02 +02006641 IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
Amir Levy9659e592016-10-27 18:08:27 +03006642 }
6643 }
6644
Mohammed Javid36d13cf2018-01-26 22:49:03 +05306645 result = of_property_read_u32_array(dev->of_node,
6646 "qcom,ipa-q6-smem-size", &q6_smem_size, 1);
6647 if (result) {
6648 IPADBG("ipa q6 smem size = %d\n", IPA_SMEM_SIZE);
6649 /* map SMEM memory for IPA table accesses */
6650 smem_addr = smem_alloc(SMEM_IPA_FILTER_TABLE, IPA_SMEM_SIZE,
6651 SMEM_MODEM, 0);
Chaitanya Pratapa4c38c592019-07-31 19:56:44 +05306652 q6_smem_size = IPA_SMEM_SIZE;
Mohammed Javid36d13cf2018-01-26 22:49:03 +05306653 } else {
6654 IPADBG("ipa q6 smem size = %d\n", q6_smem_size);
6655 smem_addr = smem_alloc(SMEM_IPA_FILTER_TABLE, q6_smem_size,
6656 SMEM_MODEM, 0);
6657 }
Amir Levy9659e592016-10-27 18:08:27 +03006658 if (smem_addr) {
6659 phys_addr_t iova = smem_virt_to_phys(smem_addr);
6660 phys_addr_t pa = iova;
6661 unsigned long iova_p;
6662 phys_addr_t pa_p;
6663 u32 size_p;
6664
Chaitanya Pratapa4c38c592019-07-31 19:56:44 +05306665 IPA_SMMU_ROUND_TO_PAGE(iova, pa, q6_smem_size,
Amir Levy9659e592016-10-27 18:08:27 +03006666 iova_p, pa_p, size_p);
6667 IPADBG("mapping 0x%lx to 0x%pa size %d\n",
6668 iova_p, &pa_p, size_p);
6669 ipa3_iommu_map(cb->mapping->domain,
6670 iova_p, pa_p, size_p,
Skylar Chang2d1a7622018-05-30 17:01:58 -07006671 IOMMU_READ | IOMMU_WRITE);
Amir Levy9659e592016-10-27 18:08:27 +03006672 }
6673
6674
Skylar Changefc0a0f2018-03-29 11:17:40 -07006675 smmu_info.present[IPA_SMMU_CB_AP] = true;
6676 ipa3_ctx->pdev = dev;
Amir Levy9659e592016-10-27 18:08:27 +03006677
Michael Adisumartac8c404a2018-04-05 18:01:45 -07006678 return 0;
Amir Levy9659e592016-10-27 18:08:27 +03006679}
6680
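/* Dispatch a context-bank probe to the handler matching its type. */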
Skylar Changefc0a0f2018-03-29 11:17:40 -07006681static int ipa_smmu_cb_probe(struct device *dev, enum ipa_smmu_cb_type cb_type)
6682{
6683 switch (cb_type) {
6684 case IPA_SMMU_CB_AP:
6685 return ipa_smmu_ap_cb_probe(dev);
6686 case IPA_SMMU_CB_WLAN:
6687 return ipa_smmu_wlan_cb_probe(dev);
6688 case IPA_SMMU_CB_UC:
6689 return ipa_smmu_uc_cb_probe(dev);
6690 case IPA_SMMU_CB_MAX:
6691 IPAERR("Invalid cb_type\n");
6692 }
6693 return 0;
6694}
6695
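/*
 * Attach the IPA-owned context banks (AP, WLAN, uC) to their SMMUs
 * when ARM SMMU is enabled; otherwise keep the master platform device
 * for DMA allocations.
 */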
6696static int ipa3_attach_to_smmu(void)
6697{
6698 struct ipa_smmu_cb_ctx *cb;
6699 int i, result;
6700
6701 ipa3_ctx->pdev = &ipa3_ctx->master_pdev->dev;
6702 ipa3_ctx->uc_pdev = &ipa3_ctx->master_pdev->dev;
6703
6704 if (smmu_info.arm_smmu) {
6705 IPADBG("smmu is enabled\n");
6706 for (i = 0; i < IPA_SMMU_CB_MAX; i++) {
6707 cb = ipa3_get_smmu_ctx(i);
6708 result = ipa_smmu_cb_probe(cb->dev, i);
6709 if (result)
6710 IPAERR("probe failed for cb %d\n", i);
6711 }
6712 } else {
6713 IPADBG("smmu is disabled\n");
6714 }
6715 return 0;
6716}
6717
Amir Levy9659e592016-10-27 18:08:27 +03006718static irqreturn_t ipa3_smp2p_modem_clk_query_isr(int irq, void *ctxt)
6719{
6720 ipa3_freeze_clock_vote_and_notify_modem();
6721
6722 return IRQ_HANDLED;
6723}
6724
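/**
 * ipa3_smp2p_probe() - set up the SMP2P GPIOs shared with the modem
 * @dev: smp2pgpio map device
 *
 * Records the outbound GPIO base, or, for the inbound map, registers
 * an IRQ handler that freezes the clock vote when the modem queries
 * the IPA clock state.
 */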
6725static int ipa3_smp2p_probe(struct device *dev)
6726{
6727 struct device_node *node = dev->of_node;
6728 int res;
6729
Mohammed Javid7de12702017-07-21 15:22:58 +05306730 if (ipa3_ctx == NULL) {
6731 IPAERR("ipa3_ctx was not initialized\n");
6732 return -ENXIO;
6733 }
Amir Levy9659e592016-10-27 18:08:27 +03006734 IPADBG("node->name=%s\n", node->name);
6735 if (strcmp("qcom,smp2pgpio_map_ipa_1_out", node->name) == 0) {
6736 res = of_get_gpio(node, 0);
6737 if (res < 0) {
6738 IPADBG("of_get_gpio returned %d\n", res);
6739 return res;
6740 }
6741
6742 ipa3_ctx->smp2p_info.out_base_id = res;
6743 IPADBG("smp2p out_base_id=%d\n",
6744 ipa3_ctx->smp2p_info.out_base_id);
6745 } else if (strcmp("qcom,smp2pgpio_map_ipa_1_in", node->name) == 0) {
6746 int irq;
6747
6748 res = of_get_gpio(node, 0);
6749 if (res < 0) {
6750 IPADBG("of_get_gpio returned %d\n", res);
6751 return res;
6752 }
6753
6754 ipa3_ctx->smp2p_info.in_base_id = res;
6755 IPADBG("smp2p in_base_id=%d\n",
6756 ipa3_ctx->smp2p_info.in_base_id);
6757
6758 /* register for modem clk query */
6759 irq = gpio_to_irq(ipa3_ctx->smp2p_info.in_base_id +
6760 IPA_GPIO_IN_QUERY_CLK_IDX);
6761 if (irq < 0) {
6762 IPAERR("gpio_to_irq failed %d\n", irq);
6763 return -ENODEV;
6764 }
6765 IPADBG("smp2p irq#=%d\n", irq);
6766 res = request_irq(irq,
6767 (irq_handler_t)ipa3_smp2p_modem_clk_query_isr,
6768 IRQF_TRIGGER_RISING, "ipa_smp2p_clk_vote", dev);
6769 if (res) {
6770 IPAERR("fail to register smp2p irq=%d\n", irq);
6771 return -ENODEV;
6772 }
6773 res = enable_irq_wake(ipa3_ctx->smp2p_info.in_base_id +
6774 IPA_GPIO_IN_QUERY_CLK_IDX);
6775 if (res)
6776 IPAERR("failed to enable irq wake\n");
6777 }
6778
6779 return 0;
6780}
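/**
 * ipa3_plat_drv_probe() - platform probe entry point for the IPA driver
 * @pdev_p: platform device (the IPA node or one of its children)
 * @api_ctrl: IPA API controller to bind against the detected HW type
 * @pdrv_match: match table used to populate child platform devices
 *
 * Child nodes (SMMU context banks, smp2p maps) mostly just record
 * their presence or set up GPIOs; the uC CB may also trigger post-init
 * when XBL has loaded the FW. For the main IPA node this parses DT,
 * binds the API controller, configures SMMU/DMA masks, runs
 * ipa3_pre_init() and populates the child devices.
 */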
6781
6782int ipa3_plat_drv_probe(struct platform_device *pdev_p,
6783 struct ipa_api_controller *api_ctrl,
6784 const struct of_device_id *pdrv_match)
6785{
6786 int result;
6787 struct device *dev = &pdev_p->dev;
Skylar Changefc0a0f2018-03-29 11:17:40 -07006788 struct ipa_smmu_cb_ctx *cb;
Amir Levy9659e592016-10-27 18:08:27 +03006789
6790 IPADBG("IPA driver probing started\n");
6791 IPADBG("dev->of_node->name = %s\n", dev->of_node->name);
6792
Skylar Changefc0a0f2018-03-29 11:17:40 -07006793 if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-ap-cb")) {
6794 cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
6795 cb->dev = dev;
6796 smmu_info.present[IPA_SMMU_CB_AP] = true;
Amir Levy9659e592016-10-27 18:08:27 +03006797
Skylar Changefc0a0f2018-03-29 11:17:40 -07006798 return 0;
6799 }
Amir Levy9659e592016-10-27 18:08:27 +03006800
Skylar Changefc0a0f2018-03-29 11:17:40 -07006801 if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-wlan-cb")) {
6802 cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN);
6803 cb->dev = dev;
6804 smmu_info.present[IPA_SMMU_CB_WLAN] = true;
6805
6806 return 0;
6807 }
6808
6809 if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-uc-cb")) {
6810 cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
6811 cb->dev = dev;
6812 smmu_info.present[IPA_SMMU_CB_UC] = true;
6813
Chaitanya Pratapa3f54f3b2019-11-10 20:24:38 -08006814 if (ipa3_ctx->use_xbl_boot) {
6815 /* Ensure uC probe is the last. */
6816 if (!smmu_info.present[IPA_SMMU_CB_AP] ||
6817 !smmu_info.present[IPA_SMMU_CB_WLAN]) {
6818 IPAERR("AP or WLAN CB probe not done. Defer");
6819 return -EPROBE_DEFER;
6820 }
6821
6822 pr_info("Using XBL boot load for IPA FW\n");
6823 ipa3_ctx->fw_loaded = true;
6824
6825 result = ipa3_attach_to_smmu();
6826 if (result) {
6827 IPAERR("IPA attach to smmu failed %d\n",
6828 result);
6829 return result;
6830 }
6831
6832 result = ipa3_post_init(&ipa3_res, ipa3_ctx->cdev.dev);
6833 if (result) {
6834 IPAERR("IPA post init failed %d\n", result);
6835 return result;
6836 }
6837 }
6838
6839
Skylar Changefc0a0f2018-03-29 11:17:40 -07006840 return 0;
6841 }
Amir Levy9659e592016-10-27 18:08:27 +03006842
6843 if (of_device_is_compatible(dev->of_node,
6844 "qcom,smp2pgpio-map-ipa-1-in"))
6845 return ipa3_smp2p_probe(dev);
6846
6847 if (of_device_is_compatible(dev->of_node,
6848 "qcom,smp2pgpio-map-ipa-1-out"))
6849 return ipa3_smp2p_probe(dev);
6850
Amir Levy9659e592016-10-27 18:08:27 +03006851 result = get_ipa_dts_configuration(pdev_p, &ipa3_res);
6852 if (result) {
6853 IPAERR("IPA dts parsing failed\n");
6854 return result;
6855 }
6856
6857 result = ipa3_bind_api_controller(ipa3_res.ipa_hw_type, api_ctrl);
6858 if (result) {
6859 IPAERR("IPA API binding failed\n");
6860 return result;
6861 }
6862
Amir Levy9659e592016-10-27 18:08:27 +03006863 if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
6864 if (of_property_read_bool(pdev_p->dev.of_node,
Amir Levy9659e592016-10-27 18:08:27 +03006865 "qcom,smmu-fast-map"))
6866 smmu_info.fast_map = true;
6867 if (of_property_read_bool(pdev_p->dev.of_node,
6868 "qcom,use-64-bit-dma-mask"))
6869 smmu_info.use_64_bit_dma_mask = true;
6870 smmu_info.arm_smmu = true;
Amir Levy9659e592016-10-27 18:08:27 +03006871 } else if (of_property_read_bool(pdev_p->dev.of_node,
6872 "qcom,msm-smmu")) {
6873 IPAERR("Legacy IOMMU not supported\n");
6874 result = -EOPNOTSUPP;
6875 } else {
6876 if (of_property_read_bool(pdev_p->dev.of_node,
6877 "qcom,use-64-bit-dma-mask")) {
6878 if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(64)) ||
6879 dma_set_coherent_mask(&pdev_p->dev,
6880 DMA_BIT_MASK(64))) {
6881 IPAERR("DMA set 64bit mask failed\n");
6882 return -EOPNOTSUPP;
6883 }
6884 } else {
6885 if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(32)) ||
6886 dma_set_coherent_mask(&pdev_p->dev,
6887 DMA_BIT_MASK(32))) {
6888 IPAERR("DMA set 32bit mask failed\n");
6889 return -EOPNOTSUPP;
6890 }
6891 }
Skylar Changefc0a0f2018-03-29 11:17:40 -07006892 }
Amir Levy9659e592016-10-27 18:08:27 +03006893
Skylar Changefc0a0f2018-03-29 11:17:40 -07006894 /* Proceed to real initialization */
6895 result = ipa3_pre_init(&ipa3_res, pdev_p);
6896 if (result) {
6897 IPAERR("ipa3_init failed\n");
6898 return result;
Amir Levy9659e592016-10-27 18:08:27 +03006899 }
6900
Ghanim Fodi115bf8a2017-04-21 01:36:06 -07006901 result = of_platform_populate(pdev_p->dev.of_node,
6902 pdrv_match, NULL, &pdev_p->dev);
6903 if (result) {
6904 IPAERR("failed to populate platform\n");
6905 return result;
6906 }
6907
Amir Levy9659e592016-10-27 18:08:27 +03006908 return result;
6909}
6910
6911/**
6912 * ipa3_ap_suspend() - suspend callback for runtime_pm
6913 * @dev: pointer to device
6914 *
6915 * This callback will be invoked by the runtime_pm framework when an AP suspend
6916 * operation is invoked, usually by pressing a suspend button.
6917 *
6918 * Returns -EAGAIN to runtime_pm framework in case IPA is in use by AP.
6919 * This will postpone the suspend operation until IPA is no longer used by AP.
Skylar Chang68c37d82018-04-07 16:42:36 -07006920 */
Amir Levy9659e592016-10-27 18:08:27 +03006921int ipa3_ap_suspend(struct device *dev)
6922{
6923 int i;
6924
6925 IPADBG("Enter...\n");
6926
6927 /* In case there is a tx/rx handler in polling mode fail to suspend */
6928 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
6929 if (ipa3_ctx->ep[i].sys &&
6930 atomic_read(&ipa3_ctx->ep[i].sys->curr_polling_state)) {
6931 IPAERR("EP %d is in polling state, do not suspend\n",
6932 i);
6933 return -EAGAIN;
6934 }
6935 }
6936
Michael Adisumarta3e350812017-09-18 14:54:36 -07006937 if (ipa3_ctx->use_ipa_pm) {
6938 ipa_pm_deactivate_all_deferred();
6939 } else {
6940 /*
6941 * Release transport IPA resource without waiting
6942 * for inactivity timer
6943 */
6944 atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
6945 ipa3_transport_release_resource(NULL);
6946 }
Amir Levy9659e592016-10-27 18:08:27 +03006947 IPADBG("Exit\n");
6948
6949 return 0;
6950}
6951
6952/**
Skylar Chang68c37d82018-04-07 16:42:36 -07006953 * ipa3_ap_resume() - resume callback for runtime_pm
6954 * @dev: pointer to device
6955 *
6956 * This callback will be invoked by the runtime_pm framework when an AP resume
6957 * operation is invoked.
6958 *
6959 * Always returns 0 since resume should always succeed.
6960 */
Amir Levy9659e592016-10-27 18:08:27 +03006961int ipa3_ap_resume(struct device *dev)
6962{
6963 return 0;
6964}
6965
6966struct ipa3_context *ipa3_get_ctx(void)
6967{
6968 return ipa3_ctx;
6969}
6970
Amir Levy8fb98e02019-10-29 14:22:26 +02006971bool ipa3_get_lan_rx_napi(void)
6972{
6973 return false;
6974}
6975
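/*
 * GSI per-device notification handler: log the event and, for the
 * unrecoverable ones, crash via BUG() so the state can be inspected.
 */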
Amir Levy9659e592016-10-27 18:08:27 +03006976static void ipa_gsi_notify_cb(struct gsi_per_notify *notify)
6977{
6978 switch (notify->evt_id) {
6979 case GSI_PER_EVT_GLOB_ERROR:
6980 IPAERR("Got GSI_PER_EVT_GLOB_ERROR\n");
6981 IPAERR("Err_desc = 0x%04x\n", notify->data.err_desc);
6982 break;
6983 case GSI_PER_EVT_GLOB_GP1:
6984 IPAERR("Got GSI_PER_EVT_GLOB_GP1\n");
6985 BUG();
6986 break;
6987 case GSI_PER_EVT_GLOB_GP2:
6988 IPAERR("Got GSI_PER_EVT_GLOB_GP2\n");
6989 BUG();
6990 break;
6991 case GSI_PER_EVT_GLOB_GP3:
6992 IPAERR("Got GSI_PER_EVT_GLOB_GP3\n");
6993 BUG();
6994 break;
6995 case GSI_PER_EVT_GENERAL_BREAK_POINT:
6996 IPAERR("Got GSI_PER_EVT_GENERAL_BREAK_POINT\n");
6997 break;
6998 case GSI_PER_EVT_GENERAL_BUS_ERROR:
6999 IPAERR("Got GSI_PER_EVT_GENERAL_BUS_ERROR\n");
7000 BUG();
7001 break;
7002 case GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW:
7003 IPAERR("Got GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW\n");
7004 BUG();
7005 break;
7006 case GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW:
7007 IPAERR("Got GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW\n");
7008 BUG();
7009 break;
7010 default:
7011 IPAERR("Received unexpected evt: %d\n",
7012 notify->evt_id);
7013 BUG();
7014 }
7015}
7016
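/**
 * ipa3_register_ipa_ready_cb() - defer work until IPA init completes
 * @ipa_ready_cb: callback to invoke once initialization is done
 * @user_data: opaque pointer passed back to the callback
 *
 * Returns 0 if the callback was queued, -EEXIST if initialization has
 * already completed (the caller can proceed immediately), or -ENXIO /
 * -ENOMEM on failure.
 *
 * Illustrative (hypothetical) client usage; my_ipa_ready() is not a
 * real symbol in this driver:
 *
 *	static void my_ipa_ready(void *data)
 *	{
 *		pr_info("IPA is ready\n");
 *	}
 *	...
 *	if (ipa3_register_ipa_ready_cb(my_ipa_ready, NULL) == -EEXIST)
 *		my_ipa_ready(NULL);
 */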
7017int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data)
7018{
7019 struct ipa3_ready_cb_info *cb_info = NULL;
7020
7021 /* check ipa3_ctx existed or not */
7022 if (!ipa3_ctx) {
7023 IPADBG("IPA driver haven't initialized\n");
7024 return -ENXIO;
7025 }
7026 mutex_lock(&ipa3_ctx->lock);
7027 if (ipa3_ctx->ipa_initialization_complete) {
7028 mutex_unlock(&ipa3_ctx->lock);
7029 IPADBG("IPA driver finished initialization already\n");
7030 return -EEXIST;
7031 }
7032
7033 cb_info = kmalloc(sizeof(struct ipa3_ready_cb_info), GFP_KERNEL);
7034 if (!cb_info) {
7035 mutex_unlock(&ipa3_ctx->lock);
7036 return -ENOMEM;
7037 }
7038
7039 cb_info->ready_cb = ipa_ready_cb;
7040 cb_info->user_data = user_data;
7041
7042 list_add_tail(&cb_info->link, &ipa3_ctx->ipa_ready_cb_list);
7043 mutex_unlock(&ipa3_ctx->lock);
7044
7045 return 0;
7046}
7047
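/**
 * ipa3_iommu_map() - guarded wrapper around iommu_map()
 * @domain: IOMMU domain to map into
 * @iova: device virtual address
 * @paddr: physical address
 * @size: mapping size in bytes
 * @prot: IOMMU_* protection flags
 *
 * Rejects (and asserts on) mappings that fall inside the AP or uC
 * dynamically allocated VA ranges, then forwards to iommu_map().
 */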
7048int ipa3_iommu_map(struct iommu_domain *domain,
7049 unsigned long iova, phys_addr_t paddr, size_t size, int prot)
7050{
Skylar Changefc0a0f2018-03-29 11:17:40 -07007051 struct ipa_smmu_cb_ctx *ap_cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
7052 struct ipa_smmu_cb_ctx *uc_cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
Amir Levy9659e592016-10-27 18:08:27 +03007053
7054 IPADBG("domain =0x%p iova 0x%lx\n", domain, iova);
7055 IPADBG("paddr =0x%pa size 0x%x\n", &paddr, (u32)size);
7056
7057 /* make sure no overlapping */
7058 if (domain == ipa3_get_smmu_domain()) {
7059 if (iova >= ap_cb->va_start && iova < ap_cb->va_end) {
7060 IPAERR("iommu AP overlap addr 0x%lx\n", iova);
7061 ipa_assert();
7062 return -EFAULT;
7063 }
7064 } else if (domain == ipa3_get_wlan_smmu_domain()) {
7065 /* wlan is one time map */
7066 } else if (domain == ipa3_get_uc_smmu_domain()) {
7067 if (iova >= uc_cb->va_start && iova < uc_cb->va_end) {
7068 IPAERR("iommu uC overlap addr 0x%lx\n", iova);
7069 ipa_assert();
7070 return -EFAULT;
7071 }
7072 } else {
7073 IPAERR("Unexpected domain 0x%p\n", domain);
7074 ipa_assert();
7075 return -EFAULT;
7076 }
7077
7078 return iommu_map(domain, iova, paddr, size, prot);
7079}
7080
Michael Adisumartad04e6d62017-11-09 17:46:35 -08007081/**
7082 * ipa3_get_smmu_params()- Return the ipa3 smmu related params.
7083 */
7084int ipa3_get_smmu_params(struct ipa_smmu_in_params *in,
7085 struct ipa_smmu_out_params *out)
7086{
7087 bool is_smmu_enable = false;
7088
7089 if (out == NULL || in == NULL) {
7090 IPAERR("bad parms for Client SMMU out params\n");
7091 return -EINVAL;
7092 }
7093
7094 if (!ipa3_ctx) {
7095 IPAERR("IPA not yet initialized\n");
7096 return -EINVAL;
7097 }
7098
7099 switch (in->smmu_client) {
7100 case IPA_SMMU_WLAN_CLIENT:
7101 is_smmu_enable = !(ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] |
7102 ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN]);
7103 break;
7104 default:
7105 is_smmu_enable = false;
7106 IPAERR("Trying to get illegal clients SMMU status");
7107 return -EINVAL;
7108 }
7109
7110 out->smmu_enable = is_smmu_enable;
7111
7112 return 0;
7113}
7114
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04007115/**************************************************************
7116 * PCIe Version
7117 *************************************************************/
7118
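/**
 * ipa3_pci_drv_probe() - probe path for the PCIe/emulation platform
 * @pci_dev: emulator PCI device exposing IPA through BAR0
 * @api_ctrl: IPA API controller to bind
 * @pdrv_match: validated but otherwise unused on this path
 *
 * Enables the device, claims BAR0, rebases the IPA/GSI/interrupt
 * controller addresses onto the BAR using the DT-provided
 * emulator-bar0-offset, sets the DMA masks and runs ipa3_pre_init()
 * on a static platform-device shim.
 */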
7119int ipa3_pci_drv_probe(
7120 struct pci_dev *pci_dev,
7121 struct ipa_api_controller *api_ctrl,
7122 const struct of_device_id *pdrv_match)
7123{
7124 int result;
7125 struct ipa3_plat_drv_res *ipa_drv_res;
7126 u32 bar0_offset;
7127 u32 mem_start;
7128 u32 mem_end;
7129 uint32_t bits;
7130 uint32_t ipa_start, gsi_start, intctrl_start;
7131 struct device *dev;
7132 static struct platform_device platform_dev;
7133
7134 if (!pci_dev || !api_ctrl || !pdrv_match) {
7135 IPAERR(
7136 "Bad arg: pci_dev (%pK) and/or api_ctrl (%pK) and/or pdrv_match (%pK)\n",
7137 pci_dev, api_ctrl, pdrv_match);
7138 return -EOPNOTSUPP;
7139 }
7140
7141 dev = &(pci_dev->dev);
7142
7143 IPADBG("IPA PCI driver probing started\n");
7144
7145 /*
7146 * Follow PCI driver flow here.
7147 * pci_enable_device: Enables device and assigns resources
7148 * pci_request_region: Makes BAR0 address region usable
7149 */
7150 result = pci_enable_device(pci_dev);
7151 if (result < 0) {
7152 IPAERR("pci_enable_device() failed\n");
7153 return -EOPNOTSUPP;
7154 }
7155
7156 result = pci_request_region(pci_dev, 0, "IPA Memory");
7157 if (result < 0) {
7158 IPAERR("pci_request_region() failed\n");
7159 pci_disable_device(pci_dev);
7160 return -EOPNOTSUPP;
7161 }
7162
7163 /*
7164 * When in the PCI/emulation environment, &platform_dev is
7165 * passed to get_ipa_dts_configuration(), but is unused, since
7166 * all usages of it in the function are replaced by CPP
7167 * relative to definitions in ipa_emulation_stubs.h. Passing
7168 * &platform_dev makes code validity tools happy.
7169 */
7170 if (get_ipa_dts_configuration(&platform_dev, &ipa3_res) != 0) {
7171 IPAERR("get_ipa_dts_configuration() failed\n");
7172 pci_release_region(pci_dev, 0);
7173 pci_disable_device(pci_dev);
7174 return -EOPNOTSUPP;
7175 }
7176
7177 ipa_drv_res = &ipa3_res;
7178
7179 result =
7180 of_property_read_u32(NULL, "emulator-bar0-offset",
7181 &bar0_offset);
7182 if (result) {
7183 IPAERR(":get resource failed for emulator-bar0-offset!\n");
7184 pci_release_region(pci_dev, 0);
7185 pci_disable_device(pci_dev);
7186 return -ENODEV;
7187 }
7188 IPADBG(":using emulator-bar0-offset 0x%08X\n", bar0_offset);
7189
7190 ipa_start = ipa_drv_res->ipa_mem_base;
7191 gsi_start = ipa_drv_res->transport_mem_base;
7192 intctrl_start = ipa_drv_res->emulator_intcntrlr_mem_base;
7193
7194 /*
7195 * Where will we be interrupted?
7196 */
7197 ipa_drv_res->emulator_irq = pci_dev->irq;
7198 IPADBG(
7199 "EMULATION PCI_INTERRUPT_PIN(%u)\n",
7200 ipa_drv_res->emulator_irq);
7201
7202 /*
7203 * Set the ipa_mem_base to the PCI base address of BAR0
7204 */
7205 mem_start = pci_resource_start(pci_dev, 0);
7206 mem_end = pci_resource_end(pci_dev, 0);
7207
7208 IPADBG("PCI START = 0x%x\n", mem_start);
7209 IPADBG("PCI END = 0x%x\n", mem_end);
7210
7211 ipa_drv_res->ipa_mem_base = mem_start + bar0_offset;
7212
7213 smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
7214 smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
7215
7216 ipa_drv_res->transport_mem_base =
7217 ipa_drv_res->ipa_mem_base + (gsi_start - ipa_start);
7218
7219 ipa_drv_res->emulator_intcntrlr_mem_base =
7220 ipa_drv_res->ipa_mem_base + (intctrl_start - ipa_start);
7221
7222 IPADBG("ipa_mem_base = 0x%x\n",
7223 ipa_drv_res->ipa_mem_base);
7224 IPADBG("ipa_mem_size = 0x%x\n",
7225 ipa_drv_res->ipa_mem_size);
7226
7227 IPADBG("transport_mem_base = 0x%x\n",
7228 ipa_drv_res->transport_mem_base);
7229 IPADBG("transport_mem_size = 0x%x\n",
7230 ipa_drv_res->transport_mem_size);
7231
7232 IPADBG("emulator_intcntrlr_mem_base = 0x%x\n",
7233 ipa_drv_res->emulator_intcntrlr_mem_base);
7234 IPADBG("emulator_intcntrlr_mem_size = 0x%x\n",
7235 ipa_drv_res->emulator_intcntrlr_mem_size);
7236
7237 result = ipa3_bind_api_controller(ipa_drv_res->ipa_hw_type, api_ctrl);
7238 if (result != 0) {
7239 IPAERR("ipa3_bind_api_controller() failed\n");
7240 pci_release_region(pci_dev, 0);
7241 pci_disable_device(pci_dev);
7242 return result;
7243 }
7244
7245 bits = (ipa_drv_res->use_64_bit_dma_mask) ? 64 : 32;
7246
7247 if (dma_set_mask(dev, DMA_BIT_MASK(bits)) != 0) {
7248 IPAERR("dma_set_mask(%pK, %u) failed\n", dev, bits);
7249 pci_release_region(pci_dev, 0);
7250 pci_disable_device(pci_dev);
7251 return -EOPNOTSUPP;
7252 }
7253
7254 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(bits)) != 0) {
7255 IPAERR("dma_set_coherent_mask(%pK, %u) failed\n", dev, bits);
7256 pci_release_region(pci_dev, 0);
7257 pci_disable_device(pci_dev);
7258 return -EOPNOTSUPP;
7259 }
7260
7261 pci_set_master(pci_dev);
7262
7263 memset(&platform_dev, 0, sizeof(platform_dev));
7264 platform_dev.dev = *dev;
7265
7266 /* Proceed to real initialization */
7267 result = ipa3_pre_init(&ipa3_res, &platform_dev);
7268 if (result) {
7269 IPAERR("ipa3_init failed\n");
7270 pci_clear_master(pci_dev);
7271 pci_release_region(pci_dev, 0);
7272 pci_disable_device(pci_dev);
7273 return result;
7274 }
7275
7276 return result;
7277}
7278
7279/*
7280 * The following returns transport register memory location and
7281 * size...
7282 */
7283int ipa3_get_transport_info(
7284 phys_addr_t *phys_addr_ptr,
7285 unsigned long *size_ptr)
7286{
7287 if (!phys_addr_ptr || !size_ptr) {
7288 IPAERR("Bad arg: phys_addr_ptr(%pK) and/or size_ptr(%pK)\n",
7289 phys_addr_ptr, size_ptr);
7290 return -EINVAL;
7291 }
7292
7293 *phys_addr_ptr = ipa3_res.transport_mem_base;
7294 *size_ptr = ipa3_res.transport_mem_size;
7295
7296 return 0;
7297}
7298EXPORT_SYMBOL(ipa3_get_transport_info);
7299
7300static uint emulation_type = IPA_HW_v4_0;
7301
7302/*
7303 * The following returns emulation type...
7304 */
7305uint ipa3_get_emulation_type(void)
7306{
7307 return emulation_type;
7308}
7309
Amir Levy9659e592016-10-27 18:08:27 +03007310MODULE_LICENSE("GPL v2");
7311MODULE_DESCRIPTION("IPA HW device driver");
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04007312
7313/*
7314 * Module parameter. Invoke as follows:
7315 * insmod ipat.ko emulation_type=[13|14|...|N]
7316 * Examples:
7317 * insmod ipat.ko emulation_type=13 # for IPA 3.5.1
7318 * insmod ipat.ko emulation_type=14 # for IPA 4.0
7319 *
7320 * NOTE: The emulation_type values need to come from: enum ipa_hw_type
7321 *
7322 */
7323
7324module_param(emulation_type, uint, 0000);
7325MODULE_PARM_DESC(
7326 emulation_type,
7327 "IPA emulation type (Use 13 for IPA 3.5.1, 14 for IPA 4.0)");