/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/fs.h>
#include <linux/genalloc.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/rbtree.h>
#include <linux/of_gpio.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/msm_gsi.h>
#include <linux/time.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/pci.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/smem.h>
#include <soc/qcom/scm.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_ARM64
/* Outer caches unsupported on ARM64 platforms */
#define outer_flush_range(x, y)
#define __cpuc_flush_dcache_area __flush_dcache_area

#endif

#define IPA_SUBSYSTEM_NAME "ipa_fws"
#include "ipa_i.h"
#include "../ipa_rm_i.h"
#include "ipahal/ipahal.h"
#include "ipahal/ipahal_fltrt.h"

#define CREATE_TRACE_POINTS
#include "ipa_trace.h"

/*
 * The following is for adding code (i.e. for EMULATION) not found on x86.
 */
#if IPA_EMULATION_COMPILE == 1
# include "ipa_emulation_stubs.h"
#endif

#ifdef CONFIG_COMPAT
/**
 * struct ipa3_ioc_nat_alloc_mem32 - nat table memory allocation
 * properties
 * @dev_name: input parameter, the name of table
 * @size: input parameter, size of table in bytes
 * @offset: output parameter, offset into page in case of system memory
 */
struct ipa3_ioc_nat_alloc_mem32 {
	char dev_name[IPA_RESOURCE_NAME_MAX];
	compat_size_t size;
	compat_off_t offset;
};

/**
 * struct ipa_ioc_nat_ipv6ct_table_alloc32 - table memory allocation
 * properties
 * @size: input parameter, size of table in bytes
 * @offset: output parameter, offset into page in case of system memory
 */
struct ipa_ioc_nat_ipv6ct_table_alloc32 {
	compat_size_t size;
	compat_off_t offset;
};
#endif /* #ifdef CONFIG_COMPAT */

#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311
#define TZ_MEM_PROTECT_REGION_ID 0x10

struct tz_smmu_ipa_protect_region_iovec_s {
	u64 input_addr;
	u64 output_addr;
	u64 size;
	u32 attr;
} __packed;

struct tz_smmu_ipa_protect_region_s {
	phys_addr_t iovec_buf;
	u32 size_bytes;
} __packed;

static void ipa3_start_tag_process(struct work_struct *work);
static DECLARE_WORK(ipa3_tag_work, ipa3_start_tag_process);

static void ipa3_transport_release_resource(struct work_struct *work);
static DECLARE_DELAYED_WORK(ipa3_transport_release_resource_work,
	ipa3_transport_release_resource);
static void ipa_gsi_notify_cb(struct gsi_per_notify *notify);

static int ipa3_attach_to_smmu(void);
static int ipa3_alloc_pkt_init(void);

static void ipa3_load_ipa_fw(struct work_struct *work);
static DECLARE_WORK(ipa3_fw_loading_work, ipa3_load_ipa_fw);

static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work);
static DECLARE_WORK(ipa_dec_clients_disable_clks_on_wq_work,
	ipa_dec_clients_disable_clks_on_wq);

static struct ipa3_plat_drv_res ipa3_res = {0, };

static struct clk *ipa3_clk;

struct ipa3_context *ipa3_ctx;

static struct {
	bool present[IPA_SMMU_CB_MAX];
	bool arm_smmu;
	bool fast_map;
	bool s1_bypass_arr[IPA_SMMU_CB_MAX];
	bool use_64_bit_dma_mask;
	u32 ipa_base;
	u32 ipa_size;
} smmu_info;

static char *active_clients_table_buf;

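/**
 * ipa3_active_clients_log_print_buffer() - copy the cyclic active-clients
 * log, oldest entry first, into @buf (up to @size bytes), one entry per line
 *
 * Returns the number of bytes written
 */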
int ipa3_active_clients_log_print_buffer(char *buf, int size)
{
	int i;
	int nbytes;
	int cnt = 0;
	int start_idx;
	int end_idx;
	unsigned long flags;

	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
	start_idx = (ipa3_ctx->ipa3_active_clients_logging.log_tail + 1) %
			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
	end_idx = ipa3_ctx->ipa3_active_clients_logging.log_head;
	for (i = start_idx; i != end_idx;
		i = (i + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) {
		nbytes = scnprintf(buf + cnt, size - cnt, "%s\n",
				ipa3_ctx->ipa3_active_clients_logging
				.log_buffer[i]);
		cnt += nbytes;
	}
	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
		flags);

	return cnt;
}

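/**
 * ipa3_active_clients_log_print_table() - format the active-clients hash
 * table (ID string, reference count and entry type) plus the total active
 * clients count into @buf (up to @size bytes)
 *
 * Returns the number of bytes written
 */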
int ipa3_active_clients_log_print_table(char *buf, int size)
{
	int i;
	struct ipa3_active_client_htable_entry *iterator;
	int cnt = 0;
	unsigned long flags;

	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
	cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n");
	hash_for_each(ipa3_ctx->ipa3_active_clients_logging.htable, i,
			iterator, list) {
		switch (iterator->type) {
		case IPA3_ACTIVE_CLIENT_LOG_TYPE_EP:
			cnt += scnprintf(buf + cnt, size - cnt,
					"%-40s %-3d ENDPOINT\n",
					iterator->id_string, iterator->count);
			break;
		case IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE:
			cnt += scnprintf(buf + cnt, size - cnt,
					"%-40s %-3d SIMPLE\n",
					iterator->id_string, iterator->count);
			break;
		case IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE:
			cnt += scnprintf(buf + cnt, size - cnt,
					"%-40s %-3d RESOURCE\n",
					iterator->id_string, iterator->count);
			break;
		case IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL:
			cnt += scnprintf(buf + cnt, size - cnt,
					"%-40s %-3d SPECIAL\n",
					iterator->id_string, iterator->count);
			break;
		default:
			IPAERR("Trying to print illegal active_clients type");
			break;
		}
	}
	cnt += scnprintf(buf + cnt, size - cnt,
			"\nTotal active clients count: %d\n",
			atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
		flags);

	return cnt;
}

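/**
 * ipa3_clean_modem_rule() - send an empty QMI filter-install request to the
 * modem so that previously installed modem filter rules are cleared. The
 * extended request format is used on IPA HW v3.0 and above.
 */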
static int ipa3_clean_modem_rule(void)
{
	struct ipa_install_fltr_rule_req_msg_v01 *req;
	struct ipa_install_fltr_rule_req_ex_msg_v01 *req_ex;
	int val = 0;

	if (ipa3_ctx->ipa_hw_type < IPA_HW_v3_0) {
		req = kzalloc(
			sizeof(struct ipa_install_fltr_rule_req_msg_v01),
			GFP_KERNEL);
		if (!req) {
			IPAERR("mem allocation failed!\n");
			return -ENOMEM;
		}
		req->filter_spec_list_valid = false;
		req->filter_spec_list_len = 0;
		req->source_pipe_index_valid = 0;
		val = ipa3_qmi_filter_request_send(req);
		kfree(req);
	} else {
		req_ex = kzalloc(
			sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01),
			GFP_KERNEL);
		if (!req_ex) {
			IPAERR("mem allocation failed!\n");
			return -ENOMEM;
		}
		req_ex->filter_spec_ex_list_valid = false;
		req_ex->filter_spec_ex_list_len = 0;
		req_ex->source_pipe_index_valid = 0;
		val = ipa3_qmi_filter_request_ex_send(req_ex);
		kfree(req_ex);
	}

	return val;
}

static int ipa3_active_clients_panic_notifier(struct notifier_block *this,
		unsigned long event, void *ptr)
{
	ipa3_active_clients_log_print_table(active_clients_table_buf,
			IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE);
	IPAERR("%s\n", active_clients_table_buf);

	return NOTIFY_DONE;
}

static struct notifier_block ipa3_active_clients_panic_blk = {
	.notifier_call = ipa3_active_clients_panic_notifier,
};

static int ipa3_active_clients_log_insert(const char *string)
{
	int head;
	int tail;

	if (!ipa3_ctx->ipa3_active_clients_logging.log_rdy)
		return -EPERM;

	head = ipa3_ctx->ipa3_active_clients_logging.log_head;
	tail = ipa3_ctx->ipa3_active_clients_logging.log_tail;

	memset(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], '_',
			IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
	strlcpy(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], string,
			(size_t)IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
	head = (head + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
	if (tail == head)
		tail = (tail + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;

	ipa3_ctx->ipa3_active_clients_logging.log_tail = tail;
	ipa3_ctx->ipa3_active_clients_logging.log_head = head;

	return 0;
}

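/**
 * ipa3_active_clients_log_init() - allocate the active-clients log and
 * table buffers, initialize the hash table and register the panic notifier
 * that dumps the active-clients table on a kernel panic.
 */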
static int ipa3_active_clients_log_init(void)
{
	int i;

	spin_lock_init(&ipa3_ctx->ipa3_active_clients_logging.lock);
	ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] = kzalloc(
			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES *
			sizeof(char[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN]),
			GFP_KERNEL);
	active_clients_table_buf = kzalloc(sizeof(
			char[IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE]), GFP_KERNEL);
	if (ipa3_ctx->ipa3_active_clients_logging.log_buffer == NULL) {
		pr_err("Active Clients Logging memory allocation failed");
		goto bail;
	}
	for (i = 0; i < IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; i++) {
		ipa3_ctx->ipa3_active_clients_logging.log_buffer[i] =
			ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] +
			(IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN * i);
	}
	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
	ipa3_ctx->ipa3_active_clients_logging.log_tail =
			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
	hash_init(ipa3_ctx->ipa3_active_clients_logging.htable);
	atomic_notifier_chain_register(&panic_notifier_list,
			&ipa3_active_clients_panic_blk);
	ipa3_ctx->ipa3_active_clients_logging.log_rdy = 1;

	return 0;

bail:
	return -ENOMEM;
}

void ipa3_active_clients_log_clear(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
	ipa3_ctx->ipa3_active_clients_logging.log_tail =
			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
		flags);
}

static void ipa3_active_clients_log_destroy(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
	ipa3_ctx->ipa3_active_clients_logging.log_rdy = 0;
	kfree(active_clients_table_buf);
	active_clients_table_buf = NULL;
	kfree(ipa3_ctx->ipa3_active_clients_logging.log_buffer[0]);
	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
	ipa3_ctx->ipa3_active_clients_logging.log_tail =
		IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
		flags);
}

static struct ipa_smmu_cb_ctx smmu_cb[IPA_SMMU_CB_MAX];

struct iommu_domain *ipa3_get_smmu_domain(void)
{
	if (smmu_cb[IPA_SMMU_CB_AP].valid)
		return smmu_cb[IPA_SMMU_CB_AP].mapping->domain;

	IPAERR("CB not valid\n");

	return NULL;
}

struct iommu_domain *ipa3_get_uc_smmu_domain(void)
{
	if (smmu_cb[IPA_SMMU_CB_UC].valid)
		return smmu_cb[IPA_SMMU_CB_UC].mapping->domain;

	IPAERR("CB not valid\n");

	return NULL;
}

struct iommu_domain *ipa3_get_wlan_smmu_domain(void)
{
	if (smmu_cb[IPA_SMMU_CB_WLAN].valid)
		return smmu_cb[IPA_SMMU_CB_WLAN].iommu;

	IPAERR("CB not valid\n");

	return NULL;
}

struct iommu_domain *ipa3_get_smmu_domain_by_type(enum ipa_smmu_cb_type cb_type)
{
	if (cb_type == IPA_SMMU_CB_WLAN && smmu_cb[IPA_SMMU_CB_WLAN].valid)
		return smmu_cb[IPA_SMMU_CB_WLAN].iommu;

	if (smmu_cb[cb_type].valid)
		return smmu_cb[cb_type].mapping->domain;

	IPAERR("CB#%d not valid\n", cb_type);

	return NULL;
}

struct device *ipa3_get_dma_dev(void)
{
	return ipa3_ctx->pdev;
}

/**
 * ipa3_get_smmu_ctx() - Return smmu context for the given cb_type
 *
 * Return value: pointer to smmu context address
 */
struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(enum ipa_smmu_cb_type cb_type)
{
	return &smmu_cb[cb_type];
}

static int ipa3_open(struct inode *inode, struct file *filp)
{
	IPADBG_LOW("ENTER\n");
	filp->private_data = ipa3_ctx;

	return 0;
}

static void ipa3_wan_msg_free_cb(void *buff, u32 len, u32 type)
{
	if (!buff) {
		IPAERR("Null buffer\n");
		return;
	}

	if (type != WAN_UPSTREAM_ROUTE_ADD &&
	    type != WAN_UPSTREAM_ROUTE_DEL &&
	    type != WAN_EMBMS_CONNECT) {
		IPAERR("Wrong type given. buff %p type %d\n", buff, type);
		return;
	}

	kfree(buff);
}

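/**
 * ipa3_send_wan_msg() - copy a WAN message from user space and forward it
 * to registered listeners via ipa3_send_msg(). When @is_cache is set the
 * message is also saved in the CNE event request cache.
 */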
static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type,
	bool is_cache)
{
	int retval;
	struct ipa_wan_msg *wan_msg;
	struct ipa_msg_meta msg_meta;
	struct ipa_wan_msg cache_wan_msg;

	wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
	if (!wan_msg) {
		IPAERR("no memory\n");
		return -ENOMEM;
	}

	if (copy_from_user(wan_msg, (const void __user *)usr_param,
		sizeof(struct ipa_wan_msg))) {
		kfree(wan_msg);
		return -EFAULT;
	}

	memcpy(&cache_wan_msg, wan_msg, sizeof(cache_wan_msg));

	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
	msg_meta.msg_type = msg_type;
	msg_meta.msg_len = sizeof(struct ipa_wan_msg);
	retval = ipa3_send_msg(&msg_meta, wan_msg, ipa3_wan_msg_free_cb);
	if (retval) {
		IPAERR_RL("ipa3_send_msg failed: %d\n", retval);
		kfree(wan_msg);
		return retval;
	}

	if (is_cache) {
		mutex_lock(&ipa3_ctx->ipa_cne_evt_lock);

		/* cache the cne event */
		memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[
			ipa3_ctx->num_ipa_cne_evt_req].wan_msg,
			&cache_wan_msg,
			sizeof(cache_wan_msg));

		memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[
			ipa3_ctx->num_ipa_cne_evt_req].msg_meta,
			&msg_meta,
			sizeof(struct ipa_msg_meta));

		ipa3_ctx->num_ipa_cne_evt_req++;
		ipa3_ctx->num_ipa_cne_evt_req %= IPA_MAX_NUM_REQ_CACHE;
		mutex_unlock(&ipa3_ctx->ipa_cne_evt_lock);
	}

	return 0;
}

static void ipa3_vlan_l2tp_msg_free_cb(void *buff, u32 len, u32 type)
{
	if (!buff) {
		IPAERR("Null buffer\n");
		return;
	}

	switch (type) {
	case ADD_VLAN_IFACE:
	case DEL_VLAN_IFACE:
	case ADD_L2TP_VLAN_MAPPING:
	case DEL_L2TP_VLAN_MAPPING:
	case ADD_BRIDGE_VLAN_MAPPING:
	case DEL_BRIDGE_VLAN_MAPPING:
		break;
	default:
		IPAERR("Wrong type given. buff %pK type %d\n", buff, type);
		return;
	}

	kfree(buff);
}

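/**
 * ipa3_send_vlan_l2tp_msg() - copy a VLAN interface, L2TP-VLAN mapping or
 * bridge-VLAN mapping request from user space, based on @msg_type, and
 * forward it to registered listeners via ipa3_send_msg().
 */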
static int ipa3_send_vlan_l2tp_msg(unsigned long usr_param, uint8_t msg_type)
{
	int retval;
	struct ipa_ioc_vlan_iface_info *vlan_info;
	struct ipa_ioc_l2tp_vlan_mapping_info *mapping_info;
	struct ipa_ioc_bridge_vlan_mapping_info *bridge_vlan_info;
	struct ipa_msg_meta msg_meta;
	void *buff;

	IPADBG("type %d\n", msg_type);

	memset(&msg_meta, 0, sizeof(msg_meta));
	msg_meta.msg_type = msg_type;

	if ((msg_type == ADD_VLAN_IFACE) ||
		(msg_type == DEL_VLAN_IFACE)) {
		vlan_info = kzalloc(sizeof(struct ipa_ioc_vlan_iface_info),
			GFP_KERNEL);
		if (!vlan_info) {
			IPAERR("no memory\n");
			return -ENOMEM;
		}

		if (copy_from_user((u8 *)vlan_info, (void __user *)usr_param,
			sizeof(struct ipa_ioc_vlan_iface_info))) {
			kfree(vlan_info);
			return -EFAULT;
		}

		msg_meta.msg_len = sizeof(struct ipa_ioc_vlan_iface_info);
		buff = vlan_info;
	} else if ((msg_type == ADD_L2TP_VLAN_MAPPING) ||
		(msg_type == DEL_L2TP_VLAN_MAPPING)) {
		mapping_info = kzalloc(sizeof(struct
			ipa_ioc_l2tp_vlan_mapping_info), GFP_KERNEL);
		if (!mapping_info) {
			IPAERR("no memory\n");
			return -ENOMEM;
		}

		if (copy_from_user((u8 *)mapping_info,
			(void __user *)usr_param,
			sizeof(struct ipa_ioc_l2tp_vlan_mapping_info))) {
			kfree(mapping_info);
			return -EFAULT;
		}

		msg_meta.msg_len = sizeof(struct
			ipa_ioc_l2tp_vlan_mapping_info);
		buff = mapping_info;
	} else if ((msg_type == ADD_BRIDGE_VLAN_MAPPING) ||
		(msg_type == DEL_BRIDGE_VLAN_MAPPING)) {
		bridge_vlan_info = kzalloc(
			sizeof(struct ipa_ioc_bridge_vlan_mapping_info),
			GFP_KERNEL);
		if (!bridge_vlan_info) {
			IPAERR("no memory\n");
			return -ENOMEM;
		}

		if (copy_from_user((u8 *)bridge_vlan_info,
			(void __user *)usr_param,
			sizeof(struct ipa_ioc_bridge_vlan_mapping_info))) {
			kfree(bridge_vlan_info);
			IPAERR("copy from user failed\n");
			return -EFAULT;
		}

		msg_meta.msg_len = sizeof(struct
			ipa_ioc_bridge_vlan_mapping_info);
		buff = bridge_vlan_info;
	} else {
		IPAERR("Unexpected event\n");
		return -EFAULT;
	}

	retval = ipa3_send_msg(&msg_meta, buff,
		ipa3_vlan_l2tp_msg_free_cb);
	if (retval) {
		IPAERR("ipa3_send_msg failed: %d, msg_type %d\n",
			retval,
			msg_type);
		kfree(buff);
		return retval;
	}
	IPADBG("exit\n");

	return 0;
}

static void ipa3_gsb_msg_free_cb(void *buff, u32 len, u32 type)
{
	if (!buff) {
		IPAERR("Null buffer\n");
		return;
	}

	switch (type) {
	case IPA_GSB_CONNECT:
	case IPA_GSB_DISCONNECT:
		break;
	default:
		IPAERR("Wrong type given. buff %pK type %d\n", buff, type);
		return;
	}

	kfree(buff);
}

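/**
 * ipa3_get_usb_ep_info() - fill @pair_info with the valid USB
 * producer/consumer pipe pairs (the USB2 pair and/or the USB pair,
 * depending on the requested tethering protocol) and update
 * @ep_info->num_ep_pairs accordingly.
 */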
static void ipa3_get_usb_ep_info(
		struct ipa_ioc_get_ep_info *ep_info,
		struct ipa_ep_pair_info *pair_info
		)
{
	int ep_index = -1, i;
	int pair_id = 0;

	for (i = 0; i < ep_info->max_ep_pairs; i++) {
		pair_info[i].consumer_pipe_num = -1;
		pair_info[i].producer_pipe_num = -1;
		pair_info[i].ep_id = -1;
	}

	if ((!ep_info->teth_prot_valid) || (ep_info->teth_prot_valid &&
		ep_info->teth_prot == IPA_PROT_RMNET_CV2X)) {
		ep_index = ipa3_get_ep_mapping(IPA_CLIENT_USB2_PROD);

		if ((ep_index != -1) && ipa3_ctx->ep[ep_index].valid) {
			pair_info[pair_id].consumer_pipe_num = ep_index;
			ep_index = ipa3_get_ep_mapping(IPA_CLIENT_USB2_CONS);

			if ((ep_index != -1) &&
				(ipa3_ctx->ep[ep_index].valid)) {
				pair_info[pair_id].producer_pipe_num = ep_index;
				pair_info[pair_id].ep_id = IPA_USB1_EP_ID;

				IPADBG("ep_pair_info consumer_pipe_num %d",
					pair_info[pair_id].consumer_pipe_num);
				IPADBG(" producer_pipe_num %d ep_id %d\n",
					pair_info[pair_id].producer_pipe_num,
					pair_info[pair_id].ep_id);
				pair_id++;
			} else {
				pair_info[pair_id].consumer_pipe_num = -1;
				IPADBG("ep_pair_info consumer_pipe_num %d",
					pair_info[pair_id].consumer_pipe_num);
				IPADBG(" producer_pipe_num %d ep_id %d\n",
					pair_info[pair_id].producer_pipe_num,
					pair_info[pair_id].ep_id);
			}
		}
	}

	if ((!ep_info->teth_prot_valid) || (ep_info->teth_prot_valid &&
		ep_info->teth_prot == IPA_PROT_RMNET)) {
		ep_index = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);

		if ((ep_index != -1) && ipa3_ctx->ep[ep_index].valid) {
			pair_info[pair_id].consumer_pipe_num = ep_index;
			ep_index = ipa3_get_ep_mapping(IPA_CLIENT_USB_CONS);

			if ((ep_index != -1) &&
				(ipa3_ctx->ep[ep_index].valid)) {
				pair_info[pair_id].producer_pipe_num = ep_index;
				pair_info[pair_id].ep_id = IPA_USB0_EP_ID;

				IPADBG("ep_pair_info consumer_pipe_num %d",
					pair_info[pair_id].consumer_pipe_num);
				IPADBG(" producer_pipe_num %d ep_id %d\n",
					pair_info[pair_id].producer_pipe_num,
					pair_info[pair_id].ep_id);
				pair_id++;
			} else {
				pair_info[pair_id].consumer_pipe_num = -1;
				IPADBG("ep_pair_info consumer_pipe_num %d",
					pair_info[pair_id].consumer_pipe_num);
				IPADBG(" producer_pipe_num %d ep_id %d\n",
					pair_info[pair_id].producer_pipe_num,
					pair_info[pair_id].ep_id);
			}
		}
	}
	ep_info->num_ep_pairs = pair_id;
}

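/**
 * ipa3_get_pcie_ep_info() - fill @pair_info with the valid PCIe (MHI and
 * MHI2) producer/consumer pipe pairs and update @ep_info->num_ep_pairs.
 */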
static void ipa3_get_pcie_ep_info(
		struct ipa_ioc_get_ep_info *ep_info,
		struct ipa_ep_pair_info *pair_info
		)
{
	int ep_index = -1, i;

	ep_info->num_ep_pairs = 0;
	for (i = 0; i < ep_info->max_ep_pairs; i++) {
		pair_info[i].consumer_pipe_num = -1;
		pair_info[i].producer_pipe_num = -1;
		pair_info[i].ep_id = -1;
	}

	ep_index = ipa3_get_ep_mapping(IPA_CLIENT_MHI2_PROD);

	if ((ep_index != -1) && ipa3_ctx->ep[ep_index].valid) {
		pair_info[ep_info->num_ep_pairs].consumer_pipe_num = ep_index;
		ep_index = ipa3_get_ep_mapping(IPA_CLIENT_MHI2_CONS);
		if ((ep_index != -1) && (ipa3_ctx->ep[ep_index].valid)) {
			pair_info[ep_info->num_ep_pairs].producer_pipe_num =
				ep_index;
			pair_info[ep_info->num_ep_pairs].ep_id =
				IPA_PCIE1_EP_ID;

			IPADBG("ep_pair_info consumer_pipe_num %d",
				pair_info[ep_info->num_ep_pairs].
				consumer_pipe_num);
			IPADBG(" producer_pipe_num %d ep_id %d\n",
				pair_info[ep_info->num_ep_pairs].
				producer_pipe_num,
				pair_info[ep_info->num_ep_pairs].ep_id);
			ep_info->num_ep_pairs++;
		} else {
			pair_info[ep_info->num_ep_pairs].consumer_pipe_num = -1;
			IPADBG("ep_pair_info consumer_pipe_num %d",
				pair_info[ep_info->num_ep_pairs].
				consumer_pipe_num);
			IPADBG(" producer_pipe_num %d ep_id %d\n",
				pair_info[ep_info->num_ep_pairs].
				producer_pipe_num,
				pair_info[ep_info->num_ep_pairs].ep_id);
		}
	}

	ep_index = ipa3_get_ep_mapping(IPA_CLIENT_MHI_PROD);

	if ((ep_index != -1) && ipa3_ctx->ep[ep_index].valid) {
		pair_info[ep_info->num_ep_pairs].consumer_pipe_num = ep_index;
		ep_index = ipa3_get_ep_mapping(IPA_CLIENT_MHI_CONS);
		if ((ep_index != -1) && (ipa3_ctx->ep[ep_index].valid)) {
			pair_info[ep_info->num_ep_pairs].producer_pipe_num =
				ep_index;
			pair_info[ep_info->num_ep_pairs].ep_id =
				IPA_PCIE0_EP_ID;

			IPADBG("ep_pair_info consumer_pipe_num %d",
				pair_info[ep_info->num_ep_pairs].
				consumer_pipe_num);
			IPADBG(" producer_pipe_num %d ep_id %d\n",
				pair_info[ep_info->num_ep_pairs].
				producer_pipe_num,
				pair_info[ep_info->num_ep_pairs].ep_id);
			ep_info->num_ep_pairs++;
		} else {
			pair_info[ep_info->num_ep_pairs].consumer_pipe_num = -1;
			IPADBG("ep_pair_info consumer_pipe_num %d",
				pair_info[ep_info->num_ep_pairs].
				consumer_pipe_num);
			IPADBG(" producer_pipe_num %d ep_id %d\n",
				pair_info[ep_info->num_ep_pairs].
				producer_pipe_num,
				pair_info[ep_info->num_ep_pairs].ep_id);
		}
	}
}

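/**
 * ipa3_get_ep_info() - fill the endpoint pair information for the requested
 * @ep_info->ep_type (USB or PCIe) into the buffer pointed to by @param.
 */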
static int ipa3_get_ep_info(struct ipa_ioc_get_ep_info *ep_info,
	u8 *param)
{
	int ret = 0;
	struct ipa_ep_pair_info *pair_info = (struct ipa_ep_pair_info *)param;

	switch (ep_info->ep_type) {
	case IPA_DATA_EP_TYP_HSUSB:
		ipa3_get_usb_ep_info(ep_info, pair_info);
		break;

	case IPA_DATA_EP_TYP_PCIE:
		ipa3_get_pcie_ep_info(ep_info, pair_info);
		break;

	default:
		IPAERR_RL("Undefined ep_type %d\n", ep_info->ep_type);
		ret = -EFAULT;
		break;
	}

	return ret;
}

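/**
 * ipa3_send_gsb_msg() - copy an IPA_GSB_CONNECT / IPA_GSB_DISCONNECT
 * request from user space and forward it to registered listeners via
 * ipa3_send_msg().
 */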
static int ipa3_send_gsb_msg(unsigned long usr_param, uint8_t msg_type)
{
	int retval;
	struct ipa_ioc_gsb_info *gsb_info;
	struct ipa_msg_meta msg_meta;
	void *buff;

	IPADBG("type %d\n", msg_type);

	memset(&msg_meta, 0, sizeof(msg_meta));
	msg_meta.msg_type = msg_type;

	if ((msg_type == IPA_GSB_CONNECT) ||
		(msg_type == IPA_GSB_DISCONNECT)) {
		gsb_info = kzalloc(sizeof(struct ipa_ioc_gsb_info),
			GFP_KERNEL);
		if (!gsb_info) {
			IPAERR("no memory\n");
			return -ENOMEM;
		}

		if (copy_from_user((u8 *)gsb_info, (void __user *)usr_param,
			sizeof(struct ipa_ioc_gsb_info))) {
			kfree(gsb_info);
			return -EFAULT;
		}

		msg_meta.msg_len = sizeof(struct ipa_ioc_gsb_info);
		buff = gsb_info;
	} else {
		IPAERR("Unexpected event\n");
		return -EFAULT;
	}

	retval = ipa3_send_msg(&msg_meta, buff,
		ipa3_gsb_msg_free_cb);
	if (retval) {
		IPAERR("ipa3_send_msg failed: %d, msg_type %d\n",
			retval,
			msg_type);
		kfree(buff);
		return retval;
	}
	IPADBG("exit\n");

	return 0;
}

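/**
 * ipa3_ioctl() - main IOCTL handler of the IPA character device. Waits for
 * driver init to complete if needed, takes an active-clients vote, then
 * copies the per-command payload from user space and dispatches it to the
 * matching NAT/IPv6CT, header, routing or filtering operation.
 */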
static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int retval = 0;
	u32 pyld_sz;
	u8 header[128] = { 0 };
	u8 *param = NULL;
	bool is_vlan_mode;
	struct ipa_ioc_nat_alloc_mem nat_mem;
	struct ipa_ioc_nat_ipv6ct_table_alloc table_alloc;
	struct ipa_ioc_v4_nat_init nat_init;
	struct ipa_ioc_ipv6ct_init ipv6ct_init;
	struct ipa_ioc_v4_nat_del nat_del;
	struct ipa_ioc_nat_ipv6ct_table_del table_del;
	struct ipa_ioc_nat_pdn_entry mdfy_pdn;
	struct ipa_ioc_rm_dependency rm_depend;
	struct ipa_ioc_nat_dma_cmd *table_dma_cmd;
	struct ipa_ioc_get_vlan_mode vlan_mode;
	struct ipa_ioc_get_ep_info ep_info;
	size_t sz;
	int pre_entry;
	unsigned long uptr = 0;

	IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));

	if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
		return -ENOTTY;

	if (!ipa3_is_ready()) {
		IPAERR("IPA not ready, waiting for init completion\n");
		wait_for_completion(&ipa3_ctx->init_completion_obj);
	}

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	switch (cmd) {
	case IPA_IOC_ALLOC_NAT_MEM:
		if (copy_from_user(&nat_mem, (const void __user *)arg,
			sizeof(struct ipa_ioc_nat_alloc_mem))) {
			retval = -EFAULT;
			break;
		}
		/* null terminate the string */
		nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';

		if (ipa3_allocate_nat_device(&nat_mem)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, &nat_mem,
			sizeof(struct ipa_ioc_nat_alloc_mem))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_ALLOC_NAT_TABLE:
		if (copy_from_user(&table_alloc, (const void __user *)arg,
			sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc))) {
			retval = -EFAULT;
			break;
		}

		if (ipa3_allocate_nat_table(&table_alloc)) {
			retval = -EFAULT;
			break;
		}
		if (table_alloc.offset &&
			copy_to_user((void __user *)arg, &table_alloc, sizeof(
			struct ipa_ioc_nat_ipv6ct_table_alloc))) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_ALLOC_IPV6CT_TABLE:
		if (copy_from_user(&table_alloc, (const void __user *)arg,
			sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc))) {
			retval = -EFAULT;
			break;
		}

		if (ipa3_allocate_ipv6ct_table(&table_alloc)) {
			retval = -EFAULT;
			break;
		}
		if (table_alloc.offset &&
			copy_to_user((void __user *)arg, &table_alloc, sizeof(
			struct ipa_ioc_nat_ipv6ct_table_alloc))) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_V4_INIT_NAT:
		if (copy_from_user(&nat_init, (const void __user *)arg,
			sizeof(struct ipa_ioc_v4_nat_init))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_nat_init_cmd(&nat_init)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_INIT_IPV6CT_TABLE:
		if (copy_from_user(&ipv6ct_init, (const void __user *)arg,
			sizeof(struct ipa_ioc_ipv6ct_init))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_ipv6ct_init_cmd(&ipv6ct_init)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_TABLE_DMA_CMD:
		table_dma_cmd = (struct ipa_ioc_nat_dma_cmd *)header;
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_nat_dma_cmd))) {
			retval = -EFAULT;
			break;
		}
		pre_entry = table_dma_cmd->entries;
		pyld_sz = sizeof(struct ipa_ioc_nat_dma_cmd) +
			pre_entry * sizeof(struct ipa_ioc_nat_dma_one);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}

		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		table_dma_cmd = (struct ipa_ioc_nat_dma_cmd *)param;

		/* add check in case user-space module compromised */
		if (unlikely(table_dma_cmd->entries != pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				table_dma_cmd->entries, pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_table_dma_cmd(table_dma_cmd)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_V4_DEL_NAT:
		if (copy_from_user(&nat_del, (const void __user *)arg,
			sizeof(struct ipa_ioc_v4_nat_del))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_nat_del_cmd(&nat_del)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_DEL_NAT_TABLE:
		if (copy_from_user(&table_del, (const void __user *)arg,
			sizeof(struct ipa_ioc_nat_ipv6ct_table_del))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_del_nat_table(&table_del)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_DEL_IPV6CT_TABLE:
		if (copy_from_user(&table_del, (const void __user *)arg,
			sizeof(struct ipa_ioc_nat_ipv6ct_table_del))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_del_ipv6ct_table(&table_del)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_NAT_MODIFY_PDN:
		if (copy_from_user(&mdfy_pdn, (const void __user *)arg,
			sizeof(struct ipa_ioc_nat_pdn_entry))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_nat_mdfy_pdn(&mdfy_pdn)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_ADD_HDR:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_add_hdr))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_add_hdr *)header)->num_hdrs;
		pyld_sz =
			sizeof(struct ipa_ioc_add_hdr) +
			pre_entry * sizeof(struct ipa_hdr_add);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_add_hdr *)param)->num_hdrs,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_add_hdr_usr((struct ipa_ioc_add_hdr *)param,
			true)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_DEL_HDR:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_del_hdr))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_del_hdr *)header)->num_hdls;
		pyld_sz =
			sizeof(struct ipa_ioc_del_hdr) +
			pre_entry * sizeof(struct ipa_hdr_del);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_del_hdr *)param)->num_hdls,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_del_hdr_by_user((struct ipa_ioc_del_hdr *)param,
			true)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_ADD_RT_RULE:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_add_rt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_add_rt_rule *)header)->num_rules;
		pyld_sz =
			sizeof(struct ipa_ioc_add_rt_rule) +
			pre_entry * sizeof(struct ipa_rt_rule_add);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_add_rt_rule *)param)->
				num_rules,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_add_rt_rule_usr((struct ipa_ioc_add_rt_rule *)param,
			true)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_ADD_RT_RULE_EXT:
		if (copy_from_user(header,
			(const void __user *)arg,
			sizeof(struct ipa_ioc_add_rt_rule_ext))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_add_rt_rule_ext *)header)->num_rules;
		pyld_sz =
			sizeof(struct ipa_ioc_add_rt_rule_ext) +
			pre_entry * sizeof(struct ipa_rt_rule_add_ext);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(
			((struct ipa_ioc_add_rt_rule_ext *)param)->num_rules
			!= pre_entry)) {
			IPAERR(" prevent memory corruption(%d not match %d)\n",
				((struct ipa_ioc_add_rt_rule_ext *)param)->
				num_rules,
				pre_entry);
			retval = -EINVAL;
			break;
		}
		if (ipa3_add_rt_rule_ext(
			(struct ipa_ioc_add_rt_rule_ext *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_ADD_RT_RULE_AFTER:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_add_rt_rule_after))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_add_rt_rule_after *)header)->num_rules;
		pyld_sz =
			sizeof(struct ipa_ioc_add_rt_rule_after) +
			pre_entry * sizeof(struct ipa_rt_rule_add);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_add_rt_rule_after *)param)->
			num_rules != pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_add_rt_rule_after *)param)->
				num_rules,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_add_rt_rule_after(
			(struct ipa_ioc_add_rt_rule_after *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_MDFY_RT_RULE:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_mdfy_rt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules;
		pyld_sz =
			sizeof(struct ipa_ioc_mdfy_rt_rule) +
			pre_entry * sizeof(struct ipa_rt_rule_mdfy);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_mdfy_rt_rule *)param)->
				num_rules,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_DEL_RT_RULE:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_del_rt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_del_rt_rule *)header)->num_hdls;
		pyld_sz =
			sizeof(struct ipa_ioc_del_rt_rule) +
			pre_entry * sizeof(struct ipa_rt_rule_del);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_ADD_FLT_RULE:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_add_flt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_add_flt_rule *)header)->num_rules;
		pyld_sz =
			sizeof(struct ipa_ioc_add_flt_rule) +
			pre_entry * sizeof(struct ipa_flt_rule_add);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_add_flt_rule *)param)->
				num_rules,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_add_flt_rule_usr((struct ipa_ioc_add_flt_rule *)param,
			true)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_ADD_FLT_RULE_AFTER:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_add_flt_rule_after))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_add_flt_rule_after *)header)->
			num_rules;
		pyld_sz =
			sizeof(struct ipa_ioc_add_flt_rule_after) +
			pre_entry * sizeof(struct ipa_flt_rule_add);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_add_flt_rule_after *)param)->
			num_rules != pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_add_flt_rule_after *)param)->
				num_rules,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_add_flt_rule_after(
			(struct ipa_ioc_add_flt_rule_after *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_DEL_FLT_RULE:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_del_flt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_del_flt_rule *)header)->num_hdls;
		pyld_sz =
			sizeof(struct ipa_ioc_del_flt_rule) +
			pre_entry * sizeof(struct ipa_flt_rule_del);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_del_flt_rule *)param)->
				num_hdls,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_MDFY_FLT_RULE:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_mdfy_flt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules;
		pyld_sz =
			sizeof(struct ipa_ioc_mdfy_flt_rule) +
			pre_entry * sizeof(struct ipa_flt_rule_mdfy);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
			!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_mdfy_flt_rule *)param)->
				num_rules,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa3_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_COMMIT_HDR:
		retval = ipa3_commit_hdr();
		break;
	case IPA_IOC_RESET_HDR:
		retval = ipa3_reset_hdr(false);
		break;
	case IPA_IOC_COMMIT_RT:
		retval = ipa3_commit_rt(arg);
		break;
	case IPA_IOC_RESET_RT:
		retval = ipa3_reset_rt(arg, false);
		break;
	case IPA_IOC_COMMIT_FLT:
		retval = ipa3_commit_flt(arg);
		break;
	case IPA_IOC_RESET_FLT:
		retval = ipa3_reset_flt(arg, false);
		break;
	case IPA_IOC_GET_RT_TBL:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_get_rt_tbl))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, header,
			sizeof(struct ipa_ioc_get_rt_tbl))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_PUT_RT_TBL:
		retval = ipa3_put_rt_tbl(arg);
		break;
	case IPA_IOC_GET_HDR:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_get_hdr))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_get_hdr((struct ipa_ioc_get_hdr *)header)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, header,
			sizeof(struct ipa_ioc_get_hdr))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_PUT_HDR:
		retval = ipa3_put_hdr(arg);
		break;
	case IPA_IOC_SET_FLT:
		retval = ipa3_cfg_filter(arg);
		break;
	case IPA_IOC_COPY_HDR:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_copy_hdr))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_copy_hdr((struct ipa_ioc_copy_hdr *)header)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((void __user *)arg, header,
			sizeof(struct ipa_ioc_copy_hdr))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_QUERY_INTF:
		if (copy_from_user(header, (const void __user *)arg,
			sizeof(struct ipa_ioc_query_intf))) {
			retval = -EFAULT;
			break;
		}
		if (ipa3_query_intf((struct ipa_ioc_query_intf *)header)) {
			retval = -1;
			break;
		}
		if (copy_to_user((void __user *)arg, header,
			sizeof(struct ipa_ioc_query_intf))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_QUERY_INTF_TX_PROPS:
		sz = sizeof(struct ipa_ioc_query_intf_tx_props);
		if (copy_from_user(header, (const void __user *)arg, sz)) {
			retval = -EFAULT;
			break;
		}

		if (((struct ipa_ioc_query_intf_tx_props *)header)->num_tx_props
Amir Levy479cfdd2017-10-26 12:23:14 +03001600 > IPA_NUM_PROPS_MAX) {
Amir Levy9659e592016-10-27 18:08:27 +03001601 retval = -EFAULT;
1602 break;
1603 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001604 pre_entry =
1605 ((struct ipa_ioc_query_intf_tx_props *)
1606 header)->num_tx_props;
1607 pyld_sz = sz + pre_entry *
Amir Levy9659e592016-10-27 18:08:27 +03001608 sizeof(struct ipa_ioc_tx_intf_prop);
1609 param = kzalloc(pyld_sz, GFP_KERNEL);
1610 if (!param) {
1611 retval = -ENOMEM;
1612 break;
1613 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001614 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001615 retval = -EFAULT;
1616 break;
1617 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001618 /* add check in case user-space module compromised */
1619 if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
1620 param)->num_tx_props
1621 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301622 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001623 ((struct ipa_ioc_query_intf_tx_props *)
1624 param)->num_tx_props, pre_entry);
1625 retval = -EFAULT;
1626 break;
1627 }
Amir Levy9659e592016-10-27 18:08:27 +03001628 if (ipa3_query_intf_tx_props(
Amir Levy479cfdd2017-10-26 12:23:14 +03001629 (struct ipa_ioc_query_intf_tx_props *)param)) {
Amir Levy9659e592016-10-27 18:08:27 +03001630 retval = -1;
1631 break;
1632 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001633 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001634 retval = -EFAULT;
1635 break;
1636 }
1637 break;
1638 case IPA_IOC_QUERY_INTF_RX_PROPS:
1639 sz = sizeof(struct ipa_ioc_query_intf_rx_props);
Amir Levy479cfdd2017-10-26 12:23:14 +03001640 if (copy_from_user(header, (const void __user *)arg, sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001641 retval = -EFAULT;
1642 break;
1643 }
1644
1645 if (((struct ipa_ioc_query_intf_rx_props *)header)->num_rx_props
Amir Levy479cfdd2017-10-26 12:23:14 +03001646 > IPA_NUM_PROPS_MAX) {
Amir Levy9659e592016-10-27 18:08:27 +03001647 retval = -EFAULT;
1648 break;
1649 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001650 pre_entry =
1651 ((struct ipa_ioc_query_intf_rx_props *)
1652 header)->num_rx_props;
1653 pyld_sz = sz + pre_entry *
Amir Levy9659e592016-10-27 18:08:27 +03001654 sizeof(struct ipa_ioc_rx_intf_prop);
1655 param = kzalloc(pyld_sz, GFP_KERNEL);
1656 if (!param) {
1657 retval = -ENOMEM;
1658 break;
1659 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001660 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001661 retval = -EFAULT;
1662 break;
1663 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001664 /* add check in case user-space module compromised */
1665 if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
1666 param)->num_rx_props != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301667 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001668 ((struct ipa_ioc_query_intf_rx_props *)
1669 param)->num_rx_props, pre_entry);
1670 retval = -EFAULT;
1671 break;
1672 }
Amir Levy9659e592016-10-27 18:08:27 +03001673 if (ipa3_query_intf_rx_props(
Amir Levy479cfdd2017-10-26 12:23:14 +03001674 (struct ipa_ioc_query_intf_rx_props *)param)) {
Amir Levy9659e592016-10-27 18:08:27 +03001675 retval = -1;
1676 break;
1677 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001678 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001679 retval = -EFAULT;
1680 break;
1681 }
1682 break;
1683 case IPA_IOC_QUERY_INTF_EXT_PROPS:
1684 sz = sizeof(struct ipa_ioc_query_intf_ext_props);
Amir Levy479cfdd2017-10-26 12:23:14 +03001685 if (copy_from_user(header, (const void __user *)arg, sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001686 retval = -EFAULT;
1687 break;
1688 }
1689
1690 if (((struct ipa_ioc_query_intf_ext_props *)
Amir Levy479cfdd2017-10-26 12:23:14 +03001691 header)->num_ext_props > IPA_NUM_PROPS_MAX) {
Amir Levy9659e592016-10-27 18:08:27 +03001692 retval = -EFAULT;
1693 break;
1694 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001695 pre_entry =
1696 ((struct ipa_ioc_query_intf_ext_props *)
1697 header)->num_ext_props;
1698 pyld_sz = sz + pre_entry *
Amir Levy9659e592016-10-27 18:08:27 +03001699 sizeof(struct ipa_ioc_ext_intf_prop);
1700 param = kzalloc(pyld_sz, GFP_KERNEL);
1701 if (!param) {
1702 retval = -ENOMEM;
1703 break;
1704 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001705 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001706 retval = -EFAULT;
1707 break;
1708 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001709 /* add check in case user-space module compromised */
1710 if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
1711 param)->num_ext_props != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301712 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001713 ((struct ipa_ioc_query_intf_ext_props *)
1714 param)->num_ext_props, pre_entry);
1715 retval = -EFAULT;
1716 break;
1717 }
Amir Levy9659e592016-10-27 18:08:27 +03001718 if (ipa3_query_intf_ext_props(
Amir Levy479cfdd2017-10-26 12:23:14 +03001719 (struct ipa_ioc_query_intf_ext_props *)param)) {
Amir Levy9659e592016-10-27 18:08:27 +03001720 retval = -1;
1721 break;
1722 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001723 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001724 retval = -EFAULT;
1725 break;
1726 }
1727 break;
1728 case IPA_IOC_PULL_MSG:
Amir Levy479cfdd2017-10-26 12:23:14 +03001729 if (copy_from_user(header, (const void __user *)arg,
1730 sizeof(struct ipa_msg_meta))) {
Amir Levy9659e592016-10-27 18:08:27 +03001731 retval = -EFAULT;
1732 break;
1733 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001734 pre_entry =
Amir Levy9659e592016-10-27 18:08:27 +03001735 ((struct ipa_msg_meta *)header)->msg_len;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001736 pyld_sz = sizeof(struct ipa_msg_meta) +
1737 pre_entry;
Amir Levy9659e592016-10-27 18:08:27 +03001738 param = kzalloc(pyld_sz, GFP_KERNEL);
1739 if (!param) {
1740 retval = -ENOMEM;
1741 break;
1742 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001743 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001744 retval = -EFAULT;
1745 break;
1746 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001747 /* add check in case user-space module compromised */
1748 if (unlikely(((struct ipa_msg_meta *)param)->msg_len
1749 != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301750 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001751 ((struct ipa_msg_meta *)param)->msg_len,
1752 pre_entry);
1753 retval = -EFAULT;
1754 break;
1755 }
Amir Levy9659e592016-10-27 18:08:27 +03001756 if (ipa3_pull_msg((struct ipa_msg_meta *)param,
Amir Levy479cfdd2017-10-26 12:23:14 +03001757 (char *)param + sizeof(struct ipa_msg_meta),
1758 ((struct ipa_msg_meta *)param)->msg_len) !=
1759 ((struct ipa_msg_meta *)param)->msg_len) {
Amir Levy9659e592016-10-27 18:08:27 +03001760 retval = -1;
1761 break;
1762 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001763 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001764 retval = -EFAULT;
1765 break;
1766 }
1767 break;
1768 case IPA_IOC_RM_ADD_DEPENDENCY:
Michael Adisumarta3e350812017-09-18 14:54:36 -07001769		/* deprecated (treated as a no-op) when IPA PM is used */
 1770		if (ipa3_ctx->use_ipa_pm) {
 1771			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
			return 0;
		}
1772
Amir Levy479cfdd2017-10-26 12:23:14 +03001773 if (copy_from_user(&rm_depend, (const void __user *)arg,
1774 sizeof(struct ipa_ioc_rm_dependency))) {
Amir Levy9659e592016-10-27 18:08:27 +03001775 retval = -EFAULT;
1776 break;
1777 }
1778 retval = ipa_rm_add_dependency_from_ioctl(
1779 rm_depend.resource_name, rm_depend.depends_on_name);
1780 break;
1781 case IPA_IOC_RM_DEL_DEPENDENCY:
Michael Adisumarta3e350812017-09-18 14:54:36 -07001782		/* deprecated (treated as a no-op) when IPA PM is used */
 1783		if (ipa3_ctx->use_ipa_pm) {
 1784			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
			return 0;
		}
1785
Amir Levy479cfdd2017-10-26 12:23:14 +03001786 if (copy_from_user(&rm_depend, (const void __user *)arg,
1787 sizeof(struct ipa_ioc_rm_dependency))) {
Amir Levy9659e592016-10-27 18:08:27 +03001788 retval = -EFAULT;
1789 break;
1790 }
1791 retval = ipa_rm_delete_dependency_from_ioctl(
1792 rm_depend.resource_name, rm_depend.depends_on_name);
1793 break;
1794 case IPA_IOC_GENERATE_FLT_EQ:
1795 {
1796 struct ipa_ioc_generate_flt_eq flt_eq;
1797
Amir Levy479cfdd2017-10-26 12:23:14 +03001798 if (copy_from_user(&flt_eq, (const void __user *)arg,
Amir Levy9659e592016-10-27 18:08:27 +03001799 sizeof(struct ipa_ioc_generate_flt_eq))) {
1800 retval = -EFAULT;
1801 break;
1802 }
1803 if (ipahal_flt_generate_equation(flt_eq.ip,
1804 &flt_eq.attrib, &flt_eq.eq_attrib)) {
1805 retval = -EFAULT;
1806 break;
1807 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001808 if (copy_to_user((void __user *)arg, &flt_eq,
Amir Levy9659e592016-10-27 18:08:27 +03001809 sizeof(struct ipa_ioc_generate_flt_eq))) {
1810 retval = -EFAULT;
1811 break;
1812 }
1813 break;
1814 }
1815 case IPA_IOC_QUERY_EP_MAPPING:
1816 {
1817 retval = ipa3_get_ep_mapping(arg);
1818 break;
1819 }
1820 case IPA_IOC_QUERY_RT_TBL_INDEX:
Amir Levy479cfdd2017-10-26 12:23:14 +03001821 if (copy_from_user(header, (const void __user *)arg,
1822 sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
Amir Levy9659e592016-10-27 18:08:27 +03001823 retval = -EFAULT;
1824 break;
1825 }
1826 if (ipa3_query_rt_index(
Amir Levy479cfdd2017-10-26 12:23:14 +03001827 (struct ipa_ioc_get_rt_tbl_indx *)header)) {
Amir Levy9659e592016-10-27 18:08:27 +03001828 retval = -EFAULT;
1829 break;
1830 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001831 if (copy_to_user((void __user *)arg, header,
1832 sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
Amir Levy9659e592016-10-27 18:08:27 +03001833 retval = -EFAULT;
1834 break;
1835 }
1836 break;
1837 case IPA_IOC_WRITE_QMAPID:
Amir Levy479cfdd2017-10-26 12:23:14 +03001838 if (copy_from_user(header, (const void __user *)arg,
1839 sizeof(struct ipa_ioc_write_qmapid))) {
Amir Levy9659e592016-10-27 18:08:27 +03001840 retval = -EFAULT;
1841 break;
1842 }
1843 if (ipa3_write_qmap_id((struct ipa_ioc_write_qmapid *)header)) {
1844 retval = -EFAULT;
1845 break;
1846 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001847 if (copy_to_user((void __user *)arg, header,
1848 sizeof(struct ipa_ioc_write_qmapid))) {
Amir Levy9659e592016-10-27 18:08:27 +03001849 retval = -EFAULT;
1850 break;
1851 }
1852 break;
1853 case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD:
Mohammed Javidb4b5ef42017-08-29 01:05:46 +05301854 retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD, true);
Amir Levy9659e592016-10-27 18:08:27 +03001855 if (retval) {
1856 IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
1857 break;
1858 }
1859 break;
1860 case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL:
Mohammed Javidb4b5ef42017-08-29 01:05:46 +05301861 retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL, true);
Amir Levy9659e592016-10-27 18:08:27 +03001862 if (retval) {
1863 IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
1864 break;
1865 }
1866 break;
1867 case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED:
Mohammed Javidb4b5ef42017-08-29 01:05:46 +05301868 retval = ipa3_send_wan_msg(arg, WAN_EMBMS_CONNECT, false);
Amir Levy9659e592016-10-27 18:08:27 +03001869 if (retval) {
1870 IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
1871 break;
1872 }
1873 break;
1874 case IPA_IOC_ADD_HDR_PROC_CTX:
Amir Levy479cfdd2017-10-26 12:23:14 +03001875 if (copy_from_user(header, (const void __user *)arg,
Amir Levy9659e592016-10-27 18:08:27 +03001876 sizeof(struct ipa_ioc_add_hdr_proc_ctx))) {
1877 retval = -EFAULT;
1878 break;
1879 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001880 pre_entry =
1881 ((struct ipa_ioc_add_hdr_proc_ctx *)
1882 header)->num_proc_ctxs;
Amir Levy9659e592016-10-27 18:08:27 +03001883 pyld_sz =
1884 sizeof(struct ipa_ioc_add_hdr_proc_ctx) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001885 pre_entry * sizeof(struct ipa_hdr_proc_ctx_add);
Amir Levy9659e592016-10-27 18:08:27 +03001886 param = kzalloc(pyld_sz, GFP_KERNEL);
1887 if (!param) {
1888 retval = -ENOMEM;
1889 break;
1890 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001891 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001892 retval = -EFAULT;
1893 break;
1894 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001895 /* add check in case user-space module compromised */
1896 if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
1897 param)->num_proc_ctxs != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301898 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001899 ((struct ipa_ioc_add_hdr_proc_ctx *)
1900 param)->num_proc_ctxs, pre_entry);
1901 retval = -EFAULT;
1902 break;
1903 }
Amir Levy9659e592016-10-27 18:08:27 +03001904 if (ipa3_add_hdr_proc_ctx(
Skylar Chang68c37d82018-04-07 16:42:36 -07001905 (struct ipa_ioc_add_hdr_proc_ctx *)param, true)) {
Amir Levy9659e592016-10-27 18:08:27 +03001906 retval = -EFAULT;
1907 break;
1908 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001909 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001910 retval = -EFAULT;
1911 break;
1912 }
1913 break;
1914 case IPA_IOC_DEL_HDR_PROC_CTX:
Amir Levy479cfdd2017-10-26 12:23:14 +03001915 if (copy_from_user(header, (const void __user *)arg,
Amir Levy9659e592016-10-27 18:08:27 +03001916 sizeof(struct ipa_ioc_del_hdr_proc_ctx))) {
1917 retval = -EFAULT;
1918 break;
1919 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001920 pre_entry =
1921 ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls;
Amir Levy9659e592016-10-27 18:08:27 +03001922 pyld_sz =
1923 sizeof(struct ipa_ioc_del_hdr_proc_ctx) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001924 pre_entry * sizeof(struct ipa_hdr_proc_ctx_del);
Amir Levy9659e592016-10-27 18:08:27 +03001925 param = kzalloc(pyld_sz, GFP_KERNEL);
1926 if (!param) {
1927 retval = -ENOMEM;
1928 break;
1929 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001930 if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001931 retval = -EFAULT;
1932 break;
1933 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001934 /* add check in case user-space module compromised */
1935 if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
1936 param)->num_hdls != pre_entry)) {
Utkarsh Saxenae9782812017-05-26 17:20:32 +05301937 IPAERR_RL("current %d pre %d\n",
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001938 ((struct ipa_ioc_del_hdr_proc_ctx *)param)->
1939 num_hdls,
1940 pre_entry);
1941 retval = -EFAULT;
1942 break;
1943 }
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001944 if (ipa3_del_hdr_proc_ctx_by_user(
1945 (struct ipa_ioc_del_hdr_proc_ctx *)param, true)) {
Amir Levy9659e592016-10-27 18:08:27 +03001946 retval = -EFAULT;
1947 break;
1948 }
Amir Levy479cfdd2017-10-26 12:23:14 +03001949 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001950 retval = -EFAULT;
1951 break;
1952 }
1953 break;
1954
1955 case IPA_IOC_GET_HW_VERSION:
1956 pyld_sz = sizeof(enum ipa_hw_type);
1957 param = kzalloc(pyld_sz, GFP_KERNEL);
1958 if (!param) {
1959 retval = -ENOMEM;
1960 break;
1961 }
1962 memcpy(param, &ipa3_ctx->ipa_hw_type, pyld_sz);
Amir Levy479cfdd2017-10-26 12:23:14 +03001963 if (copy_to_user((void __user *)arg, param, pyld_sz)) {
Amir Levy9659e592016-10-27 18:08:27 +03001964 retval = -EFAULT;
1965 break;
1966 }
1967 break;
1968
Amir Levya5361ab2018-05-01 13:25:37 +03001969 case IPA_IOC_GET_VLAN_MODE:
1970 if (copy_from_user(&vlan_mode, (const void __user *)arg,
1971 sizeof(struct ipa_ioc_get_vlan_mode))) {
1972 retval = -EFAULT;
1973 break;
1974 }
1975 retval = ipa3_is_vlan_mode(
1976 vlan_mode.iface,
1977 &is_vlan_mode);
1978 if (retval)
1979 break;
1980
1981 vlan_mode.is_vlan_mode = is_vlan_mode;
1982
1983 if (copy_to_user((void __user *)arg,
1984 &vlan_mode,
1985 sizeof(struct ipa_ioc_get_vlan_mode))) {
1986 retval = -EFAULT;
1987 break;
1988 }
1989 break;
1990
Shihuan Liuc3174f52017-05-04 15:59:13 -07001991 case IPA_IOC_ADD_VLAN_IFACE:
1992 if (ipa3_send_vlan_l2tp_msg(arg, ADD_VLAN_IFACE)) {
1993 retval = -EFAULT;
1994 break;
1995 }
1996 break;
1997
1998 case IPA_IOC_DEL_VLAN_IFACE:
1999 if (ipa3_send_vlan_l2tp_msg(arg, DEL_VLAN_IFACE)) {
2000 retval = -EFAULT;
2001 break;
2002 }
2003 break;
Amir Levy4f8b4832018-06-05 15:48:03 +03002004 case IPA_IOC_ADD_BRIDGE_VLAN_MAPPING:
2005 if (ipa3_send_vlan_l2tp_msg(arg, ADD_BRIDGE_VLAN_MAPPING)) {
2006 retval = -EFAULT;
2007 break;
2008 }
2009 break;
2010 case IPA_IOC_DEL_BRIDGE_VLAN_MAPPING:
2011 if (ipa3_send_vlan_l2tp_msg(arg, DEL_BRIDGE_VLAN_MAPPING)) {
2012 retval = -EFAULT;
2013 break;
2014 }
2015 break;
Shihuan Liuc3174f52017-05-04 15:59:13 -07002016 case IPA_IOC_ADD_L2TP_VLAN_MAPPING:
2017 if (ipa3_send_vlan_l2tp_msg(arg, ADD_L2TP_VLAN_MAPPING)) {
2018 retval = -EFAULT;
2019 break;
2020 }
2021 break;
2022
2023 case IPA_IOC_DEL_L2TP_VLAN_MAPPING:
2024 if (ipa3_send_vlan_l2tp_msg(arg, DEL_L2TP_VLAN_MAPPING)) {
2025 retval = -EFAULT;
2026 break;
2027 }
2028 break;
2029
Skylar Chang68c37d82018-04-07 16:42:36 -07002030 case IPA_IOC_CLEANUP:
 2031		/* Route and filter rules will also be cleaned */
2032 IPADBG("Got IPA_IOC_CLEANUP\n");
2033 retval = ipa3_reset_hdr(true);
2034 memset(&nat_del, 0, sizeof(nat_del));
2035 nat_del.table_index = 0;
2036 retval = ipa3_nat_del_cmd(&nat_del);
2037 retval = ipa3_clean_modem_rule();
2038 break;
2039
2040 case IPA_IOC_QUERY_WLAN_CLIENT:
2041 IPADBG("Got IPA_IOC_QUERY_WLAN_CLIENT\n");
2042 retval = ipa3_resend_wlan_msg();
2043 break;
2044
Mohammed Javida0f23d92018-09-11 10:50:28 +05302045 case IPA_IOC_GSB_CONNECT:
2046 IPADBG("Got IPA_IOC_GSB_CONNECT\n");
2047 if (ipa3_send_gsb_msg(arg, IPA_GSB_CONNECT)) {
2048 retval = -EFAULT;
2049 break;
2050 }
2051 break;
2052
2053 case IPA_IOC_GSB_DISCONNECT:
2054 IPADBG("Got IPA_IOC_GSB_DISCONNECT\n");
2055 if (ipa3_send_gsb_msg(arg, IPA_GSB_DISCONNECT)) {
2056 retval = -EFAULT;
2057 break;
2058 }
2059 break;
2060
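	/*
	 * Reports the available producer/consumer endpoint pairs; only
	 * supported when ipa_config_is_auto is set. The pair array is
	 * written back through the user-supplied ep_info.info pointer.
	 */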
Mohammed Javidd636e0c2019-06-13 16:16:59 +05302061 case IPA_IOC_GET_PHERIPHERAL_EP_INFO:
 2062		IPADBG("Got IPA_IOC_GET_PHERIPHERAL_EP_INFO\n");
sivakanth reddy vaka2a5a5ee2019-12-08 13:50:56 +05302063		if (!ipa3_ctx->ipa_config_is_auto) {
2064 IPADBG("not an auto config: returning error\n");
2065 retval = -ENOTTY;
2066 break;
2067 }
Mohammed Javidd636e0c2019-06-13 16:16:59 +05302068 if (copy_from_user(&ep_info, (const void __user *)arg,
2069 sizeof(struct ipa_ioc_get_ep_info))) {
2070 IPAERR_RL("copy_from_user fails\n");
2071 retval = -EFAULT;
2072 break;
2073 }
2074
2075 if (ep_info.max_ep_pairs != QUERY_MAX_EP_PAIRS)
2076 IPAERR_RL("unexpected max_ep_pairs %d\n",
2077 ep_info.max_ep_pairs);
2078
2079 if (ep_info.ep_pair_size !=
2080 (QUERY_MAX_EP_PAIRS * sizeof(struct ipa_ep_pair_info)))
2081 IPAERR_RL("unexpected ep_pair_size %d\n",
 2082				ep_info.ep_pair_size);
2083
2084 uptr = ep_info.info;
2085 if (unlikely(!uptr)) {
2086 IPAERR_RL("unexpected NULL info\n");
2087 retval = -EFAULT;
2088 break;
2089 }
2090
2091 param = kzalloc(ep_info.ep_pair_size, GFP_KERNEL);
2092 if (!param) {
2093 IPAERR_RL("kzalloc fails\n");
2094 retval = -ENOMEM;
2095 break;
2096 }
2097
2098 retval = ipa3_get_ep_info(&ep_info, param);
2099 if (retval < 0) {
2100 IPAERR("ipa3_get_ep_info failed\n");
2101 retval = -EFAULT;
2102 break;
2103 }
2104
2105 if (copy_to_user((void __user *)uptr, param,
2106 ep_info.ep_pair_size)) {
2107 IPAERR_RL("copy_to_user fails\n");
2108 retval = -EFAULT;
2109 break;
2110 }
2111
2112 if (copy_to_user((void __user *)arg, &ep_info,
2113 sizeof(struct ipa_ioc_get_ep_info))) {
2114 IPAERR_RL("copy_to_user fails\n");
2115 retval = -EFAULT;
2116 break;
2117 }
2118 break;
2119
Amir Levy479cfdd2017-10-26 12:23:14 +03002120 default:
Amir Levy9659e592016-10-27 18:08:27 +03002121 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2122 return -ENOTTY;
2123 }
2124 kfree(param);
2125 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2126
2127 return retval;
2128}
2129
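/*
 * Illustrative user-space usage of the ioctl interface handled above.
 * This is only a sketch, not part of the driver: the "/dev/ipa" node
 * name and the minimal error handling are assumptions.
 *
 *	int fd = open("/dev/ipa", O_RDWR);
 *	enum ipa_hw_type hw_type;
 *
 *	if (fd >= 0) {
 *		if (ioctl(fd, IPA_IOC_GET_HW_VERSION, &hw_type) == 0)
 *			printf("IPA HW type %d\n", hw_type);
 *		close(fd);
 *	}
 */
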
2130/**
Skylar Chang68c37d82018-04-07 16:42:36 -07002131 * ipa3_setup_dflt_rt_tables() - Setup default routing tables
2132 *
2133 * Return codes:
2134 * 0: success
2135 * -ENOMEM: failed to allocate memory
2136 * -EPERM: failed to add the tables
2137 */
Amir Levy9659e592016-10-27 18:08:27 +03002138int ipa3_setup_dflt_rt_tables(void)
2139{
2140 struct ipa_ioc_add_rt_rule *rt_rule;
2141 struct ipa_rt_rule_add *rt_rule_entry;
2142
2143 rt_rule =
Amir Levy479cfdd2017-10-26 12:23:14 +03002144 kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
2145 sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
Amir Levy9659e592016-10-27 18:08:27 +03002146 if (!rt_rule) {
2147 IPAERR("fail to alloc mem\n");
2148 return -ENOMEM;
2149 }
2150 /* setup a default v4 route to point to Apps */
2151 rt_rule->num_rules = 1;
2152 rt_rule->commit = 1;
2153 rt_rule->ip = IPA_IP_v4;
2154 strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
Amir Levy479cfdd2017-10-26 12:23:14 +03002155 IPA_RESOURCE_NAME_MAX);
Amir Levy9659e592016-10-27 18:08:27 +03002156
2157 rt_rule_entry = &rt_rule->rules[0];
2158 rt_rule_entry->at_rear = 1;
2159 rt_rule_entry->rule.dst = IPA_CLIENT_APPS_LAN_CONS;
2160 rt_rule_entry->rule.hdr_hdl = ipa3_ctx->excp_hdr_hdl;
2161 rt_rule_entry->rule.retain_hdr = 1;
2162
2163 if (ipa3_add_rt_rule(rt_rule)) {
2164 IPAERR("fail to add dflt v4 rule\n");
2165 kfree(rt_rule);
2166 return -EPERM;
2167 }
2168 IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
2169 ipa3_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
2170
 2171	/* setup a default v6 route to point to Apps */
2172 rt_rule->ip = IPA_IP_v6;
2173 if (ipa3_add_rt_rule(rt_rule)) {
2174 IPAERR("fail to add dflt v6 rule\n");
2175 kfree(rt_rule);
2176 return -EPERM;
2177 }
2178 IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
2179 ipa3_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
2180
2181 /*
2182 * because these tables are the very first to be added, they will both
2183 * have the same index (0) which is essential for programming the
2184 * "route" end-point config
2185 */
2186
2187 kfree(rt_rule);
2188
2189 return 0;
2190}
2191
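/*
 * For reference, a user-space client can build an equivalent request and
 * hand it to the driver through IPA_IOC_ADD_RT_RULE. This is only an
 * illustrative sketch; the "/dev/ipa" node name, the table name and the
 * omitted error handling are assumptions, not taken from this file.
 *
 *	size_t sz = sizeof(struct ipa_ioc_add_rt_rule) +
 *		sizeof(struct ipa_rt_rule_add);
 *	struct ipa_ioc_add_rt_rule *req = calloc(1, sz);
 *	int fd = open("/dev/ipa", O_RDWR);
 *
 *	req->commit = 1;
 *	req->ip = IPA_IP_v4;
 *	req->num_rules = 1;
 *	strncpy(req->rt_tbl_name, "example.tbl", IPA_RESOURCE_NAME_MAX - 1);
 *	req->rules[0].at_rear = 1;
 *	req->rules[0].rule.dst = IPA_CLIENT_APPS_LAN_CONS;
 *	ioctl(fd, IPA_IOC_ADD_RT_RULE, req);
 *	close(fd);
 *	free(req);
 */
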
2192static int ipa3_setup_exception_path(void)
2193{
2194 struct ipa_ioc_add_hdr *hdr;
2195 struct ipa_hdr_add *hdr_entry;
2196 struct ipahal_reg_route route = { 0 };
2197 int ret;
2198
2199 /* install the basic exception header */
2200 hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
2201 sizeof(struct ipa_hdr_add), GFP_KERNEL);
2202 if (!hdr) {
2203 IPAERR("fail to alloc exception hdr\n");
2204 return -ENOMEM;
2205 }
2206 hdr->num_hdrs = 1;
2207 hdr->commit = 1;
2208 hdr_entry = &hdr->hdr[0];
2209
2210 strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, IPA_RESOURCE_NAME_MAX);
2211 hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH;
2212
2213 if (ipa3_add_hdr(hdr)) {
2214 IPAERR("fail to add exception hdr\n");
2215 ret = -EPERM;
2216 goto bail;
2217 }
2218
2219 if (hdr_entry->status) {
2220 IPAERR("fail to add exception hdr\n");
2221 ret = -EPERM;
2222 goto bail;
2223 }
2224
2225 ipa3_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
2226
2227 /* set the route register to pass exception packets to Apps */
2228 route.route_def_pipe = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
2229 route.route_frag_def_pipe = ipa3_get_ep_mapping(
2230 IPA_CLIENT_APPS_LAN_CONS);
2231 route.route_def_hdr_table = !ipa3_ctx->hdr_tbl_lcl;
2232 route.route_def_retain_hdr = 1;
2233
2234 if (ipa3_cfg_route(&route)) {
 2235		IPAERR("fail to configure default route\n");
2236 ret = -EPERM;
2237 goto bail;
2238 }
2239
2240 ret = 0;
2241bail:
2242 kfree(hdr);
2243 return ret;
2244}
2245
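/*
 * Helper for the SMEM init routines below: zero-fills a region of IPA
 * local SRAM by allocating a zeroed DMA-coherent buffer and copying it
 * into the region with a DMA_SHARED_MEM immediate command.
 */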
2246static int ipa3_init_smem_region(int memory_region_size,
2247 int memory_region_offset)
2248{
2249 struct ipahal_imm_cmd_dma_shared_mem cmd;
2250 struct ipahal_imm_cmd_pyld *cmd_pyld;
2251 struct ipa3_desc desc;
2252 struct ipa_mem_buffer mem;
2253 int rc;
2254
2255 if (memory_region_size == 0)
2256 return 0;
2257
2258 memset(&desc, 0, sizeof(desc));
2259 memset(&cmd, 0, sizeof(cmd));
2260 memset(&mem, 0, sizeof(mem));
2261
2262 mem.size = memory_region_size;
2263 mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
2264 &mem.phys_base, GFP_KERNEL);
2265 if (!mem.base) {
2266 IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
2267 return -ENOMEM;
2268 }
2269
2270 memset(mem.base, 0, mem.size);
2271 cmd.is_read = false;
2272 cmd.skip_pipeline_clear = false;
2273 cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
2274 cmd.size = mem.size;
2275 cmd.system_addr = mem.phys_base;
2276 cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
2277 memory_region_offset;
2278 cmd_pyld = ipahal_construct_imm_cmd(
2279 IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
2280 if (!cmd_pyld) {
 2281		IPAERR("failed to construct dma_shared_mem imm cmd\n");
 2282		dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
			mem.phys_base);
		return -ENOMEM;
2283 }
Amir Levy479cfdd2017-10-26 12:23:14 +03002284 ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
Amir Levy9659e592016-10-27 18:08:27 +03002285
2286 rc = ipa3_send_cmd(1, &desc);
2287 if (rc) {
2288 IPAERR("failed to send immediate command (error %d)\n", rc);
2289 rc = -EFAULT;
2290 }
2291
2292 ipahal_destroy_imm_cmd(cmd_pyld);
2293 dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
2294 mem.phys_base);
2295
2296 return rc;
2297}
2298
2299/**
Skylar Chang68c37d82018-04-07 16:42:36 -07002300 * ipa3_init_q6_smem() - Initialize Q6 general memory and
2301 * header memory regions in IPA.
2302 *
2303 * Return codes:
2304 * 0: success
2305 * -ENOMEM: failed to allocate dma memory
2306 * -EFAULT: failed to send IPA command to initialize the memory
2307 */
Amir Levy9659e592016-10-27 18:08:27 +03002308int ipa3_init_q6_smem(void)
2309{
2310 int rc;
2311
2312 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2313
2314 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_size),
2315 IPA_MEM_PART(modem_ofst));
2316 if (rc) {
2317 IPAERR("failed to initialize Modem RAM memory\n");
2318 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2319 return rc;
2320 }
2321
2322 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_size),
2323 IPA_MEM_PART(modem_hdr_ofst));
2324 if (rc) {
2325 IPAERR("failed to initialize Modem HDRs RAM memory\n");
2326 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2327 return rc;
2328 }
2329
2330 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_proc_ctx_size),
2331 IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
2332 if (rc) {
2333 IPAERR("failed to initialize Modem proc ctx RAM memory\n");
2334 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2335 return rc;
2336 }
2337
2338 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_comp_decomp_size),
2339 IPA_MEM_PART(modem_comp_decomp_ofst));
2340 if (rc) {
2341 IPAERR("failed to initialize Modem Comp/Decomp RAM memory\n");
2342 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2343 return rc;
2344 }
2345 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2346
2347 return rc;
2348}
2349
2350static void ipa3_destroy_imm(void *user1, int user2)
2351{
2352 ipahal_destroy_imm_cmd(user1);
2353}
2354
2355static void ipa3_q6_pipe_delay(bool delay)
2356{
2357 int client_idx;
2358 int ep_idx;
2359 struct ipa_ep_cfg_ctrl ep_ctrl;
2360
2361 memset(&ep_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
2362 ep_ctrl.ipa_ep_delay = delay;
2363
2364 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
2365 if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
2366 ep_idx = ipa3_get_ep_mapping(client_idx);
2367 if (ep_idx == -1)
2368 continue;
2369
2370 ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n,
2371 ep_idx, &ep_ctrl);
2372 }
2373 }
2374}
2375
2376static void ipa3_q6_avoid_holb(void)
2377{
2378 int ep_idx;
2379 int client_idx;
2380 struct ipa_ep_cfg_ctrl ep_suspend;
2381 struct ipa_ep_cfg_holb ep_holb;
2382
2383 memset(&ep_suspend, 0, sizeof(ep_suspend));
2384 memset(&ep_holb, 0, sizeof(ep_holb));
2385
2386 ep_suspend.ipa_ep_suspend = true;
2387 ep_holb.tmr_val = 0;
2388 ep_holb.en = 1;
2389
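	/*
	 * Suspend every Q6 consumer pipe (pre-IPAv4 only, see below) and
	 * enable HOLB drop with a zero timer so packets destined to the
	 * (dead) Q6 are dropped immediately instead of back-pressuring IPA.
	 */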
2390 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
2391 if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
2392 ep_idx = ipa3_get_ep_mapping(client_idx);
2393 if (ep_idx == -1)
2394 continue;
2395
Skylar Changde679dc2017-11-21 10:11:34 -08002396 /* from IPA 4.0 pipe suspend is not supported */
2397 if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
2398 ipahal_write_reg_n_fields(
2399 IPA_ENDP_INIT_CTRL_n,
2400 ep_idx, &ep_suspend);
2401
Amir Levy9659e592016-10-27 18:08:27 +03002402 /*
2403 * ipa3_cfg_ep_holb is not used here because we are
2404 * setting HOLB on Q6 pipes, and from APPS perspective
2405 * they are not valid, therefore, the above function
2406 * will fail.
2407 */
2408 ipahal_write_reg_n_fields(
2409 IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
2410 ep_idx, &ep_holb);
2411 ipahal_write_reg_n_fields(
2412 IPA_ENDP_INIT_HOL_BLOCK_EN_n,
2413 ep_idx, &ep_holb);
Amir Levy9659e592016-10-27 18:08:27 +03002414 }
2415 }
2416}
2417
Michael Adisumarta0090e542018-03-14 10:44:53 -07002418static void ipa3_halt_q6_gsi_channels(bool prod)
Skylar Chang94692c92017-03-01 09:07:11 -08002419{
2420 int ep_idx;
2421 int client_idx;
2422 const struct ipa_gsi_ep_config *gsi_ep_cfg;
Michael Adisumartaf01e9fd2017-08-31 12:23:51 -07002423 int i;
Skylar Chang94692c92017-03-01 09:07:11 -08002424 int ret;
2425 int code = 0;
2426
Michael Adisumarta0090e542018-03-14 10:44:53 -07002427 /* if prod flag is true, then we halt the producer channels also */
Skylar Chang94692c92017-03-01 09:07:11 -08002428 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
Michael Adisumarta0090e542018-03-14 10:44:53 -07002429 if (IPA_CLIENT_IS_Q6_CONS(client_idx)
2430 || (IPA_CLIENT_IS_Q6_PROD(client_idx) && prod)) {
Skylar Chang94692c92017-03-01 09:07:11 -08002431 ep_idx = ipa3_get_ep_mapping(client_idx);
2432 if (ep_idx == -1)
2433 continue;
2434
Skylar Changc1f15312017-05-09 14:14:32 -07002435 gsi_ep_cfg = ipa3_get_gsi_ep_info(client_idx);
Skylar Chang94692c92017-03-01 09:07:11 -08002436 if (!gsi_ep_cfg) {
2437 IPAERR("failed to get GSI config\n");
2438 ipa_assert();
2439 return;
2440 }
2441
2442 ret = gsi_halt_channel_ee(
2443 gsi_ep_cfg->ipa_gsi_chan_num, gsi_ep_cfg->ee,
2444 &code);
Michael Adisumartaf01e9fd2017-08-31 12:23:51 -07002445 for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY &&
2446 ret == -GSI_STATUS_AGAIN; i++) {
2447 IPADBG(
 2448			"ch %d ee %d code %d is busy, trying again\n",
2449 gsi_ep_cfg->ipa_gsi_chan_num,
2450 gsi_ep_cfg->ee,
2451 code);
2452 usleep_range(IPA_GSI_CHANNEL_HALT_MIN_SLEEP,
2453 IPA_GSI_CHANNEL_HALT_MAX_SLEEP);
2454 ret = gsi_halt_channel_ee(
2455 gsi_ep_cfg->ipa_gsi_chan_num,
2456 gsi_ep_cfg->ee, &code);
2457 }
Skylar Chang94692c92017-03-01 09:07:11 -08002458 if (ret == GSI_STATUS_SUCCESS)
2459 IPADBG("halted gsi ch %d ee %d with code %d\n",
2460 gsi_ep_cfg->ipa_gsi_chan_num,
2461 gsi_ep_cfg->ee,
2462 code);
2463 else
2464 IPAERR("failed to halt ch %d ee %d code %d\n",
2465 gsi_ep_cfg->ipa_gsi_chan_num,
2466 gsi_ep_cfg->ee,
2467 code);
2468 }
2469 }
2470}
2471
Amir Levy9659e592016-10-27 18:08:27 +03002472static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
2473 enum ipa_rule_type rlt)
2474{
2475 struct ipa3_desc *desc;
2476 struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
2477 struct ipahal_imm_cmd_pyld **cmd_pyld;
2478 int retval = 0;
2479 int pipe_idx;
2480 int flt_idx = 0;
2481 int num_cmds = 0;
2482 int index;
2483 u32 lcl_addr_mem_part;
2484 u32 lcl_hdr_sz;
2485 struct ipa_mem_buffer mem;
2486
2487 IPADBG("Entry\n");
2488
2489 if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
2490 IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
2491 return -EINVAL;
2492 }
2493
2494 /* Up to filtering pipes we have filtering tables */
2495 desc = kcalloc(ipa3_ctx->ep_flt_num, sizeof(struct ipa3_desc),
2496 GFP_KERNEL);
2497 if (!desc) {
2498 IPAERR("failed to allocate memory\n");
2499 return -ENOMEM;
2500 }
2501
2502 cmd_pyld = kcalloc(ipa3_ctx->ep_flt_num,
2503 sizeof(struct ipahal_imm_cmd_pyld *), GFP_KERNEL);
2504 if (!cmd_pyld) {
2505 IPAERR("failed to allocate memory\n");
2506 retval = -ENOMEM;
2507 goto free_desc;
2508 }
2509
2510 if (ip == IPA_IP_v4) {
2511 if (rlt == IPA_RULE_HASHABLE) {
2512 lcl_addr_mem_part = IPA_MEM_PART(v4_flt_hash_ofst);
2513 lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
2514 } else {
2515 lcl_addr_mem_part = IPA_MEM_PART(v4_flt_nhash_ofst);
2516 lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
2517 }
2518 } else {
2519 if (rlt == IPA_RULE_HASHABLE) {
2520 lcl_addr_mem_part = IPA_MEM_PART(v6_flt_hash_ofst);
2521 lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
2522 } else {
2523 lcl_addr_mem_part = IPA_MEM_PART(v6_flt_nhash_ofst);
2524 lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
2525 }
2526 }
2527
2528 retval = ipahal_flt_generate_empty_img(1, lcl_hdr_sz, lcl_hdr_sz,
Amir Levy4dc79be2017-02-01 19:18:35 +02002529 0, &mem, true);
Amir Levy9659e592016-10-27 18:08:27 +03002530 if (retval) {
2531 IPAERR("failed to generate flt single tbl empty img\n");
2532 goto free_cmd_pyld;
2533 }
2534
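	/*
	 * Walk all filtering-capable pipes and, for every pipe that the AP
	 * does not own or configure, point its filter table header at the
	 * single empty image allocated above, effectively dropping the
	 * modem's filter rules.
	 */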
2535 for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes; pipe_idx++) {
2536 if (!ipa_is_ep_support_flt(pipe_idx))
2537 continue;
2538
2539 /*
 2540		 * Iterate over all filtering pipes that are either not in use
 2541		 * by the AP (invalid) or connected but not configured by the AP.
2542 */
2543 if (!ipa3_ctx->ep[pipe_idx].valid ||
2544 ipa3_ctx->ep[pipe_idx].skip_ep_cfg) {
2545
Amir Levy479cfdd2017-10-26 12:23:14 +03002546 if (num_cmds >= ipa3_ctx->ep_flt_num) {
2547 IPAERR("number of commands is out of range\n");
2548 retval = -ENOBUFS;
2549 goto free_empty_img;
2550 }
2551
Amir Levy9659e592016-10-27 18:08:27 +03002552 cmd.is_read = false;
2553 cmd.skip_pipeline_clear = false;
2554 cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
2555 cmd.size = mem.size;
2556 cmd.system_addr = mem.phys_base;
2557 cmd.local_addr =
2558 ipa3_ctx->smem_restricted_bytes +
2559 lcl_addr_mem_part +
2560 ipahal_get_hw_tbl_hdr_width() +
2561 flt_idx * ipahal_get_hw_tbl_hdr_width();
2562 cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
2563 IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
2564 if (!cmd_pyld[num_cmds]) {
2565 IPAERR("fail construct dma_shared_mem cmd\n");
2566 retval = -ENOMEM;
2567 goto free_empty_img;
2568 }
Amir Levy479cfdd2017-10-26 12:23:14 +03002569 ipa3_init_imm_cmd_desc(&desc[num_cmds],
2570 cmd_pyld[num_cmds]);
2571 ++num_cmds;
Amir Levy9659e592016-10-27 18:08:27 +03002572 }
2573
Amir Levy479cfdd2017-10-26 12:23:14 +03002574 ++flt_idx;
Amir Levy9659e592016-10-27 18:08:27 +03002575 }
2576
2577 IPADBG("Sending %d descriptors for flt tbl clearing\n", num_cmds);
2578 retval = ipa3_send_cmd(num_cmds, desc);
2579 if (retval) {
2580 IPAERR("failed to send immediate command (err %d)\n", retval);
2581 retval = -EFAULT;
2582 }
2583
2584free_empty_img:
2585 ipahal_free_dma_mem(&mem);
2586free_cmd_pyld:
2587 for (index = 0; index < num_cmds; index++)
2588 ipahal_destroy_imm_cmd(cmd_pyld[index]);
2589 kfree(cmd_pyld);
2590free_desc:
2591 kfree(desc);
2592 return retval;
2593}
2594
2595static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip,
2596 enum ipa_rule_type rlt)
2597{
2598 struct ipa3_desc *desc;
2599 struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
2600 struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
2601 int retval = 0;
2602 u32 modem_rt_index_lo;
2603 u32 modem_rt_index_hi;
2604 u32 lcl_addr_mem_part;
2605 u32 lcl_hdr_sz;
2606 struct ipa_mem_buffer mem;
2607
2608 IPADBG("Entry\n");
2609
2610 if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
2611 IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
2612 return -EINVAL;
2613 }
2614
2615 if (ip == IPA_IP_v4) {
2616 modem_rt_index_lo = IPA_MEM_PART(v4_modem_rt_index_lo);
2617 modem_rt_index_hi = IPA_MEM_PART(v4_modem_rt_index_hi);
2618 if (rlt == IPA_RULE_HASHABLE) {
2619 lcl_addr_mem_part = IPA_MEM_PART(v4_rt_hash_ofst);
2620 lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
2621 } else {
2622 lcl_addr_mem_part = IPA_MEM_PART(v4_rt_nhash_ofst);
2623 lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
2624 }
2625 } else {
2626 modem_rt_index_lo = IPA_MEM_PART(v6_modem_rt_index_lo);
2627 modem_rt_index_hi = IPA_MEM_PART(v6_modem_rt_index_hi);
2628 if (rlt == IPA_RULE_HASHABLE) {
2629 lcl_addr_mem_part = IPA_MEM_PART(v6_rt_hash_ofst);
2630 lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
2631 } else {
2632 lcl_addr_mem_part = IPA_MEM_PART(v6_rt_nhash_ofst);
2633 lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
2634 }
2635 }
2636
2637 retval = ipahal_rt_generate_empty_img(
2638 modem_rt_index_hi - modem_rt_index_lo + 1,
Amir Levy4dc79be2017-02-01 19:18:35 +02002639 lcl_hdr_sz, lcl_hdr_sz, &mem, true);
Amir Levy9659e592016-10-27 18:08:27 +03002640 if (retval) {
2641 IPAERR("fail generate empty rt img\n");
2642 return -ENOMEM;
2643 }
2644
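	/*
	 * A single DMA_SHARED_MEM command rewrites the table headers of the
	 * entire modem-owned routing index range [modem_rt_index_lo..hi] to
	 * point at the empty image generated above.
	 */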
2645 desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
2646 if (!desc) {
 2647		IPAERR("failed to allocate memory\n");
 2648		retval = -ENOMEM;
		goto free_empty_img;
2649 }
2650
2651 cmd.is_read = false;
2652 cmd.skip_pipeline_clear = false;
2653 cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
2654 cmd.size = mem.size;
2655 cmd.system_addr = mem.phys_base;
2656 cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
2657 lcl_addr_mem_part +
2658 modem_rt_index_lo * ipahal_get_hw_tbl_hdr_width();
2659 cmd_pyld = ipahal_construct_imm_cmd(
2660 IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
2661 if (!cmd_pyld) {
2662 IPAERR("failed to construct dma_shared_mem imm cmd\n");
2663 retval = -ENOMEM;
2664 goto free_desc;
2665 }
Amir Levy479cfdd2017-10-26 12:23:14 +03002666 ipa3_init_imm_cmd_desc(desc, cmd_pyld);
Amir Levy9659e592016-10-27 18:08:27 +03002667
2668 IPADBG("Sending 1 descriptor for rt tbl clearing\n");
2669 retval = ipa3_send_cmd(1, desc);
2670 if (retval) {
2671 IPAERR("failed to send immediate command (err %d)\n", retval);
2672 retval = -EFAULT;
2673 }
2674
2675 ipahal_destroy_imm_cmd(cmd_pyld);
2676free_desc:
2677 kfree(desc);
2678free_empty_img:
2679 ipahal_free_dma_mem(&mem);
2680 return retval;
2681}
2682
2683static int ipa3_q6_clean_q6_tables(void)
2684{
2685 struct ipa3_desc *desc;
2686 struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
2687 struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
2688 int retval;
2689 struct ipahal_reg_fltrt_hash_flush flush;
2690 struct ipahal_reg_valmask valmask;
2691
2692 IPADBG("Entry\n");
2693
2694
2695 if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
2696 IPAERR("failed to clean q6 flt tbls (v4/hashable)\n");
2697 return -EFAULT;
2698 }
2699 if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
2700 IPAERR("failed to clean q6 flt tbls (v6/hashable)\n");
2701 return -EFAULT;
2702 }
2703 if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
2704 IPAERR("failed to clean q6 flt tbls (v4/non-hashable)\n");
2705 return -EFAULT;
2706 }
2707 if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
2708 IPAERR("failed to clean q6 flt tbls (v6/non-hashable)\n");
2709 return -EFAULT;
2710 }
2711
2712 if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
2713 IPAERR("failed to clean q6 rt tbls (v4/hashable)\n");
2714 return -EFAULT;
2715 }
2716 if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
2717 IPAERR("failed to clean q6 rt tbls (v6/hashable)\n");
2718 return -EFAULT;
2719 }
2720 if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
2721 IPAERR("failed to clean q6 rt tbls (v4/non-hashable)\n");
2722 return -EFAULT;
2723 }
2724 if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
2725 IPAERR("failed to clean q6 rt tbls (v6/non-hashable)\n");
2726 return -EFAULT;
2727 }
2728
2729 /* Flush rules cache */
2730 desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
2731 if (!desc) {
2732 IPAERR("failed to allocate memory\n");
2733 return -ENOMEM;
2734 }
2735
2736 flush.v4_flt = true;
2737 flush.v4_rt = true;
2738 flush.v6_flt = true;
2739 flush.v6_rt = true;
2740 ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
2741 reg_write_cmd.skip_pipeline_clear = false;
2742 reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
2743 reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
2744 reg_write_cmd.value = valmask.val;
2745 reg_write_cmd.value_mask = valmask.mask;
2746 cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
2747 &reg_write_cmd, false);
2748 if (!cmd_pyld) {
2749 IPAERR("fail construct register_write imm cmd\n");
2750 retval = -EFAULT;
2751 goto bail_desc;
2752 }
Amir Levy479cfdd2017-10-26 12:23:14 +03002753 ipa3_init_imm_cmd_desc(desc, cmd_pyld);
Amir Levy9659e592016-10-27 18:08:27 +03002754
2755 IPADBG("Sending 1 descriptor for tbls flush\n");
2756 retval = ipa3_send_cmd(1, desc);
2757 if (retval) {
2758 IPAERR("failed to send immediate command (err %d)\n", retval);
2759 retval = -EFAULT;
2760 }
2761
2762 ipahal_destroy_imm_cmd(cmd_pyld);
2763
2764bail_desc:
2765 kfree(desc);
2766 IPADBG("Done - retval = %d\n", retval);
2767 return retval;
2768}
2769
2770static int ipa3_q6_set_ex_path_to_apps(void)
2771{
2772 int ep_idx;
2773 int client_idx;
2774 struct ipa3_desc *desc;
2775 int num_descs = 0;
2776 int index;
2777 struct ipahal_imm_cmd_register_write reg_write;
2778 struct ipahal_imm_cmd_pyld *cmd_pyld;
2779 int retval;
Amir Levy9659e592016-10-27 18:08:27 +03002780
2781 desc = kcalloc(ipa3_ctx->ipa_num_pipes, sizeof(struct ipa3_desc),
2782 GFP_KERNEL);
2783 if (!desc) {
2784 IPAERR("failed to allocate memory\n");
2785 return -ENOMEM;
2786 }
2787
2788 /* Set the exception path to AP */
2789 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
2790 ep_idx = ipa3_get_ep_mapping(client_idx);
Michael Adisumarta74b05d92019-11-29 01:10:52 -08002791 if (ep_idx == -1 || (ep_idx >= IPA3_MAX_NUM_PIPES))
Amir Levy9659e592016-10-27 18:08:27 +03002792 continue;
2793
Skylar Chang53137112017-05-12 17:13:13 -07002794		/*
		 * Disable status on modem-controlled prod pipes, on pipes not
		 * configured by the AP, and on the embedded WAN prod when the
		 * modem owns its filtering.
		 */
2795 if (IPA_CLIENT_IS_Q6_PROD(client_idx) ||
2796 (ipa3_ctx->ep[ep_idx].valid &&
Skylar Changd8d8b432018-06-15 10:39:10 -07002797 ipa3_ctx->ep[ep_idx].skip_ep_cfg) ||
2798 (ipa3_ctx->ep[ep_idx].client == IPA_CLIENT_APPS_WAN_PROD
2799 && ipa3_ctx->modem_cfg_emb_pipe_flt)) {
Amir Levy5807be32017-04-19 14:35:12 +03002800 ipa_assert_on(num_descs >= ipa3_ctx->ipa_num_pipes);
2801
Skylar Changd8d8b432018-06-15 10:39:10 -07002802 ipa3_ctx->ep[ep_idx].status.status_en = false;
Amir Levy5807be32017-04-19 14:35:12 +03002803 reg_write.skip_pipeline_clear = false;
2804 reg_write.pipeline_clear_options =
2805 IPAHAL_HPS_CLEAR;
2806 reg_write.offset =
2807 ipahal_get_reg_n_ofst(IPA_ENDP_STATUS_n,
2808 ep_idx);
2809 reg_write.value = 0;
2810 reg_write.value_mask = ~0;
2811 cmd_pyld = ipahal_construct_imm_cmd(
2812 IPA_IMM_CMD_REGISTER_WRITE, &reg_write, false);
2813 if (!cmd_pyld) {
2814 IPAERR("fail construct register_write cmd\n");
2815 ipa_assert();
2816 return -EFAULT;
2817 }
2818
Amir Levy479cfdd2017-10-26 12:23:14 +03002819 ipa3_init_imm_cmd_desc(&desc[num_descs], cmd_pyld);
Amir Levy5807be32017-04-19 14:35:12 +03002820 desc[num_descs].callback = ipa3_destroy_imm;
2821 desc[num_descs].user1 = cmd_pyld;
Amir Levy479cfdd2017-10-26 12:23:14 +03002822 ++num_descs;
Amir Levy5807be32017-04-19 14:35:12 +03002823 }
Amir Levy9659e592016-10-27 18:08:27 +03002824 }
2825
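	/*
	 * ipa3_tag_process() queues the register-write descriptors together
	 * with a TAG command and waits (up to the timeout below) for the tag
	 * completion, so the status-disable writes above are known to have
	 * been executed by the IPA pipeline before we proceed.
	 */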
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002826 /* Will wait 500msecs for IPA tag process completion */
Amir Levy9659e592016-10-27 18:08:27 +03002827 retval = ipa3_tag_process(desc, num_descs,
2828 msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT));
2829 if (retval) {
2830 IPAERR("TAG process failed! (error %d)\n", retval);
2831 /* For timeout error ipa3_destroy_imm cb will destroy user1 */
2832 if (retval != -ETIME) {
2833 for (index = 0; index < num_descs; index++)
2834 if (desc[index].callback)
2835 desc[index].callback(desc[index].user1,
2836 desc[index].user2);
2837 retval = -EINVAL;
2838 }
2839 }
2840
2841 kfree(desc);
2842
2843 return retval;
2844}
2845
2846/**
Skylar Chang68c37d82018-04-07 16:42:36 -07002847 * ipa3_q6_pre_shutdown_cleanup() - A cleanup for all Q6 related configuration
2848 * in IPA HW. This is performed in case of SSR.
2849 *
2850 * This is a mandatory procedure, in case one of the steps fails, the
2851 * AP needs to restart.
2852 */
Amir Levy9659e592016-10-27 18:08:27 +03002853void ipa3_q6_pre_shutdown_cleanup(void)
2854{
2855 IPADBG_LOW("ENTER\n");
2856
2857 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2858
2859 ipa3_q6_pipe_delay(true);
2860 ipa3_q6_avoid_holb();
Mohammed Javidf109cf62019-07-02 13:16:54 +05302861 if (ipa3_ctx->ipa_config_is_mhi) {
Mohammed Javidd53feb82018-07-19 20:16:39 +05302862 ipa3_set_reset_client_cons_pipe_sus_holb(true,
2863 IPA_CLIENT_MHI_CONS);
Mohammed Javidf109cf62019-07-02 13:16:54 +05302864 if (ipa3_ctx->ipa_config_is_auto)
2865 ipa3_set_reset_client_cons_pipe_sus_holb(true,
2866 IPA_CLIENT_MHI2_CONS);
2867 }
2868
Amir Levy9659e592016-10-27 18:08:27 +03002869 if (ipa3_q6_clean_q6_tables()) {
2870 IPAERR("Failed to clean Q6 tables\n");
2871 BUG();
2872 }
2873 if (ipa3_q6_set_ex_path_to_apps()) {
2874 IPAERR("Failed to redirect exceptions to APPS\n");
2875 BUG();
2876 }
2877 /* Remove delay from Q6 PRODs to avoid pending descriptors
Skylar Chang68c37d82018-04-07 16:42:36 -07002878	 * during the pipe reset procedure
2879 */
Amir Levy9659e592016-10-27 18:08:27 +03002880 ipa3_q6_pipe_delay(false);
Mohammed Javidd53feb82018-07-19 20:16:39 +05302881 ipa3_set_reset_client_prod_pipe_delay(true,
2882 IPA_CLIENT_USB_PROD);
Mohammed Javida617b262018-03-19 16:55:00 +05302883
Amir Levy9659e592016-10-27 18:08:27 +03002884 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2885 IPADBG_LOW("Exit with success\n");
2886}
2887
2888/*
2889 * ipa3_q6_post_shutdown_cleanup() - As part of this cleanup
2890 * check if GSI channel related to Q6 producer client is empty.
2891 *
 2892 * Q6 GSI channel emptiness is needed to guarantee that no descriptors with
 2893 * invalid info are injected into IPA RX from IPA_IF while the modem restarts.
2894 */
2895void ipa3_q6_post_shutdown_cleanup(void)
2896{
2897 int client_idx;
Skylar Changc1f15312017-05-09 14:14:32 -07002898 int ep_idx;
Michael Adisumarta0090e542018-03-14 10:44:53 -07002899 bool prod = false;
Amir Levy9659e592016-10-27 18:08:27 +03002900
2901 IPADBG_LOW("ENTER\n");
Amir Levy9659e592016-10-27 18:08:27 +03002902
2903 if (!ipa3_ctx->uc_ctx.uc_loaded) {
2904 IPAERR("uC is not loaded. Skipping\n");
2905 return;
2906 }
2907
Skylar Chang94692c92017-03-01 09:07:11 -08002908 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2909
2910 /* Handle the issue where SUSPEND was removed for some reason */
2911 ipa3_q6_avoid_holb();
Michael Adisumarta0090e542018-03-14 10:44:53 -07002912
2913 /* halt both prod and cons channels starting at IPAv4 */
2914 if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
2915 prod = true;
2916 ipa3_halt_q6_gsi_channels(prod);
2917 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2918 IPADBG("Exit without consumer check\n");
2919 return;
2920 }
2921
2922 ipa3_halt_q6_gsi_channels(prod);
Skylar Chang94692c92017-03-01 09:07:11 -08002923
Amir Levy9659e592016-10-27 18:08:27 +03002924 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
2925 if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
Skylar Changc1f15312017-05-09 14:14:32 -07002926 ep_idx = ipa3_get_ep_mapping(client_idx);
2927 if (ep_idx == -1)
2928 continue;
2929
Amir Levy9659e592016-10-27 18:08:27 +03002930 if (ipa3_uc_is_gsi_channel_empty(client_idx)) {
2931 IPAERR("fail to validate Q6 ch emptiness %d\n",
2932 client_idx);
2933 BUG();
2934 return;
2935 }
2936 }
2937
2938 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2939 IPADBG_LOW("Exit with success\n");
2940}
2941
Ashok Vuyyuru27ede172019-01-28 15:35:55 +05302942/**
 2943 * ipa3_q6_pre_powerup_cleanup() - A cleanup routine for peripheral
2944 * configuration in IPA HW. This is performed in case of SSR.
2945 *
2946 * This is a mandatory procedure, in case one of the steps fails, the
2947 * AP needs to restart.
2948 */
2949void ipa3_q6_pre_powerup_cleanup(void)
2950{
2951 IPADBG_LOW("ENTER\n");
2952
2953 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
2954
2955 if (ipa3_ctx->ipa_config_is_auto)
2956 ipa3_set_reset_client_prod_pipe_delay(true,
2957 IPA_CLIENT_USB2_PROD);
2958 if (ipa3_ctx->ipa_config_is_mhi) {
2959 ipa3_set_reset_client_prod_pipe_delay(true,
2960 IPA_CLIENT_MHI_PROD);
2961 if (ipa3_ctx->ipa_config_is_auto)
2962 ipa3_set_reset_client_prod_pipe_delay(true,
2963 IPA_CLIENT_MHI2_PROD);
2964 }
2965
2966 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
2967 IPADBG_LOW("Exit with success\n");
2968}
2969
Amir Levy9659e592016-10-27 18:08:27 +03002970static inline void ipa3_sram_set_canary(u32 *sram_mmio, int offset)
2971{
2972 /* Set 4 bytes of CANARY before the offset */
2973 sram_mmio[(offset - 4) / 4] = IPA_MEM_CANARY_VAL;
2974}
2975
2976/**
Amir Levy9fadeca2017-04-25 10:18:32 +03002977 * _ipa_init_sram_v3() - Initialize IPA local SRAM.
Amir Levy9659e592016-10-27 18:08:27 +03002978 *
2979 * Return codes: 0 for success, negative value for failure
2980 */
Amir Levy9fadeca2017-04-25 10:18:32 +03002981int _ipa_init_sram_v3(void)
Amir Levy9659e592016-10-27 18:08:27 +03002982{
2983 u32 *ipa_sram_mmio;
2984 unsigned long phys_addr;
2985
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04002986 IPADBG(
2987 "ipa_wrapper_base(0x%08X) ipa_reg_base_ofst(0x%08X) IPA_SRAM_DIRECT_ACCESS_n(0x%08X) smem_restricted_bytes(0x%08X) smem_sz(0x%08X)\n",
2988 ipa3_ctx->ipa_wrapper_base,
2989 ipa3_ctx->ctrl->ipa_reg_base_ofst,
2990 ipahal_get_reg_n_ofst(
2991 IPA_SRAM_DIRECT_ACCESS_n,
2992 ipa3_ctx->smem_restricted_bytes / 4),
2993 ipa3_ctx->smem_restricted_bytes,
2994 ipa3_ctx->smem_sz);
2995
Amir Levy9659e592016-10-27 18:08:27 +03002996 phys_addr = ipa3_ctx->ipa_wrapper_base +
2997 ipa3_ctx->ctrl->ipa_reg_base_ofst +
2998 ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
2999 ipa3_ctx->smem_restricted_bytes / 4);
3000
3001 ipa_sram_mmio = ioremap(phys_addr, ipa3_ctx->smem_sz);
3002 if (!ipa_sram_mmio) {
3003 IPAERR("fail to ioremap IPA SRAM\n");
3004 return -ENOMEM;
3005 }
3006
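	/*
	 * Canaries are known guard words (IPA_MEM_CANARY_VAL) written into
	 * the 4 bytes just before selected partition offsets so that an
	 * overrun of the preceding region can be detected.
	 */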
3007 /* Consult with ipa_i.h on the location of the CANARY values */
3008 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst) - 4);
3009 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst));
3010 ipa3_sram_set_canary(ipa_sram_mmio,
3011 IPA_MEM_PART(v4_flt_nhash_ofst) - 4);
3012 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_nhash_ofst));
3013 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst) - 4);
3014 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst));
3015 ipa3_sram_set_canary(ipa_sram_mmio,
3016 IPA_MEM_PART(v6_flt_nhash_ofst) - 4);
3017 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_nhash_ofst));
3018 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst) - 4);
3019 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst));
3020 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst) - 4);
3021 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst));
3022 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst) - 4);
3023 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst));
3024 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst) - 4);
3025 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst));
3026 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst) - 4);
3027 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst));
3028 ipa3_sram_set_canary(ipa_sram_mmio,
3029 IPA_MEM_PART(modem_hdr_proc_ctx_ofst) - 4);
3030 ipa3_sram_set_canary(ipa_sram_mmio,
3031 IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
3032 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst) - 4);
3033 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst));
Amir Levy9fadeca2017-04-25 10:18:32 +03003034 ipa3_sram_set_canary(ipa_sram_mmio,
3035 (ipa_get_hw_type() >= IPA_HW_v3_5) ?
3036 IPA_MEM_PART(uc_event_ring_ofst) :
3037 IPA_MEM_PART(end_ofst));
Amir Levy9659e592016-10-27 18:08:27 +03003038
3039 iounmap(ipa_sram_mmio);
3040
3041 return 0;
3042}
3043
3044/**
3045 * _ipa_init_hdr_v3_0() - Initialize IPA header block.
3046 *
3047 * Return codes: 0 for success, negative value for failure
3048 */
3049int _ipa_init_hdr_v3_0(void)
3050{
Amir Levy479cfdd2017-10-26 12:23:14 +03003051 struct ipa3_desc desc;
Amir Levy9659e592016-10-27 18:08:27 +03003052 struct ipa_mem_buffer mem;
3053 struct ipahal_imm_cmd_hdr_init_local cmd = {0};
3054 struct ipahal_imm_cmd_pyld *cmd_pyld;
3055 struct ipahal_imm_cmd_dma_shared_mem dma_cmd = { 0 };
3056
3057 mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
3058 mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
3059 GFP_KERNEL);
3060 if (!mem.base) {
3061 IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
3062 return -ENOMEM;
3063 }
3064 memset(mem.base, 0, mem.size);
3065
3066 cmd.hdr_table_addr = mem.phys_base;
3067 cmd.size_hdr_table = mem.size;
3068 cmd.hdr_addr = ipa3_ctx->smem_restricted_bytes +
3069 IPA_MEM_PART(modem_hdr_ofst);
3070 cmd_pyld = ipahal_construct_imm_cmd(
3071 IPA_IMM_CMD_HDR_INIT_LOCAL, &cmd, false);
3072 if (!cmd_pyld) {
3073 IPAERR("fail to construct hdr_init_local imm cmd\n");
3074 dma_free_coherent(ipa3_ctx->pdev,
3075 mem.size, mem.base,
3076 mem.phys_base);
3077 return -EFAULT;
3078 }
Amir Levy479cfdd2017-10-26 12:23:14 +03003079 ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
Amir Levy9659e592016-10-27 18:08:27 +03003080 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
3081
3082 if (ipa3_send_cmd(1, &desc)) {
3083 IPAERR("fail to send immediate command\n");
3084 ipahal_destroy_imm_cmd(cmd_pyld);
3085 dma_free_coherent(ipa3_ctx->pdev,
3086 mem.size, mem.base,
3087 mem.phys_base);
3088 return -EFAULT;
3089 }
3090
3091 ipahal_destroy_imm_cmd(cmd_pyld);
3092 dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
3093
3094 mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) +
3095 IPA_MEM_PART(apps_hdr_proc_ctx_size);
3096 mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
3097 GFP_KERNEL);
3098 if (!mem.base) {
3099 IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
3100 return -ENOMEM;
3101 }
3102 memset(mem.base, 0, mem.size);
Amir Levy9659e592016-10-27 18:08:27 +03003103
3104 dma_cmd.is_read = false;
3105 dma_cmd.skip_pipeline_clear = false;
3106 dma_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
3107 dma_cmd.system_addr = mem.phys_base;
3108 dma_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
3109 IPA_MEM_PART(modem_hdr_proc_ctx_ofst);
3110 dma_cmd.size = mem.size;
3111 cmd_pyld = ipahal_construct_imm_cmd(
3112 IPA_IMM_CMD_DMA_SHARED_MEM, &dma_cmd, false);
3113 if (!cmd_pyld) {
3114 IPAERR("fail to construct dma_shared_mem imm\n");
3115 dma_free_coherent(ipa3_ctx->pdev,
3116 mem.size, mem.base,
3117 mem.phys_base);
3118 return -EFAULT;
3119 }
Amir Levy479cfdd2017-10-26 12:23:14 +03003120 ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
Amir Levy9659e592016-10-27 18:08:27 +03003121 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
3122
3123 if (ipa3_send_cmd(1, &desc)) {
3124 IPAERR("fail to send immediate command\n");
3125 ipahal_destroy_imm_cmd(cmd_pyld);
3126 dma_free_coherent(ipa3_ctx->pdev,
3127 mem.size,
3128 mem.base,
3129 mem.phys_base);
3130 return -EFAULT;
3131 }
3132 ipahal_destroy_imm_cmd(cmd_pyld);
3133
3134 ipahal_write_reg(IPA_LOCAL_PKT_PROC_CNTXT_BASE, dma_cmd.local_addr);
3135
3136 dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
3137
3138 return 0;
3139}
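
/*
 * Note: the header/routing/filtering init helpers in this block share the
 * same immediate-command pattern. A minimal sketch (illustrative only; the
 * exact command type, payload struct and error handling differ per helper):
 *
 *	cmd_pyld = ipahal_construct_imm_cmd(<IMM_CMD_TYPE>, &cmd, false);
 *	ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
 *	ipa3_send_cmd(1, &desc);
 *	ipahal_destroy_imm_cmd(cmd_pyld);
 *
 * The DMA buffer holding the local table/header image is freed once the
 * command has been processed.
 */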
3140
3141/**
3142 * _ipa_init_rt4_v3() - Initialize IPA routing block for IPv4.
3143 *
3144 * Return codes: 0 for success, negative value for failure
3145 */
3146int _ipa_init_rt4_v3(void)
3147{
Amir Levy479cfdd2017-10-26 12:23:14 +03003148 struct ipa3_desc desc;
Amir Levy9659e592016-10-27 18:08:27 +03003149 struct ipa_mem_buffer mem;
3150 struct ipahal_imm_cmd_ip_v4_routing_init v4_cmd;
3151 struct ipahal_imm_cmd_pyld *cmd_pyld;
3152 int i;
3153 int rc = 0;
3154
3155 for (i = IPA_MEM_PART(v4_modem_rt_index_lo);
3156 i <= IPA_MEM_PART(v4_modem_rt_index_hi);
3157 i++)
3158 ipa3_ctx->rt_idx_bitmap[IPA_IP_v4] |= (1 << i);
3159 IPADBG("v4 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v4]);
3160
3161 rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v4_rt_num_index),
3162 IPA_MEM_PART(v4_rt_hash_size), IPA_MEM_PART(v4_rt_nhash_size),
Amir Levy4dc79be2017-02-01 19:18:35 +02003163 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03003164 if (rc) {
3165 IPAERR("fail generate empty v4 rt img\n");
3166 return rc;
3167 }
3168
3169 v4_cmd.hash_rules_addr = mem.phys_base;
3170 v4_cmd.hash_rules_size = mem.size;
3171 v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
3172 IPA_MEM_PART(v4_rt_hash_ofst);
3173 v4_cmd.nhash_rules_addr = mem.phys_base;
3174 v4_cmd.nhash_rules_size = mem.size;
3175 v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
3176 IPA_MEM_PART(v4_rt_nhash_ofst);
3177 IPADBG("putting hashable routing IPv4 rules to phys 0x%x\n",
3178 v4_cmd.hash_local_addr);
3179 IPADBG("putting non-hashable routing IPv4 rules to phys 0x%x\n",
3180 v4_cmd.nhash_local_addr);
3181 cmd_pyld = ipahal_construct_imm_cmd(
3182 IPA_IMM_CMD_IP_V4_ROUTING_INIT, &v4_cmd, false);
3183 if (!cmd_pyld) {
3184 IPAERR("fail construct ip_v4_rt_init imm cmd\n");
3185 rc = -EPERM;
3186 goto free_mem;
3187 }
3188
Amir Levy479cfdd2017-10-26 12:23:14 +03003189 ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
Amir Levy9659e592016-10-27 18:08:27 +03003190 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
3191
3192 if (ipa3_send_cmd(1, &desc)) {
3193 IPAERR("fail to send immediate command\n");
3194 rc = -EFAULT;
3195 }
3196
3197 ipahal_destroy_imm_cmd(cmd_pyld);
3198
3199free_mem:
3200 ipahal_free_dma_mem(&mem);
3201 return rc;
3202}
3203
3204/**
3205 * _ipa_init_rt6_v3() - Initialize IPA routing block for IPv6.
3206 *
3207 * Return codes: 0 for success, negative value for failure
3208 */
3209int _ipa_init_rt6_v3(void)
3210{
Amir Levy479cfdd2017-10-26 12:23:14 +03003211 struct ipa3_desc desc;
Amir Levy9659e592016-10-27 18:08:27 +03003212 struct ipa_mem_buffer mem;
3213 struct ipahal_imm_cmd_ip_v6_routing_init v6_cmd;
3214 struct ipahal_imm_cmd_pyld *cmd_pyld;
3215 int i;
3216 int rc = 0;
3217
3218 for (i = IPA_MEM_PART(v6_modem_rt_index_lo);
3219 i <= IPA_MEM_PART(v6_modem_rt_index_hi);
3220 i++)
3221 ipa3_ctx->rt_idx_bitmap[IPA_IP_v6] |= (1 << i);
3222 IPADBG("v6 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v6]);
3223
3224 rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v6_rt_num_index),
3225 IPA_MEM_PART(v6_rt_hash_size), IPA_MEM_PART(v6_rt_nhash_size),
Amir Levy4dc79be2017-02-01 19:18:35 +02003226 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03003227 if (rc) {
3228 IPAERR("fail generate empty v6 rt img\n");
3229 return rc;
3230 }
3231
3232 v6_cmd.hash_rules_addr = mem.phys_base;
3233 v6_cmd.hash_rules_size = mem.size;
3234 v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
3235 IPA_MEM_PART(v6_rt_hash_ofst);
3236 v6_cmd.nhash_rules_addr = mem.phys_base;
3237 v6_cmd.nhash_rules_size = mem.size;
3238 v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
3239 IPA_MEM_PART(v6_rt_nhash_ofst);
3240 IPADBG("putting hashable routing IPv6 rules to phys 0x%x\n",
3241 v6_cmd.hash_local_addr);
3242 IPADBG("putting non-hashable routing IPv6 rules to phys 0x%x\n",
3243 v6_cmd.nhash_local_addr);
3244 cmd_pyld = ipahal_construct_imm_cmd(
3245 IPA_IMM_CMD_IP_V6_ROUTING_INIT, &v6_cmd, false);
3246 if (!cmd_pyld) {
3247 IPAERR("fail construct ip_v6_rt_init imm cmd\n");
3248 rc = -EPERM;
3249 goto free_mem;
3250 }
3251
Amir Levy479cfdd2017-10-26 12:23:14 +03003252 ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
Amir Levy9659e592016-10-27 18:08:27 +03003253 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
3254
3255 if (ipa3_send_cmd(1, &desc)) {
3256 IPAERR("fail to send immediate command\n");
3257 rc = -EFAULT;
3258 }
3259
3260 ipahal_destroy_imm_cmd(cmd_pyld);
3261
3262free_mem:
3263 ipahal_free_dma_mem(&mem);
3264 return rc;
3265}
3266
3267/**
3268 * _ipa_init_flt4_v3() - Initialize IPA filtering block for IPv4.
3269 *
3270 * Return codes: 0 for success, negative value for failure
3271 */
3272int _ipa_init_flt4_v3(void)
3273{
Amir Levy479cfdd2017-10-26 12:23:14 +03003274 struct ipa3_desc desc;
Amir Levy9659e592016-10-27 18:08:27 +03003275 struct ipa_mem_buffer mem;
3276 struct ipahal_imm_cmd_ip_v4_filter_init v4_cmd;
3277 struct ipahal_imm_cmd_pyld *cmd_pyld;
3278 int rc;
3279
3280 rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
3281 IPA_MEM_PART(v4_flt_hash_size),
3282 IPA_MEM_PART(v4_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
Amir Levy4dc79be2017-02-01 19:18:35 +02003283 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03003284 if (rc) {
3285 IPAERR("fail generate empty v4 flt img\n");
3286 return rc;
3287 }
3288
3289 v4_cmd.hash_rules_addr = mem.phys_base;
3290 v4_cmd.hash_rules_size = mem.size;
3291 v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
3292 IPA_MEM_PART(v4_flt_hash_ofst);
3293 v4_cmd.nhash_rules_addr = mem.phys_base;
3294 v4_cmd.nhash_rules_size = mem.size;
3295 v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
3296 IPA_MEM_PART(v4_flt_nhash_ofst);
3297 IPADBG("putting hashable filtering IPv4 rules to phys 0x%x\n",
3298 v4_cmd.hash_local_addr);
3299 IPADBG("putting non-hashable filtering IPv4 rules to phys 0x%x\n",
3300 v4_cmd.nhash_local_addr);
3301 cmd_pyld = ipahal_construct_imm_cmd(
3302 IPA_IMM_CMD_IP_V4_FILTER_INIT, &v4_cmd, false);
3303 if (!cmd_pyld) {
3304 IPAERR("fail construct ip_v4_flt_init imm cmd\n");
3305 rc = -EPERM;
3306 goto free_mem;
3307 }
3308
Amir Levy479cfdd2017-10-26 12:23:14 +03003309 ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
Amir Levy9659e592016-10-27 18:08:27 +03003310 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
3311
3312 if (ipa3_send_cmd(1, &desc)) {
3313 IPAERR("fail to send immediate command\n");
3314 rc = -EFAULT;
3315 }
3316
3317 ipahal_destroy_imm_cmd(cmd_pyld);
3318
3319free_mem:
3320 ipahal_free_dma_mem(&mem);
3321 return rc;
3322}
3323
3324/**
3325 * _ipa_init_flt6_v3() - Initialize IPA filtering block for IPv6.
3326 *
3327 * Return codes: 0 for success, negative value for failure
3328 */
3329int _ipa_init_flt6_v3(void)
3330{
Amir Levy479cfdd2017-10-26 12:23:14 +03003331 struct ipa3_desc desc;
Amir Levy9659e592016-10-27 18:08:27 +03003332 struct ipa_mem_buffer mem;
3333 struct ipahal_imm_cmd_ip_v6_filter_init v6_cmd;
3334 struct ipahal_imm_cmd_pyld *cmd_pyld;
3335 int rc;
3336
3337 rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
3338 IPA_MEM_PART(v6_flt_hash_size),
3339 IPA_MEM_PART(v6_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
Amir Levy4dc79be2017-02-01 19:18:35 +02003340 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03003341 if (rc) {
3342 IPAERR("fail generate empty v6 flt img\n");
3343 return rc;
3344 }
3345
3346 v6_cmd.hash_rules_addr = mem.phys_base;
3347 v6_cmd.hash_rules_size = mem.size;
3348 v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
3349 IPA_MEM_PART(v6_flt_hash_ofst);
3350 v6_cmd.nhash_rules_addr = mem.phys_base;
3351 v6_cmd.nhash_rules_size = mem.size;
3352 v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
3353 IPA_MEM_PART(v6_flt_nhash_ofst);
3354 IPADBG("putting hashable filtering IPv6 rules to phys 0x%x\n",
3355 v6_cmd.hash_local_addr);
3356 IPADBG("putting non-hashable filtering IPv6 rules to phys 0x%x\n",
3357 v6_cmd.nhash_local_addr);
3358
3359 cmd_pyld = ipahal_construct_imm_cmd(
3360 IPA_IMM_CMD_IP_V6_FILTER_INIT, &v6_cmd, false);
3361 if (!cmd_pyld) {
3362 IPAERR("fail construct ip_v6_flt_init imm cmd\n");
3363 rc = -EPERM;
3364 goto free_mem;
3365 }
3366
Amir Levy479cfdd2017-10-26 12:23:14 +03003367 ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
Amir Levy9659e592016-10-27 18:08:27 +03003368 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
3369
3370 if (ipa3_send_cmd(1, &desc)) {
3371 IPAERR("fail to send immediate command\n");
3372 rc = -EFAULT;
3373 }
3374
3375 ipahal_destroy_imm_cmd(cmd_pyld);
3376
3377free_mem:
3378 ipahal_free_dma_mem(&mem);
3379 return rc;
3380}
3381
3382static int ipa3_setup_flt_hash_tuple(void)
3383{
3384 int pipe_idx;
3385 struct ipahal_reg_hash_tuple tuple;
3386
3387 memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
3388
3389 for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes; pipe_idx++) {
3390 if (!ipa_is_ep_support_flt(pipe_idx))
3391 continue;
3392
3393 if (ipa_is_modem_pipe(pipe_idx))
3394 continue;
3395
3396 if (ipa3_set_flt_tuple_mask(pipe_idx, &tuple)) {
3397 IPAERR("failed to setup pipe %d flt tuple\n", pipe_idx);
3398 return -EFAULT;
3399 }
3400 }
3401
3402 return 0;
3403}
3404
3405static int ipa3_setup_rt_hash_tuple(void)
3406{
3407 int tbl_idx;
3408 struct ipahal_reg_hash_tuple tuple;
3409
3410 memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
3411
3412 for (tbl_idx = 0;
3413 tbl_idx < max(IPA_MEM_PART(v6_rt_num_index),
3414 IPA_MEM_PART(v4_rt_num_index));
3415 tbl_idx++) {
3416
3417 if (tbl_idx >= IPA_MEM_PART(v4_modem_rt_index_lo) &&
3418 tbl_idx <= IPA_MEM_PART(v4_modem_rt_index_hi))
3419 continue;
3420
3421 if (tbl_idx >= IPA_MEM_PART(v6_modem_rt_index_lo) &&
3422 tbl_idx <= IPA_MEM_PART(v6_modem_rt_index_hi))
3423 continue;
3424
3425 if (ipa3_set_rt_tuple_mask(tbl_idx, &tuple)) {
3426 IPAERR("failed to setup tbl %d rt tuple\n", tbl_idx);
3427 return -EFAULT;
3428 }
3429 }
3430
3431 return 0;
3432}
3433
3434static int ipa3_setup_apps_pipes(void)
3435{
3436 struct ipa_sys_connect_params sys_in;
3437 int result = 0;
3438
3439 if (ipa3_ctx->gsi_ch20_wa) {
3440 IPADBG("Allocating GSI physical channel 20\n");
3441 result = ipa_gsi_ch20_wa();
3442 if (result) {
3443 IPAERR("ipa_gsi_ch20_wa failed %d\n", result);
Ghanim Fodic6b67492017-03-15 14:19:56 +02003444 goto fail_ch20_wa;
Amir Levy9659e592016-10-27 18:08:27 +03003445 }
3446 }
3447
Skylar Changd407e592017-03-30 11:25:30 -07003448 /* allocate the common PROD event ring */
3449 if (ipa3_alloc_common_event_ring()) {
3450 IPAERR("ipa3_alloc_common_event_ring failed.\n");
3451 result = -EPERM;
3452 goto fail_ch20_wa;
3453 }
3454
Amir Levy9659e592016-10-27 18:08:27 +03003455 /* CMD OUT (AP->IPA) */
3456 memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
3457 sys_in.client = IPA_CLIENT_APPS_CMD_PROD;
3458 sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
3459 sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
3460 sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS;
3461 if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_cmd)) {
Ghanim Fodic6b67492017-03-15 14:19:56 +02003462 IPAERR(":setup sys pipe (APPS_CMD_PROD) failed.\n");
Amir Levy9659e592016-10-27 18:08:27 +03003463 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003464 goto fail_ch20_wa;
Amir Levy9659e592016-10-27 18:08:27 +03003465 }
3466 IPADBG("Apps to IPA cmd pipe is connected\n");
3467
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04003468 IPADBG("Will initialize SRAM\n");
Amir Levy9659e592016-10-27 18:08:27 +03003469 ipa3_ctx->ctrl->ipa_init_sram();
3470 IPADBG("SRAM initialized\n");
3471
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04003472 IPADBG("Will initialize HDR\n");
Amir Levy9659e592016-10-27 18:08:27 +03003473 ipa3_ctx->ctrl->ipa_init_hdr();
3474 IPADBG("HDR initialized\n");
3475
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04003476 IPADBG("Will initialize V4 RT\n");
Amir Levy9659e592016-10-27 18:08:27 +03003477 ipa3_ctx->ctrl->ipa_init_rt4();
3478 IPADBG("V4 RT initialized\n");
3479
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04003480 IPADBG("Will initialize V6 RT\n");
Amir Levy9659e592016-10-27 18:08:27 +03003481 ipa3_ctx->ctrl->ipa_init_rt6();
3482 IPADBG("V6 RT initialized\n");
3483
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04003484 IPADBG("Will initialize V4 FLT\n");
Amir Levy9659e592016-10-27 18:08:27 +03003485 ipa3_ctx->ctrl->ipa_init_flt4();
3486 IPADBG("V4 FLT initialized\n");
3487
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04003488 IPADBG("Will initialize V6 FLT\n");
Amir Levy9659e592016-10-27 18:08:27 +03003489 ipa3_ctx->ctrl->ipa_init_flt6();
3490 IPADBG("V6 FLT initialized\n");
3491
3492 if (ipa3_setup_flt_hash_tuple()) {
3493 IPAERR(":fail to configure flt hash tuple\n");
3494 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003495 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003496 }
3497 IPADBG("flt hash tuple is configured\n");
3498
3499 if (ipa3_setup_rt_hash_tuple()) {
3500 IPAERR(":fail to configure rt hash tuple\n");
3501 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003502 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003503 }
3504 IPADBG("rt hash tuple is configured\n");
3505
3506 if (ipa3_setup_exception_path()) {
3507 IPAERR(":fail to setup excp path\n");
3508 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003509 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003510 }
3511 IPADBG("Exception path was successfully set");
3512
3513 if (ipa3_setup_dflt_rt_tables()) {
3514 IPAERR(":fail to setup dflt routes\n");
3515 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003516 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003517 }
3518 IPADBG("default routing was set\n");
3519
Ghanim Fodic6b67492017-03-15 14:19:56 +02003520 /* LAN IN (IPA->AP) */
Amir Levy9659e592016-10-27 18:08:27 +03003521 memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
3522 sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
3523 sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
3524 sys_in.notify = ipa3_lan_rx_cb;
3525 sys_in.priv = NULL;
3526 sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH;
3527 sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
3528 sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
3529 sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
3530 sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
3531 sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
3532 sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
3533 sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_ENABLE_CS_OFFLOAD_DL;
3534
3535 /**
3536 * ipa_lan_rx_cb() is intended to notify the source EP about a packet
3537 * being received on the LAN_CONS via calling the source EP call-back.
3538 * There could be a race condition with calling this call-back. Another
3539 * thread may nullify it - e.g. on EP disconnect.
3540 * This lock is intended to protect access to the source EP call-back.
3541 */
3542 spin_lock_init(&ipa3_ctx->disconnect_lock);
3543 if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
Ghanim Fodic6b67492017-03-15 14:19:56 +02003544 IPAERR(":setup sys pipe (LAN_CONS) failed.\n");
Amir Levy9659e592016-10-27 18:08:27 +03003545 result = -EPERM;
Ghanim Fodic6b67492017-03-15 14:19:56 +02003546 goto fail_flt_hash_tuple;
Amir Levy9659e592016-10-27 18:08:27 +03003547 }
3548
Ghanim Fodic6b67492017-03-15 14:19:56 +02003549 /* LAN OUT (AP->IPA) */
Amir Levy54fe4d32017-03-16 11:21:49 +02003550 if (!ipa3_ctx->ipa_config_is_mhi) {
3551 memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
3552 sys_in.client = IPA_CLIENT_APPS_LAN_PROD;
3553 sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ;
3554 sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
3555 if (ipa3_setup_sys_pipe(&sys_in,
3556 &ipa3_ctx->clnt_hdl_data_out)) {
3557 IPAERR(":setup sys pipe (LAN_PROD) failed.\n");
3558 result = -EPERM;
3559 goto fail_lan_data_out;
3560 }
Amir Levy9659e592016-10-27 18:08:27 +03003561 }
3562
3563 return 0;
3564
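/* error unwind: tear down resources in reverse order of their setup */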
Ghanim Fodic6b67492017-03-15 14:19:56 +02003565fail_lan_data_out:
Amir Levy9659e592016-10-27 18:08:27 +03003566 ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
Ghanim Fodic6b67492017-03-15 14:19:56 +02003567fail_flt_hash_tuple:
Amir Levy9659e592016-10-27 18:08:27 +03003568 if (ipa3_ctx->dflt_v6_rt_rule_hdl)
3569 __ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
3570 if (ipa3_ctx->dflt_v4_rt_rule_hdl)
3571 __ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
3572 if (ipa3_ctx->excp_hdr_hdl)
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02003573 __ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
Amir Levy9659e592016-10-27 18:08:27 +03003574 ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
Ghanim Fodic6b67492017-03-15 14:19:56 +02003575fail_ch20_wa:
Amir Levy9659e592016-10-27 18:08:27 +03003576 return result;
3577}
3578
3579static void ipa3_teardown_apps_pipes(void)
3580{
Amir Levy54fe4d32017-03-16 11:21:49 +02003581 if (!ipa3_ctx->ipa_config_is_mhi)
3582 ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out);
Amir Levy9659e592016-10-27 18:08:27 +03003583 ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
3584 __ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
3585 __ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02003586 __ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
Amir Levy9659e592016-10-27 18:08:27 +03003587 ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
3588}
3589
3590#ifdef CONFIG_COMPAT
Amir Levy479cfdd2017-10-26 12:23:14 +03003591
3592static long compat_ipa3_nat_ipv6ct_alloc_table(unsigned long arg,
3593 int (alloc_func)(struct ipa_ioc_nat_ipv6ct_table_alloc *))
3594{
3595 long retval;
3596 struct ipa_ioc_nat_ipv6ct_table_alloc32 table_alloc32;
3597 struct ipa_ioc_nat_ipv6ct_table_alloc table_alloc;
3598
3599 retval = copy_from_user(&table_alloc32, (const void __user *)arg,
3600 sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc32));
3601 if (retval)
3602 return retval;
3603
3604 table_alloc.size = (size_t)table_alloc32.size;
3605 table_alloc.offset = (off_t)table_alloc32.offset;
3606
3607 retval = alloc_func(&table_alloc);
3608 if (retval)
3609 return retval;
3610
3611 if (table_alloc.offset) {
3612 table_alloc32.offset = (compat_off_t)table_alloc.offset;
3613 retval = copy_to_user((void __user *)arg, &table_alloc32,
3614 sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc32));
3615 }
3616
3617 return retval;
3618}
3619
Amir Levy9659e592016-10-27 18:08:27 +03003620long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3621{
Amir Levy479cfdd2017-10-26 12:23:14 +03003622 long retval = 0;
Amir Levy9659e592016-10-27 18:08:27 +03003623 struct ipa3_ioc_nat_alloc_mem32 nat_mem32;
3624 struct ipa_ioc_nat_alloc_mem nat_mem;
3625
3626 switch (cmd) {
3627 case IPA_IOC_ADD_HDR32:
3628 cmd = IPA_IOC_ADD_HDR;
3629 break;
3630 case IPA_IOC_DEL_HDR32:
3631 cmd = IPA_IOC_DEL_HDR;
3632 break;
3633 case IPA_IOC_ADD_RT_RULE32:
3634 cmd = IPA_IOC_ADD_RT_RULE;
3635 break;
3636 case IPA_IOC_DEL_RT_RULE32:
3637 cmd = IPA_IOC_DEL_RT_RULE;
3638 break;
3639 case IPA_IOC_ADD_FLT_RULE32:
3640 cmd = IPA_IOC_ADD_FLT_RULE;
3641 break;
3642 case IPA_IOC_DEL_FLT_RULE32:
3643 cmd = IPA_IOC_DEL_FLT_RULE;
3644 break;
3645 case IPA_IOC_GET_RT_TBL32:
3646 cmd = IPA_IOC_GET_RT_TBL;
3647 break;
3648 case IPA_IOC_COPY_HDR32:
3649 cmd = IPA_IOC_COPY_HDR;
3650 break;
3651 case IPA_IOC_QUERY_INTF32:
3652 cmd = IPA_IOC_QUERY_INTF;
3653 break;
3654 case IPA_IOC_QUERY_INTF_TX_PROPS32:
3655 cmd = IPA_IOC_QUERY_INTF_TX_PROPS;
3656 break;
3657 case IPA_IOC_QUERY_INTF_RX_PROPS32:
3658 cmd = IPA_IOC_QUERY_INTF_RX_PROPS;
3659 break;
3660 case IPA_IOC_QUERY_INTF_EXT_PROPS32:
3661 cmd = IPA_IOC_QUERY_INTF_EXT_PROPS;
3662 break;
3663 case IPA_IOC_GET_HDR32:
3664 cmd = IPA_IOC_GET_HDR;
3665 break;
3666 case IPA_IOC_ALLOC_NAT_MEM32:
Amir Levy479cfdd2017-10-26 12:23:14 +03003667 retval = copy_from_user(&nat_mem32, (const void __user *)arg,
3668 sizeof(struct ipa3_ioc_nat_alloc_mem32));
3669 if (retval)
3670 return retval;
Amir Levy9659e592016-10-27 18:08:27 +03003671 memcpy(nat_mem.dev_name, nat_mem32.dev_name,
3672 IPA_RESOURCE_NAME_MAX);
3673 nat_mem.size = (size_t)nat_mem32.size;
3674 nat_mem.offset = (off_t)nat_mem32.offset;
3675
3676 /* null terminate the string */
3677 nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
3678
Amir Levy479cfdd2017-10-26 12:23:14 +03003679 retval = ipa3_allocate_nat_device(&nat_mem);
3680 if (retval)
3681 return retval;
Amir Levy9659e592016-10-27 18:08:27 +03003682 nat_mem32.offset = (compat_off_t)nat_mem.offset;
Amir Levy479cfdd2017-10-26 12:23:14 +03003683 retval = copy_to_user((void __user *)arg, &nat_mem32,
3684 sizeof(struct ipa3_ioc_nat_alloc_mem32));
Amir Levy9659e592016-10-27 18:08:27 +03003685 return retval;
Amir Levy479cfdd2017-10-26 12:23:14 +03003686 case IPA_IOC_ALLOC_NAT_TABLE32:
3687 return compat_ipa3_nat_ipv6ct_alloc_table(arg,
3688 ipa3_allocate_nat_table);
3689 case IPA_IOC_ALLOC_IPV6CT_TABLE32:
3690 return compat_ipa3_nat_ipv6ct_alloc_table(arg,
3691 ipa3_allocate_ipv6ct_table);
Amir Levy9659e592016-10-27 18:08:27 +03003692 case IPA_IOC_V4_INIT_NAT32:
3693 cmd = IPA_IOC_V4_INIT_NAT;
3694 break;
Amir Levy479cfdd2017-10-26 12:23:14 +03003695 case IPA_IOC_INIT_IPV6CT_TABLE32:
3696 cmd = IPA_IOC_INIT_IPV6CT_TABLE;
3697 break;
3698 case IPA_IOC_TABLE_DMA_CMD32:
3699 cmd = IPA_IOC_TABLE_DMA_CMD;
Amir Levy9659e592016-10-27 18:08:27 +03003700 break;
3701 case IPA_IOC_V4_DEL_NAT32:
3702 cmd = IPA_IOC_V4_DEL_NAT;
3703 break;
Amir Levy479cfdd2017-10-26 12:23:14 +03003704 case IPA_IOC_DEL_NAT_TABLE32:
3705 cmd = IPA_IOC_DEL_NAT_TABLE;
3706 break;
3707 case IPA_IOC_DEL_IPV6CT_TABLE32:
3708 cmd = IPA_IOC_DEL_IPV6CT_TABLE;
3709 break;
3710 case IPA_IOC_NAT_MODIFY_PDN32:
3711 cmd = IPA_IOC_NAT_MODIFY_PDN;
3712 break;
Amir Levy9659e592016-10-27 18:08:27 +03003713 case IPA_IOC_GET_NAT_OFFSET32:
3714 cmd = IPA_IOC_GET_NAT_OFFSET;
3715 break;
3716 case IPA_IOC_PULL_MSG32:
3717 cmd = IPA_IOC_PULL_MSG;
3718 break;
3719 case IPA_IOC_RM_ADD_DEPENDENCY32:
3720 cmd = IPA_IOC_RM_ADD_DEPENDENCY;
3721 break;
3722 case IPA_IOC_RM_DEL_DEPENDENCY32:
3723 cmd = IPA_IOC_RM_DEL_DEPENDENCY;
3724 break;
3725 case IPA_IOC_GENERATE_FLT_EQ32:
3726 cmd = IPA_IOC_GENERATE_FLT_EQ;
3727 break;
3728 case IPA_IOC_QUERY_RT_TBL_INDEX32:
3729 cmd = IPA_IOC_QUERY_RT_TBL_INDEX;
3730 break;
3731 case IPA_IOC_WRITE_QMAPID32:
3732 cmd = IPA_IOC_WRITE_QMAPID;
3733 break;
3734 case IPA_IOC_MDFY_FLT_RULE32:
3735 cmd = IPA_IOC_MDFY_FLT_RULE;
3736 break;
3737 case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32:
3738 cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD;
3739 break;
3740 case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32:
3741 cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL;
3742 break;
3743 case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32:
3744 cmd = IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED;
3745 break;
3746 case IPA_IOC_MDFY_RT_RULE32:
3747 cmd = IPA_IOC_MDFY_RT_RULE;
3748 break;
3749 case IPA_IOC_COMMIT_HDR:
3750 case IPA_IOC_RESET_HDR:
3751 case IPA_IOC_COMMIT_RT:
3752 case IPA_IOC_RESET_RT:
3753 case IPA_IOC_COMMIT_FLT:
3754 case IPA_IOC_RESET_FLT:
3755 case IPA_IOC_DUMP:
3756 case IPA_IOC_PUT_RT_TBL:
3757 case IPA_IOC_PUT_HDR:
3758 case IPA_IOC_SET_FLT:
3759 case IPA_IOC_QUERY_EP_MAPPING:
3760 break;
3761 default:
3762 return -ENOIOCTLCMD;
3763 }
3764 return ipa3_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
3765}
3766#endif
3767
3768static ssize_t ipa3_write(struct file *file, const char __user *buf,
3769 size_t count, loff_t *ppos);
3770
3771static const struct file_operations ipa3_drv_fops = {
3772 .owner = THIS_MODULE,
3773 .open = ipa3_open,
3774 .read = ipa3_read,
3775 .write = ipa3_write,
3776 .unlocked_ioctl = ipa3_ioctl,
3777#ifdef CONFIG_COMPAT
3778 .compat_ioctl = compat_ipa3_ioctl,
3779#endif
3780};
3781
3782static int ipa3_get_clks(struct device *dev)
3783{
Ghanim Fodi6a831342017-03-07 18:19:15 +02003784 if (ipa3_res.use_bw_vote) {
3785 IPADBG("Vote IPA clock by bw voting via bus scaling driver\n");
3786 ipa3_clk = NULL;
3787 return 0;
3788 }
3789
Amir Levy9659e592016-10-27 18:08:27 +03003790 ipa3_clk = clk_get(dev, "core_clk");
3791 if (IS_ERR(ipa3_clk)) {
3792 if (ipa3_clk != ERR_PTR(-EPROBE_DEFER))
3793 IPAERR("fail to get ipa clk\n");
3794 return PTR_ERR(ipa3_clk);
3795 }
3796 return 0;
3797}
3798
3799/**
3800 * _ipa_enable_clks_v3_0() - Enable IPA clocks.
3801 */
3802void _ipa_enable_clks_v3_0(void)
3803{
Ghanim Fodi6a831342017-03-07 18:19:15 +02003804 IPADBG_LOW("curr_ipa_clk_rate=%d\n", ipa3_ctx->curr_ipa_clk_rate);
Amir Levy9659e592016-10-27 18:08:27 +03003805 if (ipa3_clk) {
Ghanim Fodi6a831342017-03-07 18:19:15 +02003806 IPADBG_LOW("enabling gcc_ipa_clk\n");
Amir Levy9659e592016-10-27 18:08:27 +03003807 clk_prepare(ipa3_clk);
3808 clk_enable(ipa3_clk);
Amir Levy9659e592016-10-27 18:08:27 +03003809 clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
Amir Levy9659e592016-10-27 18:08:27 +03003810 }
3811
Ghanim Fodi6a831342017-03-07 18:19:15 +02003812 ipa3_uc_notify_clk_state(true);
Amir Levy9659e592016-10-27 18:08:27 +03003813}
3814
3815static unsigned int ipa3_get_bus_vote(void)
3816{
3817 unsigned int idx = 1;
3818
Skylar Chang448d8b82017-08-08 17:30:32 -07003819 if (ipa3_ctx->curr_ipa_clk_rate == ipa3_ctx->ctrl->ipa_clk_rate_svs2) {
Amir Levy9659e592016-10-27 18:08:27 +03003820 idx = 1;
3821 } else if (ipa3_ctx->curr_ipa_clk_rate ==
Skylar Chang448d8b82017-08-08 17:30:32 -07003822 ipa3_ctx->ctrl->ipa_clk_rate_svs) {
3823 idx = 2;
3824 } else if (ipa3_ctx->curr_ipa_clk_rate ==
3825 ipa3_ctx->ctrl->ipa_clk_rate_nominal) {
3826 idx = 3;
Amir Levy9659e592016-10-27 18:08:27 +03003827 } else if (ipa3_ctx->curr_ipa_clk_rate ==
3828 ipa3_ctx->ctrl->ipa_clk_rate_turbo) {
3829 idx = ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases - 1;
3830 } else {
3831 WARN_ON(1);
3832 }
Michael Adisumartad8c88e52018-01-05 10:22:38 -08003833 IPADBG_LOW("curr %d idx %d\n", ipa3_ctx->curr_ipa_clk_rate, idx);
Amir Levy9659e592016-10-27 18:08:27 +03003834
3835 return idx;
3836}
3837
3838/**
Skylar Chang68c37d82018-04-07 16:42:36 -07003839 * ipa3_enable_clks() - Turn on IPA clocks
3840 *
3841 * Return codes:
3842 * None
3843 */
Amir Levy9659e592016-10-27 18:08:27 +03003844void ipa3_enable_clks(void)
3845{
Skylar Changefc0a0f2018-03-29 11:17:40 -07003846 if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) {
3847 IPAERR("not supported in this mode\n");
3848 return;
3849 }
3850
Amir Levy9659e592016-10-27 18:08:27 +03003851 IPADBG("enabling IPA clocks and bus voting\n");
3852
Ghanim Fodi6a831342017-03-07 18:19:15 +02003853 if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
3854 ipa3_get_bus_vote()))
3855 WARN_ON(1);
Amir Levy9659e592016-10-27 18:08:27 +03003856
Ghanim Fodi6a831342017-03-07 18:19:15 +02003857 ipa3_ctx->ctrl->ipa3_enable_clks();
Amir Levy9659e592016-10-27 18:08:27 +03003858}
3859
3860
3861/**
3862 * _ipa_disable_clks_v3_0() - Disable IPA clocks.
3863 */
3864void _ipa_disable_clks_v3_0(void)
3865{
Amir Levy9659e592016-10-27 18:08:27 +03003866 ipa3_suspend_apps_pipes(true);
3867 ipa3_uc_notify_clk_state(false);
Ghanim Fodi6a831342017-03-07 18:19:15 +02003868 if (ipa3_clk) {
3869 IPADBG_LOW("disabling gcc_ipa_clk\n");
Amir Levy9659e592016-10-27 18:08:27 +03003870 clk_disable_unprepare(ipa3_clk);
Ghanim Fodi6a831342017-03-07 18:19:15 +02003871 }
Amir Levy9659e592016-10-27 18:08:27 +03003872}
3873
3874/**
Skylar Chang68c37d82018-04-07 16:42:36 -07003875 * ipa3_disable_clks() - Turn off IPA clocks
3876 *
3877 * Return codes:
3878 * None
3879 */
Amir Levy9659e592016-10-27 18:08:27 +03003880void ipa3_disable_clks(void)
3881{
Skylar Changefc0a0f2018-03-29 11:17:40 -07003882 if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) {
3883 IPAERR("not supported in this mode\n");
3884 return;
3885 }
3886
Amir Levy9659e592016-10-27 18:08:27 +03003887 IPADBG("disabling IPA clocks and bus voting\n");
3888
3889 ipa3_ctx->ctrl->ipa3_disable_clks();
3890
Ghanim Fodi6a831342017-03-07 18:19:15 +02003891 if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl, 0))
3892 WARN_ON(1);
Amir Levy9659e592016-10-27 18:08:27 +03003893}
3894
3895/**
3896 * ipa3_start_tag_process() - Send TAG packet and wait for it to come back
3897 *
3898 * This function is called prior to clock gating when the active client counter
3899 * is 1. The TAG process ensures that there are no packets inside IPA HW that
Amir Levya59ed3f2017-03-05 17:30:55 +02003900 * were not submitted to the IPA client via the transport. During TAG process
3901 * all aggregation frames are (force) closed.
Amir Levy9659e592016-10-27 18:08:27 +03003902 *
3903 * Return codes:
3904 * None
3905 */
3906static void ipa3_start_tag_process(struct work_struct *work)
3907{
3908 int res;
3909
3910 IPADBG("starting TAG process\n");
3911 /* close aggregation frames on all pipes */
3912 res = ipa3_tag_aggr_force_close(-1);
3913 if (res)
3914 IPAERR("ipa3_tag_aggr_force_close failed %d\n", res);
3915 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS");
3916
3917 IPADBG("TAG process done\n");
3918}
3919
3920/**
Skylar Chang68c37d82018-04-07 16:42:36 -07003921 * ipa3_active_clients_log_mod() - Log a modification in the active clients
3922 * reference count
3923 *
3924 * This method logs any modification in the active clients reference count:
3925 * It logs the modification in the circular history buffer
3926 * It logs the modification in the hash table - looking for an entry,
3927 * creating one if needed and deleting one if needed.
3928 *
3929 * @id: ipa3_active client logging info struct to hold the log information
3930 * @inc: a boolean variable to indicate whether the modification is an increase
3931 * or decrease
3932 * @int_ctx: a boolean variable to indicate whether this call is being made from
3933 * an interrupt context and therefore should allocate GFP_ATOMIC memory
3934 *
3935 * Method process:
3936 * - Hash the unique identifier string
3937 * - Find the hash in the table
3938 * 1)If found, increase or decrease the reference count
3939 * 2)If not found, allocate a new hash table entry struct and initialize it
3940 * - Remove and deallocate unneeded data structure
3941 * - Log the call in the circular history buffer (unless it is a simple call)
3942 */
Amir Levy9659e592016-10-27 18:08:27 +03003943void ipa3_active_clients_log_mod(struct ipa_active_client_logging_info *id,
3944 bool inc, bool int_ctx)
3945{
3946 char temp_str[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN];
3947 unsigned long long t;
3948 unsigned long nanosec_rem;
3949 struct ipa3_active_client_htable_entry *hentry;
3950 struct ipa3_active_client_htable_entry *hfound;
3951 u32 hkey;
3952 char str_to_hash[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
Skylar Chang69ae50e2017-07-31 13:13:29 -07003953 unsigned long flags;
Amir Levy9659e592016-10-27 18:08:27 +03003954
Skylar Chang69ae50e2017-07-31 13:13:29 -07003955 spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
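	/*
	 * The logging spinlock is now held, so any allocation below must not
	 * sleep; force int_ctx so the hash-entry kzalloc() uses GFP_ATOMIC.
	 */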
3956 int_ctx = true;
Amir Levy9659e592016-10-27 18:08:27 +03003957 hfound = NULL;
3958 memset(str_to_hash, 0, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
3959 strlcpy(str_to_hash, id->id_string, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
Amir Levyd9f51132016-11-14 16:55:35 +02003960 hkey = jhash(str_to_hash, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN,
Amir Levy9659e592016-10-27 18:08:27 +03003961 0);
3962 hash_for_each_possible(ipa3_ctx->ipa3_active_clients_logging.htable,
3963 hentry, list, hkey) {
3964 if (!strcmp(hentry->id_string, id->id_string)) {
3965 hentry->count = hentry->count + (inc ? 1 : -1);
3966 hfound = hentry;
3967 }
3968 }
3969 if (hfound == NULL) {
3970 hentry = NULL;
3971 hentry = kzalloc(sizeof(
3972 struct ipa3_active_client_htable_entry),
3973 int_ctx ? GFP_ATOMIC : GFP_KERNEL);
3974 if (hentry == NULL) {
3975 IPAERR("failed allocating active clients hash entry");
Skylar Chang69ae50e2017-07-31 13:13:29 -07003976 spin_unlock_irqrestore(
3977 &ipa3_ctx->ipa3_active_clients_logging.lock,
3978 flags);
Amir Levy9659e592016-10-27 18:08:27 +03003979 return;
3980 }
3981 hentry->type = id->type;
3982 strlcpy(hentry->id_string, id->id_string,
3983 IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
3984 INIT_HLIST_NODE(&hentry->list);
3985 hentry->count = inc ? 1 : -1;
3986 hash_add(ipa3_ctx->ipa3_active_clients_logging.htable,
3987 &hentry->list, hkey);
3988 } else if (hfound->count == 0) {
3989 hash_del(&hfound->list);
3990 kfree(hfound);
3991 }
3992
3993 if (id->type != SIMPLE) {
3994 t = local_clock();
3995 nanosec_rem = do_div(t, 1000000000) / 1000;
3996 snprintf(temp_str, IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN,
3997 inc ? "[%5lu.%06lu] ^ %s, %s: %d" :
3998 "[%5lu.%06lu] v %s, %s: %d",
3999 (unsigned long)t, nanosec_rem,
4000 id->id_string, id->file, id->line);
4001 ipa3_active_clients_log_insert(temp_str);
4002 }
Skylar Chang69ae50e2017-07-31 13:13:29 -07004003 spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
4004 flags);
Amir Levy9659e592016-10-27 18:08:27 +03004005}
4006
4007void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
4008 bool int_ctx)
4009{
4010 ipa3_active_clients_log_mod(id, false, int_ctx);
4011}
4012
4013void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id,
4014 bool int_ctx)
4015{
4016 ipa3_active_clients_log_mod(id, true, int_ctx);
4017}
4018
4019/**
Skylar Chang68c37d82018-04-07 16:42:36 -07004020 * ipa3_inc_client_enable_clks() - Increase active clients counter, and
4021 * enable ipa clocks if necessary
4022 *
4023 * Return codes:
4024 * None
4025 */
Amir Levy9659e592016-10-27 18:08:27 +03004026void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
4027{
Skylar Chang242952b2017-07-20 15:04:05 -07004028 int ret;
4029
Amir Levy9659e592016-10-27 18:08:27 +03004030 ipa3_active_clients_log_inc(id, false);
Skylar Chang242952b2017-07-20 15:04:05 -07004031 ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
4032 if (ret) {
4033 IPADBG_LOW("active clients = %d\n",
4034 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
4035 return;
4036 }
4037
4038 mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
4039
4040 /* somebody might have voted for the clocks meanwhile */
4041 ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
4042 if (ret) {
4043 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
4044 IPADBG_LOW("active clients = %d\n",
4045 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
4046 return;
4047 }
4048
4049 ipa3_enable_clks();
4050 atomic_inc(&ipa3_ctx->ipa3_active_clients.cnt);
4051 IPADBG_LOW("active clients = %d\n",
4052 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
4053 ipa3_suspend_apps_pipes(false);
4054 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
Amir Levy9659e592016-10-27 18:08:27 +03004055}
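
/*
 * Illustrative caller sketch (the "MY_CLIENT" identifier is hypothetical):
 * a context that may sleep typically brackets its IPA HW access with the
 * blocking vote/unvote pair, e.g.:
 *
 *	struct ipa_active_client_logging_info log_info;
 *
 *	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "MY_CLIENT");
 *	ipa3_inc_client_enable_clks(&log_info);
 *	... access IPA HW ...
 *	ipa3_dec_client_disable_clks(&log_info);
 */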
4056
4057/**
Skylar Chang68c37d82018-04-07 16:42:36 -07004058 * ipa3_inc_client_enable_clks_no_block() - Only increment the number of active
4059 * clients if no asynchronous actions should be done. Asynchronous actions are
4060 * locking a mutex and waking up IPA HW.
4061 *
4062 * Return codes: 0 for success
4063 * -EPERM if an asynchronous action should have been done
4064 */
Amir Levy9659e592016-10-27 18:08:27 +03004065int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
4066 *id)
4067{
Skylar Chang242952b2017-07-20 15:04:05 -07004068 int ret;
Amir Levy9659e592016-10-27 18:08:27 +03004069
Skylar Chang242952b2017-07-20 15:04:05 -07004070 ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
4071 if (ret) {
4072 ipa3_active_clients_log_inc(id, true);
4073 IPADBG_LOW("active clients = %d\n",
4074 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
4075 return 0;
Amir Levy9659e592016-10-27 18:08:27 +03004076 }
Amir Levy9659e592016-10-27 18:08:27 +03004077
Skylar Chang242952b2017-07-20 15:04:05 -07004078 return -EPERM;
4079}
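
/*
 * Illustrative usage, modeled on ipa3_freeze_clock_vote_and_notify_modem()
 * later in this file: contexts that must not block try the no-block variant
 * and treat -EPERM as "clocks are currently gated":
 *
 *	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "FREEZE_VOTE");
 *	res = ipa3_inc_client_enable_clks_no_block(&log_info);
 *	if (res)
 *		... clocks are off; do not touch IPA HW ...
 */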
4080
4081static void __ipa3_dec_client_disable_clks(void)
4082{
4083 int ret;
4084
4085 if (!atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)) {
4086 IPAERR("trying to disable clocks with refcnt is 0!\n");
4087 ipa_assert();
4088 return;
4089 }
4090
4091 ret = atomic_add_unless(&ipa3_ctx->ipa3_active_clients.cnt, -1, 1);
4092 if (ret)
4093 goto bail;
4094
4095 /* seems like this is the only client holding the clocks */
4096 mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
4097 if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) == 1 &&
4098 ipa3_ctx->tag_process_before_gating) {
4099 ipa3_ctx->tag_process_before_gating = false;
4100 /*
4101 * When TAG process ends, active clients will be
4102 * decreased
4103 */
4104 queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_tag_work);
4105 goto unlock_mutex;
4106 }
4107
4108 /* a different context might increase the clock reference meanwhile */
4109 ret = atomic_sub_return(1, &ipa3_ctx->ipa3_active_clients.cnt);
4110 if (ret > 0)
4111 goto unlock_mutex;
4112 ipa3_disable_clks();
4113
4114unlock_mutex:
4115 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
4116bail:
4117 IPADBG_LOW("active clients = %d\n",
4118 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
Amir Levy9659e592016-10-27 18:08:27 +03004119}
4120
4121/**
4122 * ipa3_dec_client_disable_clks() - Decrease active clients counter
4123 *
4124 * If there are no other active clients, this function also starts the
4125 * TAG process. When the TAG process ends, the IPA clocks will be gated.
4126 * The start_tag_process_again flag is set during this function to signal the
4127 * TAG process to start again, as another client may have sent data to the IPA.
4128 *
4129 * Return codes:
4130 * None
4131 */
4132void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
4133{
Amir Levy9659e592016-10-27 18:08:27 +03004134 ipa3_active_clients_log_dec(id, false);
Skylar Chang242952b2017-07-20 15:04:05 -07004135 __ipa3_dec_client_disable_clks();
4136}
4137
4138static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work)
4139{
4140 __ipa3_dec_client_disable_clks();
4141}
4142
4143/**
4144 * ipa3_dec_client_disable_clks_no_block() - Decrease active clients counter
4145 * if possible without blocking. If this is the last client then the decrease
4146 * will happen from work queue context.
4147 *
4148 * Return codes:
4149 * None
4150 */
4151void ipa3_dec_client_disable_clks_no_block(
4152 struct ipa_active_client_logging_info *id)
4153{
4154 int ret;
4155
4156 ipa3_active_clients_log_dec(id, true);
4157 ret = atomic_add_unless(&ipa3_ctx->ipa3_active_clients.cnt, -1, 1);
4158 if (ret) {
4159 IPADBG_LOW("active clients = %d\n",
4160 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
4161 return;
Amir Levy9659e592016-10-27 18:08:27 +03004162 }
Skylar Chang242952b2017-07-20 15:04:05 -07004163
4164 /* seems like this is the only client holding the clocks */
4165 queue_work(ipa3_ctx->power_mgmt_wq,
4166 &ipa_dec_clients_disable_clks_on_wq_work);
Amir Levy9659e592016-10-27 18:08:27 +03004167}
4168
4169/**
Skylar Chang68c37d82018-04-07 16:42:36 -07004170 * ipa3_inc_acquire_wakelock() - Increase active clients counter, and
4171 * acquire wakelock if necessary
4172 *
4173 * Return codes:
4174 * None
4175 */
Amir Levy9659e592016-10-27 18:08:27 +03004176void ipa3_inc_acquire_wakelock(void)
4177{
4178 unsigned long flags;
4179
4180 spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
4181 ipa3_ctx->wakelock_ref_cnt.cnt++;
4182 if (ipa3_ctx->wakelock_ref_cnt.cnt == 1)
4183 __pm_stay_awake(&ipa3_ctx->w_lock);
4184 IPADBG_LOW("active wakelock ref cnt = %d\n",
4185 ipa3_ctx->wakelock_ref_cnt.cnt);
4186 spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
4187}
4188
4189/**
4190 * ipa3_dec_release_wakelock() - Decrease active clients counter
4191 *
4192 * If the ref count reaches 0, the wakelock is released.
4193 *
4194 * Return codes:
4195 * None
4196 */
4197void ipa3_dec_release_wakelock(void)
4198{
4199 unsigned long flags;
4200
4201 spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
4202 ipa3_ctx->wakelock_ref_cnt.cnt--;
4203 IPADBG_LOW("active wakelock ref cnt = %d\n",
4204 ipa3_ctx->wakelock_ref_cnt.cnt);
4205 if (ipa3_ctx->wakelock_ref_cnt.cnt == 0)
4206 __pm_relax(&ipa3_ctx->w_lock);
4207 spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
4208}
4209
Michael Adisumartac06df412017-09-19 10:10:35 -07004210int ipa3_set_clock_plan_from_pm(int idx)
4211{
4212 u32 clk_rate;
4213
Michael Adisumarta9cb4d212018-05-14 18:35:41 -07004214 IPADBG_LOW("idx = %d\n", idx);
4215
4216 if (!ipa3_ctx->enable_clock_scaling) {
4217 ipa3_ctx->ipa3_active_clients.bus_vote_idx = idx;
Michael Adisumartafd2d2fc92017-12-11 11:34:02 -08004218 return 0;
Michael Adisumarta9cb4d212018-05-14 18:35:41 -07004219 }
Michael Adisumartafd2d2fc92017-12-11 11:34:02 -08004220
Skylar Changefc0a0f2018-03-29 11:17:40 -07004221 if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) {
4222 IPAERR("not supported in this mode\n");
4223 return 0;
4224 }
4225
Michael Adisumartac06df412017-09-19 10:10:35 -07004226 if (idx <= 0 || idx >= ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases) {
4227 IPAERR("bad voltage\n");
4228 return -EINVAL;
4229 }
4230
4231 if (idx == 1)
Michael Adisumartafd2d2fc92017-12-11 11:34:02 -08004232 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs2;
Michael Adisumartac06df412017-09-19 10:10:35 -07004233 else if (idx == 2)
Michael Adisumartafd2d2fc92017-12-11 11:34:02 -08004234 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs;
Michael Adisumartac06df412017-09-19 10:10:35 -07004235 else if (idx == 3)
Michael Adisumartafd2d2fc92017-12-11 11:34:02 -08004236 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_nominal;
4237 else if (idx == 4)
Michael Adisumartac06df412017-09-19 10:10:35 -07004238 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
4239 else {
4240 IPAERR("bad voltage\n");
4241 WARN_ON(1);
4242 return -EFAULT;
4243 }
4244
4245 if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) {
4246 IPADBG_LOW("Same voltage\n");
4247 return 0;
4248 }
4249
4250 mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
4251 ipa3_ctx->curr_ipa_clk_rate = clk_rate;
4252 ipa3_ctx->ipa3_active_clients.bus_vote_idx = idx;
4253 IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
4254 if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) > 0) {
4255 if (ipa3_clk)
4256 clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
4257 if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
4258 ipa3_get_bus_vote()))
4259 WARN_ON(1);
4260 } else {
4261 IPADBG_LOW("clocks are gated, not setting rate\n");
4262 }
4263 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
4264 IPADBG_LOW("Done\n");
4265
4266 return 0;
4267}
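
/*
 * For reference, the index-to-rate mapping implemented above is:
 * idx 1 -> svs2, idx 2 -> svs, idx 3 -> nominal, idx 4 -> turbo. A PM client
 * requesting the nominal clock plan would therefore call, for example:
 *
 *	ipa3_set_clock_plan_from_pm(3);
 */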
4268
Amir Levy9659e592016-10-27 18:08:27 +03004269int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
4270 u32 bandwidth_mbps)
4271{
4272 enum ipa_voltage_level needed_voltage;
4273 u32 clk_rate;
4274
Skylar Changefc0a0f2018-03-29 11:17:40 -07004275 if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL) {
4276 IPAERR("not supported in this mode\n");
4277 return 0;
4278 }
4279
Amir Levy9659e592016-10-27 18:08:27 +03004280 IPADBG_LOW("floor_voltage=%d, bandwidth_mbps=%u\n",
4281 floor_voltage, bandwidth_mbps);
4282
4283 if (floor_voltage < IPA_VOLTAGE_UNSPECIFIED ||
4284 floor_voltage >= IPA_VOLTAGE_MAX) {
4285 IPAERR("bad voltage\n");
4286 return -EINVAL;
4287 }
4288
4289 if (ipa3_ctx->enable_clock_scaling) {
4290 IPADBG_LOW("Clock scaling is enabled\n");
4291 if (bandwidth_mbps >=
4292 ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo)
4293 needed_voltage = IPA_VOLTAGE_TURBO;
4294 else if (bandwidth_mbps >=
4295 ipa3_ctx->ctrl->clock_scaling_bw_threshold_nominal)
4296 needed_voltage = IPA_VOLTAGE_NOMINAL;
Skylar Chang448d8b82017-08-08 17:30:32 -07004297 else if (bandwidth_mbps >=
4298 ipa3_ctx->ctrl->clock_scaling_bw_threshold_svs)
Amir Levy9659e592016-10-27 18:08:27 +03004299 needed_voltage = IPA_VOLTAGE_SVS;
Skylar Chang448d8b82017-08-08 17:30:32 -07004300 else
4301 needed_voltage = IPA_VOLTAGE_SVS2;
Amir Levy9659e592016-10-27 18:08:27 +03004302 } else {
4303 IPADBG_LOW("Clock scaling is disabled\n");
4304 needed_voltage = IPA_VOLTAGE_NOMINAL;
4305 }
4306
4307 needed_voltage = max(needed_voltage, floor_voltage);
4308 switch (needed_voltage) {
Skylar Chang448d8b82017-08-08 17:30:32 -07004309 case IPA_VOLTAGE_SVS2:
4310 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs2;
4311 break;
Amir Levy9659e592016-10-27 18:08:27 +03004312 case IPA_VOLTAGE_SVS:
4313 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs;
4314 break;
4315 case IPA_VOLTAGE_NOMINAL:
4316 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_nominal;
4317 break;
4318 case IPA_VOLTAGE_TURBO:
4319 clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
4320 break;
4321 default:
4322 IPAERR("bad voltage\n");
4323 WARN_ON(1);
4324 return -EFAULT;
4325 }
4326
4327 if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) {
4328 IPADBG_LOW("Same voltage\n");
4329 return 0;
4330 }
4331
Skylar Chang242952b2017-07-20 15:04:05 -07004332 /* Hold the mutex to avoid race conditions with ipa3_enable_clocks() */
4333 mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
Amir Levy9659e592016-10-27 18:08:27 +03004334 ipa3_ctx->curr_ipa_clk_rate = clk_rate;
4335 IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
Skylar Chang242952b2017-07-20 15:04:05 -07004336 if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) > 0) {
Ghanim Fodi6a831342017-03-07 18:19:15 +02004337 if (ipa3_clk)
4338 clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
4339 if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
Skylar Chang242952b2017-07-20 15:04:05 -07004340 ipa3_get_bus_vote()))
Ghanim Fodi6a831342017-03-07 18:19:15 +02004341 WARN_ON(1);
Amir Levy9659e592016-10-27 18:08:27 +03004342 } else {
4343 IPADBG_LOW("clocks are gated, not setting rate\n");
4344 }
Skylar Chang242952b2017-07-20 15:04:05 -07004345 mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
Amir Levy9659e592016-10-27 18:08:27 +03004346 IPADBG_LOW("Done\n");
Skylar Chang1cbe99c2017-05-01 13:44:03 -07004347
Amir Levy9659e592016-10-27 18:08:27 +03004348 return 0;
4349}
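
/*
 * Illustrative usage sketch (the bandwidth value is arbitrary): a client with
 * no voltage floor lets the bandwidth thresholds pick the clock rate, e.g.:
 *
 *	ipa3_set_required_perf_profile(IPA_VOLTAGE_UNSPECIFIED, 800);
 *
 * With clock scaling enabled, the rate is chosen by comparing the requested
 * bandwidth against the svs/nominal/turbo thresholds in ipa3_ctx->ctrl.
 */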
4350
Amir Levya59ed3f2017-03-05 17:30:55 +02004351static void ipa3_process_irq_schedule_rel(void)
Amir Levy9659e592016-10-27 18:08:27 +03004352{
4353 queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq,
Amir Levya59ed3f2017-03-05 17:30:55 +02004354 &ipa3_transport_release_resource_work,
Amir Levy9659e592016-10-27 18:08:27 +03004355 msecs_to_jiffies(IPA_TRANSPORT_PROD_TIMEOUT_MSEC));
4356}
4357
4358/**
Skylar Chang68c37d82018-04-07 16:42:36 -07004359 * ipa3_suspend_handler() - Handles the suspend interrupt:
4360 * wakes up the suspended peripheral by requesting its consumer
4361 * @interrupt: Interrupt type
4362 * @private_data: The client's private data
4363 * @interrupt_data: Interrupt specific information data
4364 */
Amir Levy9659e592016-10-27 18:08:27 +03004365void ipa3_suspend_handler(enum ipa_irq_type interrupt,
4366 void *private_data,
4367 void *interrupt_data)
4368{
4369 enum ipa_rm_resource_name resource;
4370 u32 suspend_data =
4371 ((struct ipa_tx_suspend_irq_data *)interrupt_data)->endpoints;
4372 u32 bmsk = 1;
4373 u32 i = 0;
4374 int res;
4375 struct ipa_ep_cfg_holb holb_cfg;
Michael Adisumarta3e350812017-09-18 14:54:36 -07004376 u32 pipe_bitmask = 0;
Amir Levy9659e592016-10-27 18:08:27 +03004377
4378 IPADBG("interrupt=%d, interrupt_data=%u\n",
4379 interrupt, suspend_data);
4380 memset(&holb_cfg, 0, sizeof(holb_cfg));
4381 holb_cfg.tmr_val = 0;
4382
Michael Adisumarta3e350812017-09-18 14:54:36 -07004383 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++, bmsk = bmsk << 1) {
Amir Levy9659e592016-10-27 18:08:27 +03004384 if ((suspend_data & bmsk) && (ipa3_ctx->ep[i].valid)) {
Michael Adisumarta3e350812017-09-18 14:54:36 -07004385 if (ipa3_ctx->use_ipa_pm) {
4386 pipe_bitmask |= bmsk;
4387 continue;
4388 }
Amir Levy9659e592016-10-27 18:08:27 +03004389 if (IPA_CLIENT_IS_APPS_CONS(ipa3_ctx->ep[i].client)) {
4390 /*
4391 * pipe will be unsuspended as part of
4392 * enabling IPA clocks
4393 */
Skylar Chang0d06bb12017-02-24 11:22:03 -08004394 mutex_lock(&ipa3_ctx->transport_pm.
4395 transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03004396 if (!atomic_read(
4397 &ipa3_ctx->transport_pm.dec_clients)
4398 ) {
4399 IPA_ACTIVE_CLIENTS_INC_EP(
4400 ipa3_ctx->ep[i].client);
4401 IPADBG_LOW("Pipes un-suspended.\n");
4402 IPADBG_LOW("Enter poll mode.\n");
4403 atomic_set(
4404 &ipa3_ctx->transport_pm.dec_clients,
4405 1);
Skylar Chang9e3b6492017-11-07 09:49:48 -08004406 /*
4407 * acquire wake lock as long as suspend
4408 * vote is held
4409 */
4410 ipa3_inc_acquire_wakelock();
Amir Levya59ed3f2017-03-05 17:30:55 +02004411 ipa3_process_irq_schedule_rel();
Amir Levy9659e592016-10-27 18:08:27 +03004412 }
Skylar Chang0d06bb12017-02-24 11:22:03 -08004413 mutex_unlock(&ipa3_ctx->transport_pm.
4414 transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03004415 } else {
4416 resource = ipa3_get_rm_resource_from_ep(i);
4417 res =
4418 ipa_rm_request_resource_with_timer(resource);
4419 if (res == -EPERM &&
4420 IPA_CLIENT_IS_CONS(
4421 ipa3_ctx->ep[i].client)) {
4422 holb_cfg.en = 1;
4423 res = ipa3_cfg_ep_holb_by_client(
4424 ipa3_ctx->ep[i].client, &holb_cfg);
4425 if (res) {
4426 IPAERR("holb en fail, stall\n");
4427 BUG();
4428 }
4429 }
4430 }
4431 }
Michael Adisumarta3e350812017-09-18 14:54:36 -07004432 }
4433 if (ipa3_ctx->use_ipa_pm) {
4434 res = ipa_pm_handle_suspend(pipe_bitmask);
4435 if (res) {
4436 IPAERR("ipa_pm_handle_suspend failed %d\n", res);
4437 return;
4438 }
Amir Levy9659e592016-10-27 18:08:27 +03004439 }
4440}
4441
4442/**
Skylar Chang68c37d82018-04-07 16:42:36 -07004443 * ipa3_restore_suspend_handler() - restores the original suspend IRQ handler
4444 * as it was registered in the IPA init sequence.
4445 * Return codes:
4446 * 0: success
4447 * -EPERM: failed to remove current handler or failed to add original handler
4448 */
Amir Levy9659e592016-10-27 18:08:27 +03004449int ipa3_restore_suspend_handler(void)
4450{
4451 int result = 0;
4452
4453 result = ipa3_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ);
4454 if (result) {
4455 IPAERR("remove handler for suspend interrupt failed\n");
4456 return -EPERM;
4457 }
4458
4459 result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
4460 ipa3_suspend_handler, false, NULL);
4461 if (result) {
4462 IPAERR("register handler for suspend interrupt failed\n");
4463 result = -EPERM;
4464 }
4465
4466 IPADBG("suspend handler successfully restored\n");
4467
4468 return result;
4469}
4470
4471static int ipa3_apps_cons_release_resource(void)
4472{
4473 return 0;
4474}
4475
4476static int ipa3_apps_cons_request_resource(void)
4477{
4478 return 0;
4479}
4480
Amir Levya59ed3f2017-03-05 17:30:55 +02004481static void ipa3_transport_release_resource(struct work_struct *work)
Amir Levy9659e592016-10-27 18:08:27 +03004482{
Sridhar Ancha99b505b2016-04-21 23:11:10 +05304483 mutex_lock(&ipa3_ctx->transport_pm.transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03004484 /* check whether still need to decrease client usage */
4485 if (atomic_read(&ipa3_ctx->transport_pm.dec_clients)) {
4486 if (atomic_read(&ipa3_ctx->transport_pm.eot_activity)) {
4487 IPADBG("EOT pending Re-scheduling\n");
Amir Levya59ed3f2017-03-05 17:30:55 +02004488 ipa3_process_irq_schedule_rel();
Amir Levy9659e592016-10-27 18:08:27 +03004489 } else {
4490 atomic_set(&ipa3_ctx->transport_pm.dec_clients, 0);
Skylar Chang9e3b6492017-11-07 09:49:48 -08004491 ipa3_dec_release_wakelock();
Amir Levya59ed3f2017-03-05 17:30:55 +02004492 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TRANSPORT_RESOURCE");
Amir Levy9659e592016-10-27 18:08:27 +03004493 }
4494 }
4495 atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
Sridhar Ancha99b505b2016-04-21 23:11:10 +05304496 mutex_unlock(&ipa3_ctx->transport_pm.transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03004497}
4498
4499int ipa3_create_apps_resource(void)
4500{
4501 struct ipa_rm_create_params apps_cons_create_params;
4502 struct ipa_rm_perf_profile profile;
4503 int result = 0;
4504
4505 memset(&apps_cons_create_params, 0,
4506 sizeof(apps_cons_create_params));
4507 apps_cons_create_params.name = IPA_RM_RESOURCE_APPS_CONS;
4508 apps_cons_create_params.request_resource =
4509 ipa3_apps_cons_request_resource;
4510 apps_cons_create_params.release_resource =
4511 ipa3_apps_cons_release_resource;
4512 result = ipa_rm_create_resource(&apps_cons_create_params);
4513 if (result) {
4514 IPAERR("ipa_rm_create_resource failed\n");
4515 return result;
4516 }
4517
4518 profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
4519 ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile);
4520
4521 return result;
4522}
4523
4524/**
4525 * ipa3_init_interrupts() - Register to IPA IRQs
4526 *
4527 * Return codes: 0 in success, negative in failure
4528 *
4529 */
4530int ipa3_init_interrupts(void)
4531{
4532 int result;
4533
4534 /*register IPA IRQ handler*/
4535 result = ipa3_interrupts_init(ipa3_res.ipa_irq, 0,
Skylar Changefc0a0f2018-03-29 11:17:40 -07004536 &ipa3_ctx->master_pdev->dev);
Amir Levy9659e592016-10-27 18:08:27 +03004537 if (result) {
4538 IPAERR("ipa interrupts initialization failed\n");
4539 return -ENODEV;
4540 }
4541
4542 /*add handler for suspend interrupt*/
4543 result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
4544 ipa3_suspend_handler, false, NULL);
4545 if (result) {
4546 IPAERR("register handler for suspend interrupt failed\n");
4547 result = -ENODEV;
4548 goto fail_add_interrupt_handler;
4549 }
4550
4551 return 0;
4552
4553fail_add_interrupt_handler:
Skylar Changefc0a0f2018-03-29 11:17:40 -07004554 free_irq(ipa3_res.ipa_irq, &ipa3_ctx->master_pdev->dev);
Amir Levy9659e592016-10-27 18:08:27 +03004555 return result;
4556}
4557
4558/**
4559 * ipa3_destroy_flt_tbl_idrs() - destroy the idr structure for flt tables
4560 * The idr structure per filtering table is intended for rule id generation
4561 * per filtering rule.
4562 */
4563static void ipa3_destroy_flt_tbl_idrs(void)
4564{
4565 int i;
4566 struct ipa3_flt_tbl *flt_tbl;
4567
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004568 idr_destroy(&ipa3_ctx->flt_rule_ids[IPA_IP_v4]);
4569 idr_destroy(&ipa3_ctx->flt_rule_ids[IPA_IP_v6]);
4570
Amir Levy9659e592016-10-27 18:08:27 +03004571 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
4572 if (!ipa_is_ep_support_flt(i))
4573 continue;
4574
4575 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004576 flt_tbl->rule_ids = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03004577 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004578 flt_tbl->rule_ids = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03004579 }
4580}
4581
4582static void ipa3_freeze_clock_vote_and_notify_modem(void)
4583{
4584 int res;
Amir Levy9659e592016-10-27 18:08:27 +03004585 struct ipa_active_client_logging_info log_info;
4586
4587 if (ipa3_ctx->smp2p_info.res_sent)
4588 return;
4589
Skylar Change1209942017-02-02 14:26:38 -08004590 if (ipa3_ctx->smp2p_info.out_base_id == 0) {
4591 IPAERR("smp2p out gpio not assigned\n");
4592 return;
4593 }
4594
Amir Levy9659e592016-10-27 18:08:27 +03004595 IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "FREEZE_VOTE");
4596 res = ipa3_inc_client_enable_clks_no_block(&log_info);
4597 if (res)
Skylar Change1209942017-02-02 14:26:38 -08004598 ipa3_ctx->smp2p_info.ipa_clk_on = false;
Amir Levy9659e592016-10-27 18:08:27 +03004599 else
Skylar Change1209942017-02-02 14:26:38 -08004600 ipa3_ctx->smp2p_info.ipa_clk_on = true;
Amir Levy9659e592016-10-27 18:08:27 +03004601
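	/*
	 * Report the clock-vote result and signal "response complete" to the
	 * modem over the SMP2P output GPIOs.
	 */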
Skylar Change1209942017-02-02 14:26:38 -08004602 gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
4603 IPA_GPIO_OUT_CLK_VOTE_IDX,
4604 ipa3_ctx->smp2p_info.ipa_clk_on);
4605 gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
4606 IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX, 1);
Amir Levy9659e592016-10-27 18:08:27 +03004607
Skylar Change1209942017-02-02 14:26:38 -08004608 ipa3_ctx->smp2p_info.res_sent = true;
4609 IPADBG("IPA clocks are %s\n",
4610 ipa3_ctx->smp2p_info.ipa_clk_on ? "ON" : "OFF");
4611}
4612
4613void ipa3_reset_freeze_vote(void)
4614{
4615 if (ipa3_ctx->smp2p_info.res_sent == false)
4616 return;
4617
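	/*
	 * Drop the special clock vote taken at freeze time (if any) and
	 * de-assert both SMP2P output GPIOs.
	 */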
4618 if (ipa3_ctx->smp2p_info.ipa_clk_on)
4619 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("FREEZE_VOTE");
4620
4621 gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
4622 IPA_GPIO_OUT_CLK_VOTE_IDX, 0);
4623 gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
4624 IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX, 0);
4625
4626 ipa3_ctx->smp2p_info.res_sent = false;
4627 ipa3_ctx->smp2p_info.ipa_clk_on = false;
Amir Levy9659e592016-10-27 18:08:27 +03004628}
4629
4630static int ipa3_panic_notifier(struct notifier_block *this,
4631 unsigned long event, void *ptr)
4632{
4633 int res;
4634
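	/* freeze the current IPA clock vote and report it to the modem first */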
4635 ipa3_freeze_clock_vote_and_notify_modem();
4636
4637 IPADBG("Calling uC panic handler\n");
4638 res = ipa3_uc_panic_notifier(this, event, ptr);
4639 if (res)
4640 IPAERR("uC panic handler failed %d\n", res);
4641
Michael Adisumartaedba22d2018-04-19 12:28:33 -07004642 if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) != 0)
Michael Adisumartac50b8002018-06-13 15:21:07 -07004643 ipahal_print_all_regs(false);
Michael Adisumartaedba22d2018-04-19 12:28:33 -07004644
Amir Levy9659e592016-10-27 18:08:27 +03004645 return NOTIFY_DONE;
4646}
4647
4648static struct notifier_block ipa3_panic_blk = {
4649 .notifier_call = ipa3_panic_notifier,
4650 /* IPA panic handler needs to run before modem shuts down */
4651 .priority = INT_MAX,
4652};
4653
4654static void ipa3_register_panic_hdlr(void)
4655{
4656 atomic_notifier_chain_register(&panic_notifier_list,
4657 &ipa3_panic_blk);
4658}
4659
4660static void ipa3_trigger_ipa_ready_cbs(void)
4661{
4662 struct ipa3_ready_cb_info *info;
4663
4664 mutex_lock(&ipa3_ctx->lock);
4665
4666 /* Call all the CBs */
4667 list_for_each_entry(info, &ipa3_ctx->ipa_ready_cb_list, link)
4668 if (info->ready_cb)
4669 info->ready_cb(info->user_data);
4670
4671 mutex_unlock(&ipa3_ctx->lock);
4672}
4673
4674static int ipa3_gsi_pre_fw_load_init(void)
4675{
4676 int result;
4677
4678 result = gsi_configure_regs(ipa3_res.transport_mem_base,
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04004679 ipa3_res.transport_mem_size,
4680 ipa3_res.ipa_mem_base);
Amir Levy9659e592016-10-27 18:08:27 +03004681 if (result) {
4682 IPAERR("Failed to configure GSI registers\n");
4683 return -EINVAL;
4684 }
4685
4686 return 0;
4687}
4688
Skylar Chang0c17c7d2016-10-31 09:57:54 -07004689static void ipa3_uc_is_loaded(void)
4690{
4691 IPADBG("\n");
4692 complete_all(&ipa3_ctx->uc_loaded_completion_obj);
4693}
4694
Amir Levy41644242016-11-03 15:38:09 +02004695static enum gsi_ver ipa3_get_gsi_ver(enum ipa_hw_type ipa_hw_type)
4696{
4697 enum gsi_ver gsi_ver;
4698
4699 switch (ipa_hw_type) {
4700 case IPA_HW_v3_0:
4701 case IPA_HW_v3_1:
4702 gsi_ver = GSI_VER_1_0;
4703 break;
4704 case IPA_HW_v3_5:
4705 gsi_ver = GSI_VER_1_2;
4706 break;
4707 case IPA_HW_v3_5_1:
4708 gsi_ver = GSI_VER_1_3;
4709 break;
Michael Adisumarta891a4ff2017-05-16 16:40:06 -07004710 case IPA_HW_v4_0:
4711 gsi_ver = GSI_VER_2_0;
4712 break;
Amir Levy41644242016-11-03 15:38:09 +02004713 default:
4714 IPAERR("No GSI version for ipa type %d\n", ipa_hw_type);
4715 WARN_ON(1);
4716 gsi_ver = GSI_VER_ERR;
4717 }
4718
4719 IPADBG("GSI version %d\n", gsi_ver);
4720
4721 return gsi_ver;
4722}
4723
Amir Levy9659e592016-10-27 18:08:27 +03004724/**
4725 * ipa3_post_init() - Initialize the IPA Driver (Part II).
4726 * This part contains all initialization which requires interaction with
Amir Levya59ed3f2017-03-05 17:30:55 +02004727 * IPA HW (via GSI).
Amir Levy9659e592016-10-27 18:08:27 +03004728 *
4729 * @resource_p: contains platform specific values from the DTS file
4730 * @pdev: The platform device structure representing the IPA driver
4731 *
4732 * Function initialization process:
Amir Levy54fe4d32017-03-16 11:21:49 +02004733 * - Initialize endpoints bitmaps
4734 * - Initialize resource groups min and max values
4735 * - Initialize filtering lists heads and idr
4736 * - Initialize interrupts
Amir Levya59ed3f2017-03-05 17:30:55 +02004737 * - Register GSI
Amir Levy9659e592016-10-27 18:08:27 +03004738 * - Setup APPS pipes
4739 * - Initialize tethering bridge
4740 * - Initialize IPA debugfs
4741 * - Initialize IPA uC interface
4742 * - Initialize WDI interface
4743 * - Initialize USB interface
4744 * - Register for panic handler
4745 * - Trigger IPA ready callbacks (to all subscribers)
4746 * - Trigger IPA completion object (to all who wait on it)
4747 */
4748static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
4749 struct device *ipa_dev)
4750{
4751 int result;
Amir Levy9659e592016-10-27 18:08:27 +03004752 struct gsi_per_props gsi_props;
Skylar Chang0c17c7d2016-10-31 09:57:54 -07004753 struct ipa3_uc_hdlrs uc_hdlrs = { 0 };
Amir Levy54fe4d32017-03-16 11:21:49 +02004754 struct ipa3_flt_tbl *flt_tbl;
4755 int i;
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004756 struct idr *idr;
Amir Levy54fe4d32017-03-16 11:21:49 +02004757
Utkarsh Saxenaded78142017-05-03 14:04:30 +05304758 if (ipa3_ctx == NULL) {
4759 IPADBG("IPA driver hasn't been initialized\n");
4760 return -ENXIO;
4761 }
4762
4763 /* Prevent subsequent calls from trying to load the FW again. */
4764 if (ipa3_ctx->ipa_initialization_complete)
4765 return 0;
Mohammed Javidc6db3362018-02-13 13:41:38 +05304766
4767 IPADBG("active clients = %d\n",
4768 atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
Skylar Chang40430532017-07-06 14:31:57 -07004769 /* move proxy vote for modem on ipa3_post_init */
Mohammed Javidc6db3362018-02-13 13:41:38 +05304770 if (ipa3_ctx->ipa_hw_type != IPA_HW_v4_0)
4771 ipa3_proxy_clk_vote();
Utkarsh Saxenaded78142017-05-03 14:04:30 +05304772
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04004773 /*
4774 * SMMU was already attached if used, safe to do allocations
4775 *
4776 * NOTE WELL: On an emulation system, this allocation is done
4777 * in ipa3_pre_init()
4778 */
4779 if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
4780 if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio,
4781 ipa3_ctx->pdev)) {
4782 IPAERR("fail to init ipahal\n");
4783 result = -EFAULT;
4784 goto fail_ipahal;
4785 }
Skylar Changefc0a0f2018-03-29 11:17:40 -07004786 }
4787
4788 result = ipa3_init_hw();
4789 if (result) {
4790 IPAERR(":error initializing HW\n");
4791 result = -ENODEV;
4792 goto fail_init_hw;
4793 }
4794 IPADBG("IPA HW initialization sequence completed");
4795
4796 ipa3_ctx->ipa_num_pipes = ipa3_get_num_pipes();
4797 if (ipa3_ctx->ipa_num_pipes > IPA3_MAX_NUM_PIPES) {
4798 IPAERR("IPA has more pipes then supported has %d, max %d\n",
4799 ipa3_ctx->ipa_num_pipes, IPA3_MAX_NUM_PIPES);
4800 result = -ENODEV;
4801 goto fail_init_hw;
4802 }
4803
4804 ipa3_ctx->ctrl->ipa_sram_read_settings();
4805 IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n",
4806 ipa3_ctx->smem_sz, ipa3_ctx->smem_restricted_bytes);
4807
4808 IPADBG("hdr_lcl=%u ip4_rt_hash=%u ip4_rt_nonhash=%u\n",
4809 ipa3_ctx->hdr_tbl_lcl, ipa3_ctx->ip4_rt_tbl_hash_lcl,
4810 ipa3_ctx->ip4_rt_tbl_nhash_lcl);
4811
4812 IPADBG("ip6_rt_hash=%u ip6_rt_nonhash=%u\n",
4813 ipa3_ctx->ip6_rt_tbl_hash_lcl, ipa3_ctx->ip6_rt_tbl_nhash_lcl);
4814
4815 IPADBG("ip4_flt_hash=%u ip4_flt_nonhash=%u\n",
4816 ipa3_ctx->ip4_flt_tbl_hash_lcl,
4817 ipa3_ctx->ip4_flt_tbl_nhash_lcl);
4818
4819 IPADBG("ip6_flt_hash=%u ip6_flt_nonhash=%u\n",
4820 ipa3_ctx->ip6_flt_tbl_hash_lcl,
4821 ipa3_ctx->ip6_flt_tbl_nhash_lcl);
4822
4823 if (ipa3_ctx->smem_reqd_sz > ipa3_ctx->smem_sz) {
4824 IPAERR("SW expect more core memory, needed %d, avail %d\n",
4825 ipa3_ctx->smem_reqd_sz, ipa3_ctx->smem_sz);
4826 result = -ENOMEM;
4827 goto fail_init_hw;
4828 }
4829
4830 result = ipa3_allocate_dma_task_for_gsi();
4831 if (result) {
4832 IPAERR("failed to allocate dma task\n");
4833 goto fail_dma_task;
4834 }
4835
4836 if (ipa3_nat_ipv6ct_init_devices()) {
4837 IPAERR("unable to init NAT and IPv6CT devices\n");
4838 result = -ENODEV;
4839 goto fail_nat_ipv6ct_init_dev;
4840 }
4841
4842 result = ipa3_alloc_pkt_init();
4843 if (result) {
4844 IPAERR("Failed to alloc pkt_init payload\n");
4845 result = -ENODEV;
4846 goto fail_alloc_pkt_init;
4847 }
4848
4849 if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)
4850 ipa3_enable_dcd();
4851
Amir Levy54fe4d32017-03-16 11:21:49 +02004852 /*
4853 * The indication of whether we are working in MHI or non-MHI config is
4854 * given in ipa3_write(), which runs before ipa3_post_init(), i.e. from
4855 * this point it is safe to use the ipa3_ep_mapping array and the correct
4856 * entry will be returned from ipa3_get_hw_type_index()
4857 */
4858 ipa_init_ep_flt_bitmap();
4859 IPADBG("EP with flt support bitmap 0x%x (%u pipes)\n",
4860 ipa3_ctx->ep_flt_bitmap, ipa3_ctx->ep_flt_num);
4861
4862 /* Assign resource limitation to each group */
4863 ipa3_set_resorce_groups_min_max_limits();
4864
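	/*
	 * A single rule-id allocator per IP version is shared by the
	 * filtering tables of all pipes (see the loop below).
	 */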
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004865 idr = &(ipa3_ctx->flt_rule_ids[IPA_IP_v4]);
4866 idr_init(idr);
4867 idr = &(ipa3_ctx->flt_rule_ids[IPA_IP_v6]);
4868 idr_init(idr);
4869
Amir Levy54fe4d32017-03-16 11:21:49 +02004870 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
4871 if (!ipa_is_ep_support_flt(i))
4872 continue;
4873
4874 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
4875 INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
4876 flt_tbl->in_sys[IPA_RULE_HASHABLE] =
4877 !ipa3_ctx->ip4_flt_tbl_hash_lcl;
4878 flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
4879 !ipa3_ctx->ip4_flt_tbl_nhash_lcl;
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004880 flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[IPA_IP_v4];
Amir Levy54fe4d32017-03-16 11:21:49 +02004881
4882 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
4883 INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
4884 flt_tbl->in_sys[IPA_RULE_HASHABLE] =
4885 !ipa3_ctx->ip6_flt_tbl_hash_lcl;
4886 flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
4887 !ipa3_ctx->ip6_flt_tbl_nhash_lcl;
Skylar Chang0c37f5f2017-07-24 10:22:53 -07004888 flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[IPA_IP_v6];
Amir Levy54fe4d32017-03-16 11:21:49 +02004889 }
4890
4891 if (!ipa3_ctx->apply_rg10_wa) {
4892 result = ipa3_init_interrupts();
4893 if (result) {
4894 IPAERR("ipa initialization of interrupts failed\n");
4895 result = -ENODEV;
4896 goto fail_register_device;
4897 }
4898 } else {
4899 IPADBG("Initialization of ipa interrupts skipped\n");
4900 }
Amir Levy9659e592016-10-27 18:08:27 +03004901
Amir Levy3afd94a2017-01-05 10:19:13 +02004902 /*
Amir Levy5cfbb322017-01-09 14:53:02 +02004903 * IPA v3.5 and above requires disabling prefetch for USB in order
Skylar Chang84099692018-04-24 14:43:03 -07004904 * to allow MBIM to work.
Amir Levy3afd94a2017-01-05 10:19:13 +02004905 */
Michael Adisumartad68ab112017-06-14 11:40:06 -07004906 if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5
4907 && ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) &&
Amir Levy5cfbb322017-01-09 14:53:02 +02004908 (!ipa3_ctx->ipa_config_is_mhi))
Amir Levy3afd94a2017-01-05 10:19:13 +02004909 ipa3_disable_prefetch(IPA_CLIENT_USB_CONS);
4910
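	/* likewise, disable prefetch for the MHI consumer pipe in MHI config */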
Skylar Chang84099692018-04-24 14:43:03 -07004911 if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5
4912 && ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) &&
4913 (ipa3_ctx->ipa_config_is_mhi))
4914 ipa3_disable_prefetch(IPA_CLIENT_MHI_CONS);
4915
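	/*
	 * Populate the GSI per-EE registration properties; on emulation
	 * platforms the emulated interrupt controller details are passed too.
	 */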
Amir Levya59ed3f2017-03-05 17:30:55 +02004916 memset(&gsi_props, 0, sizeof(gsi_props));
4917 gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type);
4918 gsi_props.ee = resource_p->ee;
4919 gsi_props.intr = GSI_INTR_IRQ;
Amir Levya59ed3f2017-03-05 17:30:55 +02004920 gsi_props.phys_addr = resource_p->transport_mem_base;
4921 gsi_props.size = resource_p->transport_mem_size;
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04004922 if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
4923 gsi_props.irq = resource_p->emulator_irq;
4924 gsi_props.emulator_intcntrlr_client_isr = ipa3_get_isr();
4925 gsi_props.emulator_intcntrlr_addr =
4926 resource_p->emulator_intcntrlr_mem_base;
4927 gsi_props.emulator_intcntrlr_size =
4928 resource_p->emulator_intcntrlr_mem_size;
4929 } else {
4930 gsi_props.irq = resource_p->transport_irq;
4931 }
Amir Levya59ed3f2017-03-05 17:30:55 +02004932 gsi_props.notify_cb = ipa_gsi_notify_cb;
4933 gsi_props.req_clk_cb = NULL;
4934 gsi_props.rel_clk_cb = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03004935
Ghanim Fodic823bc62017-10-21 17:29:53 +03004936 if (ipa3_ctx->ipa_config_is_mhi) {
4937 gsi_props.mhi_er_id_limits_valid = true;
4938 gsi_props.mhi_er_id_limits[0] = resource_p->mhi_evid_limits[0];
4939 gsi_props.mhi_er_id_limits[1] = resource_p->mhi_evid_limits[1];
4940 }
4941
Amir Levya59ed3f2017-03-05 17:30:55 +02004942 result = gsi_register_device(&gsi_props,
4943 &ipa3_ctx->gsi_dev_hdl);
4944 if (result != GSI_STATUS_SUCCESS) {
4945 IPAERR(":gsi register error - %d\n", result);
4946 result = -ENODEV;
4947 goto fail_register_device;
Amir Levy9659e592016-10-27 18:08:27 +03004948 }
Amir Levya59ed3f2017-03-05 17:30:55 +02004949 IPADBG("IPA gsi is registered\n");
Amir Levy9659e592016-10-27 18:08:27 +03004950
4951 /* setup the AP-IPA pipes */
4952 if (ipa3_setup_apps_pipes()) {
4953 IPAERR(":failed to setup IPA-Apps pipes\n");
4954 result = -ENODEV;
4955 goto fail_setup_apps_pipes;
4956 }
Amir Levya59ed3f2017-03-05 17:30:55 +02004957 IPADBG("IPA GPI pipes were connected\n");
Amir Levy9659e592016-10-27 18:08:27 +03004958
4959 if (ipa3_ctx->use_ipa_teth_bridge) {
4960 /* Initialize the tethering bridge driver */
4961 result = ipa3_teth_bridge_driver_init();
4962 if (result) {
4963 IPAERR(":teth_bridge init failed (%d)\n", -result);
4964 result = -ENODEV;
4965 goto fail_teth_bridge_driver_init;
4966 }
4967 IPADBG("teth_bridge initialized");
4968 }
4969
Amir Levy9659e592016-10-27 18:08:27 +03004970 result = ipa3_uc_interface_init();
4971 if (result)
4972 IPAERR(":ipa Uc interface init failed (%d)\n", -result);
4973 else
4974 IPADBG(":ipa Uc interface init ok\n");
4975
Skylar Chang0c17c7d2016-10-31 09:57:54 -07004976 uc_hdlrs.ipa_uc_loaded_hdlr = ipa3_uc_is_loaded;
4977 ipa3_uc_register_handlers(IPA_HW_FEATURE_COMMON, &uc_hdlrs);
4978
Amir Levy9659e592016-10-27 18:08:27 +03004979 result = ipa3_wdi_init();
4980 if (result)
4981 IPAERR(":wdi init failed (%d)\n", -result);
4982 else
4983 IPADBG(":wdi init ok\n");
4984
4985 result = ipa3_ntn_init();
4986 if (result)
4987 IPAERR(":ntn init failed (%d)\n", -result);
4988 else
4989 IPADBG(":ntn init ok\n");
4990
Skylar Chang6f6e3072017-07-28 10:03:47 -07004991 result = ipa_hw_stats_init();
4992 if (result)
4993 IPAERR("fail to init stats %d\n", result);
4994 else
4995 IPADBG(":stats init ok\n");
4996
Amir Levy9659e592016-10-27 18:08:27 +03004997 ipa3_register_panic_hdlr();
4998
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04004999 ipa3_debugfs_init();
5000
Amir Levy9659e592016-10-27 18:08:27 +03005001 mutex_lock(&ipa3_ctx->lock);
5002 ipa3_ctx->ipa_initialization_complete = true;
5003 mutex_unlock(&ipa3_ctx->lock);
5004
5005 ipa3_trigger_ipa_ready_cbs();
5006 complete_all(&ipa3_ctx->init_completion_obj);
5007 pr_info("IPA driver initialization was successful.\n");
5008
5009 return 0;
5010
5011fail_teth_bridge_driver_init:
5012 ipa3_teardown_apps_pipes();
5013fail_setup_apps_pipes:
Amir Levya59ed3f2017-03-05 17:30:55 +02005014 gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
Amir Levy9659e592016-10-27 18:08:27 +03005015fail_register_device:
Amir Levy9659e592016-10-27 18:08:27 +03005016 ipa3_destroy_flt_tbl_idrs();
Skylar Changefc0a0f2018-03-29 11:17:40 -07005017fail_alloc_pkt_init:
5018 ipa3_nat_ipv6ct_destroy_devices();
5019fail_nat_ipv6ct_init_dev:
5020 ipa3_free_dma_task_for_gsi();
5021fail_dma_task:
5022fail_init_hw:
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005023 if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION)
5024 ipahal_destroy();
Skylar Changefc0a0f2018-03-29 11:17:40 -07005025fail_ipahal:
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005026 ipa3_proxy_clk_unvote();
Skylar Changefc0a0f2018-03-29 11:17:40 -07005027
Amir Levy9659e592016-10-27 18:08:27 +03005028 return result;
5029}
5030
Ghanim Fodi03dcc272017-08-08 18:13:25 +03005031static int ipa3_manual_load_ipa_fws(void)
Amir Levy9659e592016-10-27 18:08:27 +03005032{
5033 int result;
5034 const struct firmware *fw;
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005035 const char *path = IPA_FWS_PATH;
Amir Levy9659e592016-10-27 18:08:27 +03005036
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005037 if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
5038 switch (ipa3_get_emulation_type()) {
5039 case IPA_HW_v3_5_1:
5040 path = IPA_FWS_PATH_3_5_1;
5041 break;
5042 case IPA_HW_v4_0:
5043 path = IPA_FWS_PATH_4_0;
5044 break;
5045 default:
5046 break;
5047 }
5048 }
Amir Levy9659e592016-10-27 18:08:27 +03005049
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005050 IPADBG("Manual FW loading (%s) process initiated\n", path);
5051
5052 result = request_firmware(&fw, path, ipa3_ctx->cdev.dev);
Amir Levy9659e592016-10-27 18:08:27 +03005053 if (result < 0) {
5054 IPAERR("request_firmware failed, error %d\n", result);
5055 return result;
5056 }
5057 if (fw == NULL) {
5058 IPAERR("Firmware is NULL!\n");
5059 return -EINVAL;
5060 }
5061
5062 IPADBG("FWs are available for loading\n");
5063
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005064 if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
5065 result = emulator_load_fws(fw,
5066 ipa3_res.transport_mem_base,
5067 ipa3_res.transport_mem_size);
5068 } else {
5069 result = ipa3_load_fws(fw, ipa3_res.transport_mem_base);
5070 }
Amir Levy9659e592016-10-27 18:08:27 +03005071 if (result) {
Ghanim Fodi03dcc272017-08-08 18:13:25 +03005072 IPAERR("Manual IPA FWs loading has failed\n");
Amir Levy9659e592016-10-27 18:08:27 +03005073 release_firmware(fw);
5074 return result;
5075 }
5076
5077 result = gsi_enable_fw(ipa3_res.transport_mem_base,
Amir Levy85dcd172016-12-06 17:47:39 +02005078 ipa3_res.transport_mem_size,
5079 ipa3_get_gsi_ver(ipa3_res.ipa_hw_type));
Amir Levy9659e592016-10-27 18:08:27 +03005080 if (result) {
5081 IPAERR("Failed to enable GSI FW\n");
5082 release_firmware(fw);
5083 return result;
5084 }
5085
5086 release_firmware(fw);
5087
Ghanim Fodi03dcc272017-08-08 18:13:25 +03005088 IPADBG("Manual FW loading process is complete\n");
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005089
Amir Levy9659e592016-10-27 18:08:27 +03005090 return 0;
5091}
5092
Ghanim Fodi03dcc272017-08-08 18:13:25 +03005093static int ipa3_pil_load_ipa_fws(void)
Amir Levy9659e592016-10-27 18:08:27 +03005094{
5095 void *subsystem_get_retval = NULL;
5096
Ghanim Fodi03dcc272017-08-08 18:13:25 +03005097 IPADBG("PIL FW loading process initiated\n");
Amir Levy9659e592016-10-27 18:08:27 +03005098
5099 subsystem_get_retval = subsystem_get(IPA_SUBSYSTEM_NAME);
5100 if (IS_ERR_OR_NULL(subsystem_get_retval)) {
5101 IPAERR("Unable to trigger PIL process for FW loading\n");
5102 return -EINVAL;
5103 }
5104
Ghanim Fodi03dcc272017-08-08 18:13:25 +03005105 IPADBG("PIL FW loading process is complete\n");
Amir Levy9659e592016-10-27 18:08:27 +03005106 return 0;
5107}
5108
Ghanim Fodia5f376a2017-10-17 18:14:53 +03005109static void ipa3_load_ipa_fw(struct work_struct *work)
5110{
5111 int result;
5112
5113 IPADBG("Entry\n");
5114
Skylar Changefc0a0f2018-03-29 11:17:40 -07005115 result = ipa3_attach_to_smmu();
5116 if (result) {
5117 IPAERR("IPA attach to smmu failed %d\n", result);
5118 return;
5119 }
5120
Ghanim Fodia5f376a2017-10-17 18:14:53 +03005121 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
5122
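	/*
	 * On non-emulation MSM targets (or IPA v3.5 and above) the FW is
	 * brought up via PIL; otherwise fall back to manual loading through
	 * request_firmware().
	 */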
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005123 if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION &&
5124 (ipa3_is_msm_device() || (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)))
Ghanim Fodia5f376a2017-10-17 18:14:53 +03005125 result = ipa3_pil_load_ipa_fws();
5126 else
5127 result = ipa3_manual_load_ipa_fws();
5128
5129 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
5130
5131 if (result) {
5132 IPAERR("IPA FW loading process has failed\n");
5133 return;
5134 }
5135 pr_info("IPA FW loaded successfully\n");
5136
Skylar Changefc0a0f2018-03-29 11:17:40 -07005137 result = ipa3_post_init(&ipa3_res, ipa3_ctx->cdev.dev);
Ghanim Fodia5f376a2017-10-17 18:14:53 +03005138 if (result)
5139 IPAERR("IPA post init failed %d\n", result);
5140}
5141
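/*
 * ipa3_write() - writes to the IPA char device trigger FW loading.
 * For illustration only (the exact device node name depends on DRV_NAME and
 * the platform), userspace typically writes "1" for a non-MHI configuration
 * or "MHI" for an MHI configuration, e.g.:
 *
 *	echo 1 > /dev/<ipa device node>
 */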
Amir Levy9659e592016-10-27 18:08:27 +03005142static ssize_t ipa3_write(struct file *file, const char __user *buf,
5143 size_t count, loff_t *ppos)
5144{
5145 unsigned long missing;
Amir Levy9659e592016-10-27 18:08:27 +03005146
Amir Levy2da9d452017-12-12 10:09:46 +02005147 char dbg_buff[32] = { 0 };
Amir Levy9659e592016-10-27 18:08:27 +03005148
5149 if (sizeof(dbg_buff) < count + 1)
5150 return -EFAULT;
5151
5152 missing = copy_from_user(dbg_buff, buf, count);
5153
5154 if (missing) {
5155 IPAERR("Unable to copy data from user\n");
5156 return -EFAULT;
5157 }
5158
Amir Levya5774c42017-12-14 22:15:54 +02005159 dbg_buff[count] = '\0';
Mohammed Javidbf4c8022017-08-07 23:15:48 +05305160
Amir Levy2da9d452017-12-12 10:09:46 +02005161 IPADBG("user input string %s\n", dbg_buff);
5162
Ghanim Fodi03dcc272017-08-08 18:13:25 +03005163 /* Check MHI configuration on MDM devices */
5164 if (!ipa3_is_msm_device()) {
Amir Levy2da9d452017-12-12 10:09:46 +02005165
5166 if (strnstr(dbg_buff, "vlan", strlen(dbg_buff))) {
5167 if (strnstr(dbg_buff, "eth", strlen(dbg_buff)))
5168 ipa3_ctx->vlan_mode_iface[IPA_VLAN_IF_EMAC] =
5169 true;
5170 if (strnstr(dbg_buff, "rndis", strlen(dbg_buff)))
5171 ipa3_ctx->vlan_mode_iface[IPA_VLAN_IF_RNDIS] =
5172 true;
5173 if (strnstr(dbg_buff, "ecm", strlen(dbg_buff)))
5174 ipa3_ctx->vlan_mode_iface[IPA_VLAN_IF_ECM] =
5175 true;
5176
5177 /*
5178 * when vlan mode is passed to our dev we expect
5179 * another write
5180 */
5181 return count;
5182 }
5183
Amir Levya5774c42017-12-14 22:15:54 +02005184 /* trim ending newline character if any */
5185 if (count && (dbg_buff[count - 1] == '\n'))
5186 dbg_buff[count - 1] = '\0';
5187
Amir Levy54fe4d32017-03-16 11:21:49 +02005188 if (!strcasecmp(dbg_buff, "MHI")) {
5189 ipa3_ctx->ipa_config_is_mhi = true;
5190 pr_info(
Amir Levy2da9d452017-12-12 10:09:46 +02005191 "IPA is loading with MHI configuration\n");
Amir Levya5774c42017-12-14 22:15:54 +02005192 } else if (!strcmp(dbg_buff, "1")) {
Amir Levy54fe4d32017-03-16 11:21:49 +02005193 pr_info(
Amir Levy2da9d452017-12-12 10:09:46 +02005194 "IPA is loading with non MHI configuration\n");
5195 } else {
5196 IPAERR("got invalid string %s not loading FW\n",
5197 dbg_buff);
5198 return count;
Amir Levy54fe4d32017-03-16 11:21:49 +02005199 }
Amir Levy54fe4d32017-03-16 11:21:49 +02005200 }
Ghanim Fodi03dcc272017-08-08 18:13:25 +03005201
Chaitanya Pratapa31bf6432019-12-11 22:18:17 -08005202 /* Prevent subsequent calls from trying to load the FW again. */
5203 if (ipa3_is_ready())
5204 return count;
5205
Skylar Changafc22fe2019-04-25 14:10:52 -07005206 /* Prevent multiple calls from trying to load the FW again. */
5207 if (ipa3_ctx->fw_loaded) {
5208 IPAERR("not load FW again\n");
5209 return count;
5210 }
5211
5212 /* Schedule WQ to load ipa-fws */
5213 ipa3_ctx->fw_loaded = true;
5214
Ghanim Fodi03dcc272017-08-08 18:13:25 +03005215 queue_work(ipa3_ctx->transport_power_mgmt_wq,
Ghanim Fodia5f376a2017-10-17 18:14:53 +03005216 &ipa3_fw_loading_work);
Ghanim Fodi03dcc272017-08-08 18:13:25 +03005217
Ghanim Fodia5f376a2017-10-17 18:14:53 +03005218 IPADBG("Scheduled a work to load IPA FW\n");
Amir Levy9659e592016-10-27 18:08:27 +03005219 return count;
5220}
5221
Skylar Chang48afa052017-10-25 09:32:57 -07005222/**
5223 * ipa3_tz_unlock_reg - Unlocks memory regions so that they become accessible
5224 * from AP.
5225 * @reg_info - Pointer to array of memory regions to unlock
5226 * @num_regs - Number of elements in the array
5227 *
5228 * Converts the input array of regions to a struct that TZ understands and
5229 * issues an SCM call.
5230 * Also flushes the memory cache to DDR in order to make sure that TZ sees the
5231 * correct data structure.
5232 *
5233 * Returns: 0 on success, negative on failure
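 *
 * Illustrative example only (a hypothetical caller unlocking one 4 KB
 * region; the actual regions come from platform configuration):
 *
 *	struct ipa_tz_unlock_reg_info reg = {
 *		.reg_addr = <region physical address>,
 *		.size = SZ_4K,
 *	};
 *	if (ipa3_tz_unlock_reg(&reg, 1))
 *		pr_err("TZ region unlock failed\n");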
5234 */
5235int ipa3_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs)
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005236{
5237 int i, size, ret, resp;
5238 struct tz_smmu_ipa_protect_region_iovec_s *ipa_tz_unlock_vec;
5239 struct tz_smmu_ipa_protect_region_s cmd_buf;
Skylar Chang3a696ba2017-10-25 09:35:07 -07005240 struct scm_desc desc = {0};
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005241
Skylar Chang48afa052017-10-25 09:32:57 -07005242 if (reg_info == NULL || num_regs == 0) {
5243 IPAERR("Bad parameters\n");
5244 return -EFAULT;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005245 }
Skylar Chang48afa052017-10-25 09:32:57 -07005246
5247 size = num_regs * sizeof(struct tz_smmu_ipa_protect_region_iovec_s);
5248 ipa_tz_unlock_vec = kzalloc(PAGE_ALIGN(size), GFP_KERNEL);
5249 if (ipa_tz_unlock_vec == NULL)
5250 return -ENOMEM;
5251
5252 for (i = 0; i < num_regs; i++) {
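		/*
		 * reg_addr ^ (reg_addr & 0xFFF) clears the lower 12 bits,
		 * i.e. rounds the address down to a 4 KB boundary.
		 */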
5253 ipa_tz_unlock_vec[i].input_addr = reg_info[i].reg_addr ^
5254 (reg_info[i].reg_addr & 0xFFF);
5255 ipa_tz_unlock_vec[i].output_addr = reg_info[i].reg_addr ^
5256 (reg_info[i].reg_addr & 0xFFF);
5257 ipa_tz_unlock_vec[i].size = reg_info[i].size;
5258 ipa_tz_unlock_vec[i].attr = IPA_TZ_UNLOCK_ATTRIBUTE;
5259 }
5260
5261 /* pass physical address of command buffer */
5262 cmd_buf.iovec_buf = virt_to_phys((void *)ipa_tz_unlock_vec);
5263 cmd_buf.size_bytes = size;
5264
5265 /* flush cache to DDR */
5266 __cpuc_flush_dcache_area((void *)ipa_tz_unlock_vec, size);
5267 outer_flush_range(cmd_buf.iovec_buf, cmd_buf.iovec_buf + size);
Skylar Chang3a696ba2017-10-25 09:35:07 -07005268 if (!is_scm_armv8())
5269 ret = scm_call(SCM_SVC_MP, TZ_MEM_PROTECT_REGION_ID,
5270 &cmd_buf, sizeof(cmd_buf), &resp, sizeof(resp));
5271 else {
5272 desc.args[0] = virt_to_phys((void *)ipa_tz_unlock_vec);
5273 desc.args[1] = size;
5274 desc.arginfo = SCM_ARGS(2, SCM_RO, SCM_VAL);
5275 ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
5276 TZ_MEM_PROTECT_REGION_ID), &desc);
5277 }
Skylar Chang48afa052017-10-25 09:32:57 -07005278
Skylar Chang48afa052017-10-25 09:32:57 -07005279 if (ret) {
5280 IPAERR("scm call SCM_SVC_MP failed: %d\n", ret);
5281 kfree(ipa_tz_unlock_vec);
5282 return -EFAULT;
5283 }
5284 kfree(ipa_tz_unlock_vec);
5285
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005286 return 0;
5287}
5288
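/*
 * Pre-build an IP_PACKET_INIT immediate command for every pipe inside one
 * DMA-coherent buffer, so the data path can point at a ready-made command
 * per destination pipe instead of constructing it at runtime.
 */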
Skylar Changcd3902d2017-03-27 18:08:27 -07005289static int ipa3_alloc_pkt_init(void)
5290{
5291 struct ipa_mem_buffer mem;
5292 struct ipahal_imm_cmd_pyld *cmd_pyld;
5293 struct ipahal_imm_cmd_ip_packet_init cmd = {0};
5294 int i;
5295
5296 cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
5297 &cmd, false);
5298 if (!cmd_pyld) {
5299 IPAERR("failed to construct IMM cmd\n");
5300 return -ENOMEM;
5301 }
Michael Adisumartab5d170f2017-05-17 14:34:11 -07005302 ipa3_ctx->pkt_init_imm_opcode = cmd_pyld->opcode;
Skylar Changcd3902d2017-03-27 18:08:27 -07005303
5304 mem.size = cmd_pyld->len * ipa3_ctx->ipa_num_pipes;
5305 mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
5306 &mem.phys_base, GFP_KERNEL);
5307 if (!mem.base) {
5308 IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
5309 ipahal_destroy_imm_cmd(cmd_pyld);
5310 return -ENOMEM;
5311 }
5312 ipahal_destroy_imm_cmd(cmd_pyld);
5313
5314 memset(mem.base, 0, mem.size);
5315 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
5316 cmd.destination_pipe_index = i;
5317 cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
5318 &cmd, false);
5319 if (!cmd_pyld) {
5320 IPAERR("failed to construct IMM cmd\n");
5321 dma_free_coherent(ipa3_ctx->pdev,
5322 mem.size,
5323 mem.base,
5324 mem.phys_base);
5325 return -ENOMEM;
5326 }
5327 memcpy(mem.base + i * cmd_pyld->len, cmd_pyld->data,
5328 cmd_pyld->len);
5329 ipa3_ctx->pkt_init_imm[i] = mem.phys_base + i * cmd_pyld->len;
5330 ipahal_destroy_imm_cmd(cmd_pyld);
5331 }
5332
5333 return 0;
5334}
5335
Amir Levy9659e592016-10-27 18:08:27 +03005336/**
Skylar Chang68c37d82018-04-07 16:42:36 -07005337 * ipa3_pre_init() - Initialize the IPA Driver.
5338 * This part contains all initialization which doesn't require IPA HW, such
5339 * as structure allocations and initializations, register writes, etc.
5340 *
5341 * @resource_p: contains platform specific values from the DTS file
5342 * @pdev: The platform device structure representing the IPA driver
5343 *
5344 * Function initialization process:
5345 * Allocate memory for the driver context data struct
5346 * Initializing the ipa3_ctx with :
5347 * 1)parsed values from the dts file
5348 * 2)parameters passed to the module initialization
5349 * 3)read HW values(such as core memory size)
5350 * Map IPA core registers to CPU memory
5351 * Restart IPA core(HW reset)
5352 * Initialize the look-aside caches(kmem_cache/slab) for filter,
5353 * routing and IPA-tree
5354 * Create memory pool with 4 objects for DMA operations (each object
5355 * is 512 bytes long); these objects will be used for tx (A5->IPA)
5356 * Initialize lists head(routing, hdr, system pipes)
5357 * Initialize mutexes (for ipa_ctx and NAT memory mutexes)
5358 * Initialize spinlocks (for list related to A5<->IPA pipes)
5359 * Initialize 2 single-threaded work-queue named "ipa rx wq" and "ipa tx wq"
5360 * Initialize Red-Black-Tree(s) for handles of header,routing rule,
5361 * routing table ,filtering rule
5362 * Initialize the filter block by committing IPV4 and IPV6 default rules
5363 * Create empty routing table in system memory(no committing)
5364 * Create a char-device for IPA
5365 * Initialize IPA RM (resource manager)
5366 * Configure GSI registers (in GSI case)
5367 */
Amir Levy9659e592016-10-27 18:08:27 +03005368static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
Skylar Changefc0a0f2018-03-29 11:17:40 -07005369 struct platform_device *ipa_pdev)
Amir Levy9659e592016-10-27 18:08:27 +03005370{
5371 int result = 0;
5372 int i;
Amir Levy9659e592016-10-27 18:08:27 +03005373 struct ipa3_rt_tbl_set *rset;
Mohammed Javidc6db3362018-02-13 13:41:38 +05305374 struct ipa_active_client_logging_info log_info;
Skylar Changefc0a0f2018-03-29 11:17:40 -07005375 struct cdev *cdev;
Amir Levy9659e592016-10-27 18:08:27 +03005376
5377 IPADBG("IPA Driver initialization started\n");
5378
5379 ipa3_ctx = kzalloc(sizeof(*ipa3_ctx), GFP_KERNEL);
5380 if (!ipa3_ctx) {
5381 IPAERR(":kzalloc err.\n");
5382 result = -ENOMEM;
5383 goto fail_mem_ctx;
5384 }
5385
5386 ipa3_ctx->logbuf = ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa", 0);
Skylar Chang841c1452017-04-03 16:07:22 -07005387 if (ipa3_ctx->logbuf == NULL)
Mohammed Javid0af3c662018-06-29 15:06:00 +05305388 IPADBG("failed to create IPC log, continue...\n");
Amir Levy9659e592016-10-27 18:08:27 +03005389
Skylar Changefc0a0f2018-03-29 11:17:40 -07005390 /* ipa3_ctx->pdev and ipa3_ctx->uc_pdev will be set in the smmu probes*/
5391 ipa3_ctx->master_pdev = ipa_pdev;
Michael Adisumartac8c404a2018-04-05 18:01:45 -07005392 for (i = 0; i < IPA_SMMU_CB_MAX; i++)
5393 ipa3_ctx->s1_bypass_arr[i] = true;
Michael Adisumarta93e97522017-10-06 15:49:46 -07005394
Amir Levy9659e592016-10-27 18:08:27 +03005395 ipa3_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
5396 ipa3_ctx->ipa_wrapper_size = resource_p->ipa_mem_size;
5397 ipa3_ctx->ipa_hw_type = resource_p->ipa_hw_type;
5398 ipa3_ctx->ipa3_hw_mode = resource_p->ipa3_hw_mode;
5399 ipa3_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
Amir Levy9659e592016-10-27 18:08:27 +03005400 ipa3_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt;
5401 ipa3_ctx->ipa_wdi2 = resource_p->ipa_wdi2;
Mohammed Javid80d0e2a2019-06-10 14:11:42 +05305402 ipa3_ctx->ipa_config_is_auto = resource_p->ipa_config_is_auto;
Chaitanya Pratapa3f54f3b2019-11-10 20:24:38 -08005403 ipa3_ctx->use_xbl_boot = resource_p->use_xbl_boot;
Amir Levy9659e592016-10-27 18:08:27 +03005404 ipa3_ctx->use_64_bit_dma_mask = resource_p->use_64_bit_dma_mask;
5405 ipa3_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size;
5406 ipa3_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size;
5407 ipa3_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset;
5408 ipa3_ctx->tethered_flow_control = resource_p->tethered_flow_control;
Amir Levy9659e592016-10-27 18:08:27 +03005409 ipa3_ctx->ee = resource_p->ee;
5410 ipa3_ctx->apply_rg10_wa = resource_p->apply_rg10_wa;
5411 ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa;
Michael Adisumarta3e350812017-09-18 14:54:36 -07005412 ipa3_ctx->use_ipa_pm = resource_p->use_ipa_pm;
Mohammed Javid03854df2018-06-20 18:36:57 +05305413 ipa3_ctx->wdi_over_pcie = resource_p->wdi_over_pcie;
Amir Levy9659e592016-10-27 18:08:27 +03005414 ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
Mohammed Javid73cd4d22018-04-03 17:15:49 +05305415 ipa3_ctx->ipa_config_is_mhi = resource_p->ipa_mhi_dynamic_config;
Ghanim Fodic823bc62017-10-21 17:29:53 +03005416 ipa3_ctx->mhi_evid_limits[0] = resource_p->mhi_evid_limits[0];
5417 ipa3_ctx->mhi_evid_limits[1] = resource_p->mhi_evid_limits[1];
Skylar Changefc0a0f2018-03-29 11:17:40 -07005418
5419 WARN(ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL,
5420 "Non NORMAL IPA HW mode, is this emulation platform ?");
5421
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005422 if (resource_p->ipa_tz_unlock_reg) {
5423 ipa3_ctx->ipa_tz_unlock_reg_num =
5424 resource_p->ipa_tz_unlock_reg_num;
5425 ipa3_ctx->ipa_tz_unlock_reg = kcalloc(
5426 ipa3_ctx->ipa_tz_unlock_reg_num,
5427 sizeof(*ipa3_ctx->ipa_tz_unlock_reg),
5428 GFP_KERNEL);
5429 if (ipa3_ctx->ipa_tz_unlock_reg == NULL) {
5430 result = -ENOMEM;
5431 goto fail_tz_unlock_reg;
5432 }
5433 for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) {
5434 ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr =
5435 resource_p->ipa_tz_unlock_reg[i].reg_addr;
5436 ipa3_ctx->ipa_tz_unlock_reg[i].size =
5437 resource_p->ipa_tz_unlock_reg[i].size;
5438 }
5439 }
5440
5441 /* unlock registers for uc */
Skylar Chang48afa052017-10-25 09:32:57 -07005442 result = ipa3_tz_unlock_reg(ipa3_ctx->ipa_tz_unlock_reg,
5443 ipa3_ctx->ipa_tz_unlock_reg_num);
5444 if (result)
5445 IPAERR("Failed to unlock memory region using TZ\n");
Amir Levy9659e592016-10-27 18:08:27 +03005446
5447 /* default aggregation parameters */
5448 ipa3_ctx->aggregation_type = IPA_MBIM_16;
5449 ipa3_ctx->aggregation_byte_limit = 1;
5450 ipa3_ctx->aggregation_time_limit = 0;
5451
5452 ipa3_ctx->ctrl = kzalloc(sizeof(*ipa3_ctx->ctrl), GFP_KERNEL);
5453 if (!ipa3_ctx->ctrl) {
5454 IPAERR("memory allocation error for ctrl\n");
5455 result = -ENOMEM;
5456 goto fail_mem_ctrl;
5457 }
5458 result = ipa3_controller_static_bind(ipa3_ctx->ctrl,
5459 ipa3_ctx->ipa_hw_type);
5460 if (result) {
5461 IPAERR("fail to static bind IPA ctrl.\n");
5462 result = -EFAULT;
5463 goto fail_bind;
5464 }
5465
Skylar Changefc0a0f2018-03-29 11:17:40 -07005466 result = ipa3_init_mem_partition(ipa3_ctx->master_pdev->dev.of_node);
Amir Levy9659e592016-10-27 18:08:27 +03005467 if (result) {
5468 IPAERR(":ipa3_init_mem_partition failed!\n");
5469 result = -ENODEV;
5470 goto fail_init_mem_partition;
5471 }
5472
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005473 if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL &&
5474 ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
Skylar Changefc0a0f2018-03-29 11:17:40 -07005475 ipa3_ctx->ctrl->msm_bus_data_ptr =
5476 msm_bus_cl_get_pdata(ipa3_ctx->master_pdev);
5477 if (ipa3_ctx->ctrl->msm_bus_data_ptr == NULL) {
5478 IPAERR("failed to get bus scaling\n");
5479 goto fail_bus_reg;
5480 }
Ghanim Fodi6a831342017-03-07 18:19:15 +02005481 IPADBG("Use bus scaling info from device tree #usecases=%d\n",
Skylar Changefc0a0f2018-03-29 11:17:40 -07005482 ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases);
Amir Levy9659e592016-10-27 18:08:27 +03005483
Skylar Changefc0a0f2018-03-29 11:17:40 -07005484 /* get BUS handle */
5485 ipa3_ctx->ipa_bus_hdl =
5486 msm_bus_scale_register_client(
5487 ipa3_ctx->ctrl->msm_bus_data_ptr);
5488 if (!ipa3_ctx->ipa_bus_hdl) {
5489 IPAERR("fail to register with bus mgr!\n");
5490 result = -ENODEV;
5491 goto fail_bus_reg;
5492 }
Amir Levy9659e592016-10-27 18:08:27 +03005493 }
5494
5495 /* get IPA clocks */
Skylar Changefc0a0f2018-03-29 11:17:40 -07005496 result = ipa3_get_clks(&ipa3_ctx->master_pdev->dev);
Amir Levy9659e592016-10-27 18:08:27 +03005497 if (result)
5498 goto fail_clk;
5499
5500 /* init active_clients_log after getting ipa-clk */
Ghanim Fodic48ba992017-12-24 19:28:38 +02005501 result = ipa3_active_clients_log_init();
5502 if (result)
Amir Levy9659e592016-10-27 18:08:27 +03005503 goto fail_init_active_client;
5504
5505 /* Enable ipa3_ctx->enable_clock_scaling */
5506 ipa3_ctx->enable_clock_scaling = 1;
5507 ipa3_ctx->curr_ipa_clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
5508
5509 /* enable IPA clocks explicitly to allow the initialization */
5510 ipa3_enable_clks();
5511
5512 /* setup IPA register access */
5513 IPADBG("Mapping 0x%x\n", resource_p->ipa_mem_base +
5514 ipa3_ctx->ctrl->ipa_reg_base_ofst);
5515 ipa3_ctx->mmio = ioremap(resource_p->ipa_mem_base +
5516 ipa3_ctx->ctrl->ipa_reg_base_ofst,
5517 resource_p->ipa_mem_size);
5518 if (!ipa3_ctx->mmio) {
5519 IPAERR(":ipa-base ioremap err.\n");
5520 result = -EFAULT;
5521 goto fail_remap;
5522 }
5523
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005524 IPADBG(
5525 "base(0x%x)+offset(0x%x)=(0x%x) mapped to (%pK) with len (0x%x)\n",
5526 resource_p->ipa_mem_base,
5527 ipa3_ctx->ctrl->ipa_reg_base_ofst,
5528 resource_p->ipa_mem_base + ipa3_ctx->ctrl->ipa_reg_base_ofst,
5529 ipa3_ctx->mmio,
5530 resource_p->ipa_mem_size);
5531
5532 /*
5533 * Emulation requires ipahal be initialized early...for FW
5534 * download, hence...
5535 */
5536 if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
5537 if (ipahal_init(ipa3_ctx->ipa_hw_type,
5538 ipa3_ctx->mmio,
5539 &(ipa3_ctx->master_pdev->dev))) {
5540 IPAERR("fail to init ipahal\n");
5541 result = -EFAULT;
5542 goto fail_ipahal_init;
5543 }
5544 }
5545
Amir Levy9659e592016-10-27 18:08:27 +03005546 mutex_init(&ipa3_ctx->ipa3_active_clients.mutex);
Mohammed Javidc6db3362018-02-13 13:41:38 +05305547
5548 IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
5549 ipa3_active_clients_log_inc(&log_info, false);
5550 ipa3_ctx->q6_proxy_clk_vote_valid = true;
5551 ipa3_ctx->q6_proxy_clk_vote_cnt = 1;
5552
5553 /*Updating the proxy vote cnt 1 */
Skylar Chang242952b2017-07-20 15:04:05 -07005554 atomic_set(&ipa3_ctx->ipa3_active_clients.cnt, 1);
Amir Levy9659e592016-10-27 18:08:27 +03005555
Amir Levy9659e592016-10-27 18:08:27 +03005556 /* Create workqueues for power management */
5557 ipa3_ctx->power_mgmt_wq =
5558 create_singlethread_workqueue("ipa_power_mgmt");
5559 if (!ipa3_ctx->power_mgmt_wq) {
5560 IPAERR("failed to create power mgmt wq\n");
5561 result = -ENOMEM;
5562 goto fail_init_hw;
5563 }
5564
5565 ipa3_ctx->transport_power_mgmt_wq =
5566 create_singlethread_workqueue("transport_power_mgmt");
5567 if (!ipa3_ctx->transport_power_mgmt_wq) {
5568 IPAERR("failed to create transport power mgmt wq\n");
5569 result = -ENOMEM;
5570 goto fail_create_transport_wq;
5571 }
5572
Sridhar Ancha99b505b2016-04-21 23:11:10 +05305573 mutex_init(&ipa3_ctx->transport_pm.transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03005574
5575 /* init the lookaside cache */
5576 ipa3_ctx->flt_rule_cache = kmem_cache_create("IPA_FLT",
5577 sizeof(struct ipa3_flt_entry), 0, 0, NULL);
5578 if (!ipa3_ctx->flt_rule_cache) {
5579 IPAERR(":ipa flt cache create failed\n");
5580 result = -ENOMEM;
5581 goto fail_flt_rule_cache;
5582 }
5583 ipa3_ctx->rt_rule_cache = kmem_cache_create("IPA_RT",
5584 sizeof(struct ipa3_rt_entry), 0, 0, NULL);
5585 if (!ipa3_ctx->rt_rule_cache) {
5586 IPAERR(":ipa rt cache create failed\n");
5587 result = -ENOMEM;
5588 goto fail_rt_rule_cache;
5589 }
5590 ipa3_ctx->hdr_cache = kmem_cache_create("IPA_HDR",
5591 sizeof(struct ipa3_hdr_entry), 0, 0, NULL);
5592 if (!ipa3_ctx->hdr_cache) {
5593 IPAERR(":ipa hdr cache create failed\n");
5594 result = -ENOMEM;
5595 goto fail_hdr_cache;
5596 }
5597 ipa3_ctx->hdr_offset_cache =
5598 kmem_cache_create("IPA_HDR_OFFSET",
5599 sizeof(struct ipa_hdr_offset_entry), 0, 0, NULL);
5600 if (!ipa3_ctx->hdr_offset_cache) {
5601 IPAERR(":ipa hdr off cache create failed\n");
5602 result = -ENOMEM;
5603 goto fail_hdr_offset_cache;
5604 }
5605 ipa3_ctx->hdr_proc_ctx_cache = kmem_cache_create("IPA_HDR_PROC_CTX",
5606 sizeof(struct ipa3_hdr_proc_ctx_entry), 0, 0, NULL);
5607 if (!ipa3_ctx->hdr_proc_ctx_cache) {
5608 IPAERR(":ipa hdr proc ctx cache create failed\n");
5609 result = -ENOMEM;
5610 goto fail_hdr_proc_ctx_cache;
5611 }
5612 ipa3_ctx->hdr_proc_ctx_offset_cache =
5613 kmem_cache_create("IPA_HDR_PROC_CTX_OFFSET",
5614 sizeof(struct ipa3_hdr_proc_ctx_offset_entry), 0, 0, NULL);
5615 if (!ipa3_ctx->hdr_proc_ctx_offset_cache) {
5616 IPAERR(":ipa hdr proc ctx off cache create failed\n");
5617 result = -ENOMEM;
5618 goto fail_hdr_proc_ctx_offset_cache;
5619 }
5620 ipa3_ctx->rt_tbl_cache = kmem_cache_create("IPA_RT_TBL",
5621 sizeof(struct ipa3_rt_tbl), 0, 0, NULL);
5622 if (!ipa3_ctx->rt_tbl_cache) {
5623 IPAERR(":ipa rt tbl cache create failed\n");
5624 result = -ENOMEM;
5625 goto fail_rt_tbl_cache;
5626 }
5627 ipa3_ctx->tx_pkt_wrapper_cache =
5628 kmem_cache_create("IPA_TX_PKT_WRAPPER",
5629 sizeof(struct ipa3_tx_pkt_wrapper), 0, 0, NULL);
5630 if (!ipa3_ctx->tx_pkt_wrapper_cache) {
5631 IPAERR(":ipa tx pkt wrapper cache create failed\n");
5632 result = -ENOMEM;
5633 goto fail_tx_pkt_wrapper_cache;
5634 }
5635 ipa3_ctx->rx_pkt_wrapper_cache =
5636 kmem_cache_create("IPA_RX_PKT_WRAPPER",
5637 sizeof(struct ipa3_rx_pkt_wrapper), 0, 0, NULL);
5638 if (!ipa3_ctx->rx_pkt_wrapper_cache) {
5639 IPAERR(":ipa rx pkt wrapper cache create failed\n");
5640 result = -ENOMEM;
5641 goto fail_rx_pkt_wrapper_cache;
5642 }
5643
Amir Levy9659e592016-10-27 18:08:27 +03005644 /* init the various list heads */
5645 INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_hdr_entry_list);
5646 for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
5647 INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_offset_list[i]);
5648 INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_free_offset_list[i]);
5649 }
5650 INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list);
5651 for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
5652 INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i]);
5653 INIT_LIST_HEAD(&ipa3_ctx->
5654 hdr_proc_ctx_tbl.head_free_offset_list[i]);
5655 }
5656 INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
Skylar Chang0c37f5f2017-07-24 10:22:53 -07005657 idr_init(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].rule_ids);
Amir Levy9659e592016-10-27 18:08:27 +03005658 INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
Skylar Chang0c37f5f2017-07-24 10:22:53 -07005659 idr_init(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].rule_ids);
Amir Levy9659e592016-10-27 18:08:27 +03005660
5661 rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
5662 INIT_LIST_HEAD(&rset->head_rt_tbl_list);
Skylar Chang0c37f5f2017-07-24 10:22:53 -07005663 idr_init(&rset->rule_ids);
Amir Levy9659e592016-10-27 18:08:27 +03005664 rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
5665 INIT_LIST_HEAD(&rset->head_rt_tbl_list);
Skylar Chang0c37f5f2017-07-24 10:22:53 -07005666 idr_init(&rset->rule_ids);
Amir Levy9659e592016-10-27 18:08:27 +03005667
5668 INIT_LIST_HEAD(&ipa3_ctx->intf_list);
5669 INIT_LIST_HEAD(&ipa3_ctx->msg_list);
5670 INIT_LIST_HEAD(&ipa3_ctx->pull_msg_list);
5671 init_waitqueue_head(&ipa3_ctx->msg_waitq);
5672 mutex_init(&ipa3_ctx->msg_lock);
5673
Skylar Chang68c37d82018-04-07 16:42:36 -07005674 /* store wlan client-connect-msg-list */
5675 INIT_LIST_HEAD(&ipa3_ctx->msg_wlan_client_list);
5676 mutex_init(&ipa3_ctx->msg_wlan_client_lock);
5677
Amir Levy9659e592016-10-27 18:08:27 +03005678 mutex_init(&ipa3_ctx->lock);
Skylar Changfb792c62017-08-17 12:53:23 -07005679 mutex_init(&ipa3_ctx->q6_proxy_clk_vote_mutex);
Mohammed Javidb4b5ef42017-08-29 01:05:46 +05305680 mutex_init(&ipa3_ctx->ipa_cne_evt_lock);
Amir Levy9659e592016-10-27 18:08:27 +03005681
5682 idr_init(&ipa3_ctx->ipa_idr);
5683 spin_lock_init(&ipa3_ctx->idr_lock);
5684
5685 /* wlan related member */
5686 memset(&ipa3_ctx->wc_memb, 0, sizeof(ipa3_ctx->wc_memb));
5687 spin_lock_init(&ipa3_ctx->wc_memb.wlan_spinlock);
5688 spin_lock_init(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
5689 INIT_LIST_HEAD(&ipa3_ctx->wc_memb.wlan_comm_desc_list);
5690
Skylar Changefc0a0f2018-03-29 11:17:40 -07005691 ipa3_ctx->cdev.class = class_create(THIS_MODULE, DRV_NAME);
Amir Levy9659e592016-10-27 18:08:27 +03005692
Skylar Changefc0a0f2018-03-29 11:17:40 -07005693 result = alloc_chrdev_region(&ipa3_ctx->cdev.dev_num, 0, 1, DRV_NAME);
Amir Levy9659e592016-10-27 18:08:27 +03005694 if (result) {
5695 IPAERR("alloc_chrdev_region err.\n");
5696 result = -ENODEV;
5697 goto fail_alloc_chrdev_region;
5698 }
5699
Skylar Changefc0a0f2018-03-29 11:17:40 -07005700 ipa3_ctx->cdev.dev = device_create(ipa3_ctx->cdev.class, NULL,
5701 ipa3_ctx->cdev.dev_num, ipa3_ctx, DRV_NAME);
5702 if (IS_ERR(ipa3_ctx->cdev.dev)) {
Amir Levy9659e592016-10-27 18:08:27 +03005703 IPAERR(":device_create err.\n");
5704 result = -ENODEV;
5705 goto fail_device_create;
5706 }
5707
Amir Levy9659e592016-10-27 18:08:27 +03005708 /* Create a wakeup source. */
5709 wakeup_source_init(&ipa3_ctx->w_lock, "IPA_WS");
5710 spin_lock_init(&ipa3_ctx->wakelock_ref_cnt.spinlock);
5711
Michael Adisumarta3e350812017-09-18 14:54:36 -07005712 /* Initialize Power Management framework */
5713 if (ipa3_ctx->use_ipa_pm) {
5714 result = ipa_pm_init(&ipa3_res.pm_init);
5715 if (result) {
5716 IPAERR("IPA PM initialization failed (%d)\n", -result);
5717 result = -ENODEV;
5718 goto fail_ipa_rm_init;
5719 }
5720 IPADBG("IPA resource manager initialized");
5721 } else {
5722 result = ipa_rm_initialize();
5723 if (result) {
5724 IPAERR("RM initialization failed (%d)\n", -result);
5725 result = -ENODEV;
5726 goto fail_ipa_rm_init;
5727 }
5728 IPADBG("IPA resource manager initialized");
Amir Levy9659e592016-10-27 18:08:27 +03005729
Michael Adisumarta3e350812017-09-18 14:54:36 -07005730 result = ipa3_create_apps_resource();
5731 if (result) {
5732 IPAERR("Failed to create APPS_CONS resource\n");
5733 result = -ENODEV;
5734 goto fail_create_apps_resource;
5735 }
Amir Levy9659e592016-10-27 18:08:27 +03005736 }
5737
Amir Levy9659e592016-10-27 18:08:27 +03005738 INIT_LIST_HEAD(&ipa3_ctx->ipa_ready_cb_list);
5739
5740 init_completion(&ipa3_ctx->init_completion_obj);
Skylar Chang0c17c7d2016-10-31 09:57:54 -07005741 init_completion(&ipa3_ctx->uc_loaded_completion_obj);
Amir Levy9659e592016-10-27 18:08:27 +03005742
Ghanim Fodie6bb7a82017-10-02 17:59:58 +03005743 result = ipa3_dma_setup();
5744 if (result) {
5745 IPAERR("Failed to setup IPA DMA\n");
5746 result = -ENODEV;
5747 goto fail_ipa_dma_setup;
5748 }
5749
Amir Levy9659e592016-10-27 18:08:27 +03005750 /*
Amir Levya59ed3f2017-03-05 17:30:55 +02005751 * We can't register the GSI driver yet, as it expects
Amir Levy9659e592016-10-27 18:08:27 +03005752 * the GSI FW to be up and running before the registration.
Amir Levya59ed3f2017-03-05 17:30:55 +02005753 *
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005754 * For IPA3.0 and the emulation system, the GSI configuration
5755 * is done by the GSI driver.
5756 *
Amir Levya59ed3f2017-03-05 17:30:55 +02005757 * For IPA3.1 (and on), the GSI configuration is done by TZ.
Amir Levy9659e592016-10-27 18:08:27 +03005758 */
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005759 if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0 ||
5760 ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
Amir Levya59ed3f2017-03-05 17:30:55 +02005761 result = ipa3_gsi_pre_fw_load_init();
5762 if (result) {
5763 IPAERR("gsi pre FW loading config failed\n");
5764 result = -ENODEV;
Ghanim Fodie6bb7a82017-10-02 17:59:58 +03005765 goto fail_gsi_pre_fw_load_init;
Amir Levy9659e592016-10-27 18:08:27 +03005766 }
5767 }
Amir Levy9659e592016-10-27 18:08:27 +03005768
Skylar Changefc0a0f2018-03-29 11:17:40 -07005769 cdev = &ipa3_ctx->cdev.cdev;
5770 cdev_init(cdev, &ipa3_drv_fops);
5771 cdev->owner = THIS_MODULE;
5772 cdev->ops = &ipa3_drv_fops; /* from LDD3 */
Utkarsh Saxenaded78142017-05-03 14:04:30 +05305773
Skylar Changefc0a0f2018-03-29 11:17:40 -07005774 result = cdev_add(cdev, ipa3_ctx->cdev.dev_num, 1);
Utkarsh Saxenaded78142017-05-03 14:04:30 +05305775 if (result) {
5776 IPAERR(":cdev_add err=%d\n", -result);
5777 result = -ENODEV;
5778 goto fail_cdev_add;
5779 }
5780 IPADBG("ipa cdev added successful. major:%d minor:%d\n",
Skylar Changefc0a0f2018-03-29 11:17:40 -07005781 MAJOR(ipa3_ctx->cdev.dev_num),
5782 MINOR(ipa3_ctx->cdev.dev_num));
Mohammed Javidc6db3362018-02-13 13:41:38 +05305783 /*
5784 * for IPA 4.0 offline charge is not needed and we need to prevent
5785 * power collapse until IPA uC is loaded.
5786 */
5787
Skylar Chang40430532017-07-06 14:31:57 -07005788 /* proxy vote for modem is added in ipa3_post_init() phase */
Mohammed Javidc6db3362018-02-13 13:41:38 +05305789 if (ipa3_ctx->ipa_hw_type != IPA_HW_v4_0)
5790 ipa3_proxy_clk_unvote();
Amir Levy9659e592016-10-27 18:08:27 +03005791 return 0;
5792
Utkarsh Saxenaded78142017-05-03 14:04:30 +05305793fail_cdev_add:
Ghanim Fodie6bb7a82017-10-02 17:59:58 +03005794fail_gsi_pre_fw_load_init:
5795 ipa3_dma_shutdown();
5796fail_ipa_dma_setup:
Ghanim Fodie6bb7a82017-10-02 17:59:58 +03005797 if (ipa3_ctx->use_ipa_pm)
5798 ipa_pm_destroy();
5799 else
Michael Adisumarta3e350812017-09-18 14:54:36 -07005800 ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
Amir Levy9659e592016-10-27 18:08:27 +03005801fail_create_apps_resource:
Michael Adisumarta3e350812017-09-18 14:54:36 -07005802 if (!ipa3_ctx->use_ipa_pm)
5803 ipa_rm_exit();
Amir Levy9659e592016-10-27 18:08:27 +03005804fail_ipa_rm_init:
Skylar Changefc0a0f2018-03-29 11:17:40 -07005805 device_destroy(ipa3_ctx->cdev.class, ipa3_ctx->cdev.dev_num);
Amir Levy9659e592016-10-27 18:08:27 +03005806fail_device_create:
Skylar Changefc0a0f2018-03-29 11:17:40 -07005807 unregister_chrdev_region(ipa3_ctx->cdev.dev_num, 1);
Amir Levy9659e592016-10-27 18:08:27 +03005808fail_alloc_chrdev_region:
Ghanim Fodie6bb7a82017-10-02 17:59:58 +03005809 idr_destroy(&ipa3_ctx->ipa_idr);
Skylar Chang0c37f5f2017-07-24 10:22:53 -07005810 rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
5811 idr_destroy(&rset->rule_ids);
5812 rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
5813 idr_destroy(&rset->rule_ids);
5814 idr_destroy(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].rule_ids);
5815 idr_destroy(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].rule_ids);
Amir Levy9659e592016-10-27 18:08:27 +03005816 kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
5817fail_rx_pkt_wrapper_cache:
5818 kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
5819fail_tx_pkt_wrapper_cache:
5820 kmem_cache_destroy(ipa3_ctx->rt_tbl_cache);
5821fail_rt_tbl_cache:
5822 kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache);
5823fail_hdr_proc_ctx_offset_cache:
5824 kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache);
5825fail_hdr_proc_ctx_cache:
5826 kmem_cache_destroy(ipa3_ctx->hdr_offset_cache);
5827fail_hdr_offset_cache:
5828 kmem_cache_destroy(ipa3_ctx->hdr_cache);
5829fail_hdr_cache:
5830 kmem_cache_destroy(ipa3_ctx->rt_rule_cache);
5831fail_rt_rule_cache:
5832 kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
5833fail_flt_rule_cache:
5834 destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
5835fail_create_transport_wq:
5836 destroy_workqueue(ipa3_ctx->power_mgmt_wq);
5837fail_init_hw:
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04005838 if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION)
5839 ipahal_destroy();
5840fail_ipahal_init:
Amir Levy9659e592016-10-27 18:08:27 +03005841 iounmap(ipa3_ctx->mmio);
5842fail_remap:
5843 ipa3_disable_clks();
5844 ipa3_active_clients_log_destroy();
5845fail_init_active_client:
Ghanim Fodi6a831342017-03-07 18:19:15 +02005846 if (ipa3_clk)
5847 clk_put(ipa3_clk);
5848 ipa3_clk = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03005849fail_clk:
Skylar Changefc0a0f2018-03-29 11:17:40 -07005850 if (ipa3_ctx->ipa_bus_hdl)
5851 msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
Amir Levy9659e592016-10-27 18:08:27 +03005852fail_bus_reg:
Skylar Changefc0a0f2018-03-29 11:17:40 -07005853 if (ipa3_ctx->ctrl->msm_bus_data_ptr)
5854 msm_bus_cl_clear_pdata(ipa3_ctx->ctrl->msm_bus_data_ptr);
Amir Levy9659e592016-10-27 18:08:27 +03005855fail_init_mem_partition:
5856fail_bind:
5857 kfree(ipa3_ctx->ctrl);
5858fail_mem_ctrl:
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005859 kfree(ipa3_ctx->ipa_tz_unlock_reg);
5860fail_tz_unlock_reg:
Skylar Chang841c1452017-04-03 16:07:22 -07005861 if (ipa3_ctx->logbuf)
5862 ipc_log_context_destroy(ipa3_ctx->logbuf);
Amir Levy9659e592016-10-27 18:08:27 +03005863 kfree(ipa3_ctx);
5864 ipa3_ctx = NULL;
5865fail_mem_ctx:
5866 return result;
5867}
5868
Michael Adisumarta3e350812017-09-18 14:54:36 -07005869static int get_ipa_dts_pm_info(struct platform_device *pdev,
5870 struct ipa3_plat_drv_res *ipa_drv_res)
5871{
5872 int result;
5873 int i, j;
5874
5875 ipa_drv_res->use_ipa_pm = of_property_read_bool(pdev->dev.of_node,
5876 "qcom,use-ipa-pm");
5877 IPADBG("use_ipa_pm=%d\n", ipa_drv_res->use_ipa_pm);
5878 if (!ipa_drv_res->use_ipa_pm)
5879 return 0;
5880
5881 result = of_property_read_u32(pdev->dev.of_node,
5882 "qcom,msm-bus,num-cases",
5883 &ipa_drv_res->pm_init.threshold_size);
5884 /* No vote is ignored */
5885 ipa_drv_res->pm_init.threshold_size -= 2;
5886 if (result || ipa_drv_res->pm_init.threshold_size >
5887 IPA_PM_THRESHOLD_MAX) {
5888 IPAERR("invalid property qcom,msm-bus,num-cases %d\n",
5889 ipa_drv_res->pm_init.threshold_size);
5890 return -EFAULT;
5891 }
5892
5893 result = of_property_read_u32_array(pdev->dev.of_node,
5894 "qcom,throughput-threshold",
5895 ipa_drv_res->pm_init.default_threshold,
5896 ipa_drv_res->pm_init.threshold_size);
5897 if (result) {
5898 IPAERR("failed to read qcom,throughput-thresholds\n");
5899 return -EFAULT;
5900 }
5901
5902 result = of_property_count_strings(pdev->dev.of_node,
5903 "qcom,scaling-exceptions");
5904 if (result < 0) {
5905 IPADBG("no exception list for ipa pm\n");
5906 result = 0;
5907 }
5908
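	/*
	 * Each qcom,scaling-exceptions entry is a usecase name followed by
	 * threshold_size threshold strings, hence the (threshold_size + 1)
	 * stride checked below.
	 */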
5909 if (result % (ipa_drv_res->pm_init.threshold_size + 1)) {
5910 IPAERR("failed to read qcom,scaling-exceptions\n");
5911 return -EFAULT;
5912 }
5913
5914 ipa_drv_res->pm_init.exception_size = result /
5915 (ipa_drv_res->pm_init.threshold_size + 1);
5916 if (ipa_drv_res->pm_init.exception_size >=
5917 IPA_PM_EXCEPTION_MAX) {
5918 IPAERR("exception list larger then max %d\n",
5919 ipa_drv_res->pm_init.exception_size);
5920 return -EFAULT;
5921 }
5922
5923 for (i = 0; i < ipa_drv_res->pm_init.exception_size; i++) {
5924 struct ipa_pm_exception *ex = ipa_drv_res->pm_init.exceptions;
5925
5926 result = of_property_read_string_index(pdev->dev.of_node,
5927 "qcom,scaling-exceptions",
5928 i * ipa_drv_res->pm_init.threshold_size,
5929 &ex[i].usecase);
5930 if (result) {
5931 IPAERR("failed to read qcom,scaling-exceptions");
5932 return -EFAULT;
5933 }
5934
5935 for (j = 0; j < ipa_drv_res->pm_init.threshold_size; j++) {
5936 const char *str;
5937
5938 result = of_property_read_string_index(
5939 pdev->dev.of_node,
5940 "qcom,scaling-exceptions",
5941 i * ipa_drv_res->pm_init.threshold_size + j + 1,
5942 &str);
5943 if (result) {
5944 IPAERR("failed to read qcom,scaling-exceptions"
5945 );
5946 return -EFAULT;
5947 }
5948
5949 if (kstrtou32(str, 0, &ex[i].threshold[j])) {
5950 IPAERR("error str=%s\n", str);
5951 return -EFAULT;
5952 }
5953 }
5954 }
5955
5956 return 0;
5957}
5958
Amir Levy9659e592016-10-27 18:08:27 +03005959static int get_ipa_dts_configuration(struct platform_device *pdev,
5960 struct ipa3_plat_drv_res *ipa_drv_res)
5961{
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005962 int i, result, pos;
Amir Levy9659e592016-10-27 18:08:27 +03005963 struct resource *resource;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005964 u32 *ipa_tz_unlock_reg;
5965 int elem_num;
Ghanim Fodic823bc62017-10-21 17:29:53 +03005966 u32 mhi_evid_limits[2];
Amir Levy9659e592016-10-27 18:08:27 +03005967
5968 /* initialize ipa3_res */
5969 ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
5970 ipa_drv_res->ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
5971 ipa_drv_res->ipa_hw_type = 0;
5972 ipa_drv_res->ipa3_hw_mode = 0;
Amir Levy9659e592016-10-27 18:08:27 +03005973 ipa_drv_res->modem_cfg_emb_pipe_flt = false;
5974 ipa_drv_res->ipa_wdi2 = false;
Mohammed Javid80d0e2a2019-06-10 14:11:42 +05305975 ipa_drv_res->ipa_config_is_auto = false;
Chaitanya Pratapa3f54f3b2019-11-10 20:24:38 -08005976 ipa_drv_res->use_xbl_boot = false;
Mohammed Javid73cd4d22018-04-03 17:15:49 +05305977 ipa_drv_res->ipa_mhi_dynamic_config = false;
Amir Levy9659e592016-10-27 18:08:27 +03005978 ipa_drv_res->use_64_bit_dma_mask = false;
Ghanim Fodi6a831342017-03-07 18:19:15 +02005979 ipa_drv_res->use_bw_vote = false;
Amir Levy9659e592016-10-27 18:08:27 +03005980 ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
5981 ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
5982 ipa_drv_res->apply_rg10_wa = false;
5983 ipa_drv_res->gsi_ch20_wa = false;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005984 ipa_drv_res->ipa_tz_unlock_reg_num = 0;
5985 ipa_drv_res->ipa_tz_unlock_reg = NULL;
Ghanim Fodic823bc62017-10-21 17:29:53 +03005986 ipa_drv_res->mhi_evid_limits[0] = IPA_MHI_GSI_EVENT_RING_ID_START;
5987 ipa_drv_res->mhi_evid_limits[1] = IPA_MHI_GSI_EVENT_RING_ID_END;
Amir Levy9659e592016-10-27 18:08:27 +03005988
5989 /* Get IPA HW Version */
5990 result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
5991 &ipa_drv_res->ipa_hw_type);
5992 if ((result) || (ipa_drv_res->ipa_hw_type == 0)) {
5993 IPAERR(":get resource failed for ipa-hw-ver!\n");
5994 return -ENODEV;
5995 }
5996 IPADBG(": ipa_hw_type = %d", ipa_drv_res->ipa_hw_type);
5997
5998 if (ipa_drv_res->ipa_hw_type < IPA_HW_v3_0) {
5999 IPAERR(":IPA version below 3.0 not supported!\n");
6000 return -ENODEV;
6001 }
6002
6003 /* Get IPA HW mode */
6004 result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-mode",
6005 &ipa_drv_res->ipa3_hw_mode);
6006 if (result)
6007 IPADBG("using default (IPA_MODE_NORMAL) for ipa-hw-mode\n");
6008 else
6009 IPADBG(": found ipa_drv_res->ipa3_hw_mode = %d",
6010 ipa_drv_res->ipa3_hw_mode);
6011
6012 /* Get IPA WAN / LAN RX pool size */
6013 result = of_property_read_u32(pdev->dev.of_node,
6014 "qcom,wan-rx-ring-size",
6015 &ipa_drv_res->wan_rx_ring_size);
6016 if (result)
6017 IPADBG("using default for wan-rx-ring-size = %u\n",
6018 ipa_drv_res->wan_rx_ring_size);
6019 else
6020 IPADBG(": found ipa_drv_res->wan-rx-ring-size = %u",
6021 ipa_drv_res->wan_rx_ring_size);
6022
6023 result = of_property_read_u32(pdev->dev.of_node,
6024 "qcom,lan-rx-ring-size",
6025 &ipa_drv_res->lan_rx_ring_size);
6026 if (result)
6027 IPADBG("using default for lan-rx-ring-size = %u\n",
6028 ipa_drv_res->lan_rx_ring_size);
6029 else
6030 IPADBG(": found ipa_drv_res->lan-rx-ring-size = %u",
6031 ipa_drv_res->lan_rx_ring_size);
6032
6033 ipa_drv_res->use_ipa_teth_bridge =
6034 of_property_read_bool(pdev->dev.of_node,
6035 "qcom,use-ipa-tethering-bridge");
6036 IPADBG(": using TBDr = %s",
6037 ipa_drv_res->use_ipa_teth_bridge
6038 ? "True" : "False");
6039
Mohammed Javid73cd4d22018-04-03 17:15:49 +05306040 ipa_drv_res->ipa_mhi_dynamic_config =
6041 of_property_read_bool(pdev->dev.of_node,
6042 "qcom,use-ipa-in-mhi-mode");
6043 IPADBG(": ipa_mhi_dynamic_config (%s)\n",
6044 ipa_drv_res->ipa_mhi_dynamic_config
6045 ? "True" : "False");
6046
Amir Levy9659e592016-10-27 18:08:27 +03006047 ipa_drv_res->modem_cfg_emb_pipe_flt =
6048 of_property_read_bool(pdev->dev.of_node,
6049 "qcom,modem-cfg-emb-pipe-flt");
6050 IPADBG(": modem configure embedded pipe filtering = %s\n",
6051 ipa_drv_res->modem_cfg_emb_pipe_flt
6052 ? "True" : "False");
6053
6054 ipa_drv_res->ipa_wdi2 =
6055 of_property_read_bool(pdev->dev.of_node,
6056 "qcom,ipa-wdi2");
6057 IPADBG(": WDI-2.0 = %s\n",
6058 ipa_drv_res->ipa_wdi2
6059 ? "True" : "False");
6060
Mohammed Javid80d0e2a2019-06-10 14:11:42 +05306061 ipa_drv_res->ipa_config_is_auto =
6062 of_property_read_bool(pdev->dev.of_node,
6063 "qcom,ipa-config-is-auto");
6064 IPADBG(": ipa-config-is-auto = %s\n",
6065 ipa_drv_res->ipa_config_is_auto
6066 ? "True" : "False");
6067
Chaitanya Pratapa3f54f3b2019-11-10 20:24:38 -08006068 ipa_drv_res->use_xbl_boot =
6069 of_property_read_bool(pdev->dev.of_node,
6070 "qcom,use-xbl-boot");
6071 IPADBG("Is xbl loading used ? (%s)\n",
6072 ipa_drv_res->use_xbl_boot ? "Yes":"No");
6073
Amir Levy9659e592016-10-27 18:08:27 +03006074 ipa_drv_res->use_64_bit_dma_mask =
6075 of_property_read_bool(pdev->dev.of_node,
6076 "qcom,use-64-bit-dma-mask");
6077 IPADBG(": use_64_bit_dma_mask = %s\n",
6078 ipa_drv_res->use_64_bit_dma_mask
6079 ? "True" : "False");
6080
Ghanim Fodi6a831342017-03-07 18:19:15 +02006081 ipa_drv_res->use_bw_vote =
6082 of_property_read_bool(pdev->dev.of_node,
6083 "qcom,bandwidth-vote-for-ipa");
6084 IPADBG(": use_bw_vote = %s\n",
6085 ipa_drv_res->use_bw_vote
6086 ? "True" : "False");
6087
Amir Levy9659e592016-10-27 18:08:27 +03006088 ipa_drv_res->skip_uc_pipe_reset =
6089 of_property_read_bool(pdev->dev.of_node,
6090 "qcom,skip-uc-pipe-reset");
6091 IPADBG(": skip uC pipe reset = %s\n",
6092 ipa_drv_res->skip_uc_pipe_reset
6093 ? "True" : "False");
6094
6095 ipa_drv_res->tethered_flow_control =
6096 of_property_read_bool(pdev->dev.of_node,
6097 "qcom,tethered-flow-control");
6098 IPADBG(": Use apps based flow control = %s\n",
6099 ipa_drv_res->tethered_flow_control
6100 ? "True" : "False");
6101
Amir Levy9659e592016-10-27 18:08:27 +03006102 /* Get IPA wrapper address */
6103 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
6104 "ipa-base");
6105 if (!resource) {
6106 IPAERR(":get resource failed for ipa-base!\n");
6107 return -ENODEV;
6108 }
6109 ipa_drv_res->ipa_mem_base = resource->start;
6110 ipa_drv_res->ipa_mem_size = resource_size(resource);
6111 IPADBG(": ipa-base = 0x%x, size = 0x%x\n",
6112 ipa_drv_res->ipa_mem_base,
6113 ipa_drv_res->ipa_mem_size);
6114
6115 smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
6116 smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
6117
Amir Levya59ed3f2017-03-05 17:30:55 +02006118 /* Get IPA GSI address */
6119 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
6120 "gsi-base");
6121 if (!resource) {
6122 IPAERR(":get resource failed for gsi-base!\n");
6123 return -ENODEV;
Amir Levy9659e592016-10-27 18:08:27 +03006124 }
Amir Levya59ed3f2017-03-05 17:30:55 +02006125 ipa_drv_res->transport_mem_base = resource->start;
6126 ipa_drv_res->transport_mem_size = resource_size(resource);
6127 IPADBG(": gsi-base = 0x%x, size = 0x%x\n",
6128 ipa_drv_res->transport_mem_base,
6129 ipa_drv_res->transport_mem_size);
6130
6131 /* Get IPA GSI IRQ number */
6132 resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
6133 "gsi-irq");
6134 if (!resource) {
6135 IPAERR(":get resource failed for gsi-irq!\n");
6136 return -ENODEV;
6137 }
6138 ipa_drv_res->transport_irq = resource->start;
6139 IPADBG(": gsi-irq = %d\n", ipa_drv_res->transport_irq);
Amir Levy9659e592016-10-27 18:08:27 +03006140
6141 /* Get IPA pipe mem start ofst */
6142 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
6143 "ipa-pipe-mem");
6144 if (!resource) {
6145		IPADBG(":not using pipe memory - resource does not exist\n");
6146 } else {
6147 ipa_drv_res->ipa_pipe_mem_start_ofst = resource->start;
6148 ipa_drv_res->ipa_pipe_mem_size = resource_size(resource);
6149 IPADBG(":using pipe memory - at 0x%x of size 0x%x\n",
6150 ipa_drv_res->ipa_pipe_mem_start_ofst,
6151 ipa_drv_res->ipa_pipe_mem_size);
6152 }
6153
6154 /* Get IPA IRQ number */
6155 resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
6156 "ipa-irq");
6157 if (!resource) {
6158 IPAERR(":get resource failed for ipa-irq!\n");
6159 return -ENODEV;
6160 }
6161 ipa_drv_res->ipa_irq = resource->start;
6162 IPADBG(":ipa-irq = %d\n", ipa_drv_res->ipa_irq);
6163
6164 result = of_property_read_u32(pdev->dev.of_node, "qcom,ee",
6165 &ipa_drv_res->ee);
6166 if (result)
6167 ipa_drv_res->ee = 0;
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04006168 IPADBG(":ee = %u\n", ipa_drv_res->ee);
Amir Levy9659e592016-10-27 18:08:27 +03006169
6170 ipa_drv_res->apply_rg10_wa =
6171 of_property_read_bool(pdev->dev.of_node,
6172 "qcom,use-rg10-limitation-mitigation");
6173 IPADBG(": Use Register Group 10 limitation mitigation = %s\n",
6174 ipa_drv_res->apply_rg10_wa
6175 ? "True" : "False");
6176
6177 ipa_drv_res->gsi_ch20_wa =
6178 of_property_read_bool(pdev->dev.of_node,
6179 "qcom,do-not-use-ch-gsi-20");
6180 IPADBG(": GSI CH 20 WA is = %s\n",
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04006181 ipa_drv_res->gsi_ch20_wa
Amir Levy9659e592016-10-27 18:08:27 +03006182 ? "Needed" : "Not needed");
6183
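	/*
	 * qcom,mhi-event-ring-id-limits, when present, must be a pair of
	 * u32 values <start end>; any other non-zero element count is
	 * rejected, and when the property is absent the defaults set
	 * above are kept.
	 */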
Gidon Studinski3021a6f2016-11-10 12:48:48 +02006184 elem_num = of_property_count_elems_of_size(pdev->dev.of_node,
Ghanim Fodic823bc62017-10-21 17:29:53 +03006185 "qcom,mhi-event-ring-id-limits", sizeof(u32));
6186
6187 if (elem_num == 2) {
6188 if (of_property_read_u32_array(pdev->dev.of_node,
6189 "qcom,mhi-event-ring-id-limits", mhi_evid_limits, 2)) {
6190 IPAERR("failed to read mhi event ring id limits\n");
6191 return -EFAULT;
6192 }
6193 if (mhi_evid_limits[0] > mhi_evid_limits[1]) {
6194 IPAERR("mhi event ring id low limit > high limit\n");
6195 return -EFAULT;
6196 }
6197 ipa_drv_res->mhi_evid_limits[0] = mhi_evid_limits[0];
6198 ipa_drv_res->mhi_evid_limits[1] = mhi_evid_limits[1];
6199 IPADBG(": mhi-event-ring-id-limits start=%u end=%u\n",
6200 mhi_evid_limits[0], mhi_evid_limits[1]);
6201 } else {
6202 if (elem_num > 0) {
6203 IPAERR("Invalid mhi event ring id limits number %d\n",
6204 elem_num);
6205 return -EINVAL;
6206 }
6207 IPADBG("use default mhi evt ring id limits start=%u end=%u\n",
6208 ipa_drv_res->mhi_evid_limits[0],
6209 ipa_drv_res->mhi_evid_limits[1]);
6210 }
6211
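	/*
	 * qcom,ipa-tz-unlock-reg is a flat list of <addr size> u32 pairs
	 * describing register regions for the TZ unlock configuration,
	 * e.g. (illustrative only) qcom,ipa-tz-unlock-reg =
	 * <0x10040000 0x1000>;
	 */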
6212 elem_num = of_property_count_elems_of_size(pdev->dev.of_node,
Gidon Studinski3021a6f2016-11-10 12:48:48 +02006213 "qcom,ipa-tz-unlock-reg", sizeof(u32));
6214
6215 if (elem_num > 0 && elem_num % 2 == 0) {
6216 ipa_drv_res->ipa_tz_unlock_reg_num = elem_num / 2;
6217
6218 ipa_tz_unlock_reg = kcalloc(elem_num, sizeof(u32), GFP_KERNEL);
6219 if (ipa_tz_unlock_reg == NULL)
6220 return -ENOMEM;
6221
6222 ipa_drv_res->ipa_tz_unlock_reg = kcalloc(
6223 ipa_drv_res->ipa_tz_unlock_reg_num,
6224 sizeof(*ipa_drv_res->ipa_tz_unlock_reg),
6225 GFP_KERNEL);
6226 if (ipa_drv_res->ipa_tz_unlock_reg == NULL) {
6227 kfree(ipa_tz_unlock_reg);
6228 return -ENOMEM;
6229 }
6230
6231 if (of_property_read_u32_array(pdev->dev.of_node,
6232 "qcom,ipa-tz-unlock-reg", ipa_tz_unlock_reg,
6233 elem_num)) {
6234 IPAERR("failed to read register addresses\n");
6235 kfree(ipa_tz_unlock_reg);
6236 kfree(ipa_drv_res->ipa_tz_unlock_reg);
6237 return -EFAULT;
6238 }
6239
6240 pos = 0;
6241 for (i = 0; i < ipa_drv_res->ipa_tz_unlock_reg_num; i++) {
6242 ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr =
6243 ipa_tz_unlock_reg[pos++];
6244 ipa_drv_res->ipa_tz_unlock_reg[i].size =
6245 ipa_tz_unlock_reg[pos++];
Skylar Chang48afa052017-10-25 09:32:57 -07006246 IPADBG("tz unlock reg %d: addr 0x%pa size %llu\n", i,
Gidon Studinski3021a6f2016-11-10 12:48:48 +02006247 &ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr,
6248 ipa_drv_res->ipa_tz_unlock_reg[i].size);
6249 }
6250 kfree(ipa_tz_unlock_reg);
6251 }
Michael Adisumarta3e350812017-09-18 14:54:36 -07006252
6253 /* get IPA PM related information */
6254 result = get_ipa_dts_pm_info(pdev, ipa_drv_res);
6255 if (result) {
6256 IPAERR("failed to get pm info from dts %d\n", result);
6257 return result;
6258 }
6259
Mohammed Javid03854df2018-06-20 18:36:57 +05306260 ipa_drv_res->wdi_over_pcie =
Ghanim Fodi0ef92fc2018-07-08 11:21:31 +03006261 of_property_read_bool(pdev->dev.of_node,
6262 "qcom,wlan-ce-db-over-pcie");
Mohammed Javid03854df2018-06-20 18:36:57 +05306263 IPADBG("Is wdi_over_pcie ? (%s)\n",
Ghanim Fodi0ef92fc2018-07-08 11:21:31 +03006264 ipa_drv_res->wdi_over_pcie ? "Yes":"No");
Mohammed Javid03854df2018-06-20 18:36:57 +05306265
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04006266 /*
6267 * If we're on emulator, get its interrupt controller's mem
6268 * start and size
6269 */
6270 if (ipa_drv_res->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
6271 resource = platform_get_resource_byname(
6272 pdev, IORESOURCE_MEM, "intctrl-base");
6273 if (!resource) {
6274 IPAERR(":Can't find intctrl-base resource\n");
6275 return -ENODEV;
6276 }
6277 ipa_drv_res->emulator_intcntrlr_mem_base =
6278 resource->start;
6279 ipa_drv_res->emulator_intcntrlr_mem_size =
6280 resource_size(resource);
6281 IPADBG(":using intctrl-base at 0x%x of size 0x%x\n",
6282 ipa_drv_res->emulator_intcntrlr_mem_base,
6283 ipa_drv_res->emulator_intcntrlr_mem_size);
6284 }
6285
Amir Levy9659e592016-10-27 18:08:27 +03006286 return 0;
6287}
6288
6289static int ipa_smmu_wlan_cb_probe(struct device *dev)
6290{
Skylar Changefc0a0f2018-03-29 11:17:40 -07006291 struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN);
Amir Levy9659e592016-10-27 18:08:27 +03006292 int atomic_ctx = 1;
6293 int fast = 1;
6294 int bypass = 1;
6295 int ret;
6296 u32 add_map_size;
6297 const u32 *add_map;
6298 int i;
6299
6300 IPADBG("sub pdev=%p\n", dev);
6301
Skylar Changefc0a0f2018-03-29 11:17:40 -07006302 if (!smmu_info.present[IPA_SMMU_CB_WLAN]) {
6303 IPAERR("WLAN SMMU is disabled\n");
6304 return 0;
6305 }
6306
Amir Levy9659e592016-10-27 18:08:27 +03006307 cb->dev = dev;
Amir Levyf5625342016-12-25 10:21:02 +02006308 cb->iommu = iommu_domain_alloc(dev->bus);
Amir Levy9659e592016-10-27 18:08:27 +03006309 if (!cb->iommu) {
6310 IPAERR("could not alloc iommu domain\n");
6311 /* assume this failure is because iommu driver is not ready */
6312 return -EPROBE_DEFER;
6313 }
6314 cb->valid = true;
6315
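	/*
	 * Either run this context bank with stage-1 translation bypassed
	 * (DT flag or MHI configuration), or keep S1 enabled and request
	 * the atomic (and optionally fast) domain mapping attributes.
	 */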
Skylar Changefc0a0f2018-03-29 11:17:40 -07006316 if (of_property_read_bool(dev->of_node, "qcom,smmu-s1-bypass") ||
6317 ipa3_ctx->ipa_config_is_mhi) {
Michael Adisumarta93e97522017-10-06 15:49:46 -07006318 smmu_info.s1_bypass_arr[IPA_SMMU_CB_WLAN] = true;
Michael Adisumarta972e33e2017-10-20 15:24:27 -07006319 ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN] = true;
6320
Amir Levy9659e592016-10-27 18:08:27 +03006321 if (iommu_domain_set_attr(cb->iommu,
6322 DOMAIN_ATTR_S1_BYPASS,
6323 &bypass)) {
6324 IPAERR("couldn't set bypass\n");
6325 cb->valid = false;
6326 return -EIO;
6327 }
Michael Adisumarta93e97522017-10-06 15:49:46 -07006328 IPADBG("WLAN SMMU S1 BYPASS\n");
Amir Levy9659e592016-10-27 18:08:27 +03006329 } else {
Michael Adisumarta93e97522017-10-06 15:49:46 -07006330 smmu_info.s1_bypass_arr[IPA_SMMU_CB_WLAN] = false;
Michael Adisumarta972e33e2017-10-20 15:24:27 -07006331 ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN] = false;
6332
Amir Levy9659e592016-10-27 18:08:27 +03006333 if (iommu_domain_set_attr(cb->iommu,
6334 DOMAIN_ATTR_ATOMIC,
6335 &atomic_ctx)) {
6336 IPAERR("couldn't disable coherent HTW\n");
6337 cb->valid = false;
6338 return -EIO;
6339 }
Michael Adisumarta93e97522017-10-06 15:49:46 -07006340 IPADBG(" WLAN SMMU ATTR ATOMIC\n");
Amir Levy9659e592016-10-27 18:08:27 +03006341
6342 if (smmu_info.fast_map) {
6343 if (iommu_domain_set_attr(cb->iommu,
6344 DOMAIN_ATTR_FAST,
6345 &fast)) {
6346 IPAERR("couldn't set fast map\n");
6347 cb->valid = false;
6348 return -EIO;
6349 }
6350 IPADBG("SMMU fast map set\n");
6351 }
6352 }
6353
Michael Adisumarta93e97522017-10-06 15:49:46 -07006354 pr_info("IPA smmu_info.s1_bypass_arr[WLAN]=%d smmu_info.fast_map=%d\n",
6355 smmu_info.s1_bypass_arr[IPA_SMMU_CB_WLAN], smmu_info.fast_map);
6356
Amir Levy9659e592016-10-27 18:08:27 +03006357 ret = iommu_attach_device(cb->iommu, dev);
6358 if (ret) {
6359 IPAERR("could not attach device ret=%d\n", ret);
6360 cb->valid = false;
6361 return ret;
6362 }
6363 /* MAP ipa-uc ram */
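	/*
	 * qcom,additional-mapping, if present, lists <iova pa size> u32
	 * triples that are rounded to page boundaries and mapped into
	 * this context bank.
	 */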
6364 add_map = of_get_property(dev->of_node,
6365 "qcom,additional-mapping", &add_map_size);
6366 if (add_map) {
6367		/* the additional-mapping property is an array of u32 3-tuples */
6368 if (add_map_size % (3 * sizeof(u32))) {
6369 IPAERR("wrong additional mapping format\n");
6370 cb->valid = false;
6371 return -EFAULT;
6372 }
6373
6374		/* iterate over each entry of the additional mapping array */
6375 for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
6376 u32 iova = be32_to_cpu(add_map[i]);
6377 u32 pa = be32_to_cpu(add_map[i + 1]);
6378 u32 size = be32_to_cpu(add_map[i + 2]);
6379 unsigned long iova_p;
6380 phys_addr_t pa_p;
6381 u32 size_p;
6382
6383 IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
6384 iova_p, pa_p, size_p);
6385 IPADBG("mapping 0x%lx to 0x%pa size %d\n",
6386 iova_p, &pa_p, size_p);
6387 ipa3_iommu_map(cb->iommu,
6388 iova_p, pa_p, size_p,
Amir Levyf5625342016-12-25 10:21:02 +02006389 IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
Amir Levy9659e592016-10-27 18:08:27 +03006390 }
6391 }
6392 return 0;
6393}
6394
6395static int ipa_smmu_uc_cb_probe(struct device *dev)
6396{
Skylar Changefc0a0f2018-03-29 11:17:40 -07006397 struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
Amir Levy9659e592016-10-27 18:08:27 +03006398 int atomic_ctx = 1;
6399 int bypass = 1;
6400 int fast = 1;
6401 int ret;
6402 u32 iova_ap_mapping[2];
6403
6404 IPADBG("UC CB PROBE sub pdev=%p\n", dev);
6405
Skylar Changefc0a0f2018-03-29 11:17:40 -07006406 if (!smmu_info.present[IPA_SMMU_CB_UC]) {
6407 IPAERR("UC SMMU is disabled\n");
6408 return 0;
6409 }
6410
Amir Levy9659e592016-10-27 18:08:27 +03006411 ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
6412 iova_ap_mapping, 2);
6413 if (ret) {
6414 IPAERR("Fail to read UC start/size iova addresses\n");
6415 return ret;
6416 }
6417 cb->va_start = iova_ap_mapping[0];
6418 cb->va_size = iova_ap_mapping[1];
6419 cb->va_end = cb->va_start + cb->va_size;
6420	IPADBG("UC va_start=0x%x va_size=0x%x\n", cb->va_start, cb->va_size);
6421
6422 if (smmu_info.use_64_bit_dma_mask) {
6423 if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
6424 dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
6425 IPAERR("DMA set 64bit mask failed\n");
6426 return -EOPNOTSUPP;
6427 }
6428 } else {
6429 if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
6430 dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
6431 IPAERR("DMA set 32bit mask failed\n");
6432 return -EOPNOTSUPP;
6433 }
6434 }
6435 IPADBG("UC CB PROBE=%p create IOMMU mapping\n", dev);
6436
6437 cb->dev = dev;
Amir Levyf5625342016-12-25 10:21:02 +02006438 cb->mapping = arm_iommu_create_mapping(dev->bus,
Amir Levy9659e592016-10-27 18:08:27 +03006439 cb->va_start, cb->va_size);
6440 if (IS_ERR_OR_NULL(cb->mapping)) {
6441 IPADBG("Fail to create mapping\n");
6442 /* assume this failure is because iommu driver is not ready */
6443 return -EPROBE_DEFER;
6444 }
6445 IPADBG("SMMU mapping created\n");
6446 cb->valid = true;
6447
Amir Levy9659e592016-10-27 18:08:27 +03006448 IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
Michael Adisumarta93e97522017-10-06 15:49:46 -07006449
Skylar Changefc0a0f2018-03-29 11:17:40 -07006450 if (of_property_read_bool(dev->of_node, "qcom,smmu-s1-bypass") ||
6451 ipa3_ctx->ipa_config_is_mhi) {
Michael Adisumarta93e97522017-10-06 15:49:46 -07006452 smmu_info.s1_bypass_arr[IPA_SMMU_CB_UC] = true;
Michael Adisumarta972e33e2017-10-20 15:24:27 -07006453 ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] = true;
6454
Amir Levy9659e592016-10-27 18:08:27 +03006455 if (iommu_domain_set_attr(cb->mapping->domain,
Michael Adisumarta93e97522017-10-06 15:49:46 -07006456 DOMAIN_ATTR_S1_BYPASS,
6457 &bypass)) {
Amir Levy9659e592016-10-27 18:08:27 +03006458 IPAERR("couldn't set bypass\n");
6459 arm_iommu_release_mapping(cb->mapping);
6460 cb->valid = false;
6461 return -EIO;
6462 }
Michael Adisumarta93e97522017-10-06 15:49:46 -07006463 IPADBG("UC SMMU S1 BYPASS\n");
Amir Levy9659e592016-10-27 18:08:27 +03006464 } else {
Michael Adisumarta93e97522017-10-06 15:49:46 -07006465 smmu_info.s1_bypass_arr[IPA_SMMU_CB_UC] = false;
Michael Adisumarta972e33e2017-10-20 15:24:27 -07006466 ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] = false;
6467
Amir Levy9659e592016-10-27 18:08:27 +03006468 if (iommu_domain_set_attr(cb->mapping->domain,
Michael Adisumarta93e97522017-10-06 15:49:46 -07006469 DOMAIN_ATTR_ATOMIC,
6470 &atomic_ctx)) {
Amir Levy9659e592016-10-27 18:08:27 +03006471 IPAERR("couldn't set domain as atomic\n");
6472 arm_iommu_release_mapping(cb->mapping);
6473 cb->valid = false;
6474 return -EIO;
6475 }
6476 IPADBG("SMMU atomic set\n");
6477
6478 if (smmu_info.fast_map) {
6479 if (iommu_domain_set_attr(cb->mapping->domain,
Michael Adisumarta93e97522017-10-06 15:49:46 -07006480 DOMAIN_ATTR_FAST,
6481 &fast)) {
Amir Levy9659e592016-10-27 18:08:27 +03006482 IPAERR("couldn't set fast map\n");
6483 arm_iommu_release_mapping(cb->mapping);
6484 cb->valid = false;
6485 return -EIO;
6486 }
6487 IPADBG("SMMU fast map set\n");
6488 }
6489 }
6490
Michael Adisumarta93e97522017-10-06 15:49:46 -07006491 pr_info("IPA smmu_info.s1_bypass_arr[UC]=%d smmu_info.fast_map=%d\n",
6492 smmu_info.s1_bypass_arr[IPA_SMMU_CB_UC], smmu_info.fast_map);
6493
Amir Levy9659e592016-10-27 18:08:27 +03006494 IPADBG("UC CB PROBE sub pdev=%p attaching IOMMU device\n", dev);
6495 ret = arm_iommu_attach_device(cb->dev, cb->mapping);
6496 if (ret) {
6497 IPAERR("could not attach device ret=%d\n", ret);
6498 arm_iommu_release_mapping(cb->mapping);
6499 cb->valid = false;
6500 return ret;
6501 }
6502
6503 cb->next_addr = cb->va_end;
6504 ipa3_ctx->uc_pdev = dev;
6505
6506 return 0;
6507}
6508
6509static int ipa_smmu_ap_cb_probe(struct device *dev)
6510{
Skylar Changefc0a0f2018-03-29 11:17:40 -07006511 struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
Amir Levy9659e592016-10-27 18:08:27 +03006512 int result;
Amir Levy9659e592016-10-27 18:08:27 +03006513 int atomic_ctx = 1;
6514 int fast = 1;
6515 int bypass = 1;
6516 u32 iova_ap_mapping[2];
6517 u32 add_map_size;
Mohammed Javid36d13cf2018-01-26 22:49:03 +05306518 u32 q6_smem_size;
Amir Levy9659e592016-10-27 18:08:27 +03006519 const u32 *add_map;
6520 void *smem_addr;
6521 int i;
6522
6523 IPADBG("AP CB probe: sub pdev=%p\n", dev);
6524
Skylar Changefc0a0f2018-03-29 11:17:40 -07006525 if (!smmu_info.present[IPA_SMMU_CB_AP]) {
6526		IPAERR("AP SMMU is disabled\n");
6527 return 0;
6528 }
6529
Amir Levy9659e592016-10-27 18:08:27 +03006530 result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
6531 iova_ap_mapping, 2);
6532 if (result) {
6533 IPAERR("Fail to read AP start/size iova addresses\n");
6534 return result;
6535 }
6536 cb->va_start = iova_ap_mapping[0];
6537 cb->va_size = iova_ap_mapping[1];
6538 cb->va_end = cb->va_start + cb->va_size;
6539	IPADBG("AP va_start=0x%x va_size=0x%x\n", cb->va_start, cb->va_size);
6540
6541 if (smmu_info.use_64_bit_dma_mask) {
6542 if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
6543 dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
6544 IPAERR("DMA set 64bit mask failed\n");
6545 return -EOPNOTSUPP;
6546 }
6547 } else {
6548 if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
6549 dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
6550 IPAERR("DMA set 32bit mask failed\n");
6551 return -EOPNOTSUPP;
6552 }
6553 }
6554
6555 cb->dev = dev;
Amir Levyf5625342016-12-25 10:21:02 +02006556 cb->mapping = arm_iommu_create_mapping(dev->bus,
Amir Levy9659e592016-10-27 18:08:27 +03006557 cb->va_start, cb->va_size);
6558 if (IS_ERR_OR_NULL(cb->mapping)) {
6559 IPADBG("Fail to create mapping\n");
6560 /* assume this failure is because iommu driver is not ready */
6561 return -EPROBE_DEFER;
6562 }
6563 IPADBG("SMMU mapping created\n");
6564 cb->valid = true;
6565
Michael Adisumarta93e97522017-10-06 15:49:46 -07006566 if (of_property_read_bool(dev->of_node,
Skylar Changefc0a0f2018-03-29 11:17:40 -07006567 "qcom,smmu-s1-bypass") || ipa3_ctx->ipa_config_is_mhi) {
Michael Adisumarta93e97522017-10-06 15:49:46 -07006568 smmu_info.s1_bypass_arr[IPA_SMMU_CB_AP] = true;
Skylar Change87894f2018-04-02 15:49:12 -07006569 ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP] = true;
Amir Levy9659e592016-10-27 18:08:27 +03006570 if (iommu_domain_set_attr(cb->mapping->domain,
6571 DOMAIN_ATTR_S1_BYPASS,
6572 &bypass)) {
6573 IPAERR("couldn't set bypass\n");
6574 arm_iommu_release_mapping(cb->mapping);
6575 cb->valid = false;
6576 return -EIO;
6577 }
Michael Adisumarta93e97522017-10-06 15:49:46 -07006578 IPADBG("AP/USB SMMU S1 BYPASS\n");
Amir Levy9659e592016-10-27 18:08:27 +03006579 } else {
Michael Adisumarta93e97522017-10-06 15:49:46 -07006580 smmu_info.s1_bypass_arr[IPA_SMMU_CB_AP] = false;
Skylar Change87894f2018-04-02 15:49:12 -07006581 ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP] = false;
Amir Levy9659e592016-10-27 18:08:27 +03006582 if (iommu_domain_set_attr(cb->mapping->domain,
6583 DOMAIN_ATTR_ATOMIC,
6584 &atomic_ctx)) {
6585 IPAERR("couldn't set domain as atomic\n");
6586 arm_iommu_release_mapping(cb->mapping);
6587 cb->valid = false;
6588 return -EIO;
6589 }
Michael Adisumarta93e97522017-10-06 15:49:46 -07006590 IPADBG("AP/USB SMMU atomic set\n");
Amir Levy9659e592016-10-27 18:08:27 +03006591
Skylar Chang578e1a42018-06-15 10:33:26 -07006592 if (smmu_info.fast_map) {
6593 if (iommu_domain_set_attr(cb->mapping->domain,
Amir Levy9659e592016-10-27 18:08:27 +03006594 DOMAIN_ATTR_FAST,
6595 &fast)) {
Skylar Chang578e1a42018-06-15 10:33:26 -07006596 IPAERR("couldn't set fast map\n");
6597 arm_iommu_release_mapping(cb->mapping);
6598 cb->valid = false;
6599 return -EIO;
6600 }
6601 IPADBG("SMMU fast map set\n");
Amir Levy9659e592016-10-27 18:08:27 +03006602 }
Amir Levy9659e592016-10-27 18:08:27 +03006603 }
6604
Michael Adisumarta93e97522017-10-06 15:49:46 -07006605 pr_info("IPA smmu_info.s1_bypass_arr[AP]=%d smmu_info.fast_map=%d\n",
6606 smmu_info.s1_bypass_arr[IPA_SMMU_CB_AP], smmu_info.fast_map);
6607
Amir Levy9659e592016-10-27 18:08:27 +03006608 result = arm_iommu_attach_device(cb->dev, cb->mapping);
6609 if (result) {
6610 IPAERR("couldn't attach to IOMMU ret=%d\n", result);
6611 cb->valid = false;
6612 return result;
6613 }
6614
6615 add_map = of_get_property(dev->of_node,
6616 "qcom,additional-mapping", &add_map_size);
6617 if (add_map) {
6618		/* the additional-mapping property is an array of u32 3-tuples */
6619 if (add_map_size % (3 * sizeof(u32))) {
6620 IPAERR("wrong additional mapping format\n");
6621 cb->valid = false;
6622 return -EFAULT;
6623 }
6624
6625		/* iterate over each entry of the additional mapping array */
6626 for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
6627 u32 iova = be32_to_cpu(add_map[i]);
6628 u32 pa = be32_to_cpu(add_map[i + 1]);
6629 u32 size = be32_to_cpu(add_map[i + 2]);
6630 unsigned long iova_p;
6631 phys_addr_t pa_p;
6632 u32 size_p;
6633
6634 IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
6635 iova_p, pa_p, size_p);
6636 IPADBG("mapping 0x%lx to 0x%pa size %d\n",
6637 iova_p, &pa_p, size_p);
6638 ipa3_iommu_map(cb->mapping->domain,
6639 iova_p, pa_p, size_p,
Amir Levyf5625342016-12-25 10:21:02 +02006640 IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
Amir Levy9659e592016-10-27 18:08:27 +03006641 }
6642 }
6643
Mohammed Javid36d13cf2018-01-26 22:49:03 +05306644 result = of_property_read_u32_array(dev->of_node,
6645 "qcom,ipa-q6-smem-size", &q6_smem_size, 1);
6646 if (result) {
6647 IPADBG("ipa q6 smem size = %d\n", IPA_SMEM_SIZE);
6648 /* map SMEM memory for IPA table accesses */
6649 smem_addr = smem_alloc(SMEM_IPA_FILTER_TABLE, IPA_SMEM_SIZE,
6650 SMEM_MODEM, 0);
Chaitanya Pratapa4c38c592019-07-31 19:56:44 +05306651 q6_smem_size = IPA_SMEM_SIZE;
Mohammed Javid36d13cf2018-01-26 22:49:03 +05306652 } else {
6653 IPADBG("ipa q6 smem size = %d\n", q6_smem_size);
6654 smem_addr = smem_alloc(SMEM_IPA_FILTER_TABLE, q6_smem_size,
6655 SMEM_MODEM, 0);
6656 }
Amir Levy9659e592016-10-27 18:08:27 +03006657 if (smem_addr) {
6658 phys_addr_t iova = smem_virt_to_phys(smem_addr);
6659 phys_addr_t pa = iova;
6660 unsigned long iova_p;
6661 phys_addr_t pa_p;
6662 u32 size_p;
6663
Chaitanya Pratapa4c38c592019-07-31 19:56:44 +05306664 IPA_SMMU_ROUND_TO_PAGE(iova, pa, q6_smem_size,
Amir Levy9659e592016-10-27 18:08:27 +03006665 iova_p, pa_p, size_p);
6666 IPADBG("mapping 0x%lx to 0x%pa size %d\n",
6667 iova_p, &pa_p, size_p);
6668 ipa3_iommu_map(cb->mapping->domain,
6669 iova_p, pa_p, size_p,
Skylar Chang2d1a7622018-05-30 17:01:58 -07006670 IOMMU_READ | IOMMU_WRITE);
Amir Levy9659e592016-10-27 18:08:27 +03006671 }
6672
6673
Skylar Changefc0a0f2018-03-29 11:17:40 -07006674 smmu_info.present[IPA_SMMU_CB_AP] = true;
6675 ipa3_ctx->pdev = dev;
Amir Levy9659e592016-10-27 18:08:27 +03006676
Michael Adisumartac8c404a2018-04-05 18:01:45 -07006677 return 0;
Amir Levy9659e592016-10-27 18:08:27 +03006678}
6679
Skylar Changefc0a0f2018-03-29 11:17:40 -07006680static int ipa_smmu_cb_probe(struct device *dev, enum ipa_smmu_cb_type cb_type)
6681{
6682 switch (cb_type) {
6683 case IPA_SMMU_CB_AP:
6684 return ipa_smmu_ap_cb_probe(dev);
6685 case IPA_SMMU_CB_WLAN:
6686 return ipa_smmu_wlan_cb_probe(dev);
6687 case IPA_SMMU_CB_UC:
6688 return ipa_smmu_uc_cb_probe(dev);
6689 case IPA_SMMU_CB_MAX:
6690 IPAERR("Invalid cb_type\n");
6691 }
6692 return 0;
6693}
6694
6695static int ipa3_attach_to_smmu(void)
6696{
6697 struct ipa_smmu_cb_ctx *cb;
6698 int i, result;
6699
6700 ipa3_ctx->pdev = &ipa3_ctx->master_pdev->dev;
6701 ipa3_ctx->uc_pdev = &ipa3_ctx->master_pdev->dev;
6702
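	/*
	 * Default both device pointers to the master platform device;
	 * they are overridden by the per-CB probes when SMMU is enabled.
	 */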
6703 if (smmu_info.arm_smmu) {
6704 IPADBG("smmu is enabled\n");
6705 for (i = 0; i < IPA_SMMU_CB_MAX; i++) {
6706 cb = ipa3_get_smmu_ctx(i);
6707 result = ipa_smmu_cb_probe(cb->dev, i);
6708 if (result)
6709 IPAERR("probe failed for cb %d\n", i);
6710 }
6711 } else {
6712 IPADBG("smmu is disabled\n");
6713 }
6714 return 0;
6715}
6716
Amir Levy9659e592016-10-27 18:08:27 +03006717static irqreturn_t ipa3_smp2p_modem_clk_query_isr(int irq, void *ctxt)
6718{
6719 ipa3_freeze_clock_vote_and_notify_modem();
6720
6721 return IRQ_HANDLED;
6722}
6723
6724static int ipa3_smp2p_probe(struct device *dev)
6725{
6726 struct device_node *node = dev->of_node;
6727 int res;
6728
Mohammed Javid7de12702017-07-21 15:22:58 +05306729 if (ipa3_ctx == NULL) {
6730 IPAERR("ipa3_ctx was not initialized\n");
6731 return -ENXIO;
6732 }
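	/*
	 * Two smp2p gpio-map nodes are handled here: the "_out" node only
	 * records the outgoing GPIO base, while the "_in" node records the
	 * incoming base and hooks its clock-query GPIO to an IRQ used for
	 * the modem clock vote handshake.
	 */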
Amir Levy9659e592016-10-27 18:08:27 +03006733 IPADBG("node->name=%s\n", node->name);
6734 if (strcmp("qcom,smp2pgpio_map_ipa_1_out", node->name) == 0) {
6735 res = of_get_gpio(node, 0);
6736 if (res < 0) {
6737 IPADBG("of_get_gpio returned %d\n", res);
6738 return res;
6739 }
6740
6741 ipa3_ctx->smp2p_info.out_base_id = res;
6742 IPADBG("smp2p out_base_id=%d\n",
6743 ipa3_ctx->smp2p_info.out_base_id);
6744 } else if (strcmp("qcom,smp2pgpio_map_ipa_1_in", node->name) == 0) {
6745 int irq;
6746
6747 res = of_get_gpio(node, 0);
6748 if (res < 0) {
6749 IPADBG("of_get_gpio returned %d\n", res);
6750 return res;
6751 }
6752
6753 ipa3_ctx->smp2p_info.in_base_id = res;
6754 IPADBG("smp2p in_base_id=%d\n",
6755 ipa3_ctx->smp2p_info.in_base_id);
6756
6757 /* register for modem clk query */
6758 irq = gpio_to_irq(ipa3_ctx->smp2p_info.in_base_id +
6759 IPA_GPIO_IN_QUERY_CLK_IDX);
6760 if (irq < 0) {
6761 IPAERR("gpio_to_irq failed %d\n", irq);
6762 return -ENODEV;
6763 }
6764 IPADBG("smp2p irq#=%d\n", irq);
6765 res = request_irq(irq,
6766 (irq_handler_t)ipa3_smp2p_modem_clk_query_isr,
6767 IRQF_TRIGGER_RISING, "ipa_smp2p_clk_vote", dev);
6768 if (res) {
6769 IPAERR("fail to register smp2p irq=%d\n", irq);
6770 return -ENODEV;
6771 }
6772 res = enable_irq_wake(ipa3_ctx->smp2p_info.in_base_id +
6773 IPA_GPIO_IN_QUERY_CLK_IDX);
6774 if (res)
6775 IPAERR("failed to enable irq wake\n");
6776 }
6777
6778 return 0;
6779}
6780
6781int ipa3_plat_drv_probe(struct platform_device *pdev_p,
6782 struct ipa_api_controller *api_ctrl,
6783 const struct of_device_id *pdrv_match)
6784{
6785 int result;
6786 struct device *dev = &pdev_p->dev;
Skylar Changefc0a0f2018-03-29 11:17:40 -07006787 struct ipa_smmu_cb_ctx *cb;
Amir Levy9659e592016-10-27 18:08:27 +03006788
6789 IPADBG("IPA driver probing started\n");
6790 IPADBG("dev->of_node->name = %s\n", dev->of_node->name);
6791
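	/*
	 * This probe is invoked once per matching child node: the SMMU
	 * context-bank and smp2p nodes only record their context here and
	 * return early; the full IPA initialization path runs only for
	 * the main IPA node.
	 */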
Skylar Changefc0a0f2018-03-29 11:17:40 -07006792 if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-ap-cb")) {
6793 cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
6794 cb->dev = dev;
6795 smmu_info.present[IPA_SMMU_CB_AP] = true;
Amir Levy9659e592016-10-27 18:08:27 +03006796
Skylar Changefc0a0f2018-03-29 11:17:40 -07006797 return 0;
6798 }
Amir Levy9659e592016-10-27 18:08:27 +03006799
Skylar Changefc0a0f2018-03-29 11:17:40 -07006800 if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-wlan-cb")) {
6801 cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN);
6802 cb->dev = dev;
6803 smmu_info.present[IPA_SMMU_CB_WLAN] = true;
6804
6805 return 0;
6806 }
6807
6808 if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-uc-cb")) {
6809 cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
6810 cb->dev = dev;
6811 smmu_info.present[IPA_SMMU_CB_UC] = true;
6812
Chaitanya Pratapa3f54f3b2019-11-10 20:24:38 -08006813 if (ipa3_ctx->use_xbl_boot) {
6814 /* Ensure uC probe is the last. */
6815 if (!smmu_info.present[IPA_SMMU_CB_AP] ||
6816 !smmu_info.present[IPA_SMMU_CB_WLAN]) {
6817				IPAERR("AP or WLAN CB probe not done. Defer\n");
6818 return -EPROBE_DEFER;
6819 }
6820
6821 pr_info("Using XBL boot load for IPA FW\n");
6822 ipa3_ctx->fw_loaded = true;
6823
6824 result = ipa3_attach_to_smmu();
6825 if (result) {
6826 IPAERR("IPA attach to smmu failed %d\n",
6827 result);
6828 return result;
6829 }
6830
6831 result = ipa3_post_init(&ipa3_res, ipa3_ctx->cdev.dev);
6832 if (result) {
6833 IPAERR("IPA post init failed %d\n", result);
6834 return result;
6835 }
6836 }
6837
6838
Skylar Changefc0a0f2018-03-29 11:17:40 -07006839 return 0;
6840 }
Amir Levy9659e592016-10-27 18:08:27 +03006841
6842 if (of_device_is_compatible(dev->of_node,
6843 "qcom,smp2pgpio-map-ipa-1-in"))
6844 return ipa3_smp2p_probe(dev);
6845
6846 if (of_device_is_compatible(dev->of_node,
6847 "qcom,smp2pgpio-map-ipa-1-out"))
6848 return ipa3_smp2p_probe(dev);
6849
Amir Levy9659e592016-10-27 18:08:27 +03006850 result = get_ipa_dts_configuration(pdev_p, &ipa3_res);
6851 if (result) {
6852 IPAERR("IPA dts parsing failed\n");
6853 return result;
6854 }
6855
6856 result = ipa3_bind_api_controller(ipa3_res.ipa_hw_type, api_ctrl);
6857 if (result) {
6858 IPAERR("IPA API binding failed\n");
6859 return result;
6860 }
6861
Amir Levy9659e592016-10-27 18:08:27 +03006862 if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
6863 if (of_property_read_bool(pdev_p->dev.of_node,
Amir Levy9659e592016-10-27 18:08:27 +03006864 "qcom,smmu-fast-map"))
6865 smmu_info.fast_map = true;
6866 if (of_property_read_bool(pdev_p->dev.of_node,
6867 "qcom,use-64-bit-dma-mask"))
6868 smmu_info.use_64_bit_dma_mask = true;
6869 smmu_info.arm_smmu = true;
Amir Levy9659e592016-10-27 18:08:27 +03006870 } else if (of_property_read_bool(pdev_p->dev.of_node,
6871 "qcom,msm-smmu")) {
6872 IPAERR("Legacy IOMMU not supported\n");
6873 result = -EOPNOTSUPP;
6874 } else {
6875 if (of_property_read_bool(pdev_p->dev.of_node,
6876 "qcom,use-64-bit-dma-mask")) {
6877 if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(64)) ||
6878 dma_set_coherent_mask(&pdev_p->dev,
6879 DMA_BIT_MASK(64))) {
6880 IPAERR("DMA set 64bit mask failed\n");
6881 return -EOPNOTSUPP;
6882 }
6883 } else {
6884 if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(32)) ||
6885 dma_set_coherent_mask(&pdev_p->dev,
6886 DMA_BIT_MASK(32))) {
6887 IPAERR("DMA set 32bit mask failed\n");
6888 return -EOPNOTSUPP;
6889 }
6890 }
Skylar Changefc0a0f2018-03-29 11:17:40 -07006891 }
Amir Levy9659e592016-10-27 18:08:27 +03006892
Skylar Changefc0a0f2018-03-29 11:17:40 -07006893 /* Proceed to real initialization */
6894 result = ipa3_pre_init(&ipa3_res, pdev_p);
6895 if (result) {
6896 IPAERR("ipa3_init failed\n");
6897 return result;
Amir Levy9659e592016-10-27 18:08:27 +03006898 }
6899
Ghanim Fodi115bf8a2017-04-21 01:36:06 -07006900 result = of_platform_populate(pdev_p->dev.of_node,
6901 pdrv_match, NULL, &pdev_p->dev);
6902 if (result) {
6903 IPAERR("failed to populate platform\n");
6904 return result;
6905 }
6906
Amir Levy9659e592016-10-27 18:08:27 +03006907 return result;
6908}
6909
6910/**
6911 * ipa3_ap_suspend() - suspend callback for runtime_pm
6912 * @dev: pointer to device
6913 *
6914 * This callback will be invoked by the runtime_pm framework when an AP suspend
6915 * operation is invoked, usually by pressing a suspend button.
6916 *
6917 * Returns -EAGAIN to runtime_pm framework in case IPA is in use by AP.
6918 * This will postpone the suspend operation until IPA is no longer used by AP.
Skylar Chang68c37d82018-04-07 16:42:36 -07006919 */
Amir Levy9659e592016-10-27 18:08:27 +03006920int ipa3_ap_suspend(struct device *dev)
6921{
6922 int i;
6923
6924 IPADBG("Enter...\n");
6925
6926 /* In case there is a tx/rx handler in polling mode fail to suspend */
6927 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
6928 if (ipa3_ctx->ep[i].sys &&
6929 atomic_read(&ipa3_ctx->ep[i].sys->curr_polling_state)) {
6930 IPAERR("EP %d is in polling state, do not suspend\n",
6931 i);
6932 return -EAGAIN;
6933 }
6934 }
6935
Michael Adisumarta3e350812017-09-18 14:54:36 -07006936 if (ipa3_ctx->use_ipa_pm) {
6937 ipa_pm_deactivate_all_deferred();
6938 } else {
6939 /*
6940 * Release transport IPA resource without waiting
6941 * for inactivity timer
6942 */
6943 atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
6944 ipa3_transport_release_resource(NULL);
6945 }
Amir Levy9659e592016-10-27 18:08:27 +03006946 IPADBG("Exit\n");
6947
6948 return 0;
6949}
6950
6951/**
Skylar Chang68c37d82018-04-07 16:42:36 -07006952 * ipa3_ap_resume() - resume callback for runtime_pm
6953 * @dev: pointer to device
6954 *
6955 * This callback will be invoked by the runtime_pm framework when an AP resume
6956 * operation is invoked.
6957 *
6958 * Always returns 0 since resume should always succeed.
6959 */
Amir Levy9659e592016-10-27 18:08:27 +03006960int ipa3_ap_resume(struct device *dev)
6961{
6962 return 0;
6963}
6964
6965struct ipa3_context *ipa3_get_ctx(void)
6966{
6967 return ipa3_ctx;
6968}
6969
Amir Levy8fb98e02019-10-29 14:22:26 +02006970bool ipa3_get_lan_rx_napi(void)
6971{
6972 return false;
6973}
6974
Amir Levy9659e592016-10-27 18:08:27 +03006975static void ipa_gsi_notify_cb(struct gsi_per_notify *notify)
6976{
6977 switch (notify->evt_id) {
6978 case GSI_PER_EVT_GLOB_ERROR:
6979 IPAERR("Got GSI_PER_EVT_GLOB_ERROR\n");
6980 IPAERR("Err_desc = 0x%04x\n", notify->data.err_desc);
6981 break;
6982 case GSI_PER_EVT_GLOB_GP1:
6983 IPAERR("Got GSI_PER_EVT_GLOB_GP1\n");
6984 BUG();
6985 break;
6986 case GSI_PER_EVT_GLOB_GP2:
6987 IPAERR("Got GSI_PER_EVT_GLOB_GP2\n");
6988 BUG();
6989 break;
6990 case GSI_PER_EVT_GLOB_GP3:
6991 IPAERR("Got GSI_PER_EVT_GLOB_GP3\n");
6992 BUG();
6993 break;
6994 case GSI_PER_EVT_GENERAL_BREAK_POINT:
6995 IPAERR("Got GSI_PER_EVT_GENERAL_BREAK_POINT\n");
6996 break;
6997 case GSI_PER_EVT_GENERAL_BUS_ERROR:
6998 IPAERR("Got GSI_PER_EVT_GENERAL_BUS_ERROR\n");
6999 BUG();
7000 break;
7001 case GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW:
7002 IPAERR("Got GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW\n");
7003 BUG();
7004 break;
7005 case GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW:
7006 IPAERR("Got GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW\n");
7007 BUG();
7008 break;
7009 default:
7010 IPAERR("Received unexpected evt: %d\n",
7011 notify->evt_id);
7012 BUG();
7013 }
7014}
7015
7016int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data)
7017{
7018 struct ipa3_ready_cb_info *cb_info = NULL;
7019
7020 /* check ipa3_ctx existed or not */
7021 if (!ipa3_ctx) {
7022		IPADBG("IPA driver hasn't been initialized\n");
7023 return -ENXIO;
7024 }
7025 mutex_lock(&ipa3_ctx->lock);
7026 if (ipa3_ctx->ipa_initialization_complete) {
7027 mutex_unlock(&ipa3_ctx->lock);
7028 IPADBG("IPA driver finished initialization already\n");
7029 return -EEXIST;
7030 }
7031
7032 cb_info = kmalloc(sizeof(struct ipa3_ready_cb_info), GFP_KERNEL);
7033 if (!cb_info) {
7034 mutex_unlock(&ipa3_ctx->lock);
7035 return -ENOMEM;
7036 }
7037
7038 cb_info->ready_cb = ipa_ready_cb;
7039 cb_info->user_data = user_data;
7040
7041 list_add_tail(&cb_info->link, &ipa3_ctx->ipa_ready_cb_list);
7042 mutex_unlock(&ipa3_ctx->lock);
7043
7044 return 0;
7045}
7046
7047int ipa3_iommu_map(struct iommu_domain *domain,
7048 unsigned long iova, phys_addr_t paddr, size_t size, int prot)
7049{
Skylar Changefc0a0f2018-03-29 11:17:40 -07007050 struct ipa_smmu_cb_ctx *ap_cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
7051 struct ipa_smmu_cb_ctx *uc_cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
Amir Levy9659e592016-10-27 18:08:27 +03007052
7053 IPADBG("domain =0x%p iova 0x%lx\n", domain, iova);
7054 IPADBG("paddr =0x%pa size 0x%x\n", &paddr, (u32)size);
7055
7056 /* make sure no overlapping */
7057 if (domain == ipa3_get_smmu_domain()) {
7058 if (iova >= ap_cb->va_start && iova < ap_cb->va_end) {
7059 IPAERR("iommu AP overlap addr 0x%lx\n", iova);
7060 ipa_assert();
7061 return -EFAULT;
7062 }
7063 } else if (domain == ipa3_get_wlan_smmu_domain()) {
7064 /* wlan is one time map */
7065 } else if (domain == ipa3_get_uc_smmu_domain()) {
7066 if (iova >= uc_cb->va_start && iova < uc_cb->va_end) {
7067 IPAERR("iommu uC overlap addr 0x%lx\n", iova);
7068 ipa_assert();
7069 return -EFAULT;
7070 }
7071 } else {
7072 IPAERR("Unexpected domain 0x%p\n", domain);
7073 ipa_assert();
7074 return -EFAULT;
7075 }
7076
7077 return iommu_map(domain, iova, paddr, size, prot);
7078}
7079
Michael Adisumartad04e6d62017-11-09 17:46:35 -08007080/**
7081 * ipa3_get_smmu_params()- Return the ipa3 smmu related params.
7082 */
7083int ipa3_get_smmu_params(struct ipa_smmu_in_params *in,
7084 struct ipa_smmu_out_params *out)
7085{
7086 bool is_smmu_enable = 0;
7087
7088 if (out == NULL || in == NULL) {
7089		IPAERR("bad params for client SMMU out params\n");
7090 return -EINVAL;
7091 }
7092
7093 if (!ipa3_ctx) {
7094 IPAERR("IPA not yet initialized\n");
7095 return -EINVAL;
7096 }
7097
7098 switch (in->smmu_client) {
7099 case IPA_SMMU_WLAN_CLIENT:
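		/*
		 * WLAN is reported as SMMU-enabled only when neither the
		 * uC nor the WLAN context bank runs in S1 bypass.
		 */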
7100 is_smmu_enable = !(ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] |
7101 ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN]);
7102 break;
7103 default:
7104 is_smmu_enable = 0;
7105		IPAERR("Trying to get an illegal client's SMMU status\n");
7106 return -EINVAL;
7107 }
7108
7109 out->smmu_enable = is_smmu_enable;
7110
7111 return 0;
7112}
7113
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04007114/**************************************************************
7115 * PCIe Version
7116 *************************************************************/
7117
7118int ipa3_pci_drv_probe(
7119 struct pci_dev *pci_dev,
7120 struct ipa_api_controller *api_ctrl,
7121 const struct of_device_id *pdrv_match)
7122{
7123 int result;
7124 struct ipa3_plat_drv_res *ipa_drv_res;
7125 u32 bar0_offset;
7126 u32 mem_start;
7127 u32 mem_end;
7128 uint32_t bits;
7129 uint32_t ipa_start, gsi_start, intctrl_start;
7130 struct device *dev;
7131 static struct platform_device platform_dev;
7132
7133 if (!pci_dev || !api_ctrl || !pdrv_match) {
7134 IPAERR(
7135 "Bad arg: pci_dev (%pK) and/or api_ctrl (%pK) and/or pdrv_match (%pK)\n",
7136 pci_dev, api_ctrl, pdrv_match);
7137 return -EOPNOTSUPP;
7138 }
7139
7140 dev = &(pci_dev->dev);
7141
7142 IPADBG("IPA PCI driver probing started\n");
7143
7144 /*
7145 * Follow PCI driver flow here.
7146 * pci_enable_device: Enables device and assigns resources
7147 * pci_request_region: Makes BAR0 address region usable
7148 */
7149 result = pci_enable_device(pci_dev);
7150 if (result < 0) {
7151 IPAERR("pci_enable_device() failed\n");
7152 return -EOPNOTSUPP;
7153 }
7154
7155 result = pci_request_region(pci_dev, 0, "IPA Memory");
7156 if (result < 0) {
7157 IPAERR("pci_request_region() failed\n");
7158 pci_disable_device(pci_dev);
7159 return -EOPNOTSUPP;
7160 }
7161
7162 /*
7163 * When in the PCI/emulation environment, &platform_dev is
7164 * passed to get_ipa_dts_configuration(), but is unused, since
7165 * all usages of it in the function are replaced by CPP
7166 * relative to definitions in ipa_emulation_stubs.h. Passing
7167 * &platform_dev makes code validity tools happy.
7168 */
7169 if (get_ipa_dts_configuration(&platform_dev, &ipa3_res) != 0) {
7170 IPAERR("get_ipa_dts_configuration() failed\n");
7171 pci_release_region(pci_dev, 0);
7172 pci_disable_device(pci_dev);
7173 return -EOPNOTSUPP;
7174 }
7175
7176 ipa_drv_res = &ipa3_res;
7177
7178 result =
7179 of_property_read_u32(NULL, "emulator-bar0-offset",
7180 &bar0_offset);
7181 if (result) {
7182 IPAERR(":get resource failed for emulator-bar0-offset!\n");
7183 pci_release_region(pci_dev, 0);
7184 pci_disable_device(pci_dev);
7185 return -ENODEV;
7186 }
7187 IPADBG(":using emulator-bar0-offset 0x%08X\n", bar0_offset);
7188
7189 ipa_start = ipa_drv_res->ipa_mem_base;
7190 gsi_start = ipa_drv_res->transport_mem_base;
7191 intctrl_start = ipa_drv_res->emulator_intcntrlr_mem_base;
7192
7193 /*
7194	 * Where will we be interrupted?
7195 */
7196 ipa_drv_res->emulator_irq = pci_dev->irq;
7197 IPADBG(
7198 "EMULATION PCI_INTERRUPT_PIN(%u)\n",
7199 ipa_drv_res->emulator_irq);
7200
7201 /*
7202 * Set the ipa_mem_base to the PCI base address of BAR0
7203 */
7204 mem_start = pci_resource_start(pci_dev, 0);
7205 mem_end = pci_resource_end(pci_dev, 0);
7206
7207 IPADBG("PCI START = 0x%x\n", mem_start);
7208 IPADBG("PCI END = 0x%x\n", mem_end);
7209
7210 ipa_drv_res->ipa_mem_base = mem_start + bar0_offset;
7211
7212 smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
7213 smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
7214
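	/*
	 * GSI and emulator interrupt-controller registers keep their DT
	 * offsets relative to the IPA block, so rebase them against the
	 * BAR0-derived ipa_mem_base.
	 */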
7215 ipa_drv_res->transport_mem_base =
7216 ipa_drv_res->ipa_mem_base + (gsi_start - ipa_start);
7217
7218 ipa_drv_res->emulator_intcntrlr_mem_base =
7219 ipa_drv_res->ipa_mem_base + (intctrl_start - ipa_start);
7220
7221 IPADBG("ipa_mem_base = 0x%x\n",
7222 ipa_drv_res->ipa_mem_base);
7223 IPADBG("ipa_mem_size = 0x%x\n",
7224 ipa_drv_res->ipa_mem_size);
7225
7226 IPADBG("transport_mem_base = 0x%x\n",
7227 ipa_drv_res->transport_mem_base);
7228 IPADBG("transport_mem_size = 0x%x\n",
7229 ipa_drv_res->transport_mem_size);
7230
7231 IPADBG("emulator_intcntrlr_mem_base = 0x%x\n",
7232 ipa_drv_res->emulator_intcntrlr_mem_base);
7233 IPADBG("emulator_intcntrlr_mem_size = 0x%x\n",
7234 ipa_drv_res->emulator_intcntrlr_mem_size);
7235
7236 result = ipa3_bind_api_controller(ipa_drv_res->ipa_hw_type, api_ctrl);
7237 if (result != 0) {
7238 IPAERR("ipa3_bind_api_controller() failed\n");
7239 pci_release_region(pci_dev, 0);
7240 pci_disable_device(pci_dev);
7241 return result;
7242 }
7243
7244 bits = (ipa_drv_res->use_64_bit_dma_mask) ? 64 : 32;
7245
7246 if (dma_set_mask(dev, DMA_BIT_MASK(bits)) != 0) {
7247 IPAERR("dma_set_mask(%pK, %u) failed\n", dev, bits);
7248 pci_release_region(pci_dev, 0);
7249 pci_disable_device(pci_dev);
7250 return -EOPNOTSUPP;
7251 }
7252
7253 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(bits)) != 0) {
7254 IPAERR("dma_set_coherent_mask(%pK, %u) failed\n", dev, bits);
7255 pci_release_region(pci_dev, 0);
7256 pci_disable_device(pci_dev);
7257 return -EOPNOTSUPP;
7258 }
7259
7260 pci_set_master(pci_dev);
7261
7262 memset(&platform_dev, 0, sizeof(platform_dev));
7263 platform_dev.dev = *dev;
7264
7265 /* Proceed to real initialization */
7266 result = ipa3_pre_init(&ipa3_res, &platform_dev);
7267 if (result) {
7268 IPAERR("ipa3_init failed\n");
7269 pci_clear_master(pci_dev);
7270 pci_release_region(pci_dev, 0);
7271 pci_disable_device(pci_dev);
7272 return result;
7273 }
7274
7275 return result;
7276}
7277
7278/*
7279 * The following returns transport register memory location and
7280 * size...
7281 */
7282int ipa3_get_transport_info(
7283 phys_addr_t *phys_addr_ptr,
7284 unsigned long *size_ptr)
7285{
7286 if (!phys_addr_ptr || !size_ptr) {
7287 IPAERR("Bad arg: phys_addr_ptr(%pK) and/or size_ptr(%pK)\n",
7288 phys_addr_ptr, size_ptr);
7289 return -EINVAL;
7290 }
7291
7292 *phys_addr_ptr = ipa3_res.transport_mem_base;
7293 *size_ptr = ipa3_res.transport_mem_size;
7294
7295 return 0;
7296}
7297EXPORT_SYMBOL(ipa3_get_transport_info);
7298
7299static uint emulation_type = IPA_HW_v4_0;
7300
7301/*
7302 * The following returns emulation type...
7303 */
7304uint ipa3_get_emulation_type(void)
7305{
7306 return emulation_type;
7307}
7308
Amir Levy9659e592016-10-27 18:08:27 +03007309MODULE_LICENSE("GPL v2");
7310MODULE_DESCRIPTION("IPA HW device driver");
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -04007311
7312/*
7313 * Module parameter. Invoke as follows:
7314 * insmod ipat.ko emulation_type=[13|14|...|N]
7315 * Examples:
7316 * insmod ipat.ko emulation_type=13 # for IPA 3.5.1
7317 * insmod ipat.ko emulation_type=14 # for IPA 4.0
7318 *
7319 * NOTE: The emulation_type values need to come from: enum ipa_hw_type
7320 *
7321 */
7322
7323module_param(emulation_type, uint, 0000);
7324MODULE_PARM_DESC(
7325 emulation_type,
7326 "IPA emulation type (Use 13 for IPA 3.5.1, 14 for IPA 4.0)");