/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/fs.h>
#include <linux/genalloc.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/rbtree.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include "ipa_i.h"
#include "../ipa_rm_i.h"

#define CREATE_TRACE_POINTS
#include "ipa_trace.h"

#define IPA_SUMMING_THRESHOLD (0x10)
#define IPA_PIPE_MEM_START_OFST (0x0)
#define IPA_PIPE_MEM_SIZE (0x0)
#define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \
			       x == IPA_MODE_MOBILE_AP_WAN || \
			       x == IPA_MODE_MOBILE_AP_WLAN)
#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
#define IPA_A5_MUX_HEADER_LENGTH (8)
#define IPA_ROUTING_RULE_BYTE_SIZE (4)
#define IPA_BAM_CNFG_BITS_VALv1_1 (0x7FFFE004)
#define IPA_BAM_CNFG_BITS_VALv2_0 (0xFFFFE004)
#define IPA_STATUS_CLEAR_OFST (0x3f28)
#define IPA_STATUS_CLEAR_SIZE (32)

#define IPA_AGGR_MAX_STR_LENGTH (10)

#define CLEANUP_TAG_PROCESS_TIMEOUT 150

#define IPA2_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048

#define IPA2_ACTIVE_CLIENT_LOG_TYPE_EP 0
#define IPA2_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
#define IPA2_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
#define IPA2_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3

#define MAX_POLLING_ITERATION 40
#define MIN_POLLING_ITERATION 1
#define ONE_MSEC 1

#define IPA_AGGR_STR_IN_BYTES(str) \
	(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)

#define IPA_SPS_PROD_TIMEOUT_MSEC 100

#ifdef CONFIG_COMPAT
#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_ADD_HDR, \
					compat_uptr_t)
#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_DEL_HDR, \
					compat_uptr_t)
#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_ADD_RT_RULE, \
					compat_uptr_t)
#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_DEL_RT_RULE, \
					compat_uptr_t)
#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_ADD_FLT_RULE, \
					compat_uptr_t)
#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_DEL_FLT_RULE, \
					compat_uptr_t)
#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_GET_RT_TBL, \
					compat_uptr_t)
#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_COPY_HDR, \
					compat_uptr_t)
#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_QUERY_INTF, \
					compat_uptr_t)
#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_QUERY_INTF_TX_PROPS, \
					compat_uptr_t)
#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_QUERY_INTF_RX_PROPS, \
					compat_uptr_t)
#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_QUERY_INTF_EXT_PROPS, \
					compat_uptr_t)
#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_GET_HDR, \
					compat_uptr_t)
#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_ALLOC_NAT_MEM, \
					compat_uptr_t)
#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_V4_INIT_NAT, \
					compat_uptr_t)
#define IPA_IOC_NAT_DMA32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_NAT_DMA, \
					compat_uptr_t)
#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_V4_DEL_NAT, \
					compat_uptr_t)
#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_GET_NAT_OFFSET, \
					compat_uptr_t)
#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_PULL_MSG, \
					compat_uptr_t)
#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_RM_ADD_DEPENDENCY, \
					compat_uptr_t)
#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_RM_DEL_DEPENDENCY, \
					compat_uptr_t)
#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_GENERATE_FLT_EQ, \
					compat_uptr_t)
#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_QUERY_RT_TBL_INDEX, \
					compat_uptr_t)
#define IPA_IOC_WRITE_QMAPID32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_WRITE_QMAPID, \
					compat_uptr_t)
#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
					IPA_IOCTL_MDFY_FLT_RULE, \
					compat_uptr_t)
#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
				compat_uptr_t)
#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \
				compat_uptr_t)
#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \
				compat_uptr_t)
#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_ADD_HDR_PROC_CTX, \
				compat_uptr_t)
#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_DEL_HDR_PROC_CTX, \
				compat_uptr_t)
#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_MDFY_RT_RULE, \
				compat_uptr_t)

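/*
 * Note: each *32 command above gets its own number because _IOWR()
 * encodes the size of its payload type into the command value, and
 * sizeof(compat_uptr_t) differs from the native pointer size used by
 * the 64-bit definitions. A 32-bit caller's command is therefore
 * expected to be mapped back to its native counterpart in the compat
 * ioctl handler, roughly (illustrative sketch only):
 *
 *	case IPA_IOC_ADD_HDR32:
 *		return ipa_ioctl(file, IPA_IOC_ADD_HDR,
 *			(unsigned long)compat_ptr(arg));
 */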
/**
 * struct ipa_ioc_nat_alloc_mem32 - nat table memory allocation
 * properties
 * @dev_name: input parameter, the name of table
 * @size: input parameter, size of table in bytes
 * @offset: output parameter, offset into page in case of system memory
 */
struct ipa_ioc_nat_alloc_mem32 {
	char dev_name[IPA_RESOURCE_NAME_MAX];
	compat_size_t size;
	compat_off_t offset;
};
#endif

static void ipa_start_tag_process(struct work_struct *work);
static DECLARE_WORK(ipa_tag_work, ipa_start_tag_process);

static void ipa_sps_release_resource(struct work_struct *work);
static DECLARE_DELAYED_WORK(ipa_sps_release_resource_work,
	ipa_sps_release_resource);

static struct ipa_plat_drv_res ipa_res = {0, };

struct msm_bus_scale_pdata *bus_scale_table;

static struct clk *ipa_clk_src;
static struct clk *ipa_clk;
static struct clk *smmu_clk;
static struct clk *sys_noc_ipa_axi_clk;
static struct clk *ipa_cnoc_clk;
static struct clk *ipa_inactivity_clk;

struct ipa_context *ipa_ctx;
static struct device *master_dev;
struct platform_device *ipa_pdev;
static struct {
	bool present;
	bool arm_smmu;
	bool fast_map;
	bool s1_bypass;
	u32 ipa_base;
	u32 ipa_size;
} smmu_info;

static char *active_clients_table_buf;

int ipa2_active_clients_log_print_buffer(char *buf, int size)
{
	int i;
	int nbytes;
	int cnt = 0;
	int start_idx;
	int end_idx;

	start_idx = (ipa_ctx->ipa2_active_clients_logging.log_tail + 1) %
			IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
	end_idx = ipa_ctx->ipa2_active_clients_logging.log_head;
	for (i = start_idx; i != end_idx;
		i = (i + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) {
		nbytes = scnprintf(buf + cnt, size - cnt, "%s\n",
				ipa_ctx->ipa2_active_clients_logging
				.log_buffer[i]);
		cnt += nbytes;
	}

	return cnt;
}

int ipa2_active_clients_log_print_table(char *buf, int size)
{
	int i;
	struct ipa2_active_client_htable_entry *iterator;
	int cnt = 0;

	cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n");
	hash_for_each(ipa_ctx->ipa2_active_clients_logging.htable, i,
			iterator, list) {
		switch (iterator->type) {
		case IPA2_ACTIVE_CLIENT_LOG_TYPE_EP:
			cnt += scnprintf(buf + cnt, size - cnt,
					"%-40s %-3d ENDPOINT\n",
					iterator->id_string, iterator->count);
			break;
		case IPA2_ACTIVE_CLIENT_LOG_TYPE_SIMPLE:
			cnt += scnprintf(buf + cnt, size - cnt,
					"%-40s %-3d SIMPLE\n",
					iterator->id_string, iterator->count);
			break;
		case IPA2_ACTIVE_CLIENT_LOG_TYPE_RESOURCE:
			cnt += scnprintf(buf + cnt, size - cnt,
					"%-40s %-3d RESOURCE\n",
					iterator->id_string, iterator->count);
			break;
		case IPA2_ACTIVE_CLIENT_LOG_TYPE_SPECIAL:
			cnt += scnprintf(buf + cnt, size - cnt,
					"%-40s %-3d SPECIAL\n",
					iterator->id_string, iterator->count);
			break;
		default:
			IPAERR("Trying to print illegal active_clients type");
			break;
		}
	}
	cnt += scnprintf(buf + cnt, size - cnt,
			"\nTotal active clients count: %d\n",
			ipa_ctx->ipa_active_clients.cnt);

	return cnt;
}

static int ipa2_active_clients_panic_notifier(struct notifier_block *this,
		unsigned long event, void *ptr)
{
	ipa_active_clients_lock();
	ipa2_active_clients_log_print_table(active_clients_table_buf,
			IPA2_ACTIVE_CLIENTS_TABLE_BUF_SIZE);
	IPAERR("%s", active_clients_table_buf);
	ipa_active_clients_unlock();

	return NOTIFY_DONE;
}

static struct notifier_block ipa2_active_clients_panic_blk = {
	.notifier_call = ipa2_active_clients_panic_notifier,
};

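/*
 * The active-clients log is a fixed-size circular buffer of text
 * lines: log_head is the next slot to write, log_tail marks the
 * oldest retained line, and both wrap modulo
 * IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES. When head catches up
 * with tail, the oldest line is silently overwritten.
 */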
static int ipa2_active_clients_log_insert(const char *string)
{
	int head;
	int tail;

	head = ipa_ctx->ipa2_active_clients_logging.log_head;
	tail = ipa_ctx->ipa2_active_clients_logging.log_tail;

	if (!ipa_ctx->ipa2_active_clients_logging.log_rdy)
		return -EPERM;
	memset(ipa_ctx->ipa2_active_clients_logging.log_buffer[head], '_',
			IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN);
	strlcpy(ipa_ctx->ipa2_active_clients_logging.log_buffer[head], string,
			(size_t)IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN);
	head = (head + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
	if (tail == head)
		tail = (tail + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;

	ipa_ctx->ipa2_active_clients_logging.log_tail = tail;
	ipa_ctx->ipa2_active_clients_logging.log_head = head;

	return 0;
}

static int ipa2_active_clients_log_init(void)
{
	int i;

	ipa_ctx->ipa2_active_clients_logging.log_buffer[0] = kzalloc(
			IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES *
			sizeof(char[IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN]),
			GFP_KERNEL);
	active_clients_table_buf = kzalloc(sizeof(
			char[IPA2_ACTIVE_CLIENTS_TABLE_BUF_SIZE]), GFP_KERNEL);
	/* check the first line pointer; log_buffer itself is an array and
	 * can never be NULL
	 */
	if (ipa_ctx->ipa2_active_clients_logging.log_buffer[0] == NULL) {
		IPAERR("Active Clients Logging memory allocation failed");
		goto bail;
	}
	for (i = 0; i < IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; i++) {
		ipa_ctx->ipa2_active_clients_logging.log_buffer[i] =
			ipa_ctx->ipa2_active_clients_logging.log_buffer[0] +
			(IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN * i);
	}
	ipa_ctx->ipa2_active_clients_logging.log_head = 0;
	ipa_ctx->ipa2_active_clients_logging.log_tail =
			IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
	hash_init(ipa_ctx->ipa2_active_clients_logging.htable);
	atomic_notifier_chain_register(&panic_notifier_list,
			&ipa2_active_clients_panic_blk);
	ipa_ctx->ipa2_active_clients_logging.log_rdy = 1;

	return 0;

bail:
	return -ENOMEM;
}

void ipa2_active_clients_log_clear(void)
{
	ipa_active_clients_lock();
	ipa_ctx->ipa2_active_clients_logging.log_head = 0;
	ipa_ctx->ipa2_active_clients_logging.log_tail =
			IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
	ipa_active_clients_unlock();
}

static void ipa2_active_clients_log_destroy(void)
{
	ipa_ctx->ipa2_active_clients_logging.log_rdy = 0;
	kfree(active_clients_table_buf);
	active_clients_table_buf = NULL;
	kfree(ipa_ctx->ipa2_active_clients_logging.log_buffer[0]);
	ipa_ctx->ipa2_active_clients_logging.log_head = 0;
	ipa_ctx->ipa2_active_clients_logging.log_tail =
			IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
}

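/*
 * The IPA uses up to three SMMU context banks: one for AP-owned
 * buffers, one for WLAN and one for the IPA uC. The getters below
 * hand out a bank's IOMMU domain only if that context bank was
 * successfully attached during probe (its valid flag is set).
 */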
enum ipa_smmu_cb_type {
	IPA_SMMU_CB_AP,
	IPA_SMMU_CB_WLAN,
	IPA_SMMU_CB_UC,
	IPA_SMMU_CB_MAX
};

static struct ipa_smmu_cb_ctx smmu_cb[IPA_SMMU_CB_MAX];

struct iommu_domain *ipa2_get_smmu_domain(void)
{
	if (smmu_cb[IPA_SMMU_CB_AP].valid)
		return smmu_cb[IPA_SMMU_CB_AP].mapping->domain;

	IPAERR("CB not valid\n");

	return NULL;
}

struct iommu_domain *ipa2_get_uc_smmu_domain(void)
{
	if (smmu_cb[IPA_SMMU_CB_UC].valid)
		return smmu_cb[IPA_SMMU_CB_UC].mapping->domain;

	IPAERR("CB not valid\n");

	return NULL;
}

struct iommu_domain *ipa2_get_wlan_smmu_domain(void)
{
	if (smmu_cb[IPA_SMMU_CB_WLAN].valid)
		return smmu_cb[IPA_SMMU_CB_WLAN].iommu;

	IPAERR("CB not valid\n");

	return NULL;
}

struct device *ipa2_get_dma_dev(void)
{
	return ipa_ctx->pdev;
}

/**
 * ipa2_get_smmu_ctx() - Return the AP smmu context
 *
 * Return value: pointer to smmu context address
 */
struct ipa_smmu_cb_ctx *ipa2_get_smmu_ctx(void)
{
	return &smmu_cb[IPA_SMMU_CB_AP];
}

/**
 * ipa2_get_wlan_smmu_ctx() - Return the wlan smmu context
 *
 * Return value: pointer to smmu context address
 */
struct ipa_smmu_cb_ctx *ipa2_get_wlan_smmu_ctx(void)
{
	return &smmu_cb[IPA_SMMU_CB_WLAN];
}

/**
 * ipa2_get_uc_smmu_ctx() - Return the uc smmu context
 *
 * Return value: pointer to smmu context address
 */
struct ipa_smmu_cb_ctx *ipa2_get_uc_smmu_ctx(void)
{
	return &smmu_cb[IPA_SMMU_CB_UC];
}

static int ipa_open(struct inode *inode, struct file *filp)
{
	struct ipa_context *ctx = NULL;

	IPADBG_LOW("ENTER\n");
	ctx = container_of(inode->i_cdev, struct ipa_context, cdev);
	filp->private_data = ctx;

	return 0;
}

/**
 * ipa_flow_control() - Enable/Disable flow control on a particular client.
 *
 * Return codes:
 * None
 */
void ipa_flow_control(enum ipa_client_type ipa_client,
		bool enable, uint32_t qmap_id)
{
	struct ipa_ep_cfg_ctrl ep_ctrl = {0};
	int ep_idx;
	struct ipa_ep_context *ep;

	/* Check if tethered flow control is needed or not. */
	if (!ipa_ctx->tethered_flow_control) {
		IPADBG("Apps flow control is not needed\n");
		return;
	}

	/* Check if ep is valid. */
	ep_idx = ipa2_get_ep_mapping(ipa_client);
	if (ep_idx == -1) {
		IPADBG("Invalid IPA client\n");
		return;
	}

	ep = &ipa_ctx->ep[ep_idx];
	if (!ep->valid || (ep->client != IPA_CLIENT_USB_PROD)) {
		IPADBG("EP not valid/Not applicable for client.\n");
		return;
	}

	spin_lock(&ipa_ctx->disconnect_lock);
	/* Check if the QMAP_ID matches. */
	if (ep->cfg.meta.qmap_id != qmap_id) {
		IPADBG("Flow control ind not for same flow: %u %u\n",
			ep->cfg.meta.qmap_id, qmap_id);
		spin_unlock(&ipa_ctx->disconnect_lock);
		return;
	}
	if (!ep->disconnect_in_progress) {
		if (enable) {
			IPADBG("Enabling Flow\n");
			ep_ctrl.ipa_ep_delay = false;
			IPA_STATS_INC_CNT(ipa_ctx->stats.flow_enable);
		} else {
			IPADBG("Disabling Flow\n");
			ep_ctrl.ipa_ep_delay = true;
			IPA_STATS_INC_CNT(ipa_ctx->stats.flow_disable);
		}
		ep_ctrl.ipa_ep_suspend = false;
		ipa2_cfg_ep_ctrl(ep_idx, &ep_ctrl);
	} else {
		IPADBG("EP disconnect is in progress\n");
	}
	spin_unlock(&ipa_ctx->disconnect_lock);
}

static void ipa_wan_msg_free_cb(void *buff, u32 len, u32 type)
{
	if (!buff) {
		IPAERR("Null buffer\n");
		return;
	}

	if (type != WAN_UPSTREAM_ROUTE_ADD &&
	    type != WAN_UPSTREAM_ROUTE_DEL &&
	    type != WAN_EMBMS_CONNECT) {
		IPAERR("Wrong type given. buff %p type %d\n", buff, type);
		return;
	}

	kfree(buff);
}

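/*
 * Copies a WAN message from user space and forwards it to registered
 * listeners via ipa2_send_msg(). When is_cache is set, the message and
 * its meta data are also stored in a small circular cache
 * (IPA_MAX_NUM_REQ_CACHE entries) so that the most recent CNE events
 * can be replayed later, e.g. to a listener that attached late.
 */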
static int ipa_send_wan_msg(unsigned long usr_param, uint8_t msg_type,
		bool is_cache)
{
	int retval;
	struct ipa_wan_msg *wan_msg;
	struct ipa_msg_meta msg_meta;
	struct ipa_wan_msg cache_wan_msg;

	wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
	if (!wan_msg) {
		IPAERR("no memory\n");
		return -ENOMEM;
	}

	if (copy_from_user((u8 *)wan_msg, (u8 *)usr_param,
		sizeof(struct ipa_wan_msg))) {
		kfree(wan_msg);
		return -EFAULT;
	}

	memcpy(&cache_wan_msg, wan_msg, sizeof(cache_wan_msg));

	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
	msg_meta.msg_type = msg_type;
	msg_meta.msg_len = sizeof(struct ipa_wan_msg);
	retval = ipa2_send_msg(&msg_meta, wan_msg, ipa_wan_msg_free_cb);
	if (retval) {
		IPAERR("ipa2_send_msg failed: %d\n", retval);
		kfree(wan_msg);
		return retval;
	}

	if (is_cache) {
		mutex_lock(&ipa_ctx->ipa_cne_evt_lock);

		/* cache the cne event */
		memcpy(&ipa_ctx->ipa_cne_evt_req_cache[
			ipa_ctx->num_ipa_cne_evt_req].wan_msg,
			&cache_wan_msg,
			sizeof(cache_wan_msg));

		memcpy(&ipa_ctx->ipa_cne_evt_req_cache[
			ipa_ctx->num_ipa_cne_evt_req].msg_meta,
			&msg_meta,
			sizeof(struct ipa_msg_meta));

		ipa_ctx->num_ipa_cne_evt_req++;
		ipa_ctx->num_ipa_cne_evt_req %= IPA_MAX_NUM_REQ_CACHE;
		mutex_unlock(&ipa_ctx->ipa_cne_evt_lock);
	}

	return 0;
}

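/*
 * Most variable-length ioctls below follow the same defensive pattern:
 * copy the fixed-size header first to learn the entry count
 * (pre_entry), allocate and copy the full payload, then re-check that
 * the count embedded in the second copy still equals pre_entry. This
 * guards against a racing user thread growing the count between the
 * two copy_from_user() calls, which could otherwise lead to reads past
 * the allocated buffer.
 */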
static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int retval = 0;
	u32 pyld_sz;
	u8 header[128] = { 0 };
	u8 *param = NULL;
	struct ipa_ioc_nat_alloc_mem nat_mem;
	struct ipa_ioc_v4_nat_init nat_init;
	struct ipa_ioc_v4_nat_del nat_del;
	struct ipa_ioc_rm_dependency rm_depend;
	size_t sz;
	int pre_entry;

	IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));

	if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
		return -ENOTTY;

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	switch (cmd) {
	case IPA_IOC_ALLOC_NAT_MEM:
		if (copy_from_user((u8 *)&nat_mem, (u8 *)arg,
				sizeof(struct ipa_ioc_nat_alloc_mem))) {
			retval = -EFAULT;
			break;
		}
		/* null terminate the string */
		nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';

		if (ipa2_allocate_nat_device(&nat_mem)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, (u8 *)&nat_mem,
				sizeof(struct ipa_ioc_nat_alloc_mem))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_V4_INIT_NAT:
		if (copy_from_user((u8 *)&nat_init, (u8 *)arg,
				sizeof(struct ipa_ioc_v4_nat_init))) {
			retval = -EFAULT;
			break;
		}
		if (ipa2_nat_init_cmd(&nat_init)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_NAT_DMA:
		if (copy_from_user(header, (u8 *)arg,
				sizeof(struct ipa_ioc_nat_dma_cmd))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_nat_dma_cmd *)header)->entries;
		pyld_sz =
			sizeof(struct ipa_ioc_nat_dma_cmd) +
			pre_entry * sizeof(struct ipa_ioc_nat_dma_one);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}

		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
				!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_nat_dma_cmd *)param)->entries,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa2_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_V4_DEL_NAT:
		if (copy_from_user((u8 *)&nat_del, (u8 *)arg,
				sizeof(struct ipa_ioc_v4_nat_del))) {
			retval = -EFAULT;
			break;
		}
		if (ipa2_nat_del_cmd(&nat_del)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_ADD_HDR:
		if (copy_from_user(header, (u8 *)arg,
				sizeof(struct ipa_ioc_add_hdr))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_add_hdr *)header)->num_hdrs;
		pyld_sz =
			sizeof(struct ipa_ioc_add_hdr) +
			pre_entry * sizeof(struct ipa_hdr_add);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
				!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_add_hdr *)param)->num_hdrs,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa2_add_hdr((struct ipa_ioc_add_hdr *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_DEL_HDR:
		if (copy_from_user(header, (u8 *)arg,
				sizeof(struct ipa_ioc_del_hdr))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_del_hdr *)header)->num_hdls;
		pyld_sz =
			sizeof(struct ipa_ioc_del_hdr) +
			pre_entry * sizeof(struct ipa_hdr_del);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
				!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_del_hdr *)param)->num_hdls,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa2_del_hdr_by_user((struct ipa_ioc_del_hdr *)param,
			true)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_ADD_RT_RULE:
		if (copy_from_user(header, (u8 *)arg,
				sizeof(struct ipa_ioc_add_rt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_add_rt_rule *)header)->num_rules;
		pyld_sz =
			sizeof(struct ipa_ioc_add_rt_rule) +
			pre_entry * sizeof(struct ipa_rt_rule_add);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
				!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_add_rt_rule *)param)->num_rules,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa2_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_MDFY_RT_RULE:
		if (copy_from_user(header, (u8 *)arg,
				sizeof(struct ipa_ioc_mdfy_rt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules;
		pyld_sz =
			sizeof(struct ipa_ioc_mdfy_rt_rule) +
			pre_entry * sizeof(struct ipa_rt_rule_mdfy);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
				!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa2_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_DEL_RT_RULE:
		if (copy_from_user(header, (u8 *)arg,
				sizeof(struct ipa_ioc_del_rt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_del_rt_rule *)header)->num_hdls;
		pyld_sz =
			sizeof(struct ipa_ioc_del_rt_rule) +
			pre_entry * sizeof(struct ipa_rt_rule_del);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
				!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa2_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_ADD_FLT_RULE:
		if (copy_from_user(header, (u8 *)arg,
				sizeof(struct ipa_ioc_add_flt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_add_flt_rule *)header)->num_rules;
		pyld_sz =
			sizeof(struct ipa_ioc_add_flt_rule) +
			pre_entry * sizeof(struct ipa_flt_rule_add);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
				!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_add_flt_rule *)param)->num_rules,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa2_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_DEL_FLT_RULE:
		if (copy_from_user(header, (u8 *)arg,
				sizeof(struct ipa_ioc_del_flt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_del_flt_rule *)header)->num_hdls;
		pyld_sz =
			sizeof(struct ipa_ioc_del_flt_rule) +
			pre_entry * sizeof(struct ipa_flt_rule_del);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
				!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_del_flt_rule *)param)->num_hdls,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa2_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_MDFY_FLT_RULE:
		if (copy_from_user(header, (u8 *)arg,
				sizeof(struct ipa_ioc_mdfy_flt_rule))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules;
		pyld_sz =
			sizeof(struct ipa_ioc_mdfy_flt_rule) +
			pre_entry * sizeof(struct ipa_flt_rule_mdfy);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
				!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa2_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_COMMIT_HDR:
		retval = ipa2_commit_hdr();
		break;
	case IPA_IOC_RESET_HDR:
		retval = ipa2_reset_hdr();
		break;
	case IPA_IOC_COMMIT_RT:
		retval = ipa2_commit_rt(arg);
		break;
	case IPA_IOC_RESET_RT:
		retval = ipa2_reset_rt(arg);
		break;
	case IPA_IOC_COMMIT_FLT:
		retval = ipa2_commit_flt(arg);
		break;
	case IPA_IOC_RESET_FLT:
		retval = ipa2_reset_flt(arg);
		break;
	case IPA_IOC_GET_RT_TBL:
		if (copy_from_user(header, (u8 *)arg,
				sizeof(struct ipa_ioc_get_rt_tbl))) {
			retval = -EFAULT;
			break;
		}
		if (ipa2_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, header,
				sizeof(struct ipa_ioc_get_rt_tbl))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_PUT_RT_TBL:
		retval = ipa2_put_rt_tbl(arg);
		break;
	case IPA_IOC_GET_HDR:
		if (copy_from_user(header, (u8 *)arg,
				sizeof(struct ipa_ioc_get_hdr))) {
			retval = -EFAULT;
			break;
		}
		if (ipa2_get_hdr((struct ipa_ioc_get_hdr *)header)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, header,
				sizeof(struct ipa_ioc_get_hdr))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_PUT_HDR:
		retval = ipa2_put_hdr(arg);
		break;
	case IPA_IOC_SET_FLT:
		retval = ipa_cfg_filter(arg);
		break;
	case IPA_IOC_COPY_HDR:
		if (copy_from_user(header, (u8 *)arg,
				sizeof(struct ipa_ioc_copy_hdr))) {
			retval = -EFAULT;
			break;
		}
		if (ipa2_copy_hdr((struct ipa_ioc_copy_hdr *)header)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, header,
				sizeof(struct ipa_ioc_copy_hdr))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_QUERY_INTF:
		if (copy_from_user(header, (u8 *)arg,
				sizeof(struct ipa_ioc_query_intf))) {
			retval = -EFAULT;
			break;
		}
		if (ipa_query_intf((struct ipa_ioc_query_intf *)header)) {
			retval = -1;
			break;
		}
		if (copy_to_user((u8 *)arg, header,
				sizeof(struct ipa_ioc_query_intf))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_QUERY_INTF_TX_PROPS:
		sz = sizeof(struct ipa_ioc_query_intf_tx_props);
		if (copy_from_user(header, (u8 *)arg, sz)) {
			retval = -EFAULT;
			break;
		}

		if (((struct ipa_ioc_query_intf_tx_props *)header)->num_tx_props
				> IPA_NUM_PROPS_MAX) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_query_intf_tx_props *)
			header)->num_tx_props;
		pyld_sz = sz + pre_entry *
			sizeof(struct ipa_ioc_tx_intf_prop);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
				param)->num_tx_props != pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_query_intf_tx_props *)
				param)->num_tx_props, pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa_query_intf_tx_props(
				(struct ipa_ioc_query_intf_tx_props *)param)) {
			retval = -1;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_QUERY_INTF_RX_PROPS:
		sz = sizeof(struct ipa_ioc_query_intf_rx_props);
		if (copy_from_user(header, (u8 *)arg, sz)) {
			retval = -EFAULT;
			break;
		}

		if (((struct ipa_ioc_query_intf_rx_props *)header)->num_rx_props
				> IPA_NUM_PROPS_MAX) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_query_intf_rx_props *)
			header)->num_rx_props;
		pyld_sz = sz + pre_entry *
			sizeof(struct ipa_ioc_rx_intf_prop);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
				param)->num_rx_props != pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_query_intf_rx_props *)
				param)->num_rx_props, pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa_query_intf_rx_props(
				(struct ipa_ioc_query_intf_rx_props *)param)) {
			retval = -1;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_QUERY_INTF_EXT_PROPS:
		sz = sizeof(struct ipa_ioc_query_intf_ext_props);
		if (copy_from_user(header, (u8 *)arg, sz)) {
			retval = -EFAULT;
			break;
		}

		if (((struct ipa_ioc_query_intf_ext_props *)
				header)->num_ext_props > IPA_NUM_PROPS_MAX) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_query_intf_ext_props *)
			header)->num_ext_props;
		pyld_sz = sz + pre_entry *
			sizeof(struct ipa_ioc_ext_intf_prop);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
				param)->num_ext_props != pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_query_intf_ext_props *)
				param)->num_ext_props, pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa_query_intf_ext_props(
				(struct ipa_ioc_query_intf_ext_props *)param)) {
			retval = -1;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_PULL_MSG:
		if (copy_from_user(header, (u8 *)arg,
				sizeof(struct ipa_msg_meta))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_msg_meta *)header)->msg_len;
		pyld_sz = sizeof(struct ipa_msg_meta) +
			pre_entry;
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_msg_meta *)param)->msg_len
				!= pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_msg_meta *)param)->msg_len,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa_pull_msg((struct ipa_msg_meta *)param,
				(char *)param + sizeof(struct ipa_msg_meta),
				((struct ipa_msg_meta *)param)->msg_len) !=
				((struct ipa_msg_meta *)param)->msg_len) {
			retval = -1;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_RM_ADD_DEPENDENCY:
		if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
				sizeof(struct ipa_ioc_rm_dependency))) {
			retval = -EFAULT;
			break;
		}
		retval = ipa_rm_add_dependency_from_ioctl(
			rm_depend.resource_name, rm_depend.depends_on_name);
		break;
	case IPA_IOC_RM_DEL_DEPENDENCY:
		if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
				sizeof(struct ipa_ioc_rm_dependency))) {
			retval = -EFAULT;
			break;
		}
		retval = ipa_rm_delete_dependency_from_ioctl(
			rm_depend.resource_name, rm_depend.depends_on_name);
		break;
	case IPA_IOC_GENERATE_FLT_EQ:
	{
		struct ipa_ioc_generate_flt_eq flt_eq;

		if (copy_from_user(&flt_eq, (u8 *)arg,
				sizeof(struct ipa_ioc_generate_flt_eq))) {
			retval = -EFAULT;
			break;
		}
		if (ipa_generate_flt_eq(flt_eq.ip, &flt_eq.attrib,
				&flt_eq.eq_attrib)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, &flt_eq,
				sizeof(struct ipa_ioc_generate_flt_eq))) {
			retval = -EFAULT;
			break;
		}
		break;
	}
	case IPA_IOC_QUERY_EP_MAPPING:
	{
		retval = ipa2_get_ep_mapping(arg);
		break;
	}
	case IPA_IOC_QUERY_RT_TBL_INDEX:
		if (copy_from_user(header, (u8 *)arg,
				sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
			retval = -EFAULT;
			break;
		}
		if (ipa2_query_rt_index(
				(struct ipa_ioc_get_rt_tbl_indx *)header)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, header,
				sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_WRITE_QMAPID:
		if (copy_from_user(header, (u8 *)arg,
				sizeof(struct ipa_ioc_write_qmapid))) {
			retval = -EFAULT;
			break;
		}
		if (ipa2_write_qmap_id((struct ipa_ioc_write_qmapid *)header)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, header,
				sizeof(struct ipa_ioc_write_qmapid))) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD:
		retval = ipa_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD, true);
		if (retval) {
			IPAERR("ipa_send_wan_msg failed: %d\n", retval);
			break;
		}
		break;
	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL:
		retval = ipa_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL, true);
		if (retval) {
			IPAERR("ipa_send_wan_msg failed: %d\n", retval);
			break;
		}
		break;
	case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED:
		retval = ipa_send_wan_msg(arg, WAN_EMBMS_CONNECT, false);
		if (retval) {
			IPAERR("ipa_send_wan_msg failed: %d\n", retval);
			break;
		}
		break;
	case IPA_IOC_ADD_HDR_PROC_CTX:
		if (copy_from_user(header, (u8 *)arg,
				sizeof(struct ipa_ioc_add_hdr_proc_ctx))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_add_hdr_proc_ctx *)
			header)->num_proc_ctxs;
		pyld_sz =
			sizeof(struct ipa_ioc_add_hdr_proc_ctx) +
			pre_entry * sizeof(struct ipa_hdr_proc_ctx_add);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
				param)->num_proc_ctxs != pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_add_hdr_proc_ctx *)
				param)->num_proc_ctxs, pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa2_add_hdr_proc_ctx(
				(struct ipa_ioc_add_hdr_proc_ctx *)param)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;
	case IPA_IOC_DEL_HDR_PROC_CTX:
		if (copy_from_user(header, (u8 *)arg,
				sizeof(struct ipa_ioc_del_hdr_proc_ctx))) {
			retval = -EFAULT;
			break;
		}
		pre_entry =
			((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls;
		pyld_sz =
			sizeof(struct ipa_ioc_del_hdr_proc_ctx) +
			pre_entry * sizeof(struct ipa_hdr_proc_ctx_del);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		/* add check in case user-space module compromised */
		if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
				param)->num_hdls != pre_entry)) {
			IPAERR_RL("current %d pre %d\n",
				((struct ipa_ioc_del_hdr_proc_ctx *)param)->num_hdls,
				pre_entry);
			retval = -EFAULT;
			break;
		}
		if (ipa2_del_hdr_proc_ctx_by_user(
				(struct ipa_ioc_del_hdr_proc_ctx *)param, true)) {
			retval = -EFAULT;
			break;
		}
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	case IPA_IOC_GET_HW_VERSION:
		pyld_sz = sizeof(enum ipa_hw_type);
		param = kzalloc(pyld_sz, GFP_KERNEL);
		if (!param) {
			retval = -ENOMEM;
			break;
		}
		memcpy(param, &ipa_ctx->ipa_hw_type, pyld_sz);
		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
			retval = -EFAULT;
			break;
		}
		break;

	default:
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
		return -ENOTTY;
	}
	kfree(param);

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

	return retval;
}

/**
 * ipa_setup_dflt_rt_tables() - Setup default routing tables
 *
 * Return codes:
 * 0: success
 * -ENOMEM: failed to allocate memory
 * -EPERM: failed to add the tables
 */
int ipa_setup_dflt_rt_tables(void)
{
	struct ipa_ioc_add_rt_rule *rt_rule;
	struct ipa_rt_rule_add *rt_rule_entry;

	rt_rule =
		kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
			sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
	if (!rt_rule) {
		IPAERR("fail to alloc mem\n");
		return -ENOMEM;
	}
	/* setup a default v4 route to point to Apps */
	rt_rule->num_rules = 1;
	rt_rule->commit = 1;
	rt_rule->ip = IPA_IP_v4;
	strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
			IPA_RESOURCE_NAME_MAX);

	rt_rule_entry = &rt_rule->rules[0];
	rt_rule_entry->at_rear = 1;
	rt_rule_entry->rule.dst = IPA_CLIENT_APPS_LAN_CONS;
	rt_rule_entry->rule.hdr_hdl = ipa_ctx->excp_hdr_hdl;

	if (ipa2_add_rt_rule(rt_rule)) {
		IPAERR("fail to add dflt v4 rule\n");
		kfree(rt_rule);
		return -EPERM;
	}
	IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
	ipa_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;

	/* setup a default v6 route to point to A5 */
	rt_rule->ip = IPA_IP_v6;
	if (ipa2_add_rt_rule(rt_rule)) {
		IPAERR("fail to add dflt v6 rule\n");
		kfree(rt_rule);
		return -EPERM;
	}
	IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
	ipa_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;

	/*
	 * because these tables are the very first to be added, they will both
	 * have the same index (0) which is essential for programming the
	 * "route" end-point config
	 */

	kfree(rt_rule);

	return 0;
}

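/*
 * ipa_setup_exception_path() installs the template header that IPA
 * prepends to exception packets (packets that fail filtering/routing)
 * and programs the ROUTE register so those packets land on the Apps
 * LAN consumer pipe.
 */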
static int ipa_setup_exception_path(void)
{
	struct ipa_ioc_add_hdr *hdr;
	struct ipa_hdr_add *hdr_entry;
	struct ipa_route route = { 0 };
	int ret;

	/* install the basic exception header */
	hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
		      sizeof(struct ipa_hdr_add), GFP_KERNEL);
	if (!hdr) {
		IPAERR("fail to alloc exception hdr\n");
		return -ENOMEM;
	}
	hdr->num_hdrs = 1;
	hdr->commit = 1;
	hdr_entry = &hdr->hdr[0];

	if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) {
		strlcpy(hdr_entry->name, IPA_A5_MUX_HDR_NAME,
				IPA_RESOURCE_NAME_MAX);
		/* set template for the A5_MUX hdr in header addition block */
		hdr_entry->hdr_len = IPA_A5_MUX_HEADER_LENGTH;
	} else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) {
		strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME,
				IPA_RESOURCE_NAME_MAX);
		hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH;
	} else {
		WARN_ON(1);
	}

	if (ipa2_add_hdr(hdr)) {
		IPAERR("fail to add exception hdr\n");
		ret = -EPERM;
		goto bail;
	}

	if (hdr_entry->status) {
		IPAERR("fail to add exception hdr\n");
		ret = -EPERM;
		goto bail;
	}

	ipa_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;

	/* set the route register to pass exception packets to Apps */
	route.route_def_pipe = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
	route.route_frag_def_pipe = ipa2_get_ep_mapping(
		IPA_CLIENT_APPS_LAN_CONS);
	route.route_def_hdr_table = !ipa_ctx->hdr_tbl_lcl;

	if (ipa_cfg_route(&route)) {
		IPAERR("fail to add exception hdr\n");
		ret = -EPERM;
		goto bail;
	}

	ret = 0;
bail:
	kfree(hdr);
	return ret;
}

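/*
 * ipa_init_smem_region() zeroes one region of IPA local (shared)
 * memory. The region is not directly writable by the AP, so a
 * zero-filled DMA-coherent buffer is staged in system memory and an
 * IPA_DMA_SHARED_MEM immediate command asks the IPA to copy it into
 * local memory at the given offset.
 */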
static int ipa_init_smem_region(int memory_region_size,
				int memory_region_offset)
{
	struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL;
	struct ipa_desc desc;
	struct ipa_mem_buffer mem;
	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
	int rc;

	if (memory_region_size == 0)
		return 0;

	memset(&desc, 0, sizeof(desc));
	memset(&mem, 0, sizeof(mem));

	mem.size = memory_region_size;
	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size,
		&mem.phys_base, GFP_KERNEL);
	if (!mem.base) {
		IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
		return -ENOMEM;
	}

	memset(mem.base, 0, mem.size);

	cmd = kzalloc(sizeof(*cmd), flag);
	if (cmd == NULL) {
		IPAERR("Failed to alloc immediate command object\n");
		rc = -ENOMEM;
		goto fail_send_cmd;
	}

	cmd->size = mem.size;
	cmd->system_addr = mem.phys_base;
	cmd->local_addr = ipa_ctx->smem_restricted_bytes +
		memory_region_offset;
	desc.opcode = IPA_DMA_SHARED_MEM;
	desc.pyld = cmd;
	desc.len = sizeof(*cmd);
	desc.type = IPA_IMM_CMD_DESC;

	rc = ipa_send_cmd(1, &desc);
	if (rc) {
		IPAERR("failed to send immediate command (error %d)\n", rc);
		rc = -EFAULT;
	}

	kfree(cmd);
fail_send_cmd:
	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
		mem.phys_base);

	return rc;
}

/**
 * ipa_init_q6_smem() - Initialize Q6 general memory and
 * header memory regions in IPA.
 *
 * Return codes:
 * 0: success
 * -ENOMEM: failed to allocate dma memory
 * -EFAULT: failed to send IPA command to initialize the memory
 */
int ipa_init_q6_smem(void)
{
	int rc;

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	if (ipa_ctx->ipa_hw_type == IPA_HW_v2_0)
		rc = ipa_init_smem_region(IPA_MEM_PART(modem_size) -
			IPA_MEM_RAM_MODEM_NETWORK_STATS_SIZE,
			IPA_MEM_PART(modem_ofst));
	else
		rc = ipa_init_smem_region(IPA_MEM_PART(modem_size),
			IPA_MEM_PART(modem_ofst));

	if (rc) {
		IPAERR("failed to initialize Modem RAM memory\n");
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
		return rc;
	}

	rc = ipa_init_smem_region(IPA_MEM_PART(modem_hdr_size),
		IPA_MEM_PART(modem_hdr_ofst));
	if (rc) {
		IPAERR("failed to initialize Modem HDRs RAM memory\n");
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
		return rc;
	}

	rc = ipa_init_smem_region(IPA_MEM_PART(modem_hdr_proc_ctx_size),
		IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
	if (rc) {
		IPAERR("failed to initialize Modem proc ctx RAM memory\n");
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
		return rc;
	}

	rc = ipa_init_smem_region(IPA_MEM_PART(modem_comp_decomp_size),
		IPA_MEM_PART(modem_comp_decomp_ofst));
	if (rc) {
		IPAERR("failed to initialize Modem Comp/Decomp RAM memory\n");
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
		return rc;
	}

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

	return rc;
}

static void ipa_free_buffer(void *user1, int user2)
{
	kfree(user1);
}

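/*
 * ipa_q6_pipe_delay() sets the DELAY bit on each Q6 (modem) producer
 * pipe so the modem cannot inject new packets while it is being shut
 * down. ZIP and non-ZIP pipes are handled in separate shutdown phases,
 * hence the zip_pipes argument.
 */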
int ipa_q6_pipe_delay(bool zip_pipes)
{
	u32 reg_val = 0;
	int client_idx;
	int ep_idx;

	/* For ZIP pipes, processing is done in AFTER_SHUTDOWN callback. */
	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
		/* Skip the processing for non Q6 pipes. */
		if (!IPA_CLIENT_IS_Q6_PROD(client_idx))
			continue;
		/* Skip the processing for NON-ZIP pipes. */
		else if (zip_pipes && IPA_CLIENT_IS_Q6_NON_ZIP_PROD(client_idx))
			continue;
		/* Skip the processing for ZIP pipes. */
		else if (!zip_pipes && IPA_CLIENT_IS_Q6_ZIP_PROD(client_idx))
			continue;

		ep_idx = ipa2_get_ep_mapping(client_idx);
		if (ep_idx == -1)
			continue;

		IPA_SETFIELD_IN_REG(reg_val, 1,
			IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT,
			IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK);

		ipa_write_reg(ipa_ctx->mmio,
			IPA_ENDP_INIT_CTRL_N_OFST(ep_idx), reg_val);
	}

	return 0;
}

int ipa_q6_monitor_holb_mitigation(bool enable)
{
	int ep_idx;
	int client_idx;

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
		if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx)) {
			ep_idx = ipa2_get_ep_mapping(client_idx);
			if (ep_idx == -1)
				continue;
			/*
			 * Send a command to the uC to enable/disable
			 * holb monitoring.
			 */
			ipa_uc_monitor_holb(client_idx, enable);
		}
	}
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

	return 0;
}

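/*
 * Head-Of-Line Blocking (HOLB) avoidance: with the HOLB timer set to 0
 * and the HOLB_EN bit set, the IPA immediately drops packets destined
 * for a consumer pipe that has stopped draining, instead of stalling
 * traffic queued behind it. ipa_q6_avoid_holb() arms this (plus pipe
 * suspend) on the relevant Q6 consumer pipes during modem shutdown.
 */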
1776static int ipa_q6_avoid_holb(bool zip_pipes)
1777{
1778 u32 reg_val;
1779 int ep_idx;
1780 int client_idx;
1781 struct ipa_ep_cfg_ctrl avoid_holb;
1782
1783 memset(&avoid_holb, 0, sizeof(avoid_holb));
1784 avoid_holb.ipa_ep_suspend = true;
1785
1786 /* For ZIP pipes, processing is done in AFTER_SHUTDOWN callback. */
1787 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
1788 /* Skip the processing for non Q6 pipes. */
1789 if (!IPA_CLIENT_IS_Q6_CONS(client_idx))
1790 continue;
1791 /* Skip the processing for NON-ZIP pipes. */
1792 else if (zip_pipes && IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx))
1793 continue;
1794 /* Skip the processing for ZIP pipes. */
1795 else if (!zip_pipes && IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx))
1796 continue;
1797
1798 ep_idx = ipa2_get_ep_mapping(client_idx);
1799 if (ep_idx == -1)
1800 continue;
1801
1802 /*
1803		 * ipa2_cfg_ep_holb() is not used here because we are
1804		 * setting HOLB on Q6 pipes, which are not valid from the
1805		 * APPS perspective; that function would therefore fail.
1807 */
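		/* Timer value 0: drop blocked packets immediately. */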
1808 reg_val = 0;
1809 IPA_SETFIELD_IN_REG(reg_val, 0,
1810 IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_SHFT,
1811 IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_BMSK);
1812
1813 ipa_write_reg(ipa_ctx->mmio,
1814 IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(ep_idx),
1815 reg_val);
1816
1817 reg_val = 0;
1818 IPA_SETFIELD_IN_REG(reg_val, 1,
1819 IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_SHFT,
1820 IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_BMSK);
1821
1822 ipa_write_reg(ipa_ctx->mmio,
1823 IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(ep_idx),
1824 reg_val);
1825
1826 ipa2_cfg_ep_ctrl(ep_idx, &avoid_holb);
1827 }
1828
1829 return 0;
1830}
1831
1832static u32 ipa_get_max_flt_rt_cmds(u32 num_pipes)
1833{
1834 u32 max_cmds = 0;
1835
1836 /* As many filter tables as there are pipes, x2 for IPv4 and IPv6 */
1837 max_cmds += num_pipes * 2;
1838
1839 /* For each of the Modem routing tables */
1840 max_cmds += (IPA_MEM_PART(v4_modem_rt_index_hi) -
1841 IPA_MEM_PART(v4_modem_rt_index_lo) + 1);
1842
1843 max_cmds += (IPA_MEM_PART(v6_modem_rt_index_hi) -
1844 IPA_MEM_PART(v6_modem_rt_index_lo) + 1);
1845
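	/*
	 * Example (illustrative numbers only): 20 pipes and 4 modem routing
	 * indices per IP family yield 20 * 2 + 4 + 4 = 48 commands.
	 */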
1846 return max_cmds;
1847}
1848
1849static int ipa_q6_clean_q6_tables(void)
1850{
1851 struct ipa_desc *desc;
1852 struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL;
1853 int pipe_idx;
1854 int num_cmds = 0;
1855 int index;
1856 int retval;
1857 struct ipa_mem_buffer mem = { 0 };
1858 u32 *entry;
1859 u32 max_cmds = ipa_get_max_flt_rt_cmds(ipa_ctx->ipa_num_pipes);
Utkarsh Saxena67d59b62017-05-16 22:41:50 +05301860 gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
Amir Levy9659e592016-10-27 18:08:27 +03001861
1862 mem.base = dma_alloc_coherent(ipa_ctx->pdev, 4, &mem.phys_base,
Ghanim Fodi70379e22016-11-01 00:09:10 +02001863 GFP_ATOMIC);
Amir Levy9659e592016-10-27 18:08:27 +03001864 if (!mem.base) {
1865 IPAERR("failed to alloc DMA buff of size 4\n");
1866 return -ENOMEM;
1867 }
1868
1869 mem.size = 4;
1870 entry = mem.base;
1871 *entry = ipa_ctx->empty_rt_tbl_mem.phys_base;
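	/*
	 * The 4-byte DMA buffer holds the physical address of the empty
	 * routing table; every DMA_SHARED_MEM command built below copies it
	 * over one table pointer in SRAM.
	 */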
1872
1873 desc = kcalloc(max_cmds, sizeof(struct ipa_desc), GFP_KERNEL);
1874 if (!desc) {
1875 IPAERR("failed to allocate memory\n");
1876 retval = -ENOMEM;
1877 goto bail_dma;
1878 }
1879
1880 cmd = kcalloc(max_cmds, sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
Utkarsh Saxena67d59b62017-05-16 22:41:50 +05301881 flag);
Amir Levy9659e592016-10-27 18:08:27 +03001882 if (!cmd) {
1883 IPAERR("failed to allocate memory\n");
1884 retval = -ENOMEM;
1885 goto bail_desc;
1886 }
1887
1888 /*
1889	 * Iterate over all pipes that are either invalid (unused by the AP)
1890	 * or connected but not configured by the AP.
1891 */
1892 for (pipe_idx = 0; pipe_idx < ipa_ctx->ipa_num_pipes; pipe_idx++) {
1893 if (!ipa_ctx->ep[pipe_idx].valid ||
1894 ipa_ctx->ep[pipe_idx].skip_ep_cfg) {
1895 /*
1896 * Need to point v4 and v6 fltr tables to an empty
1897 * table
1898 */
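			/*
			 * The +8 below appears to skip the rule bitmap word
			 * and the global table pointer that precede the
			 * per-pipe pointers (cf. _ipa_init_flt4_v2()).
			 */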
1899 cmd[num_cmds].size = mem.size;
1900 cmd[num_cmds].system_addr = mem.phys_base;
1901 cmd[num_cmds].local_addr =
1902 ipa_ctx->smem_restricted_bytes +
1903 IPA_MEM_PART(v4_flt_ofst) + 8 + pipe_idx * 4;
1904
1905 desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
1906 desc[num_cmds].pyld = &cmd[num_cmds];
1907 desc[num_cmds].len = sizeof(*cmd);
1908 desc[num_cmds].type = IPA_IMM_CMD_DESC;
1909 num_cmds++;
1910
1911 cmd[num_cmds].size = mem.size;
1912 cmd[num_cmds].system_addr = mem.phys_base;
1913 cmd[num_cmds].local_addr =
1914 ipa_ctx->smem_restricted_bytes +
1915 IPA_MEM_PART(v6_flt_ofst) + 8 + pipe_idx * 4;
1916
1917 desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
1918 desc[num_cmds].pyld = &cmd[num_cmds];
1919 desc[num_cmds].len = sizeof(*cmd);
1920 desc[num_cmds].type = IPA_IMM_CMD_DESC;
1921 num_cmds++;
1922 }
1923 }
1924
1925 /* Need to point v4/v6 modem routing tables to an empty table */
1926 for (index = IPA_MEM_PART(v4_modem_rt_index_lo);
1927 index <= IPA_MEM_PART(v4_modem_rt_index_hi);
1928 index++) {
1929 cmd[num_cmds].size = mem.size;
1930 cmd[num_cmds].system_addr = mem.phys_base;
1931 cmd[num_cmds].local_addr = ipa_ctx->smem_restricted_bytes +
1932 IPA_MEM_PART(v4_rt_ofst) + index * 4;
1933
1934 desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
1935 desc[num_cmds].pyld = &cmd[num_cmds];
1936 desc[num_cmds].len = sizeof(*cmd);
1937 desc[num_cmds].type = IPA_IMM_CMD_DESC;
1938 num_cmds++;
1939 }
1940
1941 for (index = IPA_MEM_PART(v6_modem_rt_index_lo);
1942 index <= IPA_MEM_PART(v6_modem_rt_index_hi);
1943 index++) {
1944 cmd[num_cmds].size = mem.size;
1945 cmd[num_cmds].system_addr = mem.phys_base;
1946 cmd[num_cmds].local_addr = ipa_ctx->smem_restricted_bytes +
1947 IPA_MEM_PART(v6_rt_ofst) + index * 4;
1948
1949 desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
1950 desc[num_cmds].pyld = &cmd[num_cmds];
1951 desc[num_cmds].len = sizeof(*cmd);
1952 desc[num_cmds].type = IPA_IMM_CMD_DESC;
1953 num_cmds++;
1954 }
1955
1956 retval = ipa_send_cmd(num_cmds, desc);
1957 if (retval) {
1958 IPAERR("failed to send immediate command (error %d)\n", retval);
1959 retval = -EFAULT;
1960 }
1961
1962 kfree(cmd);
1963
1964bail_desc:
1965 kfree(desc);
1966
1967bail_dma:
1968 dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
1969
1970 return retval;
1971}
1972
1973static void ipa_q6_disable_agg_reg(struct ipa_register_write *reg_write,
1974 int ep_idx)
1975{
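	/*
	 * Build a REGISTER_WRITE payload that force-closes any open
	 * aggregation frame and clears AGGR_EN on the endpoint.
	 */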
1976 reg_write->skip_pipeline_clear = 0;
1977
1978 reg_write->offset = IPA_ENDP_INIT_AGGR_N_OFST_v2_0(ep_idx);
1979 reg_write->value =
1980 (1 & IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK) <<
1981 IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
1982 reg_write->value_mask =
1983 IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK <<
1984 IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
1985
1986 reg_write->value |=
1987 ((0 & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) <<
1988 IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT);
1989 reg_write->value_mask |=
1990 ((IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK <<
1991 IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT));
1992}
1993
1994static int ipa_q6_set_ex_path_dis_agg(void)
1995{
1996 int ep_idx;
1997 int client_idx;
1998 struct ipa_desc *desc;
1999 int num_descs = 0;
2000 int index;
2001 struct ipa_register_write *reg_write;
2002 int retval;
Mohammed Javid097ca402017-11-02 19:10:22 +05302003 gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
Amir Levy9659e592016-10-27 18:08:27 +03002004
2005 desc = kcalloc(ipa_ctx->ipa_num_pipes, sizeof(struct ipa_desc),
2006 GFP_KERNEL);
2007 if (!desc) {
2008 IPAERR("failed to allocate memory\n");
2009 return -ENOMEM;
2010 }
2011
2012 /* Set the exception path to AP */
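	/*
	 * For each valid pipe left unconfigured by the AP, the STATUS
	 * endpoint register is pointed at the AP LAN consumer so that
	 * exception traffic lands on the AP once Q6 is gone.
	 */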
2013 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
2014 ep_idx = ipa2_get_ep_mapping(client_idx);
2015 if (ep_idx == -1)
2016 continue;
2017
2018 if (ipa_ctx->ep[ep_idx].valid &&
2019 ipa_ctx->ep[ep_idx].skip_ep_cfg) {
2020 BUG_ON(num_descs >= ipa_ctx->ipa_num_pipes);
Mohammed Javid097ca402017-11-02 19:10:22 +05302021 reg_write = kzalloc(sizeof(*reg_write), flag);
Amir Levy9659e592016-10-27 18:08:27 +03002022
2023 if (!reg_write) {
2024 IPAERR("failed to allocate memory\n");
2025 BUG();
2026 }
2027 reg_write->skip_pipeline_clear = 0;
2028 reg_write->offset = IPA_ENDP_STATUS_n_OFST(ep_idx);
2029 reg_write->value =
2030 (ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS) &
2031 IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK) <<
2032 IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
2033 reg_write->value_mask =
2034 IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK <<
2035 IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
2036
2037 desc[num_descs].opcode = IPA_REGISTER_WRITE;
2038 desc[num_descs].pyld = reg_write;
2039 desc[num_descs].len = sizeof(*reg_write);
2040 desc[num_descs].type = IPA_IMM_CMD_DESC;
2041 desc[num_descs].callback = ipa_free_buffer;
2042 desc[num_descs].user1 = reg_write;
2043 num_descs++;
2044 }
2045 }
2046
2047 /* Disable AGGR on IPA->Q6 pipes */
2048 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
2049 ep_idx = ipa2_get_ep_mapping(client_idx);
2050 if (ep_idx == -1)
2051 continue;
2052 if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx) ||
2053 IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx)) {
Mohammed Javid097ca402017-11-02 19:10:22 +05302054 reg_write = kzalloc(sizeof(*reg_write), flag);
Amir Levy9659e592016-10-27 18:08:27 +03002055
2056 if (!reg_write) {
2057 IPAERR("failed to allocate memory\n");
2058 BUG();
2059 }
2060
2061 ipa_q6_disable_agg_reg(reg_write, ep_idx);
2062
2063 desc[num_descs].opcode = IPA_REGISTER_WRITE;
2064 desc[num_descs].pyld = reg_write;
2065 desc[num_descs].len = sizeof(*reg_write);
2066 desc[num_descs].type = IPA_IMM_CMD_DESC;
2067 desc[num_descs].callback = ipa_free_buffer;
2068 desc[num_descs].user1 = reg_write;
2069 num_descs++;
2070 }
2071 }
2072
2073	/* Wait up to 150 ms for the IPA TAG process to complete */
2074 retval = ipa_tag_process(desc, num_descs,
2075 msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT));
2076 if (retval) {
2077 IPAERR("TAG process failed! (error %d)\n", retval);
2078 /* For timeout error ipa_free_buffer cb will free user1 */
2079		/* On a timeout error the ipa_free_buffer callback frees user1 */
2080 for (index = 0; index < num_descs; index++)
2081 kfree(desc[index].user1);
2082 retval = -EINVAL;
2083 }
2084 }
2085
2086 kfree(desc);
2087
2088 return retval;
2089}
2090
2091/**
2092* ipa_q6_pre_shutdown_cleanup() - Clean up all Q6-related configuration
2093* in the IPA HW before modem shutdown. This is performed in
2094* case of SSR.
2095*
2096* Return codes:
2097* 0: success
2098* This is a mandatory procedure; if one of the steps fails, the
2099* AP needs to restart.
2100*/
2101int ipa_q6_pre_shutdown_cleanup(void)
2102{
2103	/* If the uC has notified the APPS of a ZIP engine error,
2104	 * the APPS needs to assert (this is a non-recoverable error).
2105 */
2106 if (ipa_ctx->uc_ctx.uc_zip_error)
2107 BUG();
2108
2109 IPA_ACTIVE_CLIENTS_INC_SPECIAL("Q6");
2110
2111 /*
2112	 * Do not delay Q6 pipes here. Doing so may result in IPA reading a
2113	 * DMA_TASK with the lock bit set and only then having Q6 pipe delay
2114	 * set. In that situation IPA will remain locked, as the DMA_TASK with
2115	 * the unlock bit will not be read while pipe delay is enabled. The
2116	 * IPA uC will wait for the pipe to be empty before issuing a BAM pipe reset.
2117 */
2118
2119 if (ipa_q6_monitor_holb_mitigation(false)) {
2120		IPAERR("Failed to disable HOLB monitoring on Q6 pipes\n");
2121 BUG();
2122 }
2123
2124 if (ipa_q6_avoid_holb(false)) {
2125 IPAERR("Failed to set HOLB on Q6 pipes\n");
2126 BUG();
2127 }
2128 if (ipa_q6_clean_q6_tables()) {
2129 IPAERR("Failed to clean Q6 tables\n");
2130 BUG();
2131 }
2132 if (ipa_q6_set_ex_path_dis_agg()) {
2133 IPAERR("Failed to disable aggregation on Q6 pipes\n");
2134 BUG();
2135 }
2136
2137 ipa_ctx->q6_proxy_clk_vote_valid = true;
2138 return 0;
2139}
2140
2141/**
2142* ipa_q6_post_shutdown_cleanup() - Clean up the Q6 pipes in the IPA HW
2143* after modem shutdown. This is performed
2144* in case of SSR.
2145*
2146* Return codes:
2147* 0: success
2148* This is a mandatory procedure; if one of the steps fails, the
2149* AP needs to restart.
2150*/
2151int ipa_q6_post_shutdown_cleanup(void)
2152{
2153 int client_idx;
2154 int res;
2155
2156 /*
2157	 * Do not delay Q6 pipes here. Doing so may result in IPA reading a
2158	 * DMA_TASK with the lock bit set and only then having Q6 pipe delay
2159	 * set. In that situation IPA will remain locked, as the DMA_TASK with
2160	 * the unlock bit will not be read while pipe delay is enabled. The
2161	 * IPA uC will wait for the pipe to be empty before issuing a BAM pipe reset.
2162 */
2163
2164 if (ipa_q6_avoid_holb(true)) {
2165 IPAERR("Failed to set HOLB on Q6 ZIP pipes\n");
2166 BUG();
2167 }
2168
2169 if (!ipa_ctx->uc_ctx.uc_loaded) {
2170 IPAERR("uC is not loaded, won't reset Q6 pipes\n");
2171 return 0;
2172 }
2173
2174 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
2175 if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx) ||
2176 IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx) ||
2177 IPA_CLIENT_IS_Q6_NON_ZIP_PROD(client_idx) ||
2178 IPA_CLIENT_IS_Q6_ZIP_PROD(client_idx)) {
2179 res = ipa_uc_reset_pipe(client_idx);
2180 if (res)
2181 BUG();
2182 }
2183 return 0;
2184}
2185
2186int _ipa_init_sram_v2(void)
2187{
2188 u32 *ipa_sram_mmio;
2189 unsigned long phys_addr;
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302190 struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03002191 struct ipa_desc desc = {0};
2192 struct ipa_mem_buffer mem;
Utkarsh Saxenae6510102017-04-14 19:31:07 +05302193 gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
Amir Levy9659e592016-10-27 18:08:27 +03002194 int rc = 0;
2195
2196 phys_addr = ipa_ctx->ipa_wrapper_base +
2197 ipa_ctx->ctrl->ipa_reg_base_ofst +
2198 IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(
2199 ipa_ctx->smem_restricted_bytes / 4);
2200
2201 ipa_sram_mmio = ioremap(phys_addr,
2202 ipa_ctx->smem_sz - ipa_ctx->smem_restricted_bytes);
2203 if (!ipa_sram_mmio) {
2204 IPAERR("fail to ioremap IPA SRAM\n");
2205 return -ENOMEM;
2206 }
2207
2208#define IPA_SRAM_SET(ofst, val) (ipa_sram_mmio[(ofst - 4) / 4] = val)
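/*
 * The macro writes the 32-bit word that immediately precedes byte offset
 * 'ofst' in the mapped SRAM window; it is used here to drop canary words
 * ahead of each partition.
 */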
2209
2210 IPA_SRAM_SET(IPA_MEM_PART(v6_flt_ofst) - 4, IPA_MEM_CANARY_VAL);
2211 IPA_SRAM_SET(IPA_MEM_PART(v6_flt_ofst), IPA_MEM_CANARY_VAL);
2212 IPA_SRAM_SET(IPA_MEM_PART(v4_rt_ofst) - 4, IPA_MEM_CANARY_VAL);
2213 IPA_SRAM_SET(IPA_MEM_PART(v4_rt_ofst), IPA_MEM_CANARY_VAL);
2214 IPA_SRAM_SET(IPA_MEM_PART(v6_rt_ofst), IPA_MEM_CANARY_VAL);
2215 IPA_SRAM_SET(IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_CANARY_VAL);
2216 IPA_SRAM_SET(IPA_MEM_PART(modem_ofst), IPA_MEM_CANARY_VAL);
2217 IPA_SRAM_SET(IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_CANARY_VAL);
2218 IPA_SRAM_SET(IPA_MEM_PART(uc_info_ofst), IPA_MEM_CANARY_VAL);
2219
2220 iounmap(ipa_sram_mmio);
2221
2222 mem.size = IPA_STATUS_CLEAR_SIZE;
2223 mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
2224 GFP_KERNEL);
2225 if (!mem.base) {
2226 IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
2227 return -ENOMEM;
2228 }
2229 memset(mem.base, 0, mem.size);
2230
Utkarsh Saxenae6510102017-04-14 19:31:07 +05302231 cmd = kzalloc(sizeof(*cmd), flag);
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302232 if (cmd == NULL) {
2233 IPAERR("Failed to alloc immediate command object\n");
2234 rc = -ENOMEM;
2235 goto fail_send_cmd;
2236 }
2237
2238 cmd->size = mem.size;
2239 cmd->system_addr = mem.phys_base;
2240 cmd->local_addr = IPA_STATUS_CLEAR_OFST;
Amir Levy9659e592016-10-27 18:08:27 +03002241 desc.opcode = IPA_DMA_SHARED_MEM;
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302242 desc.pyld = (void *)cmd;
Amir Levy9659e592016-10-27 18:08:27 +03002243 desc.len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
2244 desc.type = IPA_IMM_CMD_DESC;
2245
2246 if (ipa_send_cmd(1, &desc)) {
2247 IPAERR("fail to send immediate command\n");
2248 rc = -EFAULT;
2249 }
2250
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302251 kfree(cmd);
2252fail_send_cmd:
Amir Levy9659e592016-10-27 18:08:27 +03002253 dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
2254 return rc;
2255}
2256
2257int _ipa_init_sram_v2_5(void)
2258{
2259 u32 *ipa_sram_mmio;
2260 unsigned long phys_addr;
2261
2262 phys_addr = ipa_ctx->ipa_wrapper_base +
2263 ipa_ctx->ctrl->ipa_reg_base_ofst +
2264 IPA_SRAM_SW_FIRST_v2_5;
2265
2266 ipa_sram_mmio = ioremap(phys_addr,
2267 ipa_ctx->smem_sz - ipa_ctx->smem_restricted_bytes);
2268 if (!ipa_sram_mmio) {
2269 IPAERR("fail to ioremap IPA SRAM\n");
2270 return -ENOMEM;
2271 }
2272
2273#define IPA_SRAM_SET(ofst, val) (ipa_sram_mmio[(ofst - 4) / 4] = val)
2274
2275 IPA_SRAM_SET(IPA_MEM_PART(v4_flt_ofst) - 4, IPA_MEM_CANARY_VAL);
2276 IPA_SRAM_SET(IPA_MEM_PART(v4_flt_ofst), IPA_MEM_CANARY_VAL);
2277 IPA_SRAM_SET(IPA_MEM_PART(v6_flt_ofst) - 4, IPA_MEM_CANARY_VAL);
2278 IPA_SRAM_SET(IPA_MEM_PART(v6_flt_ofst), IPA_MEM_CANARY_VAL);
2279 IPA_SRAM_SET(IPA_MEM_PART(v4_rt_ofst) - 4, IPA_MEM_CANARY_VAL);
2280 IPA_SRAM_SET(IPA_MEM_PART(v4_rt_ofst), IPA_MEM_CANARY_VAL);
2281 IPA_SRAM_SET(IPA_MEM_PART(v6_rt_ofst), IPA_MEM_CANARY_VAL);
2282 IPA_SRAM_SET(IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_CANARY_VAL);
2283 IPA_SRAM_SET(IPA_MEM_PART(modem_hdr_proc_ctx_ofst) - 4,
2284 IPA_MEM_CANARY_VAL);
2285 IPA_SRAM_SET(IPA_MEM_PART(modem_hdr_proc_ctx_ofst), IPA_MEM_CANARY_VAL);
2286 IPA_SRAM_SET(IPA_MEM_PART(modem_ofst), IPA_MEM_CANARY_VAL);
2287 IPA_SRAM_SET(IPA_MEM_PART(end_ofst), IPA_MEM_CANARY_VAL);
2288
2289 iounmap(ipa_sram_mmio);
2290
2291 return 0;
2292}
2293
2294static inline void ipa_sram_set_canary(u32 *sram_mmio, int offset)
2295{
2296 /* Set 4 bytes of CANARY before the offset */
2297 sram_mmio[(offset - 4) / 4] = IPA_MEM_CANARY_VAL;
2298}
2299
2300int _ipa_init_sram_v2_6L(void)
2301{
2302 u32 *ipa_sram_mmio;
2303 unsigned long phys_addr;
2304
2305 phys_addr = ipa_ctx->ipa_wrapper_base +
2306 ipa_ctx->ctrl->ipa_reg_base_ofst +
2307 IPA_SRAM_SW_FIRST_v2_5;
2308
2309 ipa_sram_mmio = ioremap(phys_addr,
2310 ipa_ctx->smem_sz - ipa_ctx->smem_restricted_bytes);
2311 if (!ipa_sram_mmio) {
2312 IPAERR("fail to ioremap IPA SRAM\n");
2313 return -ENOMEM;
2314 }
2315
2316	/* Consult ipa_ram_mmap.h for the location of the CANARY values */
2317 ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_ofst) - 4);
2318 ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_ofst));
2319 ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_ofst) - 4);
2320 ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_ofst));
2321 ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_ofst) - 4);
2322 ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_ofst));
2323 ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_ofst));
2324 ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst));
2325 ipa_sram_set_canary(ipa_sram_mmio,
2326 IPA_MEM_PART(modem_comp_decomp_ofst) - 4);
2327 ipa_sram_set_canary(ipa_sram_mmio,
2328 IPA_MEM_PART(modem_comp_decomp_ofst));
2329 ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst));
2330 ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(end_ofst));
2331
2332 iounmap(ipa_sram_mmio);
2333
2334 return 0;
2335}
2336
2337int _ipa_init_hdr_v2(void)
2338{
2339 struct ipa_desc desc = { 0 };
2340 struct ipa_mem_buffer mem;
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302341 struct ipa_hdr_init_local *cmd = NULL;
Utkarsh Saxenae6510102017-04-14 19:31:07 +05302342 gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
Amir Levy9659e592016-10-27 18:08:27 +03002343 int rc = 0;
2344
2345 mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
2346 mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
2347 GFP_KERNEL);
2348 if (!mem.base) {
2349 IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
2350 return -ENOMEM;
2351 }
2352 memset(mem.base, 0, mem.size);
2353
Utkarsh Saxenae6510102017-04-14 19:31:07 +05302354 cmd = kzalloc(sizeof(*cmd), flag);
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302355 if (cmd == NULL) {
2356 IPAERR("Failed to alloc header init command object\n");
2357 rc = -ENOMEM;
2358 goto fail_send_cmd;
2359 }
2360
2361 cmd->hdr_table_src_addr = mem.phys_base;
2362 cmd->size_hdr_table = mem.size;
2363 cmd->hdr_table_dst_addr = ipa_ctx->smem_restricted_bytes +
Amir Levy9659e592016-10-27 18:08:27 +03002364 IPA_MEM_PART(modem_hdr_ofst);
2365
2366 desc.opcode = IPA_HDR_INIT_LOCAL;
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302367 desc.pyld = (void *)cmd;
Amir Levy9659e592016-10-27 18:08:27 +03002368 desc.len = sizeof(struct ipa_hdr_init_local);
2369 desc.type = IPA_IMM_CMD_DESC;
2370 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2371
2372 if (ipa_send_cmd(1, &desc)) {
2373 IPAERR("fail to send immediate command\n");
2374 rc = -EFAULT;
2375 }
2376
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302377 kfree(cmd);
2378fail_send_cmd:
Amir Levy9659e592016-10-27 18:08:27 +03002379 dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
2380 return rc;
2381}
2382
2383int _ipa_init_hdr_v2_5(void)
2384{
2385 struct ipa_desc desc = { 0 };
2386 struct ipa_mem_buffer mem;
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302387 struct ipa_hdr_init_local *cmd = NULL;
2388 struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd = NULL;
Utkarsh Saxenae6510102017-04-14 19:31:07 +05302389 gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
Amir Levy9659e592016-10-27 18:08:27 +03002390
2391 mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
2392 mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
2393 GFP_KERNEL);
2394 if (!mem.base) {
2395 IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
2396 return -ENOMEM;
2397 }
2398 memset(mem.base, 0, mem.size);
2399
Utkarsh Saxenae6510102017-04-14 19:31:07 +05302400 cmd = kzalloc(sizeof(*cmd), flag);
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302401 if (cmd == NULL) {
2402 IPAERR("Failed to alloc header init command object\n");
2403 dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
2404 mem.phys_base);
2405 return -ENOMEM;
2406 }
2407
2408 cmd->hdr_table_src_addr = mem.phys_base;
2409 cmd->size_hdr_table = mem.size;
2410 cmd->hdr_table_dst_addr = ipa_ctx->smem_restricted_bytes +
Amir Levy9659e592016-10-27 18:08:27 +03002411 IPA_MEM_PART(modem_hdr_ofst);
2412
2413 desc.opcode = IPA_HDR_INIT_LOCAL;
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302414 desc.pyld = (void *)cmd;
Amir Levy9659e592016-10-27 18:08:27 +03002415 desc.len = sizeof(struct ipa_hdr_init_local);
2416 desc.type = IPA_IMM_CMD_DESC;
2417 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2418
2419 if (ipa_send_cmd(1, &desc)) {
2420 IPAERR("fail to send immediate command\n");
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302421 kfree(cmd);
2422 dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
Amir Levy9659e592016-10-27 18:08:27 +03002423 mem.phys_base);
2424 return -EFAULT;
2425 }
2426
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302427 kfree(cmd);
Amir Levy9659e592016-10-27 18:08:27 +03002428 dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
2429
2430 mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) +
2431 IPA_MEM_PART(apps_hdr_proc_ctx_size);
2432 mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
2433 GFP_KERNEL);
2434 if (!mem.base) {
2435 IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
2436 return -ENOMEM;
2437 }
2438 memset(mem.base, 0, mem.size);
2439 memset(&desc, 0, sizeof(desc));
2440
Utkarsh Saxenae6510102017-04-14 19:31:07 +05302441 dma_cmd = kzalloc(sizeof(*dma_cmd), flag);
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302442 if (dma_cmd == NULL) {
2443 IPAERR("Failed to alloc immediate command object\n");
2444 dma_free_coherent(ipa_ctx->pdev,
2445 mem.size,
2446 mem.base,
2447 mem.phys_base);
2448 return -ENOMEM;
2449 }
2450
2451 dma_cmd->system_addr = mem.phys_base;
2452 dma_cmd->local_addr = ipa_ctx->smem_restricted_bytes +
Amir Levy9659e592016-10-27 18:08:27 +03002453 IPA_MEM_PART(modem_hdr_proc_ctx_ofst);
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302454 dma_cmd->size = mem.size;
Amir Levy9659e592016-10-27 18:08:27 +03002455 desc.opcode = IPA_DMA_SHARED_MEM;
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302456 desc.pyld = (void *)dma_cmd;
Amir Levy9659e592016-10-27 18:08:27 +03002457 desc.len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
2458 desc.type = IPA_IMM_CMD_DESC;
2459 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2460
2461 if (ipa_send_cmd(1, &desc)) {
2462 IPAERR("fail to send immediate command\n");
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302463 kfree(dma_cmd);
Amir Levy9659e592016-10-27 18:08:27 +03002464 dma_free_coherent(ipa_ctx->pdev,
2465 mem.size,
2466 mem.base,
2467 mem.phys_base);
2468 return -EFAULT;
2469 }
2470
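	/*
	 * Point the HW at the SRAM base of the header processing context
	 * area that was just initialized.
	 */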
2471 ipa_write_reg(ipa_ctx->mmio,
2472 IPA_LOCAL_PKT_PROC_CNTXT_BASE_OFST,
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302473 dma_cmd->local_addr);
Amir Levy9659e592016-10-27 18:08:27 +03002474
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302475 kfree(dma_cmd);
Amir Levy9659e592016-10-27 18:08:27 +03002476 dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
2477
2478 return 0;
2479}
2480
2481int _ipa_init_hdr_v2_6L(void)
2482{
2483 /* Same implementation as IPAv2 */
2484 return _ipa_init_hdr_v2();
2485}
2486
2487int _ipa_init_rt4_v2(void)
2488{
2489 struct ipa_desc desc = { 0 };
2490 struct ipa_mem_buffer mem;
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302491 struct ipa_ip_v4_routing_init *v4_cmd = NULL;
Utkarsh Saxenae6510102017-04-14 19:31:07 +05302492 gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
Amir Levy9659e592016-10-27 18:08:27 +03002493 u32 *entry;
2494 int i;
2495 int rc = 0;
2496
2497 for (i = IPA_MEM_PART(v4_modem_rt_index_lo);
2498 i <= IPA_MEM_PART(v4_modem_rt_index_hi);
2499 i++)
2500 ipa_ctx->rt_idx_bitmap[IPA_IP_v4] |= (1 << i);
2501 IPADBG("v4 rt bitmap 0x%lx\n", ipa_ctx->rt_idx_bitmap[IPA_IP_v4]);
2502
2503 mem.size = IPA_MEM_PART(v4_rt_size);
2504 mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
2505 GFP_KERNEL);
2506 if (!mem.base) {
2507 IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
2508 return -ENOMEM;
2509 }
2510
2511 entry = mem.base;
2512 for (i = 0; i < IPA_MEM_PART(v4_num_index); i++) {
2513 *entry = ipa_ctx->empty_rt_tbl_mem.phys_base;
2514 entry++;
2515 }
2516
Utkarsh Saxenae6510102017-04-14 19:31:07 +05302517 v4_cmd = kzalloc(sizeof(*v4_cmd), flag);
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302518 if (v4_cmd == NULL) {
2519 IPAERR("Failed to alloc v4 routing init command object\n");
2520 rc = -ENOMEM;
2521 goto fail_send_cmd;
2522 }
2523
Amir Levy9659e592016-10-27 18:08:27 +03002524 desc.opcode = IPA_IP_V4_ROUTING_INIT;
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302525 v4_cmd->ipv4_rules_addr = mem.phys_base;
2526 v4_cmd->size_ipv4_rules = mem.size;
2527 v4_cmd->ipv4_addr = ipa_ctx->smem_restricted_bytes +
Amir Levy9659e592016-10-27 18:08:27 +03002528 IPA_MEM_PART(v4_rt_ofst);
2529	IPADBG("putting IPv4 routing rules at phys 0x%x\n",
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302530 v4_cmd->ipv4_addr);
Amir Levy9659e592016-10-27 18:08:27 +03002531
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302532 desc.pyld = (void *)v4_cmd;
Amir Levy9659e592016-10-27 18:08:27 +03002533 desc.len = sizeof(struct ipa_ip_v4_routing_init);
2534 desc.type = IPA_IMM_CMD_DESC;
2535 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2536
2537 if (ipa_send_cmd(1, &desc)) {
2538 IPAERR("fail to send immediate command\n");
2539 rc = -EFAULT;
2540 }
2541
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302542 kfree(v4_cmd);
2543fail_send_cmd:
Amir Levy9659e592016-10-27 18:08:27 +03002544 dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
2545 return rc;
2546}
2547
2548int _ipa_init_rt6_v2(void)
2549{
2550 struct ipa_desc desc = { 0 };
2551 struct ipa_mem_buffer mem;
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302552 struct ipa_ip_v6_routing_init *v6_cmd = NULL;
Utkarsh Saxenae6510102017-04-14 19:31:07 +05302553 gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
Amir Levy9659e592016-10-27 18:08:27 +03002554 u32 *entry;
2555 int i;
2556 int rc = 0;
2557
2558 for (i = IPA_MEM_PART(v6_modem_rt_index_lo);
2559 i <= IPA_MEM_PART(v6_modem_rt_index_hi);
2560 i++)
2561 ipa_ctx->rt_idx_bitmap[IPA_IP_v6] |= (1 << i);
2562 IPADBG("v6 rt bitmap 0x%lx\n", ipa_ctx->rt_idx_bitmap[IPA_IP_v6]);
2563
2564 mem.size = IPA_MEM_PART(v6_rt_size);
2565 mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
2566 GFP_KERNEL);
2567 if (!mem.base) {
2568 IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
2569 return -ENOMEM;
2570 }
2571
2572 entry = mem.base;
2573 for (i = 0; i < IPA_MEM_PART(v6_num_index); i++) {
2574 *entry = ipa_ctx->empty_rt_tbl_mem.phys_base;
2575 entry++;
2576 }
2577
Utkarsh Saxenae6510102017-04-14 19:31:07 +05302578 v6_cmd = kzalloc(sizeof(*v6_cmd), flag);
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302579 if (v6_cmd == NULL) {
2580 IPAERR("Failed to alloc v6 routing init command object\n");
2581 rc = -ENOMEM;
2582 goto fail_send_cmd;
2583 }
2584
Amir Levy9659e592016-10-27 18:08:27 +03002585 desc.opcode = IPA_IP_V6_ROUTING_INIT;
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302586 v6_cmd->ipv6_rules_addr = mem.phys_base;
2587 v6_cmd->size_ipv6_rules = mem.size;
2588 v6_cmd->ipv6_addr = ipa_ctx->smem_restricted_bytes +
Amir Levy9659e592016-10-27 18:08:27 +03002589 IPA_MEM_PART(v6_rt_ofst);
2590	IPADBG("putting IPv6 routing rules at phys 0x%x\n",
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302591 v6_cmd->ipv6_addr);
Amir Levy9659e592016-10-27 18:08:27 +03002592
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302593 desc.pyld = (void *)v6_cmd;
Amir Levy9659e592016-10-27 18:08:27 +03002594 desc.len = sizeof(struct ipa_ip_v6_routing_init);
2595 desc.type = IPA_IMM_CMD_DESC;
2596 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2597
2598 if (ipa_send_cmd(1, &desc)) {
2599 IPAERR("fail to send immediate command\n");
2600 rc = -EFAULT;
2601 }
2602
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302603 kfree(v6_cmd);
2604fail_send_cmd:
Amir Levy9659e592016-10-27 18:08:27 +03002605 dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
2606 return rc;
2607}
2608
2609int _ipa_init_flt4_v2(void)
2610{
2611 struct ipa_desc desc = { 0 };
2612 struct ipa_mem_buffer mem;
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302613 struct ipa_ip_v4_filter_init *v4_cmd = NULL;
Utkarsh Saxenae6510102017-04-14 19:31:07 +05302614 gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
Amir Levy9659e592016-10-27 18:08:27 +03002615 u32 *entry;
2616 int i;
2617 int rc = 0;
2618
2619 mem.size = IPA_MEM_PART(v4_flt_size);
2620 mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
2621 GFP_KERNEL);
2622 if (!mem.base) {
2623 IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
2624 return -ENOMEM;
2625 }
2626
2627 entry = mem.base;
2628
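	/*
	 * The first word looks like the rule-presence bitmap: bit 0 for the
	 * global entry and one bit per pipe, all set so that the per-pipe
	 * pointers written below are consulted.
	 */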
2629 *entry = ((0xFFFFF << 1) | 0x1);
2630 entry++;
2631
2632 for (i = 0; i <= ipa_ctx->ipa_num_pipes; i++) {
2633 *entry = ipa_ctx->empty_rt_tbl_mem.phys_base;
2634 entry++;
2635 }
2636
Utkarsh Saxenae6510102017-04-14 19:31:07 +05302637 v4_cmd = kzalloc(sizeof(*v4_cmd), flag);
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302638 if (v4_cmd == NULL) {
2639		IPAERR("Failed to alloc v4 filter init command object\n");
2640 rc = -ENOMEM;
2641 goto fail_send_cmd;
2642 }
2643
Amir Levy9659e592016-10-27 18:08:27 +03002644 desc.opcode = IPA_IP_V4_FILTER_INIT;
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302645 v4_cmd->ipv4_rules_addr = mem.phys_base;
2646 v4_cmd->size_ipv4_rules = mem.size;
2647 v4_cmd->ipv4_addr = ipa_ctx->smem_restricted_bytes +
Amir Levy9659e592016-10-27 18:08:27 +03002648 IPA_MEM_PART(v4_flt_ofst);
2649	IPADBG("putting IPv4 filtering rules at phys 0x%x\n",
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302650 v4_cmd->ipv4_addr);
Amir Levy9659e592016-10-27 18:08:27 +03002651
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302652 desc.pyld = (void *)v4_cmd;
Amir Levy9659e592016-10-27 18:08:27 +03002653 desc.len = sizeof(struct ipa_ip_v4_filter_init);
2654 desc.type = IPA_IMM_CMD_DESC;
2655 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2656
2657 if (ipa_send_cmd(1, &desc)) {
2658 IPAERR("fail to send immediate command\n");
2659 rc = -EFAULT;
2660 }
2661
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302662 kfree(v4_cmd);
2663fail_send_cmd:
Amir Levy9659e592016-10-27 18:08:27 +03002664 dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
2665 return rc;
2666}
2667
2668int _ipa_init_flt6_v2(void)
2669{
2670 struct ipa_desc desc = { 0 };
2671 struct ipa_mem_buffer mem;
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302672 struct ipa_ip_v6_filter_init *v6_cmd = NULL;
Utkarsh Saxenae6510102017-04-14 19:31:07 +05302673 gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
Amir Levy9659e592016-10-27 18:08:27 +03002674 u32 *entry;
2675 int i;
2676 int rc = 0;
2677
2678 mem.size = IPA_MEM_PART(v6_flt_size);
2679 mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
2680 GFP_KERNEL);
2681 if (!mem.base) {
2682 IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
2683 return -ENOMEM;
2684 }
2685
2686 entry = mem.base;
2687
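	/* Same bitmap layout as in the IPv4 table above. */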
2688 *entry = (0xFFFFF << 1) | 0x1;
2689 entry++;
2690
2691 for (i = 0; i <= ipa_ctx->ipa_num_pipes; i++) {
2692 *entry = ipa_ctx->empty_rt_tbl_mem.phys_base;
2693 entry++;
2694 }
2695
Utkarsh Saxenae6510102017-04-14 19:31:07 +05302696 v6_cmd = kzalloc(sizeof(*v6_cmd), flag);
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302697 if (v6_cmd == NULL) {
2698		IPAERR("Failed to alloc v6 filter init command object\n");
2699 rc = -ENOMEM;
2700 goto fail_send_cmd;
2701 }
2702
Amir Levy9659e592016-10-27 18:08:27 +03002703 desc.opcode = IPA_IP_V6_FILTER_INIT;
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302704 v6_cmd->ipv6_rules_addr = mem.phys_base;
2705 v6_cmd->size_ipv6_rules = mem.size;
2706 v6_cmd->ipv6_addr = ipa_ctx->smem_restricted_bytes +
Amir Levy9659e592016-10-27 18:08:27 +03002707 IPA_MEM_PART(v6_flt_ofst);
2708	IPADBG("putting IPv6 filtering rules at phys 0x%x\n",
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302709 v6_cmd->ipv6_addr);
Amir Levy9659e592016-10-27 18:08:27 +03002710
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302711 desc.pyld = (void *)v6_cmd;
Amir Levy9659e592016-10-27 18:08:27 +03002712 desc.len = sizeof(struct ipa_ip_v6_filter_init);
2713 desc.type = IPA_IMM_CMD_DESC;
2714 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2715
2716 if (ipa_send_cmd(1, &desc)) {
2717 IPAERR("fail to send immediate command\n");
2718 rc = -EFAULT;
2719 }
2720
Utkarsh Saxena88dd3e82017-03-01 19:22:31 +05302721 kfree(v6_cmd);
2722fail_send_cmd:
Amir Levy9659e592016-10-27 18:08:27 +03002723 dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
2724 return rc;
2725}
2726
2727static int ipa_setup_apps_pipes(void)
2728{
2729 struct ipa_sys_connect_params sys_in;
2730 int result = 0;
2731
2732 /* CMD OUT (A5->IPA) */
2733 memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
2734 sys_in.client = IPA_CLIENT_APPS_CMD_PROD;
2735 sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
2736 sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
2737 sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS;
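	/*
	 * The command pipe is configured in DMA mode; LAN_CONS appears to
	 * serve only as the nominal destination for its traffic.
	 */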
2738 sys_in.skip_ep_cfg = true;
2739 if (ipa2_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_cmd)) {
2740 IPAERR(":setup sys pipe failed.\n");
2741 result = -EPERM;
2742 goto fail_cmd;
2743 }
2744 IPADBG("Apps to IPA cmd pipe is connected\n");
2745
2746 ipa_ctx->ctrl->ipa_init_sram();
2747 IPADBG("SRAM initialized\n");
2748
2749 ipa_ctx->ctrl->ipa_init_hdr();
2750 IPADBG("HDR initialized\n");
2751
2752 ipa_ctx->ctrl->ipa_init_rt4();
2753 IPADBG("V4 RT initialized\n");
2754
2755 ipa_ctx->ctrl->ipa_init_rt6();
2756 IPADBG("V6 RT initialized\n");
2757
2758 ipa_ctx->ctrl->ipa_init_flt4();
2759 IPADBG("V4 FLT initialized\n");
2760
2761 ipa_ctx->ctrl->ipa_init_flt6();
2762 IPADBG("V6 FLT initialized\n");
2763
2764 if (ipa_setup_exception_path()) {
2765 IPAERR(":fail to setup excp path\n");
2766 result = -EPERM;
2767 goto fail_schedule_delayed_work;
2768 }
2769	IPADBG("Exception path was successfully set\n");
2770
2771 if (ipa_setup_dflt_rt_tables()) {
2772 IPAERR(":fail to setup dflt routes\n");
2773 result = -EPERM;
2774 goto fail_schedule_delayed_work;
2775 }
2776 IPADBG("default routing was set\n");
2777
2778 /* LAN IN (IPA->A5) */
2779 memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
2780 sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
2781 sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
2782 if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) {
2783 sys_in.ipa_ep_cfg.hdr.hdr_a5_mux = 1;
2784 sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_A5_MUX_HEADER_LENGTH;
2785 } else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) {
2786 sys_in.notify = ipa_lan_rx_cb;
2787 sys_in.priv = NULL;
2788 sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH;
2789 sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
2790 sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
2791 sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
2792 sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
2793 sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
2794 sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
2795 sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_ENABLE_CS_OFFLOAD_DL;
2796 } else {
2797 WARN_ON(1);
2798 }
2799
2800 /**
2801	 * ipa_lan_rx_cb() is intended to notify the source EP about a packet
2802	 * being received on LAN_CONS by calling the source EP's callback.
2803	 * There could be a race when calling this callback: another
2804	 * thread may nullify it, e.g. on EP disconnect.
2805	 * This lock protects the access to the source EP callback.
2806 */
2807 spin_lock_init(&ipa_ctx->disconnect_lock);
2808 if (ipa2_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_in)) {
2809 IPAERR(":setup sys pipe failed.\n");
2810 result = -EPERM;
2811 goto fail_schedule_delayed_work;
2812 }
2813
2814 /* LAN-WAN OUT (A5->IPA) */
2815 memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
2816 sys_in.client = IPA_CLIENT_APPS_LAN_WAN_PROD;
2817 sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ;
2818 sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
2819 if (ipa2_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_out)) {
2820 IPAERR(":setup sys pipe failed.\n");
2821 result = -EPERM;
2822 goto fail_data_out;
2823 }
2824
2825 return 0;
2826
2827fail_data_out:
2828 ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_in);
2829fail_schedule_delayed_work:
2830 if (ipa_ctx->dflt_v6_rt_rule_hdl)
2831 __ipa_del_rt_rule(ipa_ctx->dflt_v6_rt_rule_hdl);
2832 if (ipa_ctx->dflt_v4_rt_rule_hdl)
2833 __ipa_del_rt_rule(ipa_ctx->dflt_v4_rt_rule_hdl);
2834 if (ipa_ctx->excp_hdr_hdl)
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02002835 __ipa_del_hdr(ipa_ctx->excp_hdr_hdl, false);
Amir Levy9659e592016-10-27 18:08:27 +03002836 ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_cmd);
2837fail_cmd:
2838 return result;
2839}
2840
2841static void ipa_teardown_apps_pipes(void)
2842{
2843 ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_out);
2844 ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_in);
2845 __ipa_del_rt_rule(ipa_ctx->dflt_v6_rt_rule_hdl);
2846 __ipa_del_rt_rule(ipa_ctx->dflt_v4_rt_rule_hdl);
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02002847 __ipa_del_hdr(ipa_ctx->excp_hdr_hdl, false);
Amir Levy9659e592016-10-27 18:08:27 +03002848 ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_cmd);
2849}
2850
2851#ifdef CONFIG_COMPAT
2852long compat_ipa_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2853{
2854 int retval = 0;
2855 struct ipa_ioc_nat_alloc_mem32 nat_mem32;
2856 struct ipa_ioc_nat_alloc_mem nat_mem;
2857
2858 switch (cmd) {
2859 case IPA_IOC_ADD_HDR32:
2860 cmd = IPA_IOC_ADD_HDR;
2861 break;
2862 case IPA_IOC_DEL_HDR32:
2863 cmd = IPA_IOC_DEL_HDR;
2864 break;
2865 case IPA_IOC_ADD_RT_RULE32:
2866 cmd = IPA_IOC_ADD_RT_RULE;
2867 break;
2868 case IPA_IOC_DEL_RT_RULE32:
2869 cmd = IPA_IOC_DEL_RT_RULE;
2870 break;
2871 case IPA_IOC_ADD_FLT_RULE32:
2872 cmd = IPA_IOC_ADD_FLT_RULE;
2873 break;
2874 case IPA_IOC_DEL_FLT_RULE32:
2875 cmd = IPA_IOC_DEL_FLT_RULE;
2876 break;
2877 case IPA_IOC_GET_RT_TBL32:
2878 cmd = IPA_IOC_GET_RT_TBL;
2879 break;
2880 case IPA_IOC_COPY_HDR32:
2881 cmd = IPA_IOC_COPY_HDR;
2882 break;
2883 case IPA_IOC_QUERY_INTF32:
2884 cmd = IPA_IOC_QUERY_INTF;
2885 break;
2886 case IPA_IOC_QUERY_INTF_TX_PROPS32:
2887 cmd = IPA_IOC_QUERY_INTF_TX_PROPS;
2888 break;
2889 case IPA_IOC_QUERY_INTF_RX_PROPS32:
2890 cmd = IPA_IOC_QUERY_INTF_RX_PROPS;
2891 break;
2892 case IPA_IOC_QUERY_INTF_EXT_PROPS32:
2893 cmd = IPA_IOC_QUERY_INTF_EXT_PROPS;
2894 break;
2895 case IPA_IOC_GET_HDR32:
2896 cmd = IPA_IOC_GET_HDR;
2897 break;
2898 case IPA_IOC_ALLOC_NAT_MEM32:
2899 if (copy_from_user((u8 *)&nat_mem32, (u8 *)arg,
2900 sizeof(struct ipa_ioc_nat_alloc_mem32))) {
2901 retval = -EFAULT;
2902 goto ret;
2903 }
2904 memcpy(nat_mem.dev_name, nat_mem32.dev_name,
2905 IPA_RESOURCE_NAME_MAX);
2906 nat_mem.size = (size_t)nat_mem32.size;
2907 nat_mem.offset = (off_t)nat_mem32.offset;
2908
2909 /* null terminate the string */
2910 nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
2911
2912 if (ipa2_allocate_nat_device(&nat_mem)) {
2913 retval = -EFAULT;
2914 goto ret;
2915 }
2916 nat_mem32.offset = (compat_off_t)nat_mem.offset;
2917 if (copy_to_user((u8 *)arg, (u8 *)&nat_mem32,
2918 sizeof(struct ipa_ioc_nat_alloc_mem32))) {
2919 retval = -EFAULT;
2920 }
2921ret:
2922 return retval;
2923 case IPA_IOC_V4_INIT_NAT32:
2924 cmd = IPA_IOC_V4_INIT_NAT;
2925 break;
2926 case IPA_IOC_NAT_DMA32:
2927 cmd = IPA_IOC_NAT_DMA;
2928 break;
2929 case IPA_IOC_V4_DEL_NAT32:
2930 cmd = IPA_IOC_V4_DEL_NAT;
2931 break;
2932 case IPA_IOC_GET_NAT_OFFSET32:
2933 cmd = IPA_IOC_GET_NAT_OFFSET;
2934 break;
2935 case IPA_IOC_PULL_MSG32:
2936 cmd = IPA_IOC_PULL_MSG;
2937 break;
2938 case IPA_IOC_RM_ADD_DEPENDENCY32:
2939 cmd = IPA_IOC_RM_ADD_DEPENDENCY;
2940 break;
2941 case IPA_IOC_RM_DEL_DEPENDENCY32:
2942 cmd = IPA_IOC_RM_DEL_DEPENDENCY;
2943 break;
2944 case IPA_IOC_GENERATE_FLT_EQ32:
2945 cmd = IPA_IOC_GENERATE_FLT_EQ;
2946 break;
2947 case IPA_IOC_QUERY_RT_TBL_INDEX32:
2948 cmd = IPA_IOC_QUERY_RT_TBL_INDEX;
2949 break;
2950 case IPA_IOC_WRITE_QMAPID32:
2951 cmd = IPA_IOC_WRITE_QMAPID;
2952 break;
2953 case IPA_IOC_MDFY_FLT_RULE32:
2954 cmd = IPA_IOC_MDFY_FLT_RULE;
2955 break;
2956 case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32:
2957 cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD;
2958 break;
2959 case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32:
2960 cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL;
2961 break;
2962 case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32:
2963 cmd = IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED;
2964 break;
2965 case IPA_IOC_MDFY_RT_RULE32:
2966 cmd = IPA_IOC_MDFY_RT_RULE;
2967 break;
2968 case IPA_IOC_COMMIT_HDR:
2969 case IPA_IOC_RESET_HDR:
2970 case IPA_IOC_COMMIT_RT:
2971 case IPA_IOC_RESET_RT:
2972 case IPA_IOC_COMMIT_FLT:
2973 case IPA_IOC_RESET_FLT:
2974 case IPA_IOC_DUMP:
2975 case IPA_IOC_PUT_RT_TBL:
2976 case IPA_IOC_PUT_HDR:
2977 case IPA_IOC_SET_FLT:
2978 case IPA_IOC_QUERY_EP_MAPPING:
2979 break;
2980 default:
2981 return -ENOIOCTLCMD;
2982 }
2983 return ipa_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
2984}
2985#endif
2986
2987static const struct file_operations ipa_drv_fops = {
2988 .owner = THIS_MODULE,
2989 .open = ipa_open,
2990 .read = ipa_read,
2991 .unlocked_ioctl = ipa_ioctl,
2992#ifdef CONFIG_COMPAT
2993 .compat_ioctl = compat_ipa_ioctl,
2994#endif
2995};
2996
2997static int ipa_get_clks(struct device *dev)
2998{
2999 ipa_clk = clk_get(dev, "core_clk");
3000 if (IS_ERR(ipa_clk)) {
3001 if (ipa_clk != ERR_PTR(-EPROBE_DEFER))
3002 IPAERR("fail to get ipa clk\n");
3003 return PTR_ERR(ipa_clk);
3004 }
3005
3006 if (smmu_info.present && smmu_info.arm_smmu) {
3007 smmu_clk = clk_get(dev, "smmu_clk");
3008 if (IS_ERR(smmu_clk)) {
3009 if (smmu_clk != ERR_PTR(-EPROBE_DEFER))
3010 IPAERR("fail to get smmu clk\n");
3011 return PTR_ERR(smmu_clk);
3012 }
3013
3014 if (clk_get_rate(smmu_clk) == 0) {
3015 long rate = clk_round_rate(smmu_clk, 1000);
3016
3017 clk_set_rate(smmu_clk, rate);
3018 }
3019 }
3020
3021 if (ipa_ctx->ipa_hw_type < IPA_HW_v2_0) {
3022 ipa_cnoc_clk = clk_get(dev, "iface_clk");
3023 if (IS_ERR(ipa_cnoc_clk)) {
3024 ipa_cnoc_clk = NULL;
3025 IPAERR("fail to get cnoc clk\n");
3026 return -ENODEV;
3027 }
3028
3029 ipa_clk_src = clk_get(dev, "core_src_clk");
3030 if (IS_ERR(ipa_clk_src)) {
3031 ipa_clk_src = NULL;
3032 IPAERR("fail to get ipa clk src\n");
3033 return -ENODEV;
3034 }
3035
3036 sys_noc_ipa_axi_clk = clk_get(dev, "bus_clk");
3037 if (IS_ERR(sys_noc_ipa_axi_clk)) {
3038 sys_noc_ipa_axi_clk = NULL;
3039 IPAERR("fail to get sys_noc_ipa_axi clk\n");
3040 return -ENODEV;
3041 }
3042
3043 ipa_inactivity_clk = clk_get(dev, "inactivity_clk");
3044 if (IS_ERR(ipa_inactivity_clk)) {
3045 ipa_inactivity_clk = NULL;
3046 IPAERR("fail to get inactivity clk\n");
3047 return -ENODEV;
3048 }
3049 }
3050
3051 return 0;
3052}
3053
3054void _ipa_enable_clks_v2_0(void)
3055{
Utkarsh Saxena41d57c52016-11-16 12:04:28 +05303056 IPADBG_LOW("enabling gcc_ipa_clk\n");
Amir Levy9659e592016-10-27 18:08:27 +03003057 if (ipa_clk) {
3058 clk_prepare(ipa_clk);
3059 clk_enable(ipa_clk);
Utkarsh Saxena41d57c52016-11-16 12:04:28 +05303060 IPADBG_LOW("curr_ipa_clk_rate=%d", ipa_ctx->curr_ipa_clk_rate);
Amir Levy9659e592016-10-27 18:08:27 +03003061 clk_set_rate(ipa_clk, ipa_ctx->curr_ipa_clk_rate);
3062 ipa_uc_notify_clk_state(true);
3063 } else {
3064 WARN_ON(1);
3065 }
3066
3067 if (smmu_clk)
3068 clk_prepare_enable(smmu_clk);
3069 /* Enable the BAM IRQ. */
3070 ipa_sps_irq_control_all(true);
3071 ipa_suspend_apps_pipes(false);
3072}
3073
3074void _ipa_enable_clks_v1_1(void)
3075{
3076
3077 if (ipa_cnoc_clk) {
3078 clk_prepare(ipa_cnoc_clk);
3079 clk_enable(ipa_cnoc_clk);
3080 clk_set_rate(ipa_cnoc_clk, IPA_CNOC_CLK_RATE);
3081 } else {
3082 WARN_ON(1);
3083 }
3084
3085 if (ipa_clk_src)
3086 clk_set_rate(ipa_clk_src,
3087 ipa_ctx->curr_ipa_clk_rate);
3088 else
3089 WARN_ON(1);
3090
3091 if (ipa_clk)
3092 clk_prepare(ipa_clk);
3093 else
3094 WARN_ON(1);
3095
3096 if (sys_noc_ipa_axi_clk)
3097 clk_prepare(sys_noc_ipa_axi_clk);
3098 else
3099 WARN_ON(1);
3100
3101 if (ipa_inactivity_clk)
3102 clk_prepare(ipa_inactivity_clk);
3103 else
3104 WARN_ON(1);
3105
3106 if (ipa_clk)
3107 clk_enable(ipa_clk);
3108 else
3109 WARN_ON(1);
3110
3111 if (sys_noc_ipa_axi_clk)
3112 clk_enable(sys_noc_ipa_axi_clk);
3113 else
3114 WARN_ON(1);
3115
3116 if (ipa_inactivity_clk)
3117 clk_enable(ipa_inactivity_clk);
3118 else
3119 WARN_ON(1);
3120
3121}
3122
3123static unsigned int ipa_get_bus_vote(void)
3124{
3125 unsigned int idx = 1;
3126
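	/*
	 * idx selects an msm_bus use case: 0 means no vote, 1 is the SVS
	 * vote; nominal and turbo map to the higher entries when the bus
	 * scaling table provides them.
	 */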
3127 if (ipa_ctx->curr_ipa_clk_rate == ipa_ctx->ctrl->ipa_clk_rate_svs) {
3128 idx = 1;
3129 } else if (ipa_ctx->curr_ipa_clk_rate ==
3130 ipa_ctx->ctrl->ipa_clk_rate_nominal) {
3131 if (ipa_ctx->ctrl->msm_bus_data_ptr->num_usecases <= 2)
3132 idx = 1;
3133 else
3134 idx = 2;
3135 } else if (ipa_ctx->curr_ipa_clk_rate ==
3136 ipa_ctx->ctrl->ipa_clk_rate_turbo) {
3137 idx = ipa_ctx->ctrl->msm_bus_data_ptr->num_usecases - 1;
3138 } else {
3139 WARN_ON(1);
3140 }
3141
3142 IPADBG("curr %d idx %d\n", ipa_ctx->curr_ipa_clk_rate, idx);
3143
3144 return idx;
3145}
3146
3147/**
3148* ipa_enable_clks() - Turn on IPA clocks
3149*
3150* Return codes:
3151* None
3152*/
3153void ipa_enable_clks(void)
3154{
3155 IPADBG("enabling IPA clocks and bus voting\n");
3156
3157 ipa_ctx->ctrl->ipa_enable_clks();
3158
3159 if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL)
3160 if (msm_bus_scale_client_update_request(ipa_ctx->ipa_bus_hdl,
3161 ipa_get_bus_vote()))
3162 WARN_ON(1);
3163}
3164
3165void _ipa_disable_clks_v1_1(void)
3166{
3167
3168 if (ipa_inactivity_clk)
3169 clk_disable_unprepare(ipa_inactivity_clk);
3170 else
3171 WARN_ON(1);
3172
3173 if (sys_noc_ipa_axi_clk)
3174 clk_disable_unprepare(sys_noc_ipa_axi_clk);
3175 else
3176 WARN_ON(1);
3177
3178 if (ipa_clk)
3179 clk_disable_unprepare(ipa_clk);
3180 else
3181 WARN_ON(1);
3182
3183 if (ipa_cnoc_clk)
3184 clk_disable_unprepare(ipa_cnoc_clk);
3185 else
3186 WARN_ON(1);
3187
3188}
3189
3190void _ipa_disable_clks_v2_0(void)
3191{
Utkarsh Saxena41d57c52016-11-16 12:04:28 +05303192 IPADBG_LOW("disabling gcc_ipa_clk\n");
Amir Levy9659e592016-10-27 18:08:27 +03003193 ipa_suspend_apps_pipes(true);
3194 ipa_sps_irq_control_all(false);
3195 ipa_uc_notify_clk_state(false);
3196 if (ipa_clk)
3197 clk_disable_unprepare(ipa_clk);
3198 else
3199 WARN_ON(1);
3200
3201 if (smmu_clk)
3202 clk_disable_unprepare(smmu_clk);
3203}
3204
3205/**
3206* ipa_disable_clks() - Turn off IPA clocks
3207*
3208* Return codes:
3209* None
3210*/
3211void ipa_disable_clks(void)
3212{
Utkarsh Saxena41d57c52016-11-16 12:04:28 +05303213 IPADBG_LOW("disabling IPA clocks and bus voting\n");
Amir Levy9659e592016-10-27 18:08:27 +03003214
3215 ipa_ctx->ctrl->ipa_disable_clks();
3216
3217 if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL)
3218 if (msm_bus_scale_client_update_request(ipa_ctx->ipa_bus_hdl,
3219 0))
3220 WARN_ON(1);
3221}
3222
3223/**
3224 * ipa_start_tag_process() - Send TAG packet and wait for it to come back
3225 *
3226 * This function is called prior to clock gating when the active client
3227 * counter is 1. The TAG process ensures that there are no packets inside
3228 * the IPA HW that were not submitted to the peer's BAM. During the TAG
3229 * process all aggregation frames are (force) closed.
3230 *
3231 * Return codes:
3232 * None
3233 */
3234static void ipa_start_tag_process(struct work_struct *work)
3235{
3236 int res;
3237
3238 IPADBG("starting TAG process\n");
3239 /* close aggregation frames on all pipes */
3240 res = ipa_tag_aggr_force_close(-1);
3241 if (res)
3242 IPAERR("ipa_tag_aggr_force_close failed %d\n", res);
3243
3244 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS");
3245
3246 IPADBG("TAG process done\n");
3247}
3248
3249/**
3250* ipa2_active_clients_log_mod() - Log a modification in the active clients
3251* reference count
3252*
3253* It logs the modification in the circular history buffer, and in the
3254* hash table - looking up an entry, creating one if needed and deleting
3255* one when it is no longer needed.
3256* creating one if needed and deleting one if needed.
3257*
3258* @id: ipa2_active client logging info struct to hold the log information
3259* @inc: a boolean variable to indicate whether the modification is an increase
3260* or decrease
3261* @int_ctx: a boolean variable to indicate whether this call is being made from
3262* an interrupt context and therefore should allocate GFP_ATOMIC memory
3263*
3264* Method process:
3265* - Hash the unique identifier string
3266* - Find the hash in the table
3267* 1)If found, increase or decrease the reference count
3268* 2)If not found, allocate a new hash table entry struct and initialize it
3269* - Remove and deallocate unneeded data structure
3270* - Log the call in the circular history buffer (unless it is a simple call)
3271*/
3272void ipa2_active_clients_log_mod(struct ipa_active_client_logging_info *id,
3273 bool inc, bool int_ctx)
3274{
3275 char temp_str[IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN];
3276 unsigned long long t;
3277 unsigned long nanosec_rem;
3278 struct ipa2_active_client_htable_entry *hentry;
3279 struct ipa2_active_client_htable_entry *hfound;
3280 u32 hkey;
3281 char str_to_hash[IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN];
3282
3283 hfound = NULL;
3284 memset(str_to_hash, 0, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN);
3285 strlcpy(str_to_hash, id->id_string, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN);
Mohammed Javid4c4037e2017-11-27 16:23:35 +05303286 hkey = jhash(str_to_hash, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN,
Amir Levy9659e592016-10-27 18:08:27 +03003287 0);
3288 hash_for_each_possible(ipa_ctx->ipa2_active_clients_logging.htable,
3289 hentry, list, hkey) {
3290 if (!strcmp(hentry->id_string, id->id_string)) {
3291 hentry->count = hentry->count + (inc ? 1 : -1);
3292 hfound = hentry;
3293 }
3294 }
3295 if (hfound == NULL) {
3297		hentry = kzalloc(sizeof(
3298 struct ipa2_active_client_htable_entry),
3299 int_ctx ? GFP_ATOMIC : GFP_KERNEL);
3300 if (hentry == NULL) {
3301			IPAERR("failed to allocate active clients hash entry\n");
3302 return;
3303 }
3304 hentry->type = id->type;
3305 strlcpy(hentry->id_string, id->id_string,
3306 IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN);
3307 INIT_HLIST_NODE(&hentry->list);
3308 hentry->count = inc ? 1 : -1;
3309 hash_add(ipa_ctx->ipa2_active_clients_logging.htable,
3310 &hentry->list, hkey);
3311 } else if (hfound->count == 0) {
3312 hash_del(&hfound->list);
3313 kfree(hfound);
3314 }
3315
3316 if (id->type != SIMPLE) {
3317 t = local_clock();
3318 nanosec_rem = do_div(t, 1000000000) / 1000;
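		/*
		 * local_clock() returns nanoseconds; after do_div() t holds
		 * whole seconds and nanosec_rem the remainder in
		 * microseconds, matching the [sec.usec] format below.
		 */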
3319 snprintf(temp_str, IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN,
3320 inc ? "[%5lu.%06lu] ^ %s, %s: %d" :
3321 "[%5lu.%06lu] v %s, %s: %d",
3322 (unsigned long)t, nanosec_rem,
3323 id->id_string, id->file, id->line);
3324 ipa2_active_clients_log_insert(temp_str);
3325 }
3326}
3327
3328void ipa2_active_clients_log_dec(struct ipa_active_client_logging_info *id,
3329 bool int_ctx)
3330{
3331 ipa2_active_clients_log_mod(id, false, int_ctx);
3332}
3333
3334void ipa2_active_clients_log_inc(struct ipa_active_client_logging_info *id,
3335 bool int_ctx)
3336{
3337 ipa2_active_clients_log_mod(id, true, int_ctx);
3338}
3339
3340/**
3341* ipa_inc_client_enable_clks() - Increase active clients counter, and
3342* enable ipa clocks if necessary
3343*
3344* Please do not use this API, use the wrapper macros instead (ipa_i.h)
3345* IPA2_ACTIVE_CLIENTS_INC_XXXX();
3346*
3347* Return codes:
3348* None
3349*/
3350void ipa2_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
3351{
3352 ipa_active_clients_lock();
3353 ipa2_active_clients_log_inc(id, false);
3354 ipa_ctx->ipa_active_clients.cnt++;
3355 if (ipa_ctx->ipa_active_clients.cnt == 1)
3356 ipa_enable_clks();
Utkarsh Saxena41d57c52016-11-16 12:04:28 +05303357 IPADBG_LOW("active clients = %d\n", ipa_ctx->ipa_active_clients.cnt);
Amir Levy9659e592016-10-27 18:08:27 +03003358 ipa_active_clients_unlock();
3359}
3360
3361/**
3362* ipa_inc_client_enable_clks_no_block() - Increment the number of active
3363* clients only if no asynchronous action would be required. Asynchronous
3364* actions are locking a mutex and waking up the IPA HW.
3365*
3366* Please do not use this API, use the wrapper macros instead (ipa_i.h)
3367*
3368*
3369* Return codes: 0 for success
3370* -EPERM if an asynchronous action should have been done
3371*/
3372int ipa2_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
3373 *id)
3374{
3375 int res = 0;
3376 unsigned long flags;
3377
3378 if (ipa_active_clients_trylock(&flags) == 0)
3379 return -EPERM;
3380
3381 if (ipa_ctx->ipa_active_clients.cnt == 0) {
3382 res = -EPERM;
3383 goto bail;
3384 }
3385
3386 ipa2_active_clients_log_inc(id, true);
3387
3388 ipa_ctx->ipa_active_clients.cnt++;
Utkarsh Saxena41d57c52016-11-16 12:04:28 +05303389 IPADBG_LOW("active clients = %d\n", ipa_ctx->ipa_active_clients.cnt);
Amir Levy9659e592016-10-27 18:08:27 +03003390bail:
3391 ipa_active_clients_trylock_unlock(&flags);
3392
3393 return res;
3394}
3395
3396/**
3397 * ipa_dec_client_disable_clks() - Decrease active clients counter
3398 *
3399 * If there are no active clients left, this function also starts the TAG
3400 * process. When the TAG process ends, IPA clocks will be gated. The
3401 * start_tag_process_again flag is set during this function to signal the
3402 * TAG process to start again, as another client may still send data to IPA.
3403 *
3404 * Please do not use this API, use the wrapper macros instead (ipa_i.h)
3405 * IPA2_ACTIVE_CLIENTS_DEC_XXXX();
3406 *
3407 * Return codes:
3408 * None
3409 */
3410void ipa2_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
3411{
3412 struct ipa_active_client_logging_info log_info;
3413
3414 ipa_active_clients_lock();
3415 ipa2_active_clients_log_dec(id, false);
3416 ipa_ctx->ipa_active_clients.cnt--;
Utkarsh Saxena41d57c52016-11-16 12:04:28 +05303417 IPADBG_LOW("active clients = %d\n", ipa_ctx->ipa_active_clients.cnt);
Amir Levy9659e592016-10-27 18:08:27 +03003418 if (ipa_ctx->ipa_active_clients.cnt == 0) {
3419 if (ipa_ctx->tag_process_before_gating) {
3420 IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info,
3421 "TAG_PROCESS");
3422 ipa2_active_clients_log_inc(&log_info, false);
3423 ipa_ctx->tag_process_before_gating = false;
3424 /*
3425 * When TAG process ends, active clients will be
3426 * decreased
3427 */
3428 ipa_ctx->ipa_active_clients.cnt = 1;
3429 queue_work(ipa_ctx->power_mgmt_wq, &ipa_tag_work);
3430 } else {
3431 ipa_disable_clks();
3432 }
3433 }
3434 ipa_active_clients_unlock();
3435}
3436
3437/**
3438* ipa_inc_acquire_wakelock() - Increase active clients counter, and
3439* acquire wakelock if necessary
3440*
3441* Return codes:
3442* None
3443*/
3444void ipa_inc_acquire_wakelock(enum ipa_wakelock_ref_client ref_client)
3445{
3446 unsigned long flags;
3447
3448 if (ref_client >= IPA_WAKELOCK_REF_CLIENT_MAX)
3449 return;
3450 spin_lock_irqsave(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
3451 if (ipa_ctx->wakelock_ref_cnt.cnt & (1 << ref_client))
3452 IPAERR("client enum %d mask already set. ref cnt = %d\n",
3453 ref_client, ipa_ctx->wakelock_ref_cnt.cnt);
3454 ipa_ctx->wakelock_ref_cnt.cnt |= (1 << ref_client);
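	/* cnt is a per-client bitmask, not a counter: any set bit keeps the
	 * wakelock held, and each client holds at most one reference.
	 */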
3455 if (ipa_ctx->wakelock_ref_cnt.cnt)
3456 __pm_stay_awake(&ipa_ctx->w_lock);
Utkarsh Saxena41d57c52016-11-16 12:04:28 +05303457 IPADBG_LOW("active wakelock ref cnt = %d client enum %d\n",
Amir Levy9659e592016-10-27 18:08:27 +03003458 ipa_ctx->wakelock_ref_cnt.cnt, ref_client);
3459 spin_unlock_irqrestore(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
3460}
3461
3462/**
3463 * ipa_dec_release_wakelock() - Decrease active clients counter
3464 *
3465 * If the ref count drops to 0, release the wakelock.
3466 *
3467 * Return codes:
3468 * None
3469 */
3470void ipa_dec_release_wakelock(enum ipa_wakelock_ref_client ref_client)
3471{
3472 unsigned long flags;
3473
3474 if (ref_client >= IPA_WAKELOCK_REF_CLIENT_MAX)
3475 return;
3476 spin_lock_irqsave(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
3477 ipa_ctx->wakelock_ref_cnt.cnt &= ~(1 << ref_client);
Utkarsh Saxena41d57c52016-11-16 12:04:28 +05303478 IPADBG_LOW("active wakelock ref cnt = %d client enum %d\n",
Amir Levy9659e592016-10-27 18:08:27 +03003479 ipa_ctx->wakelock_ref_cnt.cnt, ref_client);
3480 if (ipa_ctx->wakelock_ref_cnt.cnt == 0)
3481 __pm_relax(&ipa_ctx->w_lock);
3482 spin_unlock_irqrestore(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
3483}
3484
static int ipa_setup_bam_cfg(const struct ipa_plat_drv_res *res)
{
	void *ipa_bam_mmio;
	int reg_val;
	int retval = 0;

	ipa_bam_mmio = ioremap(res->ipa_mem_base + IPA_BAM_REG_BASE_OFST,
			IPA_BAM_REMAP_SIZE);
	if (!ipa_bam_mmio)
		return -ENOMEM;
	switch (ipa_ctx->ipa_hw_type) {
	case IPA_HW_v1_1:
		reg_val = IPA_BAM_CNFG_BITS_VALv1_1;
		break;
	case IPA_HW_v2_0:
	case IPA_HW_v2_5:
	case IPA_HW_v2_6L:
		reg_val = IPA_BAM_CNFG_BITS_VALv2_0;
		break;
	default:
		retval = -EPERM;
		goto fail;
	}
	if (ipa_ctx->ipa_hw_type < IPA_HW_v2_5)
		ipa_write_reg(ipa_bam_mmio, IPA_BAM_CNFG_BITS_OFST, reg_val);
fail:
	iounmap(ipa_bam_mmio);

	return retval;
}

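/*
 * Note: the BAM_CNFG_BITS register is only written for cores older than
 * IPA v2.5; on v2.5/v2.6L the hardware default is kept, and the switch
 * above then only serves to reject unknown HW types with -EPERM.
 */
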
int ipa2_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
	u32 bandwidth_mbps)
{
	enum ipa_voltage_level needed_voltage;
	u32 clk_rate;

	IPADBG_LOW("floor_voltage=%d, bandwidth_mbps=%u",
		floor_voltage, bandwidth_mbps);

	if (floor_voltage < IPA_VOLTAGE_UNSPECIFIED ||
		floor_voltage >= IPA_VOLTAGE_MAX) {
		IPAERR("bad voltage\n");
		return -EINVAL;
	}

	if (ipa_ctx->enable_clock_scaling) {
		IPADBG_LOW("Clock scaling is enabled\n");
		if (bandwidth_mbps >=
			ipa_ctx->ctrl->clock_scaling_bw_threshold_turbo)
			needed_voltage = IPA_VOLTAGE_TURBO;
		else if (bandwidth_mbps >=
			ipa_ctx->ctrl->clock_scaling_bw_threshold_nominal)
			needed_voltage = IPA_VOLTAGE_NOMINAL;
		else
			needed_voltage = IPA_VOLTAGE_SVS;
	} else {
		IPADBG_LOW("Clock scaling is disabled\n");
		needed_voltage = IPA_VOLTAGE_NOMINAL;
	}

	needed_voltage = max(needed_voltage, floor_voltage);
	switch (needed_voltage) {
	case IPA_VOLTAGE_SVS:
		clk_rate = ipa_ctx->ctrl->ipa_clk_rate_svs;
		break;
	case IPA_VOLTAGE_NOMINAL:
		clk_rate = ipa_ctx->ctrl->ipa_clk_rate_nominal;
		break;
	case IPA_VOLTAGE_TURBO:
		clk_rate = ipa_ctx->ctrl->ipa_clk_rate_turbo;
		break;
	default:
		IPAERR("bad voltage\n");
		WARN_ON(1);
		return -EFAULT;
	}

	if (clk_rate == ipa_ctx->curr_ipa_clk_rate) {
		IPADBG_LOW("Same clock rate\n");
		return 0;
	}

	ipa_active_clients_lock();
	ipa_ctx->curr_ipa_clk_rate = clk_rate;
	IPADBG_LOW("setting clock rate to %u\n", ipa_ctx->curr_ipa_clk_rate);
	if (ipa_ctx->ipa_active_clients.cnt > 0) {
		struct ipa_active_client_logging_info log_info;

		/*
		 * clk_set_rate should be called with unlocked lock to allow
		 * clients to get a reference to IPA clock synchronously.
		 * Hold a reference to IPA clock here to make sure clock
		 * state does not change during set_rate.
		 */
		IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
		ipa_ctx->ipa_active_clients.cnt++;
		ipa2_active_clients_log_inc(&log_info, false);
		ipa_active_clients_unlock();

		clk_set_rate(ipa_clk, ipa_ctx->curr_ipa_clk_rate);
		if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL)
			if (msm_bus_scale_client_update_request(
			    ipa_ctx->ipa_bus_hdl, ipa_get_bus_vote()))
				WARN_ON(1);
		/* remove the vote added here */
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	} else {
		IPADBG_LOW("clocks are gated, not setting rate\n");
		ipa_active_clients_unlock();
	}
	IPADBG_LOW("Done\n");
	return 0;
}

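/*
 * Voltage selection sketch (the threshold numbers here are hypothetical):
 * with clock_scaling_bw_threshold_nominal = 90 and
 * clock_scaling_bw_threshold_turbo = 310, a request for 100 Mbps with an
 * SVS floor maps to NOMINAL, while the same bandwidth with a TURBO floor
 * is raised to TURBO by the max() against floor_voltage above.
 */
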
static int ipa_init_flt_block(void)
{
	int result = 0;

	/*
	 * SW workaround for Improper Filter Behavior when neither Global nor
	 * Pipe Rules are present => configure dummy global filter rule
	 * always which results in a miss
	 */
	struct ipa_ioc_add_flt_rule *rules;
	struct ipa_flt_rule_add *rule;
	struct ipa_ioc_get_rt_tbl rt_lookup;
	enum ipa_ip_type ip;

	if (ipa_ctx->ipa_hw_type >= IPA_HW_v1_1) {
		size_t sz = sizeof(struct ipa_ioc_add_flt_rule) +
			sizeof(struct ipa_flt_rule_add);

		rules = kmalloc(sz, GFP_KERNEL);
		if (rules == NULL) {
			IPAERR("fail to alloc mem for dummy filter rule\n");
			return -ENOMEM;
		}

		IPADBG("Adding global rules for IPv4 and IPv6");
		for (ip = IPA_IP_v4; ip < IPA_IP_MAX; ip++) {
			memset(&rt_lookup, 0,
					sizeof(struct ipa_ioc_get_rt_tbl));
			rt_lookup.ip = ip;
			strlcpy(rt_lookup.name, IPA_DFLT_RT_TBL_NAME,
					IPA_RESOURCE_NAME_MAX);
			ipa2_get_rt_tbl(&rt_lookup);
			ipa2_put_rt_tbl(rt_lookup.hdl);

			memset(rules, 0, sz);
			rule = &rules->rules[0];
			rules->commit = 1;
			rules->ip = ip;
			rules->global = 1;
			rules->num_rules = 1;
			rule->at_rear = 1;
			if (ip == IPA_IP_v4) {
				rule->rule.attrib.attrib_mask =
					IPA_FLT_PROTOCOL | IPA_FLT_DST_ADDR;
				rule->rule.attrib.u.v4.protocol =
					IPA_INVALID_L4_PROTOCOL;
				rule->rule.attrib.u.v4.dst_addr_mask = ~0;
				rule->rule.attrib.u.v4.dst_addr = ~0;
			} else if (ip == IPA_IP_v6) {
				rule->rule.attrib.attrib_mask =
					IPA_FLT_NEXT_HDR | IPA_FLT_DST_ADDR;
				rule->rule.attrib.u.v6.next_hdr =
					IPA_INVALID_L4_PROTOCOL;
				rule->rule.attrib.u.v6.dst_addr_mask[0] = ~0;
				rule->rule.attrib.u.v6.dst_addr_mask[1] = ~0;
				rule->rule.attrib.u.v6.dst_addr_mask[2] = ~0;
				rule->rule.attrib.u.v6.dst_addr_mask[3] = ~0;
				rule->rule.attrib.u.v6.dst_addr[0] = ~0;
				rule->rule.attrib.u.v6.dst_addr[1] = ~0;
				rule->rule.attrib.u.v6.dst_addr[2] = ~0;
				rule->rule.attrib.u.v6.dst_addr[3] = ~0;
			} else {
				result = -EINVAL;
				WARN_ON(1);
				break;
			}
			rule->rule.action = IPA_PASS_TO_ROUTING;
			rule->rule.rt_tbl_hdl = rt_lookup.hdl;
			rule->rule.retain_hdr = true;

			if (ipa2_add_flt_rule(rules) ||
			    rules->rules[0].status) {
				result = -EINVAL;
				WARN_ON(1);
				break;
			}
		}
		kfree(rules);
	}
	return result;
}

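/*
 * The dummy global rule installed above is built so it can never match
 * real traffic: it requires an invalid L4 protocol/next-header together
 * with an all-ones destination address, so every packet misses it. Its
 * only purpose is to ensure at least one global rule exists, per the SW
 * workaround described at the top of ipa_init_flt_block().
 */
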
static void ipa_sps_process_irq_schedule_rel(void)
{
	queue_delayed_work(ipa_ctx->sps_power_mgmt_wq,
		&ipa_sps_release_resource_work,
		msecs_to_jiffies(IPA_SPS_PROD_TIMEOUT_MSEC));
}

/**
* ipa_suspend_handler() - Handles the suspend interrupt:
* wakes up the suspended peripheral by requesting its consumer
* @interrupt: Interrupt type
* @private_data: The client's private data
* @interrupt_data: Interrupt specific information data
*/
void ipa_suspend_handler(enum ipa_irq_type interrupt,
				void *private_data,
				void *interrupt_data)
{
	enum ipa_rm_resource_name resource;
	u32 suspend_data =
		((struct ipa_tx_suspend_irq_data *)interrupt_data)->endpoints;
	u32 bmsk = 1;
	u32 i = 0;
	int res;
	struct ipa_ep_cfg_holb holb_cfg;

	IPADBG("interrupt=%d, interrupt_data=%u\n", interrupt, suspend_data);
	memset(&holb_cfg, 0, sizeof(holb_cfg));
	holb_cfg.tmr_val = 0;

	for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
		if ((suspend_data & bmsk) && (ipa_ctx->ep[i].valid)) {
			if (IPA_CLIENT_IS_APPS_CONS(ipa_ctx->ep[i].client)) {
				/*
				 * pipe will be unsuspended as part of
				 * enabling IPA clocks
				 */
				mutex_lock(&ipa_ctx->sps_pm.sps_pm_lock);
				if (!atomic_read(
					&ipa_ctx->sps_pm.dec_clients)) {
					IPA_ACTIVE_CLIENTS_INC_EP(
						ipa_ctx->ep[i].client);
					IPADBG("Pipes un-suspended.\n");
					IPADBG("Enter poll mode.\n");
					atomic_set(
						&ipa_ctx->sps_pm.dec_clients,
						1);
					/*
					 * acquire wake lock as long as suspend
					 * vote is held
					 */
					ipa_inc_acquire_wakelock(
						IPA_WAKELOCK_REF_CLIENT_SPS);
					ipa_sps_process_irq_schedule_rel();
				}
				mutex_unlock(&ipa_ctx->sps_pm.sps_pm_lock);
			} else {
				resource = ipa2_get_rm_resource_from_ep(i);
				res = ipa_rm_request_resource_with_timer(
					resource);
				if ((res == -EPERM) &&
					IPA_CLIENT_IS_CONS(
					ipa_ctx->ep[i].client)) {
					holb_cfg.en = 1;
					res = ipa2_cfg_ep_holb_by_client(
						ipa_ctx->ep[i].client,
						&holb_cfg);
					if (res) {
						IPAERR("holb en fail\n");
						IPAERR("IPAHW stall\n");
						BUG();
					}
				}
			}
		}
		bmsk = bmsk << 1;
	}
}

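/*
 * ipa_suspend_handler() walks suspend_data one endpoint bit at a time;
 * e.g. a value of 0x5 means pipes 0 and 2 raised TX_SUSPEND. Apps
 * consumer pipes are un-suspended by voting for IPA clocks, while other
 * pipes are resumed through their IPA RM resource, with HOLB drop as a
 * last resort for consumers that cannot be resumed.
 */
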
/**
* ipa2_restore_suspend_handler() - restores the original suspend IRQ handler
* as it was registered in the IPA init sequence.
* Return codes:
* 0: success
* -EPERM: failed to remove current handler or failed to add original handler
*/
int ipa2_restore_suspend_handler(void)
{
	int result = 0;

	result = ipa2_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ);
	if (result) {
		IPAERR("remove handler for suspend interrupt failed\n");
		return -EPERM;
	}

	result = ipa2_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
			ipa_suspend_handler, true, NULL);
	if (result) {
		IPAERR("register handler for suspend interrupt failed\n");
		result = -EPERM;
	}

	return result;
}

static int apps_cons_release_resource(void)
{
	return 0;
}

static int apps_cons_request_resource(void)
{
	return 0;
}

static void ipa_sps_release_resource(struct work_struct *work)
{
	mutex_lock(&ipa_ctx->sps_pm.sps_pm_lock);
	/* check whether we still need to decrease the client usage count */
	if (atomic_read(&ipa_ctx->sps_pm.dec_clients)) {
		if (atomic_read(&ipa_ctx->sps_pm.eot_activity)) {
			IPADBG("EOT pending, re-scheduling\n");
			ipa_sps_process_irq_schedule_rel();
		} else {
			atomic_set(&ipa_ctx->sps_pm.dec_clients, 0);
			ipa_dec_release_wakelock(IPA_WAKELOCK_REF_CLIENT_SPS);
			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("SPS_RESOURCE");
		}
	}
	atomic_set(&ipa_ctx->sps_pm.eot_activity, 0);
	mutex_unlock(&ipa_ctx->sps_pm.sps_pm_lock);
}

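/*
 * The release above is deliberately lazy: if EOT (end-of-transfer)
 * activity was seen since the release was scheduled, the work re-arms
 * itself for another IPA_SPS_PROD_TIMEOUT_MSEC instead of dropping the
 * clock vote, so back-to-back traffic does not bounce the IPA clocks.
 */
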
int ipa_create_apps_resource(void)
{
	struct ipa_rm_create_params apps_cons_create_params;
	struct ipa_rm_perf_profile profile;
	int result = 0;

	memset(&apps_cons_create_params, 0,
				sizeof(apps_cons_create_params));
	apps_cons_create_params.name = IPA_RM_RESOURCE_APPS_CONS;
	apps_cons_create_params.request_resource = apps_cons_request_resource;
	apps_cons_create_params.release_resource = apps_cons_release_resource;
	result = ipa_rm_create_resource(&apps_cons_create_params);
	if (result) {
		IPAERR("ipa_rm_create_resource failed\n");
		return result;
	}

	profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
	ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile);

	return result;
}

/**
* ipa_init() - Initialize the IPA Driver
* @resource_p: contain platform specific values from DTS file
* @pdev: The platform device structure representing the IPA driver
*
* Function initialization process:
* - Allocate memory for the driver context data struct
* - Initialize the ipa_ctx with:
*	1) parsed values from the dts file
*	2) parameters passed to the module initialization
*	3) read HW values (such as core memory size)
* - Map IPA core registers to CPU memory
* - Restart IPA core (HW reset)
* - Register IPA BAM to SPS driver and get a BAM handler
* - Set configuration for IPA BAM via BAM_CNFG_BITS
* - Initialize the look-aside caches (kmem_cache/slab) for filter,
*   routing and IPA-tree
* - Create memory pool with 4 objects for DMA operations (each object
*   is 512 bytes long); these objects will be used for TX (A5->IPA)
* - Initialize list heads (routing, filter, hdr, system pipes)
* - Initialize mutexes (for ipa_ctx and NAT memory mutexes)
* - Initialize spinlocks (for lists related to A5<->IPA pipes)
* - Initialize 2 single-threaded work-queues named "ipa rx wq" and "ipa tx wq"
* - Initialize Red-Black-Tree(s) for handles of header, routing rule,
*   routing table and filtering rule
* - Setup all A5<->IPA pipes by calling ipa_setup_a5_pipes
* - Prepare the descriptors for System pipes
* - Initialize the filter block by committing IPV4 and IPV6 default rules
* - Create empty routing table in system memory (no committing)
* - Initialize pipes memory pool with ipa_pipe_mem_init for supported platforms
* - Create a char-device for IPA
* - Initialize IPA RM (resource manager)
*/
static int ipa_init(const struct ipa_plat_drv_res *resource_p,
		struct device *ipa_dev)
{
	int result = 0;
	int i;
	struct sps_bam_props bam_props = { 0 };
	struct ipa_flt_tbl *flt_tbl;
	struct ipa_rt_tbl_set *rset;
	struct ipa_active_client_logging_info log_info;

	IPADBG("IPA Driver initialization started\n");

	/*
	 * since structure alignment is implementation dependent, add test to
	 * avoid different and incompatible data layouts
	 */
	BUILD_BUG_ON(sizeof(struct ipa_hw_pkt_status) != IPA_PKT_STATUS_SIZE);

	ipa_ctx = kzalloc(sizeof(*ipa_ctx), GFP_KERNEL);
	if (!ipa_ctx) {
		IPAERR(":kzalloc err.\n");
		result = -ENOMEM;
		goto fail_mem_ctx;
	}

	ipa_ctx->logbuf = ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa", 0);
	if (ipa_ctx->logbuf == NULL) {
		IPAERR("failed to get logbuf\n");
		result = -ENOMEM;
		goto fail_logbuf;
	}

	ipa_ctx->pdev = ipa_dev;
	ipa_ctx->uc_pdev = ipa_dev;
	ipa_ctx->smmu_present = smmu_info.present;
	if (!ipa_ctx->smmu_present)
		ipa_ctx->smmu_s1_bypass = true;
	else
		ipa_ctx->smmu_s1_bypass = smmu_info.s1_bypass;
	ipa_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
	ipa_ctx->ipa_wrapper_size = resource_p->ipa_mem_size;
	ipa_ctx->ipa_hw_type = resource_p->ipa_hw_type;
	ipa_ctx->ipa_hw_mode = resource_p->ipa_hw_mode;
	ipa_ctx->ipa_uc_monitor_holb =
		resource_p->ipa_uc_monitor_holb;
	ipa_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
	ipa_ctx->ipa_bam_remote_mode = resource_p->ipa_bam_remote_mode;
	ipa_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt;
	ipa_ctx->ipa_wdi2 = resource_p->ipa_wdi2;
	ipa_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size;
	ipa_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size;
	ipa_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset;
	ipa_ctx->use_dma_zone = resource_p->use_dma_zone;
	ipa_ctx->tethered_flow_control = resource_p->tethered_flow_control;
	ipa_ctx->use_ipa_pm = resource_p->use_ipa_pm;

	/* Set up the IPA RX polling timeout (msec) */
	ipa_rx_timeout_min_max_calc(&ipa_ctx->ipa_rx_min_timeout_usec,
		&ipa_ctx->ipa_rx_max_timeout_usec,
		resource_p->ipa_rx_polling_sleep_msec);

	/* Set up the IPA polling iteration */
	if ((resource_p->ipa_polling_iteration >= MIN_POLLING_ITERATION)
		&& (resource_p->ipa_polling_iteration <=
			MAX_POLLING_ITERATION))
		ipa_ctx->ipa_polling_iteration =
			resource_p->ipa_polling_iteration;
	else
		ipa_ctx->ipa_polling_iteration = MAX_POLLING_ITERATION;

	/* default aggregation parameters */
	ipa_ctx->aggregation_type = IPA_MBIM_16;
	ipa_ctx->aggregation_byte_limit = 1;
	ipa_ctx->aggregation_time_limit = 0;
	ipa_ctx->ipa2_active_clients_logging.log_rdy = false;

	ipa_ctx->ctrl = kzalloc(sizeof(*ipa_ctx->ctrl), GFP_KERNEL);
	if (!ipa_ctx->ctrl) {
		IPAERR("memory allocation error for ctrl\n");
		result = -ENOMEM;
		goto fail_mem_ctrl;
	}
	result = ipa_controller_static_bind(ipa_ctx->ctrl,
			ipa_ctx->ipa_hw_type);
	if (result) {
		IPAERR("fail to static bind IPA ctrl.\n");
		result = -EFAULT;
		goto fail_bind;
	}

	IPADBG("hdr_lcl=%u ip4_rt=%u ip6_rt=%u ip4_flt=%u ip6_flt=%u\n",
		ipa_ctx->hdr_tbl_lcl, ipa_ctx->ip4_rt_tbl_lcl,
		ipa_ctx->ip6_rt_tbl_lcl, ipa_ctx->ip4_flt_tbl_lcl,
		ipa_ctx->ip6_flt_tbl_lcl);

	if (bus_scale_table) {
		IPADBG("Use bus scaling info from device tree\n");
		ipa_ctx->ctrl->msm_bus_data_ptr = bus_scale_table;
	}

	if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL) {
		/* get BUS handle */
		ipa_ctx->ipa_bus_hdl =
			msm_bus_scale_register_client(
				ipa_ctx->ctrl->msm_bus_data_ptr);
		if (!ipa_ctx->ipa_bus_hdl) {
			IPAERR("fail to register with bus mgr!\n");
			result = -EPROBE_DEFER;
			bus_scale_table = NULL;
			goto fail_bus_reg;
		}
	} else {
		IPADBG("Skipping bus scaling registration on Virtual plat\n");
	}

	result = ipa2_active_clients_log_init();
	if (result)
		goto fail_init_active_client;

	/* get IPA clocks */
	result = ipa_get_clks(master_dev);
	if (result)
		goto fail_clk;

	/* Enable ipa_ctx->enable_clock_scaling */
	ipa_ctx->enable_clock_scaling = 1;
	ipa_ctx->curr_ipa_clk_rate = ipa_ctx->ctrl->ipa_clk_rate_turbo;

	/* enable IPA clocks explicitly to allow the initialization */
	ipa_enable_clks();

	/* setup IPA register access */
	ipa_ctx->mmio = ioremap(resource_p->ipa_mem_base +
			ipa_ctx->ctrl->ipa_reg_base_ofst,
			resource_p->ipa_mem_size);
	if (!ipa_ctx->mmio) {
		IPAERR(":ipa-base ioremap err.\n");
		result = -EFAULT;
		goto fail_remap;
	}

	result = ipa_init_hw();
	if (result) {
		IPAERR(":error initializing HW.\n");
		result = -ENODEV;
		goto fail_init_hw;
	}
	IPADBG("IPA HW initialization sequence completed");

	ipa_ctx->ipa_num_pipes = ipa_get_num_pipes();
	ipa_ctx->ctrl->ipa_sram_read_settings();
	IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n",
		ipa_ctx->smem_sz, ipa_ctx->smem_restricted_bytes);

	if (ipa_ctx->smem_reqd_sz >
		ipa_ctx->smem_sz - ipa_ctx->smem_restricted_bytes) {
		IPAERR("SW expect more core memory, needed %d, avail %d\n",
			ipa_ctx->smem_reqd_sz, ipa_ctx->smem_sz -
			ipa_ctx->smem_restricted_bytes);
		result = -ENOMEM;
		goto fail_init_hw;
	}

	mutex_init(&ipa_ctx->ipa_active_clients.mutex);
	spin_lock_init(&ipa_ctx->ipa_active_clients.spinlock);
	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
	ipa2_active_clients_log_inc(&log_info, false);
	ipa_ctx->ipa_active_clients.cnt = 1;

	/* Create workqueues for power management */
	ipa_ctx->power_mgmt_wq =
		create_singlethread_workqueue("ipa_power_mgmt");
	if (!ipa_ctx->power_mgmt_wq) {
		IPAERR("failed to create power mgmt wq\n");
		result = -ENOMEM;
		goto fail_init_hw;
	}

	ipa_ctx->sps_power_mgmt_wq =
		create_singlethread_workqueue("sps_ipa_power_mgmt");
	if (!ipa_ctx->sps_power_mgmt_wq) {
		IPAERR("failed to create sps power mgmt wq\n");
		result = -ENOMEM;
		goto fail_create_sps_wq;
	}

	/* register IPA with SPS driver */
	bam_props.phys_addr = resource_p->bam_mem_base;
	bam_props.virt_size = resource_p->bam_mem_size;
	bam_props.irq = resource_p->bam_irq;
	bam_props.num_pipes = ipa_ctx->ipa_num_pipes;
	bam_props.summing_threshold = IPA_SUMMING_THRESHOLD;
	bam_props.event_threshold = IPA_EVENT_THRESHOLD;
	bam_props.options |= SPS_BAM_NO_LOCAL_CLK_GATING;
	if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL)
		bam_props.options |= SPS_BAM_OPT_IRQ_WAKEUP;
	if (ipa_ctx->ipa_bam_remote_mode == true)
		bam_props.manage |= SPS_BAM_MGR_DEVICE_REMOTE;
	if (!ipa_ctx->smmu_s1_bypass)
		bam_props.options |= SPS_BAM_SMMU_EN;
	bam_props.options |= SPS_BAM_CACHED_WP;
	bam_props.ee = resource_p->ee;
	bam_props.ipc_loglevel = 3;

	result = sps_register_bam_device(&bam_props, &ipa_ctx->bam_handle);
	if (result) {
		IPAERR(":bam register err.\n");
		result = -EPROBE_DEFER;
		goto fail_register_bam_device;
	}
	IPADBG("IPA BAM is registered\n");

	if (ipa_setup_bam_cfg(resource_p)) {
		IPAERR(":bam cfg err.\n");
		result = -ENODEV;
		goto fail_flt_rule_cache;
	}

	/* init the lookaside caches */
	ipa_ctx->flt_rule_cache = kmem_cache_create("IPA_FLT",
			sizeof(struct ipa_flt_entry), 0, 0, NULL);
	if (!ipa_ctx->flt_rule_cache) {
		IPAERR(":ipa flt cache create failed\n");
		result = -ENOMEM;
		goto fail_flt_rule_cache;
	}
	ipa_ctx->rt_rule_cache = kmem_cache_create("IPA_RT",
			sizeof(struct ipa_rt_entry), 0, 0, NULL);
	if (!ipa_ctx->rt_rule_cache) {
		IPAERR(":ipa rt cache create failed\n");
		result = -ENOMEM;
		goto fail_rt_rule_cache;
	}
	ipa_ctx->hdr_cache = kmem_cache_create("IPA_HDR",
			sizeof(struct ipa_hdr_entry), 0, 0, NULL);
	if (!ipa_ctx->hdr_cache) {
		IPAERR(":ipa hdr cache create failed\n");
		result = -ENOMEM;
		goto fail_hdr_cache;
	}
	ipa_ctx->hdr_offset_cache =
		kmem_cache_create("IPA_HDR_OFFSET",
			sizeof(struct ipa_hdr_offset_entry), 0, 0, NULL);
	if (!ipa_ctx->hdr_offset_cache) {
		IPAERR(":ipa hdr off cache create failed\n");
		result = -ENOMEM;
		goto fail_hdr_offset_cache;
	}
	ipa_ctx->hdr_proc_ctx_cache = kmem_cache_create("IPA_HDR_PROC_CTX",
		sizeof(struct ipa_hdr_proc_ctx_entry), 0, 0, NULL);
	if (!ipa_ctx->hdr_proc_ctx_cache) {
		IPAERR(":ipa hdr proc ctx cache create failed\n");
		result = -ENOMEM;
		goto fail_hdr_proc_ctx_cache;
	}
	ipa_ctx->hdr_proc_ctx_offset_cache =
		kmem_cache_create("IPA_HDR_PROC_CTX_OFFSET",
			sizeof(struct ipa_hdr_proc_ctx_offset_entry),
			0, 0, NULL);
	if (!ipa_ctx->hdr_proc_ctx_offset_cache) {
		IPAERR(":ipa hdr proc ctx off cache create failed\n");
		result = -ENOMEM;
		goto fail_hdr_proc_ctx_offset_cache;
	}
	ipa_ctx->rt_tbl_cache = kmem_cache_create("IPA_RT_TBL",
			sizeof(struct ipa_rt_tbl), 0, 0, NULL);
	if (!ipa_ctx->rt_tbl_cache) {
		IPAERR(":ipa rt tbl cache create failed\n");
		result = -ENOMEM;
		goto fail_rt_tbl_cache;
	}
	ipa_ctx->tx_pkt_wrapper_cache =
		kmem_cache_create("IPA_TX_PKT_WRAPPER",
			sizeof(struct ipa_tx_pkt_wrapper), 0, 0, NULL);
	if (!ipa_ctx->tx_pkt_wrapper_cache) {
		IPAERR(":ipa tx pkt wrapper cache create failed\n");
		result = -ENOMEM;
		goto fail_tx_pkt_wrapper_cache;
	}
	ipa_ctx->rx_pkt_wrapper_cache =
		kmem_cache_create("IPA_RX_PKT_WRAPPER",
			sizeof(struct ipa_rx_pkt_wrapper), 0, 0, NULL);
	if (!ipa_ctx->rx_pkt_wrapper_cache) {
		IPAERR(":ipa rx pkt wrapper cache create failed\n");
		result = -ENOMEM;
		goto fail_rx_pkt_wrapper_cache;
	}

	/* Setup DMA pool */
	ipa_ctx->dma_pool = dma_pool_create("ipa_tx", ipa_ctx->pdev,
		IPA_NUM_DESC_PER_SW_TX * sizeof(struct sps_iovec),
		0, 0);
	if (!ipa_ctx->dma_pool) {
		IPAERR("cannot alloc DMA pool.\n");
		result = -ENOMEM;
		goto fail_dma_pool;
	}

	ipa_ctx->glob_flt_tbl[IPA_IP_v4].in_sys = !ipa_ctx->ip4_flt_tbl_lcl;
	ipa_ctx->glob_flt_tbl[IPA_IP_v6].in_sys = !ipa_ctx->ip6_flt_tbl_lcl;

	/* init the various list heads */
	INIT_LIST_HEAD(&ipa_ctx->glob_flt_tbl[IPA_IP_v4].head_flt_rule_list);
	INIT_LIST_HEAD(&ipa_ctx->glob_flt_tbl[IPA_IP_v6].head_flt_rule_list);
	INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_hdr_entry_list);
	for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
		INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_offset_list[i]);
		INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_free_offset_list[i]);
	}
	INIT_LIST_HEAD(&ipa_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list);
	for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
		INIT_LIST_HEAD(&ipa_ctx->hdr_proc_ctx_tbl.head_offset_list[i]);
		INIT_LIST_HEAD(
			&ipa_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i]);
	}
	INIT_LIST_HEAD(&ipa_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
	INIT_LIST_HEAD(&ipa_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
	for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
		flt_tbl = &ipa_ctx->flt_tbl[i][IPA_IP_v4];
		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
		flt_tbl->in_sys = !ipa_ctx->ip4_flt_tbl_lcl;

		flt_tbl = &ipa_ctx->flt_tbl[i][IPA_IP_v6];
		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
		flt_tbl->in_sys = !ipa_ctx->ip6_flt_tbl_lcl;
	}

	rset = &ipa_ctx->reap_rt_tbl_set[IPA_IP_v4];
	INIT_LIST_HEAD(&rset->head_rt_tbl_list);
	rset = &ipa_ctx->reap_rt_tbl_set[IPA_IP_v6];
	INIT_LIST_HEAD(&rset->head_rt_tbl_list);

	INIT_LIST_HEAD(&ipa_ctx->intf_list);
	INIT_LIST_HEAD(&ipa_ctx->msg_list);
	INIT_LIST_HEAD(&ipa_ctx->pull_msg_list);
	init_waitqueue_head(&ipa_ctx->msg_waitq);
	mutex_init(&ipa_ctx->msg_lock);

	mutex_init(&ipa_ctx->lock);
	mutex_init(&ipa_ctx->nat_mem.lock);
	mutex_init(&ipa_ctx->ipa_cne_evt_lock);

	idr_init(&ipa_ctx->ipa_idr);
	spin_lock_init(&ipa_ctx->idr_lock);

	/* wlan related members */
	memset(&ipa_ctx->wc_memb, 0, sizeof(ipa_ctx->wc_memb));
	spin_lock_init(&ipa_ctx->wc_memb.wlan_spinlock);
	spin_lock_init(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
	INIT_LIST_HEAD(&ipa_ctx->wc_memb.wlan_comm_desc_list);

	/*
	 * setup an empty routing table in system memory, this will be used
	 * to delete a routing table cleanly and safely
	 */
	ipa_ctx->empty_rt_tbl_mem.size = IPA_ROUTING_RULE_BYTE_SIZE;

	ipa_ctx->empty_rt_tbl_mem.base =
		dma_alloc_coherent(ipa_ctx->pdev,
				ipa_ctx->empty_rt_tbl_mem.size,
				&ipa_ctx->empty_rt_tbl_mem.phys_base,
				GFP_KERNEL);
	if (!ipa_ctx->empty_rt_tbl_mem.base) {
		IPAERR("DMA buff alloc fail %d bytes for empty routing tbl\n",
				ipa_ctx->empty_rt_tbl_mem.size);
		result = -ENOMEM;
		goto fail_apps_pipes;
	}
	memset(ipa_ctx->empty_rt_tbl_mem.base, 0,
			ipa_ctx->empty_rt_tbl_mem.size);
	IPADBG("empty routing table was allocated in system memory");

	/* setup the A5-IPA pipes */
	if (ipa_setup_apps_pipes()) {
		IPAERR(":failed to setup IPA-Apps pipes.\n");
		result = -ENODEV;
		goto fail_empty_rt_tbl;
	}
	IPADBG("IPA System2Bam pipes were connected\n");

	if (ipa_init_flt_block()) {
		IPAERR("fail to setup dummy filter rules\n");
		result = -ENODEV;
		goto fail_empty_rt_tbl;
	}
	IPADBG("filter block was set with dummy filter rules");

	/* setup the IPA pipe mem pool */
	if (resource_p->ipa_pipe_mem_size)
		ipa_pipe_mem_init(resource_p->ipa_pipe_mem_start_ofst,
				resource_p->ipa_pipe_mem_size);

	ipa_ctx->class = class_create(THIS_MODULE, DRV_NAME);

	result = alloc_chrdev_region(&ipa_ctx->dev_num, 0, 1, DRV_NAME);
	if (result) {
		IPAERR("alloc_chrdev_region err.\n");
		result = -ENODEV;
		goto fail_alloc_chrdev_region;
	}

	ipa_ctx->dev = device_create(ipa_ctx->class, NULL, ipa_ctx->dev_num,
			ipa_ctx, DRV_NAME);
	if (IS_ERR(ipa_ctx->dev)) {
		IPAERR(":device_create err.\n");
		result = -ENODEV;
		goto fail_device_create;
	}

	cdev_init(&ipa_ctx->cdev, &ipa_drv_fops);
	ipa_ctx->cdev.owner = THIS_MODULE;
	ipa_ctx->cdev.ops = &ipa_drv_fops; /* from LDD3 */

	result = cdev_add(&ipa_ctx->cdev, ipa_ctx->dev_num, 1);
	if (result) {
		IPAERR(":cdev_add err=%d\n", -result);
		result = -ENODEV;
		goto fail_cdev_add;
	}
	IPADBG("ipa cdev added successfully. major:%d minor:%d\n",
			MAJOR(ipa_ctx->dev_num),
			MINOR(ipa_ctx->dev_num));

	if (create_nat_device()) {
		IPAERR("unable to create nat device\n");
		result = -ENODEV;
		goto fail_nat_dev_add;
	}

	/* Create a wakeup source. */
	wakeup_source_init(&ipa_ctx->w_lock, "IPA_WS");
	spin_lock_init(&ipa_ctx->wakelock_ref_cnt.spinlock);

	/* Initialize the SPS PM lock. */
	mutex_init(&ipa_ctx->sps_pm.sps_pm_lock);

	/* Initialize IPA RM (resource manager) */
	result = ipa_rm_initialize();
	if (result) {
		IPAERR("RM initialization failed (%d)\n", -result);
		result = -ENODEV;
		goto fail_ipa_rm_init;
	}
	IPADBG("IPA resource manager initialized");

	result = ipa_create_apps_resource();
	if (result) {
		IPAERR("Failed to create APPS_CONS resource\n");
		result = -ENODEV;
		goto fail_create_apps_resource;
	}

	/* register IPA IRQ handler */
	result = ipa_interrupts_init(resource_p->ipa_irq, 0,
			master_dev);
	if (result) {
		IPAERR("ipa interrupts initialization failed\n");
		result = -ENODEV;
		goto fail_ipa_interrupts_init;
	}

	/* add handler for suspend interrupt */
	result = ipa_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
			ipa_suspend_handler, false, NULL);
	if (result) {
		IPAERR("register handler for suspend interrupt failed\n");
		result = -ENODEV;
		goto fail_add_interrupt_handler;
	}

	if (ipa_ctx->use_ipa_teth_bridge) {
		/* Initialize the tethering bridge driver */
		result = teth_bridge_driver_init();
		if (result) {
			IPAERR(":teth_bridge init failed (%d)\n", -result);
			result = -ENODEV;
			goto fail_add_interrupt_handler;
		}
		IPADBG("teth_bridge initialized");
	}

	ipa_debugfs_init();

	result = ipa_uc_interface_init();
	if (result)
		IPAERR(":ipa uC interface init failed (%d)\n", -result);
	else
		IPADBG(":ipa uC interface init ok\n");

	result = ipa_wdi_init();
	if (result)
		IPAERR(":wdi init failed (%d)\n", -result);
	else
		IPADBG(":wdi init ok\n");

	result = ipa_ntn_init();
	if (result)
		IPAERR(":ntn init failed (%d)\n", -result);
	else
		IPADBG(":ntn init ok\n");

	ipa_ctx->q6_proxy_clk_vote_valid = true;

	ipa_register_panic_hdlr();

	pr_info("IPA driver initialization was successful.\n");

	return 0;

fail_add_interrupt_handler:
	free_irq(resource_p->ipa_irq, master_dev);
fail_ipa_interrupts_init:
	ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
fail_create_apps_resource:
	ipa_rm_exit();
fail_ipa_rm_init:
fail_nat_dev_add:
	cdev_del(&ipa_ctx->cdev);
fail_cdev_add:
	device_destroy(ipa_ctx->class, ipa_ctx->dev_num);
fail_device_create:
	unregister_chrdev_region(ipa_ctx->dev_num, 1);
fail_alloc_chrdev_region:
	if (ipa_ctx->pipe_mem_pool)
		gen_pool_destroy(ipa_ctx->pipe_mem_pool);
fail_empty_rt_tbl:
	ipa_teardown_apps_pipes();
	dma_free_coherent(ipa_ctx->pdev,
			ipa_ctx->empty_rt_tbl_mem.size,
			ipa_ctx->empty_rt_tbl_mem.base,
			ipa_ctx->empty_rt_tbl_mem.phys_base);
fail_apps_pipes:
	idr_destroy(&ipa_ctx->ipa_idr);
fail_dma_pool:
	kmem_cache_destroy(ipa_ctx->rx_pkt_wrapper_cache);
fail_rx_pkt_wrapper_cache:
	kmem_cache_destroy(ipa_ctx->tx_pkt_wrapper_cache);
fail_tx_pkt_wrapper_cache:
	kmem_cache_destroy(ipa_ctx->rt_tbl_cache);
fail_rt_tbl_cache:
	kmem_cache_destroy(ipa_ctx->hdr_proc_ctx_offset_cache);
fail_hdr_proc_ctx_offset_cache:
	kmem_cache_destroy(ipa_ctx->hdr_proc_ctx_cache);
fail_hdr_proc_ctx_cache:
	kmem_cache_destroy(ipa_ctx->hdr_offset_cache);
fail_hdr_offset_cache:
	kmem_cache_destroy(ipa_ctx->hdr_cache);
fail_hdr_cache:
	kmem_cache_destroy(ipa_ctx->rt_rule_cache);
fail_rt_rule_cache:
	kmem_cache_destroy(ipa_ctx->flt_rule_cache);
fail_flt_rule_cache:
	sps_deregister_bam_device(ipa_ctx->bam_handle);
fail_register_bam_device:
	destroy_workqueue(ipa_ctx->sps_power_mgmt_wq);
fail_create_sps_wq:
	destroy_workqueue(ipa_ctx->power_mgmt_wq);
fail_init_hw:
	iounmap(ipa_ctx->mmio);
fail_remap:
	ipa_disable_clks();
fail_clk:
	ipa2_active_clients_log_destroy();
fail_init_active_client:
	msm_bus_scale_unregister_client(ipa_ctx->ipa_bus_hdl);
	if (bus_scale_table) {
		msm_bus_cl_clear_pdata(bus_scale_table);
		bus_scale_table = NULL;
	}
fail_bus_reg:
fail_bind:
	kfree(ipa_ctx->ctrl);
fail_mem_ctrl:
	ipc_log_context_destroy(ipa_ctx->logbuf);
fail_logbuf:
	kfree(ipa_ctx);
	ipa_ctx = NULL;
fail_mem_ctx:
	return result;
}

bool ipa_pm_is_used(void)
{
	return (ipa_ctx) ? ipa_ctx->use_ipa_pm : false;
}

static int get_ipa_dts_configuration(struct platform_device *pdev,
		struct ipa_plat_drv_res *ipa_drv_res)
{
	int result;
	struct resource *resource;

	ipa_drv_res->use_ipa_pm = of_property_read_bool(pdev->dev.of_node,
		"qcom,use-ipa-pm");
	IPADBG("use_ipa_pm=%d\n", ipa_drv_res->use_ipa_pm);

	/* initialize ipa_res */
	ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
	ipa_drv_res->ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
	ipa_drv_res->ipa_hw_type = 0;
	ipa_drv_res->ipa_hw_mode = 0;
	ipa_drv_res->ipa_uc_monitor_holb = false;
	ipa_drv_res->ipa_bam_remote_mode = false;
	ipa_drv_res->modem_cfg_emb_pipe_flt = false;
	ipa_drv_res->ipa_wdi2 = false;
	ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
	ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;

	/* Get IPA HW Version */
	result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
			&ipa_drv_res->ipa_hw_type);
	if ((result) || (ipa_drv_res->ipa_hw_type == 0)) {
		IPAERR(":get resource failed for ipa-hw-ver!\n");
		return -ENODEV;
	}
	IPADBG(": ipa_hw_type = %d", ipa_drv_res->ipa_hw_type);

	/* Get IPA HW mode */
	result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-mode",
			&ipa_drv_res->ipa_hw_mode);
	if (result)
		IPADBG("using default (IPA_MODE_NORMAL) for ipa-hw-mode\n");
	else
		IPADBG(": found ipa_drv_res->ipa_hw_mode = %d",
				ipa_drv_res->ipa_hw_mode);

	/* Check whether ipa_uc_monitor_holb is enabled or disabled */
	ipa_drv_res->ipa_uc_monitor_holb =
		of_property_read_bool(pdev->dev.of_node,
		"qcom,ipa-uc-monitor-holb");
	IPADBG(": ipa uc monitor holb = %s\n",
		ipa_drv_res->ipa_uc_monitor_holb
		? "Enabled" : "Disabled");

	/* Get IPA WAN / LAN RX pool sizes */
	result = of_property_read_u32(pdev->dev.of_node,
			"qcom,wan-rx-ring-size",
			&ipa_drv_res->wan_rx_ring_size);
	if (result)
		IPADBG("using default for wan-rx-ring-size = %u\n",
				ipa_drv_res->wan_rx_ring_size);
	else
		IPADBG(": found ipa_drv_res->wan-rx-ring-size = %u",
				ipa_drv_res->wan_rx_ring_size);

	result = of_property_read_u32(pdev->dev.of_node,
			"qcom,lan-rx-ring-size",
			&ipa_drv_res->lan_rx_ring_size);
	if (result)
		IPADBG("using default for lan-rx-ring-size = %u\n",
			ipa_drv_res->lan_rx_ring_size);
	else
		IPADBG(": found ipa_drv_res->lan-rx-ring-size = %u",
			ipa_drv_res->lan_rx_ring_size);

	ipa_drv_res->use_ipa_teth_bridge =
		of_property_read_bool(pdev->dev.of_node,
		"qcom,use-ipa-tethering-bridge");
	IPADBG(": using TBDr = %s",
		ipa_drv_res->use_ipa_teth_bridge
		? "True" : "False");

	ipa_drv_res->ipa_bam_remote_mode =
		of_property_read_bool(pdev->dev.of_node,
		"qcom,ipa-bam-remote-mode");
	IPADBG(": ipa bam remote mode = %s\n",
		ipa_drv_res->ipa_bam_remote_mode
		? "True" : "False");

	ipa_drv_res->modem_cfg_emb_pipe_flt =
		of_property_read_bool(pdev->dev.of_node,
		"qcom,modem-cfg-emb-pipe-flt");
	IPADBG(": modem configure embedded pipe filtering = %s\n",
		ipa_drv_res->modem_cfg_emb_pipe_flt
		? "True" : "False");

	ipa_drv_res->ipa_wdi2 =
		of_property_read_bool(pdev->dev.of_node,
		"qcom,ipa-wdi2");
	IPADBG(": WDI-2.0 = %s\n",
		ipa_drv_res->ipa_wdi2
		? "True" : "False");

	ipa_drv_res->skip_uc_pipe_reset =
		of_property_read_bool(pdev->dev.of_node,
		"qcom,skip-uc-pipe-reset");
	IPADBG(": skip uC pipe reset = %s\n",
		ipa_drv_res->skip_uc_pipe_reset
		? "True" : "False");

	ipa_drv_res->use_dma_zone =
		of_property_read_bool(pdev->dev.of_node,
		"qcom,use-dma-zone");
	IPADBG(": use dma zone = %s\n",
		ipa_drv_res->use_dma_zone
		? "True" : "False");

	ipa_drv_res->tethered_flow_control =
		of_property_read_bool(pdev->dev.of_node,
		"qcom,tethered-flow-control");
	IPADBG(": Use apps based flow control = %s\n",
		ipa_drv_res->tethered_flow_control
		? "True" : "False");

	/* Get IPA wrapper address */
	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
			"ipa-base");
	if (!resource) {
		IPAERR(":get resource failed for ipa-base!\n");
		return -ENODEV;
	}
	ipa_drv_res->ipa_mem_base = resource->start;
	ipa_drv_res->ipa_mem_size = resource_size(resource);
	IPADBG(": ipa-base = 0x%x, size = 0x%x\n",
			ipa_drv_res->ipa_mem_base,
			ipa_drv_res->ipa_mem_size);

	smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
	smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;

	/* Get IPA BAM address */
	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
			"bam-base");
	if (!resource) {
		IPAERR(":get resource failed for bam-base!\n");
		return -ENODEV;
	}
	ipa_drv_res->bam_mem_base = resource->start;
	ipa_drv_res->bam_mem_size = resource_size(resource);
	IPADBG(": bam-base = 0x%x, size = 0x%x\n",
			ipa_drv_res->bam_mem_base,
			ipa_drv_res->bam_mem_size);

	/* Get IPA pipe mem start ofst */
	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
			"ipa-pipe-mem");
	if (!resource) {
		IPADBG(":not using pipe memory - resource nonexisting\n");
	} else {
		ipa_drv_res->ipa_pipe_mem_start_ofst = resource->start;
		ipa_drv_res->ipa_pipe_mem_size = resource_size(resource);
		IPADBG(":using pipe memory - at 0x%x of size 0x%x\n",
				ipa_drv_res->ipa_pipe_mem_start_ofst,
				ipa_drv_res->ipa_pipe_mem_size);
	}

	/* Get IPA IRQ number */
	resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
			"ipa-irq");
	if (!resource) {
		IPAERR(":get resource failed for ipa-irq!\n");
		return -ENODEV;
	}
	ipa_drv_res->ipa_irq = resource->start;
	IPADBG(":ipa-irq = %d\n", ipa_drv_res->ipa_irq);

	/* Get IPA BAM IRQ number */
	resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
			"bam-irq");
	if (!resource) {
		IPAERR(":get resource failed for bam-irq!\n");
		return -ENODEV;
	}
	ipa_drv_res->bam_irq = resource->start;
	IPADBG(":bam-irq = %d\n", ipa_drv_res->bam_irq);

	result = of_property_read_u32(pdev->dev.of_node, "qcom,ee",
			&ipa_drv_res->ee);
	if (result)
		ipa_drv_res->ee = 0;

	/* Get IPA RX polling timeout (msec) */
	result = of_property_read_u32(pdev->dev.of_node,
		"qcom,rx-polling-sleep-ms",
		&ipa_drv_res->ipa_rx_polling_sleep_msec);

	if (result) {
		ipa_drv_res->ipa_rx_polling_sleep_msec = ONE_MSEC;
		IPADBG("using default polling timeout of 1 msec\n");
	} else {
		IPADBG(": found ipa_drv_res->ipa_rx_polling_sleep_msec = %d",
			ipa_drv_res->ipa_rx_polling_sleep_msec);
	}

	/* Get IPA polling iteration */
	result = of_property_read_u32(pdev->dev.of_node,
		"qcom,ipa-polling-iteration",
		&ipa_drv_res->ipa_polling_iteration);
	if (result) {
		ipa_drv_res->ipa_polling_iteration = MAX_POLLING_ITERATION;
		IPADBG("using default polling iteration\n");
	} else {
		IPADBG(": found ipa_drv_res->ipa_polling_iteration = %d",
			ipa_drv_res->ipa_polling_iteration);
	}

	return 0;
}

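/*
 * Illustrative DT node for the properties parsed above (node name, unit
 * addresses and values are hypothetical; the property strings are the
 * ones this function actually reads):
 *
 *	ipa_hw: qcom,ipa@680000 {
 *		compatible = "qcom,ipa";
 *		reg = <0x680000 0x4000>, <0x684000 0x1a000>;
 *		reg-names = "ipa-base", "bam-base";
 *		interrupt-names = "ipa-irq", "bam-irq";
 *		qcom,ipa-hw-ver = <6>;
 *		qcom,ee = <0>;
 *		qcom,wan-rx-ring-size = <192>;
 *		qcom,use-ipa-tethering-bridge;
 *	};
 */
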
static int ipa_smmu_wlan_cb_probe(struct device *dev)
{
	struct ipa_smmu_cb_ctx *cb = ipa2_get_wlan_smmu_ctx();
	int atomic_ctx = 1;
	int fast = 1;
	int bypass = 1;
	int ret;

	IPADBG("sub pdev=%p\n", dev);

	cb->dev = dev;
	cb->iommu = iommu_domain_alloc(&platform_bus_type);
	if (!cb->iommu) {
		IPAERR("could not alloc iommu domain\n");
		/* assume this failure is because iommu driver is not ready */
		return -EPROBE_DEFER;
	}
	cb->valid = true;

	if (smmu_info.s1_bypass) {
		if (iommu_domain_set_attr(cb->iommu,
				DOMAIN_ATTR_S1_BYPASS,
				&bypass)) {
			IPAERR("couldn't set bypass\n");
			cb->valid = false;
			return -EIO;
		}
		IPADBG("SMMU S1 BYPASS\n");
	} else {
		if (iommu_domain_set_attr(cb->iommu,
				DOMAIN_ATTR_ATOMIC,
				&atomic_ctx)) {
			IPAERR("couldn't set domain as atomic\n");
			cb->valid = false;
			return -EIO;
		}
		IPADBG("SMMU atomic set\n");
		if (smmu_info.fast_map) {
			if (iommu_domain_set_attr(cb->iommu,
					DOMAIN_ATTR_FAST,
					&fast)) {
				IPAERR("couldn't set fast map\n");
				cb->valid = false;
				return -EIO;
			}
			IPADBG("SMMU fast map set\n");
		}
	}

	ret = iommu_attach_device(cb->iommu, dev);
	if (ret) {
		IPAERR("could not attach device ret=%d\n", ret);
		cb->valid = false;
		return ret;
	}

	if (!smmu_info.s1_bypass) {
		IPAERR("map IPA region to WLAN_CB IOMMU\n");
		ret = ipa_iommu_map(cb->iommu,
			rounddown(smmu_info.ipa_base, PAGE_SIZE),
			rounddown(smmu_info.ipa_base, PAGE_SIZE),
			roundup(smmu_info.ipa_size, PAGE_SIZE),
			IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
		if (ret) {
			IPAERR("map IPA to WLAN_CB IOMMU failed ret=%d\n",
				ret);
			arm_iommu_detach_device(cb->dev);
			cb->valid = false;
			return ret;
		}
	}

	return 0;
}

static int ipa_smmu_uc_cb_probe(struct device *dev)
{
	struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx();
	int atomic_ctx = 1;
	int ret;
	int fast = 1;
	int bypass = 1;
	u32 iova_ap_mapping[2];

	IPADBG("UC CB PROBE sub pdev=%p\n", dev);

	ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
			iova_ap_mapping, 2);
	if (ret) {
		IPAERR("Fail to read UC start/size iova addresses\n");
		return ret;
	}
	cb->va_start = iova_ap_mapping[0];
	cb->va_size = iova_ap_mapping[1];
	cb->va_end = cb->va_start + cb->va_size;
	IPADBG("UC va_start=0x%x va_size=0x%x\n", cb->va_start, cb->va_size);

	if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
			dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
		IPAERR("DMA set mask failed\n");
		return -EOPNOTSUPP;
	}

	IPADBG("UC CB PROBE=%p create IOMMU mapping\n", dev);

	cb->dev = dev;
	cb->mapping = arm_iommu_create_mapping(&platform_bus_type,
			cb->va_start, cb->va_size);
	if (IS_ERR_OR_NULL(cb->mapping)) {
		IPADBG("Fail to create mapping\n");
		/* assume this failure is because iommu driver is not ready */
		return -EPROBE_DEFER;
	}
	IPADBG("SMMU mapping created\n");
	cb->valid = true;

	IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
	if (smmu_info.s1_bypass) {
		if (iommu_domain_set_attr(cb->mapping->domain,
				DOMAIN_ATTR_S1_BYPASS,
				&bypass)) {
			IPAERR("couldn't set bypass\n");
			arm_iommu_release_mapping(cb->mapping);
			cb->valid = false;
			return -EIO;
		}
		IPADBG("SMMU S1 BYPASS\n");
	} else {
		if (iommu_domain_set_attr(cb->mapping->domain,
				DOMAIN_ATTR_ATOMIC,
				&atomic_ctx)) {
			IPAERR("couldn't set domain as atomic\n");
			arm_iommu_release_mapping(cb->mapping);
			cb->valid = false;
			return -EIO;
		}
		IPADBG("SMMU atomic set\n");
		if (smmu_info.fast_map) {
			if (iommu_domain_set_attr(cb->mapping->domain,
					DOMAIN_ATTR_FAST,
					&fast)) {
				IPAERR("couldn't set fast map\n");
				arm_iommu_release_mapping(cb->mapping);
				cb->valid = false;
				return -EIO;
			}
			IPADBG("SMMU fast map set\n");
		}
	}

	IPADBG("UC CB PROBE sub pdev=%p attaching IOMMU device\n", dev);
	ret = arm_iommu_attach_device(cb->dev, cb->mapping);
	if (ret) {
		IPAERR("could not attach device ret=%d\n", ret);
		arm_iommu_release_mapping(cb->mapping);
		cb->valid = false;
		return ret;
	}

	cb->next_addr = cb->va_end;
	ipa_ctx->uc_pdev = dev;

	IPADBG("UC CB PROBE pdev=%p attached\n", dev);
	return 0;
}

static int ipa_smmu_ap_cb_probe(struct device *dev)
{
	struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx();
	int result;
	int atomic_ctx = 1;
	int fast = 1;
	int bypass = 1;
	u32 iova_ap_mapping[2];

	IPADBG("AP CB probe: sub pdev=%p\n", dev);

	result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
			iova_ap_mapping, 2);
	if (result) {
		IPAERR("Fail to read AP start/size iova addresses\n");
		return result;
	}
	cb->va_start = iova_ap_mapping[0];
	cb->va_size = iova_ap_mapping[1];
	cb->va_end = cb->va_start + cb->va_size;
	IPADBG("AP va_start=0x%x va_size=0x%x\n", cb->va_start, cb->va_size);

	if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
			dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
		IPAERR("DMA set mask failed\n");
		return -EOPNOTSUPP;
	}

	cb->dev = dev;
	cb->mapping = arm_iommu_create_mapping(&platform_bus_type,
			cb->va_start,
			cb->va_size);
	if (IS_ERR_OR_NULL(cb->mapping)) {
		IPADBG("Fail to create mapping\n");
		/* assume this failure is because iommu driver is not ready */
		return -EPROBE_DEFER;
	}
	IPADBG("SMMU mapping created\n");
	cb->valid = true;

	if (smmu_info.s1_bypass) {
		if (iommu_domain_set_attr(cb->mapping->domain,
				DOMAIN_ATTR_S1_BYPASS,
				&bypass)) {
			IPAERR("couldn't set bypass\n");
			arm_iommu_release_mapping(cb->mapping);
			cb->valid = false;
			return -EIO;
		}
		IPADBG("SMMU S1 BYPASS\n");
	} else {
		if (iommu_domain_set_attr(cb->mapping->domain,
				DOMAIN_ATTR_ATOMIC,
				&atomic_ctx)) {
			IPAERR("couldn't set domain as atomic\n");
			arm_iommu_release_mapping(cb->mapping);
			cb->valid = false;
			return -EIO;
		}
		IPADBG("SMMU atomic set\n");

		if (iommu_domain_set_attr(cb->mapping->domain,
				DOMAIN_ATTR_FAST,
				&fast)) {
			IPAERR("couldn't set fast map\n");
			arm_iommu_release_mapping(cb->mapping);
			cb->valid = false;
			return -EIO;
		}
		IPADBG("SMMU fast map set\n");
	}

	result = arm_iommu_attach_device(cb->dev, cb->mapping);
	if (result) {
		IPAERR("couldn't attach to IOMMU ret=%d\n", result);
		cb->valid = false;
		return result;
	}

	if (!smmu_info.s1_bypass) {
		IPAERR("map IPA region to AP_CB IOMMU\n");
		result = ipa_iommu_map(cb->mapping->domain,
			rounddown(smmu_info.ipa_base, PAGE_SIZE),
			rounddown(smmu_info.ipa_base, PAGE_SIZE),
			roundup(smmu_info.ipa_size, PAGE_SIZE),
			IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
		if (result) {
			IPAERR("map IPA region to AP_CB IOMMU failed ret=%d\n",
				result);
			arm_iommu_release_mapping(cb->mapping);
			cb->valid = false;
			return result;
		}
	}

	smmu_info.present = true;

	if (!bus_scale_table)
		bus_scale_table = msm_bus_cl_get_pdata(ipa_pdev);

	/* Proceed to real initialization */
	result = ipa_init(&ipa_res, dev);
	if (result) {
		IPAERR("ipa_init failed\n");
		arm_iommu_detach_device(cb->dev);
		arm_iommu_release_mapping(cb->mapping);
		cb->valid = false;
		return result;
	}

	return result;
}

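/*
 * Note the probe ordering: when an SMMU is present, ipa_init() runs here
 * at the end of the AP context-bank probe rather than directly from
 * ipa_plat_drv_probe(), since register access and DMA allocations need
 * the AP IOMMU mapping attached first.
 */
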
int ipa_plat_drv_probe(struct platform_device *pdev_p,
	struct ipa_api_controller *api_ctrl,
	const struct of_device_id *pdrv_match)
{
	int result;
	struct device *dev = &pdev_p->dev;

	IPADBG("IPA driver probing started\n");

	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-ap-cb"))
		return ipa_smmu_ap_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-wlan-cb"))
		return ipa_smmu_wlan_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-uc-cb"))
		return ipa_smmu_uc_cb_probe(dev);

	master_dev = dev;
	if (!ipa_pdev)
		ipa_pdev = pdev_p;

	result = get_ipa_dts_configuration(pdev_p, &ipa_res);
	if (result) {
		IPAERR("IPA dts parsing failed\n");
		return result;
	}

	result = ipa2_bind_api_controller(ipa_res.ipa_hw_type, api_ctrl);
	if (result) {
		IPAERR("IPA API binding failed\n");
		return result;
	}

	if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
		if (of_property_read_bool(pdev_p->dev.of_node,
			"qcom,smmu-s1-bypass"))
			smmu_info.s1_bypass = true;
		if (of_property_read_bool(pdev_p->dev.of_node,
			"qcom,smmu-fast-map"))
			smmu_info.fast_map = true;
		smmu_info.arm_smmu = true;
		pr_info("IPA smmu_info.s1_bypass=%d smmu_info.fast_map=%d\n",
			smmu_info.s1_bypass, smmu_info.fast_map);
		result = of_platform_populate(pdev_p->dev.of_node,
				pdrv_match, NULL, &pdev_p->dev);
	} else if (of_property_read_bool(pdev_p->dev.of_node,
			"qcom,msm-smmu")) {
		IPAERR("Legacy IOMMU not supported\n");
		result = -EOPNOTSUPP;
	} else {
		if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(32)) ||
			dma_set_coherent_mask(&pdev_p->dev,
			DMA_BIT_MASK(32))) {
			IPAERR("DMA set mask failed\n");
			return -EOPNOTSUPP;
		}

		if (!bus_scale_table)
			bus_scale_table = msm_bus_cl_get_pdata(pdev_p);

		/* Proceed to real initialization */
		result = ipa_init(&ipa_res, dev);
		if (result) {
			IPAERR("ipa_init failed\n");
			return result;
		}
	}

	return result;
}

/**
 * ipa2_ap_suspend() - suspend callback for runtime_pm
 * @dev: pointer to device
 *
 * This callback will be invoked by the runtime_pm framework when an AP suspend
 * operation is invoked, usually by pressing a suspend button.
 *
 * Returns -EAGAIN to runtime_pm framework in case IPA is in use by AP.
 * This will postpone the suspend operation until IPA is no longer used by AP.
 */
int ipa2_ap_suspend(struct device *dev)
{
	int i;

	IPADBG("Enter...\n");

	/* In case there is a tx/rx handler in polling mode fail to suspend */
	for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
		if (ipa_ctx->ep[i].sys &&
			atomic_read(&ipa_ctx->ep[i].sys->curr_polling_state)) {
			IPAERR("EP %d is in polling state, do not suspend\n",
				i);
			return -EAGAIN;
		}
	}

	/* release the SPS IPA resource without waiting for inactivity timer */
	atomic_set(&ipa_ctx->sps_pm.eot_activity, 0);
	ipa_sps_release_resource(NULL);
	IPADBG("Exit\n");

	return 0;
}

/**
* ipa2_ap_resume() - resume callback for runtime_pm
* @dev: pointer to device
*
* This callback will be invoked by the runtime_pm framework when an AP resume
* operation is invoked.
*
* Always returns 0 since resume should always succeed.
*/
int ipa2_ap_resume(struct device *dev)
{
	return 0;
}

struct ipa_context *ipa_get_ctx(void)
{
	return ipa_ctx;
}

int ipa_iommu_map(struct iommu_domain *domain,
	unsigned long iova, phys_addr_t paddr, size_t size, int prot)
{
	struct ipa_smmu_cb_ctx *ap_cb = ipa2_get_smmu_ctx();
	struct ipa_smmu_cb_ctx *uc_cb = ipa2_get_uc_smmu_ctx();

	IPADBG("domain =0x%p iova 0x%lx\n", domain, iova);
	IPADBG("paddr =0x%pa size 0x%x\n", &paddr, (u32)size);

	/* Check for address overlap */
	if (domain == ipa2_get_smmu_domain()) {
		if (iova >= ap_cb->va_start && iova < ap_cb->va_end)
			IPAERR("iommu AP overlap addr 0x%lx\n", iova);
	} else if (domain == ipa2_get_wlan_smmu_domain()) {
		/* wlan is one time map */
	} else if (domain == ipa2_get_uc_smmu_domain()) {
		if (iova >= uc_cb->va_start && iova < uc_cb->va_end)
			IPAERR("iommu uC overlap addr 0x%lx\n", iova);
	} else {
		IPAERR("Unexpected domain 0x%p\n", domain);
		ipa_assert();
		return -EFAULT;
	}

	return iommu_map(domain, iova, paddr, size, prot);
}

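/*
 * Note: the overlap checks in ipa_iommu_map() only log; an iova that
 * falls inside the AP or uC IOVA ranges is still passed to iommu_map().
 * Only an unexpected domain is treated as a fatal error.
 */
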
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPA HW device driver");