blob: 7759c98540985501e7bf3970d3f188011b0e9b87 [file] [log] [blame]
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Amir Levy9659e592016-10-27 18:08:27 +03002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/clk.h>
14#include <linux/compat.h>
15#include <linux/device.h>
16#include <linux/dmapool.h>
17#include <linux/fs.h>
18#include <linux/genalloc.h>
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/mm.h>
22#include <linux/module.h>
23#include <linux/of.h>
24#include <linux/of_platform.h>
25#include <linux/platform_device.h>
26#include <linux/rbtree.h>
27#include <linux/of_gpio.h>
28#include <linux/uaccess.h>
29#include <linux/interrupt.h>
30#include <linux/msm-bus.h>
31#include <linux/msm-bus-board.h>
32#include <linux/netdevice.h>
33#include <linux/delay.h>
34#include <linux/msm_gsi.h>
Amir Levy9659e592016-10-27 18:08:27 +030035#include <linux/time.h>
36#include <linux/hashtable.h>
Amir Levyd9f51132016-11-14 16:55:35 +020037#include <linux/jhash.h>
Amir Levy9659e592016-10-27 18:08:27 +030038#include <soc/qcom/subsystem_restart.h>
39#include <soc/qcom/smem.h>
Gidon Studinski3021a6f2016-11-10 12:48:48 +020040#include <soc/qcom/scm.h>
Amir Levy635bced2016-12-19 09:20:42 +020041#include <asm/cacheflush.h>
Gidon Studinski3021a6f2016-11-10 12:48:48 +020042
#ifdef CONFIG_ARM64

/*
 * Outer caches are unsupported on ARM64 platforms, so outer-cache
 * maintenance becomes a no-op and the ARM32 d-cache flush helper is
 * aliased to its ARM64 equivalent.
 */
#define outer_flush_range(x, y)
#define __cpuc_flush_dcache_area __flush_dcache_area

#endif
50
#define IPA_SUBSYSTEM_NAME "ipa_fws"
#include "ipa_i.h"
#include "../ipa_rm_i.h"
#include "ipahal/ipahal.h"
#include "ipahal/ipahal_fltrt.h"

#define CREATE_TRACE_POINTS
#include "ipa_trace.h"

/* Indices into the query/response clock-handshake GPIO arrays */
#define IPA_GPIO_IN_QUERY_CLK_IDX 0
#define IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX 0
#define IPA_GPIO_OUT_CLK_VOTE_IDX 1

#define IPA_SUMMING_THRESHOLD (0x10)
/* Pipe memory region: zero start/size means no dedicated pipe memory */
#define IPA_PIPE_MEM_START_OFST (0x0)
#define IPA_PIPE_MEM_SIZE (0x0)
/*
 * True when @x is any of the mobile-AP operating modes.
 * The argument is parenthesized so that expressions such as
 * IPA_MOBILE_AP_MODE(a ? b : c) expand with the intended precedence.
 */
#define IPA_MOBILE_AP_MODE(x) ((x) == IPA_MODE_MOBILE_AP_ETH || \
			       (x) == IPA_MODE_MOBILE_AP_WAN || \
			       (x) == IPA_MODE_MOBILE_AP_WLAN)
#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
#define IPA_A5_MUX_HEADER_LENGTH (8)

#define IPA_AGGR_MAX_STR_LENGTH (10)

#define CLEANUP_TAG_PROCESS_TIMEOUT 500

/* Bytes needed to hold an aggregation string plus its NUL terminator */
#define IPA_AGGR_STR_IN_BYTES(str) \
	(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)

#define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100

#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048

/* Entry types recorded in the active-clients hash table */
#define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0
#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
#define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3

#define IPA_SMEM_SIZE (8 * 1024)

/* Round addresses to the closest page boundary per SMMU requirements:
 * iova/pa are rounded down, size is grown to cover the shifted start.
 */
#define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \
	do { \
		(iova_p) = rounddown((iova), PAGE_SIZE); \
		(pa_p) = rounddown((pa), PAGE_SIZE); \
		(size_p) = roundup((size) + (pa) - (pa_p), PAGE_SIZE); \
	} while (0)


/* The relative location in /lib/firmware where the FWs will reside */
#define IPA_FWS_PATH "ipa/ipa_fws.elf"
102
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat ioctl numbers. Each mirrors its native counterpart but
 * encodes a compat_uptr_t payload size, so the command codes generated
 * by 32-bit user space match when decoded on a 64-bit kernel.
 */
#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_ADD_HDR, \
				compat_uptr_t)
#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_DEL_HDR, \
				compat_uptr_t)
#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_ADD_RT_RULE, \
				compat_uptr_t)
#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_DEL_RT_RULE, \
				compat_uptr_t)
#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_ADD_FLT_RULE, \
				compat_uptr_t)
#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_DEL_FLT_RULE, \
				compat_uptr_t)
#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_GET_RT_TBL, \
				compat_uptr_t)
#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_COPY_HDR, \
				compat_uptr_t)
#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_QUERY_INTF, \
				compat_uptr_t)
#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_QUERY_INTF_TX_PROPS, \
				compat_uptr_t)
#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_QUERY_INTF_RX_PROPS, \
				compat_uptr_t)
#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_QUERY_INTF_EXT_PROPS, \
				compat_uptr_t)
#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_GET_HDR, \
				compat_uptr_t)
#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_ALLOC_NAT_MEM, \
				compat_uptr_t)
#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_V4_INIT_NAT, \
				compat_uptr_t)
#define IPA_IOC_NAT_DMA32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_NAT_DMA, \
				compat_uptr_t)
#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_V4_DEL_NAT, \
				compat_uptr_t)
#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_GET_NAT_OFFSET, \
				compat_uptr_t)
#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_PULL_MSG, \
				compat_uptr_t)
#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_RM_ADD_DEPENDENCY, \
				compat_uptr_t)
#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_RM_DEL_DEPENDENCY, \
				compat_uptr_t)
#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_GENERATE_FLT_EQ, \
				compat_uptr_t)
#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_QUERY_RT_TBL_INDEX, \
				compat_uptr_t)
#define IPA_IOC_WRITE_QMAPID32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_WRITE_QMAPID, \
				compat_uptr_t)
#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_MDFY_FLT_RULE, \
				compat_uptr_t)
#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
				compat_uptr_t)
#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \
				compat_uptr_t)
#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \
				compat_uptr_t)
#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_ADD_HDR_PROC_CTX, \
				compat_uptr_t)
#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_DEL_HDR_PROC_CTX, \
				compat_uptr_t)
#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
				IPA_IOCTL_MDFY_RT_RULE, \
				compat_uptr_t)

/**
 * struct ipa3_ioc_nat_alloc_mem32 - nat table memory allocation
 * properties (32-bit compat layout)
 * @dev_name: input parameter, the name of table
 * @size: input parameter, size of table in bytes
 * @offset: output parameter, offset into page in case of system memory
 */
struct ipa3_ioc_nat_alloc_mem32 {
	char dev_name[IPA_RESOURCE_NAME_MAX];
	compat_size_t size;
	compat_off_t offset;
};
#endif
211
/* Identifiers passed to TZ when requesting IPA SMMU region protection */
#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311
#define TZ_MEM_PROTECT_REGION_ID 0x10

/* One scatter entry of a TZ memory-protect request.
 * NOTE(review): layout appears dictated by the TZ interface — the
 * __packed attribute must be preserved.
 */
struct tz_smmu_ipa_protect_region_iovec_s {
	u64 input_addr;
	u64 output_addr;
	u64 size;
	u32 attr;
} __packed;

/* Top-level TZ memory-protect request: physical address of the iovec
 * array and its total size in bytes.
 */
struct tz_smmu_ipa_protect_region_s {
	phys_addr_t iovec_buf;
	u32 size_bytes;
} __packed;
226
/* Work item bound to the TAG process handler */
static void ipa3_start_tag_process(struct work_struct *work);
static DECLARE_WORK(ipa3_tag_work, ipa3_start_tag_process);

/* Delayed work bound to the transport resource release handler */
static void ipa3_transport_release_resource(struct work_struct *work);
static DECLARE_DELAYED_WORK(ipa3_transport_release_resource_work,
	ipa3_transport_release_resource);
static void ipa_gsi_notify_cb(struct gsi_per_notify *notify);

static struct ipa3_plat_drv_res ipa3_res = {0, };
struct msm_bus_scale_pdata *ipa3_bus_scale_table;

static struct clk *ipa3_clk;

/* Global driver context plus device handles shared across this file */
struct ipa3_context *ipa3_ctx;
static struct device *master_dev;
struct platform_device *ipa3_pdev;

/* File-scope SMMU configuration; presumably filled during probe —
 * confirm against the probe path outside this chunk.
 */
static struct {
	bool present;
	bool arm_smmu;
	bool fast_map;
	bool s1_bypass;
	bool use_64_bit_dma_mask;
	u32 ipa_base;
	u32 ipa_size;
} smmu_info;

/* Pre-allocated buffer used by the panic notifier to dump the table */
static char *active_clients_table_buf;
254
255int ipa3_active_clients_log_print_buffer(char *buf, int size)
256{
257 int i;
258 int nbytes;
259 int cnt = 0;
260 int start_idx;
261 int end_idx;
262
263 start_idx = (ipa3_ctx->ipa3_active_clients_logging.log_tail + 1) %
264 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
265 end_idx = ipa3_ctx->ipa3_active_clients_logging.log_head;
266 for (i = start_idx; i != end_idx;
267 i = (i + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) {
268 nbytes = scnprintf(buf + cnt, size - cnt, "%s\n",
269 ipa3_ctx->ipa3_active_clients_logging
270 .log_buffer[i]);
271 cnt += nbytes;
272 }
273
274 return cnt;
275}
276
277int ipa3_active_clients_log_print_table(char *buf, int size)
278{
279 int i;
280 struct ipa3_active_client_htable_entry *iterator;
281 int cnt = 0;
282
283 cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n");
284 hash_for_each(ipa3_ctx->ipa3_active_clients_logging.htable, i,
285 iterator, list) {
286 switch (iterator->type) {
287 case IPA3_ACTIVE_CLIENT_LOG_TYPE_EP:
288 cnt += scnprintf(buf + cnt, size - cnt,
289 "%-40s %-3d ENDPOINT\n",
290 iterator->id_string, iterator->count);
291 break;
292 case IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE:
293 cnt += scnprintf(buf + cnt, size - cnt,
294 "%-40s %-3d SIMPLE\n",
295 iterator->id_string, iterator->count);
296 break;
297 case IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE:
298 cnt += scnprintf(buf + cnt, size - cnt,
299 "%-40s %-3d RESOURCE\n",
300 iterator->id_string, iterator->count);
301 break;
302 case IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL:
303 cnt += scnprintf(buf + cnt, size - cnt,
304 "%-40s %-3d SPECIAL\n",
305 iterator->id_string, iterator->count);
306 break;
307 default:
308 IPAERR("Trying to print illegal active_clients type");
309 break;
310 }
311 }
312 cnt += scnprintf(buf + cnt, size - cnt,
313 "\nTotal active clients count: %d\n",
314 ipa3_ctx->ipa3_active_clients.cnt);
315
316 return cnt;
317}
318
/*
 * ipa3_active_clients_panic_notifier() - panic-time dump of the active
 * clients table.
 *
 * Renders the table into the pre-allocated active_clients_table_buf and
 * emits it via IPAERR so the client reference state lands in the crash
 * log. Always returns NOTIFY_DONE to let the notifier chain continue.
 */
static int ipa3_active_clients_panic_notifier(struct notifier_block *this,
		unsigned long event, void *ptr)
{
	ipa3_active_clients_lock();
	ipa3_active_clients_log_print_table(active_clients_table_buf,
			IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE);
	IPAERR("%s", active_clients_table_buf);
	ipa3_active_clients_unlock();

	return NOTIFY_DONE;
}

static struct notifier_block ipa3_active_clients_panic_blk = {
	.notifier_call = ipa3_active_clients_panic_notifier,
};
334
335static int ipa3_active_clients_log_insert(const char *string)
336{
337 int head;
338 int tail;
339
340 if (!ipa3_ctx->ipa3_active_clients_logging.log_rdy)
341 return -EPERM;
342
343 head = ipa3_ctx->ipa3_active_clients_logging.log_head;
344 tail = ipa3_ctx->ipa3_active_clients_logging.log_tail;
345
346 memset(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], '_',
347 IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
348 strlcpy(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], string,
349 (size_t)IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
350 head = (head + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
351 if (tail == head)
352 tail = (tail + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
353
354 ipa3_ctx->ipa3_active_clients_logging.log_tail = tail;
355 ipa3_ctx->ipa3_active_clients_logging.log_head = head;
356
357 return 0;
358}
359
360static int ipa3_active_clients_log_init(void)
361{
362 int i;
363
364 ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] = kzalloc(
365 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES *
366 sizeof(char[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN]),
367 GFP_KERNEL);
368 active_clients_table_buf = kzalloc(sizeof(
369 char[IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE]), GFP_KERNEL);
370 if (ipa3_ctx->ipa3_active_clients_logging.log_buffer == NULL) {
371 pr_err("Active Clients Logging memory allocation failed");
372 goto bail;
373 }
374 for (i = 0; i < IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; i++) {
375 ipa3_ctx->ipa3_active_clients_logging.log_buffer[i] =
376 ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] +
377 (IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN * i);
378 }
379 ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
380 ipa3_ctx->ipa3_active_clients_logging.log_tail =
381 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
382 hash_init(ipa3_ctx->ipa3_active_clients_logging.htable);
383 atomic_notifier_chain_register(&panic_notifier_list,
384 &ipa3_active_clients_panic_blk);
385 ipa3_ctx->ipa3_active_clients_logging.log_rdy = 1;
386
387 return 0;
388
389bail:
390 return -ENOMEM;
391}
392
/*
 * ipa3_active_clients_log_clear() - empty the active-clients ring
 * buffer under the active-clients lock.
 *
 * Resetting head to 0 and tail to head - 1 (mod size) is the "empty
 * ring" condition used by the insert and print routines.
 */
void ipa3_active_clients_log_clear(void)
{
	ipa3_active_clients_lock();
	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
	ipa3_ctx->ipa3_active_clients_logging.log_tail =
			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
	ipa3_active_clients_unlock();
}
401
402static void ipa3_active_clients_log_destroy(void)
403{
404 ipa3_ctx->ipa3_active_clients_logging.log_rdy = 0;
405 kfree(ipa3_ctx->ipa3_active_clients_logging.log_buffer[0]);
406 ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
407 ipa3_ctx->ipa3_active_clients_logging.log_tail =
408 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
409}
410
/* SMMU context banks: AP, WLAN and uC each get their own entry */
enum ipa_smmu_cb_type {
	IPA_SMMU_CB_AP,
	IPA_SMMU_CB_WLAN,
	IPA_SMMU_CB_UC,
	IPA_SMMU_CB_MAX

};

/* Per-context-bank SMMU state, indexed by enum ipa_smmu_cb_type */
static struct ipa_smmu_cb_ctx smmu_cb[IPA_SMMU_CB_MAX];
420
421struct iommu_domain *ipa3_get_smmu_domain(void)
422{
423 if (smmu_cb[IPA_SMMU_CB_AP].valid)
424 return smmu_cb[IPA_SMMU_CB_AP].mapping->domain;
425
426 IPAERR("CB not valid\n");
427
428 return NULL;
429}
430
431struct iommu_domain *ipa3_get_uc_smmu_domain(void)
432{
433 if (smmu_cb[IPA_SMMU_CB_UC].valid)
434 return smmu_cb[IPA_SMMU_CB_UC].mapping->domain;
435
436 IPAERR("CB not valid\n");
437
438 return NULL;
439}
440
441struct iommu_domain *ipa3_get_wlan_smmu_domain(void)
442{
443 if (smmu_cb[IPA_SMMU_CB_WLAN].valid)
444 return smmu_cb[IPA_SMMU_CB_WLAN].iommu;
445
446 IPAERR("CB not valid\n");
447
448 return NULL;
449}
450
451
/* Return the device used for DMA allocations (ipa3_ctx->pdev) */
struct device *ipa3_get_dma_dev(void)
{
	return ipa3_ctx->pdev;
}
456
/**
 * ipa3_get_smmu_ctx()- Return the AP smmu context
 *
 * Return value: pointer to smmu context address
 */
struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(void)
{
	return &smmu_cb[IPA_SMMU_CB_AP];
}
466
/**
 * ipa3_get_wlan_smmu_ctx()- Return the wlan smmu context
 *
 * Return value: pointer to smmu context address
 */
struct ipa_smmu_cb_ctx *ipa3_get_wlan_smmu_ctx(void)
{
	return &smmu_cb[IPA_SMMU_CB_WLAN];
}
476
/**
 * ipa3_get_uc_smmu_ctx()- Return the uc smmu context
 *
 * Return value: pointer to smmu context address
 */
struct ipa_smmu_cb_ctx *ipa3_get_uc_smmu_ctx(void)
{
	return &smmu_cb[IPA_SMMU_CB_UC];
}
486
487static int ipa3_open(struct inode *inode, struct file *filp)
488{
489 struct ipa3_context *ctx = NULL;
490
491 IPADBG_LOW("ENTER\n");
492 ctx = container_of(inode->i_cdev, struct ipa3_context, cdev);
493 filp->private_data = ctx;
494
495 return 0;
496}
497
498/**
499* ipa3_flow_control() - Enable/Disable flow control on a particular client.
500* Return codes:
501* None
502*/
503void ipa3_flow_control(enum ipa_client_type ipa_client,
504 bool enable, uint32_t qmap_id)
505{
506 struct ipa_ep_cfg_ctrl ep_ctrl = {0};
507 int ep_idx;
508 struct ipa3_ep_context *ep;
509
510 /* Check if tethered flow control is needed or not.*/
511 if (!ipa3_ctx->tethered_flow_control) {
512 IPADBG("Apps flow control is not needed\n");
513 return;
514 }
515
516 /* Check if ep is valid. */
517 ep_idx = ipa3_get_ep_mapping(ipa_client);
518 if (ep_idx == -1) {
519 IPADBG("Invalid IPA client\n");
520 return;
521 }
522
523 ep = &ipa3_ctx->ep[ep_idx];
524 if (!ep->valid || (ep->client != IPA_CLIENT_USB_PROD)) {
525 IPADBG("EP not valid/Not applicable for client.\n");
526 return;
527 }
528
529 spin_lock(&ipa3_ctx->disconnect_lock);
530 /* Check if the QMAP_ID matches. */
531 if (ep->cfg.meta.qmap_id != qmap_id) {
532 IPADBG("Flow control ind not for same flow: %u %u\n",
533 ep->cfg.meta.qmap_id, qmap_id);
534 spin_unlock(&ipa3_ctx->disconnect_lock);
535 return;
536 }
537 if (!ep->disconnect_in_progress) {
538 if (enable) {
539 IPADBG("Enabling Flow\n");
540 ep_ctrl.ipa_ep_delay = false;
541 IPA_STATS_INC_CNT(ipa3_ctx->stats.flow_enable);
542 } else {
543 IPADBG("Disabling Flow\n");
544 ep_ctrl.ipa_ep_delay = true;
545 IPA_STATS_INC_CNT(ipa3_ctx->stats.flow_disable);
546 }
547 ep_ctrl.ipa_ep_suspend = false;
548 ipa3_cfg_ep_ctrl(ep_idx, &ep_ctrl);
549 } else {
550 IPADBG("EP disconnect is in progress\n");
551 }
552 spin_unlock(&ipa3_ctx->disconnect_lock);
553}
554
555static void ipa3_wan_msg_free_cb(void *buff, u32 len, u32 type)
556{
557 if (!buff) {
558 IPAERR("Null buffer\n");
559 return;
560 }
561
562 if (type != WAN_UPSTREAM_ROUTE_ADD &&
563 type != WAN_UPSTREAM_ROUTE_DEL &&
564 type != WAN_EMBMS_CONNECT) {
565 IPAERR("Wrong type given. buff %p type %d\n", buff, type);
566 return;
567 }
568
569 kfree(buff);
570}
571
572static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type)
573{
574 int retval;
575 struct ipa_wan_msg *wan_msg;
576 struct ipa_msg_meta msg_meta;
577
578 wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
579 if (!wan_msg) {
580 IPAERR("no memory\n");
581 return -ENOMEM;
582 }
583
584 if (copy_from_user((u8 *)wan_msg, (u8 *)usr_param,
585 sizeof(struct ipa_wan_msg))) {
586 kfree(wan_msg);
587 return -EFAULT;
588 }
589
590 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
591 msg_meta.msg_type = msg_type;
592 msg_meta.msg_len = sizeof(struct ipa_wan_msg);
593 retval = ipa3_send_msg(&msg_meta, wan_msg, ipa3_wan_msg_free_cb);
594 if (retval) {
595 IPAERR("ipa3_send_msg failed: %d\n", retval);
596 kfree(wan_msg);
597 return retval;
598 }
599
600 return 0;
601}
602
603
604static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
605{
606 int retval = 0;
607 u32 pyld_sz;
608 u8 header[128] = { 0 };
609 u8 *param = NULL;
610 struct ipa_ioc_nat_alloc_mem nat_mem;
611 struct ipa_ioc_v4_nat_init nat_init;
612 struct ipa_ioc_v4_nat_del nat_del;
613 struct ipa_ioc_rm_dependency rm_depend;
614 size_t sz;
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200615 int pre_entry;
Amir Levy9659e592016-10-27 18:08:27 +0300616
617 IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
618
Amir Levy9659e592016-10-27 18:08:27 +0300619 if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
620 return -ENOTTY;
621 if (_IOC_NR(cmd) >= IPA_IOCTL_MAX)
622 return -ENOTTY;
623
Amir Levy05532622016-11-28 12:12:01 +0200624 if (!ipa3_is_ready()) {
625 IPAERR("IPA not ready, waiting for init completion\n");
626 wait_for_completion(&ipa3_ctx->init_completion_obj);
627 }
628
Amir Levy9659e592016-10-27 18:08:27 +0300629 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
630
631 switch (cmd) {
632 case IPA_IOC_ALLOC_NAT_MEM:
633 if (copy_from_user((u8 *)&nat_mem, (u8 *)arg,
634 sizeof(struct ipa_ioc_nat_alloc_mem))) {
635 retval = -EFAULT;
636 break;
637 }
638 /* null terminate the string */
639 nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
640
641 if (ipa3_allocate_nat_device(&nat_mem)) {
642 retval = -EFAULT;
643 break;
644 }
645 if (copy_to_user((u8 *)arg, (u8 *)&nat_mem,
646 sizeof(struct ipa_ioc_nat_alloc_mem))) {
647 retval = -EFAULT;
648 break;
649 }
650 break;
651 case IPA_IOC_V4_INIT_NAT:
652 if (copy_from_user((u8 *)&nat_init, (u8 *)arg,
653 sizeof(struct ipa_ioc_v4_nat_init))) {
654 retval = -EFAULT;
655 break;
656 }
657 if (ipa3_nat_init_cmd(&nat_init)) {
658 retval = -EFAULT;
659 break;
660 }
661 break;
662
663 case IPA_IOC_NAT_DMA:
664 if (copy_from_user(header, (u8 *)arg,
665 sizeof(struct ipa_ioc_nat_dma_cmd))) {
666 retval = -EFAULT;
667 break;
668 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200669 pre_entry =
670 ((struct ipa_ioc_nat_dma_cmd *)header)->entries;
Amir Levy9659e592016-10-27 18:08:27 +0300671 pyld_sz =
672 sizeof(struct ipa_ioc_nat_dma_cmd) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200673 pre_entry * sizeof(struct ipa_ioc_nat_dma_one);
Amir Levy9659e592016-10-27 18:08:27 +0300674 param = kzalloc(pyld_sz, GFP_KERNEL);
675 if (!param) {
676 retval = -ENOMEM;
677 break;
678 }
679
680 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
681 retval = -EFAULT;
682 break;
683 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200684 /* add check in case user-space module compromised */
685 if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
686 != pre_entry)) {
687 IPAERR("current %d pre %d\n",
688 ((struct ipa_ioc_nat_dma_cmd *)param)->entries,
689 pre_entry);
690 retval = -EFAULT;
691 break;
692 }
Amir Levy9659e592016-10-27 18:08:27 +0300693 if (ipa3_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
694 retval = -EFAULT;
695 break;
696 }
697 break;
698
699 case IPA_IOC_V4_DEL_NAT:
700 if (copy_from_user((u8 *)&nat_del, (u8 *)arg,
701 sizeof(struct ipa_ioc_v4_nat_del))) {
702 retval = -EFAULT;
703 break;
704 }
705 if (ipa3_nat_del_cmd(&nat_del)) {
706 retval = -EFAULT;
707 break;
708 }
709 break;
710
711 case IPA_IOC_ADD_HDR:
712 if (copy_from_user(header, (u8 *)arg,
713 sizeof(struct ipa_ioc_add_hdr))) {
714 retval = -EFAULT;
715 break;
716 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200717 pre_entry =
718 ((struct ipa_ioc_add_hdr *)header)->num_hdrs;
Amir Levy9659e592016-10-27 18:08:27 +0300719 pyld_sz =
720 sizeof(struct ipa_ioc_add_hdr) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200721 pre_entry * sizeof(struct ipa_hdr_add);
Amir Levy9659e592016-10-27 18:08:27 +0300722 param = kzalloc(pyld_sz, GFP_KERNEL);
723 if (!param) {
724 retval = -ENOMEM;
725 break;
726 }
727 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
728 retval = -EFAULT;
729 break;
730 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200731 /* add check in case user-space module compromised */
732 if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
733 != pre_entry)) {
734 IPAERR("current %d pre %d\n",
735 ((struct ipa_ioc_add_hdr *)param)->num_hdrs,
736 pre_entry);
737 retval = -EFAULT;
738 break;
739 }
Amir Levy9659e592016-10-27 18:08:27 +0300740 if (ipa3_add_hdr((struct ipa_ioc_add_hdr *)param)) {
741 retval = -EFAULT;
742 break;
743 }
744 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
745 retval = -EFAULT;
746 break;
747 }
748 break;
749
750 case IPA_IOC_DEL_HDR:
751 if (copy_from_user(header, (u8 *)arg,
752 sizeof(struct ipa_ioc_del_hdr))) {
753 retval = -EFAULT;
754 break;
755 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200756 pre_entry =
757 ((struct ipa_ioc_del_hdr *)header)->num_hdls;
Amir Levy9659e592016-10-27 18:08:27 +0300758 pyld_sz =
759 sizeof(struct ipa_ioc_del_hdr) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200760 pre_entry * sizeof(struct ipa_hdr_del);
Amir Levy9659e592016-10-27 18:08:27 +0300761 param = kzalloc(pyld_sz, GFP_KERNEL);
762 if (!param) {
763 retval = -ENOMEM;
764 break;
765 }
766 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
767 retval = -EFAULT;
768 break;
769 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200770 /* add check in case user-space module compromised */
771 if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
772 != pre_entry)) {
773 IPAERR("current %d pre %d\n",
774 ((struct ipa_ioc_del_hdr *)param)->num_hdls,
775 pre_entry);
776 retval = -EFAULT;
777 break;
778 }
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200779 if (ipa3_del_hdr_by_user((struct ipa_ioc_del_hdr *)param,
780 true)) {
Amir Levy9659e592016-10-27 18:08:27 +0300781 retval = -EFAULT;
782 break;
783 }
784 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
785 retval = -EFAULT;
786 break;
787 }
788 break;
789
790 case IPA_IOC_ADD_RT_RULE:
791 if (copy_from_user(header, (u8 *)arg,
792 sizeof(struct ipa_ioc_add_rt_rule))) {
793 retval = -EFAULT;
794 break;
795 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200796 pre_entry =
797 ((struct ipa_ioc_add_rt_rule *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +0300798 pyld_sz =
799 sizeof(struct ipa_ioc_add_rt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200800 pre_entry * sizeof(struct ipa_rt_rule_add);
Amir Levy9659e592016-10-27 18:08:27 +0300801 param = kzalloc(pyld_sz, GFP_KERNEL);
802 if (!param) {
803 retval = -ENOMEM;
804 break;
805 }
806 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
807 retval = -EFAULT;
808 break;
809 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200810 /* add check in case user-space module compromised */
811 if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
812 != pre_entry)) {
813 IPAERR("current %d pre %d\n",
814 ((struct ipa_ioc_add_rt_rule *)param)->
815 num_rules,
816 pre_entry);
817 retval = -EFAULT;
818 break;
819 }
Amir Levy9659e592016-10-27 18:08:27 +0300820 if (ipa3_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
821 retval = -EFAULT;
822 break;
823 }
824 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
825 retval = -EFAULT;
826 break;
827 }
828 break;
829 case IPA_IOC_ADD_RT_RULE_AFTER:
830 if (copy_from_user(header, (u8 *)arg,
831 sizeof(struct ipa_ioc_add_rt_rule_after))) {
832
833 retval = -EFAULT;
834 break;
835 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200836 pre_entry =
837 ((struct ipa_ioc_add_rt_rule_after *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +0300838 pyld_sz =
839 sizeof(struct ipa_ioc_add_rt_rule_after) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200840 pre_entry * sizeof(struct ipa_rt_rule_add);
Amir Levy9659e592016-10-27 18:08:27 +0300841 param = kzalloc(pyld_sz, GFP_KERNEL);
842 if (!param) {
843 retval = -ENOMEM;
844 break;
845 }
846 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
847 retval = -EFAULT;
848 break;
849 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200850 /* add check in case user-space module compromised */
851 if (unlikely(((struct ipa_ioc_add_rt_rule_after *)param)->
852 num_rules != pre_entry)) {
853 IPAERR("current %d pre %d\n",
854 ((struct ipa_ioc_add_rt_rule_after *)param)->
855 num_rules,
856 pre_entry);
857 retval = -EFAULT;
858 break;
859 }
Amir Levy9659e592016-10-27 18:08:27 +0300860 if (ipa3_add_rt_rule_after(
861 (struct ipa_ioc_add_rt_rule_after *)param)) {
862
863 retval = -EFAULT;
864 break;
865 }
866 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
867 retval = -EFAULT;
868 break;
869 }
870 break;
871
872 case IPA_IOC_MDFY_RT_RULE:
873 if (copy_from_user(header, (u8 *)arg,
874 sizeof(struct ipa_ioc_mdfy_rt_rule))) {
875 retval = -EFAULT;
876 break;
877 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200878 pre_entry =
879 ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +0300880 pyld_sz =
881 sizeof(struct ipa_ioc_mdfy_rt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200882 pre_entry * sizeof(struct ipa_rt_rule_mdfy);
Amir Levy9659e592016-10-27 18:08:27 +0300883 param = kzalloc(pyld_sz, GFP_KERNEL);
884 if (!param) {
885 retval = -ENOMEM;
886 break;
887 }
888 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
889 retval = -EFAULT;
890 break;
891 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200892 /* add check in case user-space module compromised */
893 if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
894 != pre_entry)) {
895 IPAERR("current %d pre %d\n",
896 ((struct ipa_ioc_mdfy_rt_rule *)param)->
897 num_rules,
898 pre_entry);
899 retval = -EFAULT;
900 break;
901 }
Amir Levy9659e592016-10-27 18:08:27 +0300902 if (ipa3_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) {
903 retval = -EFAULT;
904 break;
905 }
906 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
907 retval = -EFAULT;
908 break;
909 }
910 break;
911
912 case IPA_IOC_DEL_RT_RULE:
913 if (copy_from_user(header, (u8 *)arg,
914 sizeof(struct ipa_ioc_del_rt_rule))) {
915 retval = -EFAULT;
916 break;
917 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200918 pre_entry =
919 ((struct ipa_ioc_del_rt_rule *)header)->num_hdls;
Amir Levy9659e592016-10-27 18:08:27 +0300920 pyld_sz =
921 sizeof(struct ipa_ioc_del_rt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200922 pre_entry * sizeof(struct ipa_rt_rule_del);
Amir Levy9659e592016-10-27 18:08:27 +0300923 param = kzalloc(pyld_sz, GFP_KERNEL);
924 if (!param) {
925 retval = -ENOMEM;
926 break;
927 }
928 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
929 retval = -EFAULT;
930 break;
931 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200932 /* add check in case user-space module compromised */
933 if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
934 != pre_entry)) {
935 IPAERR("current %d pre %d\n",
936 ((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
937 pre_entry);
938 retval = -EFAULT;
939 break;
940 }
Amir Levy9659e592016-10-27 18:08:27 +0300941 if (ipa3_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
942 retval = -EFAULT;
943 break;
944 }
945 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
946 retval = -EFAULT;
947 break;
948 }
949 break;
950
951 case IPA_IOC_ADD_FLT_RULE:
952 if (copy_from_user(header, (u8 *)arg,
953 sizeof(struct ipa_ioc_add_flt_rule))) {
954 retval = -EFAULT;
955 break;
956 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200957 pre_entry =
958 ((struct ipa_ioc_add_flt_rule *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +0300959 pyld_sz =
960 sizeof(struct ipa_ioc_add_flt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200961 pre_entry * sizeof(struct ipa_flt_rule_add);
Amir Levy9659e592016-10-27 18:08:27 +0300962 param = kzalloc(pyld_sz, GFP_KERNEL);
963 if (!param) {
964 retval = -ENOMEM;
965 break;
966 }
967 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
968 retval = -EFAULT;
969 break;
970 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200971 /* add check in case user-space module compromised */
972 if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
973 != pre_entry)) {
974 IPAERR("current %d pre %d\n",
975 ((struct ipa_ioc_add_flt_rule *)param)->
976 num_rules,
977 pre_entry);
978 retval = -EFAULT;
979 break;
980 }
Amir Levy9659e592016-10-27 18:08:27 +0300981 if (ipa3_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
982 retval = -EFAULT;
983 break;
984 }
985 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
986 retval = -EFAULT;
987 break;
988 }
989 break;
990
991 case IPA_IOC_ADD_FLT_RULE_AFTER:
992 if (copy_from_user(header, (u8 *)arg,
993 sizeof(struct ipa_ioc_add_flt_rule_after))) {
994
995 retval = -EFAULT;
996 break;
997 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200998 pre_entry =
999 ((struct ipa_ioc_add_flt_rule_after *)header)->
1000 num_rules;
Amir Levy9659e592016-10-27 18:08:27 +03001001 pyld_sz =
1002 sizeof(struct ipa_ioc_add_flt_rule_after) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001003 pre_entry * sizeof(struct ipa_flt_rule_add);
Amir Levy9659e592016-10-27 18:08:27 +03001004 param = kzalloc(pyld_sz, GFP_KERNEL);
1005 if (!param) {
1006 retval = -ENOMEM;
1007 break;
1008 }
1009 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1010 retval = -EFAULT;
1011 break;
1012 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001013 /* add check in case user-space module compromised */
1014 if (unlikely(((struct ipa_ioc_add_flt_rule_after *)param)->
1015 num_rules != pre_entry)) {
1016 IPAERR("current %d pre %d\n",
1017 ((struct ipa_ioc_add_flt_rule_after *)param)->
1018 num_rules,
1019 pre_entry);
1020 retval = -EFAULT;
1021 break;
1022 }
Amir Levy9659e592016-10-27 18:08:27 +03001023 if (ipa3_add_flt_rule_after(
1024 (struct ipa_ioc_add_flt_rule_after *)param)) {
1025 retval = -EFAULT;
1026 break;
1027 }
1028 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1029 retval = -EFAULT;
1030 break;
1031 }
1032 break;
1033
1034 case IPA_IOC_DEL_FLT_RULE:
1035 if (copy_from_user(header, (u8 *)arg,
1036 sizeof(struct ipa_ioc_del_flt_rule))) {
1037 retval = -EFAULT;
1038 break;
1039 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001040 pre_entry =
1041 ((struct ipa_ioc_del_flt_rule *)header)->num_hdls;
Amir Levy9659e592016-10-27 18:08:27 +03001042 pyld_sz =
1043 sizeof(struct ipa_ioc_del_flt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001044 pre_entry * sizeof(struct ipa_flt_rule_del);
Amir Levy9659e592016-10-27 18:08:27 +03001045 param = kzalloc(pyld_sz, GFP_KERNEL);
1046 if (!param) {
1047 retval = -ENOMEM;
1048 break;
1049 }
1050 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1051 retval = -EFAULT;
1052 break;
1053 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001054 /* add check in case user-space module compromised */
1055 if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
1056 != pre_entry)) {
1057 IPAERR("current %d pre %d\n",
1058 ((struct ipa_ioc_del_flt_rule *)param)->
1059 num_hdls,
1060 pre_entry);
1061 retval = -EFAULT;
1062 break;
1063 }
Amir Levy9659e592016-10-27 18:08:27 +03001064 if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
1065 retval = -EFAULT;
1066 break;
1067 }
1068 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1069 retval = -EFAULT;
1070 break;
1071 }
1072 break;
1073
1074 case IPA_IOC_MDFY_FLT_RULE:
1075 if (copy_from_user(header, (u8 *)arg,
1076 sizeof(struct ipa_ioc_mdfy_flt_rule))) {
1077 retval = -EFAULT;
1078 break;
1079 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001080 pre_entry =
1081 ((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +03001082 pyld_sz =
1083 sizeof(struct ipa_ioc_mdfy_flt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001084 pre_entry * sizeof(struct ipa_flt_rule_mdfy);
Amir Levy9659e592016-10-27 18:08:27 +03001085 param = kzalloc(pyld_sz, GFP_KERNEL);
1086 if (!param) {
1087 retval = -ENOMEM;
1088 break;
1089 }
1090 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1091 retval = -EFAULT;
1092 break;
1093 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001094 /* add check in case user-space module compromised */
1095 if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
1096 != pre_entry)) {
1097 IPAERR("current %d pre %d\n",
1098 ((struct ipa_ioc_mdfy_flt_rule *)param)->
1099 num_rules,
1100 pre_entry);
1101 retval = -EFAULT;
1102 break;
1103 }
Amir Levy9659e592016-10-27 18:08:27 +03001104 if (ipa3_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) {
1105 retval = -EFAULT;
1106 break;
1107 }
1108 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1109 retval = -EFAULT;
1110 break;
1111 }
1112 break;
1113
1114 case IPA_IOC_COMMIT_HDR:
1115 retval = ipa3_commit_hdr();
1116 break;
1117 case IPA_IOC_RESET_HDR:
1118 retval = ipa3_reset_hdr();
1119 break;
1120 case IPA_IOC_COMMIT_RT:
1121 retval = ipa3_commit_rt(arg);
1122 break;
1123 case IPA_IOC_RESET_RT:
1124 retval = ipa3_reset_rt(arg);
1125 break;
1126 case IPA_IOC_COMMIT_FLT:
1127 retval = ipa3_commit_flt(arg);
1128 break;
1129 case IPA_IOC_RESET_FLT:
1130 retval = ipa3_reset_flt(arg);
1131 break;
1132 case IPA_IOC_GET_RT_TBL:
1133 if (copy_from_user(header, (u8 *)arg,
1134 sizeof(struct ipa_ioc_get_rt_tbl))) {
1135 retval = -EFAULT;
1136 break;
1137 }
1138 if (ipa3_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) {
1139 retval = -EFAULT;
1140 break;
1141 }
1142 if (copy_to_user((u8 *)arg, header,
1143 sizeof(struct ipa_ioc_get_rt_tbl))) {
1144 retval = -EFAULT;
1145 break;
1146 }
1147 break;
1148 case IPA_IOC_PUT_RT_TBL:
1149 retval = ipa3_put_rt_tbl(arg);
1150 break;
1151 case IPA_IOC_GET_HDR:
1152 if (copy_from_user(header, (u8 *)arg,
1153 sizeof(struct ipa_ioc_get_hdr))) {
1154 retval = -EFAULT;
1155 break;
1156 }
1157 if (ipa3_get_hdr((struct ipa_ioc_get_hdr *)header)) {
1158 retval = -EFAULT;
1159 break;
1160 }
1161 if (copy_to_user((u8 *)arg, header,
1162 sizeof(struct ipa_ioc_get_hdr))) {
1163 retval = -EFAULT;
1164 break;
1165 }
1166 break;
1167 case IPA_IOC_PUT_HDR:
1168 retval = ipa3_put_hdr(arg);
1169 break;
1170 case IPA_IOC_SET_FLT:
1171 retval = ipa3_cfg_filter(arg);
1172 break;
1173 case IPA_IOC_COPY_HDR:
1174 if (copy_from_user(header, (u8 *)arg,
1175 sizeof(struct ipa_ioc_copy_hdr))) {
1176 retval = -EFAULT;
1177 break;
1178 }
1179 if (ipa3_copy_hdr((struct ipa_ioc_copy_hdr *)header)) {
1180 retval = -EFAULT;
1181 break;
1182 }
1183 if (copy_to_user((u8 *)arg, header,
1184 sizeof(struct ipa_ioc_copy_hdr))) {
1185 retval = -EFAULT;
1186 break;
1187 }
1188 break;
1189 case IPA_IOC_QUERY_INTF:
1190 if (copy_from_user(header, (u8 *)arg,
1191 sizeof(struct ipa_ioc_query_intf))) {
1192 retval = -EFAULT;
1193 break;
1194 }
1195 if (ipa3_query_intf((struct ipa_ioc_query_intf *)header)) {
1196 retval = -1;
1197 break;
1198 }
1199 if (copy_to_user((u8 *)arg, header,
1200 sizeof(struct ipa_ioc_query_intf))) {
1201 retval = -EFAULT;
1202 break;
1203 }
1204 break;
1205 case IPA_IOC_QUERY_INTF_TX_PROPS:
1206 sz = sizeof(struct ipa_ioc_query_intf_tx_props);
1207 if (copy_from_user(header, (u8 *)arg, sz)) {
1208 retval = -EFAULT;
1209 break;
1210 }
1211
1212 if (((struct ipa_ioc_query_intf_tx_props *)header)->num_tx_props
1213 > IPA_NUM_PROPS_MAX) {
1214 retval = -EFAULT;
1215 break;
1216 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001217 pre_entry =
1218 ((struct ipa_ioc_query_intf_tx_props *)
1219 header)->num_tx_props;
1220 pyld_sz = sz + pre_entry *
Amir Levy9659e592016-10-27 18:08:27 +03001221 sizeof(struct ipa_ioc_tx_intf_prop);
1222 param = kzalloc(pyld_sz, GFP_KERNEL);
1223 if (!param) {
1224 retval = -ENOMEM;
1225 break;
1226 }
1227 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1228 retval = -EFAULT;
1229 break;
1230 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001231 /* add check in case user-space module compromised */
1232 if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
1233 param)->num_tx_props
1234 != pre_entry)) {
1235 IPAERR("current %d pre %d\n",
1236 ((struct ipa_ioc_query_intf_tx_props *)
1237 param)->num_tx_props, pre_entry);
1238 retval = -EFAULT;
1239 break;
1240 }
Amir Levy9659e592016-10-27 18:08:27 +03001241 if (ipa3_query_intf_tx_props(
1242 (struct ipa_ioc_query_intf_tx_props *)param)) {
1243 retval = -1;
1244 break;
1245 }
1246 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1247 retval = -EFAULT;
1248 break;
1249 }
1250 break;
1251 case IPA_IOC_QUERY_INTF_RX_PROPS:
1252 sz = sizeof(struct ipa_ioc_query_intf_rx_props);
1253 if (copy_from_user(header, (u8 *)arg, sz)) {
1254 retval = -EFAULT;
1255 break;
1256 }
1257
1258 if (((struct ipa_ioc_query_intf_rx_props *)header)->num_rx_props
1259 > IPA_NUM_PROPS_MAX) {
1260 retval = -EFAULT;
1261 break;
1262 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001263 pre_entry =
1264 ((struct ipa_ioc_query_intf_rx_props *)
1265 header)->num_rx_props;
1266 pyld_sz = sz + pre_entry *
Amir Levy9659e592016-10-27 18:08:27 +03001267 sizeof(struct ipa_ioc_rx_intf_prop);
1268 param = kzalloc(pyld_sz, GFP_KERNEL);
1269 if (!param) {
1270 retval = -ENOMEM;
1271 break;
1272 }
1273 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1274 retval = -EFAULT;
1275 break;
1276 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001277 /* add check in case user-space module compromised */
1278 if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
1279 param)->num_rx_props != pre_entry)) {
1280 IPAERR("current %d pre %d\n",
1281 ((struct ipa_ioc_query_intf_rx_props *)
1282 param)->num_rx_props, pre_entry);
1283 retval = -EFAULT;
1284 break;
1285 }
Amir Levy9659e592016-10-27 18:08:27 +03001286 if (ipa3_query_intf_rx_props(
1287 (struct ipa_ioc_query_intf_rx_props *)param)) {
1288 retval = -1;
1289 break;
1290 }
1291 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1292 retval = -EFAULT;
1293 break;
1294 }
1295 break;
1296 case IPA_IOC_QUERY_INTF_EXT_PROPS:
1297 sz = sizeof(struct ipa_ioc_query_intf_ext_props);
1298 if (copy_from_user(header, (u8 *)arg, sz)) {
1299 retval = -EFAULT;
1300 break;
1301 }
1302
1303 if (((struct ipa_ioc_query_intf_ext_props *)
1304 header)->num_ext_props > IPA_NUM_PROPS_MAX) {
1305 retval = -EFAULT;
1306 break;
1307 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001308 pre_entry =
1309 ((struct ipa_ioc_query_intf_ext_props *)
1310 header)->num_ext_props;
1311 pyld_sz = sz + pre_entry *
Amir Levy9659e592016-10-27 18:08:27 +03001312 sizeof(struct ipa_ioc_ext_intf_prop);
1313 param = kzalloc(pyld_sz, GFP_KERNEL);
1314 if (!param) {
1315 retval = -ENOMEM;
1316 break;
1317 }
1318 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1319 retval = -EFAULT;
1320 break;
1321 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001322 /* add check in case user-space module compromised */
1323 if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
1324 param)->num_ext_props != pre_entry)) {
1325 IPAERR("current %d pre %d\n",
1326 ((struct ipa_ioc_query_intf_ext_props *)
1327 param)->num_ext_props, pre_entry);
1328 retval = -EFAULT;
1329 break;
1330 }
Amir Levy9659e592016-10-27 18:08:27 +03001331 if (ipa3_query_intf_ext_props(
1332 (struct ipa_ioc_query_intf_ext_props *)param)) {
1333 retval = -1;
1334 break;
1335 }
1336 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1337 retval = -EFAULT;
1338 break;
1339 }
1340 break;
1341 case IPA_IOC_PULL_MSG:
1342 if (copy_from_user(header, (u8 *)arg,
1343 sizeof(struct ipa_msg_meta))) {
1344 retval = -EFAULT;
1345 break;
1346 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001347 pre_entry =
Amir Levy9659e592016-10-27 18:08:27 +03001348 ((struct ipa_msg_meta *)header)->msg_len;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001349 pyld_sz = sizeof(struct ipa_msg_meta) +
1350 pre_entry;
Amir Levy9659e592016-10-27 18:08:27 +03001351 param = kzalloc(pyld_sz, GFP_KERNEL);
1352 if (!param) {
1353 retval = -ENOMEM;
1354 break;
1355 }
1356 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1357 retval = -EFAULT;
1358 break;
1359 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001360 /* add check in case user-space module compromised */
1361 if (unlikely(((struct ipa_msg_meta *)param)->msg_len
1362 != pre_entry)) {
1363 IPAERR("current %d pre %d\n",
1364 ((struct ipa_msg_meta *)param)->msg_len,
1365 pre_entry);
1366 retval = -EFAULT;
1367 break;
1368 }
Amir Levy9659e592016-10-27 18:08:27 +03001369 if (ipa3_pull_msg((struct ipa_msg_meta *)param,
1370 (char *)param + sizeof(struct ipa_msg_meta),
1371 ((struct ipa_msg_meta *)param)->msg_len) !=
1372 ((struct ipa_msg_meta *)param)->msg_len) {
1373 retval = -1;
1374 break;
1375 }
1376 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1377 retval = -EFAULT;
1378 break;
1379 }
1380 break;
1381 case IPA_IOC_RM_ADD_DEPENDENCY:
1382 if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
1383 sizeof(struct ipa_ioc_rm_dependency))) {
1384 retval = -EFAULT;
1385 break;
1386 }
1387 retval = ipa_rm_add_dependency_from_ioctl(
1388 rm_depend.resource_name, rm_depend.depends_on_name);
1389 break;
1390 case IPA_IOC_RM_DEL_DEPENDENCY:
1391 if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
1392 sizeof(struct ipa_ioc_rm_dependency))) {
1393 retval = -EFAULT;
1394 break;
1395 }
1396 retval = ipa_rm_delete_dependency_from_ioctl(
1397 rm_depend.resource_name, rm_depend.depends_on_name);
1398 break;
1399 case IPA_IOC_GENERATE_FLT_EQ:
1400 {
1401 struct ipa_ioc_generate_flt_eq flt_eq;
1402
1403 if (copy_from_user(&flt_eq, (u8 *)arg,
1404 sizeof(struct ipa_ioc_generate_flt_eq))) {
1405 retval = -EFAULT;
1406 break;
1407 }
1408 if (ipahal_flt_generate_equation(flt_eq.ip,
1409 &flt_eq.attrib, &flt_eq.eq_attrib)) {
1410 retval = -EFAULT;
1411 break;
1412 }
1413 if (copy_to_user((u8 *)arg, &flt_eq,
1414 sizeof(struct ipa_ioc_generate_flt_eq))) {
1415 retval = -EFAULT;
1416 break;
1417 }
1418 break;
1419 }
1420 case IPA_IOC_QUERY_EP_MAPPING:
1421 {
1422 retval = ipa3_get_ep_mapping(arg);
1423 break;
1424 }
1425 case IPA_IOC_QUERY_RT_TBL_INDEX:
1426 if (copy_from_user(header, (u8 *)arg,
1427 sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
1428 retval = -EFAULT;
1429 break;
1430 }
1431 if (ipa3_query_rt_index(
1432 (struct ipa_ioc_get_rt_tbl_indx *)header)) {
1433 retval = -EFAULT;
1434 break;
1435 }
1436 if (copy_to_user((u8 *)arg, header,
1437 sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
1438 retval = -EFAULT;
1439 break;
1440 }
1441 break;
1442 case IPA_IOC_WRITE_QMAPID:
1443 if (copy_from_user(header, (u8 *)arg,
1444 sizeof(struct ipa_ioc_write_qmapid))) {
1445 retval = -EFAULT;
1446 break;
1447 }
1448 if (ipa3_write_qmap_id((struct ipa_ioc_write_qmapid *)header)) {
1449 retval = -EFAULT;
1450 break;
1451 }
1452 if (copy_to_user((u8 *)arg, header,
1453 sizeof(struct ipa_ioc_write_qmapid))) {
1454 retval = -EFAULT;
1455 break;
1456 }
1457 break;
1458 case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD:
1459 retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD);
1460 if (retval) {
1461 IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
1462 break;
1463 }
1464 break;
1465 case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL:
1466 retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL);
1467 if (retval) {
1468 IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
1469 break;
1470 }
1471 break;
1472 case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED:
1473 retval = ipa3_send_wan_msg(arg, WAN_EMBMS_CONNECT);
1474 if (retval) {
1475 IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
1476 break;
1477 }
1478 break;
1479 case IPA_IOC_ADD_HDR_PROC_CTX:
1480 if (copy_from_user(header, (u8 *)arg,
1481 sizeof(struct ipa_ioc_add_hdr_proc_ctx))) {
1482 retval = -EFAULT;
1483 break;
1484 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001485 pre_entry =
1486 ((struct ipa_ioc_add_hdr_proc_ctx *)
1487 header)->num_proc_ctxs;
Amir Levy9659e592016-10-27 18:08:27 +03001488 pyld_sz =
1489 sizeof(struct ipa_ioc_add_hdr_proc_ctx) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001490 pre_entry * sizeof(struct ipa_hdr_proc_ctx_add);
Amir Levy9659e592016-10-27 18:08:27 +03001491 param = kzalloc(pyld_sz, GFP_KERNEL);
1492 if (!param) {
1493 retval = -ENOMEM;
1494 break;
1495 }
1496 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1497 retval = -EFAULT;
1498 break;
1499 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001500 /* add check in case user-space module compromised */
1501 if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
1502 param)->num_proc_ctxs != pre_entry)) {
1503 IPAERR("current %d pre %d\n",
1504 ((struct ipa_ioc_add_hdr_proc_ctx *)
1505 param)->num_proc_ctxs, pre_entry);
1506 retval = -EFAULT;
1507 break;
1508 }
Amir Levy9659e592016-10-27 18:08:27 +03001509 if (ipa3_add_hdr_proc_ctx(
1510 (struct ipa_ioc_add_hdr_proc_ctx *)param)) {
1511 retval = -EFAULT;
1512 break;
1513 }
1514 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1515 retval = -EFAULT;
1516 break;
1517 }
1518 break;
1519 case IPA_IOC_DEL_HDR_PROC_CTX:
1520 if (copy_from_user(header, (u8 *)arg,
1521 sizeof(struct ipa_ioc_del_hdr_proc_ctx))) {
1522 retval = -EFAULT;
1523 break;
1524 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001525 pre_entry =
1526 ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls;
Amir Levy9659e592016-10-27 18:08:27 +03001527 pyld_sz =
1528 sizeof(struct ipa_ioc_del_hdr_proc_ctx) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001529 pre_entry * sizeof(struct ipa_hdr_proc_ctx_del);
Amir Levy9659e592016-10-27 18:08:27 +03001530 param = kzalloc(pyld_sz, GFP_KERNEL);
1531 if (!param) {
1532 retval = -ENOMEM;
1533 break;
1534 }
1535 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1536 retval = -EFAULT;
1537 break;
1538 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001539 /* add check in case user-space module compromised */
1540 if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
1541 param)->num_hdls != pre_entry)) {
1542 IPAERR("current %d pre %d\n",
1543 ((struct ipa_ioc_del_hdr_proc_ctx *)param)->
1544 num_hdls,
1545 pre_entry);
1546 retval = -EFAULT;
1547 break;
1548 }
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001549 if (ipa3_del_hdr_proc_ctx_by_user(
1550 (struct ipa_ioc_del_hdr_proc_ctx *)param, true)) {
Amir Levy9659e592016-10-27 18:08:27 +03001551 retval = -EFAULT;
1552 break;
1553 }
1554 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1555 retval = -EFAULT;
1556 break;
1557 }
1558 break;
1559
1560 case IPA_IOC_GET_HW_VERSION:
1561 pyld_sz = sizeof(enum ipa_hw_type);
1562 param = kzalloc(pyld_sz, GFP_KERNEL);
1563 if (!param) {
1564 retval = -ENOMEM;
1565 break;
1566 }
1567 memcpy(param, &ipa3_ctx->ipa_hw_type, pyld_sz);
1568 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1569 retval = -EFAULT;
1570 break;
1571 }
1572 break;
1573
1574 default: /* redundant, as cmd was checked against MAXNR */
1575 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1576 return -ENOTTY;
1577 }
1578 kfree(param);
1579 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1580
1581 return retval;
1582}
1583
1584/**
1585* ipa3_setup_dflt_rt_tables() - Setup default routing tables
1586*
1587* Return codes:
1588* 0: success
1589* -ENOMEM: failed to allocate memory
1590* -EPERM: failed to add the tables
1591*/
1592int ipa3_setup_dflt_rt_tables(void)
1593{
1594 struct ipa_ioc_add_rt_rule *rt_rule;
1595 struct ipa_rt_rule_add *rt_rule_entry;
1596
1597 rt_rule =
1598 kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
1599 sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
1600 if (!rt_rule) {
1601 IPAERR("fail to alloc mem\n");
1602 return -ENOMEM;
1603 }
1604 /* setup a default v4 route to point to Apps */
1605 rt_rule->num_rules = 1;
1606 rt_rule->commit = 1;
1607 rt_rule->ip = IPA_IP_v4;
1608 strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
1609 IPA_RESOURCE_NAME_MAX);
1610
1611 rt_rule_entry = &rt_rule->rules[0];
1612 rt_rule_entry->at_rear = 1;
1613 rt_rule_entry->rule.dst = IPA_CLIENT_APPS_LAN_CONS;
1614 rt_rule_entry->rule.hdr_hdl = ipa3_ctx->excp_hdr_hdl;
1615 rt_rule_entry->rule.retain_hdr = 1;
1616
1617 if (ipa3_add_rt_rule(rt_rule)) {
1618 IPAERR("fail to add dflt v4 rule\n");
1619 kfree(rt_rule);
1620 return -EPERM;
1621 }
1622 IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
1623 ipa3_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
1624
1625 /* setup a default v6 route to point to A5 */
1626 rt_rule->ip = IPA_IP_v6;
1627 if (ipa3_add_rt_rule(rt_rule)) {
1628 IPAERR("fail to add dflt v6 rule\n");
1629 kfree(rt_rule);
1630 return -EPERM;
1631 }
1632 IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
1633 ipa3_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
1634
1635 /*
1636 * because these tables are the very first to be added, they will both
1637 * have the same index (0) which is essential for programming the
1638 * "route" end-point config
1639 */
1640
1641 kfree(rt_rule);
1642
1643 return 0;
1644}
1645
1646static int ipa3_setup_exception_path(void)
1647{
1648 struct ipa_ioc_add_hdr *hdr;
1649 struct ipa_hdr_add *hdr_entry;
1650 struct ipahal_reg_route route = { 0 };
1651 int ret;
1652
1653 /* install the basic exception header */
1654 hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
1655 sizeof(struct ipa_hdr_add), GFP_KERNEL);
1656 if (!hdr) {
1657 IPAERR("fail to alloc exception hdr\n");
1658 return -ENOMEM;
1659 }
1660 hdr->num_hdrs = 1;
1661 hdr->commit = 1;
1662 hdr_entry = &hdr->hdr[0];
1663
1664 strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, IPA_RESOURCE_NAME_MAX);
1665 hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH;
1666
1667 if (ipa3_add_hdr(hdr)) {
1668 IPAERR("fail to add exception hdr\n");
1669 ret = -EPERM;
1670 goto bail;
1671 }
1672
1673 if (hdr_entry->status) {
1674 IPAERR("fail to add exception hdr\n");
1675 ret = -EPERM;
1676 goto bail;
1677 }
1678
1679 ipa3_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
1680
1681 /* set the route register to pass exception packets to Apps */
1682 route.route_def_pipe = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
1683 route.route_frag_def_pipe = ipa3_get_ep_mapping(
1684 IPA_CLIENT_APPS_LAN_CONS);
1685 route.route_def_hdr_table = !ipa3_ctx->hdr_tbl_lcl;
1686 route.route_def_retain_hdr = 1;
1687
1688 if (ipa3_cfg_route(&route)) {
1689 IPAERR("fail to add exception hdr\n");
1690 ret = -EPERM;
1691 goto bail;
1692 }
1693
1694 ret = 0;
1695bail:
1696 kfree(hdr);
1697 return ret;
1698}
1699
1700static int ipa3_init_smem_region(int memory_region_size,
1701 int memory_region_offset)
1702{
1703 struct ipahal_imm_cmd_dma_shared_mem cmd;
1704 struct ipahal_imm_cmd_pyld *cmd_pyld;
1705 struct ipa3_desc desc;
1706 struct ipa_mem_buffer mem;
1707 int rc;
1708
1709 if (memory_region_size == 0)
1710 return 0;
1711
1712 memset(&desc, 0, sizeof(desc));
1713 memset(&cmd, 0, sizeof(cmd));
1714 memset(&mem, 0, sizeof(mem));
1715
1716 mem.size = memory_region_size;
1717 mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
1718 &mem.phys_base, GFP_KERNEL);
1719 if (!mem.base) {
1720 IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
1721 return -ENOMEM;
1722 }
1723
1724 memset(mem.base, 0, mem.size);
1725 cmd.is_read = false;
1726 cmd.skip_pipeline_clear = false;
1727 cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
1728 cmd.size = mem.size;
1729 cmd.system_addr = mem.phys_base;
1730 cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
1731 memory_region_offset;
1732 cmd_pyld = ipahal_construct_imm_cmd(
1733 IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
1734 if (!cmd_pyld) {
1735 IPAERR("failed to construct dma_shared_mem imm cmd\n");
1736 return -ENOMEM;
1737 }
1738 desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
1739 desc.pyld = cmd_pyld->data;
1740 desc.len = cmd_pyld->len;
1741 desc.type = IPA_IMM_CMD_DESC;
1742
1743 rc = ipa3_send_cmd(1, &desc);
1744 if (rc) {
1745 IPAERR("failed to send immediate command (error %d)\n", rc);
1746 rc = -EFAULT;
1747 }
1748
1749 ipahal_destroy_imm_cmd(cmd_pyld);
1750 dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
1751 mem.phys_base);
1752
1753 return rc;
1754}
1755
1756/**
1757* ipa3_init_q6_smem() - Initialize Q6 general memory and
1758* header memory regions in IPA.
1759*
1760* Return codes:
1761* 0: success
1762* -ENOMEM: failed to allocate dma memory
1763* -EFAULT: failed to send IPA command to initialize the memory
1764*/
1765int ipa3_init_q6_smem(void)
1766{
1767 int rc;
1768
1769 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
1770
1771 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_size),
1772 IPA_MEM_PART(modem_ofst));
1773 if (rc) {
1774 IPAERR("failed to initialize Modem RAM memory\n");
1775 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1776 return rc;
1777 }
1778
1779 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_size),
1780 IPA_MEM_PART(modem_hdr_ofst));
1781 if (rc) {
1782 IPAERR("failed to initialize Modem HDRs RAM memory\n");
1783 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1784 return rc;
1785 }
1786
1787 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_proc_ctx_size),
1788 IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
1789 if (rc) {
1790 IPAERR("failed to initialize Modem proc ctx RAM memory\n");
1791 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1792 return rc;
1793 }
1794
1795 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_comp_decomp_size),
1796 IPA_MEM_PART(modem_comp_decomp_ofst));
1797 if (rc) {
1798 IPAERR("failed to initialize Modem Comp/Decomp RAM memory\n");
1799 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1800 return rc;
1801 }
1802 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1803
1804 return rc;
1805}
1806
/*
 * Release the immediate-command payload carried in user1; user2 is
 * unused. The (void *, int) signature suggests this is a descriptor
 * completion callback - see callers.
 */
static void ipa3_destroy_imm(void *user1, int user2)
{
	struct ipahal_imm_cmd_pyld *pyld = user1;

	ipahal_destroy_imm_cmd(pyld);
}
1811
1812static void ipa3_q6_pipe_delay(bool delay)
1813{
1814 int client_idx;
1815 int ep_idx;
1816 struct ipa_ep_cfg_ctrl ep_ctrl;
1817
1818 memset(&ep_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
1819 ep_ctrl.ipa_ep_delay = delay;
1820
1821 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
1822 if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
1823 ep_idx = ipa3_get_ep_mapping(client_idx);
1824 if (ep_idx == -1)
1825 continue;
1826
1827 ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n,
1828 ep_idx, &ep_ctrl);
1829 }
1830 }
1831}
1832
1833static void ipa3_q6_avoid_holb(void)
1834{
1835 int ep_idx;
1836 int client_idx;
1837 struct ipa_ep_cfg_ctrl ep_suspend;
1838 struct ipa_ep_cfg_holb ep_holb;
1839
1840 memset(&ep_suspend, 0, sizeof(ep_suspend));
1841 memset(&ep_holb, 0, sizeof(ep_holb));
1842
1843 ep_suspend.ipa_ep_suspend = true;
1844 ep_holb.tmr_val = 0;
1845 ep_holb.en = 1;
1846
1847 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
1848 if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
1849 ep_idx = ipa3_get_ep_mapping(client_idx);
1850 if (ep_idx == -1)
1851 continue;
1852
1853 /*
1854 * ipa3_cfg_ep_holb is not used here because we are
1855 * setting HOLB on Q6 pipes, and from APPS perspective
1856 * they are not valid, therefore, the above function
1857 * will fail.
1858 */
1859 ipahal_write_reg_n_fields(
1860 IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
1861 ep_idx, &ep_holb);
1862 ipahal_write_reg_n_fields(
1863 IPA_ENDP_INIT_HOL_BLOCK_EN_n,
1864 ep_idx, &ep_holb);
1865
1866 ipahal_write_reg_n_fields(
1867 IPA_ENDP_INIT_CTRL_n,
1868 ep_idx, &ep_suspend);
1869 }
1870 }
1871}
1872
Skylar Chang94692c92017-03-01 09:07:11 -08001873static void ipa3_halt_q6_cons_gsi_channels(void)
1874{
1875 int ep_idx;
1876 int client_idx;
1877 const struct ipa_gsi_ep_config *gsi_ep_cfg;
1878 int ret;
1879 int code = 0;
1880
1881 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
1882 if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
1883 ep_idx = ipa3_get_ep_mapping(client_idx);
1884 if (ep_idx == -1)
1885 continue;
1886
1887 gsi_ep_cfg = ipa3_get_gsi_ep_info(ep_idx);
1888 if (!gsi_ep_cfg) {
1889 IPAERR("failed to get GSI config\n");
1890 ipa_assert();
1891 return;
1892 }
1893
1894 ret = gsi_halt_channel_ee(
1895 gsi_ep_cfg->ipa_gsi_chan_num, gsi_ep_cfg->ee,
1896 &code);
1897 if (ret == GSI_STATUS_SUCCESS)
1898 IPADBG("halted gsi ch %d ee %d with code %d\n",
1899 gsi_ep_cfg->ipa_gsi_chan_num,
1900 gsi_ep_cfg->ee,
1901 code);
1902 else
1903 IPAERR("failed to halt ch %d ee %d code %d\n",
1904 gsi_ep_cfg->ipa_gsi_chan_num,
1905 gsi_ep_cfg->ee,
1906 code);
1907 }
1908 }
1909}
1910
1911
/*
 * ipa3_q6_clean_q6_flt_tbls() - point the filter-table headers of pipes not
 * configured by the AP at a freshly generated empty table image, for the
 * given IP family / rule type, via batched DMA_SHARED_MEM immediate commands.
 * @ip: IP family whose filter tables are cleaned (v4/v6)
 * @rlt: rule type (hashable / non-hashable)
 *
 * Return: 0 on success, -EINVAL on bad arguments, -ENOMEM on allocation
 * failure, -EFAULT if sending the immediate commands failed.
 */
static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
	enum ipa_rule_type rlt)
{
	struct ipa3_desc *desc;
	struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
	struct ipahal_imm_cmd_pyld **cmd_pyld;
	int retval = 0;
	int pipe_idx;
	int flt_idx = 0;	/* index among filter-capable pipes only */
	int num_cmds = 0;	/* commands actually constructed */
	int index;
	u32 lcl_addr_mem_part;	/* base offset of the chosen flt partition */
	u32 lcl_hdr_sz;		/* header size of the chosen flt partition */
	struct ipa_mem_buffer mem;

	IPADBG("Entry\n");

	if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
		IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
		return -EINVAL;
	}

	/* Up to filtering pipes we have filtering tables */
	desc = kcalloc(ipa3_ctx->ep_flt_num, sizeof(struct ipa3_desc),
		GFP_KERNEL);
	if (!desc) {
		IPAERR("failed to allocate memory\n");
		return -ENOMEM;
	}

	/* one payload pointer per possible command */
	cmd_pyld = kcalloc(ipa3_ctx->ep_flt_num,
		sizeof(struct ipahal_imm_cmd_pyld *), GFP_KERNEL);
	if (!cmd_pyld) {
		IPAERR("failed to allocate memory\n");
		retval = -ENOMEM;
		goto free_desc;
	}

	/* select the shared-memory partition matching (ip, rlt) */
	if (ip == IPA_IP_v4) {
		if (rlt == IPA_RULE_HASHABLE) {
			lcl_addr_mem_part = IPA_MEM_PART(v4_flt_hash_ofst);
			lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
		} else {
			lcl_addr_mem_part = IPA_MEM_PART(v4_flt_nhash_ofst);
			lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
		}
	} else {
		if (rlt == IPA_RULE_HASHABLE) {
			lcl_addr_mem_part = IPA_MEM_PART(v6_flt_hash_ofst);
			lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
		} else {
			lcl_addr_mem_part = IPA_MEM_PART(v6_flt_nhash_ofst);
			lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
		}
	}

	/* single empty table image shared by all commands below */
	retval = ipahal_flt_generate_empty_img(1, lcl_hdr_sz, lcl_hdr_sz,
		0, &mem, true);
	if (retval) {
		IPAERR("failed to generate flt single tbl empty img\n");
		goto free_cmd_pyld;
	}

	for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes; pipe_idx++) {
		if (!ipa_is_ep_support_flt(pipe_idx))
			continue;

		/*
		 * Iterating over all the filtering pipes which are either
		 * invalid but connected or connected but not configured by AP.
		 */
		if (!ipa3_ctx->ep[pipe_idx].valid ||
		    ipa3_ctx->ep[pipe_idx].skip_ep_cfg) {

			cmd.is_read = false;
			cmd.skip_pipeline_clear = false;
			cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
			cmd.size = mem.size;
			cmd.system_addr = mem.phys_base;
			/*
			 * destination: this pipe's header slot inside the
			 * partition, one hw-header-width past the base
			 */
			cmd.local_addr =
				ipa3_ctx->smem_restricted_bytes +
				lcl_addr_mem_part +
				ipahal_get_hw_tbl_hdr_width() +
				flt_idx * ipahal_get_hw_tbl_hdr_width();
			cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
			if (!cmd_pyld[num_cmds]) {
				IPAERR("fail construct dma_shared_mem cmd\n");
				retval = -ENOMEM;
				goto free_empty_img;
			}
			desc[num_cmds].opcode = ipahal_imm_cmd_get_opcode(
				IPA_IMM_CMD_DMA_SHARED_MEM);
			desc[num_cmds].pyld = cmd_pyld[num_cmds]->data;
			desc[num_cmds].len = cmd_pyld[num_cmds]->len;
			desc[num_cmds].type = IPA_IMM_CMD_DESC;
			num_cmds++;
		}

		flt_idx++;
	}

	IPADBG("Sending %d descriptors for flt tbl clearing\n", num_cmds);
	retval = ipa3_send_cmd(num_cmds, desc);
	if (retval) {
		IPAERR("failed to send immediate command (err %d)\n", retval);
		retval = -EFAULT;
	}

free_empty_img:
	ipahal_free_dma_mem(&mem);
free_cmd_pyld:
	/* only the payloads actually constructed are destroyed */
	for (index = 0; index < num_cmds; index++)
		ipahal_destroy_imm_cmd(cmd_pyld[index]);
	kfree(cmd_pyld);
free_desc:
	kfree(desc);
	return retval;
}
2031
2032static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip,
2033 enum ipa_rule_type rlt)
2034{
2035 struct ipa3_desc *desc;
2036 struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
2037 struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
2038 int retval = 0;
2039 u32 modem_rt_index_lo;
2040 u32 modem_rt_index_hi;
2041 u32 lcl_addr_mem_part;
2042 u32 lcl_hdr_sz;
2043 struct ipa_mem_buffer mem;
2044
2045 IPADBG("Entry\n");
2046
2047 if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
2048 IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
2049 return -EINVAL;
2050 }
2051
2052 if (ip == IPA_IP_v4) {
2053 modem_rt_index_lo = IPA_MEM_PART(v4_modem_rt_index_lo);
2054 modem_rt_index_hi = IPA_MEM_PART(v4_modem_rt_index_hi);
2055 if (rlt == IPA_RULE_HASHABLE) {
2056 lcl_addr_mem_part = IPA_MEM_PART(v4_rt_hash_ofst);
2057 lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
2058 } else {
2059 lcl_addr_mem_part = IPA_MEM_PART(v4_rt_nhash_ofst);
2060 lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
2061 }
2062 } else {
2063 modem_rt_index_lo = IPA_MEM_PART(v6_modem_rt_index_lo);
2064 modem_rt_index_hi = IPA_MEM_PART(v6_modem_rt_index_hi);
2065 if (rlt == IPA_RULE_HASHABLE) {
2066 lcl_addr_mem_part = IPA_MEM_PART(v6_rt_hash_ofst);
2067 lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
2068 } else {
2069 lcl_addr_mem_part = IPA_MEM_PART(v6_rt_nhash_ofst);
2070 lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
2071 }
2072 }
2073
2074 retval = ipahal_rt_generate_empty_img(
2075 modem_rt_index_hi - modem_rt_index_lo + 1,
Amir Levy4dc79be2017-02-01 19:18:35 +02002076 lcl_hdr_sz, lcl_hdr_sz, &mem, true);
Amir Levy9659e592016-10-27 18:08:27 +03002077 if (retval) {
2078 IPAERR("fail generate empty rt img\n");
2079 return -ENOMEM;
2080 }
2081
2082 desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
2083 if (!desc) {
2084 IPAERR("failed to allocate memory\n");
2085 goto free_empty_img;
2086 }
2087
2088 cmd.is_read = false;
2089 cmd.skip_pipeline_clear = false;
2090 cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
2091 cmd.size = mem.size;
2092 cmd.system_addr = mem.phys_base;
2093 cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
2094 lcl_addr_mem_part +
2095 modem_rt_index_lo * ipahal_get_hw_tbl_hdr_width();
2096 cmd_pyld = ipahal_construct_imm_cmd(
2097 IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
2098 if (!cmd_pyld) {
2099 IPAERR("failed to construct dma_shared_mem imm cmd\n");
2100 retval = -ENOMEM;
2101 goto free_desc;
2102 }
2103 desc->opcode =
2104 ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
2105 desc->pyld = cmd_pyld->data;
2106 desc->len = cmd_pyld->len;
2107 desc->type = IPA_IMM_CMD_DESC;
2108
2109 IPADBG("Sending 1 descriptor for rt tbl clearing\n");
2110 retval = ipa3_send_cmd(1, desc);
2111 if (retval) {
2112 IPAERR("failed to send immediate command (err %d)\n", retval);
2113 retval = -EFAULT;
2114 }
2115
2116 ipahal_destroy_imm_cmd(cmd_pyld);
2117free_desc:
2118 kfree(desc);
2119free_empty_img:
2120 ipahal_free_dma_mem(&mem);
2121 return retval;
2122}
2123
/*
 * ipa3_q6_clean_q6_tables() - Clear all Q6 (modem) filter and routing
 * tables in IPA local memory, then flush the filter/routing hash caches
 * so no stale hashed entries survive the modem restart.
 *
 * Return: 0 on success, negative errno otherwise
 */
static int ipa3_q6_clean_q6_tables(void)
{
	struct ipa3_desc *desc;
	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
	int retval;
	struct ipahal_reg_fltrt_hash_flush flush;
	struct ipahal_reg_valmask valmask;

	IPADBG("Entry\n");


	/* Clear all four Q6 filter tables: v4/v6 x hashable/non-hashable */
	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
		IPAERR("failed to clean q6 flt tbls (v4/hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
		IPAERR("failed to clean q6 flt tbls (v6/hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
		IPAERR("failed to clean q6 flt tbls (v4/non-hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
		IPAERR("failed to clean q6 flt tbls (v6/non-hashable)\n");
		return -EFAULT;
	}

	/* Clear all four Q6 routing tables: v4/v6 x hashable/non-hashable */
	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
		IPAERR("failed to clean q6 rt tbls (v4/hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
		IPAERR("failed to clean q6 rt tbls (v6/hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
		IPAERR("failed to clean q6 rt tbls (v4/non-hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
		IPAERR("failed to clean q6 rt tbls (v6/non-hashable)\n");
		return -EFAULT;
	}

	/* Flush rules cache */
	desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
	if (!desc) {
		IPAERR("failed to allocate memory\n");
		return -ENOMEM;
	}

	/* Request a flush of all four hash caches in one register write */
	flush.v4_flt = true;
	flush.v4_rt = true;
	flush.v6_flt = true;
	flush.v6_rt = true;
	ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
	reg_write_cmd.skip_pipeline_clear = false;
	reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
	reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
	reg_write_cmd.value = valmask.val;
	reg_write_cmd.value_mask = valmask.mask;
	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
		&reg_write_cmd, false);
	if (!cmd_pyld) {
		IPAERR("fail construct register_write imm cmd\n");
		retval = -EFAULT;
		goto bail_desc;
	}
	desc->opcode =
		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
	desc->pyld = cmd_pyld->data;
	desc->len = cmd_pyld->len;
	desc->type = IPA_IMM_CMD_DESC;

	IPADBG("Sending 1 descriptor for tbls flush\n");
	retval = ipa3_send_cmd(1, desc);
	if (retval) {
		IPAERR("failed to send immediate command (err %d)\n", retval);
		retval = -EFAULT;
	}

	ipahal_destroy_imm_cmd(cmd_pyld);

bail_desc:
	kfree(desc);
	IPADBG("Done - retval = %d\n", retval);
	return retval;
}
2214
2215static int ipa3_q6_set_ex_path_to_apps(void)
2216{
2217 int ep_idx;
2218 int client_idx;
2219 struct ipa3_desc *desc;
2220 int num_descs = 0;
2221 int index;
2222 struct ipahal_imm_cmd_register_write reg_write;
2223 struct ipahal_imm_cmd_pyld *cmd_pyld;
2224 int retval;
2225 struct ipahal_reg_valmask valmask;
2226
2227 desc = kcalloc(ipa3_ctx->ipa_num_pipes, sizeof(struct ipa3_desc),
2228 GFP_KERNEL);
2229 if (!desc) {
2230 IPAERR("failed to allocate memory\n");
2231 return -ENOMEM;
2232 }
2233
2234 /* Set the exception path to AP */
2235 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
2236 ep_idx = ipa3_get_ep_mapping(client_idx);
2237 if (ep_idx == -1)
2238 continue;
2239
2240 if (ipa3_ctx->ep[ep_idx].valid &&
2241 ipa3_ctx->ep[ep_idx].skip_ep_cfg) {
2242 BUG_ON(num_descs >= ipa3_ctx->ipa_num_pipes);
2243
2244 reg_write.skip_pipeline_clear = false;
2245 reg_write.pipeline_clear_options =
2246 IPAHAL_HPS_CLEAR;
2247 reg_write.offset =
2248 ipahal_get_reg_ofst(IPA_ENDP_STATUS_n);
2249 ipahal_get_status_ep_valmask(
2250 ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS),
2251 &valmask);
2252 reg_write.value = valmask.val;
2253 reg_write.value_mask = valmask.mask;
2254 cmd_pyld = ipahal_construct_imm_cmd(
2255 IPA_IMM_CMD_REGISTER_WRITE, &reg_write, false);
2256 if (!cmd_pyld) {
2257 IPAERR("fail construct register_write cmd\n");
2258 BUG();
2259 }
2260
2261 desc[num_descs].opcode = ipahal_imm_cmd_get_opcode(
2262 IPA_IMM_CMD_REGISTER_WRITE);
2263 desc[num_descs].type = IPA_IMM_CMD_DESC;
2264 desc[num_descs].callback = ipa3_destroy_imm;
2265 desc[num_descs].user1 = cmd_pyld;
2266 desc[num_descs].pyld = cmd_pyld->data;
2267 desc[num_descs].len = cmd_pyld->len;
2268 num_descs++;
2269 }
2270 }
2271
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002272 /* Will wait 500msecs for IPA tag process completion */
Amir Levy9659e592016-10-27 18:08:27 +03002273 retval = ipa3_tag_process(desc, num_descs,
2274 msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT));
2275 if (retval) {
2276 IPAERR("TAG process failed! (error %d)\n", retval);
2277 /* For timeout error ipa3_destroy_imm cb will destroy user1 */
2278 if (retval != -ETIME) {
2279 for (index = 0; index < num_descs; index++)
2280 if (desc[index].callback)
2281 desc[index].callback(desc[index].user1,
2282 desc[index].user2);
2283 retval = -EINVAL;
2284 }
2285 }
2286
2287 kfree(desc);
2288
2289 return retval;
2290}
2291
/**
 * ipa3_q6_pre_shutdown_cleanup() - A cleanup for all Q6 related configuration
 * in IPA HW. This is performed in case of SSR.
 *
 * This is a mandatory procedure, in case one of the steps fails, the
 * AP needs to restart.
 */
void ipa3_q6_pre_shutdown_cleanup(void)
{
	IPADBG_LOW("ENTER\n");

	/* Keep IPA clocks/votes up for the whole cleanup sequence */
	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	/* Delay Q6 PROD pipes so no new traffic enters while cleaning */
	ipa3_q6_pipe_delay(true);
	ipa3_q6_avoid_holb();
	if (ipa3_q6_clean_q6_tables()) {
		IPAERR("Failed to clean Q6 tables\n");
		BUG();
	}
	if (ipa3_q6_set_ex_path_to_apps()) {
		IPAERR("Failed to redirect exceptions to APPS\n");
		BUG();
	}
	/* Remove delay from Q6 PRODs to avoid pending descriptors
	 * on pipe reset procedure
	 */
	ipa3_q6_pipe_delay(false);

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	IPADBG_LOW("Exit with success\n");
}
2323
/*
 * ipa3_q6_post_shutdown_cleanup() - As part of this cleanup
 * check if GSI channel related to Q6 producer client is empty.
 *
 * Q6 GSI channel emptiness is needed to guarantee no descriptors with invalid
 * info are injected into IPA RX from IPA_IF, while modem is restarting.
 */
void ipa3_q6_post_shutdown_cleanup(void)
{
	int client_idx;

	IPADBG_LOW("ENTER\n");

	/* Channel emptiness is queried via the uC
	 * (ipa3_uc_is_gsi_channel_empty), so nothing can be done without it
	 */
	if (!ipa3_ctx->uc_ctx.uc_loaded) {
		IPAERR("uC is not loaded. Skipping\n");
		return;
	}

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	/* Handle the issue where SUSPEND was removed for some reason */
	ipa3_q6_avoid_holb();
	ipa3_halt_q6_cons_gsi_channels();

	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
		if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
			if (ipa3_uc_is_gsi_channel_empty(client_idx)) {
				IPAERR("fail to validate Q6 ch emptiness %d\n",
					client_idx);
				BUG();
				/* unreachable: BUG() does not return */
				return;
			}
		}

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	IPADBG_LOW("Exit with success\n");
}
2361
2362static inline void ipa3_sram_set_canary(u32 *sram_mmio, int offset)
2363{
2364 /* Set 4 bytes of CANARY before the offset */
2365 sram_mmio[(offset - 4) / 4] = IPA_MEM_CANARY_VAL;
2366}
2367
2368/**
2369 * _ipa_init_sram_v3_0() - Initialize IPA local SRAM.
2370 *
2371 * Return codes: 0 for success, negative value for failure
2372 */
2373int _ipa_init_sram_v3_0(void)
2374{
2375 u32 *ipa_sram_mmio;
2376 unsigned long phys_addr;
2377
2378 phys_addr = ipa3_ctx->ipa_wrapper_base +
2379 ipa3_ctx->ctrl->ipa_reg_base_ofst +
2380 ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
2381 ipa3_ctx->smem_restricted_bytes / 4);
2382
2383 ipa_sram_mmio = ioremap(phys_addr, ipa3_ctx->smem_sz);
2384 if (!ipa_sram_mmio) {
2385 IPAERR("fail to ioremap IPA SRAM\n");
2386 return -ENOMEM;
2387 }
2388
2389 /* Consult with ipa_i.h on the location of the CANARY values */
2390 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst) - 4);
2391 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst));
2392 ipa3_sram_set_canary(ipa_sram_mmio,
2393 IPA_MEM_PART(v4_flt_nhash_ofst) - 4);
2394 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_nhash_ofst));
2395 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst) - 4);
2396 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst));
2397 ipa3_sram_set_canary(ipa_sram_mmio,
2398 IPA_MEM_PART(v6_flt_nhash_ofst) - 4);
2399 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_nhash_ofst));
2400 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst) - 4);
2401 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst));
2402 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst) - 4);
2403 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst));
2404 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst) - 4);
2405 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst));
2406 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst) - 4);
2407 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst));
2408 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst) - 4);
2409 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst));
2410 ipa3_sram_set_canary(ipa_sram_mmio,
2411 IPA_MEM_PART(modem_hdr_proc_ctx_ofst) - 4);
2412 ipa3_sram_set_canary(ipa_sram_mmio,
2413 IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
2414 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst) - 4);
2415 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst));
2416 ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(end_ofst));
2417
2418 iounmap(ipa_sram_mmio);
2419
2420 return 0;
2421}
2422
/**
 * _ipa_init_hdr_v3_0() - Initialize IPA header block.
 *
 * Zeroes the header table (modem + apps regions) via HDR_INIT_LOCAL,
 * then zeroes the header processing-context table via DMA_SHARED_MEM
 * and programs the processing-context base register.
 *
 * Return codes: 0 for success, negative value for failure
 */
int _ipa_init_hdr_v3_0(void)
{
	struct ipa3_desc desc = { 0 };
	struct ipa_mem_buffer mem;
	struct ipahal_imm_cmd_hdr_init_local cmd = {0};
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	struct ipahal_imm_cmd_dma_shared_mem dma_cmd = { 0 };

	/* Stage 1: commit a zeroed image over the header table */
	mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
		GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
		return -ENOMEM;
	}
	memset(mem.base, 0, mem.size);

	cmd.hdr_table_addr = mem.phys_base;
	cmd.size_hdr_table = mem.size;
	/* Destination is the modem header region in IPA local memory */
	cmd.hdr_addr = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(modem_hdr_ofst);
	cmd_pyld = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_HDR_INIT_LOCAL, &cmd, false);
	if (!cmd_pyld) {
		IPAERR("fail to construct hdr_init_local imm cmd\n");
		dma_free_coherent(ipa3_ctx->pdev,
			mem.size, mem.base,
			mem.phys_base);
		return -EFAULT;
	}
	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_HDR_INIT_LOCAL);
	desc.type = IPA_IMM_CMD_DESC;
	desc.pyld = cmd_pyld->data;
	desc.len = cmd_pyld->len;
	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);

	if (ipa3_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		ipahal_destroy_imm_cmd(cmd_pyld);
		dma_free_coherent(ipa3_ctx->pdev,
			mem.size, mem.base,
			mem.phys_base);
		return -EFAULT;
	}

	ipahal_destroy_imm_cmd(cmd_pyld);
	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);

	/* Stage 2: commit a zeroed image over the header processing-context
	 * table, reusing mem and desc for the second command
	 */
	mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) +
		IPA_MEM_PART(apps_hdr_proc_ctx_size);
	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
		GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
		return -ENOMEM;
	}
	memset(mem.base, 0, mem.size);
	memset(&desc, 0, sizeof(desc));

	dma_cmd.is_read = false;
	dma_cmd.skip_pipeline_clear = false;
	dma_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
	dma_cmd.system_addr = mem.phys_base;
	dma_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(modem_hdr_proc_ctx_ofst);
	dma_cmd.size = mem.size;
	cmd_pyld = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_DMA_SHARED_MEM, &dma_cmd, false);
	if (!cmd_pyld) {
		IPAERR("fail to construct dma_shared_mem imm\n");
		dma_free_coherent(ipa3_ctx->pdev,
			mem.size, mem.base,
			mem.phys_base);
		return -EFAULT;
	}
	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
	desc.pyld = cmd_pyld->data;
	desc.len = cmd_pyld->len;
	desc.type = IPA_IMM_CMD_DESC;
	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);

	if (ipa3_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		ipahal_destroy_imm_cmd(cmd_pyld);
		dma_free_coherent(ipa3_ctx->pdev,
			mem.size,
			mem.base,
			mem.phys_base);
		return -EFAULT;
	}
	ipahal_destroy_imm_cmd(cmd_pyld);

	/* Point HW at the local processing-context table base */
	ipahal_write_reg(IPA_LOCAL_PKT_PROC_CNTXT_BASE, dma_cmd.local_addr);

	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);

	return 0;
}
2526
2527/**
2528 * _ipa_init_rt4_v3() - Initialize IPA routing block for IPv4.
2529 *
2530 * Return codes: 0 for success, negative value for failure
2531 */
2532int _ipa_init_rt4_v3(void)
2533{
2534 struct ipa3_desc desc = { 0 };
2535 struct ipa_mem_buffer mem;
2536 struct ipahal_imm_cmd_ip_v4_routing_init v4_cmd;
2537 struct ipahal_imm_cmd_pyld *cmd_pyld;
2538 int i;
2539 int rc = 0;
2540
2541 for (i = IPA_MEM_PART(v4_modem_rt_index_lo);
2542 i <= IPA_MEM_PART(v4_modem_rt_index_hi);
2543 i++)
2544 ipa3_ctx->rt_idx_bitmap[IPA_IP_v4] |= (1 << i);
2545 IPADBG("v4 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v4]);
2546
2547 rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v4_rt_num_index),
2548 IPA_MEM_PART(v4_rt_hash_size), IPA_MEM_PART(v4_rt_nhash_size),
Amir Levy4dc79be2017-02-01 19:18:35 +02002549 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03002550 if (rc) {
2551 IPAERR("fail generate empty v4 rt img\n");
2552 return rc;
2553 }
2554
2555 v4_cmd.hash_rules_addr = mem.phys_base;
2556 v4_cmd.hash_rules_size = mem.size;
2557 v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
2558 IPA_MEM_PART(v4_rt_hash_ofst);
2559 v4_cmd.nhash_rules_addr = mem.phys_base;
2560 v4_cmd.nhash_rules_size = mem.size;
2561 v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
2562 IPA_MEM_PART(v4_rt_nhash_ofst);
2563 IPADBG("putting hashable routing IPv4 rules to phys 0x%x\n",
2564 v4_cmd.hash_local_addr);
2565 IPADBG("putting non-hashable routing IPv4 rules to phys 0x%x\n",
2566 v4_cmd.nhash_local_addr);
2567 cmd_pyld = ipahal_construct_imm_cmd(
2568 IPA_IMM_CMD_IP_V4_ROUTING_INIT, &v4_cmd, false);
2569 if (!cmd_pyld) {
2570 IPAERR("fail construct ip_v4_rt_init imm cmd\n");
2571 rc = -EPERM;
2572 goto free_mem;
2573 }
2574
2575 desc.opcode =
2576 ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_ROUTING_INIT);
2577 desc.type = IPA_IMM_CMD_DESC;
2578 desc.pyld = cmd_pyld->data;
2579 desc.len = cmd_pyld->len;
2580 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2581
2582 if (ipa3_send_cmd(1, &desc)) {
2583 IPAERR("fail to send immediate command\n");
2584 rc = -EFAULT;
2585 }
2586
2587 ipahal_destroy_imm_cmd(cmd_pyld);
2588
2589free_mem:
2590 ipahal_free_dma_mem(&mem);
2591 return rc;
2592}
2593
2594/**
2595 * _ipa_init_rt6_v3() - Initialize IPA routing block for IPv6.
2596 *
2597 * Return codes: 0 for success, negative value for failure
2598 */
2599int _ipa_init_rt6_v3(void)
2600{
2601 struct ipa3_desc desc = { 0 };
2602 struct ipa_mem_buffer mem;
2603 struct ipahal_imm_cmd_ip_v6_routing_init v6_cmd;
2604 struct ipahal_imm_cmd_pyld *cmd_pyld;
2605 int i;
2606 int rc = 0;
2607
2608 for (i = IPA_MEM_PART(v6_modem_rt_index_lo);
2609 i <= IPA_MEM_PART(v6_modem_rt_index_hi);
2610 i++)
2611 ipa3_ctx->rt_idx_bitmap[IPA_IP_v6] |= (1 << i);
2612 IPADBG("v6 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v6]);
2613
2614 rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v6_rt_num_index),
2615 IPA_MEM_PART(v6_rt_hash_size), IPA_MEM_PART(v6_rt_nhash_size),
Amir Levy4dc79be2017-02-01 19:18:35 +02002616 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03002617 if (rc) {
2618 IPAERR("fail generate empty v6 rt img\n");
2619 return rc;
2620 }
2621
2622 v6_cmd.hash_rules_addr = mem.phys_base;
2623 v6_cmd.hash_rules_size = mem.size;
2624 v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
2625 IPA_MEM_PART(v6_rt_hash_ofst);
2626 v6_cmd.nhash_rules_addr = mem.phys_base;
2627 v6_cmd.nhash_rules_size = mem.size;
2628 v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
2629 IPA_MEM_PART(v6_rt_nhash_ofst);
2630 IPADBG("putting hashable routing IPv6 rules to phys 0x%x\n",
2631 v6_cmd.hash_local_addr);
2632 IPADBG("putting non-hashable routing IPv6 rules to phys 0x%x\n",
2633 v6_cmd.nhash_local_addr);
2634 cmd_pyld = ipahal_construct_imm_cmd(
2635 IPA_IMM_CMD_IP_V6_ROUTING_INIT, &v6_cmd, false);
2636 if (!cmd_pyld) {
2637 IPAERR("fail construct ip_v6_rt_init imm cmd\n");
2638 rc = -EPERM;
2639 goto free_mem;
2640 }
2641
2642 desc.opcode =
2643 ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V6_ROUTING_INIT);
2644 desc.type = IPA_IMM_CMD_DESC;
2645 desc.pyld = cmd_pyld->data;
2646 desc.len = cmd_pyld->len;
2647 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2648
2649 if (ipa3_send_cmd(1, &desc)) {
2650 IPAERR("fail to send immediate command\n");
2651 rc = -EFAULT;
2652 }
2653
2654 ipahal_destroy_imm_cmd(cmd_pyld);
2655
2656free_mem:
2657 ipahal_free_dma_mem(&mem);
2658 return rc;
2659}
2660
2661/**
2662 * _ipa_init_flt4_v3() - Initialize IPA filtering block for IPv4.
2663 *
2664 * Return codes: 0 for success, negative value for failure
2665 */
2666int _ipa_init_flt4_v3(void)
2667{
2668 struct ipa3_desc desc = { 0 };
2669 struct ipa_mem_buffer mem;
2670 struct ipahal_imm_cmd_ip_v4_filter_init v4_cmd;
2671 struct ipahal_imm_cmd_pyld *cmd_pyld;
2672 int rc;
2673
2674 rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
2675 IPA_MEM_PART(v4_flt_hash_size),
2676 IPA_MEM_PART(v4_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
Amir Levy4dc79be2017-02-01 19:18:35 +02002677 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03002678 if (rc) {
2679 IPAERR("fail generate empty v4 flt img\n");
2680 return rc;
2681 }
2682
2683 v4_cmd.hash_rules_addr = mem.phys_base;
2684 v4_cmd.hash_rules_size = mem.size;
2685 v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
2686 IPA_MEM_PART(v4_flt_hash_ofst);
2687 v4_cmd.nhash_rules_addr = mem.phys_base;
2688 v4_cmd.nhash_rules_size = mem.size;
2689 v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
2690 IPA_MEM_PART(v4_flt_nhash_ofst);
2691 IPADBG("putting hashable filtering IPv4 rules to phys 0x%x\n",
2692 v4_cmd.hash_local_addr);
2693 IPADBG("putting non-hashable filtering IPv4 rules to phys 0x%x\n",
2694 v4_cmd.nhash_local_addr);
2695 cmd_pyld = ipahal_construct_imm_cmd(
2696 IPA_IMM_CMD_IP_V4_FILTER_INIT, &v4_cmd, false);
2697 if (!cmd_pyld) {
2698 IPAERR("fail construct ip_v4_flt_init imm cmd\n");
2699 rc = -EPERM;
2700 goto free_mem;
2701 }
2702
2703 desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_FILTER_INIT);
2704 desc.type = IPA_IMM_CMD_DESC;
2705 desc.pyld = cmd_pyld->data;
2706 desc.len = cmd_pyld->len;
2707 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2708
2709 if (ipa3_send_cmd(1, &desc)) {
2710 IPAERR("fail to send immediate command\n");
2711 rc = -EFAULT;
2712 }
2713
2714 ipahal_destroy_imm_cmd(cmd_pyld);
2715
2716free_mem:
2717 ipahal_free_dma_mem(&mem);
2718 return rc;
2719}
2720
2721/**
2722 * _ipa_init_flt6_v3() - Initialize IPA filtering block for IPv6.
2723 *
2724 * Return codes: 0 for success, negative value for failure
2725 */
2726int _ipa_init_flt6_v3(void)
2727{
2728 struct ipa3_desc desc = { 0 };
2729 struct ipa_mem_buffer mem;
2730 struct ipahal_imm_cmd_ip_v6_filter_init v6_cmd;
2731 struct ipahal_imm_cmd_pyld *cmd_pyld;
2732 int rc;
2733
2734 rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
2735 IPA_MEM_PART(v6_flt_hash_size),
2736 IPA_MEM_PART(v6_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
Amir Levy4dc79be2017-02-01 19:18:35 +02002737 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03002738 if (rc) {
2739 IPAERR("fail generate empty v6 flt img\n");
2740 return rc;
2741 }
2742
2743 v6_cmd.hash_rules_addr = mem.phys_base;
2744 v6_cmd.hash_rules_size = mem.size;
2745 v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
2746 IPA_MEM_PART(v6_flt_hash_ofst);
2747 v6_cmd.nhash_rules_addr = mem.phys_base;
2748 v6_cmd.nhash_rules_size = mem.size;
2749 v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
2750 IPA_MEM_PART(v6_flt_nhash_ofst);
2751 IPADBG("putting hashable filtering IPv6 rules to phys 0x%x\n",
2752 v6_cmd.hash_local_addr);
2753 IPADBG("putting non-hashable filtering IPv6 rules to phys 0x%x\n",
2754 v6_cmd.nhash_local_addr);
2755
2756 cmd_pyld = ipahal_construct_imm_cmd(
2757 IPA_IMM_CMD_IP_V6_FILTER_INIT, &v6_cmd, false);
2758 if (!cmd_pyld) {
2759 IPAERR("fail construct ip_v6_flt_init imm cmd\n");
2760 rc = -EPERM;
2761 goto free_mem;
2762 }
2763
2764 desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V6_FILTER_INIT);
2765 desc.type = IPA_IMM_CMD_DESC;
2766 desc.pyld = cmd_pyld->data;
2767 desc.len = cmd_pyld->len;
2768 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2769
2770 if (ipa3_send_cmd(1, &desc)) {
2771 IPAERR("fail to send immediate command\n");
2772 rc = -EFAULT;
2773 }
2774
2775 ipahal_destroy_imm_cmd(cmd_pyld);
2776
2777free_mem:
2778 ipahal_free_dma_mem(&mem);
2779 return rc;
2780}
2781
2782static int ipa3_setup_flt_hash_tuple(void)
2783{
2784 int pipe_idx;
2785 struct ipahal_reg_hash_tuple tuple;
2786
2787 memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
2788
2789 for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes ; pipe_idx++) {
2790 if (!ipa_is_ep_support_flt(pipe_idx))
2791 continue;
2792
2793 if (ipa_is_modem_pipe(pipe_idx))
2794 continue;
2795
2796 if (ipa3_set_flt_tuple_mask(pipe_idx, &tuple)) {
2797 IPAERR("failed to setup pipe %d flt tuple\n", pipe_idx);
2798 return -EFAULT;
2799 }
2800 }
2801
2802 return 0;
2803}
2804
2805static int ipa3_setup_rt_hash_tuple(void)
2806{
2807 int tbl_idx;
2808 struct ipahal_reg_hash_tuple tuple;
2809
2810 memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
2811
2812 for (tbl_idx = 0;
2813 tbl_idx < max(IPA_MEM_PART(v6_rt_num_index),
2814 IPA_MEM_PART(v4_rt_num_index));
2815 tbl_idx++) {
2816
2817 if (tbl_idx >= IPA_MEM_PART(v4_modem_rt_index_lo) &&
2818 tbl_idx <= IPA_MEM_PART(v4_modem_rt_index_hi))
2819 continue;
2820
2821 if (tbl_idx >= IPA_MEM_PART(v6_modem_rt_index_lo) &&
2822 tbl_idx <= IPA_MEM_PART(v6_modem_rt_index_hi))
2823 continue;
2824
2825 if (ipa3_set_rt_tuple_mask(tbl_idx, &tuple)) {
2826 IPAERR("failed to setup tbl %d rt tuple\n", tbl_idx);
2827 return -EFAULT;
2828 }
2829 }
2830
2831 return 0;
2832}
2833
/*
 * ipa3_setup_apps_pipes() - Set up the AP-owned system pipes and initialize
 * IPA local memory blocks (SRAM, header, routing, filtering), hash tuples,
 * the exception path and default routing.
 *
 * Setup order matters: the CMD pipe must exist before any of the
 * init_* immediate commands can be sent.
 *
 * Return: 0 on success, negative errno otherwise (partially created
 * resources are torn down via the goto chain)
 */
static int ipa3_setup_apps_pipes(void)
{
	struct ipa_sys_connect_params sys_in;
	int result = 0;

	if (ipa3_ctx->gsi_ch20_wa) {
		IPADBG("Allocating GSI physical channel 20\n");
		result = ipa_gsi_ch20_wa();
		if (result) {
			IPAERR("ipa_gsi_ch20_wa failed %d\n", result);
			goto fail_ch20_wa;
		}
	}

	/* allocate the common PROD event ring */
	if (ipa3_alloc_common_event_ring()) {
		IPAERR("ipa3_alloc_common_event_ring failed.\n");
		result = -EPERM;
		goto fail_ch20_wa;
	}

	/* CMD OUT (AP->IPA) */
	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
	sys_in.client = IPA_CLIENT_APPS_CMD_PROD;
	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS;
	if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_cmd)) {
		IPAERR(":setup sys pipe (APPS_CMD_PROD) failed.\n");
		result = -EPERM;
		goto fail_ch20_wa;
	}
	IPADBG("Apps to IPA cmd pipe is connected\n");

	/* HW-version-specific init hooks; all use the CMD pipe above */
	ipa3_ctx->ctrl->ipa_init_sram();
	IPADBG("SRAM initialized\n");

	ipa3_ctx->ctrl->ipa_init_hdr();
	IPADBG("HDR initialized\n");

	ipa3_ctx->ctrl->ipa_init_rt4();
	IPADBG("V4 RT initialized\n");

	ipa3_ctx->ctrl->ipa_init_rt6();
	IPADBG("V6 RT initialized\n");

	ipa3_ctx->ctrl->ipa_init_flt4();
	IPADBG("V4 FLT initialized\n");

	ipa3_ctx->ctrl->ipa_init_flt6();
	IPADBG("V6 FLT initialized\n");

	if (ipa3_setup_flt_hash_tuple()) {
		IPAERR(":fail to configure flt hash tuple\n");
		result = -EPERM;
		goto fail_flt_hash_tuple;
	}
	IPADBG("flt hash tuple is configured\n");

	if (ipa3_setup_rt_hash_tuple()) {
		IPAERR(":fail to configure rt hash tuple\n");
		result = -EPERM;
		goto fail_flt_hash_tuple;
	}
	IPADBG("rt hash tuple is configured\n");

	if (ipa3_setup_exception_path()) {
		IPAERR(":fail to setup excp path\n");
		result = -EPERM;
		goto fail_flt_hash_tuple;
	}
	IPADBG("Exception path was successfully set");

	if (ipa3_setup_dflt_rt_tables()) {
		IPAERR(":fail to setup dflt routes\n");
		result = -EPERM;
		goto fail_flt_hash_tuple;
	}
	IPADBG("default routing was set\n");

	/* LAN IN (IPA->AP) */
	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
	sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
	sys_in.notify = ipa3_lan_rx_cb;
	sys_in.priv = NULL;
	sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH;
	sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
	sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
	sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
	sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
	sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
	sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
	sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_ENABLE_CS_OFFLOAD_DL;

	/**
	 * ipa_lan_rx_cb() intended to notify the source EP about packet
	 * being received on the LAN_CONS via calling the source EP call-back.
	 * There could be a race condition with calling this call-back. Other
	 * thread may nullify it - e.g. on EP disconnect.
	 * This lock intended to protect the access to the source EP call-back
	 */
	spin_lock_init(&ipa3_ctx->disconnect_lock);
	if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
		IPAERR(":setup sys pipe (LAN_CONS) failed.\n");
		result = -EPERM;
		goto fail_flt_hash_tuple;
	}

	/* LAN OUT (AP->IPA); not created in MHI configuration */
	if (!ipa3_ctx->ipa_config_is_mhi) {
		memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
		sys_in.client = IPA_CLIENT_APPS_LAN_PROD;
		sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ;
		sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
		if (ipa3_setup_sys_pipe(&sys_in,
			&ipa3_ctx->clnt_hdl_data_out)) {
			IPAERR(":setup sys pipe (LAN_PROD) failed.\n");
			result = -EPERM;
			goto fail_lan_data_out;
		}
	}

	return 0;

fail_lan_data_out:
	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
fail_flt_hash_tuple:
	if (ipa3_ctx->dflt_v6_rt_rule_hdl)
		__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
	if (ipa3_ctx->dflt_v4_rt_rule_hdl)
		__ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
	if (ipa3_ctx->excp_hdr_hdl)
		__ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
fail_ch20_wa:
	return result;
}
2972
/*
 * ipa3_teardown_apps_pipes() - Tear down everything created by
 * ipa3_setup_apps_pipes(), in reverse order of creation.
 */
static void ipa3_teardown_apps_pipes(void)
{
	/* LAN OUT pipe only exists when not in MHI config (mirrors setup) */
	if (!ipa3_ctx->ipa_config_is_mhi)
		ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out);
	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
	__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
	__ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
	__ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
}
2983
#ifdef CONFIG_COMPAT
/**
 * compat_ipa3_ioctl() - 32-bit compat ioctl entry point for the IPA driver.
 * @file: device file pointer
 * @cmd: 32-bit ioctl command code
 * @arg: 32-bit user-space argument (a user pointer for most commands)
 *
 * Translates 32-bit ioctl command codes to their native equivalents and
 * forwards them to ipa3_ioctl(). IPA_IOC_ALLOC_NAT_MEM32 is handled fully
 * here since its argument layout differs between the 32-bit and native
 * ABIs (compat_off_t vs off_t).
 *
 * Fix: user pointers coming from a compat ioctl must be converted with
 * compat_ptr() before being dereferenced via copy_from_user()/
 * copy_to_user(), consistent with the compat_ptr(arg) already used for
 * the forwarded commands below.
 *
 * Return: 0 on success, negative errno on failure, -ENOIOCTLCMD for
 * unrecognized commands.
 */
long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int retval = 0;
	struct ipa3_ioc_nat_alloc_mem32 nat_mem32;
	struct ipa_ioc_nat_alloc_mem nat_mem;

	switch (cmd) {
	case IPA_IOC_ADD_HDR32:
		cmd = IPA_IOC_ADD_HDR;
		break;
	case IPA_IOC_DEL_HDR32:
		cmd = IPA_IOC_DEL_HDR;
		break;
	case IPA_IOC_ADD_RT_RULE32:
		cmd = IPA_IOC_ADD_RT_RULE;
		break;
	case IPA_IOC_DEL_RT_RULE32:
		cmd = IPA_IOC_DEL_RT_RULE;
		break;
	case IPA_IOC_ADD_FLT_RULE32:
		cmd = IPA_IOC_ADD_FLT_RULE;
		break;
	case IPA_IOC_DEL_FLT_RULE32:
		cmd = IPA_IOC_DEL_FLT_RULE;
		break;
	case IPA_IOC_GET_RT_TBL32:
		cmd = IPA_IOC_GET_RT_TBL;
		break;
	case IPA_IOC_COPY_HDR32:
		cmd = IPA_IOC_COPY_HDR;
		break;
	case IPA_IOC_QUERY_INTF32:
		cmd = IPA_IOC_QUERY_INTF;
		break;
	case IPA_IOC_QUERY_INTF_TX_PROPS32:
		cmd = IPA_IOC_QUERY_INTF_TX_PROPS;
		break;
	case IPA_IOC_QUERY_INTF_RX_PROPS32:
		cmd = IPA_IOC_QUERY_INTF_RX_PROPS;
		break;
	case IPA_IOC_QUERY_INTF_EXT_PROPS32:
		cmd = IPA_IOC_QUERY_INTF_EXT_PROPS;
		break;
	case IPA_IOC_GET_HDR32:
		cmd = IPA_IOC_GET_HDR;
		break;
	case IPA_IOC_ALLOC_NAT_MEM32:
		/* convert the 32-bit user pointer before dereferencing it */
		if (copy_from_user(&nat_mem32, compat_ptr(arg),
			sizeof(struct ipa3_ioc_nat_alloc_mem32))) {
			retval = -EFAULT;
			goto ret;
		}
		memcpy(nat_mem.dev_name, nat_mem32.dev_name,
			IPA_RESOURCE_NAME_MAX);
		nat_mem.size = (size_t)nat_mem32.size;
		nat_mem.offset = (off_t)nat_mem32.offset;

		/* null terminate the string */
		nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';

		if (ipa3_allocate_nat_device(&nat_mem)) {
			retval = -EFAULT;
			goto ret;
		}
		/* only the offset is written back to the 32-bit layout */
		nat_mem32.offset = (compat_off_t)nat_mem.offset;
		if (copy_to_user(compat_ptr(arg), &nat_mem32,
			sizeof(struct ipa3_ioc_nat_alloc_mem32))) {
			retval = -EFAULT;
		}
ret:
		return retval;
	case IPA_IOC_V4_INIT_NAT32:
		cmd = IPA_IOC_V4_INIT_NAT;
		break;
	case IPA_IOC_NAT_DMA32:
		cmd = IPA_IOC_NAT_DMA;
		break;
	case IPA_IOC_V4_DEL_NAT32:
		cmd = IPA_IOC_V4_DEL_NAT;
		break;
	case IPA_IOC_GET_NAT_OFFSET32:
		cmd = IPA_IOC_GET_NAT_OFFSET;
		break;
	case IPA_IOC_PULL_MSG32:
		cmd = IPA_IOC_PULL_MSG;
		break;
	case IPA_IOC_RM_ADD_DEPENDENCY32:
		cmd = IPA_IOC_RM_ADD_DEPENDENCY;
		break;
	case IPA_IOC_RM_DEL_DEPENDENCY32:
		cmd = IPA_IOC_RM_DEL_DEPENDENCY;
		break;
	case IPA_IOC_GENERATE_FLT_EQ32:
		cmd = IPA_IOC_GENERATE_FLT_EQ;
		break;
	case IPA_IOC_QUERY_RT_TBL_INDEX32:
		cmd = IPA_IOC_QUERY_RT_TBL_INDEX;
		break;
	case IPA_IOC_WRITE_QMAPID32:
		cmd = IPA_IOC_WRITE_QMAPID;
		break;
	case IPA_IOC_MDFY_FLT_RULE32:
		cmd = IPA_IOC_MDFY_FLT_RULE;
		break;
	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32:
		cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD;
		break;
	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32:
		cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL;
		break;
	case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32:
		cmd = IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED;
		break;
	case IPA_IOC_MDFY_RT_RULE32:
		cmd = IPA_IOC_MDFY_RT_RULE;
		break;
	/* commands with identical 32/64-bit layout pass straight through */
	case IPA_IOC_COMMIT_HDR:
	case IPA_IOC_RESET_HDR:
	case IPA_IOC_COMMIT_RT:
	case IPA_IOC_RESET_RT:
	case IPA_IOC_COMMIT_FLT:
	case IPA_IOC_RESET_FLT:
	case IPA_IOC_DUMP:
	case IPA_IOC_PUT_RT_TBL:
	case IPA_IOC_PUT_HDR:
	case IPA_IOC_SET_FLT:
	case IPA_IOC_QUERY_EP_MAPPING:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return ipa3_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
3119
3120static ssize_t ipa3_write(struct file *file, const char __user *buf,
3121 size_t count, loff_t *ppos);
3122
/* Character-device file operations for the IPA driver node */
static const struct file_operations ipa3_drv_fops = {
	.owner = THIS_MODULE,
	.open = ipa3_open,
	.read = ipa3_read,
	.write = ipa3_write,
	.unlocked_ioctl = ipa3_ioctl,
#ifdef CONFIG_COMPAT
	/* 32-bit user space on a 64-bit kernel */
	.compat_ioctl = compat_ipa3_ioctl,
#endif
};
3133
3134static int ipa3_get_clks(struct device *dev)
3135{
Ghanim Fodi6a831342017-03-07 18:19:15 +02003136 if (ipa3_res.use_bw_vote) {
3137 IPADBG("Vote IPA clock by bw voting via bus scaling driver\n");
3138 ipa3_clk = NULL;
3139 return 0;
3140 }
3141
Amir Levy9659e592016-10-27 18:08:27 +03003142 ipa3_clk = clk_get(dev, "core_clk");
3143 if (IS_ERR(ipa3_clk)) {
3144 if (ipa3_clk != ERR_PTR(-EPROBE_DEFER))
3145 IPAERR("fail to get ipa clk\n");
3146 return PTR_ERR(ipa3_clk);
3147 }
3148 return 0;
3149}
3150
3151/**
3152 * _ipa_enable_clks_v3_0() - Enable IPA clocks.
3153 */
3154void _ipa_enable_clks_v3_0(void)
3155{
Ghanim Fodi6a831342017-03-07 18:19:15 +02003156 IPADBG_LOW("curr_ipa_clk_rate=%d", ipa3_ctx->curr_ipa_clk_rate);
Amir Levy9659e592016-10-27 18:08:27 +03003157 if (ipa3_clk) {
Ghanim Fodi6a831342017-03-07 18:19:15 +02003158 IPADBG_LOW("enabling gcc_ipa_clk\n");
Amir Levy9659e592016-10-27 18:08:27 +03003159 clk_prepare(ipa3_clk);
3160 clk_enable(ipa3_clk);
Amir Levy9659e592016-10-27 18:08:27 +03003161 clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
Amir Levy9659e592016-10-27 18:08:27 +03003162 }
3163
Ghanim Fodi6a831342017-03-07 18:19:15 +02003164 ipa3_uc_notify_clk_state(true);
Amir Levy9659e592016-10-27 18:08:27 +03003165 ipa3_suspend_apps_pipes(false);
3166}
3167
3168static unsigned int ipa3_get_bus_vote(void)
3169{
3170 unsigned int idx = 1;
3171
3172 if (ipa3_ctx->curr_ipa_clk_rate == ipa3_ctx->ctrl->ipa_clk_rate_svs) {
3173 idx = 1;
3174 } else if (ipa3_ctx->curr_ipa_clk_rate ==
3175 ipa3_ctx->ctrl->ipa_clk_rate_nominal) {
3176 if (ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases <= 2)
3177 idx = 1;
3178 else
3179 idx = 2;
3180 } else if (ipa3_ctx->curr_ipa_clk_rate ==
3181 ipa3_ctx->ctrl->ipa_clk_rate_turbo) {
3182 idx = ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases - 1;
3183 } else {
3184 WARN_ON(1);
3185 }
3186
3187 IPADBG("curr %d idx %d\n", ipa3_ctx->curr_ipa_clk_rate, idx);
3188
3189 return idx;
3190}
3191
/**
* ipa3_enable_clks() - Turn on IPA clocks
*
* Votes the bus bandwidth first, then enables the clocks through the
* HW-version-specific handler (e.g. _ipa_enable_clks_v3_0).
*
* Return codes:
* None
*/
void ipa3_enable_clks(void)
{
	IPADBG("enabling IPA clocks and bus voting\n");

	/* bus vote must be in place before the core clock is enabled */
	if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
	    ipa3_get_bus_vote()))
		WARN_ON(1);

	ipa3_ctx->ctrl->ipa3_enable_clks();
}
3208
3209
/**
 * _ipa_disable_clks_v3_0() - Disable IPA clocks.
 *
 * Mirror of _ipa_enable_clks_v3_0(): suspend the APPS pipes and notify
 * the uC before the core clock is actually gated.
 */
void _ipa_disable_clks_v3_0(void)
{
	ipa3_suspend_apps_pipes(true);
	ipa3_uc_notify_clk_state(false);
	/* ipa3_clk is NULL when clock voting is done via bus BW voting */
	if (ipa3_clk) {
		IPADBG_LOW("disabling gcc_ipa_clk\n");
		clk_disable_unprepare(ipa3_clk);
	}
}
3222
3223/**
3224* ipa3_disable_clks() - Turn off IPA clocks
3225*
3226* Return codes:
3227* None
3228*/
3229void ipa3_disable_clks(void)
3230{
3231 IPADBG("disabling IPA clocks and bus voting\n");
3232
3233 ipa3_ctx->ctrl->ipa3_disable_clks();
3234
Ghanim Fodi6a831342017-03-07 18:19:15 +02003235 if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl, 0))
3236 WARN_ON(1);
Amir Levy9659e592016-10-27 18:08:27 +03003237}
3238
/**
 * ipa3_start_tag_process() - Send TAG packet and wait for it to come back
 *
 * This function is called prior to clock gating when active client counter
 * is 1. TAG process ensures that there are no packets inside IPA HW that
 * were not submitted to the IPA client via the transport. During TAG process
 * all aggregation frames are (force) closed.
 *
 * @work: work item (unused; queued on the power management workqueue)
 *
 * Return codes:
 * None
 */
static void ipa3_start_tag_process(struct work_struct *work)
{
	int res;

	IPADBG("starting TAG process\n");
	/* close aggregation frames on all pipes (-1 = every pipe) */
	res = ipa3_tag_aggr_force_close(-1);
	if (res)
		IPAERR("ipa3_tag_aggr_force_close failed %d\n", res);
	/* drop the reference taken on our behalf before this work was
	 * queued; this may gate the clocks
	 */
	IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS");

	IPADBG("TAG process done\n");
}
3263
3264/**
3265* ipa3_active_clients_log_mod() - Log a modification in the active clients
3266* reference count
3267*
3268* This method logs any modification in the active clients reference count:
3269* It logs the modification in the circular history buffer
3270* It logs the modification in the hash table - looking for an entry,
3271* creating one if needed and deleting one if needed.
3272*
3273* @id: ipa3_active client logging info struct to hold the log information
3274* @inc: a boolean variable to indicate whether the modification is an increase
3275* or decrease
3276* @int_ctx: a boolean variable to indicate whether this call is being made from
3277* an interrupt context and therefore should allocate GFP_ATOMIC memory
3278*
3279* Method process:
3280* - Hash the unique identifier string
3281* - Find the hash in the table
3282* 1)If found, increase or decrease the reference count
3283* 2)If not found, allocate a new hash table entry struct and initialize it
3284* - Remove and deallocate unneeded data structure
3285* - Log the call in the circular history buffer (unless it is a simple call)
3286*/
3287void ipa3_active_clients_log_mod(struct ipa_active_client_logging_info *id,
3288 bool inc, bool int_ctx)
3289{
3290 char temp_str[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN];
3291 unsigned long long t;
3292 unsigned long nanosec_rem;
3293 struct ipa3_active_client_htable_entry *hentry;
3294 struct ipa3_active_client_htable_entry *hfound;
3295 u32 hkey;
3296 char str_to_hash[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
3297
3298 hfound = NULL;
3299 memset(str_to_hash, 0, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
3300 strlcpy(str_to_hash, id->id_string, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
Amir Levyd9f51132016-11-14 16:55:35 +02003301 hkey = jhash(str_to_hash, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN,
Amir Levy9659e592016-10-27 18:08:27 +03003302 0);
3303 hash_for_each_possible(ipa3_ctx->ipa3_active_clients_logging.htable,
3304 hentry, list, hkey) {
3305 if (!strcmp(hentry->id_string, id->id_string)) {
3306 hentry->count = hentry->count + (inc ? 1 : -1);
3307 hfound = hentry;
3308 }
3309 }
3310 if (hfound == NULL) {
3311 hentry = NULL;
3312 hentry = kzalloc(sizeof(
3313 struct ipa3_active_client_htable_entry),
3314 int_ctx ? GFP_ATOMIC : GFP_KERNEL);
3315 if (hentry == NULL) {
3316 IPAERR("failed allocating active clients hash entry");
3317 return;
3318 }
3319 hentry->type = id->type;
3320 strlcpy(hentry->id_string, id->id_string,
3321 IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
3322 INIT_HLIST_NODE(&hentry->list);
3323 hentry->count = inc ? 1 : -1;
3324 hash_add(ipa3_ctx->ipa3_active_clients_logging.htable,
3325 &hentry->list, hkey);
3326 } else if (hfound->count == 0) {
3327 hash_del(&hfound->list);
3328 kfree(hfound);
3329 }
3330
3331 if (id->type != SIMPLE) {
3332 t = local_clock();
3333 nanosec_rem = do_div(t, 1000000000) / 1000;
3334 snprintf(temp_str, IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN,
3335 inc ? "[%5lu.%06lu] ^ %s, %s: %d" :
3336 "[%5lu.%06lu] v %s, %s: %d",
3337 (unsigned long)t, nanosec_rem,
3338 id->id_string, id->file, id->line);
3339 ipa3_active_clients_log_insert(temp_str);
3340 }
3341}
3342
/* Convenience wrapper: log a reference-count decrement */
void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
		bool int_ctx)
{
	ipa3_active_clients_log_mod(id, false, int_ctx);
}
3348
/* Convenience wrapper: log a reference-count increment */
void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id,
		bool int_ctx)
{
	ipa3_active_clients_log_mod(id, true, int_ctx);
}
3354
/**
* ipa3_inc_client_enable_clks() - Increase active clients counter, and
* enable ipa clocks if necessary
*
* May block (takes the active-clients lock and may enable clocks); use
* ipa3_inc_client_enable_clks_no_block() from atomic context.
*
* Return codes:
* None
*/
void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
{
	ipa3_active_clients_lock();
	ipa3_active_clients_log_inc(id, false);
	ipa3_ctx->ipa3_active_clients.cnt++;
	/* 0 -> 1 transition: clocks must be turned on */
	if (ipa3_ctx->ipa3_active_clients.cnt == 1)
		ipa3_enable_clks();
	IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
	ipa3_active_clients_unlock();
}
3372
/**
* ipa3_inc_client_enable_clks_no_block() - Only increment the number of active
* clients if no asynchronous actions should be done. Asynchronous actions are
* locking a mutex and waking up IPA HW.
*
* Safe for atomic context: uses a trylock and refuses (-EPERM) whenever
* taking the reference would require enabling clocks (cnt == 0).
*
* Return codes: 0 for success
* -EPERM if an asynchronous action should have been done
*/
int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
		*id)
{
	int res = 0;
	unsigned long flags;

	if (ipa3_active_clients_trylock(&flags) == 0)
		return -EPERM;

	/* cnt == 0 means clocks are off; enabling them would block */
	if (ipa3_ctx->ipa3_active_clients.cnt == 0) {
		res = -EPERM;
		goto bail;
	}
	ipa3_active_clients_log_inc(id, true);
	ipa3_ctx->ipa3_active_clients.cnt++;
	IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
bail:
	ipa3_active_clients_trylock_unlock(&flags);

	return res;
}
3402
/**
 * ipa3_dec_client_disable_clks() - Decrease active clients counter
 *
 * In case that there are no active clients this function also starts
 * TAG process. When TAG progress ends ipa clocks will be gated.
 * start_tag_process_again flag is set during this function to signal TAG
 * process to start again as there was another client that may send data to ipa
 *
 * Return codes:
 * None
 */
void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
{
	struct ipa_active_client_logging_info log_info;

	ipa3_active_clients_lock();
	ipa3_active_clients_log_dec(id, false);
	ipa3_ctx->ipa3_active_clients.cnt--;
	IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
	if (ipa3_ctx->ipa3_active_clients.cnt == 0) {
		if (ipa3_ctx->tag_process_before_gating) {
			ipa3_ctx->tag_process_before_gating = false;
			/*
			 * When TAG process ends, active clients will be
			 * decreased
			 */
			IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info,
					"TAG_PROCESS");
			ipa3_active_clients_log_inc(&log_info, false);
			/* hand the last reference over to the TAG work item;
			 * ipa3_start_tag_process() drops it when done
			 */
			ipa3_ctx->ipa3_active_clients.cnt = 1;
			queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_tag_work);
		} else {
			ipa3_disable_clks();
		}
	}
	ipa3_active_clients_unlock();
}
3440
/**
* ipa3_inc_acquire_wakelock() - Increase active clients counter, and
* acquire wakelock if necessary
*
* Return codes:
* None
*/
void ipa3_inc_acquire_wakelock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
	ipa3_ctx->wakelock_ref_cnt.cnt++;
	/* 0 -> 1 transition: take the wakeup source */
	if (ipa3_ctx->wakelock_ref_cnt.cnt == 1)
		__pm_stay_awake(&ipa3_ctx->w_lock);
	IPADBG_LOW("active wakelock ref cnt = %d\n",
		ipa3_ctx->wakelock_ref_cnt.cnt);
	spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
}
3460
/**
 * ipa3_dec_release_wakelock() - Decrease active clients counter
 *
 * In case if the ref count is 0, release the wakelock.
 *
 * Return codes:
 * None
 */
void ipa3_dec_release_wakelock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
	ipa3_ctx->wakelock_ref_cnt.cnt--;
	IPADBG_LOW("active wakelock ref cnt = %d\n",
		ipa3_ctx->wakelock_ref_cnt.cnt);
	/* last reference gone: allow system suspend again */
	if (ipa3_ctx->wakelock_ref_cnt.cnt == 0)
		__pm_relax(&ipa3_ctx->w_lock);
	spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
}
3481
/*
 * ipa3_set_required_perf_profile() - Select the IPA clock rate for the
 * requested bandwidth/voltage and apply it if clocks are currently on.
 * @floor_voltage: minimum acceptable voltage corner
 * @bandwidth_mbps: aggregate requested bandwidth, used for clock scaling
 *
 * Returns 0 on success, -EINVAL on a bad floor voltage, -EFAULT on an
 * unexpected internal voltage value.
 */
int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
		u32 bandwidth_mbps)
{
	enum ipa_voltage_level needed_voltage;
	u32 clk_rate;

	IPADBG_LOW("floor_voltage=%d, bandwidth_mbps=%u",
			floor_voltage, bandwidth_mbps);

	if (floor_voltage < IPA_VOLTAGE_UNSPECIFIED ||
		floor_voltage >= IPA_VOLTAGE_MAX) {
		IPAERR("bad voltage\n");
		return -EINVAL;
	}

	if (ipa3_ctx->enable_clock_scaling) {
		/* pick the voltage corner from bandwidth thresholds */
		IPADBG_LOW("Clock scaling is enabled\n");
		if (bandwidth_mbps >=
			ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo)
			needed_voltage = IPA_VOLTAGE_TURBO;
		else if (bandwidth_mbps >=
			ipa3_ctx->ctrl->clock_scaling_bw_threshold_nominal)
			needed_voltage = IPA_VOLTAGE_NOMINAL;
		else
			needed_voltage = IPA_VOLTAGE_SVS;
	} else {
		IPADBG_LOW("Clock scaling is disabled\n");
		needed_voltage = IPA_VOLTAGE_NOMINAL;
	}

	/* never go below the caller's floor */
	needed_voltage = max(needed_voltage, floor_voltage);
	switch (needed_voltage) {
	case IPA_VOLTAGE_SVS:
		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs;
		break;
	case IPA_VOLTAGE_NOMINAL:
		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_nominal;
		break;
	case IPA_VOLTAGE_TURBO:
		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
		break;
	default:
		IPAERR("bad voltage\n");
		WARN_ON(1);
		return -EFAULT;
	}

	if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) {
		IPADBG_LOW("Same voltage\n");
		return 0;
	}

	ipa3_active_clients_lock();
	ipa3_ctx->curr_ipa_clk_rate = clk_rate;
	IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
	if (ipa3_ctx->ipa3_active_clients.cnt > 0) {
		/* clocks are on - apply rate and bus vote immediately;
		 * otherwise the new rate takes effect on next enable
		 */
		if (ipa3_clk)
			clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
		if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
				ipa3_get_bus_vote()))
			WARN_ON(1);
	} else {
		IPADBG_LOW("clocks are gated, not setting rate\n");
	}
	ipa3_active_clients_unlock();
	IPADBG_LOW("Done\n");
	return 0;
}
3550
Amir Levya59ed3f2017-03-05 17:30:55 +02003551static void ipa3_process_irq_schedule_rel(void)
Amir Levy9659e592016-10-27 18:08:27 +03003552{
3553 queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq,
Amir Levya59ed3f2017-03-05 17:30:55 +02003554 &ipa3_transport_release_resource_work,
Amir Levy9659e592016-10-27 18:08:27 +03003555 msecs_to_jiffies(IPA_TRANSPORT_PROD_TIMEOUT_MSEC));
3556}
3557
/**
* ipa3_suspend_handler() - Handles the suspend interrupt:
* wakes up the suspended peripheral by requesting its consumer
* @interrupt: Interrupt type
* @private_data: The client's private data
* @interrupt_data: Interrupt specific information data
*
* For APPS consumer pipes a clock reference is taken (which un-suspends
* the pipe) and its delayed release is scheduled; for other pipes the
* corresponding RM resource is requested, enabling HOLB drop as a last
* resort when the request is not permitted.
*/
void ipa3_suspend_handler(enum ipa_irq_type interrupt,
				void *private_data,
				void *interrupt_data)
{
	enum ipa_rm_resource_name resource;
	u32 suspend_data =
		((struct ipa_tx_suspend_irq_data *)interrupt_data)->endpoints;
	u32 bmsk = 1;
	u32 i = 0;
	int res;
	struct ipa_ep_cfg_holb holb_cfg;

	IPADBG("interrupt=%d, interrupt_data=%u\n",
		interrupt, suspend_data);
	memset(&holb_cfg, 0, sizeof(holb_cfg));
	holb_cfg.tmr_val = 0;

	/* suspend_data is a bitmap with one bit per pipe */
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if ((suspend_data & bmsk) && (ipa3_ctx->ep[i].valid)) {
			if (IPA_CLIENT_IS_APPS_CONS(ipa3_ctx->ep[i].client)) {
				/*
				 * pipe will be unsuspended as part of
				 * enabling IPA clocks
				 */
				if (!atomic_read(
					&ipa3_ctx->transport_pm.dec_clients)
					) {
					IPA_ACTIVE_CLIENTS_INC_EP(
						ipa3_ctx->ep[i].client);
					IPADBG_LOW("Pipes un-suspended.\n");
					IPADBG_LOW("Enter poll mode.\n");
					atomic_set(
					&ipa3_ctx->transport_pm.dec_clients,
					1);
					ipa3_process_irq_schedule_rel();
				}
			} else {
				resource = ipa3_get_rm_resource_from_ep(i);
				res =
				ipa_rm_request_resource_with_timer(resource);
				if (res == -EPERM &&
					IPA_CLIENT_IS_CONS(
					ipa3_ctx->ep[i].client)) {
					holb_cfg.en = 1;
					res = ipa3_cfg_ep_holb_by_client(
					ipa3_ctx->ep[i].client, &holb_cfg);
					if (res) {
						/* cannot drop nor deliver -
						 * pipe would stall the HW
						 */
						IPAERR("holb en fail, stall\n");
						BUG();
					}
				}
			}
		}
		bmsk = bmsk << 1;
	}
}
3621
3622/**
3623* ipa3_restore_suspend_handler() - restores the original suspend IRQ handler
3624* as it was registered in the IPA init sequence.
3625* Return codes:
3626* 0: success
3627* -EPERM: failed to remove current handler or failed to add original handler
3628*/
3629int ipa3_restore_suspend_handler(void)
3630{
3631 int result = 0;
3632
3633 result = ipa3_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ);
3634 if (result) {
3635 IPAERR("remove handler for suspend interrupt failed\n");
3636 return -EPERM;
3637 }
3638
3639 result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
3640 ipa3_suspend_handler, false, NULL);
3641 if (result) {
3642 IPAERR("register handler for suspend interrupt failed\n");
3643 result = -EPERM;
3644 }
3645
3646 IPADBG("suspend handler successfully restored\n");
3647
3648 return result;
3649}
3650
/* RM release callback for APPS_CONS - nothing to do, always grants */
static int ipa3_apps_cons_release_resource(void)
{
	return 0;
}
3655
/* RM request callback for APPS_CONS - nothing to do, always grants */
static int ipa3_apps_cons_request_resource(void)
{
	return 0;
}
3660
Amir Levya59ed3f2017-03-05 17:30:55 +02003661static void ipa3_transport_release_resource(struct work_struct *work)
Amir Levy9659e592016-10-27 18:08:27 +03003662{
Sridhar Ancha99b505b2016-04-21 23:11:10 +05303663 mutex_lock(&ipa3_ctx->transport_pm.transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03003664 /* check whether still need to decrease client usage */
3665 if (atomic_read(&ipa3_ctx->transport_pm.dec_clients)) {
3666 if (atomic_read(&ipa3_ctx->transport_pm.eot_activity)) {
3667 IPADBG("EOT pending Re-scheduling\n");
Amir Levya59ed3f2017-03-05 17:30:55 +02003668 ipa3_process_irq_schedule_rel();
Amir Levy9659e592016-10-27 18:08:27 +03003669 } else {
3670 atomic_set(&ipa3_ctx->transport_pm.dec_clients, 0);
Amir Levya59ed3f2017-03-05 17:30:55 +02003671 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TRANSPORT_RESOURCE");
Amir Levy9659e592016-10-27 18:08:27 +03003672 }
3673 }
3674 atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
Sridhar Ancha99b505b2016-04-21 23:11:10 +05303675 mutex_unlock(&ipa3_ctx->transport_pm.transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03003676}
3677
3678int ipa3_create_apps_resource(void)
3679{
3680 struct ipa_rm_create_params apps_cons_create_params;
3681 struct ipa_rm_perf_profile profile;
3682 int result = 0;
3683
3684 memset(&apps_cons_create_params, 0,
3685 sizeof(apps_cons_create_params));
3686 apps_cons_create_params.name = IPA_RM_RESOURCE_APPS_CONS;
3687 apps_cons_create_params.request_resource =
3688 ipa3_apps_cons_request_resource;
3689 apps_cons_create_params.release_resource =
3690 ipa3_apps_cons_release_resource;
3691 result = ipa_rm_create_resource(&apps_cons_create_params);
3692 if (result) {
3693 IPAERR("ipa_rm_create_resource failed\n");
3694 return result;
3695 }
3696
3697 profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
3698 ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile);
3699
3700 return result;
3701}
3702
/**
 * ipa3_init_interrupts() - Register to IPA IRQs
 *
 * Return codes: 0 in success, negative in failure
 *
 */
int ipa3_init_interrupts(void)
{
	int result;

	/*register IPA IRQ handler*/
	result = ipa3_interrupts_init(ipa3_res.ipa_irq, 0,
			master_dev);
	if (result) {
		IPAERR("ipa interrupts initialization failed\n");
		return -ENODEV;
	}

	/*add handler for suspend interrupt*/
	result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
			ipa3_suspend_handler, false, NULL);
	if (result) {
		IPAERR("register handler for suspend interrupt failed\n");
		result = -ENODEV;
		goto fail_add_interrupt_handler;
	}

	return 0;

fail_add_interrupt_handler:
	/* undo ipa3_interrupts_init() by releasing the IRQ line */
	free_irq(ipa3_res.ipa_irq, master_dev);
	return result;
}
3736
3737/**
3738 * ipa3_destroy_flt_tbl_idrs() - destroy the idr structure for flt tables
3739 * The idr strcuture per filtering table is intended for rule id generation
3740 * per filtering rule.
3741 */
3742static void ipa3_destroy_flt_tbl_idrs(void)
3743{
3744 int i;
3745 struct ipa3_flt_tbl *flt_tbl;
3746
3747 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
3748 if (!ipa_is_ep_support_flt(i))
3749 continue;
3750
3751 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
3752 idr_destroy(&flt_tbl->rule_ids);
3753 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
3754 idr_destroy(&flt_tbl->rule_ids);
3755 }
3756}
3757
/*
 * ipa3_freeze_clock_vote_and_notify_modem() - On panic/SSR, pin the current
 * clock state and report it to the modem over smp2p GPIOs: one GPIO carries
 * whether IPA clocks are on, a second signals that the response is complete.
 * Idempotent - does nothing once the response was already sent.
 */
static void ipa3_freeze_clock_vote_and_notify_modem(void)
{
	int res;
	struct ipa_active_client_logging_info log_info;

	if (ipa3_ctx->smp2p_info.res_sent)
		return;

	if (ipa3_ctx->smp2p_info.out_base_id == 0) {
		IPAERR("smp2p out gpio not assigned\n");
		return;
	}

	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "FREEZE_VOTE");
	/* non-blocking grab: succeeds only if clocks are already on */
	res = ipa3_inc_client_enable_clks_no_block(&log_info);
	if (res)
		ipa3_ctx->smp2p_info.ipa_clk_on = false;
	else
		ipa3_ctx->smp2p_info.ipa_clk_on = true;

	gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
		IPA_GPIO_OUT_CLK_VOTE_IDX,
		ipa3_ctx->smp2p_info.ipa_clk_on);
	gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
		IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX, 1);

	ipa3_ctx->smp2p_info.res_sent = true;
	IPADBG("IPA clocks are %s\n",
		ipa3_ctx->smp2p_info.ipa_clk_on ? "ON" : "OFF");
}
3788
/*
 * ipa3_reset_freeze_vote() - Undo ipa3_freeze_clock_vote_and_notify_modem():
 * drop the clock reference it took (if any) and clear both smp2p GPIOs.
 */
void ipa3_reset_freeze_vote(void)
{
	if (ipa3_ctx->smp2p_info.res_sent == false)
		return;

	/* a reference was taken only when clocks were found on */
	if (ipa3_ctx->smp2p_info.ipa_clk_on)
		IPA_ACTIVE_CLIENTS_DEC_SPECIAL("FREEZE_VOTE");

	gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
		IPA_GPIO_OUT_CLK_VOTE_IDX, 0);
	gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
		IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX, 0);

	ipa3_ctx->smp2p_info.res_sent = false;
	ipa3_ctx->smp2p_info.ipa_clk_on = false;
}
3805
/*
 * ipa3_panic_notifier() - Kernel panic callback: freeze the clock vote,
 * notify the modem over smp2p and run the uC panic handler.
 * Always returns NOTIFY_DONE so the panic chain continues.
 */
static int ipa3_panic_notifier(struct notifier_block *this,
	unsigned long event, void *ptr)
{
	int res;

	ipa3_freeze_clock_vote_and_notify_modem();

	IPADBG("Calling uC panic handler\n");
	res = ipa3_uc_panic_notifier(this, event, ptr);
	if (res)
		IPAERR("uC panic handler failed %d\n", res);

	return NOTIFY_DONE;
}
3820
/* Panic notifier descriptor; registered by ipa3_register_panic_hdlr() */
static struct notifier_block ipa3_panic_blk = {
	.notifier_call = ipa3_panic_notifier,
	/* IPA panic handler needs to run before modem shuts down */
	.priority = INT_MAX,
};
3826
/* Hook ipa3_panic_blk into the global kernel panic notifier chain */
static void ipa3_register_panic_hdlr(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
		&ipa3_panic_blk);
}
3832
/*
 * ipa3_trigger_ipa_ready_cbs() - Invoke every registered "IPA ready"
 * callback under ipa3_ctx->lock, in registration order.
 */
static void ipa3_trigger_ipa_ready_cbs(void)
{
	struct ipa3_ready_cb_info *info;

	mutex_lock(&ipa3_ctx->lock);

	/* Call all the CBs */
	list_for_each_entry(info, &ipa3_ctx->ipa_ready_cb_list, link)
		if (info->ready_cb)
			info->ready_cb(info->user_data);

	mutex_unlock(&ipa3_ctx->lock);
}
3846
/*
 * ipa3_gsi_pre_fw_load_init() - Program the GSI register space before the
 * FW is loaded, using the transport memory resources from DT.
 * Returns 0 on success, -EINVAL on GSI configuration failure.
 */
static int ipa3_gsi_pre_fw_load_init(void)
{
	int result;

	result = gsi_configure_regs(ipa3_res.transport_mem_base,
		ipa3_res.transport_mem_size,
		ipa3_res.ipa_mem_base);
	if (result) {
		IPAERR("Failed to configure GSI registers\n");
		return -EINVAL;
	}

	return 0;
}
3861
Skylar Chang0c17c7d2016-10-31 09:57:54 -07003862static void ipa3_uc_is_loaded(void)
3863{
3864 IPADBG("\n");
3865 complete_all(&ipa3_ctx->uc_loaded_completion_obj);
3866}
3867
Amir Levy41644242016-11-03 15:38:09 +02003868static enum gsi_ver ipa3_get_gsi_ver(enum ipa_hw_type ipa_hw_type)
3869{
3870 enum gsi_ver gsi_ver;
3871
3872 switch (ipa_hw_type) {
3873 case IPA_HW_v3_0:
3874 case IPA_HW_v3_1:
3875 gsi_ver = GSI_VER_1_0;
3876 break;
3877 case IPA_HW_v3_5:
3878 gsi_ver = GSI_VER_1_2;
3879 break;
3880 case IPA_HW_v3_5_1:
3881 gsi_ver = GSI_VER_1_3;
3882 break;
3883 default:
3884 IPAERR("No GSI version for ipa type %d\n", ipa_hw_type);
3885 WARN_ON(1);
3886 gsi_ver = GSI_VER_ERR;
3887 }
3888
3889 IPADBG("GSI version %d\n", gsi_ver);
3890
3891 return gsi_ver;
3892}
3893
Amir Levy9659e592016-10-27 18:08:27 +03003894/**
3895 * ipa3_post_init() - Initialize the IPA Driver (Part II).
3896 * This part contains all initialization which requires interaction with
Amir Levya59ed3f2017-03-05 17:30:55 +02003897 * IPA HW (via GSI).
Amir Levy9659e592016-10-27 18:08:27 +03003898 *
3899 * @resource_p: contain platform specific values from DST file
3900 * @pdev: The platform device structure representing the IPA driver
3901 *
3902 * Function initialization process:
Amir Levy54fe4d32017-03-16 11:21:49 +02003903 * - Initialize endpoints bitmaps
3904 * - Initialize resource groups min and max values
3905 * - Initialize filtering lists heads and idr
3906 * - Initialize interrupts
Amir Levya59ed3f2017-03-05 17:30:55 +02003907 * - Register GSI
Amir Levy9659e592016-10-27 18:08:27 +03003908 * - Setup APPS pipes
3909 * - Initialize tethering bridge
3910 * - Initialize IPA debugfs
3911 * - Initialize IPA uC interface
3912 * - Initialize WDI interface
3913 * - Initialize USB interface
3914 * - Register for panic handler
3915 * - Trigger IPA ready callbacks (to all subscribers)
3916 * - Trigger IPA completion object (to all who wait on it)
3917 */
3918static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
3919 struct device *ipa_dev)
3920{
3921 int result;
Amir Levy9659e592016-10-27 18:08:27 +03003922 struct gsi_per_props gsi_props;
Skylar Chang0c17c7d2016-10-31 09:57:54 -07003923 struct ipa3_uc_hdlrs uc_hdlrs = { 0 };
Amir Levy54fe4d32017-03-16 11:21:49 +02003924 struct ipa3_flt_tbl *flt_tbl;
3925 int i;
3926
3927 /*
3928 * indication whether working in MHI config or non MHI config is given
3929 * in ipa3_write which is launched before ipa3_post_init. i.e. from
3930 * this point it is safe to use ipa3_ep_mapping array and the correct
3931 * entry will be returned from ipa3_get_hw_type_index()
3932 */
3933 ipa_init_ep_flt_bitmap();
3934 IPADBG("EP with flt support bitmap 0x%x (%u pipes)\n",
3935 ipa3_ctx->ep_flt_bitmap, ipa3_ctx->ep_flt_num);
3936
3937 /* Assign resource limitation to each group */
3938 ipa3_set_resorce_groups_min_max_limits();
3939
3940 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
3941 if (!ipa_is_ep_support_flt(i))
3942 continue;
3943
3944 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
3945 INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
3946 flt_tbl->in_sys[IPA_RULE_HASHABLE] =
3947 !ipa3_ctx->ip4_flt_tbl_hash_lcl;
3948 flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
3949 !ipa3_ctx->ip4_flt_tbl_nhash_lcl;
3950 idr_init(&flt_tbl->rule_ids);
3951
3952 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
3953 INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
3954 flt_tbl->in_sys[IPA_RULE_HASHABLE] =
3955 !ipa3_ctx->ip6_flt_tbl_hash_lcl;
3956 flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
3957 !ipa3_ctx->ip6_flt_tbl_nhash_lcl;
3958 idr_init(&flt_tbl->rule_ids);
3959 }
3960
3961 if (!ipa3_ctx->apply_rg10_wa) {
3962 result = ipa3_init_interrupts();
3963 if (result) {
3964 IPAERR("ipa initialization of interrupts failed\n");
3965 result = -ENODEV;
3966 goto fail_register_device;
3967 }
3968 } else {
3969 IPADBG("Initialization of ipa interrupts skipped\n");
3970 }
Amir Levy9659e592016-10-27 18:08:27 +03003971
Amir Levy3afd94a2017-01-05 10:19:13 +02003972 /*
Amir Levy5cfbb322017-01-09 14:53:02 +02003973 * IPAv3.5 and above requires to disable prefetch for USB in order
3974 * to allow MBIM to work, currently MBIM is not needed in MHI mode.
Amir Levy3afd94a2017-01-05 10:19:13 +02003975 */
Amir Levy5cfbb322017-01-09 14:53:02 +02003976 if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5) &&
3977 (!ipa3_ctx->ipa_config_is_mhi))
Amir Levy3afd94a2017-01-05 10:19:13 +02003978 ipa3_disable_prefetch(IPA_CLIENT_USB_CONS);
3979
Amir Levya59ed3f2017-03-05 17:30:55 +02003980 memset(&gsi_props, 0, sizeof(gsi_props));
3981 gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type);
3982 gsi_props.ee = resource_p->ee;
3983 gsi_props.intr = GSI_INTR_IRQ;
3984 gsi_props.irq = resource_p->transport_irq;
3985 gsi_props.phys_addr = resource_p->transport_mem_base;
3986 gsi_props.size = resource_p->transport_mem_size;
3987 gsi_props.notify_cb = ipa_gsi_notify_cb;
3988 gsi_props.req_clk_cb = NULL;
3989 gsi_props.rel_clk_cb = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03003990
Amir Levya59ed3f2017-03-05 17:30:55 +02003991 result = gsi_register_device(&gsi_props,
3992 &ipa3_ctx->gsi_dev_hdl);
3993 if (result != GSI_STATUS_SUCCESS) {
3994 IPAERR(":gsi register error - %d\n", result);
3995 result = -ENODEV;
3996 goto fail_register_device;
Amir Levy9659e592016-10-27 18:08:27 +03003997 }
Amir Levya59ed3f2017-03-05 17:30:55 +02003998 IPADBG("IPA gsi is registered\n");
Amir Levy9659e592016-10-27 18:08:27 +03003999
4000 /* setup the AP-IPA pipes */
4001 if (ipa3_setup_apps_pipes()) {
4002 IPAERR(":failed to setup IPA-Apps pipes\n");
4003 result = -ENODEV;
4004 goto fail_setup_apps_pipes;
4005 }
Amir Levya59ed3f2017-03-05 17:30:55 +02004006 IPADBG("IPA GPI pipes were connected\n");
Amir Levy9659e592016-10-27 18:08:27 +03004007
4008 if (ipa3_ctx->use_ipa_teth_bridge) {
4009 /* Initialize the tethering bridge driver */
4010 result = ipa3_teth_bridge_driver_init();
4011 if (result) {
4012 IPAERR(":teth_bridge init failed (%d)\n", -result);
4013 result = -ENODEV;
4014 goto fail_teth_bridge_driver_init;
4015 }
4016 IPADBG("teth_bridge initialized");
4017 }
4018
4019 ipa3_debugfs_init();
4020
4021 result = ipa3_uc_interface_init();
4022 if (result)
4023 IPAERR(":ipa Uc interface init failed (%d)\n", -result);
4024 else
4025 IPADBG(":ipa Uc interface init ok\n");
4026
Skylar Chang0c17c7d2016-10-31 09:57:54 -07004027 uc_hdlrs.ipa_uc_loaded_hdlr = ipa3_uc_is_loaded;
4028 ipa3_uc_register_handlers(IPA_HW_FEATURE_COMMON, &uc_hdlrs);
4029
Amir Levy9659e592016-10-27 18:08:27 +03004030 result = ipa3_wdi_init();
4031 if (result)
4032 IPAERR(":wdi init failed (%d)\n", -result);
4033 else
4034 IPADBG(":wdi init ok\n");
4035
4036 result = ipa3_ntn_init();
4037 if (result)
4038 IPAERR(":ntn init failed (%d)\n", -result);
4039 else
4040 IPADBG(":ntn init ok\n");
4041
4042 ipa3_register_panic_hdlr();
4043
4044 ipa3_ctx->q6_proxy_clk_vote_valid = true;
4045
4046 mutex_lock(&ipa3_ctx->lock);
4047 ipa3_ctx->ipa_initialization_complete = true;
4048 mutex_unlock(&ipa3_ctx->lock);
4049
4050 ipa3_trigger_ipa_ready_cbs();
4051 complete_all(&ipa3_ctx->init_completion_obj);
4052 pr_info("IPA driver initialization was successful.\n");
4053
4054 return 0;
4055
4056fail_teth_bridge_driver_init:
4057 ipa3_teardown_apps_pipes();
4058fail_setup_apps_pipes:
Amir Levya59ed3f2017-03-05 17:30:55 +02004059 gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
Amir Levy9659e592016-10-27 18:08:27 +03004060fail_register_device:
4061 ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
4062 ipa_rm_exit();
4063 cdev_del(&ipa3_ctx->cdev);
4064 device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
4065 unregister_chrdev_region(ipa3_ctx->dev_num, 1);
Amir Levy9659e592016-10-27 18:08:27 +03004066 ipa3_destroy_flt_tbl_idrs();
4067 idr_destroy(&ipa3_ctx->ipa_idr);
4068 kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
4069 kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
4070 kmem_cache_destroy(ipa3_ctx->rt_tbl_cache);
4071 kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache);
4072 kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache);
4073 kmem_cache_destroy(ipa3_ctx->hdr_offset_cache);
4074 kmem_cache_destroy(ipa3_ctx->hdr_cache);
4075 kmem_cache_destroy(ipa3_ctx->rt_rule_cache);
4076 kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
4077 destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
4078 destroy_workqueue(ipa3_ctx->power_mgmt_wq);
4079 iounmap(ipa3_ctx->mmio);
4080 ipa3_disable_clks();
Ghanim Fodi6a831342017-03-07 18:19:15 +02004081 if (ipa3_clk)
4082 clk_put(ipa3_clk);
4083 ipa3_clk = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03004084 msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
4085 if (ipa3_bus_scale_table) {
4086 msm_bus_cl_clear_pdata(ipa3_bus_scale_table);
4087 ipa3_bus_scale_table = NULL;
4088 }
4089 kfree(ipa3_ctx->ctrl);
4090 kfree(ipa3_ctx);
4091 ipa3_ctx = NULL;
4092 return result;
4093}
4094
4095static int ipa3_trigger_fw_loading_mdms(void)
4096{
4097 int result;
4098 const struct firmware *fw;
4099
4100 IPADBG("FW loading process initiated\n");
4101
4102 result = request_firmware(&fw, IPA_FWS_PATH, ipa3_ctx->dev);
4103 if (result < 0) {
4104 IPAERR("request_firmware failed, error %d\n", result);
4105 return result;
4106 }
4107 if (fw == NULL) {
4108 IPAERR("Firmware is NULL!\n");
4109 return -EINVAL;
4110 }
4111
4112 IPADBG("FWs are available for loading\n");
4113
Ghanim Fodi37b64952017-01-24 15:42:30 +02004114 result = ipa3_load_fws(fw, ipa3_res.transport_mem_base);
Amir Levy9659e592016-10-27 18:08:27 +03004115 if (result) {
4116 IPAERR("IPA FWs loading has failed\n");
4117 release_firmware(fw);
4118 return result;
4119 }
4120
4121 result = gsi_enable_fw(ipa3_res.transport_mem_base,
Amir Levy85dcd172016-12-06 17:47:39 +02004122 ipa3_res.transport_mem_size,
4123 ipa3_get_gsi_ver(ipa3_res.ipa_hw_type));
Amir Levy9659e592016-10-27 18:08:27 +03004124 if (result) {
4125 IPAERR("Failed to enable GSI FW\n");
4126 release_firmware(fw);
4127 return result;
4128 }
4129
4130 release_firmware(fw);
4131
4132 IPADBG("FW loading process is complete\n");
4133 return 0;
4134}
4135
4136static int ipa3_trigger_fw_loading_msms(void)
4137{
4138 void *subsystem_get_retval = NULL;
4139
4140 IPADBG("FW loading process initiated\n");
4141
4142 subsystem_get_retval = subsystem_get(IPA_SUBSYSTEM_NAME);
4143 if (IS_ERR_OR_NULL(subsystem_get_retval)) {
4144 IPAERR("Unable to trigger PIL process for FW loading\n");
4145 return -EINVAL;
4146 }
4147
4148 IPADBG("FW loading process is complete\n");
4149 return 0;
4150}
4151
4152static ssize_t ipa3_write(struct file *file, const char __user *buf,
4153 size_t count, loff_t *ppos)
4154{
4155 unsigned long missing;
4156 int result = -EINVAL;
4157
4158 char dbg_buff[16] = { 0 };
4159
4160 if (sizeof(dbg_buff) < count + 1)
4161 return -EFAULT;
4162
4163 missing = copy_from_user(dbg_buff, buf, count);
4164
4165 if (missing) {
4166 IPAERR("Unable to copy data from user\n");
4167 return -EFAULT;
4168 }
4169
4170 /* Prevent consequent calls from trying to load the FW again. */
4171 if (ipa3_is_ready())
4172 return count;
4173
Amir Levya59ed3f2017-03-05 17:30:55 +02004174 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
Amir Levy9659e592016-10-27 18:08:27 +03004175
Amir Levy54fe4d32017-03-16 11:21:49 +02004176 if (ipa3_is_msm_device()) {
Amir Levya59ed3f2017-03-05 17:30:55 +02004177 result = ipa3_trigger_fw_loading_msms();
Amir Levy54fe4d32017-03-16 11:21:49 +02004178 } else {
4179 if (!strcasecmp(dbg_buff, "MHI")) {
4180 ipa3_ctx->ipa_config_is_mhi = true;
4181 pr_info(
4182 "IPA is loading with MHI configuration\n");
4183 } else {
4184 pr_info(
4185 "IPA is loading with non MHI configuration\n");
4186 }
Amir Levya59ed3f2017-03-05 17:30:55 +02004187 result = ipa3_trigger_fw_loading_mdms();
Amir Levy54fe4d32017-03-16 11:21:49 +02004188 }
Amir Levya59ed3f2017-03-05 17:30:55 +02004189 /* No IPAv3.x chipsets that don't support FW loading */
Amir Levy9659e592016-10-27 18:08:27 +03004190
Amir Levya59ed3f2017-03-05 17:30:55 +02004191 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
Amir Levy9659e592016-10-27 18:08:27 +03004192
Amir Levya59ed3f2017-03-05 17:30:55 +02004193 if (result) {
4194 IPAERR("FW loading process has failed\n");
Ghanim Fodi24fee1c2017-02-12 15:25:53 +02004195 return result;
Amir Levya59ed3f2017-03-05 17:30:55 +02004196 } else
4197 ipa3_post_init(&ipa3_res, ipa3_ctx->dev);
4198
Amir Levy9659e592016-10-27 18:08:27 +03004199 return count;
4200}
4201
/*
 * ipa3_tz_unlock_reg() - ask TrustZone to unlock IPA register regions.
 * @ipa3_ctx:	IPA driver context holding the regions to unlock
 *		(ipa_tz_unlock_reg[] / ipa_tz_unlock_reg_num)
 *
 * Builds an iovec array describing each region (page-aligned address and
 * size), flushes it to DDR so the secure world observes the data, and
 * issues the TZ_MEM_PROTECT_REGION_ID SCM call. Used to make the listed
 * regions accessible to the IPA uC (see call site in ipa3_pre_init()).
 *
 * Return: 0 on success or when there is nothing to unlock, -ENOMEM on
 * allocation failure, -EFAULT if the SCM call fails.
 */
static int ipa3_tz_unlock_reg(struct ipa3_context *ipa3_ctx)
{
	int i, size, ret, resp;
	struct tz_smmu_ipa_protect_region_iovec_s *ipa_tz_unlock_vec;
	struct tz_smmu_ipa_protect_region_s cmd_buf;

	if (ipa3_ctx && ipa3_ctx->ipa_tz_unlock_reg_num > 0) {
		size = ipa3_ctx->ipa_tz_unlock_reg_num *
			sizeof(struct tz_smmu_ipa_protect_region_iovec_s);
		/* page-granular allocation for the TZ-visible iovec buffer */
		ipa_tz_unlock_vec = kzalloc(PAGE_ALIGN(size), GFP_KERNEL);
		if (ipa_tz_unlock_vec == NULL)
			return -ENOMEM;

		for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) {
			/*
			 * addr ^ (addr & 0xFFF) clears the low 12 bits,
			 * i.e. rounds the address down to a 4KB page
			 * boundary (equivalent to addr & ~0xFFFULL).
			 */
			ipa_tz_unlock_vec[i].input_addr =
				ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr ^
				(ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr &
				0xFFF);
			ipa_tz_unlock_vec[i].output_addr =
				ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr ^
				(ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr &
				0xFFF);
			ipa_tz_unlock_vec[i].size =
				ipa3_ctx->ipa_tz_unlock_reg[i].size;
			ipa_tz_unlock_vec[i].attr = IPA_TZ_UNLOCK_ATTRIBUTE;
		}

		/* pass physical address of command buffer */
		cmd_buf.iovec_buf = virt_to_phys((void *)ipa_tz_unlock_vec);
		cmd_buf.size_bytes = size;

		/*
		 * flush cache to DDR (L1 then outer cache) so the secure
		 * world reads the freshly written iovec, not stale RAM
		 */
		__cpuc_flush_dcache_area((void *)ipa_tz_unlock_vec, size);
		outer_flush_range(cmd_buf.iovec_buf, cmd_buf.iovec_buf + size);

		ret = scm_call(SCM_SVC_MP, TZ_MEM_PROTECT_REGION_ID, &cmd_buf,
				sizeof(cmd_buf), &resp, sizeof(resp));
		if (ret) {
			IPAERR("scm call SCM_SVC_MP failed: %d\n", ret);
			kfree(ipa_tz_unlock_vec);
			return -EFAULT;
		}
		kfree(ipa_tz_unlock_vec);
	}
	return 0;
}
4248
/*
 * ipa3_alloc_pkt_init() - pre-build an IP_PACKET_INIT immediate command
 * for every IPA pipe.
 *
 * Constructs one IP_PACKET_INIT immediate command per pipe, packed into a
 * single DMA-coherent buffer, and caches the physical address of each
 * pipe's command in ipa3_ctx->pkt_init_imm[]. This lets the data path
 * steer a packet to a given destination pipe without constructing the
 * command at transmit time.
 *
 * NOTE(review): the DMA buffer is intentionally never freed on the
 * success path — only the per-pipe physical addresses are retained, so
 * the buffer lives for the driver's lifetime.
 *
 * Return: 0 on success, -ENOMEM on command-construction or DMA
 * allocation failure.
 */
static int ipa3_alloc_pkt_init(void)
{
	struct ipa_mem_buffer mem;
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	struct ipahal_imm_cmd_ip_packet_init cmd = {0};
	int i;

	/*
	 * Build a throw-away command first only to learn the payload
	 * length; the length is assumed identical for every pipe's
	 * command (same command type, fixed-size payload).
	 */
	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
		&cmd, false);
	if (!cmd_pyld) {
		IPAERR("failed to construct IMM cmd\n");
		return -ENOMEM;
	}

	mem.size = cmd_pyld->len * ipa3_ctx->ipa_num_pipes;
	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
		&mem.phys_base, GFP_KERNEL);
	if (!mem.base) {
		IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
		ipahal_destroy_imm_cmd(cmd_pyld);
		return -ENOMEM;
	}
	ipahal_destroy_imm_cmd(cmd_pyld);

	memset(mem.base, 0, mem.size);
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		/* one command per destination pipe, placed at slot i */
		cmd.destination_pipe_index = i;
		cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
			&cmd, false);
		if (!cmd_pyld) {
			IPAERR("failed to construct IMM cmd\n");
			dma_free_coherent(ipa3_ctx->pdev,
				mem.size,
				mem.base,
				mem.phys_base);
			return -ENOMEM;
		}
		memcpy(mem.base + i * cmd_pyld->len, cmd_pyld->data,
			cmd_pyld->len);
		/* cache the physical address of this pipe's command */
		ipa3_ctx->pkt_init_imm[i] = mem.phys_base + i * cmd_pyld->len;
		ipahal_destroy_imm_cmd(cmd_pyld);
	}

	return 0;
}
4294
/**
 * ipa3_pre_init() - Initialize the IPA Driver.
 * This part contains all initialization which doesn't require IPA HW, such
 * as structure allocations and initializations, register writes, etc.
 *
 * @resource_p: contain platform specific values from DST file
 * @ipa_dev: The platform device structure representing the IPA driver
 *
 * Function initialization process:
 * Allocate memory for the driver context data struct
 * Initializing the ipa3_ctx with :
 *    1)parsed values from the dts file
 *    2)parameters passed to the module initialization
 *    3)read HW values(such as core memory size)
 * Map IPA core registers to CPU memory
 * Restart IPA core(HW reset)
 * Initialize the look-aside caches(kmem_cache/slab) for filter,
 *   routing and IPA-tree
 * Create memory pool with 4 objects for DMA operations(each object
 *   is 512Bytes long), this object will be use for tx(A5->IPA)
 * Initialize lists head(routing, hdr, system pipes)
 * Initialize mutexes (for ipa_ctx and NAT memory mutexes)
 * Initialize spinlocks (for list related to A5<->IPA pipes)
 * Initialize 2 single-threaded work-queue named "ipa rx wq" and "ipa tx wq"
 * Initialize Red-Black-Tree(s) for handles of header,routing rule,
 *   routing table ,filtering rule
 * Initialize the filter block by committing IPV4 and IPV6 default rules
 * Create empty routing table in system memory(no committing)
 * Create a char-device for IPA
 * Initialize IPA RM (resource manager)
 * Configure GSI registers (in GSI case)
 *
 * Return: 0 on success, negative errno on failure (all partially
 * acquired resources are released via the goto-cleanup ladder).
 */
static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
		struct device *ipa_dev)
{
	int result = 0;
	int i;
	struct ipa3_rt_tbl_set *rset;
	struct ipa_active_client_logging_info log_info;

	IPADBG("IPA Driver initialization started\n");

	/* global driver context - freed (and NULLed) on any failure below */
	ipa3_ctx = kzalloc(sizeof(*ipa3_ctx), GFP_KERNEL);
	if (!ipa3_ctx) {
		IPAERR(":kzalloc err.\n");
		result = -ENOMEM;
		goto fail_mem_ctx;
	}

	ipa3_ctx->logbuf = ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa", 0);
	if (ipa3_ctx->logbuf == NULL) {
		IPAERR("failed to get logbuf\n");
		result = -ENOMEM;
		goto fail_logbuf;
	}

	/* copy platform/DT derived configuration into the context */
	ipa3_ctx->pdev = ipa_dev;
	ipa3_ctx->uc_pdev = ipa_dev;
	ipa3_ctx->smmu_present = smmu_info.present;
	if (!ipa3_ctx->smmu_present)
		ipa3_ctx->smmu_s1_bypass = true;
	else
		ipa3_ctx->smmu_s1_bypass = smmu_info.s1_bypass;
	ipa3_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
	ipa3_ctx->ipa_wrapper_size = resource_p->ipa_mem_size;
	ipa3_ctx->ipa_hw_type = resource_p->ipa_hw_type;
	ipa3_ctx->ipa3_hw_mode = resource_p->ipa3_hw_mode;
	ipa3_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
	ipa3_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt;
	ipa3_ctx->ipa_wdi2 = resource_p->ipa_wdi2;
	ipa3_ctx->use_64_bit_dma_mask = resource_p->use_64_bit_dma_mask;
	ipa3_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size;
	ipa3_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size;
	ipa3_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset;
	ipa3_ctx->tethered_flow_control = resource_p->tethered_flow_control;
	ipa3_ctx->ee = resource_p->ee;
	ipa3_ctx->apply_rg10_wa = resource_p->apply_rg10_wa;
	ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa;
	ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
	/* deep-copy the TZ unlock region list from platform resources */
	if (resource_p->ipa_tz_unlock_reg) {
		ipa3_ctx->ipa_tz_unlock_reg_num =
			resource_p->ipa_tz_unlock_reg_num;
		ipa3_ctx->ipa_tz_unlock_reg = kcalloc(
			ipa3_ctx->ipa_tz_unlock_reg_num,
			sizeof(*ipa3_ctx->ipa_tz_unlock_reg),
			GFP_KERNEL);
		if (ipa3_ctx->ipa_tz_unlock_reg == NULL) {
			result = -ENOMEM;
			goto fail_tz_unlock_reg;
		}
		for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) {
			ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr =
				resource_p->ipa_tz_unlock_reg[i].reg_addr;
			ipa3_ctx->ipa_tz_unlock_reg[i].size =
				resource_p->ipa_tz_unlock_reg[i].size;
		}
	}

	/* unlock registers for uc */
	ipa3_tz_unlock_reg(ipa3_ctx);

	/* default aggregation parameters */
	ipa3_ctx->aggregation_type = IPA_MBIM_16;
	ipa3_ctx->aggregation_byte_limit = 1;
	ipa3_ctx->aggregation_time_limit = 0;

	ipa3_ctx->ctrl = kzalloc(sizeof(*ipa3_ctx->ctrl), GFP_KERNEL);
	if (!ipa3_ctx->ctrl) {
		IPAERR("memory allocation error for ctrl\n");
		result = -ENOMEM;
		goto fail_mem_ctrl;
	}
	/* bind HW-version-specific function pointers / parameters */
	result = ipa3_controller_static_bind(ipa3_ctx->ctrl,
			ipa3_ctx->ipa_hw_type);
	if (result) {
		IPAERR("fail to static bind IPA ctrl.\n");
		result = -EFAULT;
		goto fail_bind;
	}

	result = ipa3_init_mem_partition(master_dev->of_node);
	if (result) {
		IPAERR(":ipa3_init_mem_partition failed!\n");
		result = -ENODEV;
		goto fail_init_mem_partition;
	}

	if (ipa3_bus_scale_table) {
		IPADBG("Use bus scaling info from device tree #usecases=%d\n",
			ipa3_bus_scale_table->num_usecases);
		ipa3_ctx->ctrl->msm_bus_data_ptr = ipa3_bus_scale_table;
	}

	/* get BUS handle */
	ipa3_ctx->ipa_bus_hdl =
		msm_bus_scale_register_client(
			ipa3_ctx->ctrl->msm_bus_data_ptr);
	if (!ipa3_ctx->ipa_bus_hdl) {
		IPAERR("fail to register with bus mgr!\n");
		result = -ENODEV;
		goto fail_bus_reg;
	}

	/* get IPA clocks */
	result = ipa3_get_clks(master_dev);
	if (result)
		goto fail_clk;

	/* init active_clients_log after getting ipa-clk */
	if (ipa3_active_clients_log_init())
		goto fail_init_active_client;

	/* Enable ipa3_ctx->enable_clock_scaling */
	ipa3_ctx->enable_clock_scaling = 1;
	ipa3_ctx->curr_ipa_clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;

	/* enable IPA clocks explicitly to allow the initialization */
	ipa3_enable_clks();

	/* setup IPA register access */
	IPADBG("Mapping 0x%x\n", resource_p->ipa_mem_base +
		ipa3_ctx->ctrl->ipa_reg_base_ofst);
	ipa3_ctx->mmio = ioremap(resource_p->ipa_mem_base +
			ipa3_ctx->ctrl->ipa_reg_base_ofst,
			resource_p->ipa_mem_size);
	if (!ipa3_ctx->mmio) {
		IPAERR(":ipa-base ioremap err.\n");
		result = -EFAULT;
		goto fail_remap;
	}

	/* HW abstraction layer - must precede any register access below */
	if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio,
		ipa3_ctx->pdev)) {
		IPAERR("fail to init ipahal\n");
		result = -EFAULT;
		goto fail_ipahal;
	}

	result = ipa3_init_hw();
	if (result) {
		IPAERR(":error initializing HW.\n");
		result = -ENODEV;
		goto fail_init_hw;
	}
	IPADBG("IPA HW initialization sequence completed");

	ipa3_ctx->ipa_num_pipes = ipa3_get_num_pipes();
	if (ipa3_ctx->ipa_num_pipes > IPA3_MAX_NUM_PIPES) {
		IPAERR("IPA has more pipes then supported! has %d, max %d\n",
			ipa3_ctx->ipa_num_pipes, IPA3_MAX_NUM_PIPES);
		result = -ENODEV;
		goto fail_init_hw;
	}

	/* read SRAM layout (size / restricted area) from HW */
	ipa3_ctx->ctrl->ipa_sram_read_settings();
	IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n",
		ipa3_ctx->smem_sz, ipa3_ctx->smem_restricted_bytes);

	IPADBG("hdr_lcl=%u ip4_rt_hash=%u ip4_rt_nonhash=%u\n",
		ipa3_ctx->hdr_tbl_lcl, ipa3_ctx->ip4_rt_tbl_hash_lcl,
		ipa3_ctx->ip4_rt_tbl_nhash_lcl);

	IPADBG("ip6_rt_hash=%u ip6_rt_nonhash=%u\n",
		ipa3_ctx->ip6_rt_tbl_hash_lcl, ipa3_ctx->ip6_rt_tbl_nhash_lcl);

	IPADBG("ip4_flt_hash=%u ip4_flt_nonhash=%u\n",
		ipa3_ctx->ip4_flt_tbl_hash_lcl,
		ipa3_ctx->ip4_flt_tbl_nhash_lcl);

	IPADBG("ip6_flt_hash=%u ip6_flt_nonhash=%u\n",
		ipa3_ctx->ip6_flt_tbl_hash_lcl,
		ipa3_ctx->ip6_flt_tbl_nhash_lcl);

	if (ipa3_ctx->smem_reqd_sz > ipa3_ctx->smem_sz) {
		IPAERR("SW expect more core memory, needed %d, avail %d\n",
			ipa3_ctx->smem_reqd_sz, ipa3_ctx->smem_sz);
		result = -ENOMEM;
		goto fail_init_hw;
	}

	/*
	 * active-clients bookkeeping starts at 1: a proxy clock vote is
	 * held on behalf of Q6 until ipa3_post_init releases it
	 */
	mutex_init(&ipa3_ctx->ipa3_active_clients.mutex);
	spin_lock_init(&ipa3_ctx->ipa3_active_clients.spinlock);
	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
	ipa3_active_clients_log_inc(&log_info, false);
	ipa3_ctx->ipa3_active_clients.cnt = 1;

	/* Create workqueues for power management */
	ipa3_ctx->power_mgmt_wq =
		create_singlethread_workqueue("ipa_power_mgmt");
	if (!ipa3_ctx->power_mgmt_wq) {
		IPAERR("failed to create power mgmt wq\n");
		result = -ENOMEM;
		goto fail_init_hw;
	}

	ipa3_ctx->transport_power_mgmt_wq =
		create_singlethread_workqueue("transport_power_mgmt");
	if (!ipa3_ctx->transport_power_mgmt_wq) {
		IPAERR("failed to create transport power mgmt wq\n");
		result = -ENOMEM;
		goto fail_create_transport_wq;
	}

	mutex_init(&ipa3_ctx->transport_pm.transport_pm_mutex);

	/* init the lookaside cache */
	ipa3_ctx->flt_rule_cache = kmem_cache_create("IPA_FLT",
			sizeof(struct ipa3_flt_entry), 0, 0, NULL);
	if (!ipa3_ctx->flt_rule_cache) {
		IPAERR(":ipa flt cache create failed\n");
		result = -ENOMEM;
		goto fail_flt_rule_cache;
	}
	ipa3_ctx->rt_rule_cache = kmem_cache_create("IPA_RT",
			sizeof(struct ipa3_rt_entry), 0, 0, NULL);
	if (!ipa3_ctx->rt_rule_cache) {
		IPAERR(":ipa rt cache create failed\n");
		result = -ENOMEM;
		goto fail_rt_rule_cache;
	}
	ipa3_ctx->hdr_cache = kmem_cache_create("IPA_HDR",
			sizeof(struct ipa3_hdr_entry), 0, 0, NULL);
	if (!ipa3_ctx->hdr_cache) {
		IPAERR(":ipa hdr cache create failed\n");
		result = -ENOMEM;
		goto fail_hdr_cache;
	}
	ipa3_ctx->hdr_offset_cache =
	   kmem_cache_create("IPA_HDR_OFFSET",
			   sizeof(struct ipa_hdr_offset_entry), 0, 0, NULL);
	if (!ipa3_ctx->hdr_offset_cache) {
		IPAERR(":ipa hdr off cache create failed\n");
		result = -ENOMEM;
		goto fail_hdr_offset_cache;
	}
	ipa3_ctx->hdr_proc_ctx_cache = kmem_cache_create("IPA_HDR_PROC_CTX",
		sizeof(struct ipa3_hdr_proc_ctx_entry), 0, 0, NULL);
	if (!ipa3_ctx->hdr_proc_ctx_cache) {
		IPAERR(":ipa hdr proc ctx cache create failed\n");
		result = -ENOMEM;
		goto fail_hdr_proc_ctx_cache;
	}
	ipa3_ctx->hdr_proc_ctx_offset_cache =
		kmem_cache_create("IPA_HDR_PROC_CTX_OFFSET",
		sizeof(struct ipa3_hdr_proc_ctx_offset_entry), 0, 0, NULL);
	if (!ipa3_ctx->hdr_proc_ctx_offset_cache) {
		IPAERR(":ipa hdr proc ctx off cache create failed\n");
		result = -ENOMEM;
		goto fail_hdr_proc_ctx_offset_cache;
	}
	ipa3_ctx->rt_tbl_cache = kmem_cache_create("IPA_RT_TBL",
			sizeof(struct ipa3_rt_tbl), 0, 0, NULL);
	if (!ipa3_ctx->rt_tbl_cache) {
		IPAERR(":ipa rt tbl cache create failed\n");
		result = -ENOMEM;
		goto fail_rt_tbl_cache;
	}
	ipa3_ctx->tx_pkt_wrapper_cache =
	   kmem_cache_create("IPA_TX_PKT_WRAPPER",
			   sizeof(struct ipa3_tx_pkt_wrapper), 0, 0, NULL);
	if (!ipa3_ctx->tx_pkt_wrapper_cache) {
		IPAERR(":ipa tx pkt wrapper cache create failed\n");
		result = -ENOMEM;
		goto fail_tx_pkt_wrapper_cache;
	}
	ipa3_ctx->rx_pkt_wrapper_cache =
	   kmem_cache_create("IPA_RX_PKT_WRAPPER",
			   sizeof(struct ipa3_rx_pkt_wrapper), 0, 0, NULL);
	if (!ipa3_ctx->rx_pkt_wrapper_cache) {
		IPAERR(":ipa rx pkt wrapper cache create failed\n");
		result = -ENOMEM;
		goto fail_rx_pkt_wrapper_cache;
	}

	/* init the various list heads */
	INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_hdr_entry_list);
	for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
		INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_offset_list[i]);
		INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_free_offset_list[i]);
	}
	INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list);
	for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
		INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i]);
		INIT_LIST_HEAD(&ipa3_ctx->
				hdr_proc_ctx_tbl.head_free_offset_list[i]);
	}
	INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
	INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);

	rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
	INIT_LIST_HEAD(&rset->head_rt_tbl_list);
	rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
	INIT_LIST_HEAD(&rset->head_rt_tbl_list);

	INIT_LIST_HEAD(&ipa3_ctx->intf_list);
	INIT_LIST_HEAD(&ipa3_ctx->msg_list);
	INIT_LIST_HEAD(&ipa3_ctx->pull_msg_list);
	init_waitqueue_head(&ipa3_ctx->msg_waitq);
	mutex_init(&ipa3_ctx->msg_lock);

	mutex_init(&ipa3_ctx->lock);
	mutex_init(&ipa3_ctx->nat_mem.lock);

	idr_init(&ipa3_ctx->ipa_idr);
	spin_lock_init(&ipa3_ctx->idr_lock);

	/* wlan related member */
	memset(&ipa3_ctx->wc_memb, 0, sizeof(ipa3_ctx->wc_memb));
	spin_lock_init(&ipa3_ctx->wc_memb.wlan_spinlock);
	spin_lock_init(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
	INIT_LIST_HEAD(&ipa3_ctx->wc_memb.wlan_comm_desc_list);

	/*
	 * char-device creation: class -> chrdev region -> device node ->
	 * cdev.  NOTE(review): class_create() return is not checked here.
	 */
	ipa3_ctx->class = class_create(THIS_MODULE, DRV_NAME);

	result = alloc_chrdev_region(&ipa3_ctx->dev_num, 0, 1, DRV_NAME);
	if (result) {
		IPAERR("alloc_chrdev_region err.\n");
		result = -ENODEV;
		goto fail_alloc_chrdev_region;
	}

	ipa3_ctx->dev = device_create(ipa3_ctx->class, NULL, ipa3_ctx->dev_num,
			ipa3_ctx, DRV_NAME);
	if (IS_ERR(ipa3_ctx->dev)) {
		IPAERR(":device_create err.\n");
		result = -ENODEV;
		goto fail_device_create;
	}

	cdev_init(&ipa3_ctx->cdev, &ipa3_drv_fops);
	ipa3_ctx->cdev.owner = THIS_MODULE;
	ipa3_ctx->cdev.ops = &ipa3_drv_fops; /* from LDD3 */

	result = cdev_add(&ipa3_ctx->cdev, ipa3_ctx->dev_num, 1);
	if (result) {
		IPAERR(":cdev_add err=%d\n", -result);
		result = -ENODEV;
		goto fail_cdev_add;
	}
	IPADBG("ipa cdev added successful. major:%d minor:%d\n",
			MAJOR(ipa3_ctx->dev_num),
			MINOR(ipa3_ctx->dev_num));

	if (ipa3_create_nat_device()) {
		IPAERR("unable to create nat device\n");
		result = -ENODEV;
		goto fail_nat_dev_add;
	}

	/* Create a wakeup source. */
	wakeup_source_init(&ipa3_ctx->w_lock, "IPA_WS");
	spin_lock_init(&ipa3_ctx->wakelock_ref_cnt.spinlock);

	/* Initialize IPA RM (resource manager) */
	result = ipa_rm_initialize();
	if (result) {
		IPAERR("RM initialization failed (%d)\n", -result);
		result = -ENODEV;
		goto fail_ipa_rm_init;
	}
	IPADBG("IPA resource manager initialized");

	result = ipa3_create_apps_resource();
	if (result) {
		IPAERR("Failed to create APPS_CONS resource\n");
		result = -ENODEV;
		goto fail_create_apps_resource;
	}

	/*
	 * NOTE(review): failure here jumps to fail_create_apps_resource,
	 * which calls ipa_rm_exit() WITHOUT first deleting the APPS_CONS
	 * resource created just above — verify whether that is intended.
	 */
	result = ipa3_alloc_pkt_init();
	if (result) {
		IPAERR("Failed to alloc pkt_init payload\n");
		result = -ENODEV;
		goto fail_create_apps_resource;
	}

	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)
		ipa3_enable_dcd();

	INIT_LIST_HEAD(&ipa3_ctx->ipa_ready_cb_list);

	init_completion(&ipa3_ctx->init_completion_obj);
	init_completion(&ipa3_ctx->uc_loaded_completion_obj);

	/*
	 * We can't register the GSI driver yet, as it expects
	 * the GSI FW to be up and running before the registration.
	 *
	 * For IPA3.0, the GSI configuration is done by the GSI driver.
	 * For IPA3.1 (and on), the GSI configuration is done by TZ.
	 */
	if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0) {
		result = ipa3_gsi_pre_fw_load_init();
		if (result) {
			IPAERR("gsi pre FW loading config failed\n");
			result = -ENODEV;
			goto fail_ipa_init_interrupts;
		}
	}

	return 0;

/*
 * Cleanup ladder: each label undoes the acquisitions that succeeded
 * before the jump, in reverse order of acquisition.
 */
fail_ipa_init_interrupts:
	ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
fail_create_apps_resource:
	ipa_rm_exit();
fail_ipa_rm_init:
fail_nat_dev_add:
	cdev_del(&ipa3_ctx->cdev);
fail_cdev_add:
	device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
fail_device_create:
	unregister_chrdev_region(ipa3_ctx->dev_num, 1);
fail_alloc_chrdev_region:
	idr_destroy(&ipa3_ctx->ipa_idr);
	kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
fail_rx_pkt_wrapper_cache:
	kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
fail_tx_pkt_wrapper_cache:
	kmem_cache_destroy(ipa3_ctx->rt_tbl_cache);
fail_rt_tbl_cache:
	kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache);
fail_hdr_proc_ctx_offset_cache:
	kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache);
fail_hdr_proc_ctx_cache:
	kmem_cache_destroy(ipa3_ctx->hdr_offset_cache);
fail_hdr_offset_cache:
	kmem_cache_destroy(ipa3_ctx->hdr_cache);
fail_hdr_cache:
	kmem_cache_destroy(ipa3_ctx->rt_rule_cache);
fail_rt_rule_cache:
	kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
fail_flt_rule_cache:
	destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
fail_create_transport_wq:
	destroy_workqueue(ipa3_ctx->power_mgmt_wq);
fail_init_hw:
	ipahal_destroy();
fail_ipahal:
	iounmap(ipa3_ctx->mmio);
fail_remap:
	ipa3_disable_clks();
	ipa3_active_clients_log_destroy();
fail_init_active_client:
	if (ipa3_clk)
		clk_put(ipa3_clk);
	ipa3_clk = NULL;
fail_clk:
	msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
fail_bus_reg:
	if (ipa3_bus_scale_table) {
		msm_bus_cl_clear_pdata(ipa3_bus_scale_table);
		ipa3_bus_scale_table = NULL;
	}
fail_init_mem_partition:
fail_bind:
	kfree(ipa3_ctx->ctrl);
fail_mem_ctrl:
	kfree(ipa3_ctx->ipa_tz_unlock_reg);
fail_tz_unlock_reg:
	ipc_log_context_destroy(ipa3_ctx->logbuf);
fail_logbuf:
	kfree(ipa3_ctx);
	ipa3_ctx = NULL;
fail_mem_ctx:
	return result;
}
4802
4803static int get_ipa_dts_configuration(struct platform_device *pdev,
4804 struct ipa3_plat_drv_res *ipa_drv_res)
4805{
Gidon Studinski3021a6f2016-11-10 12:48:48 +02004806 int i, result, pos;
Amir Levy9659e592016-10-27 18:08:27 +03004807 struct resource *resource;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02004808 u32 *ipa_tz_unlock_reg;
4809 int elem_num;
Amir Levy9659e592016-10-27 18:08:27 +03004810
4811 /* initialize ipa3_res */
4812 ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
4813 ipa_drv_res->ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
4814 ipa_drv_res->ipa_hw_type = 0;
4815 ipa_drv_res->ipa3_hw_mode = 0;
Amir Levy9659e592016-10-27 18:08:27 +03004816 ipa_drv_res->modem_cfg_emb_pipe_flt = false;
4817 ipa_drv_res->ipa_wdi2 = false;
4818 ipa_drv_res->use_64_bit_dma_mask = false;
Ghanim Fodi6a831342017-03-07 18:19:15 +02004819 ipa_drv_res->use_bw_vote = false;
Amir Levy9659e592016-10-27 18:08:27 +03004820 ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
4821 ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
4822 ipa_drv_res->apply_rg10_wa = false;
4823 ipa_drv_res->gsi_ch20_wa = false;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02004824 ipa_drv_res->ipa_tz_unlock_reg_num = 0;
4825 ipa_drv_res->ipa_tz_unlock_reg = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03004826
4827 /* Get IPA HW Version */
4828 result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
4829 &ipa_drv_res->ipa_hw_type);
4830 if ((result) || (ipa_drv_res->ipa_hw_type == 0)) {
4831 IPAERR(":get resource failed for ipa-hw-ver!\n");
4832 return -ENODEV;
4833 }
4834 IPADBG(": ipa_hw_type = %d", ipa_drv_res->ipa_hw_type);
4835
4836 if (ipa_drv_res->ipa_hw_type < IPA_HW_v3_0) {
4837 IPAERR(":IPA version below 3.0 not supported!\n");
4838 return -ENODEV;
4839 }
4840
4841 /* Get IPA HW mode */
4842 result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-mode",
4843 &ipa_drv_res->ipa3_hw_mode);
4844 if (result)
4845 IPADBG("using default (IPA_MODE_NORMAL) for ipa-hw-mode\n");
4846 else
4847 IPADBG(": found ipa_drv_res->ipa3_hw_mode = %d",
4848 ipa_drv_res->ipa3_hw_mode);
4849
4850 /* Get IPA WAN / LAN RX pool size */
4851 result = of_property_read_u32(pdev->dev.of_node,
4852 "qcom,wan-rx-ring-size",
4853 &ipa_drv_res->wan_rx_ring_size);
4854 if (result)
4855 IPADBG("using default for wan-rx-ring-size = %u\n",
4856 ipa_drv_res->wan_rx_ring_size);
4857 else
4858 IPADBG(": found ipa_drv_res->wan-rx-ring-size = %u",
4859 ipa_drv_res->wan_rx_ring_size);
4860
4861 result = of_property_read_u32(pdev->dev.of_node,
4862 "qcom,lan-rx-ring-size",
4863 &ipa_drv_res->lan_rx_ring_size);
4864 if (result)
4865 IPADBG("using default for lan-rx-ring-size = %u\n",
4866 ipa_drv_res->lan_rx_ring_size);
4867 else
4868 IPADBG(": found ipa_drv_res->lan-rx-ring-size = %u",
4869 ipa_drv_res->lan_rx_ring_size);
4870
4871 ipa_drv_res->use_ipa_teth_bridge =
4872 of_property_read_bool(pdev->dev.of_node,
4873 "qcom,use-ipa-tethering-bridge");
4874 IPADBG(": using TBDr = %s",
4875 ipa_drv_res->use_ipa_teth_bridge
4876 ? "True" : "False");
4877
Amir Levy9659e592016-10-27 18:08:27 +03004878 ipa_drv_res->modem_cfg_emb_pipe_flt =
4879 of_property_read_bool(pdev->dev.of_node,
4880 "qcom,modem-cfg-emb-pipe-flt");
4881 IPADBG(": modem configure embedded pipe filtering = %s\n",
4882 ipa_drv_res->modem_cfg_emb_pipe_flt
4883 ? "True" : "False");
4884
4885 ipa_drv_res->ipa_wdi2 =
4886 of_property_read_bool(pdev->dev.of_node,
4887 "qcom,ipa-wdi2");
4888 IPADBG(": WDI-2.0 = %s\n",
4889 ipa_drv_res->ipa_wdi2
4890 ? "True" : "False");
4891
4892 ipa_drv_res->use_64_bit_dma_mask =
4893 of_property_read_bool(pdev->dev.of_node,
4894 "qcom,use-64-bit-dma-mask");
4895 IPADBG(": use_64_bit_dma_mask = %s\n",
4896 ipa_drv_res->use_64_bit_dma_mask
4897 ? "True" : "False");
4898
Ghanim Fodi6a831342017-03-07 18:19:15 +02004899 ipa_drv_res->use_bw_vote =
4900 of_property_read_bool(pdev->dev.of_node,
4901 "qcom,bandwidth-vote-for-ipa");
4902 IPADBG(": use_bw_vote = %s\n",
4903 ipa_drv_res->use_bw_vote
4904 ? "True" : "False");
4905
Amir Levy9659e592016-10-27 18:08:27 +03004906 ipa_drv_res->skip_uc_pipe_reset =
4907 of_property_read_bool(pdev->dev.of_node,
4908 "qcom,skip-uc-pipe-reset");
4909 IPADBG(": skip uC pipe reset = %s\n",
4910 ipa_drv_res->skip_uc_pipe_reset
4911 ? "True" : "False");
4912
4913 ipa_drv_res->tethered_flow_control =
4914 of_property_read_bool(pdev->dev.of_node,
4915 "qcom,tethered-flow-control");
4916 IPADBG(": Use apps based flow control = %s\n",
4917 ipa_drv_res->tethered_flow_control
4918 ? "True" : "False");
4919
Amir Levy9659e592016-10-27 18:08:27 +03004920 /* Get IPA wrapper address */
4921 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
4922 "ipa-base");
4923 if (!resource) {
4924 IPAERR(":get resource failed for ipa-base!\n");
4925 return -ENODEV;
4926 }
4927 ipa_drv_res->ipa_mem_base = resource->start;
4928 ipa_drv_res->ipa_mem_size = resource_size(resource);
4929 IPADBG(": ipa-base = 0x%x, size = 0x%x\n",
4930 ipa_drv_res->ipa_mem_base,
4931 ipa_drv_res->ipa_mem_size);
4932
4933 smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
4934 smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
4935
Amir Levya59ed3f2017-03-05 17:30:55 +02004936 /* Get IPA GSI address */
4937 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
4938 "gsi-base");
4939 if (!resource) {
4940 IPAERR(":get resource failed for gsi-base!\n");
4941 return -ENODEV;
Amir Levy9659e592016-10-27 18:08:27 +03004942 }
Amir Levya59ed3f2017-03-05 17:30:55 +02004943 ipa_drv_res->transport_mem_base = resource->start;
4944 ipa_drv_res->transport_mem_size = resource_size(resource);
4945 IPADBG(": gsi-base = 0x%x, size = 0x%x\n",
4946 ipa_drv_res->transport_mem_base,
4947 ipa_drv_res->transport_mem_size);
4948
4949 /* Get IPA GSI IRQ number */
4950 resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
4951 "gsi-irq");
4952 if (!resource) {
4953 IPAERR(":get resource failed for gsi-irq!\n");
4954 return -ENODEV;
4955 }
4956 ipa_drv_res->transport_irq = resource->start;
4957 IPADBG(": gsi-irq = %d\n", ipa_drv_res->transport_irq);
Amir Levy9659e592016-10-27 18:08:27 +03004958
4959 /* Get IPA pipe mem start ofst */
4960 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
4961 "ipa-pipe-mem");
4962 if (!resource) {
4963 IPADBG(":not using pipe memory - resource nonexisting\n");
4964 } else {
4965 ipa_drv_res->ipa_pipe_mem_start_ofst = resource->start;
4966 ipa_drv_res->ipa_pipe_mem_size = resource_size(resource);
4967 IPADBG(":using pipe memory - at 0x%x of size 0x%x\n",
4968 ipa_drv_res->ipa_pipe_mem_start_ofst,
4969 ipa_drv_res->ipa_pipe_mem_size);
4970 }
4971
4972 /* Get IPA IRQ number */
4973 resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
4974 "ipa-irq");
4975 if (!resource) {
4976 IPAERR(":get resource failed for ipa-irq!\n");
4977 return -ENODEV;
4978 }
4979 ipa_drv_res->ipa_irq = resource->start;
4980 IPADBG(":ipa-irq = %d\n", ipa_drv_res->ipa_irq);
4981
4982 result = of_property_read_u32(pdev->dev.of_node, "qcom,ee",
4983 &ipa_drv_res->ee);
4984 if (result)
4985 ipa_drv_res->ee = 0;
4986
4987 ipa_drv_res->apply_rg10_wa =
4988 of_property_read_bool(pdev->dev.of_node,
4989 "qcom,use-rg10-limitation-mitigation");
4990 IPADBG(": Use Register Group 10 limitation mitigation = %s\n",
4991 ipa_drv_res->apply_rg10_wa
4992 ? "True" : "False");
4993
4994 ipa_drv_res->gsi_ch20_wa =
4995 of_property_read_bool(pdev->dev.of_node,
4996 "qcom,do-not-use-ch-gsi-20");
4997 IPADBG(": GSI CH 20 WA is = %s\n",
4998 ipa_drv_res->apply_rg10_wa
4999 ? "Needed" : "Not needed");
5000
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005001 elem_num = of_property_count_elems_of_size(pdev->dev.of_node,
5002 "qcom,ipa-tz-unlock-reg", sizeof(u32));
5003
5004 if (elem_num > 0 && elem_num % 2 == 0) {
5005 ipa_drv_res->ipa_tz_unlock_reg_num = elem_num / 2;
5006
5007 ipa_tz_unlock_reg = kcalloc(elem_num, sizeof(u32), GFP_KERNEL);
5008 if (ipa_tz_unlock_reg == NULL)
5009 return -ENOMEM;
5010
5011 ipa_drv_res->ipa_tz_unlock_reg = kcalloc(
5012 ipa_drv_res->ipa_tz_unlock_reg_num,
5013 sizeof(*ipa_drv_res->ipa_tz_unlock_reg),
5014 GFP_KERNEL);
5015 if (ipa_drv_res->ipa_tz_unlock_reg == NULL) {
5016 kfree(ipa_tz_unlock_reg);
5017 return -ENOMEM;
5018 }
5019
5020 if (of_property_read_u32_array(pdev->dev.of_node,
5021 "qcom,ipa-tz-unlock-reg", ipa_tz_unlock_reg,
5022 elem_num)) {
5023 IPAERR("failed to read register addresses\n");
5024 kfree(ipa_tz_unlock_reg);
5025 kfree(ipa_drv_res->ipa_tz_unlock_reg);
5026 return -EFAULT;
5027 }
5028
5029 pos = 0;
5030 for (i = 0; i < ipa_drv_res->ipa_tz_unlock_reg_num; i++) {
5031 ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr =
5032 ipa_tz_unlock_reg[pos++];
5033 ipa_drv_res->ipa_tz_unlock_reg[i].size =
5034 ipa_tz_unlock_reg[pos++];
5035 IPADBG("tz unlock reg %d: addr 0x%pa size %d\n", i,
5036 &ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr,
5037 ipa_drv_res->ipa_tz_unlock_reg[i].size);
5038 }
5039 kfree(ipa_tz_unlock_reg);
5040 }
Amir Levy9659e592016-10-27 18:08:27 +03005041 return 0;
5042}
5043
5044static int ipa_smmu_wlan_cb_probe(struct device *dev)
5045{
5046 struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx();
Amir Levy9659e592016-10-27 18:08:27 +03005047 int atomic_ctx = 1;
5048 int fast = 1;
5049 int bypass = 1;
5050 int ret;
5051 u32 add_map_size;
5052 const u32 *add_map;
5053 int i;
5054
5055 IPADBG("sub pdev=%p\n", dev);
5056
5057 cb->dev = dev;
Amir Levyf5625342016-12-25 10:21:02 +02005058 cb->iommu = iommu_domain_alloc(dev->bus);
Amir Levy9659e592016-10-27 18:08:27 +03005059 if (!cb->iommu) {
5060 IPAERR("could not alloc iommu domain\n");
5061 /* assume this failure is because iommu driver is not ready */
5062 return -EPROBE_DEFER;
5063 }
5064 cb->valid = true;
5065
Amir Levy9659e592016-10-27 18:08:27 +03005066 if (smmu_info.s1_bypass) {
5067 if (iommu_domain_set_attr(cb->iommu,
5068 DOMAIN_ATTR_S1_BYPASS,
5069 &bypass)) {
5070 IPAERR("couldn't set bypass\n");
5071 cb->valid = false;
5072 return -EIO;
5073 }
5074 IPADBG("SMMU S1 BYPASS\n");
5075 } else {
5076 if (iommu_domain_set_attr(cb->iommu,
5077 DOMAIN_ATTR_ATOMIC,
5078 &atomic_ctx)) {
5079 IPAERR("couldn't disable coherent HTW\n");
5080 cb->valid = false;
5081 return -EIO;
5082 }
5083 IPADBG("SMMU ATTR ATOMIC\n");
5084
5085 if (smmu_info.fast_map) {
5086 if (iommu_domain_set_attr(cb->iommu,
5087 DOMAIN_ATTR_FAST,
5088 &fast)) {
5089 IPAERR("couldn't set fast map\n");
5090 cb->valid = false;
5091 return -EIO;
5092 }
5093 IPADBG("SMMU fast map set\n");
5094 }
5095 }
5096
5097 ret = iommu_attach_device(cb->iommu, dev);
5098 if (ret) {
5099 IPAERR("could not attach device ret=%d\n", ret);
5100 cb->valid = false;
5101 return ret;
5102 }
5103 /* MAP ipa-uc ram */
5104 add_map = of_get_property(dev->of_node,
5105 "qcom,additional-mapping", &add_map_size);
5106 if (add_map) {
5107 /* mapping size is an array of 3-tuple of u32 */
5108 if (add_map_size % (3 * sizeof(u32))) {
5109 IPAERR("wrong additional mapping format\n");
5110 cb->valid = false;
5111 return -EFAULT;
5112 }
5113
5114 /* iterate of each entry of the additional mapping array */
5115 for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
5116 u32 iova = be32_to_cpu(add_map[i]);
5117 u32 pa = be32_to_cpu(add_map[i + 1]);
5118 u32 size = be32_to_cpu(add_map[i + 2]);
5119 unsigned long iova_p;
5120 phys_addr_t pa_p;
5121 u32 size_p;
5122
5123 IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
5124 iova_p, pa_p, size_p);
5125 IPADBG("mapping 0x%lx to 0x%pa size %d\n",
5126 iova_p, &pa_p, size_p);
5127 ipa3_iommu_map(cb->iommu,
5128 iova_p, pa_p, size_p,
Amir Levyf5625342016-12-25 10:21:02 +02005129 IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
Amir Levy9659e592016-10-27 18:08:27 +03005130 }
5131 }
5132 return 0;
5133}
5134
5135static int ipa_smmu_uc_cb_probe(struct device *dev)
5136{
5137 struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
Amir Levy9659e592016-10-27 18:08:27 +03005138 int atomic_ctx = 1;
5139 int bypass = 1;
5140 int fast = 1;
5141 int ret;
5142 u32 iova_ap_mapping[2];
5143
5144 IPADBG("UC CB PROBE sub pdev=%p\n", dev);
5145
5146 ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
5147 iova_ap_mapping, 2);
5148 if (ret) {
5149 IPAERR("Fail to read UC start/size iova addresses\n");
5150 return ret;
5151 }
5152 cb->va_start = iova_ap_mapping[0];
5153 cb->va_size = iova_ap_mapping[1];
5154 cb->va_end = cb->va_start + cb->va_size;
5155 IPADBG("UC va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);
5156
5157 if (smmu_info.use_64_bit_dma_mask) {
5158 if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
5159 dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
5160 IPAERR("DMA set 64bit mask failed\n");
5161 return -EOPNOTSUPP;
5162 }
5163 } else {
5164 if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
5165 dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
5166 IPAERR("DMA set 32bit mask failed\n");
5167 return -EOPNOTSUPP;
5168 }
5169 }
5170 IPADBG("UC CB PROBE=%p create IOMMU mapping\n", dev);
5171
5172 cb->dev = dev;
Amir Levyf5625342016-12-25 10:21:02 +02005173 cb->mapping = arm_iommu_create_mapping(dev->bus,
Amir Levy9659e592016-10-27 18:08:27 +03005174 cb->va_start, cb->va_size);
5175 if (IS_ERR_OR_NULL(cb->mapping)) {
5176 IPADBG("Fail to create mapping\n");
5177 /* assume this failure is because iommu driver is not ready */
5178 return -EPROBE_DEFER;
5179 }
5180 IPADBG("SMMU mapping created\n");
5181 cb->valid = true;
5182
Amir Levy9659e592016-10-27 18:08:27 +03005183 IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
5184 if (smmu_info.s1_bypass) {
5185 if (iommu_domain_set_attr(cb->mapping->domain,
5186 DOMAIN_ATTR_S1_BYPASS,
5187 &bypass)) {
5188 IPAERR("couldn't set bypass\n");
5189 arm_iommu_release_mapping(cb->mapping);
5190 cb->valid = false;
5191 return -EIO;
5192 }
5193 IPADBG("SMMU S1 BYPASS\n");
5194 } else {
5195 if (iommu_domain_set_attr(cb->mapping->domain,
5196 DOMAIN_ATTR_ATOMIC,
5197 &atomic_ctx)) {
5198 IPAERR("couldn't set domain as atomic\n");
5199 arm_iommu_release_mapping(cb->mapping);
5200 cb->valid = false;
5201 return -EIO;
5202 }
5203 IPADBG("SMMU atomic set\n");
5204
5205 if (smmu_info.fast_map) {
5206 if (iommu_domain_set_attr(cb->mapping->domain,
5207 DOMAIN_ATTR_FAST,
5208 &fast)) {
5209 IPAERR("couldn't set fast map\n");
5210 arm_iommu_release_mapping(cb->mapping);
5211 cb->valid = false;
5212 return -EIO;
5213 }
5214 IPADBG("SMMU fast map set\n");
5215 }
5216 }
5217
5218 IPADBG("UC CB PROBE sub pdev=%p attaching IOMMU device\n", dev);
5219 ret = arm_iommu_attach_device(cb->dev, cb->mapping);
5220 if (ret) {
5221 IPAERR("could not attach device ret=%d\n", ret);
5222 arm_iommu_release_mapping(cb->mapping);
5223 cb->valid = false;
5224 return ret;
5225 }
5226
5227 cb->next_addr = cb->va_end;
5228 ipa3_ctx->uc_pdev = dev;
5229
5230 return 0;
5231}
5232
5233static int ipa_smmu_ap_cb_probe(struct device *dev)
5234{
5235 struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
5236 int result;
Amir Levy9659e592016-10-27 18:08:27 +03005237 int atomic_ctx = 1;
5238 int fast = 1;
5239 int bypass = 1;
5240 u32 iova_ap_mapping[2];
5241 u32 add_map_size;
5242 const u32 *add_map;
5243 void *smem_addr;
5244 int i;
5245
5246 IPADBG("AP CB probe: sub pdev=%p\n", dev);
5247
5248 result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
5249 iova_ap_mapping, 2);
5250 if (result) {
5251 IPAERR("Fail to read AP start/size iova addresses\n");
5252 return result;
5253 }
5254 cb->va_start = iova_ap_mapping[0];
5255 cb->va_size = iova_ap_mapping[1];
5256 cb->va_end = cb->va_start + cb->va_size;
5257 IPADBG("AP va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);
5258
5259 if (smmu_info.use_64_bit_dma_mask) {
5260 if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
5261 dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
5262 IPAERR("DMA set 64bit mask failed\n");
5263 return -EOPNOTSUPP;
5264 }
5265 } else {
5266 if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
5267 dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
5268 IPAERR("DMA set 32bit mask failed\n");
5269 return -EOPNOTSUPP;
5270 }
5271 }
5272
5273 cb->dev = dev;
Amir Levyf5625342016-12-25 10:21:02 +02005274 cb->mapping = arm_iommu_create_mapping(dev->bus,
Amir Levy9659e592016-10-27 18:08:27 +03005275 cb->va_start, cb->va_size);
5276 if (IS_ERR_OR_NULL(cb->mapping)) {
5277 IPADBG("Fail to create mapping\n");
5278 /* assume this failure is because iommu driver is not ready */
5279 return -EPROBE_DEFER;
5280 }
5281 IPADBG("SMMU mapping created\n");
5282 cb->valid = true;
5283
Amir Levy9659e592016-10-27 18:08:27 +03005284 if (smmu_info.s1_bypass) {
5285 if (iommu_domain_set_attr(cb->mapping->domain,
5286 DOMAIN_ATTR_S1_BYPASS,
5287 &bypass)) {
5288 IPAERR("couldn't set bypass\n");
5289 arm_iommu_release_mapping(cb->mapping);
5290 cb->valid = false;
5291 return -EIO;
5292 }
5293 IPADBG("SMMU S1 BYPASS\n");
5294 } else {
5295 if (iommu_domain_set_attr(cb->mapping->domain,
5296 DOMAIN_ATTR_ATOMIC,
5297 &atomic_ctx)) {
5298 IPAERR("couldn't set domain as atomic\n");
5299 arm_iommu_release_mapping(cb->mapping);
5300 cb->valid = false;
5301 return -EIO;
5302 }
5303 IPADBG("SMMU atomic set\n");
5304
5305 if (iommu_domain_set_attr(cb->mapping->domain,
5306 DOMAIN_ATTR_FAST,
5307 &fast)) {
5308 IPAERR("couldn't set fast map\n");
5309 arm_iommu_release_mapping(cb->mapping);
5310 cb->valid = false;
5311 return -EIO;
5312 }
5313 IPADBG("SMMU fast map set\n");
5314 }
5315
5316 result = arm_iommu_attach_device(cb->dev, cb->mapping);
5317 if (result) {
5318 IPAERR("couldn't attach to IOMMU ret=%d\n", result);
5319 cb->valid = false;
5320 return result;
5321 }
5322
5323 add_map = of_get_property(dev->of_node,
5324 "qcom,additional-mapping", &add_map_size);
5325 if (add_map) {
5326 /* mapping size is an array of 3-tuple of u32 */
5327 if (add_map_size % (3 * sizeof(u32))) {
5328 IPAERR("wrong additional mapping format\n");
5329 cb->valid = false;
5330 return -EFAULT;
5331 }
5332
5333 /* iterate of each entry of the additional mapping array */
5334 for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
5335 u32 iova = be32_to_cpu(add_map[i]);
5336 u32 pa = be32_to_cpu(add_map[i + 1]);
5337 u32 size = be32_to_cpu(add_map[i + 2]);
5338 unsigned long iova_p;
5339 phys_addr_t pa_p;
5340 u32 size_p;
5341
5342 IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
5343 iova_p, pa_p, size_p);
5344 IPADBG("mapping 0x%lx to 0x%pa size %d\n",
5345 iova_p, &pa_p, size_p);
5346 ipa3_iommu_map(cb->mapping->domain,
5347 iova_p, pa_p, size_p,
Amir Levyf5625342016-12-25 10:21:02 +02005348 IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
Amir Levy9659e592016-10-27 18:08:27 +03005349 }
5350 }
5351
5352 /* map SMEM memory for IPA table accesses */
5353 smem_addr = smem_alloc(SMEM_IPA_FILTER_TABLE, IPA_SMEM_SIZE,
5354 SMEM_MODEM, 0);
5355 if (smem_addr) {
5356 phys_addr_t iova = smem_virt_to_phys(smem_addr);
5357 phys_addr_t pa = iova;
5358 unsigned long iova_p;
5359 phys_addr_t pa_p;
5360 u32 size_p;
5361
5362 IPA_SMMU_ROUND_TO_PAGE(iova, pa, IPA_SMEM_SIZE,
5363 iova_p, pa_p, size_p);
5364 IPADBG("mapping 0x%lx to 0x%pa size %d\n",
5365 iova_p, &pa_p, size_p);
5366 ipa3_iommu_map(cb->mapping->domain,
5367 iova_p, pa_p, size_p,
Amir Levyf5625342016-12-25 10:21:02 +02005368 IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
Amir Levy9659e592016-10-27 18:08:27 +03005369 }
5370
5371
5372 smmu_info.present = true;
5373
5374 if (!ipa3_bus_scale_table)
5375 ipa3_bus_scale_table = msm_bus_cl_get_pdata(ipa3_pdev);
5376
5377 /* Proceed to real initialization */
5378 result = ipa3_pre_init(&ipa3_res, dev);
5379 if (result) {
5380 IPAERR("ipa_init failed\n");
5381 arm_iommu_detach_device(cb->dev);
5382 arm_iommu_release_mapping(cb->mapping);
5383 cb->valid = false;
5384 return result;
5385 }
5386
5387 return result;
5388}
5389
/*
 * IRQ handler for the smp2p modem clock-query GPIO (registered in
 * ipa3_smp2p_probe()); delegates all work to
 * ipa3_freeze_clock_vote_and_notify_modem().
 */
static irqreturn_t ipa3_smp2p_modem_clk_query_isr(int irq, void *ctxt)
{
	ipa3_freeze_clock_vote_and_notify_modem();

	return IRQ_HANDLED;
}
5396
5397static int ipa3_smp2p_probe(struct device *dev)
5398{
5399 struct device_node *node = dev->of_node;
5400 int res;
5401
5402 IPADBG("node->name=%s\n", node->name);
5403 if (strcmp("qcom,smp2pgpio_map_ipa_1_out", node->name) == 0) {
5404 res = of_get_gpio(node, 0);
5405 if (res < 0) {
5406 IPADBG("of_get_gpio returned %d\n", res);
5407 return res;
5408 }
5409
5410 ipa3_ctx->smp2p_info.out_base_id = res;
5411 IPADBG("smp2p out_base_id=%d\n",
5412 ipa3_ctx->smp2p_info.out_base_id);
5413 } else if (strcmp("qcom,smp2pgpio_map_ipa_1_in", node->name) == 0) {
5414 int irq;
5415
5416 res = of_get_gpio(node, 0);
5417 if (res < 0) {
5418 IPADBG("of_get_gpio returned %d\n", res);
5419 return res;
5420 }
5421
5422 ipa3_ctx->smp2p_info.in_base_id = res;
5423 IPADBG("smp2p in_base_id=%d\n",
5424 ipa3_ctx->smp2p_info.in_base_id);
5425
5426 /* register for modem clk query */
5427 irq = gpio_to_irq(ipa3_ctx->smp2p_info.in_base_id +
5428 IPA_GPIO_IN_QUERY_CLK_IDX);
5429 if (irq < 0) {
5430 IPAERR("gpio_to_irq failed %d\n", irq);
5431 return -ENODEV;
5432 }
5433 IPADBG("smp2p irq#=%d\n", irq);
5434 res = request_irq(irq,
5435 (irq_handler_t)ipa3_smp2p_modem_clk_query_isr,
5436 IRQF_TRIGGER_RISING, "ipa_smp2p_clk_vote", dev);
5437 if (res) {
5438 IPAERR("fail to register smp2p irq=%d\n", irq);
5439 return -ENODEV;
5440 }
5441 res = enable_irq_wake(ipa3_ctx->smp2p_info.in_base_id +
5442 IPA_GPIO_IN_QUERY_CLK_IDX);
5443 if (res)
5444 IPAERR("failed to enable irq wake\n");
5445 }
5446
5447 return 0;
5448}
5449
/**
 * ipa3_plat_drv_probe() - platform probe entry for the IPA3 driver
 * @pdev_p: platform device (either the main IPA node or one of its
 *          sub-nodes, distinguished by compatible string below)
 * @api_ctrl: API controller to bind against the detected IPA HW type
 * @pdrv_match: match table forwarded to of_platform_populate()
 *
 * Dispatches SMMU context-bank and smp2p sub-device probes to their
 * dedicated handlers; for the main node it parses the DT configuration,
 * binds the API controller, populates child devices, and either defers
 * real initialization to the AP CB probe (SMMU case) or runs
 * ipa3_pre_init() directly (no-SMMU case).
 *
 * Return: 0 on success, negative errno on failure.
 */
int ipa3_plat_drv_probe(struct platform_device *pdev_p,
	struct ipa_api_controller *api_ctrl,
	const struct of_device_id *pdrv_match)
{
	int result;
	struct device *dev = &pdev_p->dev;

	IPADBG("IPA driver probing started\n");
	IPADBG("dev->of_node->name = %s\n", dev->of_node->name);

	/* sub-device probes: route by compatible string and return early */
	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-ap-cb"))
		return ipa_smmu_ap_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-wlan-cb"))
		return ipa_smmu_wlan_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-uc-cb"))
		return ipa_smmu_uc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
	    "qcom,smp2pgpio-map-ipa-1-in"))
		return ipa3_smp2p_probe(dev);

	if (of_device_is_compatible(dev->of_node,
	    "qcom,smp2pgpio-map-ipa-1-out"))
		return ipa3_smp2p_probe(dev);

	/* main IPA node from here on */
	master_dev = dev;
	if (!ipa3_pdev)
		ipa3_pdev = pdev_p;

	result = get_ipa_dts_configuration(pdev_p, &ipa3_res);
	if (result) {
		IPAERR("IPA dts parsing failed\n");
		return result;
	}

	result = ipa3_bind_api_controller(ipa3_res.ipa_hw_type, api_ctrl);
	if (result) {
		IPAERR("IPA API binding failed\n");
		return result;
	}

	/*
	 * NOTE(review): of_platform_populate() can trigger the SMMU CB
	 * sub-probes above, which read smmu_info.s1_bypass/fast_map —
	 * but those flags are only parsed below. Confirm the CB probes
	 * are guaranteed to run after this point (e.g. deferred), or
	 * whether the flag parsing should be moved before populate.
	 */
	result = of_platform_populate(pdev_p->dev.of_node,
		pdrv_match, NULL, &pdev_p->dev);
	if (result) {
		IPAERR("failed to populate platform\n");
		return result;
	}

	if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
		/* SMMU case: record flags; real init happens in AP CB probe */
		if (of_property_read_bool(pdev_p->dev.of_node,
			"qcom,smmu-s1-bypass"))
			smmu_info.s1_bypass = true;
		if (of_property_read_bool(pdev_p->dev.of_node,
			"qcom,smmu-fast-map"))
			smmu_info.fast_map = true;
		if (of_property_read_bool(pdev_p->dev.of_node,
			"qcom,use-64-bit-dma-mask"))
			smmu_info.use_64_bit_dma_mask = true;
		smmu_info.arm_smmu = true;
		pr_info("IPA smmu_info.s1_bypass=%d smmu_info.fast_map=%d\n",
			smmu_info.s1_bypass, smmu_info.fast_map);
	} else if (of_property_read_bool(pdev_p->dev.of_node,
				"qcom,msm-smmu")) {
		IPAERR("Legacy IOMMU not supported\n");
		result = -EOPNOTSUPP;
	} else {
		/* no-SMMU case: set DMA masks and initialize directly */
		if (of_property_read_bool(pdev_p->dev.of_node,
			"qcom,use-64-bit-dma-mask")) {
			if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(64)) ||
				dma_set_coherent_mask(&pdev_p->dev,
				DMA_BIT_MASK(64))) {
				IPAERR("DMA set 64bit mask failed\n");
				return -EOPNOTSUPP;
			}
		} else {
			if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(32)) ||
				dma_set_coherent_mask(&pdev_p->dev,
				DMA_BIT_MASK(32))) {
				IPAERR("DMA set 32bit mask failed\n");
				return -EOPNOTSUPP;
			}
		}

		if (!ipa3_bus_scale_table)
			ipa3_bus_scale_table = msm_bus_cl_get_pdata(pdev_p);
		/* Proceed to real initialization */
		result = ipa3_pre_init(&ipa3_res, dev);
		if (result) {
			IPAERR("ipa3_init failed\n");
			return result;
		}
	}

	return result;
}
5547
5548/**
5549 * ipa3_ap_suspend() - suspend callback for runtime_pm
5550 * @dev: pointer to device
5551 *
5552 * This callback will be invoked by the runtime_pm framework when an AP suspend
5553 * operation is invoked, usually by pressing a suspend button.
5554 *
5555 * Returns -EAGAIN to runtime_pm framework in case IPA is in use by AP.
5556 * This will postpone the suspend operation until IPA is no longer used by AP.
5557*/
5558int ipa3_ap_suspend(struct device *dev)
5559{
5560 int i;
5561
5562 IPADBG("Enter...\n");
5563
5564 /* In case there is a tx/rx handler in polling mode fail to suspend */
5565 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
5566 if (ipa3_ctx->ep[i].sys &&
5567 atomic_read(&ipa3_ctx->ep[i].sys->curr_polling_state)) {
5568 IPAERR("EP %d is in polling state, do not suspend\n",
5569 i);
5570 return -EAGAIN;
5571 }
5572 }
5573
Amir Levya59ed3f2017-03-05 17:30:55 +02005574 /*
5575 * Release transport IPA resource without waiting for inactivity timer
5576 */
Amir Levy9659e592016-10-27 18:08:27 +03005577 atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
Amir Levya59ed3f2017-03-05 17:30:55 +02005578 ipa3_transport_release_resource(NULL);
Amir Levy9659e592016-10-27 18:08:27 +03005579 IPADBG("Exit\n");
5580
5581 return 0;
5582}
5583
/**
 * ipa3_ap_resume() - resume callback for runtime_pm
 * @dev: pointer to device
 *
 * This callback will be invoked by the runtime_pm framework when an AP resume
 * operation is invoked.
 *
 * Always returns 0 since resume should always succeed.
 */
int ipa3_ap_resume(struct device *dev)
{
	return 0;
}
5597
/* Accessor for the global IPA3 driver context. */
struct ipa3_context *ipa3_get_ctx(void)
{
	return ipa3_ctx;
}
5602
Amir Levy9659e592016-10-27 18:08:27 +03005603static void ipa_gsi_notify_cb(struct gsi_per_notify *notify)
5604{
5605 switch (notify->evt_id) {
5606 case GSI_PER_EVT_GLOB_ERROR:
5607 IPAERR("Got GSI_PER_EVT_GLOB_ERROR\n");
5608 IPAERR("Err_desc = 0x%04x\n", notify->data.err_desc);
5609 break;
5610 case GSI_PER_EVT_GLOB_GP1:
5611 IPAERR("Got GSI_PER_EVT_GLOB_GP1\n");
5612 BUG();
5613 break;
5614 case GSI_PER_EVT_GLOB_GP2:
5615 IPAERR("Got GSI_PER_EVT_GLOB_GP2\n");
5616 BUG();
5617 break;
5618 case GSI_PER_EVT_GLOB_GP3:
5619 IPAERR("Got GSI_PER_EVT_GLOB_GP3\n");
5620 BUG();
5621 break;
5622 case GSI_PER_EVT_GENERAL_BREAK_POINT:
5623 IPAERR("Got GSI_PER_EVT_GENERAL_BREAK_POINT\n");
5624 break;
5625 case GSI_PER_EVT_GENERAL_BUS_ERROR:
5626 IPAERR("Got GSI_PER_EVT_GENERAL_BUS_ERROR\n");
5627 BUG();
5628 break;
5629 case GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW:
5630 IPAERR("Got GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW\n");
5631 BUG();
5632 break;
5633 case GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW:
5634 IPAERR("Got GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW\n");
5635 BUG();
5636 break;
5637 default:
5638 IPAERR("Received unexpected evt: %d\n",
5639 notify->evt_id);
5640 BUG();
5641 }
5642}
5643
5644int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data)
5645{
5646 struct ipa3_ready_cb_info *cb_info = NULL;
5647
5648 /* check ipa3_ctx existed or not */
5649 if (!ipa3_ctx) {
5650 IPADBG("IPA driver haven't initialized\n");
5651 return -ENXIO;
5652 }
5653 mutex_lock(&ipa3_ctx->lock);
5654 if (ipa3_ctx->ipa_initialization_complete) {
5655 mutex_unlock(&ipa3_ctx->lock);
5656 IPADBG("IPA driver finished initialization already\n");
5657 return -EEXIST;
5658 }
5659
5660 cb_info = kmalloc(sizeof(struct ipa3_ready_cb_info), GFP_KERNEL);
5661 if (!cb_info) {
5662 mutex_unlock(&ipa3_ctx->lock);
5663 return -ENOMEM;
5664 }
5665
5666 cb_info->ready_cb = ipa_ready_cb;
5667 cb_info->user_data = user_data;
5668
5669 list_add_tail(&cb_info->link, &ipa3_ctx->ipa_ready_cb_list);
5670 mutex_unlock(&ipa3_ctx->lock);
5671
5672 return 0;
5673}
5674
5675int ipa3_iommu_map(struct iommu_domain *domain,
5676 unsigned long iova, phys_addr_t paddr, size_t size, int prot)
5677{
5678 struct ipa_smmu_cb_ctx *ap_cb = ipa3_get_smmu_ctx();
5679 struct ipa_smmu_cb_ctx *uc_cb = ipa3_get_uc_smmu_ctx();
5680
5681 IPADBG("domain =0x%p iova 0x%lx\n", domain, iova);
5682 IPADBG("paddr =0x%pa size 0x%x\n", &paddr, (u32)size);
5683
5684 /* make sure no overlapping */
5685 if (domain == ipa3_get_smmu_domain()) {
5686 if (iova >= ap_cb->va_start && iova < ap_cb->va_end) {
5687 IPAERR("iommu AP overlap addr 0x%lx\n", iova);
5688 ipa_assert();
5689 return -EFAULT;
5690 }
5691 } else if (domain == ipa3_get_wlan_smmu_domain()) {
5692 /* wlan is one time map */
5693 } else if (domain == ipa3_get_uc_smmu_domain()) {
5694 if (iova >= uc_cb->va_start && iova < uc_cb->va_end) {
5695 IPAERR("iommu uC overlap addr 0x%lx\n", iova);
5696 ipa_assert();
5697 return -EFAULT;
5698 }
5699 } else {
5700 IPAERR("Unexpected domain 0x%p\n", domain);
5701 ipa_assert();
5702 return -EFAULT;
5703 }
5704
5705 return iommu_map(domain, iova, paddr, size, prot);
5706}
5707
5708MODULE_LICENSE("GPL v2");
5709MODULE_DESCRIPTION("IPA HW device driver");