blob: 8b2beea45fb81f9174a262049b1a265b03b56526 [file] [log] [blame]
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Amir Levy9659e592016-10-27 18:08:27 +03002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/clk.h>
14#include <linux/compat.h>
15#include <linux/device.h>
16#include <linux/dmapool.h>
17#include <linux/fs.h>
18#include <linux/genalloc.h>
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/mm.h>
22#include <linux/module.h>
23#include <linux/of.h>
24#include <linux/of_platform.h>
25#include <linux/platform_device.h>
26#include <linux/rbtree.h>
27#include <linux/of_gpio.h>
28#include <linux/uaccess.h>
29#include <linux/interrupt.h>
30#include <linux/msm-bus.h>
31#include <linux/msm-bus-board.h>
32#include <linux/netdevice.h>
33#include <linux/delay.h>
34#include <linux/msm_gsi.h>
Amir Levy9659e592016-10-27 18:08:27 +030035#include <linux/time.h>
36#include <linux/hashtable.h>
Amir Levyd9f51132016-11-14 16:55:35 +020037#include <linux/jhash.h>
Amir Levy9659e592016-10-27 18:08:27 +030038#include <soc/qcom/subsystem_restart.h>
39#include <soc/qcom/smem.h>
Gidon Studinski3021a6f2016-11-10 12:48:48 +020040#include <soc/qcom/scm.h>
Amir Levy635bced2016-12-19 09:20:42 +020041#include <asm/cacheflush.h>
Gidon Studinski3021a6f2016-11-10 12:48:48 +020042
43#ifdef CONFIG_ARM64
44
45/* Outer caches unsupported on ARM64 platforms */
46#define outer_flush_range(x, y)
47#define __cpuc_flush_dcache_area __flush_dcache_area
48
49#endif
50
Amir Levy9659e592016-10-27 18:08:27 +030051#define IPA_SUBSYSTEM_NAME "ipa_fws"
52#include "ipa_i.h"
53#include "../ipa_rm_i.h"
54#include "ipahal/ipahal.h"
55#include "ipahal/ipahal_fltrt.h"
56
57#define CREATE_TRACE_POINTS
58#include "ipa_trace.h"
59
60#define IPA_GPIO_IN_QUERY_CLK_IDX 0
61#define IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX 0
62#define IPA_GPIO_OUT_CLK_VOTE_IDX 1
63
64#define IPA_SUMMING_THRESHOLD (0x10)
65#define IPA_PIPE_MEM_START_OFST (0x0)
66#define IPA_PIPE_MEM_SIZE (0x0)
67#define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \
68 x == IPA_MODE_MOBILE_AP_WAN || \
69 x == IPA_MODE_MOBILE_AP_WLAN)
70#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
71#define IPA_A5_MUX_HEADER_LENGTH (8)
72
73#define IPA_AGGR_MAX_STR_LENGTH (10)
74
Gidon Studinski3021a6f2016-11-10 12:48:48 +020075#define CLEANUP_TAG_PROCESS_TIMEOUT 500
Amir Levy9659e592016-10-27 18:08:27 +030076
77#define IPA_AGGR_STR_IN_BYTES(str) \
78 (strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
79
80#define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100
81
82#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048
83
84#define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0
85#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
86#define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
87#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3
88
89#define IPA_SMEM_SIZE (8 * 1024)
90
91/* round addresses for closes page per SMMU requirements */
92#define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \
93 do { \
94 (iova_p) = rounddown((iova), PAGE_SIZE); \
95 (pa_p) = rounddown((pa), PAGE_SIZE); \
96 (size_p) = roundup((size) + (pa) - (pa_p), PAGE_SIZE); \
97 } while (0)
98
99
100/* The relative location in /lib/firmware where the FWs will reside */
101#define IPA_FWS_PATH "ipa/ipa_fws.elf"
102
103#ifdef CONFIG_COMPAT
104#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
105 IPA_IOCTL_ADD_HDR, \
106 compat_uptr_t)
107#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \
108 IPA_IOCTL_DEL_HDR, \
109 compat_uptr_t)
110#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
111 IPA_IOCTL_ADD_RT_RULE, \
112 compat_uptr_t)
113#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
114 IPA_IOCTL_DEL_RT_RULE, \
115 compat_uptr_t)
116#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
117 IPA_IOCTL_ADD_FLT_RULE, \
118 compat_uptr_t)
119#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
120 IPA_IOCTL_DEL_FLT_RULE, \
121 compat_uptr_t)
122#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \
123 IPA_IOCTL_GET_RT_TBL, \
124 compat_uptr_t)
125#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \
126 IPA_IOCTL_COPY_HDR, \
127 compat_uptr_t)
128#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \
129 IPA_IOCTL_QUERY_INTF, \
130 compat_uptr_t)
131#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
132 IPA_IOCTL_QUERY_INTF_TX_PROPS, \
133 compat_uptr_t)
134#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
135 IPA_IOCTL_QUERY_INTF_RX_PROPS, \
136 compat_uptr_t)
137#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \
138 IPA_IOCTL_QUERY_INTF_EXT_PROPS, \
139 compat_uptr_t)
140#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \
141 IPA_IOCTL_GET_HDR, \
142 compat_uptr_t)
143#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \
144 IPA_IOCTL_ALLOC_NAT_MEM, \
145 compat_uptr_t)
146#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \
147 IPA_IOCTL_V4_INIT_NAT, \
148 compat_uptr_t)
149#define IPA_IOC_NAT_DMA32 _IOWR(IPA_IOC_MAGIC, \
150 IPA_IOCTL_NAT_DMA, \
151 compat_uptr_t)
152#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \
153 IPA_IOCTL_V4_DEL_NAT, \
154 compat_uptr_t)
155#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \
156 IPA_IOCTL_GET_NAT_OFFSET, \
157 compat_uptr_t)
158#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \
159 IPA_IOCTL_PULL_MSG, \
160 compat_uptr_t)
161#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
162 IPA_IOCTL_RM_ADD_DEPENDENCY, \
163 compat_uptr_t)
164#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
165 IPA_IOCTL_RM_DEL_DEPENDENCY, \
166 compat_uptr_t)
167#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \
168 IPA_IOCTL_GENERATE_FLT_EQ, \
169 compat_uptr_t)
170#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \
171 IPA_IOCTL_QUERY_RT_TBL_INDEX, \
172 compat_uptr_t)
173#define IPA_IOC_WRITE_QMAPID32 _IOWR(IPA_IOC_MAGIC, \
174 IPA_IOCTL_WRITE_QMAPID, \
175 compat_uptr_t)
176#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
177 IPA_IOCTL_MDFY_FLT_RULE, \
178 compat_uptr_t)
179#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \
180 IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
181 compat_uptr_t)
182#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \
183 IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \
184 compat_uptr_t)
185#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \
186 IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \
187 compat_uptr_t)
188#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
189 IPA_IOCTL_ADD_HDR_PROC_CTX, \
190 compat_uptr_t)
191#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
192 IPA_IOCTL_DEL_HDR_PROC_CTX, \
193 compat_uptr_t)
194#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
195 IPA_IOCTL_MDFY_RT_RULE, \
196 compat_uptr_t)
197
/**
 * struct ipa3_ioc_nat_alloc_mem32 - nat table memory allocation
 * properties
 * @dev_name: input parameter, the name of table
 * @size: input parameter, size of table in bytes
 * @offset: output parameter, offset into page in case of system memory
 *
 * 32-bit (compat) layout of the NAT allocation ioctl argument, used when
 * a 32-bit user-space process issues the ioctl on a 64-bit kernel
 * (compat_size_t/compat_off_t keep the field widths matching user space).
 */
struct ipa3_ioc_nat_alloc_mem32 {
	char dev_name[IPA_RESOURCE_NAME_MAX];
	compat_size_t size;
	compat_off_t offset;
};
210#endif
211
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200212#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311
213#define TZ_MEM_PROTECT_REGION_ID 0x10
214
/*
 * Single IOVEC entry describing one region for the TZ "mem protect region"
 * SCM call (see TZ_MEM_PROTECT_REGION_ID above). __packed: layout is a
 * wire format shared with TrustZone, no padding allowed.
 */
struct tz_smmu_ipa_protect_region_iovec_s {
	u64 input_addr;
	u64 output_addr;
	u64 size;
	u32 attr;
} __packed;

/* SCM request payload: physical address and byte size of the iovec buffer */
struct tz_smmu_ipa_protect_region_s {
	phys_addr_t iovec_buf;
	u32 size_bytes;
} __packed;
226
Amir Levy9659e592016-10-27 18:08:27 +0300227static void ipa3_start_tag_process(struct work_struct *work);
228static DECLARE_WORK(ipa3_tag_work, ipa3_start_tag_process);
229
Amir Levya59ed3f2017-03-05 17:30:55 +0200230static void ipa3_transport_release_resource(struct work_struct *work);
231static DECLARE_DELAYED_WORK(ipa3_transport_release_resource_work,
232 ipa3_transport_release_resource);
Amir Levy9659e592016-10-27 18:08:27 +0300233static void ipa_gsi_notify_cb(struct gsi_per_notify *notify);
234
Amir Levy9659e592016-10-27 18:08:27 +0300235static struct ipa3_plat_drv_res ipa3_res = {0, };
236struct msm_bus_scale_pdata *ipa3_bus_scale_table;
237
238static struct clk *ipa3_clk;
239
240struct ipa3_context *ipa3_ctx;
241static struct device *master_dev;
242struct platform_device *ipa3_pdev;
/*
 * SMMU configuration for the IPA driver; presumably populated during
 * probe from platform data/device tree — confirm against the probe path.
 */
static struct {
	bool present;		/* an SMMU is attached to IPA */
	bool arm_smmu;		/* ARM SMMU driver in use */
	bool fast_map;
	bool s1_bypass;		/* stage-1 translation bypassed */
	bool use_64_bit_dma_mask;
	u32 ipa_base;		/* IPA register space base */
	u32 ipa_size;		/* IPA register space size */
} smmu_info;
252
253static char *active_clients_table_buf;
254
255int ipa3_active_clients_log_print_buffer(char *buf, int size)
256{
257 int i;
258 int nbytes;
259 int cnt = 0;
260 int start_idx;
261 int end_idx;
262
263 start_idx = (ipa3_ctx->ipa3_active_clients_logging.log_tail + 1) %
264 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
265 end_idx = ipa3_ctx->ipa3_active_clients_logging.log_head;
266 for (i = start_idx; i != end_idx;
267 i = (i + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) {
268 nbytes = scnprintf(buf + cnt, size - cnt, "%s\n",
269 ipa3_ctx->ipa3_active_clients_logging
270 .log_buffer[i]);
271 cnt += nbytes;
272 }
273
274 return cnt;
275}
276
277int ipa3_active_clients_log_print_table(char *buf, int size)
278{
279 int i;
280 struct ipa3_active_client_htable_entry *iterator;
281 int cnt = 0;
282
283 cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n");
284 hash_for_each(ipa3_ctx->ipa3_active_clients_logging.htable, i,
285 iterator, list) {
286 switch (iterator->type) {
287 case IPA3_ACTIVE_CLIENT_LOG_TYPE_EP:
288 cnt += scnprintf(buf + cnt, size - cnt,
289 "%-40s %-3d ENDPOINT\n",
290 iterator->id_string, iterator->count);
291 break;
292 case IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE:
293 cnt += scnprintf(buf + cnt, size - cnt,
294 "%-40s %-3d SIMPLE\n",
295 iterator->id_string, iterator->count);
296 break;
297 case IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE:
298 cnt += scnprintf(buf + cnt, size - cnt,
299 "%-40s %-3d RESOURCE\n",
300 iterator->id_string, iterator->count);
301 break;
302 case IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL:
303 cnt += scnprintf(buf + cnt, size - cnt,
304 "%-40s %-3d SPECIAL\n",
305 iterator->id_string, iterator->count);
306 break;
307 default:
308 IPAERR("Trying to print illegal active_clients type");
309 break;
310 }
311 }
312 cnt += scnprintf(buf + cnt, size - cnt,
313 "\nTotal active clients count: %d\n",
314 ipa3_ctx->ipa3_active_clients.cnt);
315
316 return cnt;
317}
318
/*
 * Panic-notifier callback: render the active clients table into the
 * pre-allocated active_clients_table_buf and emit it via IPAERR, so the
 * clients state appears in the panic log. Takes the active-clients lock
 * around the table walk.
 */
static int ipa3_active_clients_panic_notifier(struct notifier_block *this,
		unsigned long event, void *ptr)
{
	ipa3_active_clients_lock();
	ipa3_active_clients_log_print_table(active_clients_table_buf,
			IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE);
	IPAERR("%s", active_clients_table_buf);
	ipa3_active_clients_unlock();

	return NOTIFY_DONE;
}
330
/* Registered on the panic notifier chain by ipa3_active_clients_log_init() */
static struct notifier_block ipa3_active_clients_panic_blk = {
	.notifier_call  = ipa3_active_clients_panic_notifier,
};
334
335static int ipa3_active_clients_log_insert(const char *string)
336{
337 int head;
338 int tail;
339
340 if (!ipa3_ctx->ipa3_active_clients_logging.log_rdy)
341 return -EPERM;
342
343 head = ipa3_ctx->ipa3_active_clients_logging.log_head;
344 tail = ipa3_ctx->ipa3_active_clients_logging.log_tail;
345
346 memset(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], '_',
347 IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
348 strlcpy(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], string,
349 (size_t)IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
350 head = (head + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
351 if (tail == head)
352 tail = (tail + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
353
354 ipa3_ctx->ipa3_active_clients_logging.log_tail = tail;
355 ipa3_ctx->ipa3_active_clients_logging.log_head = head;
356
357 return 0;
358}
359
360static int ipa3_active_clients_log_init(void)
361{
362 int i;
363
364 ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] = kzalloc(
365 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES *
366 sizeof(char[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN]),
367 GFP_KERNEL);
368 active_clients_table_buf = kzalloc(sizeof(
369 char[IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE]), GFP_KERNEL);
370 if (ipa3_ctx->ipa3_active_clients_logging.log_buffer == NULL) {
371 pr_err("Active Clients Logging memory allocation failed");
372 goto bail;
373 }
374 for (i = 0; i < IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; i++) {
375 ipa3_ctx->ipa3_active_clients_logging.log_buffer[i] =
376 ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] +
377 (IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN * i);
378 }
379 ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
380 ipa3_ctx->ipa3_active_clients_logging.log_tail =
381 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
382 hash_init(ipa3_ctx->ipa3_active_clients_logging.htable);
383 atomic_notifier_chain_register(&panic_notifier_list,
384 &ipa3_active_clients_panic_blk);
385 ipa3_ctx->ipa3_active_clients_logging.log_rdy = 1;
386
387 return 0;
388
389bail:
390 return -ENOMEM;
391}
392
393void ipa3_active_clients_log_clear(void)
394{
395 ipa3_active_clients_lock();
396 ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
397 ipa3_ctx->ipa3_active_clients_logging.log_tail =
398 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
399 ipa3_active_clients_unlock();
400}
401
402static void ipa3_active_clients_log_destroy(void)
403{
404 ipa3_ctx->ipa3_active_clients_logging.log_rdy = 0;
405 kfree(ipa3_ctx->ipa3_active_clients_logging.log_buffer[0]);
406 ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
407 ipa3_ctx->ipa3_active_clients_logging.log_tail =
408 IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
409}
410
/* SMMU context-bank identifiers; used as indices into the smmu_cb[] array */
enum ipa_smmu_cb_type {
	IPA_SMMU_CB_AP,		/* application-processor context bank */
	IPA_SMMU_CB_WLAN,	/* WLAN context bank */
	IPA_SMMU_CB_UC,		/* IPA microcontroller context bank */
	IPA_SMMU_CB_MAX		/* number of context banks */

};
418
419static struct ipa_smmu_cb_ctx smmu_cb[IPA_SMMU_CB_MAX];
420
421struct iommu_domain *ipa3_get_smmu_domain(void)
422{
423 if (smmu_cb[IPA_SMMU_CB_AP].valid)
424 return smmu_cb[IPA_SMMU_CB_AP].mapping->domain;
425
426 IPAERR("CB not valid\n");
427
428 return NULL;
429}
430
431struct iommu_domain *ipa3_get_uc_smmu_domain(void)
432{
433 if (smmu_cb[IPA_SMMU_CB_UC].valid)
434 return smmu_cb[IPA_SMMU_CB_UC].mapping->domain;
435
436 IPAERR("CB not valid\n");
437
438 return NULL;
439}
440
441struct iommu_domain *ipa3_get_wlan_smmu_domain(void)
442{
443 if (smmu_cb[IPA_SMMU_CB_WLAN].valid)
444 return smmu_cb[IPA_SMMU_CB_WLAN].iommu;
445
446 IPAERR("CB not valid\n");
447
448 return NULL;
449}
450
451
/**
 * ipa3_get_dma_dev() - Return the device used for IPA DMA allocations
 *
 * Return value: pointer to the IPA platform device (ipa3_ctx->pdev)
 */
struct device *ipa3_get_dma_dev(void)
{
	return ipa3_ctx->pdev;
}
456
/**
 * ipa3_get_smmu_ctx()- Return the AP smmu context
 *
 * (Previous comment wrongly said "wlan"; this accessor returns the
 * IPA_SMMU_CB_AP entry — see ipa3_get_wlan_smmu_ctx() for WLAN.)
 *
 * Return value: pointer to smmu context address
 */
struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(void)
{
	return &smmu_cb[IPA_SMMU_CB_AP];
}
466
/**
 * ipa3_get_wlan_smmu_ctx()- Return the wlan smmu context
 *
 * Return value: pointer to smmu context address
 * (the IPA_SMMU_CB_WLAN entry of the smmu_cb[] array)
 */
struct ipa_smmu_cb_ctx *ipa3_get_wlan_smmu_ctx(void)
{
	return &smmu_cb[IPA_SMMU_CB_WLAN];
}
476
/**
 * ipa3_get_uc_smmu_ctx()- Return the uc smmu context
 *
 * Return value: pointer to smmu context address
 * (the IPA_SMMU_CB_UC entry of the smmu_cb[] array)
 */
struct ipa_smmu_cb_ctx *ipa3_get_uc_smmu_ctx(void)
{
	return &smmu_cb[IPA_SMMU_CB_UC];
}
486
487static int ipa3_open(struct inode *inode, struct file *filp)
488{
489 struct ipa3_context *ctx = NULL;
490
491 IPADBG_LOW("ENTER\n");
492 ctx = container_of(inode->i_cdev, struct ipa3_context, cdev);
493 filp->private_data = ctx;
494
495 return 0;
496}
497
498/**
499* ipa3_flow_control() - Enable/Disable flow control on a particular client.
500* Return codes:
501* None
502*/
503void ipa3_flow_control(enum ipa_client_type ipa_client,
504 bool enable, uint32_t qmap_id)
505{
506 struct ipa_ep_cfg_ctrl ep_ctrl = {0};
507 int ep_idx;
508 struct ipa3_ep_context *ep;
509
510 /* Check if tethered flow control is needed or not.*/
511 if (!ipa3_ctx->tethered_flow_control) {
512 IPADBG("Apps flow control is not needed\n");
513 return;
514 }
515
516 /* Check if ep is valid. */
517 ep_idx = ipa3_get_ep_mapping(ipa_client);
518 if (ep_idx == -1) {
519 IPADBG("Invalid IPA client\n");
520 return;
521 }
522
523 ep = &ipa3_ctx->ep[ep_idx];
524 if (!ep->valid || (ep->client != IPA_CLIENT_USB_PROD)) {
525 IPADBG("EP not valid/Not applicable for client.\n");
526 return;
527 }
528
529 spin_lock(&ipa3_ctx->disconnect_lock);
530 /* Check if the QMAP_ID matches. */
531 if (ep->cfg.meta.qmap_id != qmap_id) {
532 IPADBG("Flow control ind not for same flow: %u %u\n",
533 ep->cfg.meta.qmap_id, qmap_id);
534 spin_unlock(&ipa3_ctx->disconnect_lock);
535 return;
536 }
537 if (!ep->disconnect_in_progress) {
538 if (enable) {
539 IPADBG("Enabling Flow\n");
540 ep_ctrl.ipa_ep_delay = false;
541 IPA_STATS_INC_CNT(ipa3_ctx->stats.flow_enable);
542 } else {
543 IPADBG("Disabling Flow\n");
544 ep_ctrl.ipa_ep_delay = true;
545 IPA_STATS_INC_CNT(ipa3_ctx->stats.flow_disable);
546 }
547 ep_ctrl.ipa_ep_suspend = false;
548 ipa3_cfg_ep_ctrl(ep_idx, &ep_ctrl);
549 } else {
550 IPADBG("EP disconnect is in progress\n");
551 }
552 spin_unlock(&ipa3_ctx->disconnect_lock);
553}
554
555static void ipa3_wan_msg_free_cb(void *buff, u32 len, u32 type)
556{
557 if (!buff) {
558 IPAERR("Null buffer\n");
559 return;
560 }
561
562 if (type != WAN_UPSTREAM_ROUTE_ADD &&
563 type != WAN_UPSTREAM_ROUTE_DEL &&
564 type != WAN_EMBMS_CONNECT) {
565 IPAERR("Wrong type given. buff %p type %d\n", buff, type);
566 return;
567 }
568
569 kfree(buff);
570}
571
572static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type)
573{
574 int retval;
575 struct ipa_wan_msg *wan_msg;
576 struct ipa_msg_meta msg_meta;
577
578 wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
579 if (!wan_msg) {
580 IPAERR("no memory\n");
581 return -ENOMEM;
582 }
583
584 if (copy_from_user((u8 *)wan_msg, (u8 *)usr_param,
585 sizeof(struct ipa_wan_msg))) {
586 kfree(wan_msg);
587 return -EFAULT;
588 }
589
590 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
591 msg_meta.msg_type = msg_type;
592 msg_meta.msg_len = sizeof(struct ipa_wan_msg);
593 retval = ipa3_send_msg(&msg_meta, wan_msg, ipa3_wan_msg_free_cb);
594 if (retval) {
595 IPAERR("ipa3_send_msg failed: %d\n", retval);
596 kfree(wan_msg);
597 return retval;
598 }
599
600 return 0;
601}
602
603
604static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
605{
606 int retval = 0;
607 u32 pyld_sz;
608 u8 header[128] = { 0 };
609 u8 *param = NULL;
610 struct ipa_ioc_nat_alloc_mem nat_mem;
611 struct ipa_ioc_v4_nat_init nat_init;
612 struct ipa_ioc_v4_nat_del nat_del;
613 struct ipa_ioc_rm_dependency rm_depend;
614 size_t sz;
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200615 int pre_entry;
Amir Levy9659e592016-10-27 18:08:27 +0300616
617 IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
618
Amir Levy9659e592016-10-27 18:08:27 +0300619 if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
620 return -ENOTTY;
621 if (_IOC_NR(cmd) >= IPA_IOCTL_MAX)
622 return -ENOTTY;
623
Amir Levy05532622016-11-28 12:12:01 +0200624 if (!ipa3_is_ready()) {
625 IPAERR("IPA not ready, waiting for init completion\n");
626 wait_for_completion(&ipa3_ctx->init_completion_obj);
627 }
628
Amir Levy9659e592016-10-27 18:08:27 +0300629 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
630
631 switch (cmd) {
632 case IPA_IOC_ALLOC_NAT_MEM:
633 if (copy_from_user((u8 *)&nat_mem, (u8 *)arg,
634 sizeof(struct ipa_ioc_nat_alloc_mem))) {
635 retval = -EFAULT;
636 break;
637 }
638 /* null terminate the string */
639 nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
640
641 if (ipa3_allocate_nat_device(&nat_mem)) {
642 retval = -EFAULT;
643 break;
644 }
645 if (copy_to_user((u8 *)arg, (u8 *)&nat_mem,
646 sizeof(struct ipa_ioc_nat_alloc_mem))) {
647 retval = -EFAULT;
648 break;
649 }
650 break;
651 case IPA_IOC_V4_INIT_NAT:
652 if (copy_from_user((u8 *)&nat_init, (u8 *)arg,
653 sizeof(struct ipa_ioc_v4_nat_init))) {
654 retval = -EFAULT;
655 break;
656 }
657 if (ipa3_nat_init_cmd(&nat_init)) {
658 retval = -EFAULT;
659 break;
660 }
661 break;
662
663 case IPA_IOC_NAT_DMA:
664 if (copy_from_user(header, (u8 *)arg,
665 sizeof(struct ipa_ioc_nat_dma_cmd))) {
666 retval = -EFAULT;
667 break;
668 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200669 pre_entry =
670 ((struct ipa_ioc_nat_dma_cmd *)header)->entries;
Amir Levy9659e592016-10-27 18:08:27 +0300671 pyld_sz =
672 sizeof(struct ipa_ioc_nat_dma_cmd) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200673 pre_entry * sizeof(struct ipa_ioc_nat_dma_one);
Amir Levy9659e592016-10-27 18:08:27 +0300674 param = kzalloc(pyld_sz, GFP_KERNEL);
675 if (!param) {
676 retval = -ENOMEM;
677 break;
678 }
679
680 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
681 retval = -EFAULT;
682 break;
683 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200684 /* add check in case user-space module compromised */
685 if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
686 != pre_entry)) {
687 IPAERR("current %d pre %d\n",
688 ((struct ipa_ioc_nat_dma_cmd *)param)->entries,
689 pre_entry);
690 retval = -EFAULT;
691 break;
692 }
Amir Levy9659e592016-10-27 18:08:27 +0300693 if (ipa3_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
694 retval = -EFAULT;
695 break;
696 }
697 break;
698
699 case IPA_IOC_V4_DEL_NAT:
700 if (copy_from_user((u8 *)&nat_del, (u8 *)arg,
701 sizeof(struct ipa_ioc_v4_nat_del))) {
702 retval = -EFAULT;
703 break;
704 }
705 if (ipa3_nat_del_cmd(&nat_del)) {
706 retval = -EFAULT;
707 break;
708 }
709 break;
710
711 case IPA_IOC_ADD_HDR:
712 if (copy_from_user(header, (u8 *)arg,
713 sizeof(struct ipa_ioc_add_hdr))) {
714 retval = -EFAULT;
715 break;
716 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200717 pre_entry =
718 ((struct ipa_ioc_add_hdr *)header)->num_hdrs;
Amir Levy9659e592016-10-27 18:08:27 +0300719 pyld_sz =
720 sizeof(struct ipa_ioc_add_hdr) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200721 pre_entry * sizeof(struct ipa_hdr_add);
Amir Levy9659e592016-10-27 18:08:27 +0300722 param = kzalloc(pyld_sz, GFP_KERNEL);
723 if (!param) {
724 retval = -ENOMEM;
725 break;
726 }
727 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
728 retval = -EFAULT;
729 break;
730 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200731 /* add check in case user-space module compromised */
732 if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
733 != pre_entry)) {
734 IPAERR("current %d pre %d\n",
735 ((struct ipa_ioc_add_hdr *)param)->num_hdrs,
736 pre_entry);
737 retval = -EFAULT;
738 break;
739 }
Amir Levy9659e592016-10-27 18:08:27 +0300740 if (ipa3_add_hdr((struct ipa_ioc_add_hdr *)param)) {
741 retval = -EFAULT;
742 break;
743 }
744 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
745 retval = -EFAULT;
746 break;
747 }
748 break;
749
750 case IPA_IOC_DEL_HDR:
751 if (copy_from_user(header, (u8 *)arg,
752 sizeof(struct ipa_ioc_del_hdr))) {
753 retval = -EFAULT;
754 break;
755 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200756 pre_entry =
757 ((struct ipa_ioc_del_hdr *)header)->num_hdls;
Amir Levy9659e592016-10-27 18:08:27 +0300758 pyld_sz =
759 sizeof(struct ipa_ioc_del_hdr) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200760 pre_entry * sizeof(struct ipa_hdr_del);
Amir Levy9659e592016-10-27 18:08:27 +0300761 param = kzalloc(pyld_sz, GFP_KERNEL);
762 if (!param) {
763 retval = -ENOMEM;
764 break;
765 }
766 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
767 retval = -EFAULT;
768 break;
769 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200770 /* add check in case user-space module compromised */
771 if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
772 != pre_entry)) {
773 IPAERR("current %d pre %d\n",
774 ((struct ipa_ioc_del_hdr *)param)->num_hdls,
775 pre_entry);
776 retval = -EFAULT;
777 break;
778 }
Ghanim Fodi2c8ba072017-01-12 15:14:15 +0200779 if (ipa3_del_hdr_by_user((struct ipa_ioc_del_hdr *)param,
780 true)) {
Amir Levy9659e592016-10-27 18:08:27 +0300781 retval = -EFAULT;
782 break;
783 }
784 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
785 retval = -EFAULT;
786 break;
787 }
788 break;
789
790 case IPA_IOC_ADD_RT_RULE:
791 if (copy_from_user(header, (u8 *)arg,
792 sizeof(struct ipa_ioc_add_rt_rule))) {
793 retval = -EFAULT;
794 break;
795 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200796 pre_entry =
797 ((struct ipa_ioc_add_rt_rule *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +0300798 pyld_sz =
799 sizeof(struct ipa_ioc_add_rt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200800 pre_entry * sizeof(struct ipa_rt_rule_add);
Amir Levy9659e592016-10-27 18:08:27 +0300801 param = kzalloc(pyld_sz, GFP_KERNEL);
802 if (!param) {
803 retval = -ENOMEM;
804 break;
805 }
806 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
807 retval = -EFAULT;
808 break;
809 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200810 /* add check in case user-space module compromised */
811 if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
812 != pre_entry)) {
813 IPAERR("current %d pre %d\n",
814 ((struct ipa_ioc_add_rt_rule *)param)->
815 num_rules,
816 pre_entry);
817 retval = -EFAULT;
818 break;
819 }
Amir Levy9659e592016-10-27 18:08:27 +0300820 if (ipa3_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
821 retval = -EFAULT;
822 break;
823 }
824 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
825 retval = -EFAULT;
826 break;
827 }
828 break;
829 case IPA_IOC_ADD_RT_RULE_AFTER:
830 if (copy_from_user(header, (u8 *)arg,
831 sizeof(struct ipa_ioc_add_rt_rule_after))) {
832
833 retval = -EFAULT;
834 break;
835 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200836 pre_entry =
837 ((struct ipa_ioc_add_rt_rule_after *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +0300838 pyld_sz =
839 sizeof(struct ipa_ioc_add_rt_rule_after) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200840 pre_entry * sizeof(struct ipa_rt_rule_add);
Amir Levy9659e592016-10-27 18:08:27 +0300841 param = kzalloc(pyld_sz, GFP_KERNEL);
842 if (!param) {
843 retval = -ENOMEM;
844 break;
845 }
846 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
847 retval = -EFAULT;
848 break;
849 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200850 /* add check in case user-space module compromised */
851 if (unlikely(((struct ipa_ioc_add_rt_rule_after *)param)->
852 num_rules != pre_entry)) {
853 IPAERR("current %d pre %d\n",
854 ((struct ipa_ioc_add_rt_rule_after *)param)->
855 num_rules,
856 pre_entry);
857 retval = -EFAULT;
858 break;
859 }
Amir Levy9659e592016-10-27 18:08:27 +0300860 if (ipa3_add_rt_rule_after(
861 (struct ipa_ioc_add_rt_rule_after *)param)) {
862
863 retval = -EFAULT;
864 break;
865 }
866 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
867 retval = -EFAULT;
868 break;
869 }
870 break;
871
872 case IPA_IOC_MDFY_RT_RULE:
873 if (copy_from_user(header, (u8 *)arg,
874 sizeof(struct ipa_ioc_mdfy_rt_rule))) {
875 retval = -EFAULT;
876 break;
877 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200878 pre_entry =
879 ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +0300880 pyld_sz =
881 sizeof(struct ipa_ioc_mdfy_rt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200882 pre_entry * sizeof(struct ipa_rt_rule_mdfy);
Amir Levy9659e592016-10-27 18:08:27 +0300883 param = kzalloc(pyld_sz, GFP_KERNEL);
884 if (!param) {
885 retval = -ENOMEM;
886 break;
887 }
888 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
889 retval = -EFAULT;
890 break;
891 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200892 /* add check in case user-space module compromised */
893 if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
894 != pre_entry)) {
895 IPAERR("current %d pre %d\n",
896 ((struct ipa_ioc_mdfy_rt_rule *)param)->
897 num_rules,
898 pre_entry);
899 retval = -EFAULT;
900 break;
901 }
Amir Levy9659e592016-10-27 18:08:27 +0300902 if (ipa3_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) {
903 retval = -EFAULT;
904 break;
905 }
906 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
907 retval = -EFAULT;
908 break;
909 }
910 break;
911
912 case IPA_IOC_DEL_RT_RULE:
913 if (copy_from_user(header, (u8 *)arg,
914 sizeof(struct ipa_ioc_del_rt_rule))) {
915 retval = -EFAULT;
916 break;
917 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200918 pre_entry =
919 ((struct ipa_ioc_del_rt_rule *)header)->num_hdls;
Amir Levy9659e592016-10-27 18:08:27 +0300920 pyld_sz =
921 sizeof(struct ipa_ioc_del_rt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200922 pre_entry * sizeof(struct ipa_rt_rule_del);
Amir Levy9659e592016-10-27 18:08:27 +0300923 param = kzalloc(pyld_sz, GFP_KERNEL);
924 if (!param) {
925 retval = -ENOMEM;
926 break;
927 }
928 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
929 retval = -EFAULT;
930 break;
931 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200932 /* add check in case user-space module compromised */
933 if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
934 != pre_entry)) {
935 IPAERR("current %d pre %d\n",
936 ((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
937 pre_entry);
938 retval = -EFAULT;
939 break;
940 }
Amir Levy9659e592016-10-27 18:08:27 +0300941 if (ipa3_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
942 retval = -EFAULT;
943 break;
944 }
945 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
946 retval = -EFAULT;
947 break;
948 }
949 break;
950
951 case IPA_IOC_ADD_FLT_RULE:
952 if (copy_from_user(header, (u8 *)arg,
953 sizeof(struct ipa_ioc_add_flt_rule))) {
954 retval = -EFAULT;
955 break;
956 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200957 pre_entry =
958 ((struct ipa_ioc_add_flt_rule *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +0300959 pyld_sz =
960 sizeof(struct ipa_ioc_add_flt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200961 pre_entry * sizeof(struct ipa_flt_rule_add);
Amir Levy9659e592016-10-27 18:08:27 +0300962 param = kzalloc(pyld_sz, GFP_KERNEL);
963 if (!param) {
964 retval = -ENOMEM;
965 break;
966 }
967 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
968 retval = -EFAULT;
969 break;
970 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200971 /* add check in case user-space module compromised */
972 if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
973 != pre_entry)) {
974 IPAERR("current %d pre %d\n",
975 ((struct ipa_ioc_add_flt_rule *)param)->
976 num_rules,
977 pre_entry);
978 retval = -EFAULT;
979 break;
980 }
Amir Levy9659e592016-10-27 18:08:27 +0300981 if (ipa3_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
982 retval = -EFAULT;
983 break;
984 }
985 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
986 retval = -EFAULT;
987 break;
988 }
989 break;
990
991 case IPA_IOC_ADD_FLT_RULE_AFTER:
992 if (copy_from_user(header, (u8 *)arg,
993 sizeof(struct ipa_ioc_add_flt_rule_after))) {
994
995 retval = -EFAULT;
996 break;
997 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +0200998 pre_entry =
999 ((struct ipa_ioc_add_flt_rule_after *)header)->
1000 num_rules;
Amir Levy9659e592016-10-27 18:08:27 +03001001 pyld_sz =
1002 sizeof(struct ipa_ioc_add_flt_rule_after) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001003 pre_entry * sizeof(struct ipa_flt_rule_add);
Amir Levy9659e592016-10-27 18:08:27 +03001004 param = kzalloc(pyld_sz, GFP_KERNEL);
1005 if (!param) {
1006 retval = -ENOMEM;
1007 break;
1008 }
1009 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1010 retval = -EFAULT;
1011 break;
1012 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001013 /* add check in case user-space module compromised */
1014 if (unlikely(((struct ipa_ioc_add_flt_rule_after *)param)->
1015 num_rules != pre_entry)) {
1016 IPAERR("current %d pre %d\n",
1017 ((struct ipa_ioc_add_flt_rule_after *)param)->
1018 num_rules,
1019 pre_entry);
1020 retval = -EFAULT;
1021 break;
1022 }
Amir Levy9659e592016-10-27 18:08:27 +03001023 if (ipa3_add_flt_rule_after(
1024 (struct ipa_ioc_add_flt_rule_after *)param)) {
1025 retval = -EFAULT;
1026 break;
1027 }
1028 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1029 retval = -EFAULT;
1030 break;
1031 }
1032 break;
1033
1034 case IPA_IOC_DEL_FLT_RULE:
1035 if (copy_from_user(header, (u8 *)arg,
1036 sizeof(struct ipa_ioc_del_flt_rule))) {
1037 retval = -EFAULT;
1038 break;
1039 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001040 pre_entry =
1041 ((struct ipa_ioc_del_flt_rule *)header)->num_hdls;
Amir Levy9659e592016-10-27 18:08:27 +03001042 pyld_sz =
1043 sizeof(struct ipa_ioc_del_flt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001044 pre_entry * sizeof(struct ipa_flt_rule_del);
Amir Levy9659e592016-10-27 18:08:27 +03001045 param = kzalloc(pyld_sz, GFP_KERNEL);
1046 if (!param) {
1047 retval = -ENOMEM;
1048 break;
1049 }
1050 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1051 retval = -EFAULT;
1052 break;
1053 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001054 /* add check in case user-space module compromised */
1055 if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
1056 != pre_entry)) {
1057 IPAERR("current %d pre %d\n",
1058 ((struct ipa_ioc_del_flt_rule *)param)->
1059 num_hdls,
1060 pre_entry);
1061 retval = -EFAULT;
1062 break;
1063 }
Amir Levy9659e592016-10-27 18:08:27 +03001064 if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
1065 retval = -EFAULT;
1066 break;
1067 }
1068 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1069 retval = -EFAULT;
1070 break;
1071 }
1072 break;
1073
1074 case IPA_IOC_MDFY_FLT_RULE:
1075 if (copy_from_user(header, (u8 *)arg,
1076 sizeof(struct ipa_ioc_mdfy_flt_rule))) {
1077 retval = -EFAULT;
1078 break;
1079 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001080 pre_entry =
1081 ((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules;
Amir Levy9659e592016-10-27 18:08:27 +03001082 pyld_sz =
1083 sizeof(struct ipa_ioc_mdfy_flt_rule) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001084 pre_entry * sizeof(struct ipa_flt_rule_mdfy);
Amir Levy9659e592016-10-27 18:08:27 +03001085 param = kzalloc(pyld_sz, GFP_KERNEL);
1086 if (!param) {
1087 retval = -ENOMEM;
1088 break;
1089 }
1090 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1091 retval = -EFAULT;
1092 break;
1093 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001094 /* add check in case user-space module compromised */
1095 if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
1096 != pre_entry)) {
1097 IPAERR("current %d pre %d\n",
1098 ((struct ipa_ioc_mdfy_flt_rule *)param)->
1099 num_rules,
1100 pre_entry);
1101 retval = -EFAULT;
1102 break;
1103 }
Amir Levy9659e592016-10-27 18:08:27 +03001104 if (ipa3_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) {
1105 retval = -EFAULT;
1106 break;
1107 }
1108 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1109 retval = -EFAULT;
1110 break;
1111 }
1112 break;
1113
1114 case IPA_IOC_COMMIT_HDR:
1115 retval = ipa3_commit_hdr();
1116 break;
1117 case IPA_IOC_RESET_HDR:
1118 retval = ipa3_reset_hdr();
1119 break;
1120 case IPA_IOC_COMMIT_RT:
1121 retval = ipa3_commit_rt(arg);
1122 break;
1123 case IPA_IOC_RESET_RT:
1124 retval = ipa3_reset_rt(arg);
1125 break;
1126 case IPA_IOC_COMMIT_FLT:
1127 retval = ipa3_commit_flt(arg);
1128 break;
1129 case IPA_IOC_RESET_FLT:
1130 retval = ipa3_reset_flt(arg);
1131 break;
1132 case IPA_IOC_GET_RT_TBL:
1133 if (copy_from_user(header, (u8 *)arg,
1134 sizeof(struct ipa_ioc_get_rt_tbl))) {
1135 retval = -EFAULT;
1136 break;
1137 }
1138 if (ipa3_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) {
1139 retval = -EFAULT;
1140 break;
1141 }
1142 if (copy_to_user((u8 *)arg, header,
1143 sizeof(struct ipa_ioc_get_rt_tbl))) {
1144 retval = -EFAULT;
1145 break;
1146 }
1147 break;
1148 case IPA_IOC_PUT_RT_TBL:
1149 retval = ipa3_put_rt_tbl(arg);
1150 break;
1151 case IPA_IOC_GET_HDR:
1152 if (copy_from_user(header, (u8 *)arg,
1153 sizeof(struct ipa_ioc_get_hdr))) {
1154 retval = -EFAULT;
1155 break;
1156 }
1157 if (ipa3_get_hdr((struct ipa_ioc_get_hdr *)header)) {
1158 retval = -EFAULT;
1159 break;
1160 }
1161 if (copy_to_user((u8 *)arg, header,
1162 sizeof(struct ipa_ioc_get_hdr))) {
1163 retval = -EFAULT;
1164 break;
1165 }
1166 break;
1167 case IPA_IOC_PUT_HDR:
1168 retval = ipa3_put_hdr(arg);
1169 break;
1170 case IPA_IOC_SET_FLT:
1171 retval = ipa3_cfg_filter(arg);
1172 break;
1173 case IPA_IOC_COPY_HDR:
1174 if (copy_from_user(header, (u8 *)arg,
1175 sizeof(struct ipa_ioc_copy_hdr))) {
1176 retval = -EFAULT;
1177 break;
1178 }
1179 if (ipa3_copy_hdr((struct ipa_ioc_copy_hdr *)header)) {
1180 retval = -EFAULT;
1181 break;
1182 }
1183 if (copy_to_user((u8 *)arg, header,
1184 sizeof(struct ipa_ioc_copy_hdr))) {
1185 retval = -EFAULT;
1186 break;
1187 }
1188 break;
1189 case IPA_IOC_QUERY_INTF:
1190 if (copy_from_user(header, (u8 *)arg,
1191 sizeof(struct ipa_ioc_query_intf))) {
1192 retval = -EFAULT;
1193 break;
1194 }
1195 if (ipa3_query_intf((struct ipa_ioc_query_intf *)header)) {
1196 retval = -1;
1197 break;
1198 }
1199 if (copy_to_user((u8 *)arg, header,
1200 sizeof(struct ipa_ioc_query_intf))) {
1201 retval = -EFAULT;
1202 break;
1203 }
1204 break;
1205 case IPA_IOC_QUERY_INTF_TX_PROPS:
1206 sz = sizeof(struct ipa_ioc_query_intf_tx_props);
1207 if (copy_from_user(header, (u8 *)arg, sz)) {
1208 retval = -EFAULT;
1209 break;
1210 }
1211
1212 if (((struct ipa_ioc_query_intf_tx_props *)header)->num_tx_props
1213 > IPA_NUM_PROPS_MAX) {
1214 retval = -EFAULT;
1215 break;
1216 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001217 pre_entry =
1218 ((struct ipa_ioc_query_intf_tx_props *)
1219 header)->num_tx_props;
1220 pyld_sz = sz + pre_entry *
Amir Levy9659e592016-10-27 18:08:27 +03001221 sizeof(struct ipa_ioc_tx_intf_prop);
1222 param = kzalloc(pyld_sz, GFP_KERNEL);
1223 if (!param) {
1224 retval = -ENOMEM;
1225 break;
1226 }
1227 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1228 retval = -EFAULT;
1229 break;
1230 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001231 /* add check in case user-space module compromised */
1232 if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
1233 param)->num_tx_props
1234 != pre_entry)) {
1235 IPAERR("current %d pre %d\n",
1236 ((struct ipa_ioc_query_intf_tx_props *)
1237 param)->num_tx_props, pre_entry);
1238 retval = -EFAULT;
1239 break;
1240 }
Amir Levy9659e592016-10-27 18:08:27 +03001241 if (ipa3_query_intf_tx_props(
1242 (struct ipa_ioc_query_intf_tx_props *)param)) {
1243 retval = -1;
1244 break;
1245 }
1246 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1247 retval = -EFAULT;
1248 break;
1249 }
1250 break;
1251 case IPA_IOC_QUERY_INTF_RX_PROPS:
1252 sz = sizeof(struct ipa_ioc_query_intf_rx_props);
1253 if (copy_from_user(header, (u8 *)arg, sz)) {
1254 retval = -EFAULT;
1255 break;
1256 }
1257
1258 if (((struct ipa_ioc_query_intf_rx_props *)header)->num_rx_props
1259 > IPA_NUM_PROPS_MAX) {
1260 retval = -EFAULT;
1261 break;
1262 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001263 pre_entry =
1264 ((struct ipa_ioc_query_intf_rx_props *)
1265 header)->num_rx_props;
1266 pyld_sz = sz + pre_entry *
Amir Levy9659e592016-10-27 18:08:27 +03001267 sizeof(struct ipa_ioc_rx_intf_prop);
1268 param = kzalloc(pyld_sz, GFP_KERNEL);
1269 if (!param) {
1270 retval = -ENOMEM;
1271 break;
1272 }
1273 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1274 retval = -EFAULT;
1275 break;
1276 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001277 /* add check in case user-space module compromised */
1278 if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
1279 param)->num_rx_props != pre_entry)) {
1280 IPAERR("current %d pre %d\n",
1281 ((struct ipa_ioc_query_intf_rx_props *)
1282 param)->num_rx_props, pre_entry);
1283 retval = -EFAULT;
1284 break;
1285 }
Amir Levy9659e592016-10-27 18:08:27 +03001286 if (ipa3_query_intf_rx_props(
1287 (struct ipa_ioc_query_intf_rx_props *)param)) {
1288 retval = -1;
1289 break;
1290 }
1291 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1292 retval = -EFAULT;
1293 break;
1294 }
1295 break;
1296 case IPA_IOC_QUERY_INTF_EXT_PROPS:
1297 sz = sizeof(struct ipa_ioc_query_intf_ext_props);
1298 if (copy_from_user(header, (u8 *)arg, sz)) {
1299 retval = -EFAULT;
1300 break;
1301 }
1302
1303 if (((struct ipa_ioc_query_intf_ext_props *)
1304 header)->num_ext_props > IPA_NUM_PROPS_MAX) {
1305 retval = -EFAULT;
1306 break;
1307 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001308 pre_entry =
1309 ((struct ipa_ioc_query_intf_ext_props *)
1310 header)->num_ext_props;
1311 pyld_sz = sz + pre_entry *
Amir Levy9659e592016-10-27 18:08:27 +03001312 sizeof(struct ipa_ioc_ext_intf_prop);
1313 param = kzalloc(pyld_sz, GFP_KERNEL);
1314 if (!param) {
1315 retval = -ENOMEM;
1316 break;
1317 }
1318 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1319 retval = -EFAULT;
1320 break;
1321 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001322 /* add check in case user-space module compromised */
1323 if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
1324 param)->num_ext_props != pre_entry)) {
1325 IPAERR("current %d pre %d\n",
1326 ((struct ipa_ioc_query_intf_ext_props *)
1327 param)->num_ext_props, pre_entry);
1328 retval = -EFAULT;
1329 break;
1330 }
Amir Levy9659e592016-10-27 18:08:27 +03001331 if (ipa3_query_intf_ext_props(
1332 (struct ipa_ioc_query_intf_ext_props *)param)) {
1333 retval = -1;
1334 break;
1335 }
1336 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1337 retval = -EFAULT;
1338 break;
1339 }
1340 break;
1341 case IPA_IOC_PULL_MSG:
1342 if (copy_from_user(header, (u8 *)arg,
1343 sizeof(struct ipa_msg_meta))) {
1344 retval = -EFAULT;
1345 break;
1346 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001347 pre_entry =
Amir Levy9659e592016-10-27 18:08:27 +03001348 ((struct ipa_msg_meta *)header)->msg_len;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001349 pyld_sz = sizeof(struct ipa_msg_meta) +
1350 pre_entry;
Amir Levy9659e592016-10-27 18:08:27 +03001351 param = kzalloc(pyld_sz, GFP_KERNEL);
1352 if (!param) {
1353 retval = -ENOMEM;
1354 break;
1355 }
1356 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1357 retval = -EFAULT;
1358 break;
1359 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001360 /* add check in case user-space module compromised */
1361 if (unlikely(((struct ipa_msg_meta *)param)->msg_len
1362 != pre_entry)) {
1363 IPAERR("current %d pre %d\n",
1364 ((struct ipa_msg_meta *)param)->msg_len,
1365 pre_entry);
1366 retval = -EFAULT;
1367 break;
1368 }
Amir Levy9659e592016-10-27 18:08:27 +03001369 if (ipa3_pull_msg((struct ipa_msg_meta *)param,
1370 (char *)param + sizeof(struct ipa_msg_meta),
1371 ((struct ipa_msg_meta *)param)->msg_len) !=
1372 ((struct ipa_msg_meta *)param)->msg_len) {
1373 retval = -1;
1374 break;
1375 }
1376 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1377 retval = -EFAULT;
1378 break;
1379 }
1380 break;
1381 case IPA_IOC_RM_ADD_DEPENDENCY:
1382 if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
1383 sizeof(struct ipa_ioc_rm_dependency))) {
1384 retval = -EFAULT;
1385 break;
1386 }
1387 retval = ipa_rm_add_dependency_from_ioctl(
1388 rm_depend.resource_name, rm_depend.depends_on_name);
1389 break;
1390 case IPA_IOC_RM_DEL_DEPENDENCY:
1391 if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
1392 sizeof(struct ipa_ioc_rm_dependency))) {
1393 retval = -EFAULT;
1394 break;
1395 }
1396 retval = ipa_rm_delete_dependency_from_ioctl(
1397 rm_depend.resource_name, rm_depend.depends_on_name);
1398 break;
1399 case IPA_IOC_GENERATE_FLT_EQ:
1400 {
1401 struct ipa_ioc_generate_flt_eq flt_eq;
1402
1403 if (copy_from_user(&flt_eq, (u8 *)arg,
1404 sizeof(struct ipa_ioc_generate_flt_eq))) {
1405 retval = -EFAULT;
1406 break;
1407 }
1408 if (ipahal_flt_generate_equation(flt_eq.ip,
1409 &flt_eq.attrib, &flt_eq.eq_attrib)) {
1410 retval = -EFAULT;
1411 break;
1412 }
1413 if (copy_to_user((u8 *)arg, &flt_eq,
1414 sizeof(struct ipa_ioc_generate_flt_eq))) {
1415 retval = -EFAULT;
1416 break;
1417 }
1418 break;
1419 }
1420 case IPA_IOC_QUERY_EP_MAPPING:
1421 {
1422 retval = ipa3_get_ep_mapping(arg);
1423 break;
1424 }
1425 case IPA_IOC_QUERY_RT_TBL_INDEX:
1426 if (copy_from_user(header, (u8 *)arg,
1427 sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
1428 retval = -EFAULT;
1429 break;
1430 }
1431 if (ipa3_query_rt_index(
1432 (struct ipa_ioc_get_rt_tbl_indx *)header)) {
1433 retval = -EFAULT;
1434 break;
1435 }
1436 if (copy_to_user((u8 *)arg, header,
1437 sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
1438 retval = -EFAULT;
1439 break;
1440 }
1441 break;
1442 case IPA_IOC_WRITE_QMAPID:
1443 if (copy_from_user(header, (u8 *)arg,
1444 sizeof(struct ipa_ioc_write_qmapid))) {
1445 retval = -EFAULT;
1446 break;
1447 }
1448 if (ipa3_write_qmap_id((struct ipa_ioc_write_qmapid *)header)) {
1449 retval = -EFAULT;
1450 break;
1451 }
1452 if (copy_to_user((u8 *)arg, header,
1453 sizeof(struct ipa_ioc_write_qmapid))) {
1454 retval = -EFAULT;
1455 break;
1456 }
1457 break;
1458 case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD:
1459 retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD);
1460 if (retval) {
1461 IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
1462 break;
1463 }
1464 break;
1465 case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL:
1466 retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL);
1467 if (retval) {
1468 IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
1469 break;
1470 }
1471 break;
1472 case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED:
1473 retval = ipa3_send_wan_msg(arg, WAN_EMBMS_CONNECT);
1474 if (retval) {
1475 IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
1476 break;
1477 }
1478 break;
1479 case IPA_IOC_ADD_HDR_PROC_CTX:
1480 if (copy_from_user(header, (u8 *)arg,
1481 sizeof(struct ipa_ioc_add_hdr_proc_ctx))) {
1482 retval = -EFAULT;
1483 break;
1484 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001485 pre_entry =
1486 ((struct ipa_ioc_add_hdr_proc_ctx *)
1487 header)->num_proc_ctxs;
Amir Levy9659e592016-10-27 18:08:27 +03001488 pyld_sz =
1489 sizeof(struct ipa_ioc_add_hdr_proc_ctx) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001490 pre_entry * sizeof(struct ipa_hdr_proc_ctx_add);
Amir Levy9659e592016-10-27 18:08:27 +03001491 param = kzalloc(pyld_sz, GFP_KERNEL);
1492 if (!param) {
1493 retval = -ENOMEM;
1494 break;
1495 }
1496 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1497 retval = -EFAULT;
1498 break;
1499 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001500 /* add check in case user-space module compromised */
1501 if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
1502 param)->num_proc_ctxs != pre_entry)) {
1503 IPAERR("current %d pre %d\n",
1504 ((struct ipa_ioc_add_hdr_proc_ctx *)
1505 param)->num_proc_ctxs, pre_entry);
1506 retval = -EFAULT;
1507 break;
1508 }
Amir Levy9659e592016-10-27 18:08:27 +03001509 if (ipa3_add_hdr_proc_ctx(
1510 (struct ipa_ioc_add_hdr_proc_ctx *)param)) {
1511 retval = -EFAULT;
1512 break;
1513 }
1514 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1515 retval = -EFAULT;
1516 break;
1517 }
1518 break;
1519 case IPA_IOC_DEL_HDR_PROC_CTX:
1520 if (copy_from_user(header, (u8 *)arg,
1521 sizeof(struct ipa_ioc_del_hdr_proc_ctx))) {
1522 retval = -EFAULT;
1523 break;
1524 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001525 pre_entry =
1526 ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls;
Amir Levy9659e592016-10-27 18:08:27 +03001527 pyld_sz =
1528 sizeof(struct ipa_ioc_del_hdr_proc_ctx) +
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001529 pre_entry * sizeof(struct ipa_hdr_proc_ctx_del);
Amir Levy9659e592016-10-27 18:08:27 +03001530 param = kzalloc(pyld_sz, GFP_KERNEL);
1531 if (!param) {
1532 retval = -ENOMEM;
1533 break;
1534 }
1535 if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
1536 retval = -EFAULT;
1537 break;
1538 }
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001539 /* add check in case user-space module compromised */
1540 if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
1541 param)->num_hdls != pre_entry)) {
1542 IPAERR("current %d pre %d\n",
1543 ((struct ipa_ioc_del_hdr_proc_ctx *)param)->
1544 num_hdls,
1545 pre_entry);
1546 retval = -EFAULT;
1547 break;
1548 }
Ghanim Fodi2c8ba072017-01-12 15:14:15 +02001549 if (ipa3_del_hdr_proc_ctx_by_user(
1550 (struct ipa_ioc_del_hdr_proc_ctx *)param, true)) {
Amir Levy9659e592016-10-27 18:08:27 +03001551 retval = -EFAULT;
1552 break;
1553 }
1554 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1555 retval = -EFAULT;
1556 break;
1557 }
1558 break;
1559
1560 case IPA_IOC_GET_HW_VERSION:
1561 pyld_sz = sizeof(enum ipa_hw_type);
1562 param = kzalloc(pyld_sz, GFP_KERNEL);
1563 if (!param) {
1564 retval = -ENOMEM;
1565 break;
1566 }
1567 memcpy(param, &ipa3_ctx->ipa_hw_type, pyld_sz);
1568 if (copy_to_user((u8 *)arg, param, pyld_sz)) {
1569 retval = -EFAULT;
1570 break;
1571 }
1572 break;
1573
1574 default: /* redundant, as cmd was checked against MAXNR */
1575 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1576 return -ENOTTY;
1577 }
1578 kfree(param);
1579 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1580
1581 return retval;
1582}
1583
1584/**
1585* ipa3_setup_dflt_rt_tables() - Setup default routing tables
1586*
1587* Return codes:
1588* 0: success
1589* -ENOMEM: failed to allocate memory
1590* -EPERM: failed to add the tables
1591*/
1592int ipa3_setup_dflt_rt_tables(void)
1593{
1594 struct ipa_ioc_add_rt_rule *rt_rule;
1595 struct ipa_rt_rule_add *rt_rule_entry;
1596
1597 rt_rule =
1598 kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
1599 sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
1600 if (!rt_rule) {
1601 IPAERR("fail to alloc mem\n");
1602 return -ENOMEM;
1603 }
1604 /* setup a default v4 route to point to Apps */
1605 rt_rule->num_rules = 1;
1606 rt_rule->commit = 1;
1607 rt_rule->ip = IPA_IP_v4;
1608 strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
1609 IPA_RESOURCE_NAME_MAX);
1610
1611 rt_rule_entry = &rt_rule->rules[0];
1612 rt_rule_entry->at_rear = 1;
1613 rt_rule_entry->rule.dst = IPA_CLIENT_APPS_LAN_CONS;
1614 rt_rule_entry->rule.hdr_hdl = ipa3_ctx->excp_hdr_hdl;
1615 rt_rule_entry->rule.retain_hdr = 1;
1616
1617 if (ipa3_add_rt_rule(rt_rule)) {
1618 IPAERR("fail to add dflt v4 rule\n");
1619 kfree(rt_rule);
1620 return -EPERM;
1621 }
1622 IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
1623 ipa3_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
1624
1625 /* setup a default v6 route to point to A5 */
1626 rt_rule->ip = IPA_IP_v6;
1627 if (ipa3_add_rt_rule(rt_rule)) {
1628 IPAERR("fail to add dflt v6 rule\n");
1629 kfree(rt_rule);
1630 return -EPERM;
1631 }
1632 IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
1633 ipa3_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
1634
1635 /*
1636 * because these tables are the very first to be added, they will both
1637 * have the same index (0) which is essential for programming the
1638 * "route" end-point config
1639 */
1640
1641 kfree(rt_rule);
1642
1643 return 0;
1644}
1645
1646static int ipa3_setup_exception_path(void)
1647{
1648 struct ipa_ioc_add_hdr *hdr;
1649 struct ipa_hdr_add *hdr_entry;
1650 struct ipahal_reg_route route = { 0 };
1651 int ret;
1652
1653 /* install the basic exception header */
1654 hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
1655 sizeof(struct ipa_hdr_add), GFP_KERNEL);
1656 if (!hdr) {
1657 IPAERR("fail to alloc exception hdr\n");
1658 return -ENOMEM;
1659 }
1660 hdr->num_hdrs = 1;
1661 hdr->commit = 1;
1662 hdr_entry = &hdr->hdr[0];
1663
1664 strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, IPA_RESOURCE_NAME_MAX);
1665 hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH;
1666
1667 if (ipa3_add_hdr(hdr)) {
1668 IPAERR("fail to add exception hdr\n");
1669 ret = -EPERM;
1670 goto bail;
1671 }
1672
1673 if (hdr_entry->status) {
1674 IPAERR("fail to add exception hdr\n");
1675 ret = -EPERM;
1676 goto bail;
1677 }
1678
1679 ipa3_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
1680
1681 /* set the route register to pass exception packets to Apps */
1682 route.route_def_pipe = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
1683 route.route_frag_def_pipe = ipa3_get_ep_mapping(
1684 IPA_CLIENT_APPS_LAN_CONS);
1685 route.route_def_hdr_table = !ipa3_ctx->hdr_tbl_lcl;
1686 route.route_def_retain_hdr = 1;
1687
1688 if (ipa3_cfg_route(&route)) {
1689 IPAERR("fail to add exception hdr\n");
1690 ret = -EPERM;
1691 goto bail;
1692 }
1693
1694 ret = 0;
1695bail:
1696 kfree(hdr);
1697 return ret;
1698}
1699
1700static int ipa3_init_smem_region(int memory_region_size,
1701 int memory_region_offset)
1702{
1703 struct ipahal_imm_cmd_dma_shared_mem cmd;
1704 struct ipahal_imm_cmd_pyld *cmd_pyld;
1705 struct ipa3_desc desc;
1706 struct ipa_mem_buffer mem;
1707 int rc;
1708
1709 if (memory_region_size == 0)
1710 return 0;
1711
1712 memset(&desc, 0, sizeof(desc));
1713 memset(&cmd, 0, sizeof(cmd));
1714 memset(&mem, 0, sizeof(mem));
1715
1716 mem.size = memory_region_size;
1717 mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
1718 &mem.phys_base, GFP_KERNEL);
1719 if (!mem.base) {
1720 IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
1721 return -ENOMEM;
1722 }
1723
1724 memset(mem.base, 0, mem.size);
1725 cmd.is_read = false;
1726 cmd.skip_pipeline_clear = false;
1727 cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
1728 cmd.size = mem.size;
1729 cmd.system_addr = mem.phys_base;
1730 cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
1731 memory_region_offset;
1732 cmd_pyld = ipahal_construct_imm_cmd(
1733 IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
1734 if (!cmd_pyld) {
1735 IPAERR("failed to construct dma_shared_mem imm cmd\n");
1736 return -ENOMEM;
1737 }
1738 desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
1739 desc.pyld = cmd_pyld->data;
1740 desc.len = cmd_pyld->len;
1741 desc.type = IPA_IMM_CMD_DESC;
1742
1743 rc = ipa3_send_cmd(1, &desc);
1744 if (rc) {
1745 IPAERR("failed to send immediate command (error %d)\n", rc);
1746 rc = -EFAULT;
1747 }
1748
1749 ipahal_destroy_imm_cmd(cmd_pyld);
1750 dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
1751 mem.phys_base);
1752
1753 return rc;
1754}
1755
1756/**
1757* ipa3_init_q6_smem() - Initialize Q6 general memory and
1758* header memory regions in IPA.
1759*
1760* Return codes:
1761* 0: success
1762* -ENOMEM: failed to allocate dma memory
1763* -EFAULT: failed to send IPA command to initialize the memory
1764*/
1765int ipa3_init_q6_smem(void)
1766{
1767 int rc;
1768
1769 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
1770
1771 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_size),
1772 IPA_MEM_PART(modem_ofst));
1773 if (rc) {
1774 IPAERR("failed to initialize Modem RAM memory\n");
1775 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1776 return rc;
1777 }
1778
1779 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_size),
1780 IPA_MEM_PART(modem_hdr_ofst));
1781 if (rc) {
1782 IPAERR("failed to initialize Modem HDRs RAM memory\n");
1783 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1784 return rc;
1785 }
1786
1787 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_proc_ctx_size),
1788 IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
1789 if (rc) {
1790 IPAERR("failed to initialize Modem proc ctx RAM memory\n");
1791 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1792 return rc;
1793 }
1794
1795 rc = ipa3_init_smem_region(IPA_MEM_PART(modem_comp_decomp_size),
1796 IPA_MEM_PART(modem_comp_decomp_ofst));
1797 if (rc) {
1798 IPAERR("failed to initialize Modem Comp/Decomp RAM memory\n");
1799 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1800 return rc;
1801 }
1802 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
1803
1804 return rc;
1805}
1806
/*
 * ipa3_destroy_imm() - release an immediate-command payload.
 * @user1: the ipahal_imm_cmd_pyld to destroy
 * @user2: unused; present to satisfy a (void *, int) callback signature
 *         — presumably a descriptor-completion callback, confirm at callers
 */
static void ipa3_destroy_imm(void *user1, int user2)
{
	ipahal_destroy_imm_cmd(user1);
}
1811
1812static void ipa3_q6_pipe_delay(bool delay)
1813{
1814 int client_idx;
1815 int ep_idx;
1816 struct ipa_ep_cfg_ctrl ep_ctrl;
1817
1818 memset(&ep_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
1819 ep_ctrl.ipa_ep_delay = delay;
1820
1821 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
1822 if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
1823 ep_idx = ipa3_get_ep_mapping(client_idx);
1824 if (ep_idx == -1)
1825 continue;
1826
1827 ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n,
1828 ep_idx, &ep_ctrl);
1829 }
1830 }
1831}
1832
1833static void ipa3_q6_avoid_holb(void)
1834{
1835 int ep_idx;
1836 int client_idx;
1837 struct ipa_ep_cfg_ctrl ep_suspend;
1838 struct ipa_ep_cfg_holb ep_holb;
1839
1840 memset(&ep_suspend, 0, sizeof(ep_suspend));
1841 memset(&ep_holb, 0, sizeof(ep_holb));
1842
1843 ep_suspend.ipa_ep_suspend = true;
1844 ep_holb.tmr_val = 0;
1845 ep_holb.en = 1;
1846
1847 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
1848 if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
1849 ep_idx = ipa3_get_ep_mapping(client_idx);
1850 if (ep_idx == -1)
1851 continue;
1852
1853 /*
1854 * ipa3_cfg_ep_holb is not used here because we are
1855 * setting HOLB on Q6 pipes, and from APPS perspective
1856 * they are not valid, therefore, the above function
1857 * will fail.
1858 */
1859 ipahal_write_reg_n_fields(
1860 IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
1861 ep_idx, &ep_holb);
1862 ipahal_write_reg_n_fields(
1863 IPA_ENDP_INIT_HOL_BLOCK_EN_n,
1864 ep_idx, &ep_holb);
1865
1866 ipahal_write_reg_n_fields(
1867 IPA_ENDP_INIT_CTRL_n,
1868 ep_idx, &ep_suspend);
1869 }
1870 }
1871}
1872
Skylar Chang94692c92017-03-01 09:07:11 -08001873static void ipa3_halt_q6_cons_gsi_channels(void)
1874{
1875 int ep_idx;
1876 int client_idx;
1877 const struct ipa_gsi_ep_config *gsi_ep_cfg;
1878 int ret;
1879 int code = 0;
1880
1881 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
1882 if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
1883 ep_idx = ipa3_get_ep_mapping(client_idx);
1884 if (ep_idx == -1)
1885 continue;
1886
Skylar Changc1f15312017-05-09 14:14:32 -07001887 gsi_ep_cfg = ipa3_get_gsi_ep_info(client_idx);
Skylar Chang94692c92017-03-01 09:07:11 -08001888 if (!gsi_ep_cfg) {
1889 IPAERR("failed to get GSI config\n");
1890 ipa_assert();
1891 return;
1892 }
1893
1894 ret = gsi_halt_channel_ee(
1895 gsi_ep_cfg->ipa_gsi_chan_num, gsi_ep_cfg->ee,
1896 &code);
1897 if (ret == GSI_STATUS_SUCCESS)
1898 IPADBG("halted gsi ch %d ee %d with code %d\n",
1899 gsi_ep_cfg->ipa_gsi_chan_num,
1900 gsi_ep_cfg->ee,
1901 code);
1902 else
1903 IPAERR("failed to halt ch %d ee %d code %d\n",
1904 gsi_ep_cfg->ipa_gsi_chan_num,
1905 gsi_ep_cfg->ee,
1906 code);
1907 }
1908 }
1909}
1910
1911
/*
 * ipa3_q6_clean_q6_flt_tbls() - overwrite Q6-owned filter table headers
 * with an empty-table image for one (ip, rlt) combination.
 * @ip: IPA_IP_v4 or IPA_IP_v6 (validated against IPA_IP_MAX)
 * @rlt: IPA_RULE_HASHABLE or IPA_RULE_NON_HASHABLE
 *
 * Builds one empty filter-table image, then issues one DMA_SHARED_MEM
 * immediate command per filtering pipe that is either invalid-but-connected
 * or connected-but-not-configured by the AP, pointing each pipe's table
 * header entry at the empty image.
 *
 * Return: 0 on success, -EINVAL on bad arguments, -ENOMEM on allocation
 * failure, -EFAULT if sending the immediate commands failed.
 */
static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
	enum ipa_rule_type rlt)
{
	struct ipa3_desc *desc;
	struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
	struct ipahal_imm_cmd_pyld **cmd_pyld;
	int retval = 0;
	int pipe_idx;
	int flt_idx = 0;	/* index among filtering-capable pipes only */
	int num_cmds = 0;	/* number of commands actually constructed */
	int index;
	u32 lcl_addr_mem_part;	/* SMEM offset of the selected flt table */
	u32 lcl_hdr_sz;		/* header size of the selected flt table */
	struct ipa_mem_buffer mem;

	IPADBG("Entry\n");

	if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
		IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
		return -EINVAL;
	}

	/* Up to filtering pipes we have filtering tables */
	desc = kcalloc(ipa3_ctx->ep_flt_num, sizeof(struct ipa3_desc),
		GFP_KERNEL);
	if (!desc) {
		IPAERR("failed to allocate memory\n");
		return -ENOMEM;
	}

	cmd_pyld = kcalloc(ipa3_ctx->ep_flt_num,
		sizeof(struct ipahal_imm_cmd_pyld *), GFP_KERNEL);
	if (!cmd_pyld) {
		IPAERR("failed to allocate memory\n");
		retval = -ENOMEM;
		goto free_desc;
	}

	/* pick the SMEM partition matching the (ip, rlt) combination */
	if (ip == IPA_IP_v4) {
		if (rlt == IPA_RULE_HASHABLE) {
			lcl_addr_mem_part = IPA_MEM_PART(v4_flt_hash_ofst);
			lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
		} else {
			lcl_addr_mem_part = IPA_MEM_PART(v4_flt_nhash_ofst);
			lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
		}
	} else {
		if (rlt == IPA_RULE_HASHABLE) {
			lcl_addr_mem_part = IPA_MEM_PART(v6_flt_hash_ofst);
			lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
		} else {
			lcl_addr_mem_part = IPA_MEM_PART(v6_flt_nhash_ofst);
			lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
		}
	}

	/* one shared empty image is DMA'd into every targeted pipe's entry */
	retval = ipahal_flt_generate_empty_img(1, lcl_hdr_sz, lcl_hdr_sz,
		0, &mem, true);
	if (retval) {
		IPAERR("failed to generate flt single tbl empty img\n");
		goto free_cmd_pyld;
	}

	for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes; pipe_idx++) {
		if (!ipa_is_ep_support_flt(pipe_idx))
			continue;

		/*
		 * Iterating over all the filtering pipes which are either
		 * invalid but connected or connected but not configured by AP.
		 */
		if (!ipa3_ctx->ep[pipe_idx].valid ||
		    ipa3_ctx->ep[pipe_idx].skip_ep_cfg) {

			cmd.is_read = false;
			cmd.skip_pipeline_clear = false;
			cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
			cmd.size = mem.size;
			cmd.system_addr = mem.phys_base;
			/*
			 * destination: this pipe's header entry in the table;
			 * the leading hdr-width offset presumably skips a
			 * table preamble — confirm against the HAL layout.
			 */
			cmd.local_addr =
				ipa3_ctx->smem_restricted_bytes +
				lcl_addr_mem_part +
				ipahal_get_hw_tbl_hdr_width() +
				flt_idx * ipahal_get_hw_tbl_hdr_width();
			cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
				IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
			if (!cmd_pyld[num_cmds]) {
				IPAERR("fail construct dma_shared_mem cmd\n");
				retval = -ENOMEM;
				goto free_empty_img;
			}
			desc[num_cmds].opcode = ipahal_imm_cmd_get_opcode(
				IPA_IMM_CMD_DMA_SHARED_MEM);
			desc[num_cmds].pyld = cmd_pyld[num_cmds]->data;
			desc[num_cmds].len = cmd_pyld[num_cmds]->len;
			desc[num_cmds].type = IPA_IMM_CMD_DESC;
			num_cmds++;
		}

		flt_idx++;
	}

	IPADBG("Sending %d descriptors for flt tbl clearing\n", num_cmds);
	retval = ipa3_send_cmd(num_cmds, desc);
	if (retval) {
		IPAERR("failed to send immediate command (err %d)\n", retval);
		retval = -EFAULT;
	}

free_empty_img:
	ipahal_free_dma_mem(&mem);
free_cmd_pyld:
	/* only the first num_cmds payloads were ever constructed */
	for (index = 0; index < num_cmds; index++)
		ipahal_destroy_imm_cmd(cmd_pyld[index]);
	kfree(cmd_pyld);
free_desc:
	kfree(desc);
	return retval;
}
2031
2032static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip,
2033 enum ipa_rule_type rlt)
2034{
2035 struct ipa3_desc *desc;
2036 struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
2037 struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
2038 int retval = 0;
2039 u32 modem_rt_index_lo;
2040 u32 modem_rt_index_hi;
2041 u32 lcl_addr_mem_part;
2042 u32 lcl_hdr_sz;
2043 struct ipa_mem_buffer mem;
2044
2045 IPADBG("Entry\n");
2046
2047 if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
2048 IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
2049 return -EINVAL;
2050 }
2051
2052 if (ip == IPA_IP_v4) {
2053 modem_rt_index_lo = IPA_MEM_PART(v4_modem_rt_index_lo);
2054 modem_rt_index_hi = IPA_MEM_PART(v4_modem_rt_index_hi);
2055 if (rlt == IPA_RULE_HASHABLE) {
2056 lcl_addr_mem_part = IPA_MEM_PART(v4_rt_hash_ofst);
2057 lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
2058 } else {
2059 lcl_addr_mem_part = IPA_MEM_PART(v4_rt_nhash_ofst);
2060 lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
2061 }
2062 } else {
2063 modem_rt_index_lo = IPA_MEM_PART(v6_modem_rt_index_lo);
2064 modem_rt_index_hi = IPA_MEM_PART(v6_modem_rt_index_hi);
2065 if (rlt == IPA_RULE_HASHABLE) {
2066 lcl_addr_mem_part = IPA_MEM_PART(v6_rt_hash_ofst);
2067 lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
2068 } else {
2069 lcl_addr_mem_part = IPA_MEM_PART(v6_rt_nhash_ofst);
2070 lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
2071 }
2072 }
2073
2074 retval = ipahal_rt_generate_empty_img(
2075 modem_rt_index_hi - modem_rt_index_lo + 1,
Amir Levy4dc79be2017-02-01 19:18:35 +02002076 lcl_hdr_sz, lcl_hdr_sz, &mem, true);
Amir Levy9659e592016-10-27 18:08:27 +03002077 if (retval) {
2078 IPAERR("fail generate empty rt img\n");
2079 return -ENOMEM;
2080 }
2081
2082 desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
2083 if (!desc) {
2084 IPAERR("failed to allocate memory\n");
2085 goto free_empty_img;
2086 }
2087
2088 cmd.is_read = false;
2089 cmd.skip_pipeline_clear = false;
2090 cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
2091 cmd.size = mem.size;
2092 cmd.system_addr = mem.phys_base;
2093 cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
2094 lcl_addr_mem_part +
2095 modem_rt_index_lo * ipahal_get_hw_tbl_hdr_width();
2096 cmd_pyld = ipahal_construct_imm_cmd(
2097 IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
2098 if (!cmd_pyld) {
2099 IPAERR("failed to construct dma_shared_mem imm cmd\n");
2100 retval = -ENOMEM;
2101 goto free_desc;
2102 }
2103 desc->opcode =
2104 ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
2105 desc->pyld = cmd_pyld->data;
2106 desc->len = cmd_pyld->len;
2107 desc->type = IPA_IMM_CMD_DESC;
2108
2109 IPADBG("Sending 1 descriptor for rt tbl clearing\n");
2110 retval = ipa3_send_cmd(1, desc);
2111 if (retval) {
2112 IPAERR("failed to send immediate command (err %d)\n", retval);
2113 retval = -EFAULT;
2114 }
2115
2116 ipahal_destroy_imm_cmd(cmd_pyld);
2117free_desc:
2118 kfree(desc);
2119free_empty_img:
2120 ipahal_free_dma_mem(&mem);
2121 return retval;
2122}
2123
/**
 * ipa3_q6_clean_q6_tables() - Zero out all Q6 (modem) filter and routing
 * tables in IPA SRAM and flush the HW filter/routing hash caches.
 *
 * Called during modem SSR cleanup. First cleans all eight modem table
 * variants (v4/v6 x flt/rt x hashable/non-hashable), then sends a single
 * REGISTER_WRITE immediate command that flushes the four hash caches so
 * stale Q6 rules cannot be matched after the modem restarts.
 *
 * Return: 0 on success, -EFAULT on table-clean or command failure,
 * -ENOMEM on allocation failure.
 */
static int ipa3_q6_clean_q6_tables(void)
{
	struct ipa3_desc *desc;
	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
	int retval;
	struct ipahal_reg_fltrt_hash_flush flush;
	struct ipahal_reg_valmask valmask;

	IPADBG("Entry\n");


	/* Wipe every Q6-owned table variant; any failure here is fatal
	 * for the SSR flow (caller BUGs on error).
	 */
	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
		IPAERR("failed to clean q6 flt tbls (v4/hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
		IPAERR("failed to clean q6 flt tbls (v6/hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
		IPAERR("failed to clean q6 flt tbls (v4/non-hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
		IPAERR("failed to clean q6 flt tbls (v6/non-hashable)\n");
		return -EFAULT;
	}

	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
		IPAERR("failed to clean q6 rt tbls (v4/hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
		IPAERR("failed to clean q6 rt tbls (v6/hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
		IPAERR("failed to clean q6 rt tbls (v4/non-hashable)\n");
		return -EFAULT;
	}
	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
		IPAERR("failed to clean q6 rt tbls (v6/non-hashable)\n");
		return -EFAULT;
	}

	/* Flush rules cache */
	desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
	if (!desc) {
		IPAERR("failed to allocate memory\n");
		return -ENOMEM;
	}

	/* Flush all four hash caches in one register write */
	flush.v4_flt = true;
	flush.v4_rt = true;
	flush.v6_flt = true;
	flush.v6_rt = true;
	ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
	reg_write_cmd.skip_pipeline_clear = false;
	reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
	reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
	reg_write_cmd.value = valmask.val;
	reg_write_cmd.value_mask = valmask.mask;
	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
		&reg_write_cmd, false);
	if (!cmd_pyld) {
		IPAERR("fail construct register_write imm cmd\n");
		retval = -EFAULT;
		goto bail_desc;
	}
	desc->opcode =
		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
	desc->pyld = cmd_pyld->data;
	desc->len = cmd_pyld->len;
	desc->type = IPA_IMM_CMD_DESC;

	IPADBG("Sending 1 descriptor for tbls flush\n");
	retval = ipa3_send_cmd(1, desc);
	if (retval) {
		IPAERR("failed to send immediate command (err %d)\n", retval);
		retval = -EFAULT;
	}

	ipahal_destroy_imm_cmd(cmd_pyld);

bail_desc:
	kfree(desc);
	IPADBG("Done - retval = %d\n", retval);
	return retval;
}
2214
2215static int ipa3_q6_set_ex_path_to_apps(void)
2216{
2217 int ep_idx;
2218 int client_idx;
2219 struct ipa3_desc *desc;
2220 int num_descs = 0;
2221 int index;
2222 struct ipahal_imm_cmd_register_write reg_write;
2223 struct ipahal_imm_cmd_pyld *cmd_pyld;
2224 int retval;
2225 struct ipahal_reg_valmask valmask;
2226
2227 desc = kcalloc(ipa3_ctx->ipa_num_pipes, sizeof(struct ipa3_desc),
2228 GFP_KERNEL);
2229 if (!desc) {
2230 IPAERR("failed to allocate memory\n");
2231 return -ENOMEM;
2232 }
2233
2234 /* Set the exception path to AP */
2235 for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
2236 ep_idx = ipa3_get_ep_mapping(client_idx);
2237 if (ep_idx == -1)
2238 continue;
2239
2240 if (ipa3_ctx->ep[ep_idx].valid &&
2241 ipa3_ctx->ep[ep_idx].skip_ep_cfg) {
2242 BUG_ON(num_descs >= ipa3_ctx->ipa_num_pipes);
2243
2244 reg_write.skip_pipeline_clear = false;
2245 reg_write.pipeline_clear_options =
2246 IPAHAL_HPS_CLEAR;
2247 reg_write.offset =
Amir Levy8c19dd42017-04-02 18:21:09 +03002248 ipahal_get_reg_n_ofst(IPA_ENDP_STATUS_n,
2249 ep_idx);
Amir Levy9659e592016-10-27 18:08:27 +03002250 ipahal_get_status_ep_valmask(
2251 ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS),
2252 &valmask);
2253 reg_write.value = valmask.val;
2254 reg_write.value_mask = valmask.mask;
2255 cmd_pyld = ipahal_construct_imm_cmd(
2256 IPA_IMM_CMD_REGISTER_WRITE, &reg_write, false);
2257 if (!cmd_pyld) {
2258 IPAERR("fail construct register_write cmd\n");
2259 BUG();
2260 }
2261
2262 desc[num_descs].opcode = ipahal_imm_cmd_get_opcode(
2263 IPA_IMM_CMD_REGISTER_WRITE);
2264 desc[num_descs].type = IPA_IMM_CMD_DESC;
2265 desc[num_descs].callback = ipa3_destroy_imm;
2266 desc[num_descs].user1 = cmd_pyld;
2267 desc[num_descs].pyld = cmd_pyld->data;
2268 desc[num_descs].len = cmd_pyld->len;
2269 num_descs++;
2270 }
Amir Levy5807be32017-04-19 14:35:12 +03002271
2272 /* disable statuses for modem producers */
2273 if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
2274 ipa_assert_on(num_descs >= ipa3_ctx->ipa_num_pipes);
2275
2276 reg_write.skip_pipeline_clear = false;
2277 reg_write.pipeline_clear_options =
2278 IPAHAL_HPS_CLEAR;
2279 reg_write.offset =
2280 ipahal_get_reg_n_ofst(IPA_ENDP_STATUS_n,
2281 ep_idx);
2282 reg_write.value = 0;
2283 reg_write.value_mask = ~0;
2284 cmd_pyld = ipahal_construct_imm_cmd(
2285 IPA_IMM_CMD_REGISTER_WRITE, &reg_write, false);
2286 if (!cmd_pyld) {
2287 IPAERR("fail construct register_write cmd\n");
2288 ipa_assert();
2289 return -EFAULT;
2290 }
2291
2292 desc[num_descs].opcode = ipahal_imm_cmd_get_opcode(
2293 IPA_IMM_CMD_REGISTER_WRITE);
2294 desc[num_descs].type = IPA_IMM_CMD_DESC;
2295 desc[num_descs].callback = ipa3_destroy_imm;
2296 desc[num_descs].user1 = cmd_pyld;
2297 desc[num_descs].pyld = cmd_pyld->data;
2298 desc[num_descs].len = cmd_pyld->len;
2299 num_descs++;
2300 }
Amir Levy9659e592016-10-27 18:08:27 +03002301 }
2302
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002303 /* Will wait 500msecs for IPA tag process completion */
Amir Levy9659e592016-10-27 18:08:27 +03002304 retval = ipa3_tag_process(desc, num_descs,
2305 msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT));
2306 if (retval) {
2307 IPAERR("TAG process failed! (error %d)\n", retval);
2308 /* For timeout error ipa3_destroy_imm cb will destroy user1 */
2309 if (retval != -ETIME) {
2310 for (index = 0; index < num_descs; index++)
2311 if (desc[index].callback)
2312 desc[index].callback(desc[index].user1,
2313 desc[index].user2);
2314 retval = -EINVAL;
2315 }
2316 }
2317
2318 kfree(desc);
2319
2320 return retval;
2321}
2322
/**
 * ipa3_q6_pre_shutdown_cleanup() - Cleanup of all Q6 related configuration
 * in IPA HW before the modem shuts down. This is performed in case of SSR.
 *
 * This is a mandatory procedure; if one of the steps fails, the AP needs
 * to restart (BUG() is raised on failure).
 */
void ipa3_q6_pre_shutdown_cleanup(void)
{
	IPADBG_LOW("ENTER\n");

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	/* Delay Q6 producer pipes while their tables are being wiped */
	ipa3_q6_pipe_delay(true);
	ipa3_q6_avoid_holb();
	if (ipa3_q6_clean_q6_tables()) {
		IPAERR("Failed to clean Q6 tables\n");
		BUG();
	}
	if (ipa3_q6_set_ex_path_to_apps()) {
		IPAERR("Failed to redirect exceptions to APPS\n");
		BUG();
	}
	/* Remove delay from Q6 PRODs to avoid pending descriptors
	 * on pipe reset procedure
	 */
	ipa3_q6_pipe_delay(false);

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	IPADBG_LOW("Exit with success\n");
}
2354
/*
 * ipa3_q6_post_shutdown_cleanup() - As part of this cleanup
 * check if GSI channel related to Q6 producer client is empty.
 *
 * Q6 GSI channel emptiness is needed to guarantee no descriptors with
 * invalid info are injected into IPA RX from IPA_IF, while modem is
 * restarting.
 */
void ipa3_q6_post_shutdown_cleanup(void)
{
	int client_idx;
	int ep_idx;

	IPADBG_LOW("ENTER\n");

	/* Emptiness validation is done via the uC; nothing to do without it */
	if (!ipa3_ctx->uc_ctx.uc_loaded) {
		IPAERR("uC is not loaded. Skipping\n");
		return;
	}

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	/* Handle the issue where SUSPEND was removed for some reason */
	ipa3_q6_avoid_holb();
	ipa3_halt_q6_cons_gsi_channels();

	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
		if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
			/* Skip Q6 producers with no endpoint on this target */
			ep_idx = ipa3_get_ep_mapping(client_idx);
			if (ep_idx == -1)
				continue;

			/* NOTE(review): non-zero return is treated as a
			 * failed emptiness validation and is fatal - confirm
			 * ipa3_uc_is_gsi_channel_empty() return semantics.
			 */
			if (ipa3_uc_is_gsi_channel_empty(client_idx)) {
				IPAERR("fail to validate Q6 ch emptiness %d\n",
					client_idx);
				BUG();
				return;
			}
		}

	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	IPADBG_LOW("Exit with success\n");
}
2397
2398static inline void ipa3_sram_set_canary(u32 *sram_mmio, int offset)
2399{
2400 /* Set 4 bytes of CANARY before the offset */
2401 sram_mmio[(offset - 4) / 4] = IPA_MEM_CANARY_VAL;
2402}
2403
/**
 * _ipa_init_sram_v3() - Initialize IPA local SRAM.
 *
 * Maps the SRAM window (above the restricted region) and writes CANARY
 * guard words around each memory partition, per the layout in ipa_i.h.
 *
 * Return codes: 0 for success, negative value for failure
 */
int _ipa_init_sram_v3(void)
{
	u32 *ipa_sram_mmio;
	unsigned long phys_addr;

	/* SRAM direct-access register N addresses word N past the
	 * restricted bytes, hence the /4 word index
	 */
	phys_addr = ipa3_ctx->ipa_wrapper_base +
		ipa3_ctx->ctrl->ipa_reg_base_ofst +
		ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
			ipa3_ctx->smem_restricted_bytes / 4);

	ipa_sram_mmio = ioremap(phys_addr, ipa3_ctx->smem_sz);
	if (!ipa_sram_mmio) {
		IPAERR("fail to ioremap IPA SRAM\n");
		return -ENOMEM;
	}

	/* Consult with ipa_i.h on the location of the CANARY values */
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio,
		IPA_MEM_PART(v4_flt_nhash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_nhash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio,
		IPA_MEM_PART(v6_flt_nhash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_nhash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio,
		IPA_MEM_PART(modem_hdr_proc_ctx_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio,
		IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst) - 4);
	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst));
	/* From IPA v3.5 the uC event ring sits at the end of SRAM */
	ipa3_sram_set_canary(ipa_sram_mmio,
		(ipa_get_hw_type() >= IPA_HW_v3_5) ?
			IPA_MEM_PART(uc_event_ring_ofst) :
			IPA_MEM_PART(end_ofst));

	iounmap(ipa_sram_mmio);

	return 0;
}
2461
/**
 * _ipa_init_hdr_v3_0() - Initialize IPA header block.
 *
 * Two phases: (1) HDR_INIT_LOCAL immediate command zeroes the local header
 * table (modem + apps regions); (2) DMA_SHARED_MEM immediate command zeroes
 * the header processing-context region, whose base is then latched into
 * IPA_LOCAL_PKT_PROC_CNTXT_BASE. Each phase stages a zeroed DMA buffer
 * which is freed before returning.
 *
 * Return codes: 0 for success, negative value for failure
 */
int _ipa_init_hdr_v3_0(void)
{
	struct ipa3_desc desc = { 0 };
	struct ipa_mem_buffer mem;
	struct ipahal_imm_cmd_hdr_init_local cmd = {0};
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	struct ipahal_imm_cmd_dma_shared_mem dma_cmd = { 0 };

	/* Phase 1: zero the local header table via HDR_INIT_LOCAL */
	mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
		GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
		return -ENOMEM;
	}
	memset(mem.base, 0, mem.size);

	cmd.hdr_table_addr = mem.phys_base;
	cmd.size_hdr_table = mem.size;
	cmd.hdr_addr = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(modem_hdr_ofst);
	cmd_pyld = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_HDR_INIT_LOCAL, &cmd, false);
	if (!cmd_pyld) {
		IPAERR("fail to construct hdr_init_local imm cmd\n");
		dma_free_coherent(ipa3_ctx->pdev,
			mem.size, mem.base,
			mem.phys_base);
		return -EFAULT;
	}
	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_HDR_INIT_LOCAL);
	desc.type = IPA_IMM_CMD_DESC;
	desc.pyld = cmd_pyld->data;
	desc.len = cmd_pyld->len;
	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);

	if (ipa3_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		ipahal_destroy_imm_cmd(cmd_pyld);
		dma_free_coherent(ipa3_ctx->pdev,
			mem.size, mem.base,
			mem.phys_base);
		return -EFAULT;
	}

	ipahal_destroy_imm_cmd(cmd_pyld);
	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);

	/* Phase 2: zero the header proc-ctx region via DMA_SHARED_MEM */
	mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) +
		IPA_MEM_PART(apps_hdr_proc_ctx_size);
	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
		GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
		return -ENOMEM;
	}
	memset(mem.base, 0, mem.size);
	memset(&desc, 0, sizeof(desc));

	dma_cmd.is_read = false;
	dma_cmd.skip_pipeline_clear = false;
	dma_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
	dma_cmd.system_addr = mem.phys_base;
	dma_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(modem_hdr_proc_ctx_ofst);
	dma_cmd.size = mem.size;
	cmd_pyld = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_DMA_SHARED_MEM, &dma_cmd, false);
	if (!cmd_pyld) {
		IPAERR("fail to construct dma_shared_mem imm\n");
		dma_free_coherent(ipa3_ctx->pdev,
			mem.size, mem.base,
			mem.phys_base);
		return -EFAULT;
	}
	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
	desc.pyld = cmd_pyld->data;
	desc.len = cmd_pyld->len;
	desc.type = IPA_IMM_CMD_DESC;
	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);

	if (ipa3_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		ipahal_destroy_imm_cmd(cmd_pyld);
		dma_free_coherent(ipa3_ctx->pdev,
			mem.size,
			mem.base,
			mem.phys_base);
		return -EFAULT;
	}
	ipahal_destroy_imm_cmd(cmd_pyld);

	/* Latch the proc-ctx base address into HW */
	ipahal_write_reg(IPA_LOCAL_PKT_PROC_CNTXT_BASE, dma_cmd.local_addr);

	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);

	return 0;
}
2565
2566/**
2567 * _ipa_init_rt4_v3() - Initialize IPA routing block for IPv4.
2568 *
2569 * Return codes: 0 for success, negative value for failure
2570 */
2571int _ipa_init_rt4_v3(void)
2572{
2573 struct ipa3_desc desc = { 0 };
2574 struct ipa_mem_buffer mem;
2575 struct ipahal_imm_cmd_ip_v4_routing_init v4_cmd;
2576 struct ipahal_imm_cmd_pyld *cmd_pyld;
2577 int i;
2578 int rc = 0;
2579
2580 for (i = IPA_MEM_PART(v4_modem_rt_index_lo);
2581 i <= IPA_MEM_PART(v4_modem_rt_index_hi);
2582 i++)
2583 ipa3_ctx->rt_idx_bitmap[IPA_IP_v4] |= (1 << i);
2584 IPADBG("v4 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v4]);
2585
2586 rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v4_rt_num_index),
2587 IPA_MEM_PART(v4_rt_hash_size), IPA_MEM_PART(v4_rt_nhash_size),
Amir Levy4dc79be2017-02-01 19:18:35 +02002588 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03002589 if (rc) {
2590 IPAERR("fail generate empty v4 rt img\n");
2591 return rc;
2592 }
2593
2594 v4_cmd.hash_rules_addr = mem.phys_base;
2595 v4_cmd.hash_rules_size = mem.size;
2596 v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
2597 IPA_MEM_PART(v4_rt_hash_ofst);
2598 v4_cmd.nhash_rules_addr = mem.phys_base;
2599 v4_cmd.nhash_rules_size = mem.size;
2600 v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
2601 IPA_MEM_PART(v4_rt_nhash_ofst);
2602 IPADBG("putting hashable routing IPv4 rules to phys 0x%x\n",
2603 v4_cmd.hash_local_addr);
2604 IPADBG("putting non-hashable routing IPv4 rules to phys 0x%x\n",
2605 v4_cmd.nhash_local_addr);
2606 cmd_pyld = ipahal_construct_imm_cmd(
2607 IPA_IMM_CMD_IP_V4_ROUTING_INIT, &v4_cmd, false);
2608 if (!cmd_pyld) {
2609 IPAERR("fail construct ip_v4_rt_init imm cmd\n");
2610 rc = -EPERM;
2611 goto free_mem;
2612 }
2613
2614 desc.opcode =
2615 ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_ROUTING_INIT);
2616 desc.type = IPA_IMM_CMD_DESC;
2617 desc.pyld = cmd_pyld->data;
2618 desc.len = cmd_pyld->len;
2619 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2620
2621 if (ipa3_send_cmd(1, &desc)) {
2622 IPAERR("fail to send immediate command\n");
2623 rc = -EFAULT;
2624 }
2625
2626 ipahal_destroy_imm_cmd(cmd_pyld);
2627
2628free_mem:
2629 ipahal_free_dma_mem(&mem);
2630 return rc;
2631}
2632
2633/**
2634 * _ipa_init_rt6_v3() - Initialize IPA routing block for IPv6.
2635 *
2636 * Return codes: 0 for success, negative value for failure
2637 */
2638int _ipa_init_rt6_v3(void)
2639{
2640 struct ipa3_desc desc = { 0 };
2641 struct ipa_mem_buffer mem;
2642 struct ipahal_imm_cmd_ip_v6_routing_init v6_cmd;
2643 struct ipahal_imm_cmd_pyld *cmd_pyld;
2644 int i;
2645 int rc = 0;
2646
2647 for (i = IPA_MEM_PART(v6_modem_rt_index_lo);
2648 i <= IPA_MEM_PART(v6_modem_rt_index_hi);
2649 i++)
2650 ipa3_ctx->rt_idx_bitmap[IPA_IP_v6] |= (1 << i);
2651 IPADBG("v6 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v6]);
2652
2653 rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v6_rt_num_index),
2654 IPA_MEM_PART(v6_rt_hash_size), IPA_MEM_PART(v6_rt_nhash_size),
Amir Levy4dc79be2017-02-01 19:18:35 +02002655 &mem, false);
Amir Levy9659e592016-10-27 18:08:27 +03002656 if (rc) {
2657 IPAERR("fail generate empty v6 rt img\n");
2658 return rc;
2659 }
2660
2661 v6_cmd.hash_rules_addr = mem.phys_base;
2662 v6_cmd.hash_rules_size = mem.size;
2663 v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
2664 IPA_MEM_PART(v6_rt_hash_ofst);
2665 v6_cmd.nhash_rules_addr = mem.phys_base;
2666 v6_cmd.nhash_rules_size = mem.size;
2667 v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
2668 IPA_MEM_PART(v6_rt_nhash_ofst);
2669 IPADBG("putting hashable routing IPv6 rules to phys 0x%x\n",
2670 v6_cmd.hash_local_addr);
2671 IPADBG("putting non-hashable routing IPv6 rules to phys 0x%x\n",
2672 v6_cmd.nhash_local_addr);
2673 cmd_pyld = ipahal_construct_imm_cmd(
2674 IPA_IMM_CMD_IP_V6_ROUTING_INIT, &v6_cmd, false);
2675 if (!cmd_pyld) {
2676 IPAERR("fail construct ip_v6_rt_init imm cmd\n");
2677 rc = -EPERM;
2678 goto free_mem;
2679 }
2680
2681 desc.opcode =
2682 ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V6_ROUTING_INIT);
2683 desc.type = IPA_IMM_CMD_DESC;
2684 desc.pyld = cmd_pyld->data;
2685 desc.len = cmd_pyld->len;
2686 IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
2687
2688 if (ipa3_send_cmd(1, &desc)) {
2689 IPAERR("fail to send immediate command\n");
2690 rc = -EFAULT;
2691 }
2692
2693 ipahal_destroy_imm_cmd(cmd_pyld);
2694
2695free_mem:
2696 ipahal_free_dma_mem(&mem);
2697 return rc;
2698}
2699
/**
 * _ipa_init_flt4_v3() - Initialize IPA filtering block for IPv4.
 *
 * Loads empty hashable and non-hashable v4 filter tables (one entry per
 * filtering-capable endpoint in ep_flt_bitmap) into local memory via the
 * IP_V4_FILTER_INIT immediate command.
 *
 * Return codes: 0 for success, negative value for failure
 */
int _ipa_init_flt4_v3(void)
{
	struct ipa3_desc desc = { 0 };
	struct ipa_mem_buffer mem;
	struct ipahal_imm_cmd_ip_v4_filter_init v4_cmd;
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	int rc;

	rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
		IPA_MEM_PART(v4_flt_hash_size),
		IPA_MEM_PART(v4_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
		&mem, false);
	if (rc) {
		IPAERR("fail generate empty v4 flt img\n");
		return rc;
	}

	/* Both table variants point at the same empty image */
	v4_cmd.hash_rules_addr = mem.phys_base;
	v4_cmd.hash_rules_size = mem.size;
	v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(v4_flt_hash_ofst);
	v4_cmd.nhash_rules_addr = mem.phys_base;
	v4_cmd.nhash_rules_size = mem.size;
	v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(v4_flt_nhash_ofst);
	IPADBG("putting hashable filtering IPv4 rules to phys 0x%x\n",
		v4_cmd.hash_local_addr);
	IPADBG("putting non-hashable filtering IPv4 rules to phys 0x%x\n",
		v4_cmd.nhash_local_addr);
	cmd_pyld = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_IP_V4_FILTER_INIT, &v4_cmd, false);
	if (!cmd_pyld) {
		IPAERR("fail construct ip_v4_flt_init imm cmd\n");
		rc = -EPERM;
		goto free_mem;
	}

	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_FILTER_INIT);
	desc.type = IPA_IMM_CMD_DESC;
	desc.pyld = cmd_pyld->data;
	desc.len = cmd_pyld->len;
	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);

	if (ipa3_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		rc = -EFAULT;
	}

	ipahal_destroy_imm_cmd(cmd_pyld);

free_mem:
	ipahal_free_dma_mem(&mem);
	return rc;
}
2759
/**
 * _ipa_init_flt6_v3() - Initialize IPA filtering block for IPv6.
 *
 * Loads empty hashable and non-hashable v6 filter tables (one entry per
 * filtering-capable endpoint in ep_flt_bitmap) into local memory via the
 * IP_V6_FILTER_INIT immediate command.
 *
 * Return codes: 0 for success, negative value for failure
 */
int _ipa_init_flt6_v3(void)
{
	struct ipa3_desc desc = { 0 };
	struct ipa_mem_buffer mem;
	struct ipahal_imm_cmd_ip_v6_filter_init v6_cmd;
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	int rc;

	rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
		IPA_MEM_PART(v6_flt_hash_size),
		IPA_MEM_PART(v6_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
		&mem, false);
	if (rc) {
		IPAERR("fail generate empty v6 flt img\n");
		return rc;
	}

	/* Both table variants point at the same empty image */
	v6_cmd.hash_rules_addr = mem.phys_base;
	v6_cmd.hash_rules_size = mem.size;
	v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(v6_flt_hash_ofst);
	v6_cmd.nhash_rules_addr = mem.phys_base;
	v6_cmd.nhash_rules_size = mem.size;
	v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(v6_flt_nhash_ofst);
	IPADBG("putting hashable filtering IPv6 rules to phys 0x%x\n",
		v6_cmd.hash_local_addr);
	IPADBG("putting non-hashable filtering IPv6 rules to phys 0x%x\n",
		v6_cmd.nhash_local_addr);

	cmd_pyld = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_IP_V6_FILTER_INIT, &v6_cmd, false);
	if (!cmd_pyld) {
		IPAERR("fail construct ip_v6_flt_init imm cmd\n");
		rc = -EPERM;
		goto free_mem;
	}

	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V6_FILTER_INIT);
	desc.type = IPA_IMM_CMD_DESC;
	desc.pyld = cmd_pyld->data;
	desc.len = cmd_pyld->len;
	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);

	if (ipa3_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		rc = -EFAULT;
	}

	ipahal_destroy_imm_cmd(cmd_pyld);

free_mem:
	ipahal_free_dma_mem(&mem);
	return rc;
}
2820
2821static int ipa3_setup_flt_hash_tuple(void)
2822{
2823 int pipe_idx;
2824 struct ipahal_reg_hash_tuple tuple;
2825
2826 memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
2827
2828 for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes ; pipe_idx++) {
2829 if (!ipa_is_ep_support_flt(pipe_idx))
2830 continue;
2831
2832 if (ipa_is_modem_pipe(pipe_idx))
2833 continue;
2834
2835 if (ipa3_set_flt_tuple_mask(pipe_idx, &tuple)) {
2836 IPAERR("failed to setup pipe %d flt tuple\n", pipe_idx);
2837 return -EFAULT;
2838 }
2839 }
2840
2841 return 0;
2842}
2843
2844static int ipa3_setup_rt_hash_tuple(void)
2845{
2846 int tbl_idx;
2847 struct ipahal_reg_hash_tuple tuple;
2848
2849 memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
2850
2851 for (tbl_idx = 0;
2852 tbl_idx < max(IPA_MEM_PART(v6_rt_num_index),
2853 IPA_MEM_PART(v4_rt_num_index));
2854 tbl_idx++) {
2855
2856 if (tbl_idx >= IPA_MEM_PART(v4_modem_rt_index_lo) &&
2857 tbl_idx <= IPA_MEM_PART(v4_modem_rt_index_hi))
2858 continue;
2859
2860 if (tbl_idx >= IPA_MEM_PART(v6_modem_rt_index_lo) &&
2861 tbl_idx <= IPA_MEM_PART(v6_modem_rt_index_hi))
2862 continue;
2863
2864 if (ipa3_set_rt_tuple_mask(tbl_idx, &tuple)) {
2865 IPAERR("failed to setup tbl %d rt tuple\n", tbl_idx);
2866 return -EFAULT;
2867 }
2868 }
2869
2870 return 0;
2871}
2872
/**
 * ipa3_setup_apps_pipes() - Bring up the AP-side system pipes and load the
 * initial table/header configuration into IPA.
 *
 * Order matters: the APPS_CMD_PROD pipe must exist before any of the
 * SRAM/header/table init steps, since those are delivered as immediate
 * commands over that pipe. Cleanup on failure unwinds in reverse via the
 * goto labels.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int ipa3_setup_apps_pipes(void)
{
	struct ipa_sys_connect_params sys_in;
	int result = 0;

	/* HW workaround: pre-allocate GSI physical channel 20 if needed */
	if (ipa3_ctx->gsi_ch20_wa) {
		IPADBG("Allocating GSI physical channel 20\n");
		result = ipa_gsi_ch20_wa();
		if (result) {
			IPAERR("ipa_gsi_ch20_wa failed %d\n", result);
			goto fail_ch20_wa;
		}
	}

	/* allocate the common PROD event ring */
	if (ipa3_alloc_common_event_ring()) {
		IPAERR("ipa3_alloc_common_event_ring failed.\n");
		result = -EPERM;
		goto fail_ch20_wa;
	}

	/* CMD OUT (AP->IPA) */
	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
	sys_in.client = IPA_CLIENT_APPS_CMD_PROD;
	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS;
	if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_cmd)) {
		IPAERR(":setup sys pipe (APPS_CMD_PROD) failed.\n");
		result = -EPERM;
		goto fail_ch20_wa;
	}
	IPADBG("Apps to IPA cmd pipe is connected\n");

	/* Now that the cmd pipe is up, push the initial HW configuration */
	ipa3_ctx->ctrl->ipa_init_sram();
	IPADBG("SRAM initialized\n");

	ipa3_ctx->ctrl->ipa_init_hdr();
	IPADBG("HDR initialized\n");

	ipa3_ctx->ctrl->ipa_init_rt4();
	IPADBG("V4 RT initialized\n");

	ipa3_ctx->ctrl->ipa_init_rt6();
	IPADBG("V6 RT initialized\n");

	ipa3_ctx->ctrl->ipa_init_flt4();
	IPADBG("V4 FLT initialized\n");

	ipa3_ctx->ctrl->ipa_init_flt6();
	IPADBG("V6 FLT initialized\n");

	if (ipa3_setup_flt_hash_tuple()) {
		IPAERR(":fail to configure flt hash tuple\n");
		result = -EPERM;
		goto fail_flt_hash_tuple;
	}
	IPADBG("flt hash tuple is configured\n");

	if (ipa3_setup_rt_hash_tuple()) {
		IPAERR(":fail to configure rt hash tuple\n");
		result = -EPERM;
		goto fail_flt_hash_tuple;
	}
	IPADBG("rt hash tuple is configured\n");

	if (ipa3_setup_exception_path()) {
		IPAERR(":fail to setup excp path\n");
		result = -EPERM;
		goto fail_flt_hash_tuple;
	}
	IPADBG("Exception path was successfully set");

	if (ipa3_setup_dflt_rt_tables()) {
		IPAERR(":fail to setup dflt routes\n");
		result = -EPERM;
		goto fail_flt_hash_tuple;
	}
	IPADBG("default routing was set\n");

	/* LAN IN (IPA->AP) */
	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
	sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
	sys_in.notify = ipa3_lan_rx_cb;
	sys_in.priv = NULL;
	sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH;
	sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
	sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
	sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
	sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
	sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
	sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
	sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_ENABLE_CS_OFFLOAD_DL;

	/**
	 * ipa_lan_rx_cb() intended to notify the source EP about packet
	 * being received on the LAN_CONS via calling the source EP call-back.
	 * There could be a race condition with calling this call-back. Other
	 * thread may nullify it - e.g. on EP disconnect.
	 * This lock intended to protect the access to the source EP call-back
	 */
	spin_lock_init(&ipa3_ctx->disconnect_lock);
	if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
		IPAERR(":setup sys pipe (LAN_CONS) failed.\n");
		result = -EPERM;
		goto fail_flt_hash_tuple;
	}

	/* LAN OUT (AP->IPA) */
	/* In MHI configuration the LAN producer pipe is not used */
	if (!ipa3_ctx->ipa_config_is_mhi) {
		memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
		sys_in.client = IPA_CLIENT_APPS_LAN_PROD;
		sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ;
		sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
		if (ipa3_setup_sys_pipe(&sys_in,
			&ipa3_ctx->clnt_hdl_data_out)) {
			IPAERR(":setup sys pipe (LAN_PROD) failed.\n");
			result = -EPERM;
			goto fail_lan_data_out;
		}
	}

	return 0;

fail_lan_data_out:
	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
fail_flt_hash_tuple:
	if (ipa3_ctx->dflt_v6_rt_rule_hdl)
		__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
	if (ipa3_ctx->dflt_v4_rt_rule_hdl)
		__ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
	if (ipa3_ctx->excp_hdr_hdl)
		__ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
fail_ch20_wa:
	return result;
}
3011
static void ipa3_teardown_apps_pipes(void)
{
	/*
	 * Tear down the APPS pipes in reverse order of setup: LAN data
	 * pipes first, then the default routing rules and exception
	 * header, and finally the command pipe.
	 */
	/* the LAN OUT (AP->IPA) pipe is only created in non-MHI config */
	if (!ipa3_ctx->ipa_config_is_mhi)
		ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out);
	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
	__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
	__ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
	__ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
}
3022
3023#ifdef CONFIG_COMPAT
long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int retval = 0;
	struct ipa3_ioc_nat_alloc_mem32 nat_mem32;
	struct ipa_ioc_nat_alloc_mem nat_mem;

	/*
	 * Translate each 32-bit ioctl command to its native counterpart
	 * and forward to ipa3_ioctl(). Only IPA_IOC_ALLOC_NAT_MEM32 needs
	 * explicit payload conversion (compat_off_t vs off_t); the rest of
	 * the commands have layout-compatible payloads.
	 */
	switch (cmd) {
	case IPA_IOC_ADD_HDR32:
		cmd = IPA_IOC_ADD_HDR;
		break;
	case IPA_IOC_DEL_HDR32:
		cmd = IPA_IOC_DEL_HDR;
		break;
	case IPA_IOC_ADD_RT_RULE32:
		cmd = IPA_IOC_ADD_RT_RULE;
		break;
	case IPA_IOC_DEL_RT_RULE32:
		cmd = IPA_IOC_DEL_RT_RULE;
		break;
	case IPA_IOC_ADD_FLT_RULE32:
		cmd = IPA_IOC_ADD_FLT_RULE;
		break;
	case IPA_IOC_DEL_FLT_RULE32:
		cmd = IPA_IOC_DEL_FLT_RULE;
		break;
	case IPA_IOC_GET_RT_TBL32:
		cmd = IPA_IOC_GET_RT_TBL;
		break;
	case IPA_IOC_COPY_HDR32:
		cmd = IPA_IOC_COPY_HDR;
		break;
	case IPA_IOC_QUERY_INTF32:
		cmd = IPA_IOC_QUERY_INTF;
		break;
	case IPA_IOC_QUERY_INTF_TX_PROPS32:
		cmd = IPA_IOC_QUERY_INTF_TX_PROPS;
		break;
	case IPA_IOC_QUERY_INTF_RX_PROPS32:
		cmd = IPA_IOC_QUERY_INTF_RX_PROPS;
		break;
	case IPA_IOC_QUERY_INTF_EXT_PROPS32:
		cmd = IPA_IOC_QUERY_INTF_EXT_PROPS;
		break;
	case IPA_IOC_GET_HDR32:
		cmd = IPA_IOC_GET_HDR;
		break;
	case IPA_IOC_ALLOC_NAT_MEM32:
		/*
		 * Handled fully here: copy the 32-bit struct, widen the
		 * size/offset fields, allocate, then narrow the resulting
		 * offset back for the compat caller.
		 */
		if (copy_from_user((u8 *)&nat_mem32, (u8 *)arg,
			sizeof(struct ipa3_ioc_nat_alloc_mem32))) {
			retval = -EFAULT;
			goto ret;
		}
		memcpy(nat_mem.dev_name, nat_mem32.dev_name,
			IPA_RESOURCE_NAME_MAX);
		nat_mem.size = (size_t)nat_mem32.size;
		nat_mem.offset = (off_t)nat_mem32.offset;

		/* null terminate the string */
		nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';

		if (ipa3_allocate_nat_device(&nat_mem)) {
			retval = -EFAULT;
			goto ret;
		}
		nat_mem32.offset = (compat_off_t)nat_mem.offset;
		if (copy_to_user((u8 *)arg, (u8 *)&nat_mem32,
			sizeof(struct ipa3_ioc_nat_alloc_mem32))) {
			retval = -EFAULT;
		}
ret:
		return retval;
	case IPA_IOC_V4_INIT_NAT32:
		cmd = IPA_IOC_V4_INIT_NAT;
		break;
	case IPA_IOC_NAT_DMA32:
		cmd = IPA_IOC_NAT_DMA;
		break;
	case IPA_IOC_V4_DEL_NAT32:
		cmd = IPA_IOC_V4_DEL_NAT;
		break;
	case IPA_IOC_GET_NAT_OFFSET32:
		cmd = IPA_IOC_GET_NAT_OFFSET;
		break;
	case IPA_IOC_PULL_MSG32:
		cmd = IPA_IOC_PULL_MSG;
		break;
	case IPA_IOC_RM_ADD_DEPENDENCY32:
		cmd = IPA_IOC_RM_ADD_DEPENDENCY;
		break;
	case IPA_IOC_RM_DEL_DEPENDENCY32:
		cmd = IPA_IOC_RM_DEL_DEPENDENCY;
		break;
	case IPA_IOC_GENERATE_FLT_EQ32:
		cmd = IPA_IOC_GENERATE_FLT_EQ;
		break;
	case IPA_IOC_QUERY_RT_TBL_INDEX32:
		cmd = IPA_IOC_QUERY_RT_TBL_INDEX;
		break;
	case IPA_IOC_WRITE_QMAPID32:
		cmd = IPA_IOC_WRITE_QMAPID;
		break;
	case IPA_IOC_MDFY_FLT_RULE32:
		cmd = IPA_IOC_MDFY_FLT_RULE;
		break;
	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32:
		cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD;
		break;
	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32:
		cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL;
		break;
	case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32:
		cmd = IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED;
		break;
	case IPA_IOC_MDFY_RT_RULE32:
		cmd = IPA_IOC_MDFY_RT_RULE;
		break;
	/* commands below are identical in 32-bit and 64-bit user space */
	case IPA_IOC_COMMIT_HDR:
	case IPA_IOC_RESET_HDR:
	case IPA_IOC_COMMIT_RT:
	case IPA_IOC_RESET_RT:
	case IPA_IOC_COMMIT_FLT:
	case IPA_IOC_RESET_FLT:
	case IPA_IOC_DUMP:
	case IPA_IOC_PUT_RT_TBL:
	case IPA_IOC_PUT_HDR:
	case IPA_IOC_SET_FLT:
	case IPA_IOC_QUERY_EP_MAPPING:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return ipa3_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
3157#endif
3158
3159static ssize_t ipa3_write(struct file *file, const char __user *buf,
3160 size_t count, loff_t *ppos);
3161
/* character-device entry points for the IPA driver node */
static const struct file_operations ipa3_drv_fops = {
	.owner = THIS_MODULE,
	.open = ipa3_open,
	.read = ipa3_read,
	.write = ipa3_write,
	.unlocked_ioctl = ipa3_ioctl,
#ifdef CONFIG_COMPAT
	/* 32-bit user space goes through the compat translation layer */
	.compat_ioctl = compat_ipa3_ioctl,
#endif
};
3172
3173static int ipa3_get_clks(struct device *dev)
3174{
Ghanim Fodi6a831342017-03-07 18:19:15 +02003175 if (ipa3_res.use_bw_vote) {
3176 IPADBG("Vote IPA clock by bw voting via bus scaling driver\n");
3177 ipa3_clk = NULL;
3178 return 0;
3179 }
3180
Amir Levy9659e592016-10-27 18:08:27 +03003181 ipa3_clk = clk_get(dev, "core_clk");
3182 if (IS_ERR(ipa3_clk)) {
3183 if (ipa3_clk != ERR_PTR(-EPROBE_DEFER))
3184 IPAERR("fail to get ipa clk\n");
3185 return PTR_ERR(ipa3_clk);
3186 }
3187 return 0;
3188}
3189
3190/**
3191 * _ipa_enable_clks_v3_0() - Enable IPA clocks.
3192 */
3193void _ipa_enable_clks_v3_0(void)
3194{
Ghanim Fodi6a831342017-03-07 18:19:15 +02003195 IPADBG_LOW("curr_ipa_clk_rate=%d", ipa3_ctx->curr_ipa_clk_rate);
Amir Levy9659e592016-10-27 18:08:27 +03003196 if (ipa3_clk) {
Ghanim Fodi6a831342017-03-07 18:19:15 +02003197 IPADBG_LOW("enabling gcc_ipa_clk\n");
Amir Levy9659e592016-10-27 18:08:27 +03003198 clk_prepare(ipa3_clk);
3199 clk_enable(ipa3_clk);
Amir Levy9659e592016-10-27 18:08:27 +03003200 clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
Amir Levy9659e592016-10-27 18:08:27 +03003201 }
3202
Ghanim Fodi6a831342017-03-07 18:19:15 +02003203 ipa3_uc_notify_clk_state(true);
Amir Levy9659e592016-10-27 18:08:27 +03003204 ipa3_suspend_apps_pipes(false);
3205}
3206
3207static unsigned int ipa3_get_bus_vote(void)
3208{
3209 unsigned int idx = 1;
3210
3211 if (ipa3_ctx->curr_ipa_clk_rate == ipa3_ctx->ctrl->ipa_clk_rate_svs) {
3212 idx = 1;
3213 } else if (ipa3_ctx->curr_ipa_clk_rate ==
3214 ipa3_ctx->ctrl->ipa_clk_rate_nominal) {
3215 if (ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases <= 2)
3216 idx = 1;
3217 else
3218 idx = 2;
3219 } else if (ipa3_ctx->curr_ipa_clk_rate ==
3220 ipa3_ctx->ctrl->ipa_clk_rate_turbo) {
3221 idx = ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases - 1;
3222 } else {
3223 WARN_ON(1);
3224 }
3225
3226 IPADBG("curr %d idx %d\n", ipa3_ctx->curr_ipa_clk_rate, idx);
3227
3228 return idx;
3229}
3230
3231/**
3232* ipa3_enable_clks() - Turn on IPA clocks
3233*
3234* Return codes:
3235* None
3236*/
3237void ipa3_enable_clks(void)
3238{
3239 IPADBG("enabling IPA clocks and bus voting\n");
3240
Ghanim Fodi6a831342017-03-07 18:19:15 +02003241 if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
3242 ipa3_get_bus_vote()))
3243 WARN_ON(1);
Amir Levy9659e592016-10-27 18:08:27 +03003244
Ghanim Fodi6a831342017-03-07 18:19:15 +02003245 ipa3_ctx->ctrl->ipa3_enable_clks();
Amir Levy9659e592016-10-27 18:08:27 +03003246}
3247
3248
3249/**
3250 * _ipa_disable_clks_v3_0() - Disable IPA clocks.
3251 */
3252void _ipa_disable_clks_v3_0(void)
3253{
Amir Levy9659e592016-10-27 18:08:27 +03003254 ipa3_suspend_apps_pipes(true);
3255 ipa3_uc_notify_clk_state(false);
Ghanim Fodi6a831342017-03-07 18:19:15 +02003256 if (ipa3_clk) {
3257 IPADBG_LOW("disabling gcc_ipa_clk\n");
Amir Levy9659e592016-10-27 18:08:27 +03003258 clk_disable_unprepare(ipa3_clk);
Ghanim Fodi6a831342017-03-07 18:19:15 +02003259 }
Amir Levy9659e592016-10-27 18:08:27 +03003260}
3261
3262/**
3263* ipa3_disable_clks() - Turn off IPA clocks
3264*
3265* Return codes:
3266* None
3267*/
3268void ipa3_disable_clks(void)
3269{
3270 IPADBG("disabling IPA clocks and bus voting\n");
3271
3272 ipa3_ctx->ctrl->ipa3_disable_clks();
3273
Ghanim Fodi6a831342017-03-07 18:19:15 +02003274 if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl, 0))
3275 WARN_ON(1);
Amir Levy9659e592016-10-27 18:08:27 +03003276}
3277
/**
 * ipa3_start_tag_process() - Work function running the TAG procedure
 *
 * Scheduled just before clock gating. Force-closes aggregation frames
 * on all pipes so no frame is left open inside the IPA HW, then drops
 * the special TAG_PROCESS clock vote taken when this work was queued.
 */
static void ipa3_start_tag_process(struct work_struct *work)
{
	int rc;

	IPADBG("starting TAG process\n");
	/* -1 means: force-close aggregation on every pipe */
	rc = ipa3_tag_aggr_force_close(-1);
	if (rc)
		IPAERR("ipa3_tag_aggr_force_close failed %d\n", rc);
	IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS");

	IPADBG("TAG process done\n");
}
3302
/**
* ipa3_active_clients_log_mod() - Log a modification in the active clients
* reference count
*
* This method logs any modification in the active clients reference count:
* It logs the modification in the circular history buffer
* It logs the modification in the hash table - looking for an entry,
* creating one if needed and deleting one if needed.
*
* @id: ipa3_active client logging info struct to hold the log information
* @inc: a boolean variable to indicate whether the modification is an increase
* or decrease
* @int_ctx: a boolean variable to indicate whether this call is being made from
* an interrupt context and therefore should allocate GFP_ATOMIC memory
*
* Method process:
* - Hash the unique identifier string
* - Find the hash in the table
*    1)If found, increase or decrease the reference count
*    2)If not found, allocate a new hash table entry struct and initialize it
* - Remove and deallocate unneeded data structure
* - Log the call in the circular history buffer (unless it is a simple call)
*/
void ipa3_active_clients_log_mod(struct ipa_active_client_logging_info *id,
		bool inc, bool int_ctx)
{
	char temp_str[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN];
	unsigned long long t;
	unsigned long nanosec_rem;
	struct ipa3_active_client_htable_entry *hentry;
	struct ipa3_active_client_htable_entry *hfound;
	u32 hkey;
	char str_to_hash[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];

	hfound = NULL;
	/* hash over a fixed-size, zero-padded copy of the id string */
	memset(str_to_hash, 0, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
	strlcpy(str_to_hash, id->id_string, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
	hkey = jhash(str_to_hash, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN,
			0);
	/* walk the bucket; a string match updates its reference count */
	hash_for_each_possible(ipa3_ctx->ipa3_active_clients_logging.htable,
			hentry, list, hkey) {
		if (!strcmp(hentry->id_string, id->id_string)) {
			hentry->count = hentry->count + (inc ? 1 : -1);
			hfound = hentry;
		}
	}
	if (hfound == NULL) {
		/* first reference under this id: create a new entry */
		hentry = NULL;
		hentry = kzalloc(sizeof(
				struct ipa3_active_client_htable_entry),
				int_ctx ? GFP_ATOMIC : GFP_KERNEL);
		if (hentry == NULL) {
			IPAERR("failed allocating active clients hash entry");
			return;
		}
		hentry->type = id->type;
		strlcpy(hentry->id_string, id->id_string,
				IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
		INIT_HLIST_NODE(&hentry->list);
		hentry->count = inc ? 1 : -1;
		hash_add(ipa3_ctx->ipa3_active_clients_logging.htable,
				&hentry->list, hkey);
	} else if (hfound->count == 0) {
		/* count dropped to zero: entry no longer needed */
		hash_del(&hfound->list);
		kfree(hfound);
	}

	/* SIMPLE entries are not recorded in the circular history buffer */
	if (id->type != SIMPLE) {
		t = local_clock();
		nanosec_rem = do_div(t, 1000000000) / 1000;
		snprintf(temp_str, IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN,
				inc ? "[%5lu.%06lu] ^ %s, %s: %d" :
				"[%5lu.%06lu] v %s, %s: %d",
				(unsigned long)t, nanosec_rem,
				id->id_string, id->file, id->line);
		ipa3_active_clients_log_insert(temp_str);
	}
}
3381
void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
		bool int_ctx)
{
	/* convenience wrapper: log a reference-count decrease */
	ipa3_active_clients_log_mod(id, false, int_ctx);
}
3387
void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id,
		bool int_ctx)
{
	/* convenience wrapper: log a reference-count increase */
	ipa3_active_clients_log_mod(id, true, int_ctx);
}
3393
3394/**
3395* ipa3_inc_client_enable_clks() - Increase active clients counter, and
3396* enable ipa clocks if necessary
3397*
3398* Return codes:
3399* None
3400*/
3401void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
3402{
3403 ipa3_active_clients_lock();
3404 ipa3_active_clients_log_inc(id, false);
3405 ipa3_ctx->ipa3_active_clients.cnt++;
3406 if (ipa3_ctx->ipa3_active_clients.cnt == 1)
3407 ipa3_enable_clks();
3408 IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
3409 ipa3_active_clients_unlock();
3410}
3411
3412/**
3413* ipa3_inc_client_enable_clks_no_block() - Only increment the number of active
3414* clients if no asynchronous actions should be done. Asynchronous actions are
3415* locking a mutex and waking up IPA HW.
3416*
3417* Return codes: 0 for success
3418* -EPERM if an asynchronous action should have been done
3419*/
3420int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
3421 *id)
3422{
3423 int res = 0;
3424 unsigned long flags;
3425
3426 if (ipa3_active_clients_trylock(&flags) == 0)
3427 return -EPERM;
3428
3429 if (ipa3_ctx->ipa3_active_clients.cnt == 0) {
3430 res = -EPERM;
3431 goto bail;
3432 }
3433 ipa3_active_clients_log_inc(id, true);
3434 ipa3_ctx->ipa3_active_clients.cnt++;
3435 IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
3436bail:
3437 ipa3_active_clients_trylock_unlock(&flags);
3438
3439 return res;
3440}
3441
/**
 * ipa3_dec_client_disable_clks() - Decrease active clients counter
 *
 * In case that there are no active clients this function also starts
 * TAG process. When TAG progress ends ipa clocks will be gated.
 * start_tag_process_again flag is set during this function to signal TAG
 * process to start again as there was another client that may send data to ipa
 *
 * Return codes:
 * None
 */
void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
{
	struct ipa_active_client_logging_info log_info;

	ipa3_active_clients_lock();
	ipa3_active_clients_log_dec(id, false);
	ipa3_ctx->ipa3_active_clients.cnt--;
	IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
	if (ipa3_ctx->ipa3_active_clients.cnt == 0) {
		if (ipa3_ctx->tag_process_before_gating) {
			/* run the TAG process instead of gating right away;
			 * the clocks stay on (cnt forced back to 1) and the
			 * TAG work drops that reference when it completes
			 */
			ipa3_ctx->tag_process_before_gating = false;
			/*
			 * When TAG process ends, active clients will be
			 * decreased
			 */
			IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info,
					"TAG_PROCESS");
			ipa3_active_clients_log_inc(&log_info, false);
			ipa3_ctx->ipa3_active_clients.cnt = 1;
			queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_tag_work);
		} else {
			ipa3_disable_clks();
		}
	}
	ipa3_active_clients_unlock();
}
3479
3480/**
3481* ipa3_inc_acquire_wakelock() - Increase active clients counter, and
3482* acquire wakelock if necessary
3483*
3484* Return codes:
3485* None
3486*/
3487void ipa3_inc_acquire_wakelock(void)
3488{
3489 unsigned long flags;
3490
3491 spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
3492 ipa3_ctx->wakelock_ref_cnt.cnt++;
3493 if (ipa3_ctx->wakelock_ref_cnt.cnt == 1)
3494 __pm_stay_awake(&ipa3_ctx->w_lock);
3495 IPADBG_LOW("active wakelock ref cnt = %d\n",
3496 ipa3_ctx->wakelock_ref_cnt.cnt);
3497 spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
3498}
3499
3500/**
3501 * ipa3_dec_release_wakelock() - Decrease active clients counter
3502 *
3503 * In case if the ref count is 0, release the wakelock.
3504 *
3505 * Return codes:
3506 * None
3507 */
3508void ipa3_dec_release_wakelock(void)
3509{
3510 unsigned long flags;
3511
3512 spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
3513 ipa3_ctx->wakelock_ref_cnt.cnt--;
3514 IPADBG_LOW("active wakelock ref cnt = %d\n",
3515 ipa3_ctx->wakelock_ref_cnt.cnt);
3516 if (ipa3_ctx->wakelock_ref_cnt.cnt == 0)
3517 __pm_relax(&ipa3_ctx->w_lock);
3518 spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
3519}
3520
/**
 * ipa3_set_required_perf_profile() - pick the IPA clock rate implied by the
 * requested bandwidth and voltage floor, and apply it (clock + bus vote).
 *
 * @floor_voltage: minimum acceptable voltage level
 * @bandwidth_mbps: required bandwidth, used to select a voltage level when
 * clock scaling is enabled
 *
 * Returns 0 on success (including "already at this rate"), -EINVAL for a
 * bad voltage argument, -EFAULT on an unexpected computed voltage.
 */
int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
		u32 bandwidth_mbps)
{
	enum ipa_voltage_level needed_voltage;
	u32 clk_rate;

	IPADBG_LOW("floor_voltage=%d, bandwidth_mbps=%u",
			floor_voltage, bandwidth_mbps);

	if (floor_voltage < IPA_VOLTAGE_UNSPECIFIED ||
		floor_voltage >= IPA_VOLTAGE_MAX) {
		IPAERR("bad voltage\n");
		return -EINVAL;
	}

	/* derive the voltage level from the bandwidth thresholds when
	 * clock scaling is enabled; otherwise always run at nominal
	 */
	if (ipa3_ctx->enable_clock_scaling) {
		IPADBG_LOW("Clock scaling is enabled\n");
		if (bandwidth_mbps >=
			ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo)
			needed_voltage = IPA_VOLTAGE_TURBO;
		else if (bandwidth_mbps >=
			ipa3_ctx->ctrl->clock_scaling_bw_threshold_nominal)
			needed_voltage = IPA_VOLTAGE_NOMINAL;
		else
			needed_voltage = IPA_VOLTAGE_SVS;
	} else {
		IPADBG_LOW("Clock scaling is disabled\n");
		needed_voltage = IPA_VOLTAGE_NOMINAL;
	}

	/* never go below the caller-supplied voltage floor */
	needed_voltage = max(needed_voltage, floor_voltage);
	switch (needed_voltage) {
	case IPA_VOLTAGE_SVS:
		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs;
		break;
	case IPA_VOLTAGE_NOMINAL:
		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_nominal;
		break;
	case IPA_VOLTAGE_TURBO:
		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
		break;
	default:
		IPAERR("bad voltage\n");
		WARN_ON(1);
		return -EFAULT;
	}

	if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) {
		IPADBG_LOW("Same voltage\n");
		return 0;
	}

	ipa3_active_clients_lock();
	ipa3_ctx->curr_ipa_clk_rate = clk_rate;
	IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
	if (ipa3_ctx->ipa3_active_clients.cnt > 0) {
		struct ipa_active_client_logging_info log_info;

		/*
		 * clk_set_rate should be called with unlocked lock to allow
		 * clients to get a reference to IPA clock synchronously.
		 * Hold a reference to IPA clock here to make sure clock
		 * state does not change during set_rate.
		 */
		IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
		ipa3_ctx->ipa3_active_clients.cnt++;
		ipa3_active_clients_log_inc(&log_info, false);
		ipa3_active_clients_unlock();

		if (ipa3_clk)
			clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
		if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
				ipa3_get_bus_vote()))
			WARN_ON(1);
		/* remove the vote added here */
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	} else {
		/* rate will take effect when clocks are next enabled */
		IPADBG_LOW("clocks are gated, not setting rate\n");
		ipa3_active_clients_unlock();
	}
	IPADBG_LOW("Done\n");

	return 0;
}
3605
/* schedule the delayed transport-resource release work to run after the
 * transport PROD timeout
 */
static void ipa3_process_irq_schedule_rel(void)
{
	queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq,
		&ipa3_transport_release_resource_work,
		msecs_to_jiffies(IPA_TRANSPORT_PROD_TIMEOUT_MSEC));
}
3612
/**
* ipa3_suspend_handler() - Handles the suspend interrupt:
* wakes up the suspended peripheral by requesting its consumer
* @interrupt: Interrupt type
* @private_data: The client's private data
* @interrupt_data: Interrupt specific information data
*/
void ipa3_suspend_handler(enum ipa_irq_type interrupt,
				void *private_data,
				void *interrupt_data)
{
	enum ipa_rm_resource_name resource;
	u32 suspend_data =
		((struct ipa_tx_suspend_irq_data *)interrupt_data)->endpoints;
	u32 bmsk = 1;
	u32 i = 0;
	int res;
	struct ipa_ep_cfg_holb holb_cfg;

	IPADBG("interrupt=%d, interrupt_data=%u\n",
		interrupt, suspend_data);
	memset(&holb_cfg, 0, sizeof(holb_cfg));
	holb_cfg.tmr_val = 0;

	/* suspend_data carries one bit per pipe; scan all valid pipes */
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if ((suspend_data & bmsk) && (ipa3_ctx->ep[i].valid)) {
			if (IPA_CLIENT_IS_APPS_CONS(ipa3_ctx->ep[i].client)) {
				/*
				 * pipe will be unsuspended as part of
				 * enabling IPA clocks
				 */
				mutex_lock(&ipa3_ctx->transport_pm.
					transport_pm_mutex);
				/* take the clock vote only once; dec_clients
				 * marks that a deferred release is pending
				 */
				if (!atomic_read(
					&ipa3_ctx->transport_pm.dec_clients)
					) {
					IPA_ACTIVE_CLIENTS_INC_EP(
						ipa3_ctx->ep[i].client);
					IPADBG_LOW("Pipes un-suspended.\n");
					IPADBG_LOW("Enter poll mode.\n");
					atomic_set(
					&ipa3_ctx->transport_pm.dec_clients,
					1);
					ipa3_process_irq_schedule_rel();
				}
				mutex_unlock(&ipa3_ctx->transport_pm.
					transport_pm_mutex);
			} else {
				/* non-APPS pipe: wake it via its RM resource */
				resource = ipa3_get_rm_resource_from_ep(i);
				res =
				ipa_rm_request_resource_with_timer(resource);
				/* request denied: enable HOLB drop on the
				 * consumer so it does not stall the HW;
				 * failure to do so is fatal (BUG)
				 */
				if (res == -EPERM &&
					IPA_CLIENT_IS_CONS(
					ipa3_ctx->ep[i].client)) {
					holb_cfg.en = 1;
					res = ipa3_cfg_ep_holb_by_client(
					ipa3_ctx->ep[i].client, &holb_cfg);
					if (res) {
						IPAERR("holb en fail, stall\n");
						BUG();
					}
				}
			}
		}
		bmsk = bmsk << 1;
	}
}
3680
3681/**
3682* ipa3_restore_suspend_handler() - restores the original suspend IRQ handler
3683* as it was registered in the IPA init sequence.
3684* Return codes:
3685* 0: success
3686* -EPERM: failed to remove current handler or failed to add original handler
3687*/
3688int ipa3_restore_suspend_handler(void)
3689{
3690 int result = 0;
3691
3692 result = ipa3_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ);
3693 if (result) {
3694 IPAERR("remove handler for suspend interrupt failed\n");
3695 return -EPERM;
3696 }
3697
3698 result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
3699 ipa3_suspend_handler, false, NULL);
3700 if (result) {
3701 IPAERR("register handler for suspend interrupt failed\n");
3702 result = -EPERM;
3703 }
3704
3705 IPADBG("suspend handler successfully restored\n");
3706
3707 return result;
3708}
3709
/* IPA_RM APPS_CONS release callback: nothing to release, always succeeds */
static int ipa3_apps_cons_release_resource(void)
{
	return 0;
}
3714
/* IPA_RM APPS_CONS request callback: nothing to acquire, always succeeds */
static int ipa3_apps_cons_request_resource(void)
{
	return 0;
}
3719
/* delayed work: drop the transport clock vote taken by the suspend
 * handler, unless EOT activity arrived meanwhile (then re-arm the timer)
 */
static void ipa3_transport_release_resource(struct work_struct *work)
{
	mutex_lock(&ipa3_ctx->transport_pm.transport_pm_mutex);
	/* check whether still need to decrease client usage */
	if (atomic_read(&ipa3_ctx->transport_pm.dec_clients)) {
		if (atomic_read(&ipa3_ctx->transport_pm.eot_activity)) {
			IPADBG("EOT pending Re-scheduling\n");
			ipa3_process_irq_schedule_rel();
		} else {
			atomic_set(&ipa3_ctx->transport_pm.dec_clients, 0);
			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TRANSPORT_RESOURCE");
		}
	}
	atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
	mutex_unlock(&ipa3_ctx->transport_pm.transport_pm_mutex);
}
3736
3737int ipa3_create_apps_resource(void)
3738{
3739 struct ipa_rm_create_params apps_cons_create_params;
3740 struct ipa_rm_perf_profile profile;
3741 int result = 0;
3742
3743 memset(&apps_cons_create_params, 0,
3744 sizeof(apps_cons_create_params));
3745 apps_cons_create_params.name = IPA_RM_RESOURCE_APPS_CONS;
3746 apps_cons_create_params.request_resource =
3747 ipa3_apps_cons_request_resource;
3748 apps_cons_create_params.release_resource =
3749 ipa3_apps_cons_release_resource;
3750 result = ipa_rm_create_resource(&apps_cons_create_params);
3751 if (result) {
3752 IPAERR("ipa_rm_create_resource failed\n");
3753 return result;
3754 }
3755
3756 profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
3757 ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile);
3758
3759 return result;
3760}
3761
3762/**
3763 * ipa3_init_interrupts() - Register to IPA IRQs
3764 *
3765 * Return codes: 0 in success, negative in failure
3766 *
3767 */
3768int ipa3_init_interrupts(void)
3769{
3770 int result;
3771
3772 /*register IPA IRQ handler*/
3773 result = ipa3_interrupts_init(ipa3_res.ipa_irq, 0,
3774 master_dev);
3775 if (result) {
3776 IPAERR("ipa interrupts initialization failed\n");
3777 return -ENODEV;
3778 }
3779
3780 /*add handler for suspend interrupt*/
3781 result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
3782 ipa3_suspend_handler, false, NULL);
3783 if (result) {
3784 IPAERR("register handler for suspend interrupt failed\n");
3785 result = -ENODEV;
3786 goto fail_add_interrupt_handler;
3787 }
3788
3789 return 0;
3790
3791fail_add_interrupt_handler:
3792 free_irq(ipa3_res.ipa_irq, master_dev);
3793 return result;
3794}
3795
3796/**
3797 * ipa3_destroy_flt_tbl_idrs() - destroy the idr structure for flt tables
3798 * The idr strcuture per filtering table is intended for rule id generation
3799 * per filtering rule.
3800 */
3801static void ipa3_destroy_flt_tbl_idrs(void)
3802{
3803 int i;
3804 struct ipa3_flt_tbl *flt_tbl;
3805
3806 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
3807 if (!ipa_is_ep_support_flt(i))
3808 continue;
3809
3810 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
3811 idr_destroy(&flt_tbl->rule_ids);
3812 flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
3813 idr_destroy(&flt_tbl->rule_ids);
3814 }
3815}
3816
/* freeze the current IPA clock state and report it to the modem over the
 * smp2p GPIOs: one GPIO carries the clock-on/off state, the other signals
 * that the response is complete; sent at most once (res_sent latch)
 */
static void ipa3_freeze_clock_vote_and_notify_modem(void)
{
	int res;
	struct ipa_active_client_logging_info log_info;

	if (ipa3_ctx->smp2p_info.res_sent)
		return;

	if (ipa3_ctx->smp2p_info.out_base_id == 0) {
		IPAERR("smp2p out gpio not assigned\n");
		return;
	}

	/* try to pin the clocks on without blocking; if that is not
	 * possible, report the clocks as off
	 */
	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "FREEZE_VOTE");
	res = ipa3_inc_client_enable_clks_no_block(&log_info);
	if (res)
		ipa3_ctx->smp2p_info.ipa_clk_on = false;
	else
		ipa3_ctx->smp2p_info.ipa_clk_on = true;

	gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
		IPA_GPIO_OUT_CLK_VOTE_IDX,
		ipa3_ctx->smp2p_info.ipa_clk_on);
	gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
		IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX, 1);

	ipa3_ctx->smp2p_info.res_sent = true;
	IPADBG("IPA clocks are %s\n",
		ipa3_ctx->smp2p_info.ipa_clk_on ? "ON" : "OFF");
}
3847
3848void ipa3_reset_freeze_vote(void)
3849{
3850 if (ipa3_ctx->smp2p_info.res_sent == false)
3851 return;
3852
3853 if (ipa3_ctx->smp2p_info.ipa_clk_on)
3854 IPA_ACTIVE_CLIENTS_DEC_SPECIAL("FREEZE_VOTE");
3855
3856 gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
3857 IPA_GPIO_OUT_CLK_VOTE_IDX, 0);
3858 gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
3859 IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX, 0);
3860
3861 ipa3_ctx->smp2p_info.res_sent = false;
3862 ipa3_ctx->smp2p_info.ipa_clk_on = false;
Amir Levy9659e592016-10-27 18:08:27 +03003863}
3864
3865static int ipa3_panic_notifier(struct notifier_block *this,
3866 unsigned long event, void *ptr)
3867{
3868 int res;
3869
3870 ipa3_freeze_clock_vote_and_notify_modem();
3871
3872 IPADBG("Calling uC panic handler\n");
3873 res = ipa3_uc_panic_notifier(this, event, ptr);
3874 if (res)
3875 IPAERR("uC panic handler failed %d\n", res);
3876
3877 return NOTIFY_DONE;
3878}
3879
static struct notifier_block ipa3_panic_blk = {
	.notifier_call = ipa3_panic_notifier,
	/*
	 * IPA panic handler needs to run before modem shuts down,
	 * hence the highest possible priority in the panic chain.
	 */
	.priority = INT_MAX,
};
3885
static void ipa3_register_panic_hdlr(void)
{
	/* hook the IPA notifier into the global kernel panic chain */
	atomic_notifier_chain_register(&panic_notifier_list,
		&ipa3_panic_blk);
}
3891
3892static void ipa3_trigger_ipa_ready_cbs(void)
3893{
3894 struct ipa3_ready_cb_info *info;
3895
3896 mutex_lock(&ipa3_ctx->lock);
3897
3898 /* Call all the CBs */
3899 list_for_each_entry(info, &ipa3_ctx->ipa_ready_cb_list, link)
3900 if (info->ready_cb)
3901 info->ready_cb(info->user_data);
3902
3903 mutex_unlock(&ipa3_ctx->lock);
3904}
3905
3906static int ipa3_gsi_pre_fw_load_init(void)
3907{
3908 int result;
3909
3910 result = gsi_configure_regs(ipa3_res.transport_mem_base,
3911 ipa3_res.transport_mem_size,
3912 ipa3_res.ipa_mem_base);
3913 if (result) {
3914 IPAERR("Failed to configure GSI registers\n");
3915 return -EINVAL;
3916 }
3917
3918 return 0;
3919}
3920
static void ipa3_uc_is_loaded(void)
{
	IPADBG("\n");
	/* wake every waiter blocked on the uC-loaded completion */
	complete_all(&ipa3_ctx->uc_loaded_completion_obj);
}
3926
Amir Levy41644242016-11-03 15:38:09 +02003927static enum gsi_ver ipa3_get_gsi_ver(enum ipa_hw_type ipa_hw_type)
3928{
3929 enum gsi_ver gsi_ver;
3930
3931 switch (ipa_hw_type) {
3932 case IPA_HW_v3_0:
3933 case IPA_HW_v3_1:
3934 gsi_ver = GSI_VER_1_0;
3935 break;
3936 case IPA_HW_v3_5:
3937 gsi_ver = GSI_VER_1_2;
3938 break;
3939 case IPA_HW_v3_5_1:
3940 gsi_ver = GSI_VER_1_3;
3941 break;
Michael Adisumarta891a4ff2017-05-16 16:40:06 -07003942 case IPA_HW_v4_0:
3943 gsi_ver = GSI_VER_2_0;
3944 break;
Amir Levy41644242016-11-03 15:38:09 +02003945 default:
3946 IPAERR("No GSI version for ipa type %d\n", ipa_hw_type);
3947 WARN_ON(1);
3948 gsi_ver = GSI_VER_ERR;
3949 }
3950
3951 IPADBG("GSI version %d\n", gsi_ver);
3952
3953 return gsi_ver;
3954}
3955
/**
 * ipa3_post_init() - Initialize the IPA Driver (Part II).
 * This part contains all initialization which requires interaction with
 * IPA HW (via GSI).
 *
 * @resource_p:	contain platform specific values from DST file
 * @ipa_dev:	the device structure representing the IPA driver
 *
 * Function initialization process:
 * - Initialize endpoints bitmaps
 * - Initialize resource groups min and max values
 * - Initialize filtering lists heads and idr
 * - Initialize interrupts
 * - Register GSI
 * - Setup APPS pipes
 * - Initialize tethering bridge
 * - Initialize IPA debugfs
 * - Initialize IPA uC interface
 * - Initialize WDI interface
 * - Initialize USB interface
 * - Register for panic handler
 * - Trigger IPA ready callbacks (to all subscribers)
 * - Trigger IPA completion object (to all who wait on it)
 *
 * Returns 0 on success; on failure tears down everything set up here AND
 * the char-device/RM state created earlier by ipa3_pre_init(), frees
 * ipa3_ctx and returns a negative errno.
 */
static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
	struct device *ipa_dev)
{
	int result;
	struct gsi_per_props gsi_props;
	struct ipa3_uc_hdlrs uc_hdlrs = { 0 };
	struct ipa3_flt_tbl *flt_tbl;
	int i;

	/*
	 * indication whether working in MHI config or non MHI config is given
	 * in ipa3_write which is launched before ipa3_post_init. i.e. from
	 * this point it is safe to use ipa3_ep_mapping array and the correct
	 * entry will be returned from ipa3_get_hw_type_index()
	 */
	ipa_init_ep_flt_bitmap();
	IPADBG("EP with flt support bitmap 0x%x (%u pipes)\n",
		ipa3_ctx->ep_flt_bitmap, ipa3_ctx->ep_flt_num);

	/* Assign resource limitation to each group */
	ipa3_set_resorce_groups_min_max_limits();

	/*
	 * Init the IPv4/IPv6 filter table bookkeeping for every endpoint
	 * that supports filtering; in_sys marks tables that live in system
	 * (DDR) memory rather than local IPA SRAM.
	 */
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		if (!ipa_is_ep_support_flt(i))
			continue;

		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
		flt_tbl->in_sys[IPA_RULE_HASHABLE] =
			!ipa3_ctx->ip4_flt_tbl_hash_lcl;
		flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
			!ipa3_ctx->ip4_flt_tbl_nhash_lcl;
		idr_init(&flt_tbl->rule_ids);

		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
		flt_tbl->in_sys[IPA_RULE_HASHABLE] =
			!ipa3_ctx->ip6_flt_tbl_hash_lcl;
		flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
			!ipa3_ctx->ip6_flt_tbl_nhash_lcl;
		idr_init(&flt_tbl->rule_ids);
	}

	/* under the rg10 workaround interrupts are handled elsewhere */
	if (!ipa3_ctx->apply_rg10_wa) {
		result = ipa3_init_interrupts();
		if (result) {
			IPAERR("ipa initialization of interrupts failed\n");
			result = -ENODEV;
			goto fail_register_device;
		}
	} else {
		IPADBG("Initialization of ipa interrupts skipped\n");
	}

	/*
	 * IPAv3.5 and above requires to disable prefetch for USB in order
	 * to allow MBIM to work, currently MBIM is not needed in MHI mode.
	 */
	if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5) &&
		(!ipa3_ctx->ipa_config_is_mhi))
		ipa3_disable_prefetch(IPA_CLIENT_USB_CONS);

	/* describe this execution environment to the GSI driver */
	memset(&gsi_props, 0, sizeof(gsi_props));
	gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type);
	gsi_props.ee = resource_p->ee;
	gsi_props.intr = GSI_INTR_IRQ;
	gsi_props.irq = resource_p->transport_irq;
	gsi_props.phys_addr = resource_p->transport_mem_base;
	gsi_props.size = resource_p->transport_mem_size;
	gsi_props.notify_cb = ipa_gsi_notify_cb;
	gsi_props.req_clk_cb = NULL;
	gsi_props.rel_clk_cb = NULL;

	result = gsi_register_device(&gsi_props,
		&ipa3_ctx->gsi_dev_hdl);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR(":gsi register error - %d\n", result);
		result = -ENODEV;
		goto fail_register_device;
	}
	IPADBG("IPA gsi is registered\n");

	/* setup the AP-IPA pipes */
	if (ipa3_setup_apps_pipes()) {
		IPAERR(":failed to setup IPA-Apps pipes\n");
		result = -ENODEV;
		goto fail_setup_apps_pipes;
	}
	IPADBG("IPA GPI pipes were connected\n");

	if (ipa3_ctx->use_ipa_teth_bridge) {
		/* Initialize the tethering bridge driver */
		result = ipa3_teth_bridge_driver_init();
		if (result) {
			IPAERR(":teth_bridge init failed (%d)\n", -result);
			result = -ENODEV;
			goto fail_teth_bridge_driver_init;
		}
		IPADBG("teth_bridge initialized");
	}

	ipa3_debugfs_init();

	/* uC/WDI/NTN init failures are logged but deliberately non-fatal */
	result = ipa3_uc_interface_init();
	if (result)
		IPAERR(":ipa Uc interface init failed (%d)\n", -result);
	else
		IPADBG(":ipa Uc interface init ok\n");

	uc_hdlrs.ipa_uc_loaded_hdlr = ipa3_uc_is_loaded;
	ipa3_uc_register_handlers(IPA_HW_FEATURE_COMMON, &uc_hdlrs);

	result = ipa3_wdi_init();
	if (result)
		IPAERR(":wdi init failed (%d)\n", -result);
	else
		IPADBG(":wdi init ok\n");

	result = ipa3_ntn_init();
	if (result)
		IPAERR(":ntn init failed (%d)\n", -result);
	else
		IPADBG(":ntn init ok\n");

	ipa3_register_panic_hdlr();

	ipa3_ctx->q6_proxy_clk_vote_valid = true;

	mutex_lock(&ipa3_ctx->lock);
	ipa3_ctx->ipa_initialization_complete = true;
	mutex_unlock(&ipa3_ctx->lock);

	/* wake everyone waiting for the driver to become ready */
	ipa3_trigger_ipa_ready_cbs();
	complete_all(&ipa3_ctx->init_completion_obj);
	pr_info("IPA driver initialization was successful.\n");

	return 0;

/*
 * Note: the tail of this ladder (from fail_register_device down) also
 * unwinds state created by ipa3_pre_init(), since a post-init failure
 * leaves the driver unusable and ipa3_ctx is freed here.
 */
fail_teth_bridge_driver_init:
	ipa3_teardown_apps_pipes();
fail_setup_apps_pipes:
	gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
fail_register_device:
	ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
	ipa_rm_exit();
	cdev_del(&ipa3_ctx->cdev);
	device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
	unregister_chrdev_region(ipa3_ctx->dev_num, 1);
	ipa3_free_dma_task_for_gsi();
	ipa3_destroy_flt_tbl_idrs();
	idr_destroy(&ipa3_ctx->ipa_idr);
	kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
	kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
	kmem_cache_destroy(ipa3_ctx->rt_tbl_cache);
	kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache);
	kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache);
	kmem_cache_destroy(ipa3_ctx->hdr_offset_cache);
	kmem_cache_destroy(ipa3_ctx->hdr_cache);
	kmem_cache_destroy(ipa3_ctx->rt_rule_cache);
	kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
	destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
	destroy_workqueue(ipa3_ctx->power_mgmt_wq);
	iounmap(ipa3_ctx->mmio);
	ipa3_disable_clks();
	if (ipa3_clk)
		clk_put(ipa3_clk);
	ipa3_clk = NULL;
	msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
	if (ipa3_bus_scale_table) {
		msm_bus_cl_clear_pdata(ipa3_bus_scale_table);
		ipa3_bus_scale_table = NULL;
	}
	kfree(ipa3_ctx->ctrl);
	kfree(ipa3_ctx);
	ipa3_ctx = NULL;
	return result;
}
4157
4158static int ipa3_trigger_fw_loading_mdms(void)
4159{
4160 int result;
4161 const struct firmware *fw;
4162
4163 IPADBG("FW loading process initiated\n");
4164
4165 result = request_firmware(&fw, IPA_FWS_PATH, ipa3_ctx->dev);
4166 if (result < 0) {
4167 IPAERR("request_firmware failed, error %d\n", result);
4168 return result;
4169 }
4170 if (fw == NULL) {
4171 IPAERR("Firmware is NULL!\n");
4172 return -EINVAL;
4173 }
4174
4175 IPADBG("FWs are available for loading\n");
4176
Ghanim Fodi37b64952017-01-24 15:42:30 +02004177 result = ipa3_load_fws(fw, ipa3_res.transport_mem_base);
Amir Levy9659e592016-10-27 18:08:27 +03004178 if (result) {
4179 IPAERR("IPA FWs loading has failed\n");
4180 release_firmware(fw);
4181 return result;
4182 }
4183
4184 result = gsi_enable_fw(ipa3_res.transport_mem_base,
Amir Levy85dcd172016-12-06 17:47:39 +02004185 ipa3_res.transport_mem_size,
4186 ipa3_get_gsi_ver(ipa3_res.ipa_hw_type));
Amir Levy9659e592016-10-27 18:08:27 +03004187 if (result) {
4188 IPAERR("Failed to enable GSI FW\n");
4189 release_firmware(fw);
4190 return result;
4191 }
4192
4193 release_firmware(fw);
4194
4195 IPADBG("FW loading process is complete\n");
4196 return 0;
4197}
4198
4199static int ipa3_trigger_fw_loading_msms(void)
4200{
4201 void *subsystem_get_retval = NULL;
4202
4203 IPADBG("FW loading process initiated\n");
4204
4205 subsystem_get_retval = subsystem_get(IPA_SUBSYSTEM_NAME);
4206 if (IS_ERR_OR_NULL(subsystem_get_retval)) {
4207 IPAERR("Unable to trigger PIL process for FW loading\n");
4208 return -EINVAL;
4209 }
4210
4211 IPADBG("FW loading process is complete\n");
4212 return 0;
4213}
4214
4215static ssize_t ipa3_write(struct file *file, const char __user *buf,
4216 size_t count, loff_t *ppos)
4217{
4218 unsigned long missing;
4219 int result = -EINVAL;
4220
4221 char dbg_buff[16] = { 0 };
4222
4223 if (sizeof(dbg_buff) < count + 1)
4224 return -EFAULT;
4225
4226 missing = copy_from_user(dbg_buff, buf, count);
4227
4228 if (missing) {
4229 IPAERR("Unable to copy data from user\n");
4230 return -EFAULT;
4231 }
4232
4233 /* Prevent consequent calls from trying to load the FW again. */
4234 if (ipa3_is_ready())
4235 return count;
4236
Amir Levya59ed3f2017-03-05 17:30:55 +02004237 IPA_ACTIVE_CLIENTS_INC_SIMPLE();
Amir Levy9659e592016-10-27 18:08:27 +03004238
Amir Levy54fe4d32017-03-16 11:21:49 +02004239 if (ipa3_is_msm_device()) {
Amir Levya59ed3f2017-03-05 17:30:55 +02004240 result = ipa3_trigger_fw_loading_msms();
Amir Levy54fe4d32017-03-16 11:21:49 +02004241 } else {
4242 if (!strcasecmp(dbg_buff, "MHI")) {
4243 ipa3_ctx->ipa_config_is_mhi = true;
4244 pr_info(
4245 "IPA is loading with MHI configuration\n");
4246 } else {
4247 pr_info(
4248 "IPA is loading with non MHI configuration\n");
4249 }
Amir Levya59ed3f2017-03-05 17:30:55 +02004250 result = ipa3_trigger_fw_loading_mdms();
Amir Levy54fe4d32017-03-16 11:21:49 +02004251 }
Amir Levya59ed3f2017-03-05 17:30:55 +02004252 /* No IPAv3.x chipsets that don't support FW loading */
Amir Levy9659e592016-10-27 18:08:27 +03004253
Amir Levya59ed3f2017-03-05 17:30:55 +02004254 IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
Amir Levy9659e592016-10-27 18:08:27 +03004255
Amir Levya59ed3f2017-03-05 17:30:55 +02004256 if (result) {
4257 IPAERR("FW loading process has failed\n");
Ghanim Fodi24fee1c2017-02-12 15:25:53 +02004258 return result;
Amir Levya59ed3f2017-03-05 17:30:55 +02004259 } else
4260 ipa3_post_init(&ipa3_res, ipa3_ctx->dev);
4261
Amir Levy9659e592016-10-27 18:08:27 +03004262 return count;
4263}
4264
/*
 * ipa3_tz_unlock_reg() - ask TrustZone to unlock IPA register regions.
 * @ipa3_ctx: driver context holding the ipa_tz_unlock_reg[] list.
 *
 * Builds an iovec table describing each region to unprotect, flushes it
 * to DDR (TZ reads it by physical address, bypassing the CPU caches) and
 * issues the TZ_MEM_PROTECT_REGION_ID SCM call.
 *
 * Returns 0 on success or when there is nothing to unlock, -ENOMEM on
 * allocation failure, -EFAULT if the SCM call fails.
 */
static int ipa3_tz_unlock_reg(struct ipa3_context *ipa3_ctx)
{
	int i, size, ret, resp;
	struct tz_smmu_ipa_protect_region_iovec_s *ipa_tz_unlock_vec;
	struct tz_smmu_ipa_protect_region_s cmd_buf;

	if (ipa3_ctx && ipa3_ctx->ipa_tz_unlock_reg_num > 0) {
		size = ipa3_ctx->ipa_tz_unlock_reg_num *
			sizeof(struct tz_smmu_ipa_protect_region_iovec_s);
		ipa_tz_unlock_vec = kzalloc(PAGE_ALIGN(size), GFP_KERNEL);
		if (ipa_tz_unlock_vec == NULL)
			return -ENOMEM;

		for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) {
			/*
			 * addr ^ (addr & 0xFFF) clears the low 12 bits,
			 * i.e. rounds the address down to its 4K page base
			 * (equivalent to addr & ~0xFFF).
			 */
			ipa_tz_unlock_vec[i].input_addr =
				ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr ^
				(ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr &
				0xFFF);
			ipa_tz_unlock_vec[i].output_addr =
				ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr ^
				(ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr &
				0xFFF);
			ipa_tz_unlock_vec[i].size =
				ipa3_ctx->ipa_tz_unlock_reg[i].size;
			ipa_tz_unlock_vec[i].attr = IPA_TZ_UNLOCK_ATTRIBUTE;
		}

		/* pass physical address of command buffer */
		cmd_buf.iovec_buf = virt_to_phys((void *)ipa_tz_unlock_vec);
		cmd_buf.size_bytes = size;

		/* flush cache to DDR */
		__cpuc_flush_dcache_area((void *)ipa_tz_unlock_vec, size);
		outer_flush_range(cmd_buf.iovec_buf, cmd_buf.iovec_buf + size);

		ret = scm_call(SCM_SVC_MP, TZ_MEM_PROTECT_REGION_ID, &cmd_buf,
				sizeof(cmd_buf), &resp, sizeof(resp));
		if (ret) {
			IPAERR("scm call SCM_SVC_MP failed: %d\n", ret);
			kfree(ipa_tz_unlock_vec);
			return -EFAULT;
		}
		kfree(ipa_tz_unlock_vec);
	}
	return 0;
}
4311
/*
 * ipa3_alloc_pkt_init() - pre-build per-pipe IP_PACKET_INIT commands.
 *
 * Constructs one IP_PACKET_INIT immediate command per pipe in a single
 * DMA-coherent buffer and records each command's physical address in
 * ipa3_ctx->pkt_init_imm[] for fast reuse on the data path.
 *
 * The DMA buffer is intentionally NOT freed on success: it must stay
 * alive for the driver's lifetime since the hardware fetches the
 * commands by the stored physical addresses.
 *
 * Returns 0 on success, -ENOMEM on command-construction or DMA
 * allocation failure.
 */
static int ipa3_alloc_pkt_init(void)
{
	struct ipa_mem_buffer mem;
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	struct ipahal_imm_cmd_ip_packet_init cmd = {0};
	int i;

	/* build a throwaway command just to learn the payload length */
	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
		&cmd, false);
	if (!cmd_pyld) {
		IPAERR("failed to construct IMM cmd\n");
		return -ENOMEM;
	}

	mem.size = cmd_pyld->len * ipa3_ctx->ipa_num_pipes;
	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
		&mem.phys_base, GFP_KERNEL);
	if (!mem.base) {
		IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
		ipahal_destroy_imm_cmd(cmd_pyld);
		return -ENOMEM;
	}
	ipahal_destroy_imm_cmd(cmd_pyld);

	memset(mem.base, 0, mem.size);
	/* one IP_PACKET_INIT command per destination pipe */
	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
		cmd.destination_pipe_index = i;
		cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
			&cmd, false);
		if (!cmd_pyld) {
			IPAERR("failed to construct IMM cmd\n");
			dma_free_coherent(ipa3_ctx->pdev,
				mem.size,
				mem.base,
				mem.phys_base);
			return -ENOMEM;
		}
		memcpy(mem.base + i * cmd_pyld->len, cmd_pyld->data,
			cmd_pyld->len);
		ipa3_ctx->pkt_init_imm[i] = mem.phys_base + i * cmd_pyld->len;
		ipahal_destroy_imm_cmd(cmd_pyld);
	}

	return 0;
}
4357
Amir Levy9659e592016-10-27 18:08:27 +03004358/**
4359* ipa3_pre_init() - Initialize the IPA Driver.
4360* This part contains all initialization which doesn't require IPA HW, such
4361* as structure allocations and initializations, register writes, etc.
4362*
4363* @resource_p: contain platform specific values from DST file
4364* @pdev: The platform device structure representing the IPA driver
4365*
4366* Function initialization process:
Amir Levy54fe4d32017-03-16 11:21:49 +02004367* Allocate memory for the driver context data struct
4368* Initializing the ipa3_ctx with :
Amir Levy9659e592016-10-27 18:08:27 +03004369* 1)parsed values from the dts file
4370* 2)parameters passed to the module initialization
4371* 3)read HW values(such as core memory size)
Amir Levy54fe4d32017-03-16 11:21:49 +02004372* Map IPA core registers to CPU memory
4373* Restart IPA core(HW reset)
4374* Initialize the look-aside caches(kmem_cache/slab) for filter,
Amir Levy9659e592016-10-27 18:08:27 +03004375* routing and IPA-tree
Amir Levy54fe4d32017-03-16 11:21:49 +02004376* Create memory pool with 4 objects for DMA operations(each object
Amir Levy9659e592016-10-27 18:08:27 +03004377* is 512Bytes long), this object will be use for tx(A5->IPA)
Amir Levy54fe4d32017-03-16 11:21:49 +02004378* Initialize lists head(routing, hdr, system pipes)
4379* Initialize mutexes (for ipa_ctx and NAT memory mutexes)
4380* Initialize spinlocks (for list related to A5<->IPA pipes)
4381* Initialize 2 single-threaded work-queue named "ipa rx wq" and "ipa tx wq"
4382* Initialize Red-Black-Tree(s) for handles of header,routing rule,
4383* routing table ,filtering rule
4384* Initialize the filter block by committing IPV4 and IPV6 default rules
4385* Create empty routing table in system memory(no committing)
4386* Create a char-device for IPA
4387* Initialize IPA RM (resource manager)
4388* Configure GSI registers (in GSI case)
Amir Levy9659e592016-10-27 18:08:27 +03004389*/
4390static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
4391 struct device *ipa_dev)
4392{
4393 int result = 0;
4394 int i;
Amir Levy9659e592016-10-27 18:08:27 +03004395 struct ipa3_rt_tbl_set *rset;
4396 struct ipa_active_client_logging_info log_info;
4397
4398 IPADBG("IPA Driver initialization started\n");
4399
4400 ipa3_ctx = kzalloc(sizeof(*ipa3_ctx), GFP_KERNEL);
4401 if (!ipa3_ctx) {
4402 IPAERR(":kzalloc err.\n");
4403 result = -ENOMEM;
4404 goto fail_mem_ctx;
4405 }
4406
4407 ipa3_ctx->logbuf = ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa", 0);
Skylar Chang841c1452017-04-03 16:07:22 -07004408 if (ipa3_ctx->logbuf == NULL)
4409 IPAERR("failed to create IPC log, continue...\n");
Amir Levy9659e592016-10-27 18:08:27 +03004410
4411 ipa3_ctx->pdev = ipa_dev;
4412 ipa3_ctx->uc_pdev = ipa_dev;
4413 ipa3_ctx->smmu_present = smmu_info.present;
4414 if (!ipa3_ctx->smmu_present)
4415 ipa3_ctx->smmu_s1_bypass = true;
4416 else
4417 ipa3_ctx->smmu_s1_bypass = smmu_info.s1_bypass;
4418 ipa3_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
4419 ipa3_ctx->ipa_wrapper_size = resource_p->ipa_mem_size;
4420 ipa3_ctx->ipa_hw_type = resource_p->ipa_hw_type;
4421 ipa3_ctx->ipa3_hw_mode = resource_p->ipa3_hw_mode;
4422 ipa3_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
Amir Levy9659e592016-10-27 18:08:27 +03004423 ipa3_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt;
4424 ipa3_ctx->ipa_wdi2 = resource_p->ipa_wdi2;
4425 ipa3_ctx->use_64_bit_dma_mask = resource_p->use_64_bit_dma_mask;
4426 ipa3_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size;
4427 ipa3_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size;
4428 ipa3_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset;
4429 ipa3_ctx->tethered_flow_control = resource_p->tethered_flow_control;
Amir Levy9659e592016-10-27 18:08:27 +03004430 ipa3_ctx->ee = resource_p->ee;
4431 ipa3_ctx->apply_rg10_wa = resource_p->apply_rg10_wa;
4432 ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa;
4433 ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02004434 if (resource_p->ipa_tz_unlock_reg) {
4435 ipa3_ctx->ipa_tz_unlock_reg_num =
4436 resource_p->ipa_tz_unlock_reg_num;
4437 ipa3_ctx->ipa_tz_unlock_reg = kcalloc(
4438 ipa3_ctx->ipa_tz_unlock_reg_num,
4439 sizeof(*ipa3_ctx->ipa_tz_unlock_reg),
4440 GFP_KERNEL);
4441 if (ipa3_ctx->ipa_tz_unlock_reg == NULL) {
4442 result = -ENOMEM;
4443 goto fail_tz_unlock_reg;
4444 }
4445 for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) {
4446 ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr =
4447 resource_p->ipa_tz_unlock_reg[i].reg_addr;
4448 ipa3_ctx->ipa_tz_unlock_reg[i].size =
4449 resource_p->ipa_tz_unlock_reg[i].size;
4450 }
4451 }
4452
4453 /* unlock registers for uc */
4454 ipa3_tz_unlock_reg(ipa3_ctx);
Amir Levy9659e592016-10-27 18:08:27 +03004455
4456 /* default aggregation parameters */
4457 ipa3_ctx->aggregation_type = IPA_MBIM_16;
4458 ipa3_ctx->aggregation_byte_limit = 1;
4459 ipa3_ctx->aggregation_time_limit = 0;
4460
4461 ipa3_ctx->ctrl = kzalloc(sizeof(*ipa3_ctx->ctrl), GFP_KERNEL);
4462 if (!ipa3_ctx->ctrl) {
4463 IPAERR("memory allocation error for ctrl\n");
4464 result = -ENOMEM;
4465 goto fail_mem_ctrl;
4466 }
4467 result = ipa3_controller_static_bind(ipa3_ctx->ctrl,
4468 ipa3_ctx->ipa_hw_type);
4469 if (result) {
4470 IPAERR("fail to static bind IPA ctrl.\n");
4471 result = -EFAULT;
4472 goto fail_bind;
4473 }
4474
4475 result = ipa3_init_mem_partition(master_dev->of_node);
4476 if (result) {
4477 IPAERR(":ipa3_init_mem_partition failed!\n");
4478 result = -ENODEV;
4479 goto fail_init_mem_partition;
4480 }
4481
4482 if (ipa3_bus_scale_table) {
Ghanim Fodi6a831342017-03-07 18:19:15 +02004483 IPADBG("Use bus scaling info from device tree #usecases=%d\n",
4484 ipa3_bus_scale_table->num_usecases);
Amir Levy9659e592016-10-27 18:08:27 +03004485 ipa3_ctx->ctrl->msm_bus_data_ptr = ipa3_bus_scale_table;
4486 }
4487
Ghanim Fodi6a831342017-03-07 18:19:15 +02004488 /* get BUS handle */
4489 ipa3_ctx->ipa_bus_hdl =
4490 msm_bus_scale_register_client(
4491 ipa3_ctx->ctrl->msm_bus_data_ptr);
4492 if (!ipa3_ctx->ipa_bus_hdl) {
4493 IPAERR("fail to register with bus mgr!\n");
4494 result = -ENODEV;
4495 goto fail_bus_reg;
Amir Levy9659e592016-10-27 18:08:27 +03004496 }
4497
4498 /* get IPA clocks */
4499 result = ipa3_get_clks(master_dev);
4500 if (result)
4501 goto fail_clk;
4502
4503 /* init active_clients_log after getting ipa-clk */
4504 if (ipa3_active_clients_log_init())
4505 goto fail_init_active_client;
4506
4507 /* Enable ipa3_ctx->enable_clock_scaling */
4508 ipa3_ctx->enable_clock_scaling = 1;
4509 ipa3_ctx->curr_ipa_clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
4510
4511 /* enable IPA clocks explicitly to allow the initialization */
4512 ipa3_enable_clks();
4513
4514 /* setup IPA register access */
4515 IPADBG("Mapping 0x%x\n", resource_p->ipa_mem_base +
4516 ipa3_ctx->ctrl->ipa_reg_base_ofst);
4517 ipa3_ctx->mmio = ioremap(resource_p->ipa_mem_base +
4518 ipa3_ctx->ctrl->ipa_reg_base_ofst,
4519 resource_p->ipa_mem_size);
4520 if (!ipa3_ctx->mmio) {
4521 IPAERR(":ipa-base ioremap err.\n");
4522 result = -EFAULT;
4523 goto fail_remap;
4524 }
4525
4526 if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio,
4527 ipa3_ctx->pdev)) {
4528 IPAERR("fail to init ipahal\n");
4529 result = -EFAULT;
4530 goto fail_ipahal;
4531 }
4532
4533 result = ipa3_init_hw();
4534 if (result) {
4535 IPAERR(":error initializing HW.\n");
4536 result = -ENODEV;
4537 goto fail_init_hw;
4538 }
4539 IPADBG("IPA HW initialization sequence completed");
4540
4541 ipa3_ctx->ipa_num_pipes = ipa3_get_num_pipes();
4542 if (ipa3_ctx->ipa_num_pipes > IPA3_MAX_NUM_PIPES) {
4543 IPAERR("IPA has more pipes then supported! has %d, max %d\n",
4544 ipa3_ctx->ipa_num_pipes, IPA3_MAX_NUM_PIPES);
4545 result = -ENODEV;
4546 goto fail_init_hw;
4547 }
4548
Amir Levy9659e592016-10-27 18:08:27 +03004549 ipa3_ctx->ctrl->ipa_sram_read_settings();
4550 IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n",
4551 ipa3_ctx->smem_sz, ipa3_ctx->smem_restricted_bytes);
4552
4553 IPADBG("hdr_lcl=%u ip4_rt_hash=%u ip4_rt_nonhash=%u\n",
4554 ipa3_ctx->hdr_tbl_lcl, ipa3_ctx->ip4_rt_tbl_hash_lcl,
4555 ipa3_ctx->ip4_rt_tbl_nhash_lcl);
4556
4557 IPADBG("ip6_rt_hash=%u ip6_rt_nonhash=%u\n",
4558 ipa3_ctx->ip6_rt_tbl_hash_lcl, ipa3_ctx->ip6_rt_tbl_nhash_lcl);
4559
4560 IPADBG("ip4_flt_hash=%u ip4_flt_nonhash=%u\n",
4561 ipa3_ctx->ip4_flt_tbl_hash_lcl,
4562 ipa3_ctx->ip4_flt_tbl_nhash_lcl);
4563
4564 IPADBG("ip6_flt_hash=%u ip6_flt_nonhash=%u\n",
4565 ipa3_ctx->ip6_flt_tbl_hash_lcl,
4566 ipa3_ctx->ip6_flt_tbl_nhash_lcl);
4567
4568 if (ipa3_ctx->smem_reqd_sz > ipa3_ctx->smem_sz) {
4569 IPAERR("SW expect more core memory, needed %d, avail %d\n",
4570 ipa3_ctx->smem_reqd_sz, ipa3_ctx->smem_sz);
4571 result = -ENOMEM;
4572 goto fail_init_hw;
4573 }
4574
4575 mutex_init(&ipa3_ctx->ipa3_active_clients.mutex);
4576 spin_lock_init(&ipa3_ctx->ipa3_active_clients.spinlock);
4577 IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
4578 ipa3_active_clients_log_inc(&log_info, false);
4579 ipa3_ctx->ipa3_active_clients.cnt = 1;
4580
Amir Levy9659e592016-10-27 18:08:27 +03004581 /* Create workqueues for power management */
4582 ipa3_ctx->power_mgmt_wq =
4583 create_singlethread_workqueue("ipa_power_mgmt");
4584 if (!ipa3_ctx->power_mgmt_wq) {
4585 IPAERR("failed to create power mgmt wq\n");
4586 result = -ENOMEM;
4587 goto fail_init_hw;
4588 }
4589
4590 ipa3_ctx->transport_power_mgmt_wq =
4591 create_singlethread_workqueue("transport_power_mgmt");
4592 if (!ipa3_ctx->transport_power_mgmt_wq) {
4593 IPAERR("failed to create transport power mgmt wq\n");
4594 result = -ENOMEM;
4595 goto fail_create_transport_wq;
4596 }
4597
Sridhar Ancha99b505b2016-04-21 23:11:10 +05304598 mutex_init(&ipa3_ctx->transport_pm.transport_pm_mutex);
Amir Levy9659e592016-10-27 18:08:27 +03004599
4600 /* init the lookaside cache */
4601 ipa3_ctx->flt_rule_cache = kmem_cache_create("IPA_FLT",
4602 sizeof(struct ipa3_flt_entry), 0, 0, NULL);
4603 if (!ipa3_ctx->flt_rule_cache) {
4604 IPAERR(":ipa flt cache create failed\n");
4605 result = -ENOMEM;
4606 goto fail_flt_rule_cache;
4607 }
4608 ipa3_ctx->rt_rule_cache = kmem_cache_create("IPA_RT",
4609 sizeof(struct ipa3_rt_entry), 0, 0, NULL);
4610 if (!ipa3_ctx->rt_rule_cache) {
4611 IPAERR(":ipa rt cache create failed\n");
4612 result = -ENOMEM;
4613 goto fail_rt_rule_cache;
4614 }
4615 ipa3_ctx->hdr_cache = kmem_cache_create("IPA_HDR",
4616 sizeof(struct ipa3_hdr_entry), 0, 0, NULL);
4617 if (!ipa3_ctx->hdr_cache) {
4618 IPAERR(":ipa hdr cache create failed\n");
4619 result = -ENOMEM;
4620 goto fail_hdr_cache;
4621 }
4622 ipa3_ctx->hdr_offset_cache =
4623 kmem_cache_create("IPA_HDR_OFFSET",
4624 sizeof(struct ipa_hdr_offset_entry), 0, 0, NULL);
4625 if (!ipa3_ctx->hdr_offset_cache) {
4626 IPAERR(":ipa hdr off cache create failed\n");
4627 result = -ENOMEM;
4628 goto fail_hdr_offset_cache;
4629 }
4630 ipa3_ctx->hdr_proc_ctx_cache = kmem_cache_create("IPA_HDR_PROC_CTX",
4631 sizeof(struct ipa3_hdr_proc_ctx_entry), 0, 0, NULL);
4632 if (!ipa3_ctx->hdr_proc_ctx_cache) {
4633 IPAERR(":ipa hdr proc ctx cache create failed\n");
4634 result = -ENOMEM;
4635 goto fail_hdr_proc_ctx_cache;
4636 }
4637 ipa3_ctx->hdr_proc_ctx_offset_cache =
4638 kmem_cache_create("IPA_HDR_PROC_CTX_OFFSET",
4639 sizeof(struct ipa3_hdr_proc_ctx_offset_entry), 0, 0, NULL);
4640 if (!ipa3_ctx->hdr_proc_ctx_offset_cache) {
4641 IPAERR(":ipa hdr proc ctx off cache create failed\n");
4642 result = -ENOMEM;
4643 goto fail_hdr_proc_ctx_offset_cache;
4644 }
4645 ipa3_ctx->rt_tbl_cache = kmem_cache_create("IPA_RT_TBL",
4646 sizeof(struct ipa3_rt_tbl), 0, 0, NULL);
4647 if (!ipa3_ctx->rt_tbl_cache) {
4648 IPAERR(":ipa rt tbl cache create failed\n");
4649 result = -ENOMEM;
4650 goto fail_rt_tbl_cache;
4651 }
4652 ipa3_ctx->tx_pkt_wrapper_cache =
4653 kmem_cache_create("IPA_TX_PKT_WRAPPER",
4654 sizeof(struct ipa3_tx_pkt_wrapper), 0, 0, NULL);
4655 if (!ipa3_ctx->tx_pkt_wrapper_cache) {
4656 IPAERR(":ipa tx pkt wrapper cache create failed\n");
4657 result = -ENOMEM;
4658 goto fail_tx_pkt_wrapper_cache;
4659 }
4660 ipa3_ctx->rx_pkt_wrapper_cache =
4661 kmem_cache_create("IPA_RX_PKT_WRAPPER",
4662 sizeof(struct ipa3_rx_pkt_wrapper), 0, 0, NULL);
4663 if (!ipa3_ctx->rx_pkt_wrapper_cache) {
4664 IPAERR(":ipa rx pkt wrapper cache create failed\n");
4665 result = -ENOMEM;
4666 goto fail_rx_pkt_wrapper_cache;
4667 }
4668
Skylar Chang6c4bec92017-04-21 16:10:14 -07004669 /* allocate memory for DMA_TASK workaround */
4670 result = ipa3_allocate_dma_task_for_gsi();
4671 if (result) {
4672 IPAERR("failed to allocate dma task\n");
4673 goto fail_dma_task;
4674 }
4675
Amir Levy9659e592016-10-27 18:08:27 +03004676 /* init the various list heads */
4677 INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_hdr_entry_list);
4678 for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
4679 INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_offset_list[i]);
4680 INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_free_offset_list[i]);
4681 }
4682 INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list);
4683 for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
4684 INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i]);
4685 INIT_LIST_HEAD(&ipa3_ctx->
4686 hdr_proc_ctx_tbl.head_free_offset_list[i]);
4687 }
4688 INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
4689 INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
Amir Levy9659e592016-10-27 18:08:27 +03004690
4691 rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
4692 INIT_LIST_HEAD(&rset->head_rt_tbl_list);
4693 rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
4694 INIT_LIST_HEAD(&rset->head_rt_tbl_list);
4695
4696 INIT_LIST_HEAD(&ipa3_ctx->intf_list);
4697 INIT_LIST_HEAD(&ipa3_ctx->msg_list);
4698 INIT_LIST_HEAD(&ipa3_ctx->pull_msg_list);
4699 init_waitqueue_head(&ipa3_ctx->msg_waitq);
4700 mutex_init(&ipa3_ctx->msg_lock);
4701
4702 mutex_init(&ipa3_ctx->lock);
4703 mutex_init(&ipa3_ctx->nat_mem.lock);
4704
4705 idr_init(&ipa3_ctx->ipa_idr);
4706 spin_lock_init(&ipa3_ctx->idr_lock);
4707
4708 /* wlan related member */
4709 memset(&ipa3_ctx->wc_memb, 0, sizeof(ipa3_ctx->wc_memb));
4710 spin_lock_init(&ipa3_ctx->wc_memb.wlan_spinlock);
4711 spin_lock_init(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
4712 INIT_LIST_HEAD(&ipa3_ctx->wc_memb.wlan_comm_desc_list);
4713
Amir Levy9659e592016-10-27 18:08:27 +03004714 ipa3_ctx->class = class_create(THIS_MODULE, DRV_NAME);
4715
4716 result = alloc_chrdev_region(&ipa3_ctx->dev_num, 0, 1, DRV_NAME);
4717 if (result) {
4718 IPAERR("alloc_chrdev_region err.\n");
4719 result = -ENODEV;
4720 goto fail_alloc_chrdev_region;
4721 }
4722
4723 ipa3_ctx->dev = device_create(ipa3_ctx->class, NULL, ipa3_ctx->dev_num,
4724 ipa3_ctx, DRV_NAME);
4725 if (IS_ERR(ipa3_ctx->dev)) {
4726 IPAERR(":device_create err.\n");
4727 result = -ENODEV;
4728 goto fail_device_create;
4729 }
4730
4731 cdev_init(&ipa3_ctx->cdev, &ipa3_drv_fops);
4732 ipa3_ctx->cdev.owner = THIS_MODULE;
4733 ipa3_ctx->cdev.ops = &ipa3_drv_fops; /* from LDD3 */
4734
4735 result = cdev_add(&ipa3_ctx->cdev, ipa3_ctx->dev_num, 1);
4736 if (result) {
4737 IPAERR(":cdev_add err=%d\n", -result);
4738 result = -ENODEV;
4739 goto fail_cdev_add;
4740 }
4741 IPADBG("ipa cdev added successful. major:%d minor:%d\n",
4742 MAJOR(ipa3_ctx->dev_num),
4743 MINOR(ipa3_ctx->dev_num));
4744
4745 if (ipa3_create_nat_device()) {
4746 IPAERR("unable to create nat device\n");
4747 result = -ENODEV;
4748 goto fail_nat_dev_add;
4749 }
4750
4751 /* Create a wakeup source. */
4752 wakeup_source_init(&ipa3_ctx->w_lock, "IPA_WS");
4753 spin_lock_init(&ipa3_ctx->wakelock_ref_cnt.spinlock);
4754
4755 /* Initialize IPA RM (resource manager) */
4756 result = ipa_rm_initialize();
4757 if (result) {
4758 IPAERR("RM initialization failed (%d)\n", -result);
4759 result = -ENODEV;
4760 goto fail_ipa_rm_init;
4761 }
4762 IPADBG("IPA resource manager initialized");
4763
4764 result = ipa3_create_apps_resource();
4765 if (result) {
4766 IPAERR("Failed to create APPS_CONS resource\n");
4767 result = -ENODEV;
4768 goto fail_create_apps_resource;
4769 }
4770
Skylar Changcd3902d2017-03-27 18:08:27 -07004771 result = ipa3_alloc_pkt_init();
4772 if (result) {
4773 IPAERR("Failed to alloc pkt_init payload\n");
4774 result = -ENODEV;
4775 goto fail_create_apps_resource;
4776 }
4777
Amir Levy12ef0912016-08-30 09:27:34 +03004778 if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)
4779 ipa3_enable_dcd();
4780
Amir Levy9659e592016-10-27 18:08:27 +03004781 INIT_LIST_HEAD(&ipa3_ctx->ipa_ready_cb_list);
4782
4783 init_completion(&ipa3_ctx->init_completion_obj);
Skylar Chang0c17c7d2016-10-31 09:57:54 -07004784 init_completion(&ipa3_ctx->uc_loaded_completion_obj);
Amir Levy9659e592016-10-27 18:08:27 +03004785
4786 /*
Amir Levya59ed3f2017-03-05 17:30:55 +02004787 * We can't register the GSI driver yet, as it expects
Amir Levy9659e592016-10-27 18:08:27 +03004788 * the GSI FW to be up and running before the registration.
Amir Levya59ed3f2017-03-05 17:30:55 +02004789 *
4790 * For IPA3.0, the GSI configuration is done by the GSI driver.
4791 * For IPA3.1 (and on), the GSI configuration is done by TZ.
Amir Levy9659e592016-10-27 18:08:27 +03004792 */
Amir Levya59ed3f2017-03-05 17:30:55 +02004793 if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0) {
4794 result = ipa3_gsi_pre_fw_load_init();
4795 if (result) {
4796 IPAERR("gsi pre FW loading config failed\n");
4797 result = -ENODEV;
4798 goto fail_ipa_init_interrupts;
Amir Levy9659e592016-10-27 18:08:27 +03004799 }
4800 }
Amir Levy9659e592016-10-27 18:08:27 +03004801
4802 return 0;
4803
4804fail_ipa_init_interrupts:
4805 ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
4806fail_create_apps_resource:
4807 ipa_rm_exit();
4808fail_ipa_rm_init:
4809fail_nat_dev_add:
4810 cdev_del(&ipa3_ctx->cdev);
4811fail_cdev_add:
4812 device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
4813fail_device_create:
4814 unregister_chrdev_region(ipa3_ctx->dev_num, 1);
4815fail_alloc_chrdev_region:
Skylar Chang6c4bec92017-04-21 16:10:14 -07004816 ipa3_free_dma_task_for_gsi();
4817fail_dma_task:
Amir Levy9659e592016-10-27 18:08:27 +03004818 idr_destroy(&ipa3_ctx->ipa_idr);
Amir Levy9659e592016-10-27 18:08:27 +03004819 kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
4820fail_rx_pkt_wrapper_cache:
4821 kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
4822fail_tx_pkt_wrapper_cache:
4823 kmem_cache_destroy(ipa3_ctx->rt_tbl_cache);
4824fail_rt_tbl_cache:
4825 kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache);
4826fail_hdr_proc_ctx_offset_cache:
4827 kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache);
4828fail_hdr_proc_ctx_cache:
4829 kmem_cache_destroy(ipa3_ctx->hdr_offset_cache);
4830fail_hdr_offset_cache:
4831 kmem_cache_destroy(ipa3_ctx->hdr_cache);
4832fail_hdr_cache:
4833 kmem_cache_destroy(ipa3_ctx->rt_rule_cache);
4834fail_rt_rule_cache:
4835 kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
4836fail_flt_rule_cache:
4837 destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
4838fail_create_transport_wq:
4839 destroy_workqueue(ipa3_ctx->power_mgmt_wq);
4840fail_init_hw:
4841 ipahal_destroy();
4842fail_ipahal:
4843 iounmap(ipa3_ctx->mmio);
4844fail_remap:
4845 ipa3_disable_clks();
4846 ipa3_active_clients_log_destroy();
4847fail_init_active_client:
Ghanim Fodi6a831342017-03-07 18:19:15 +02004848 if (ipa3_clk)
4849 clk_put(ipa3_clk);
4850 ipa3_clk = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03004851fail_clk:
4852 msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
4853fail_bus_reg:
Ghanim Fodi6a831342017-03-07 18:19:15 +02004854 if (ipa3_bus_scale_table) {
4855 msm_bus_cl_clear_pdata(ipa3_bus_scale_table);
4856 ipa3_bus_scale_table = NULL;
4857 }
Amir Levy9659e592016-10-27 18:08:27 +03004858fail_init_mem_partition:
4859fail_bind:
4860 kfree(ipa3_ctx->ctrl);
4861fail_mem_ctrl:
Gidon Studinski3021a6f2016-11-10 12:48:48 +02004862 kfree(ipa3_ctx->ipa_tz_unlock_reg);
4863fail_tz_unlock_reg:
Skylar Chang841c1452017-04-03 16:07:22 -07004864 if (ipa3_ctx->logbuf)
4865 ipc_log_context_destroy(ipa3_ctx->logbuf);
Amir Levy9659e592016-10-27 18:08:27 +03004866 kfree(ipa3_ctx);
4867 ipa3_ctx = NULL;
4868fail_mem_ctx:
4869 return result;
4870}
4871
4872static int get_ipa_dts_configuration(struct platform_device *pdev,
4873 struct ipa3_plat_drv_res *ipa_drv_res)
4874{
Gidon Studinski3021a6f2016-11-10 12:48:48 +02004875 int i, result, pos;
Amir Levy9659e592016-10-27 18:08:27 +03004876 struct resource *resource;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02004877 u32 *ipa_tz_unlock_reg;
4878 int elem_num;
Amir Levy9659e592016-10-27 18:08:27 +03004879
4880 /* initialize ipa3_res */
4881 ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
4882 ipa_drv_res->ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
4883 ipa_drv_res->ipa_hw_type = 0;
4884 ipa_drv_res->ipa3_hw_mode = 0;
Amir Levy9659e592016-10-27 18:08:27 +03004885 ipa_drv_res->modem_cfg_emb_pipe_flt = false;
4886 ipa_drv_res->ipa_wdi2 = false;
4887 ipa_drv_res->use_64_bit_dma_mask = false;
Ghanim Fodi6a831342017-03-07 18:19:15 +02004888 ipa_drv_res->use_bw_vote = false;
Amir Levy9659e592016-10-27 18:08:27 +03004889 ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
4890 ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
4891 ipa_drv_res->apply_rg10_wa = false;
4892 ipa_drv_res->gsi_ch20_wa = false;
Gidon Studinski3021a6f2016-11-10 12:48:48 +02004893 ipa_drv_res->ipa_tz_unlock_reg_num = 0;
4894 ipa_drv_res->ipa_tz_unlock_reg = NULL;
Amir Levy9659e592016-10-27 18:08:27 +03004895
4896 /* Get IPA HW Version */
4897 result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
4898 &ipa_drv_res->ipa_hw_type);
4899 if ((result) || (ipa_drv_res->ipa_hw_type == 0)) {
4900 IPAERR(":get resource failed for ipa-hw-ver!\n");
4901 return -ENODEV;
4902 }
4903 IPADBG(": ipa_hw_type = %d", ipa_drv_res->ipa_hw_type);
4904
4905 if (ipa_drv_res->ipa_hw_type < IPA_HW_v3_0) {
4906 IPAERR(":IPA version below 3.0 not supported!\n");
4907 return -ENODEV;
4908 }
4909
4910 /* Get IPA HW mode */
4911 result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-mode",
4912 &ipa_drv_res->ipa3_hw_mode);
4913 if (result)
4914 IPADBG("using default (IPA_MODE_NORMAL) for ipa-hw-mode\n");
4915 else
4916 IPADBG(": found ipa_drv_res->ipa3_hw_mode = %d",
4917 ipa_drv_res->ipa3_hw_mode);
4918
4919 /* Get IPA WAN / LAN RX pool size */
4920 result = of_property_read_u32(pdev->dev.of_node,
4921 "qcom,wan-rx-ring-size",
4922 &ipa_drv_res->wan_rx_ring_size);
4923 if (result)
4924 IPADBG("using default for wan-rx-ring-size = %u\n",
4925 ipa_drv_res->wan_rx_ring_size);
4926 else
4927 IPADBG(": found ipa_drv_res->wan-rx-ring-size = %u",
4928 ipa_drv_res->wan_rx_ring_size);
4929
4930 result = of_property_read_u32(pdev->dev.of_node,
4931 "qcom,lan-rx-ring-size",
4932 &ipa_drv_res->lan_rx_ring_size);
4933 if (result)
4934 IPADBG("using default for lan-rx-ring-size = %u\n",
4935 ipa_drv_res->lan_rx_ring_size);
4936 else
4937 IPADBG(": found ipa_drv_res->lan-rx-ring-size = %u",
4938 ipa_drv_res->lan_rx_ring_size);
4939
4940 ipa_drv_res->use_ipa_teth_bridge =
4941 of_property_read_bool(pdev->dev.of_node,
4942 "qcom,use-ipa-tethering-bridge");
4943 IPADBG(": using TBDr = %s",
4944 ipa_drv_res->use_ipa_teth_bridge
4945 ? "True" : "False");
4946
Amir Levy9659e592016-10-27 18:08:27 +03004947 ipa_drv_res->modem_cfg_emb_pipe_flt =
4948 of_property_read_bool(pdev->dev.of_node,
4949 "qcom,modem-cfg-emb-pipe-flt");
4950 IPADBG(": modem configure embedded pipe filtering = %s\n",
4951 ipa_drv_res->modem_cfg_emb_pipe_flt
4952 ? "True" : "False");
4953
4954 ipa_drv_res->ipa_wdi2 =
4955 of_property_read_bool(pdev->dev.of_node,
4956 "qcom,ipa-wdi2");
4957 IPADBG(": WDI-2.0 = %s\n",
4958 ipa_drv_res->ipa_wdi2
4959 ? "True" : "False");
4960
4961 ipa_drv_res->use_64_bit_dma_mask =
4962 of_property_read_bool(pdev->dev.of_node,
4963 "qcom,use-64-bit-dma-mask");
4964 IPADBG(": use_64_bit_dma_mask = %s\n",
4965 ipa_drv_res->use_64_bit_dma_mask
4966 ? "True" : "False");
4967
Ghanim Fodi6a831342017-03-07 18:19:15 +02004968 ipa_drv_res->use_bw_vote =
4969 of_property_read_bool(pdev->dev.of_node,
4970 "qcom,bandwidth-vote-for-ipa");
4971 IPADBG(": use_bw_vote = %s\n",
4972 ipa_drv_res->use_bw_vote
4973 ? "True" : "False");
4974
Amir Levy9659e592016-10-27 18:08:27 +03004975 ipa_drv_res->skip_uc_pipe_reset =
4976 of_property_read_bool(pdev->dev.of_node,
4977 "qcom,skip-uc-pipe-reset");
4978 IPADBG(": skip uC pipe reset = %s\n",
4979 ipa_drv_res->skip_uc_pipe_reset
4980 ? "True" : "False");
4981
4982 ipa_drv_res->tethered_flow_control =
4983 of_property_read_bool(pdev->dev.of_node,
4984 "qcom,tethered-flow-control");
4985 IPADBG(": Use apps based flow control = %s\n",
4986 ipa_drv_res->tethered_flow_control
4987 ? "True" : "False");
4988
Amir Levy9659e592016-10-27 18:08:27 +03004989 /* Get IPA wrapper address */
4990 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
4991 "ipa-base");
4992 if (!resource) {
4993 IPAERR(":get resource failed for ipa-base!\n");
4994 return -ENODEV;
4995 }
4996 ipa_drv_res->ipa_mem_base = resource->start;
4997 ipa_drv_res->ipa_mem_size = resource_size(resource);
4998 IPADBG(": ipa-base = 0x%x, size = 0x%x\n",
4999 ipa_drv_res->ipa_mem_base,
5000 ipa_drv_res->ipa_mem_size);
5001
5002 smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
5003 smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
5004
Amir Levya59ed3f2017-03-05 17:30:55 +02005005 /* Get IPA GSI address */
5006 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
5007 "gsi-base");
5008 if (!resource) {
5009 IPAERR(":get resource failed for gsi-base!\n");
5010 return -ENODEV;
Amir Levy9659e592016-10-27 18:08:27 +03005011 }
Amir Levya59ed3f2017-03-05 17:30:55 +02005012 ipa_drv_res->transport_mem_base = resource->start;
5013 ipa_drv_res->transport_mem_size = resource_size(resource);
5014 IPADBG(": gsi-base = 0x%x, size = 0x%x\n",
5015 ipa_drv_res->transport_mem_base,
5016 ipa_drv_res->transport_mem_size);
5017
5018 /* Get IPA GSI IRQ number */
5019 resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
5020 "gsi-irq");
5021 if (!resource) {
5022 IPAERR(":get resource failed for gsi-irq!\n");
5023 return -ENODEV;
5024 }
5025 ipa_drv_res->transport_irq = resource->start;
5026 IPADBG(": gsi-irq = %d\n", ipa_drv_res->transport_irq);
Amir Levy9659e592016-10-27 18:08:27 +03005027
5028 /* Get IPA pipe mem start ofst */
5029 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
5030 "ipa-pipe-mem");
5031 if (!resource) {
5032 IPADBG(":not using pipe memory - resource nonexisting\n");
5033 } else {
5034 ipa_drv_res->ipa_pipe_mem_start_ofst = resource->start;
5035 ipa_drv_res->ipa_pipe_mem_size = resource_size(resource);
5036 IPADBG(":using pipe memory - at 0x%x of size 0x%x\n",
5037 ipa_drv_res->ipa_pipe_mem_start_ofst,
5038 ipa_drv_res->ipa_pipe_mem_size);
5039 }
5040
5041 /* Get IPA IRQ number */
5042 resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
5043 "ipa-irq");
5044 if (!resource) {
5045 IPAERR(":get resource failed for ipa-irq!\n");
5046 return -ENODEV;
5047 }
5048 ipa_drv_res->ipa_irq = resource->start;
5049 IPADBG(":ipa-irq = %d\n", ipa_drv_res->ipa_irq);
5050
5051 result = of_property_read_u32(pdev->dev.of_node, "qcom,ee",
5052 &ipa_drv_res->ee);
5053 if (result)
5054 ipa_drv_res->ee = 0;
5055
5056 ipa_drv_res->apply_rg10_wa =
5057 of_property_read_bool(pdev->dev.of_node,
5058 "qcom,use-rg10-limitation-mitigation");
5059 IPADBG(": Use Register Group 10 limitation mitigation = %s\n",
5060 ipa_drv_res->apply_rg10_wa
5061 ? "True" : "False");
5062
5063 ipa_drv_res->gsi_ch20_wa =
5064 of_property_read_bool(pdev->dev.of_node,
5065 "qcom,do-not-use-ch-gsi-20");
5066 IPADBG(": GSI CH 20 WA is = %s\n",
5067 ipa_drv_res->apply_rg10_wa
5068 ? "Needed" : "Not needed");
5069
Gidon Studinski3021a6f2016-11-10 12:48:48 +02005070 elem_num = of_property_count_elems_of_size(pdev->dev.of_node,
5071 "qcom,ipa-tz-unlock-reg", sizeof(u32));
5072
5073 if (elem_num > 0 && elem_num % 2 == 0) {
5074 ipa_drv_res->ipa_tz_unlock_reg_num = elem_num / 2;
5075
5076 ipa_tz_unlock_reg = kcalloc(elem_num, sizeof(u32), GFP_KERNEL);
5077 if (ipa_tz_unlock_reg == NULL)
5078 return -ENOMEM;
5079
5080 ipa_drv_res->ipa_tz_unlock_reg = kcalloc(
5081 ipa_drv_res->ipa_tz_unlock_reg_num,
5082 sizeof(*ipa_drv_res->ipa_tz_unlock_reg),
5083 GFP_KERNEL);
5084 if (ipa_drv_res->ipa_tz_unlock_reg == NULL) {
5085 kfree(ipa_tz_unlock_reg);
5086 return -ENOMEM;
5087 }
5088
5089 if (of_property_read_u32_array(pdev->dev.of_node,
5090 "qcom,ipa-tz-unlock-reg", ipa_tz_unlock_reg,
5091 elem_num)) {
5092 IPAERR("failed to read register addresses\n");
5093 kfree(ipa_tz_unlock_reg);
5094 kfree(ipa_drv_res->ipa_tz_unlock_reg);
5095 return -EFAULT;
5096 }
5097
5098 pos = 0;
5099 for (i = 0; i < ipa_drv_res->ipa_tz_unlock_reg_num; i++) {
5100 ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr =
5101 ipa_tz_unlock_reg[pos++];
5102 ipa_drv_res->ipa_tz_unlock_reg[i].size =
5103 ipa_tz_unlock_reg[pos++];
5104 IPADBG("tz unlock reg %d: addr 0x%pa size %d\n", i,
5105 &ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr,
5106 ipa_drv_res->ipa_tz_unlock_reg[i].size);
5107 }
5108 kfree(ipa_tz_unlock_reg);
5109 }
Amir Levy9659e592016-10-27 18:08:27 +03005110 return 0;
5111}
5112
5113static int ipa_smmu_wlan_cb_probe(struct device *dev)
5114{
5115 struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx();
Amir Levy9659e592016-10-27 18:08:27 +03005116 int atomic_ctx = 1;
5117 int fast = 1;
5118 int bypass = 1;
5119 int ret;
5120 u32 add_map_size;
5121 const u32 *add_map;
5122 int i;
5123
5124 IPADBG("sub pdev=%p\n", dev);
5125
5126 cb->dev = dev;
Amir Levyf5625342016-12-25 10:21:02 +02005127 cb->iommu = iommu_domain_alloc(dev->bus);
Amir Levy9659e592016-10-27 18:08:27 +03005128 if (!cb->iommu) {
5129 IPAERR("could not alloc iommu domain\n");
5130 /* assume this failure is because iommu driver is not ready */
5131 return -EPROBE_DEFER;
5132 }
5133 cb->valid = true;
5134
Amir Levy9659e592016-10-27 18:08:27 +03005135 if (smmu_info.s1_bypass) {
5136 if (iommu_domain_set_attr(cb->iommu,
5137 DOMAIN_ATTR_S1_BYPASS,
5138 &bypass)) {
5139 IPAERR("couldn't set bypass\n");
5140 cb->valid = false;
5141 return -EIO;
5142 }
5143 IPADBG("SMMU S1 BYPASS\n");
5144 } else {
5145 if (iommu_domain_set_attr(cb->iommu,
5146 DOMAIN_ATTR_ATOMIC,
5147 &atomic_ctx)) {
5148 IPAERR("couldn't disable coherent HTW\n");
5149 cb->valid = false;
5150 return -EIO;
5151 }
5152 IPADBG("SMMU ATTR ATOMIC\n");
5153
5154 if (smmu_info.fast_map) {
5155 if (iommu_domain_set_attr(cb->iommu,
5156 DOMAIN_ATTR_FAST,
5157 &fast)) {
5158 IPAERR("couldn't set fast map\n");
5159 cb->valid = false;
5160 return -EIO;
5161 }
5162 IPADBG("SMMU fast map set\n");
5163 }
5164 }
5165
5166 ret = iommu_attach_device(cb->iommu, dev);
5167 if (ret) {
5168 IPAERR("could not attach device ret=%d\n", ret);
5169 cb->valid = false;
5170 return ret;
5171 }
5172 /* MAP ipa-uc ram */
5173 add_map = of_get_property(dev->of_node,
5174 "qcom,additional-mapping", &add_map_size);
5175 if (add_map) {
5176 /* mapping size is an array of 3-tuple of u32 */
5177 if (add_map_size % (3 * sizeof(u32))) {
5178 IPAERR("wrong additional mapping format\n");
5179 cb->valid = false;
5180 return -EFAULT;
5181 }
5182
5183 /* iterate of each entry of the additional mapping array */
5184 for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
5185 u32 iova = be32_to_cpu(add_map[i]);
5186 u32 pa = be32_to_cpu(add_map[i + 1]);
5187 u32 size = be32_to_cpu(add_map[i + 2]);
5188 unsigned long iova_p;
5189 phys_addr_t pa_p;
5190 u32 size_p;
5191
5192 IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
5193 iova_p, pa_p, size_p);
5194 IPADBG("mapping 0x%lx to 0x%pa size %d\n",
5195 iova_p, &pa_p, size_p);
5196 ipa3_iommu_map(cb->iommu,
5197 iova_p, pa_p, size_p,
Amir Levyf5625342016-12-25 10:21:02 +02005198 IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
Amir Levy9659e592016-10-27 18:08:27 +03005199 }
5200 }
5201 return 0;
5202}
5203
5204static int ipa_smmu_uc_cb_probe(struct device *dev)
5205{
5206 struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
Amir Levy9659e592016-10-27 18:08:27 +03005207 int atomic_ctx = 1;
5208 int bypass = 1;
5209 int fast = 1;
5210 int ret;
5211 u32 iova_ap_mapping[2];
5212
5213 IPADBG("UC CB PROBE sub pdev=%p\n", dev);
5214
5215 ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
5216 iova_ap_mapping, 2);
5217 if (ret) {
5218 IPAERR("Fail to read UC start/size iova addresses\n");
5219 return ret;
5220 }
5221 cb->va_start = iova_ap_mapping[0];
5222 cb->va_size = iova_ap_mapping[1];
5223 cb->va_end = cb->va_start + cb->va_size;
5224 IPADBG("UC va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);
5225
5226 if (smmu_info.use_64_bit_dma_mask) {
5227 if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
5228 dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
5229 IPAERR("DMA set 64bit mask failed\n");
5230 return -EOPNOTSUPP;
5231 }
5232 } else {
5233 if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
5234 dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
5235 IPAERR("DMA set 32bit mask failed\n");
5236 return -EOPNOTSUPP;
5237 }
5238 }
5239 IPADBG("UC CB PROBE=%p create IOMMU mapping\n", dev);
5240
5241 cb->dev = dev;
Amir Levyf5625342016-12-25 10:21:02 +02005242 cb->mapping = arm_iommu_create_mapping(dev->bus,
Amir Levy9659e592016-10-27 18:08:27 +03005243 cb->va_start, cb->va_size);
5244 if (IS_ERR_OR_NULL(cb->mapping)) {
5245 IPADBG("Fail to create mapping\n");
5246 /* assume this failure is because iommu driver is not ready */
5247 return -EPROBE_DEFER;
5248 }
5249 IPADBG("SMMU mapping created\n");
5250 cb->valid = true;
5251
Amir Levy9659e592016-10-27 18:08:27 +03005252 IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
5253 if (smmu_info.s1_bypass) {
5254 if (iommu_domain_set_attr(cb->mapping->domain,
5255 DOMAIN_ATTR_S1_BYPASS,
5256 &bypass)) {
5257 IPAERR("couldn't set bypass\n");
5258 arm_iommu_release_mapping(cb->mapping);
5259 cb->valid = false;
5260 return -EIO;
5261 }
5262 IPADBG("SMMU S1 BYPASS\n");
5263 } else {
5264 if (iommu_domain_set_attr(cb->mapping->domain,
5265 DOMAIN_ATTR_ATOMIC,
5266 &atomic_ctx)) {
5267 IPAERR("couldn't set domain as atomic\n");
5268 arm_iommu_release_mapping(cb->mapping);
5269 cb->valid = false;
5270 return -EIO;
5271 }
5272 IPADBG("SMMU atomic set\n");
5273
5274 if (smmu_info.fast_map) {
5275 if (iommu_domain_set_attr(cb->mapping->domain,
5276 DOMAIN_ATTR_FAST,
5277 &fast)) {
5278 IPAERR("couldn't set fast map\n");
5279 arm_iommu_release_mapping(cb->mapping);
5280 cb->valid = false;
5281 return -EIO;
5282 }
5283 IPADBG("SMMU fast map set\n");
5284 }
5285 }
5286
5287 IPADBG("UC CB PROBE sub pdev=%p attaching IOMMU device\n", dev);
5288 ret = arm_iommu_attach_device(cb->dev, cb->mapping);
5289 if (ret) {
5290 IPAERR("could not attach device ret=%d\n", ret);
5291 arm_iommu_release_mapping(cb->mapping);
5292 cb->valid = false;
5293 return ret;
5294 }
5295
5296 cb->next_addr = cb->va_end;
5297 ipa3_ctx->uc_pdev = dev;
5298
5299 return 0;
5300}
5301
5302static int ipa_smmu_ap_cb_probe(struct device *dev)
5303{
5304 struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
5305 int result;
Amir Levy9659e592016-10-27 18:08:27 +03005306 int atomic_ctx = 1;
5307 int fast = 1;
5308 int bypass = 1;
5309 u32 iova_ap_mapping[2];
5310 u32 add_map_size;
5311 const u32 *add_map;
5312 void *smem_addr;
5313 int i;
5314
5315 IPADBG("AP CB probe: sub pdev=%p\n", dev);
5316
5317 result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
5318 iova_ap_mapping, 2);
5319 if (result) {
5320 IPAERR("Fail to read AP start/size iova addresses\n");
5321 return result;
5322 }
5323 cb->va_start = iova_ap_mapping[0];
5324 cb->va_size = iova_ap_mapping[1];
5325 cb->va_end = cb->va_start + cb->va_size;
5326 IPADBG("AP va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size);
5327
5328 if (smmu_info.use_64_bit_dma_mask) {
5329 if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
5330 dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
5331 IPAERR("DMA set 64bit mask failed\n");
5332 return -EOPNOTSUPP;
5333 }
5334 } else {
5335 if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
5336 dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
5337 IPAERR("DMA set 32bit mask failed\n");
5338 return -EOPNOTSUPP;
5339 }
5340 }
5341
5342 cb->dev = dev;
Amir Levyf5625342016-12-25 10:21:02 +02005343 cb->mapping = arm_iommu_create_mapping(dev->bus,
Amir Levy9659e592016-10-27 18:08:27 +03005344 cb->va_start, cb->va_size);
5345 if (IS_ERR_OR_NULL(cb->mapping)) {
5346 IPADBG("Fail to create mapping\n");
5347 /* assume this failure is because iommu driver is not ready */
5348 return -EPROBE_DEFER;
5349 }
5350 IPADBG("SMMU mapping created\n");
5351 cb->valid = true;
5352
Amir Levy9659e592016-10-27 18:08:27 +03005353 if (smmu_info.s1_bypass) {
5354 if (iommu_domain_set_attr(cb->mapping->domain,
5355 DOMAIN_ATTR_S1_BYPASS,
5356 &bypass)) {
5357 IPAERR("couldn't set bypass\n");
5358 arm_iommu_release_mapping(cb->mapping);
5359 cb->valid = false;
5360 return -EIO;
5361 }
5362 IPADBG("SMMU S1 BYPASS\n");
5363 } else {
5364 if (iommu_domain_set_attr(cb->mapping->domain,
5365 DOMAIN_ATTR_ATOMIC,
5366 &atomic_ctx)) {
5367 IPAERR("couldn't set domain as atomic\n");
5368 arm_iommu_release_mapping(cb->mapping);
5369 cb->valid = false;
5370 return -EIO;
5371 }
5372 IPADBG("SMMU atomic set\n");
5373
5374 if (iommu_domain_set_attr(cb->mapping->domain,
5375 DOMAIN_ATTR_FAST,
5376 &fast)) {
5377 IPAERR("couldn't set fast map\n");
5378 arm_iommu_release_mapping(cb->mapping);
5379 cb->valid = false;
5380 return -EIO;
5381 }
5382 IPADBG("SMMU fast map set\n");
5383 }
5384
5385 result = arm_iommu_attach_device(cb->dev, cb->mapping);
5386 if (result) {
5387 IPAERR("couldn't attach to IOMMU ret=%d\n", result);
5388 cb->valid = false;
5389 return result;
5390 }
5391
5392 add_map = of_get_property(dev->of_node,
5393 "qcom,additional-mapping", &add_map_size);
5394 if (add_map) {
5395 /* mapping size is an array of 3-tuple of u32 */
5396 if (add_map_size % (3 * sizeof(u32))) {
5397 IPAERR("wrong additional mapping format\n");
5398 cb->valid = false;
5399 return -EFAULT;
5400 }
5401
5402 /* iterate of each entry of the additional mapping array */
5403 for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
5404 u32 iova = be32_to_cpu(add_map[i]);
5405 u32 pa = be32_to_cpu(add_map[i + 1]);
5406 u32 size = be32_to_cpu(add_map[i + 2]);
5407 unsigned long iova_p;
5408 phys_addr_t pa_p;
5409 u32 size_p;
5410
5411 IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
5412 iova_p, pa_p, size_p);
5413 IPADBG("mapping 0x%lx to 0x%pa size %d\n",
5414 iova_p, &pa_p, size_p);
5415 ipa3_iommu_map(cb->mapping->domain,
5416 iova_p, pa_p, size_p,
Amir Levyf5625342016-12-25 10:21:02 +02005417 IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
Amir Levy9659e592016-10-27 18:08:27 +03005418 }
5419 }
5420
5421 /* map SMEM memory for IPA table accesses */
5422 smem_addr = smem_alloc(SMEM_IPA_FILTER_TABLE, IPA_SMEM_SIZE,
5423 SMEM_MODEM, 0);
5424 if (smem_addr) {
5425 phys_addr_t iova = smem_virt_to_phys(smem_addr);
5426 phys_addr_t pa = iova;
5427 unsigned long iova_p;
5428 phys_addr_t pa_p;
5429 u32 size_p;
5430
5431 IPA_SMMU_ROUND_TO_PAGE(iova, pa, IPA_SMEM_SIZE,
5432 iova_p, pa_p, size_p);
5433 IPADBG("mapping 0x%lx to 0x%pa size %d\n",
5434 iova_p, &pa_p, size_p);
5435 ipa3_iommu_map(cb->mapping->domain,
5436 iova_p, pa_p, size_p,
Amir Levyf5625342016-12-25 10:21:02 +02005437 IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
Amir Levy9659e592016-10-27 18:08:27 +03005438 }
5439
5440
5441 smmu_info.present = true;
5442
5443 if (!ipa3_bus_scale_table)
5444 ipa3_bus_scale_table = msm_bus_cl_get_pdata(ipa3_pdev);
5445
5446 /* Proceed to real initialization */
5447 result = ipa3_pre_init(&ipa3_res, dev);
5448 if (result) {
5449 IPAERR("ipa_init failed\n");
5450 arm_iommu_detach_device(cb->dev);
5451 arm_iommu_release_mapping(cb->mapping);
5452 cb->valid = false;
5453 return result;
5454 }
5455
5456 return result;
5457}
5458
/*
 * ISR for the smp2p "modem clock query" interrupt: freezes the current
 * clock vote and notifies the modem. Runs in hard-IRQ context, so all
 * work is delegated to ipa3_freeze_clock_vote_and_notify_modem().
 */
static irqreturn_t ipa3_smp2p_modem_clk_query_isr(int irq, void *ctxt)
{
	ipa3_freeze_clock_vote_and_notify_modem();

	return IRQ_HANDLED;
}
5465
5466static int ipa3_smp2p_probe(struct device *dev)
5467{
5468 struct device_node *node = dev->of_node;
5469 int res;
5470
5471 IPADBG("node->name=%s\n", node->name);
5472 if (strcmp("qcom,smp2pgpio_map_ipa_1_out", node->name) == 0) {
5473 res = of_get_gpio(node, 0);
5474 if (res < 0) {
5475 IPADBG("of_get_gpio returned %d\n", res);
5476 return res;
5477 }
5478
5479 ipa3_ctx->smp2p_info.out_base_id = res;
5480 IPADBG("smp2p out_base_id=%d\n",
5481 ipa3_ctx->smp2p_info.out_base_id);
5482 } else if (strcmp("qcom,smp2pgpio_map_ipa_1_in", node->name) == 0) {
5483 int irq;
5484
5485 res = of_get_gpio(node, 0);
5486 if (res < 0) {
5487 IPADBG("of_get_gpio returned %d\n", res);
5488 return res;
5489 }
5490
5491 ipa3_ctx->smp2p_info.in_base_id = res;
5492 IPADBG("smp2p in_base_id=%d\n",
5493 ipa3_ctx->smp2p_info.in_base_id);
5494
5495 /* register for modem clk query */
5496 irq = gpio_to_irq(ipa3_ctx->smp2p_info.in_base_id +
5497 IPA_GPIO_IN_QUERY_CLK_IDX);
5498 if (irq < 0) {
5499 IPAERR("gpio_to_irq failed %d\n", irq);
5500 return -ENODEV;
5501 }
5502 IPADBG("smp2p irq#=%d\n", irq);
5503 res = request_irq(irq,
5504 (irq_handler_t)ipa3_smp2p_modem_clk_query_isr,
5505 IRQF_TRIGGER_RISING, "ipa_smp2p_clk_vote", dev);
5506 if (res) {
5507 IPAERR("fail to register smp2p irq=%d\n", irq);
5508 return -ENODEV;
5509 }
5510 res = enable_irq_wake(ipa3_ctx->smp2p_info.in_base_id +
5511 IPA_GPIO_IN_QUERY_CLK_IDX);
5512 if (res)
5513 IPAERR("failed to enable irq wake\n");
5514 }
5515
5516 return 0;
5517}
5518
5519int ipa3_plat_drv_probe(struct platform_device *pdev_p,
5520 struct ipa_api_controller *api_ctrl,
5521 const struct of_device_id *pdrv_match)
5522{
5523 int result;
5524 struct device *dev = &pdev_p->dev;
5525
5526 IPADBG("IPA driver probing started\n");
5527 IPADBG("dev->of_node->name = %s\n", dev->of_node->name);
5528
5529 if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-ap-cb"))
5530 return ipa_smmu_ap_cb_probe(dev);
5531
5532 if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-wlan-cb"))
5533 return ipa_smmu_wlan_cb_probe(dev);
5534
5535 if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-uc-cb"))
5536 return ipa_smmu_uc_cb_probe(dev);
5537
5538 if (of_device_is_compatible(dev->of_node,
5539 "qcom,smp2pgpio-map-ipa-1-in"))
5540 return ipa3_smp2p_probe(dev);
5541
5542 if (of_device_is_compatible(dev->of_node,
5543 "qcom,smp2pgpio-map-ipa-1-out"))
5544 return ipa3_smp2p_probe(dev);
5545
5546 master_dev = dev;
5547 if (!ipa3_pdev)
5548 ipa3_pdev = pdev_p;
5549
5550 result = get_ipa_dts_configuration(pdev_p, &ipa3_res);
5551 if (result) {
5552 IPAERR("IPA dts parsing failed\n");
5553 return result;
5554 }
5555
5556 result = ipa3_bind_api_controller(ipa3_res.ipa_hw_type, api_ctrl);
5557 if (result) {
5558 IPAERR("IPA API binding failed\n");
5559 return result;
5560 }
5561
Amir Levy9659e592016-10-27 18:08:27 +03005562 if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
5563 if (of_property_read_bool(pdev_p->dev.of_node,
5564 "qcom,smmu-s1-bypass"))
5565 smmu_info.s1_bypass = true;
5566 if (of_property_read_bool(pdev_p->dev.of_node,
5567 "qcom,smmu-fast-map"))
5568 smmu_info.fast_map = true;
5569 if (of_property_read_bool(pdev_p->dev.of_node,
5570 "qcom,use-64-bit-dma-mask"))
5571 smmu_info.use_64_bit_dma_mask = true;
5572 smmu_info.arm_smmu = true;
5573 pr_info("IPA smmu_info.s1_bypass=%d smmu_info.fast_map=%d\n",
5574 smmu_info.s1_bypass, smmu_info.fast_map);
5575 } else if (of_property_read_bool(pdev_p->dev.of_node,
5576 "qcom,msm-smmu")) {
5577 IPAERR("Legacy IOMMU not supported\n");
5578 result = -EOPNOTSUPP;
5579 } else {
5580 if (of_property_read_bool(pdev_p->dev.of_node,
5581 "qcom,use-64-bit-dma-mask")) {
5582 if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(64)) ||
5583 dma_set_coherent_mask(&pdev_p->dev,
5584 DMA_BIT_MASK(64))) {
5585 IPAERR("DMA set 64bit mask failed\n");
5586 return -EOPNOTSUPP;
5587 }
5588 } else {
5589 if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(32)) ||
5590 dma_set_coherent_mask(&pdev_p->dev,
5591 DMA_BIT_MASK(32))) {
5592 IPAERR("DMA set 32bit mask failed\n");
5593 return -EOPNOTSUPP;
5594 }
5595 }
5596
5597 if (!ipa3_bus_scale_table)
5598 ipa3_bus_scale_table = msm_bus_cl_get_pdata(pdev_p);
5599 /* Proceed to real initialization */
5600 result = ipa3_pre_init(&ipa3_res, dev);
5601 if (result) {
5602 IPAERR("ipa3_init failed\n");
5603 return result;
5604 }
5605 }
5606
Ghanim Fodi115bf8a2017-04-21 01:36:06 -07005607 result = of_platform_populate(pdev_p->dev.of_node,
5608 pdrv_match, NULL, &pdev_p->dev);
5609 if (result) {
5610 IPAERR("failed to populate platform\n");
5611 return result;
5612 }
5613
Amir Levy9659e592016-10-27 18:08:27 +03005614 return result;
5615}
5616
5617/**
5618 * ipa3_ap_suspend() - suspend callback for runtime_pm
5619 * @dev: pointer to device
5620 *
5621 * This callback will be invoked by the runtime_pm framework when an AP suspend
5622 * operation is invoked, usually by pressing a suspend button.
5623 *
5624 * Returns -EAGAIN to runtime_pm framework in case IPA is in use by AP.
5625 * This will postpone the suspend operation until IPA is no longer used by AP.
5626*/
5627int ipa3_ap_suspend(struct device *dev)
5628{
5629 int i;
5630
5631 IPADBG("Enter...\n");
5632
5633 /* In case there is a tx/rx handler in polling mode fail to suspend */
5634 for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
5635 if (ipa3_ctx->ep[i].sys &&
5636 atomic_read(&ipa3_ctx->ep[i].sys->curr_polling_state)) {
5637 IPAERR("EP %d is in polling state, do not suspend\n",
5638 i);
5639 return -EAGAIN;
5640 }
5641 }
5642
Amir Levya59ed3f2017-03-05 17:30:55 +02005643 /*
5644 * Release transport IPA resource without waiting for inactivity timer
5645 */
Amir Levy9659e592016-10-27 18:08:27 +03005646 atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
Amir Levya59ed3f2017-03-05 17:30:55 +02005647 ipa3_transport_release_resource(NULL);
Amir Levy9659e592016-10-27 18:08:27 +03005648 IPADBG("Exit\n");
5649
5650 return 0;
5651}
5652
5653/**
5654* ipa3_ap_resume() - resume callback for runtime_pm
5655* @dev: pointer to device
5656*
5657* This callback will be invoked by the runtime_pm framework when an AP resume
5658* operation is invoked.
5659*
5660* Always returns 0 since resume should always succeed.
5661*/
int ipa3_ap_resume(struct device *dev)
{
	/* Intentional no-op: resume always succeeds (see kerneldoc above) */
	return 0;
}
5666
/**
 * ipa3_get_ctx() - return the global IPA driver context
 *
 * May return NULL if the driver has not been initialized yet (or its
 * initialization failed, which resets ipa3_ctx to NULL).
 */
struct ipa3_context *ipa3_get_ctx(void)
{
	return ipa3_ctx;
}
5671
Amir Levy9659e592016-10-27 18:08:27 +03005672static void ipa_gsi_notify_cb(struct gsi_per_notify *notify)
5673{
5674 switch (notify->evt_id) {
5675 case GSI_PER_EVT_GLOB_ERROR:
5676 IPAERR("Got GSI_PER_EVT_GLOB_ERROR\n");
5677 IPAERR("Err_desc = 0x%04x\n", notify->data.err_desc);
5678 break;
5679 case GSI_PER_EVT_GLOB_GP1:
5680 IPAERR("Got GSI_PER_EVT_GLOB_GP1\n");
5681 BUG();
5682 break;
5683 case GSI_PER_EVT_GLOB_GP2:
5684 IPAERR("Got GSI_PER_EVT_GLOB_GP2\n");
5685 BUG();
5686 break;
5687 case GSI_PER_EVT_GLOB_GP3:
5688 IPAERR("Got GSI_PER_EVT_GLOB_GP3\n");
5689 BUG();
5690 break;
5691 case GSI_PER_EVT_GENERAL_BREAK_POINT:
5692 IPAERR("Got GSI_PER_EVT_GENERAL_BREAK_POINT\n");
5693 break;
5694 case GSI_PER_EVT_GENERAL_BUS_ERROR:
5695 IPAERR("Got GSI_PER_EVT_GENERAL_BUS_ERROR\n");
5696 BUG();
5697 break;
5698 case GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW:
5699 IPAERR("Got GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW\n");
5700 BUG();
5701 break;
5702 case GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW:
5703 IPAERR("Got GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW\n");
5704 BUG();
5705 break;
5706 default:
5707 IPAERR("Received unexpected evt: %d\n",
5708 notify->evt_id);
5709 BUG();
5710 }
5711}
5712
5713int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data)
5714{
5715 struct ipa3_ready_cb_info *cb_info = NULL;
5716
5717 /* check ipa3_ctx existed or not */
5718 if (!ipa3_ctx) {
5719 IPADBG("IPA driver haven't initialized\n");
5720 return -ENXIO;
5721 }
5722 mutex_lock(&ipa3_ctx->lock);
5723 if (ipa3_ctx->ipa_initialization_complete) {
5724 mutex_unlock(&ipa3_ctx->lock);
5725 IPADBG("IPA driver finished initialization already\n");
5726 return -EEXIST;
5727 }
5728
5729 cb_info = kmalloc(sizeof(struct ipa3_ready_cb_info), GFP_KERNEL);
5730 if (!cb_info) {
5731 mutex_unlock(&ipa3_ctx->lock);
5732 return -ENOMEM;
5733 }
5734
5735 cb_info->ready_cb = ipa_ready_cb;
5736 cb_info->user_data = user_data;
5737
5738 list_add_tail(&cb_info->link, &ipa3_ctx->ipa_ready_cb_list);
5739 mutex_unlock(&ipa3_ctx->lock);
5740
5741 return 0;
5742}
5743
5744int ipa3_iommu_map(struct iommu_domain *domain,
5745 unsigned long iova, phys_addr_t paddr, size_t size, int prot)
5746{
5747 struct ipa_smmu_cb_ctx *ap_cb = ipa3_get_smmu_ctx();
5748 struct ipa_smmu_cb_ctx *uc_cb = ipa3_get_uc_smmu_ctx();
5749
5750 IPADBG("domain =0x%p iova 0x%lx\n", domain, iova);
5751 IPADBG("paddr =0x%pa size 0x%x\n", &paddr, (u32)size);
5752
5753 /* make sure no overlapping */
5754 if (domain == ipa3_get_smmu_domain()) {
5755 if (iova >= ap_cb->va_start && iova < ap_cb->va_end) {
5756 IPAERR("iommu AP overlap addr 0x%lx\n", iova);
5757 ipa_assert();
5758 return -EFAULT;
5759 }
5760 } else if (domain == ipa3_get_wlan_smmu_domain()) {
5761 /* wlan is one time map */
5762 } else if (domain == ipa3_get_uc_smmu_domain()) {
5763 if (iova >= uc_cb->va_start && iova < uc_cb->va_end) {
5764 IPAERR("iommu uC overlap addr 0x%lx\n", iova);
5765 ipa_assert();
5766 return -EFAULT;
5767 }
5768 } else {
5769 IPAERR("Unexpected domain 0x%p\n", domain);
5770 ipa_assert();
5771 return -EFAULT;
5772 }
5773
5774 return iommu_map(domain, iova, paddr, size, prot);
5775}
5776
5777MODULE_LICENSE("GPL v2");
5778MODULE_DESCRIPTION("IPA HW device driver");