blob: 35eea02fd4778537b875df8fd8a74e5bcd9218e9 [file] [log] [blame]
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001/*
Sathish Ambleyae5ee542017-01-16 22:24:23 -08002 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
29#include <soc/qcom/subsystem_notif.h>
30#include <soc/qcom/subsystem_restart.h>
31#include <linux/scatterlist.h>
32#include <linux/fs.h>
33#include <linux/uaccess.h>
34#include <linux/device.h>
35#include <linux/of.h>
36#include <linux/of_address.h>
37#include <linux/of_platform.h>
38#include <linux/dma-contiguous.h>
39#include <linux/cma.h>
40#include <linux/iommu.h>
41#include <linux/kref.h>
42#include <linux/sort.h>
43#include <linux/msm_dma_iommu_mapping.h>
44#include <asm/dma-iommu.h>
c_mtharue1a5ce12017-10-13 20:47:09 +053045#include <soc/qcom/scm.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070046#include "adsprpc_compat.h"
47#include "adsprpc_shared.h"
c_mtharue1a5ce12017-10-13 20:47:09 +053048#include <soc/qcom/ramdump.h>
Sathish Ambley1ca68232017-01-19 10:32:55 -080049#include <linux/debugfs.h>
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053050#include <linux/pm_qos.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070051#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
52#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
53#define TZ_PIL_AUTH_QDSP6_PROC 1
c_mtharue1a5ce12017-10-13 20:47:09 +053054#define ADSP_MMAP_HEAP_ADDR 4
55#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
Sathish Ambley69e1ab02016-10-18 10:28:15 -070056#define FASTRPC_ENOSUCH 39
57#define VMID_SSC_Q6 5
58#define VMID_ADSP_Q6 6
Sathish Ambley1ca68232017-01-19 10:32:55 -080059#define DEBUGFS_SIZE 1024
Sathish Ambley69e1ab02016-10-18 10:28:15 -070060
61#define RPC_TIMEOUT (5 * HZ)
62#define BALIGN 128
63#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
64#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
Sathish Ambleybae51902017-07-03 15:00:49 -070065#define M_FDLIST (16)
66#define M_CRCLIST (64)
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +053067#define SESSION_ID_INDEX (30)
c_mtharufdac6892017-10-12 13:09:01 +053068#define FASTRPC_CTX_MAGIC (0xbeeddeed)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070069
70#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
71
72#define FASTRPC_LINK_STATE_DOWN (0x0)
73#define FASTRPC_LINK_STATE_UP (0x1)
74#define FASTRPC_LINK_DISCONNECTED (0x0)
75#define FASTRPC_LINK_CONNECTING (0x1)
76#define FASTRPC_LINK_CONNECTED (0x3)
77#define FASTRPC_LINK_DISCONNECTING (0x7)
c_mtharu314a4202017-11-15 22:09:17 +053078#define FASTRPC_LINK_REMOTE_DISCONNECTING (0x8)
79#define FASTRPC_GLINK_INTENT_LEN (64)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070080
Sathish Ambleya21b5b52017-01-11 16:11:01 -080081#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
82#define FASTRPC_STATIC_HANDLE_LISTENER (3)
83#define FASTRPC_STATIC_HANDLE_MAX (20)
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053084#define FASTRPC_LATENCY_CTRL_ENB (1)
Sathish Ambleya21b5b52017-01-11 16:11:01 -080085
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +053086#define INIT_FILELEN_MAX (2*1024*1024)
87#define INIT_MEMLEN_MAX (8*1024*1024)
88
/* Terminator used by callers of PERF() when no statement is profiled. */
#define PERF_END (void)0

/*
 * PERF(enb, cnt, ff): execute statement @ff; when @enb is true, add the
 * elapsed wall-clock nanoseconds to counter @cnt via getnstimediff().
 * NOTE: @enb is evaluated twice and @ff must be a complete statement.
 */
#define PERF(enb, cnt, ff) \
	{\
		struct timespec startT = {0};\
		if (enb) {\
			getnstimeofday(&startT);\
		} \
		ff ;\
		if (enb) {\
			cnt += getnstimediff(&startT);\
		} \
	}

102
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700103static int fastrpc_glink_open(int cid);
104static void fastrpc_glink_close(void *chan, int cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -0800105static struct dentry *debugfs_root;
106static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700107
108static inline uint64_t buf_page_start(uint64_t buf)
109{
110 uint64_t start = (uint64_t) buf & PAGE_MASK;
111 return start;
112}
113
114static inline uint64_t buf_page_offset(uint64_t buf)
115{
116 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
117 return offset;
118}
119
120static inline int buf_num_pages(uint64_t buf, ssize_t len)
121{
122 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
123 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
124 int nPages = end - start + 1;
125 return nPages;
126}
127
128static inline uint64_t buf_page_size(uint32_t size)
129{
130 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
131
132 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
133}
134
/* Convert a 64-bit value carried over RPC back into a pointer. */
static inline void *uint64_to_ptr(uint64_t addr)
{
	return (void *)(uintptr_t)addr;
}
141
/* Widen a pointer to the 64-bit representation used on the wire. */
static inline uint64_t ptr_to_uint64(void *ptr)
{
	return (uint64_t)(uintptr_t)ptr;
}
148
149struct fastrpc_file;
150
/* A DMA-coherent buffer owned by one fastrpc file session. */
struct fastrpc_buf {
	struct hlist_node hn;		/* node on fl->bufs free-list cache */
	struct fastrpc_file *fl;	/* owning file session */
	void *virt;			/* CPU virtual address from dma_alloc_coherent() */
	uint64_t phys;			/* DMA address; may carry the SMMU cb in bits 63:32 */
	ssize_t size;
};
158
159struct fastrpc_ctx_lst;
160
/*
 * One invoke buffer's address range plus the adjusted window that must
 * actually be copied once overlapping buffers are accounted for
 * (computed in context_build_overlap()).
 */
struct overlap {
	uintptr_t start;	/* buffer start address */
	uintptr_t end;		/* one past the buffer end */
	int raix;		/* index into the original remote-arg array */
	uintptr_t mstart;	/* start of the non-overlapped region to copy */
	uintptr_t mend;		/* end of that region; 0 when fully contained */
	uintptr_t offset;	/* mstart - start */
};
169
/* Per-invocation context: arguments, mappings and completion state. */
struct smq_invoke_ctx {
	struct hlist_node hn;		/* node on clst.pending/interrupted */
	struct completion work;		/* signalled when the remote replies */
	int retval;
	int pid;
	int tgid;
	remote_arg_t *lpra;		/* local (caller-format) args */
	remote_arg64_t *rpra;		/* remote 64-bit args */
	int *fds;			/* per-arg ion fds, optional */
	unsigned int *attrs;		/* per-arg FASTRPC_ATTR_* flags, optional */
	struct fastrpc_mmap **maps;	/* per-arg mappings */
	struct fastrpc_buf *buf;
	ssize_t used;
	struct fastrpc_file *fl;
	uint32_t sc;			/* scalars: method id + buffer counts */
	struct overlap *overs;
	struct overlap **overps;	/* overs sorted by overlap_ptr_cmp() */
	struct smq_msg msg;
	uint32_t *crc;			/* optional user CRC buffer */
	unsigned int magic;		/* FASTRPC_CTX_MAGIC while ctx is live */
};
191
/* In-flight and interrupted invoke contexts for one open file. */
struct fastrpc_ctx_lst {
	struct hlist_head pending;
	struct hlist_head interrupted;
};
196
/* SMMU context-bank state for one session. */
struct fastrpc_smmu {
	struct device *dev;
	struct dma_iommu_mapping *mapping;
	int cb;		/* context-bank id, or'ed into bits 63:32 of phys addrs */
	int enabled;
	int faults;
	int secure;	/* set for the secure-buffer session */
	int coherent;
};
206
/* One DSP compute session: its device plus SMMU state. */
struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};
212
/* G-link transport state for one channel. */
struct fastrpc_glink_info {
	int link_state;		/* FASTRPC_LINK_STATE_{DOWN,UP} */
	int port_state;		/* FASTRPC_LINK_{DISCONNECTED,...} */
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};
220
/* Per-DSP channel state; one gcinfo[] entry per subsystem. */
struct fastrpc_channel_ctx {
	char *name;
	char *subsys;			/* subsystem-restart name */
	void *chan;			/* glink channel handle */
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;
	struct completion workport;
	struct notifier_block nb;	/* subsystem-restart notifier */
	struct kref kref;
	int sesscount;
	int ssrcount;			/* bumped on each subsystem restart */
	void *handle;
	int prevssrcount;
	int issubsystemup;
	int vmid;			/* hypervisor VM id for memory assignment */
	int rhvmid;			/* VM id for the remote heap */
	int ramdumpenabled;
	void *remoteheap_ramdump_dev;
	struct fastrpc_glink_info link;
	struct mutex mut;
};
243
/* Global driver state (singleton, see gfa). */
struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct smq_phy_page range;
	struct hlist_head maps;		/* global (remote-heap) mappings */
	uint32_t staticpd_flags;
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;	/* open fastrpc_file instances */
	spinlock_t hlock;		/* protects maps and drivers */
	struct ion_client *client;
	struct device *dev;		/* adsprpc-mem device for remote heap */
	unsigned int latency;		/* pm_qos latency request value */
};
259
/* A memory region shared with the DSP, ion/dma-buf or remote-heap backed. */
struct fastrpc_mmap {
	struct hlist_node hn;		/* on gfa.maps (heap) or fl->maps */
	struct fastrpc_file *fl;	/* NULL for global heap mappings */
	struct fastrpc_apps *apps;
	int fd;				/* ion/dma-buf fd; identity for lookup */
	uint32_t flags;			/* ADSP_MMAP_* */
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;			/* DMA addr; may carry SMMU cb in bits 63:32 */
	ssize_t size;
	uintptr_t __user va;		/* user virtual address */
	ssize_t len;
	int refs;
	uintptr_t raddr;		/* address on the remote (DSP) side */
	int uncached;
	int secure;
	uintptr_t attr;			/* FASTRPC_ATTR_* */
};
280
/* Per-file profiling accumulators, in nanoseconds; order mirrors PERF_KEYS. */
struct fastrpc_perf {
	int64_t count;
	int64_t flush;
	int64_t map;
	int64_t copy;
	int64_t link;
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;
	int64_t invoke;
};
292
/* Per-open-descriptor driver state. */
struct fastrpc_file {
	struct hlist_node hn;		/* node on gfa.drivers */
	spinlock_t hlock;		/* protects maps, bufs and clst */
	struct hlist_head maps;
	struct hlist_head bufs;		/* cache of reusable DMA buffers */
	struct fastrpc_ctx_lst clst;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_session_ctx *secsctx;	/* lazily-allocated secure session */
	uint32_t mode;
	uint32_t profile;		/* enables the PERF() accounting */
	int sessionid;
	int tgid;
	int cid;			/* channel id: index into apps->channel */
	int ssrcount;
	int pd;
	int file_close;
	struct fastrpc_apps *apps;
	struct fastrpc_perf perf;
	struct dentry *debugfs_file;
	struct pm_qos_request pm_qos_req;
	int qos_request;
};
315
316static struct fastrpc_apps gfa;
317
/*
 * Static channel table, one entry per remote DSP subsystem; the array
 * index is the channel id (cid).  Order: adsp, mdsp, slpi, cdsp —
 * matching the NUM_CHANNELS comment.
 */
static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};
344
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800345static inline int64_t getnstimediff(struct timespec *start)
346{
347 int64_t ns;
348 struct timespec ts, b;
349
350 getnstimeofday(&ts);
351 b = timespec_sub(ts, *start);
352 ns = timespec_to_ns(&b);
353 return ns;
354}
355
/*
 * Release a DMA buffer.  With @cache set the buffer is parked on the
 * owning file's free list for reuse by fastrpc_buf_alloc(); otherwise
 * the pages are (if needed) hyp-assigned back to HLOS and freed.
 */
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		/* park on the per-file cache instead of freeing */
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		/* strip the context-bank id encoded in the upper 32 bits */
		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			/* return ownership of the pages to HLOS before freeing */
			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->smmu.dev, buf->size, buf->virt,
					buf->phys);
	}
	kfree(buf);
}
387
388static void fastrpc_buf_list_free(struct fastrpc_file *fl)
389{
390 struct fastrpc_buf *buf, *free;
391
392 do {
393 struct hlist_node *n;
394
c_mtharue1a5ce12017-10-13 20:47:09 +0530395 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700396 spin_lock(&fl->hlock);
397 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
398 hlist_del_init(&buf->hn);
399 free = buf;
400 break;
401 }
402 spin_unlock(&fl->hlock);
403 if (free)
404 fastrpc_buf_free(free, 0);
405 } while (free);
406}
407
408static void fastrpc_mmap_add(struct fastrpc_mmap *map)
409{
c_mtharue1a5ce12017-10-13 20:47:09 +0530410 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
411 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
412 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700413
c_mtharue1a5ce12017-10-13 20:47:09 +0530414 spin_lock(&me->hlock);
415 hlist_add_head(&map->hn, &me->maps);
416 spin_unlock(&me->hlock);
417 } else {
418 struct fastrpc_file *fl = map->fl;
419
420 spin_lock(&fl->hlock);
421 hlist_add_head(&map->hn, &fl->maps);
422 spin_unlock(&fl->hlock);
423 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700424}
425
/*
 * Find an existing mapping for @fd that covers [va, va+len).  Remote-heap
 * flags search the global list; everything else searches the per-file
 * list.  When @refs is set a reference is taken on the match.
 * Returns 0 and sets *ppmap on success, -ENOTTY when no match exists.
 */
static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
	uintptr_t __user va, ssize_t len, int mflags, int refs,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_mmap *match = NULL, *map = NULL;
	struct hlist_node *n;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
			/* match requires same fd and full containment */
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs)
					map->refs++;
				match = map;
				break;
			}
		}
		spin_unlock(&me->hlock);
	} else {
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs)
					map->refs++;
				match = map;
				break;
			}
		}
		spin_unlock(&fl->hlock);
	}
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}
467
/*
 * Allocate DMA-coherent memory from the adsprpc-mem device for the
 * remote heap; only the physical/DMA address is reported to the caller.
 *
 * NOTE(review): the kernel virtual address returned by
 * dma_alloc_coherent() is discarded here, so callers have no valid
 * cpu_addr to hand back to dma_free_coherent() later — verify the free
 * path (see fastrpc_mmap_free()).
 */
static int dma_alloc_memory(phys_addr_t *region_start, ssize_t size)
{
	struct fastrpc_apps *me = &gfa;
	void *vaddr = NULL;

	if (me->dev == NULL) {
		pr_err("device adsprpc-mem is not initialized\n");
		return -ENODEV;
	}
	vaddr = dma_alloc_coherent(me->dev, size, region_start, GFP_KERNEL);
	if (!vaddr) {
		pr_err("ADSPRPC: Failed to allocate %x remote heap memory\n",
						(unsigned int)size);
		return -ENOMEM;
	}
	return 0;
}
485
/*
 * Unlink the mapping whose remote range is exactly [va, va+len) and
 * which holds a single reference.  The global (remote-heap) list is
 * searched first, then the per-file list.  Returns 0 and passes
 * ownership out via *ppmap, or -ENOTTY when nothing matches.
 */
static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       ssize_t len, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = NULL, *map;
	struct hlist_node *n;
	struct fastrpc_apps *me = &gfa;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
		/* only remove an exact-range, sole-reference mapping */
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}
525
/*
 * Drop a reference on a mapping and tear it down once unreferenced.
 * @flags non-zero forces per-file mappings to be freed even while
 * references remain (used for FASTRPC_ATTR_KEEP_MAP teardown).
 * Remote-heap mappings are returned to the DMA pool; ion mappings are
 * unmapped from the SMMU, hyp-assigned back to HLOS and released.
 */
static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;

	if (!map)
		return;
	fl = map->fl;
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		spin_unlock(&me->hlock);
		if (map->refs > 0)
			return;
	} else {
		spin_lock(&fl->hlock);
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		spin_unlock(&fl->hlock);
		if (map->refs > 0 && !flags)
			return;
	}
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {

		if (me->dev == NULL) {
			pr_err("failed to free remote heap allocation\n");
			return;
		}
		if (map->phys) {
			/*
			 * NOTE(review): dma_free_coherent() expects the CPU
			 * virtual address returned by dma_alloc_coherent(),
			 * but &(map->va) is the address of the struct field
			 * (and dma_alloc_memory() discarded the real vaddr).
			 * This looks wrong — confirm against the alloc path.
			 */
			dma_free_coherent(me->dev, map->size,
					&(map->va), map->phys);
		}
	} else {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		if (!IS_ERR_OR_NULL(map->handle))
			ion_free(fl->apps->client, map->handle);
		if (sess && sess->smmu.enabled) {
			if (map->size || map->phys)
				msm_dma_unmap_sg(sess->smmu.dev,
					map->table->sgl,
					map->table->nents, DMA_BIDIRECTIONAL,
					map->buf);
		}
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid && map->phys) {
			int srcVM[2] = {VMID_HLOS, vmid};

			/* give the pages back to HLOS */
			hyp_assign_phys(map->phys, buf_page_size(map->size),
				srcVM, 2, destVM, destVMperm, 1);
		}

		if (!IS_ERR_OR_NULL(map->table))
			dma_buf_unmap_attachment(map->attach, map->table,
					DMA_BIDIRECTIONAL);
		if (!IS_ERR_OR_NULL(map->attach))
			dma_buf_detach(map->buf, map->attach);
		if (!IS_ERR_OR_NULL(map->buf))
			dma_buf_put(map->buf);
	}
	kfree(map);
}
601
602static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
603 struct fastrpc_session_ctx **session);
604
/*
 * Create (or reuse) a mapping for @fd covering [va, va+len) and share
 * it with the DSP.  Remote-heap flags allocate from the DMA pool;
 * otherwise the ion buffer is imported, attached, mapped through the
 * session SMMU and, on channels with a vmid, hyp-assigned to the DSP VM.
 * On success *ppmap holds a referenced mapping; on failure the partial
 * mapping is torn down via fastrpc_mmap_free().
 */
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t __user va, ssize_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = NULL;
	unsigned long attrs;
	phys_addr_t region_start = 0;
	unsigned long flags;
	int err = 0, vmid;

	/* reuse an existing mapping when one already covers the range */
	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		/* remote heap: global mapping backed by the DMA pool */
		map->apps = me;
		map->fl = NULL;
		VERIFY(err, !dma_alloc_memory(&region_start, len));
		if (err)
			goto bail;
		map->phys = (uintptr_t)region_start;
		map->size = len;
		map->va = (uintptr_t __user)map->phys;
	} else {
		if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
			pr_info("adsprpc: buffer mapped with persist attr %x\n",
				(unsigned int)map->attr);
			/* extra ref keeps the map alive across invocations */
			map->refs = 2;
		}
		VERIFY(err, !IS_ERR_OR_NULL(map->handle =
				ion_import_dma_buf_fd(fl->apps->client, fd)));
		if (err)
			goto bail;
		VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
		if (err)
			goto bail;

		map->uncached = !ION_IS_CACHED(flags);
		if (map->attr & FASTRPC_ATTR_NOVA)
			map->uncached = 1;

		map->secure = flags & ION_FLAG_SECURE;
		if (map->secure) {
			/* secure buffers need the dedicated secure session */
			if (!fl->secsctx)
				err = fastrpc_session_alloc(chan, 1,
						&fl->secsctx);
			if (err)
				goto bail;
		}
		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;
		VERIFY(err, !IS_ERR_OR_NULL(sess));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->attach =
				dma_buf_attach(map->buf, sess->smmu.dev)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
		if (err)
			goto bail;
		if (sess->smmu.enabled) {
			attrs = DMA_ATTR_EXEC_MAPPING;

			if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
				(sess->smmu.coherent && map->uncached))
				attrs |= DMA_ATTR_FORCE_NON_COHERENT;
			else if (map->attr & FASTRPC_ATTR_COHERENT)
				attrs |= DMA_ATTR_FORCE_COHERENT;

			VERIFY(err, map->table->nents ==
				msm_dma_map_sg_attrs(sess->smmu.dev,
					map->table->sgl, map->table->nents,
					DMA_BIDIRECTIONAL, map->buf, attrs));
			if (err)
				goto bail;
		} else {
			/* without an SMMU the buffer must be contiguous */
			VERIFY(err, map->table->nents == 1);
			if (err)
				goto bail;
		}
		map->phys = sg_dma_address(map->table->sgl);
		if (sess->smmu.cb) {
			/* tag the address with the context-bank id */
			map->phys += ((uint64_t)sess->smmu.cb << 32);
			map->size = sg_dma_len(map->table->sgl);
		} else {
			map->size = buf_page_size(len);
		}
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[2] = {VMID_HLOS, vmid};
			int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

			/* share the pages with the DSP VM */
			VERIFY(err, !hyp_assign_phys(map->phys,
					buf_page_size(map->size),
					srcVM, 1, destVM, destVMperm, 2));
			if (err)
				goto bail;
		}
		map->va = va;
	}
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map, 0);
	return err;
}
740
/*
 * Obtain a DMA buffer of at least @size bytes for @fl: first try the
 * per-file cache (smallest buffer that fits), otherwise allocate a
 * fresh coherent buffer, retrying once after draining the cache, and
 * hyp-assign it to the DSP VM when the channel has a vmid.
 * Returns 0 with *obuf set, or a negative VERIFY()-style error.
 */
static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
			     struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = NULL, *fr = NULL;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = NULL;
	VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
				(void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
					(void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	/* tag the DMA address with the context-bank id */
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		/* share the new buffer with the DSP VM */
		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
 bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}
806
807
/*
 * Re-adopt an invoke context that this thread abandoned on interruption.
 * A context saved by the current pid is moved back to the pending list;
 * if its scalars or file no longer match the new request, -1 is
 * returned so the caller starts a fresh context instead.
 */
static int context_restore_interrupted(struct fastrpc_file *fl,
				struct fastrpc_ioctl_invoke_crc *inv,
				struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			/* same thread, but the request must match too */
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}
835
/* Three-way compare yielding -1, 0 or 1. */
#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
/*
 * sort() comparator for struct overlap pointers: ascending start
 * address, ties broken by descending end address so an enclosing
 * buffer sorts before the buffers it contains.
 */
static int overlap_ptr_cmp(const void *a, const void *b)
{
	struct overlap *pa = *((struct overlap **)a);
	struct overlap *pb = *((struct overlap **)b);
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);
	return st == 0 ? ed : st;
}
847
/*
 * Compute, for every in/out buffer of the invocation, the sub-range
 * [mstart, mend) that must actually be copied once overlaps between
 * buffers are accounted for.  Buffers are sorted by overlap_ptr_cmp()
 * and swept left to right, tracking the furthest end seen so far
 * (@max); a buffer wholly inside an earlier one gets mstart = mend = 0.
 * Returns 0, or a VERIFY() error when a non-empty buffer's range wraps.
 */
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			/* reject address-space wrap-around */
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			/* overlaps the region already covered: copy the tail */
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				/* fully contained in an earlier buffer */
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			/* disjoint: copy the whole buffer */
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}
893
/*
 * Copy @size bytes from @src to @dst.  When @kernel is false, @src is a
 * user pointer and copy_from_user() is used, recording failure in @err
 * via VERIFY(); when @kernel is true both are kernel pointers and a
 * plain memmove() is performed.
 */
#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst),\
			(void const __user *)(src),\
							(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

/*
 * Mirror of K_COPY_FROM_USER for the kernel-to-user direction, using
 * copy_to_user() when @kernel is false.
 */
#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((void __user *)(dst), \
						(src), (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)
912
913
914static void context_free(struct smq_invoke_ctx *ctx);
915
/*
 * Allocate and populate an invoke context for one RPC.  The context and
 * all its per-buffer arrays (maps, lpra, fds, attrs, overs, overps) are
 * carved from a single kzalloc'ed region laid out after the struct.
 * Argument descriptors (and optional fds/attrs) are copied from user
 * space unless @kernel is set.  The new context is queued on the
 * pending list; on any failure it is released via context_free().
 */
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			 struct fastrpc_ioctl_invoke_crc *invokefd,
			 struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = NULL;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	/* carve the trailing allocation into the per-buffer arrays */
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}
	ctx->crc = (uint32_t *)invokefd->crc;
	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = fl->tgid;
	init_completion(&ctx->work);
	ctx->magic = FASTRPC_CTX_MAGIC;

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}
986
987static void context_save_interrupted(struct smq_invoke_ctx *ctx)
988{
989 struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
990
991 spin_lock(&ctx->fl->hlock);
992 hlist_del_init(&ctx->hn);
993 hlist_add_head(&ctx->hn, &clst->interrupted);
994 spin_unlock(&ctx->fl->hlock);
995 /* free the cache on power collapse */
996 fastrpc_buf_list_free(ctx->fl);
997}
998
/*
 * Tear down an invoke context: unlink it from whichever list it is on,
 * release the per-argument mmaps and the copy buffer, then free the
 * allocation.  The magic is cleared before kfree() so a stale pointer
 * into freed memory fails the FASTRPC_CTX_MAGIC check.
 */
static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	/* only the in/out buffer slots of maps[] were populated */
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i], 0);
	fastrpc_buf_free(ctx->buf, 1);
	ctx->magic = 0;
	kfree(ctx);
}
1013
/*
 * Record the DSP's return value and wake the invoking thread.
 * retval must be stored before complete(): the waiter in
 * fastrpc_internal_invoke() reads ctx->retval as soon as it wakes.
 */
static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}
1019
1020
1021static void fastrpc_notify_users(struct fastrpc_file *me)
1022{
1023 struct smq_invoke_ctx *ictx;
1024 struct hlist_node *n;
1025
1026 spin_lock(&me->hlock);
1027 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1028 complete(&ictx->work);
1029 }
1030 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1031 complete(&ictx->work);
1032 }
1033 spin_unlock(&me->hlock);
1034
1035}
1036
1037static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
1038{
1039 struct fastrpc_file *fl;
1040 struct hlist_node *n;
1041
1042 spin_lock(&me->hlock);
1043 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1044 if (fl->cid == cid)
1045 fastrpc_notify_users(fl);
1046 }
1047 spin_unlock(&me->hlock);
1048
1049}
1050static void context_list_ctor(struct fastrpc_ctx_lst *me)
1051{
1052 INIT_HLIST_HEAD(&me->interrupted);
1053 INIT_HLIST_HEAD(&me->pending);
1054}
1055
1056static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
1057{
1058 struct fastrpc_ctx_lst *clst = &fl->clst;
c_mtharue1a5ce12017-10-13 20:47:09 +05301059 struct smq_invoke_ctx *ictx = NULL, *ctxfree;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001060 struct hlist_node *n;
1061
1062 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301063 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001064 spin_lock(&fl->hlock);
1065 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
1066 hlist_del_init(&ictx->hn);
1067 ctxfree = ictx;
1068 break;
1069 }
1070 spin_unlock(&fl->hlock);
1071 if (ctxfree)
1072 context_free(ctxfree);
1073 } while (ctxfree);
1074 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301075 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001076 spin_lock(&fl->hlock);
1077 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
1078 hlist_del_init(&ictx->hn);
1079 ctxfree = ictx;
1080 break;
1081 }
1082 spin_unlock(&fl->hlock);
1083 if (ctxfree)
1084 context_free(ctxfree);
1085 } while (ctxfree);
1086}
1087
1088static int fastrpc_file_free(struct fastrpc_file *fl);
1089static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
1090{
1091 struct fastrpc_file *fl, *free;
1092 struct hlist_node *n;
1093
1094 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301095 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001096 spin_lock(&me->hlock);
1097 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1098 hlist_del_init(&fl->hn);
1099 free = fl;
1100 break;
1101 }
1102 spin_unlock(&me->hlock);
1103 if (free)
1104 fastrpc_file_free(free);
1105 } while (free);
1106}
1107
/*
 * Marshal the caller's remote arguments into a single DSP-visible
 * buffer before the invoke is sent.
 *
 * Layout of ctx->buf: [remote args][invoke bufs][phy pages][fdlist]
 * [crclist][payload copies of non-ion buffers].  ion-backed arguments
 * (those with a valid fd) are mapped and referenced by physical page
 * instead of being copied.  On success ctx->rpra points at the
 * metadata and ctx->used holds the total copied length.
 */
static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
{
	remote_arg64_t *rpra;
	remote_arg_t *lpra = ctx->lpra;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages, *ipage;
	uint32_t sc = ctx->sc;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	int handles, bufs = inbufs + outbufs;
	uintptr_t args;
	ssize_t rlen = 0, copylen = 0, metalen = 0;
	int i, oix;
	int err = 0;
	int mflags = 0;
	uint64_t *fdlist;
	uint32_t *crclist;

	/*
	 * calculate size of the metadata: with rpra == NULL the
	 * smq_*_start() helpers yield offsets from 0, i.e. sizes
	 */
	rpra = NULL;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;

	/* map ion-backed buffer arguments (valid fd) up front */
	for (i = 0; i < bufs; ++i) {
		uintptr_t __user buf = (uintptr_t __user)lpra[i].buf.pv;
		ssize_t len = lpra[i].buf.len;

		if (ctx->fds[i] && (ctx->fds[i] != -1))
			fastrpc_mmap_create(ctx->fl, ctx->fds[i],
					ctx->attrs[i], buf, len,
					mflags, &ctx->maps[i]);
		ipage += 1;
	}
	/* dma handle arguments always map, with no virtual address */
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	for (i = bufs; i < bufs + handles; i++) {
		VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
				FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
		if (err)
			goto bail;
		ipage += 1;
	}
	/* metadata = pages-so-far + fd list + crc list */
	metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
				 (sizeof(uint32_t) * M_CRCLIST);

	/* calculate len required for copying the non-ion buffers */
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		uintptr_t mstart, mend;
		ssize_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (ctx->maps[i])
			continue;	/* ion-mapped: no copy needed */
		if (ctx->overps[oix]->offset == 0)
			copylen = ALIGN(copylen, BALIGN);
		mstart = ctx->overps[oix]->mstart;
		mend = ctx->overps[oix]->mend;
		VERIFY(err, (mend - mstart) <= LONG_MAX);
		if (err)
			goto bail;
		copylen += mend - mstart;
		VERIFY(err, copylen >= 0);	/* guard ssize_t overflow */
		if (err)
			goto bail;
	}
	ctx->used = copylen;

	/* allocate new buffer (copylen includes metalen, so always > 0) */
	if (copylen) {
		VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
		if (err)
			goto bail;
	}
	/* NOTE(review): relies on copylen >= metalen > 0 so ctx->buf was
	 * allocated above; otherwise this would dereference NULL
	 */
	if (ctx->buf->virt && metalen <= copylen)
		memset(ctx->buf->virt, 0, metalen);

	/* copy metadata: recompute list/pages against the real buffer */
	rpra = ctx->buf->virt;
	ctx->rpra = rpra;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;
	args = (uintptr_t)ctx->buf->virt + metalen;
	for (i = 0; i < bufs + handles; ++i) {
		if (lpra[i].buf.len)
			list[i].num = 1;
		else
			list[i].num = 0;
		list[i].pgidx = ipage - pages;
		ipage++;
	}

	/* map ion buffers: fill page addr/size for mapped arguments */
	PERF(ctx->fl->profile, ctx->fl->perf.map,
	for (i = 0; rpra && i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];
		uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
		ssize_t len = lpra[i].buf.len;

		rpra[i].buf.pv = 0;
		rpra[i].buf.len = len;
		if (!len)
			continue;
		if (map) {
			struct vm_area_struct *vma;
			uintptr_t offset;
			int num = buf_num_pages(buf, len);
			int idx = list[i].pgidx;

			if (map->attr & FASTRPC_ATTR_NOVA) {
				offset = 0;
			} else {
				/* offset of the user pointer inside its vma */
				down_read(&current->mm->mmap_sem);
				VERIFY(err, NULL != (vma = find_vma(current->mm,
								map->va)));
				if (err) {
					up_read(&current->mm->mmap_sem);
					goto bail;
				}
				offset = buf_page_start(buf) - vma->vm_start;
				up_read(&current->mm->mmap_sem);
				VERIFY(err, offset < (uintptr_t)map->size);
				if (err)
					goto bail;
			}
			pages[idx].addr = map->phys + offset;
			pages[idx].size = num << PAGE_SHIFT;
		}
		rpra[i].buf.pv = buf;
	}
	PERF_END);
	/* handle slots reference their map's pages directly */
	for (i = bufs; i < bufs + handles; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		pages[i].addr = map->phys;
		pages[i].size = map->size;
	}
	/* zero the fd and crc return areas the DSP may write back */
	fdlist = (uint64_t *)&pages[bufs + handles];
	for (i = 0; i < M_FDLIST; i++)
		fdlist[i] = 0;
	crclist = (uint32_t *)&fdlist[M_FDLIST];
	memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);

	/* copy non ion buffers into the payload area after the metadata */
	PERF(ctx->fl->profile, ctx->fl->perf.copy,
	rlen = copylen - metalen;
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];
		ssize_t mlen;
		uint64_t buf;
		ssize_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (map)
			continue;
		if (ctx->overps[oix]->offset == 0) {
			/* mirror the alignment done in the sizing pass */
			rlen -= ALIGN(args, BALIGN) - args;
			args = ALIGN(args, BALIGN);
		}
		mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
		VERIFY(err, rlen >= mlen);
		if (err)
			goto bail;
		rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
		pages[list[i].pgidx].addr = ctx->buf->phys -
					    ctx->overps[oix]->offset +
					    (copylen - rlen);
		pages[list[i].pgidx].addr =
			buf_page_start(pages[list[i].pgidx].addr);
		buf = rpra[i].buf.pv;
		pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
		if (i < inbufs) {
			/* only input buffers carry data toward the DSP */
			K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
					lpra[i].buf.pv, len);
			if (err)
				goto bail;
		}
		args = args + mlen;
		rlen -= mlen;
	}
	PERF_END);

	/* flush CPU caches for non-coherent mapped buffers */
	PERF(ctx->fl->profile, ctx->fl->perf.flush,
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		if (rpra[i].buf.len && ctx->overps[oix]->mstart)
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
			uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
	}
	PERF_END);
	/* fill dma descriptors for the handle arguments */
	for (i = bufs; rpra && i < bufs + handles; i++) {
		rpra[i].dma.fd = ctx->fds[i];
		rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
		rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
	}

	/* finally flush the metadata region itself */
	if (!ctx->fl->sctx->smmu.coherent) {
		PERF(ctx->fl->profile, ctx->fl->perf.flush,
		dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
		PERF_END);
	}
 bail:
	return err;
}
1326
/*
 * Unmarshal results after the DSP completes an invoke: copy output
 * buffers back to the caller, release per-call mmaps, honour the fd
 * list the DSP asked to be freed, and return the crc list if the
 * caller requested it.  Returns 0 or a negative error from the copies.
 */
static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		    remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	uint32_t *crclist = NULL;

	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	/* recover the metadata layout laid down by get_args() */
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	crclist = (uint32_t *)(fdlist + M_FDLIST);

	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			/* copied buffer: push the output bytes back */
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			/* ion buffer was shared in place: just unmap */
			fastrpc_mmap_free(ctx->maps[i], 0);
			ctx->maps[i] = NULL;
		}
	}
	/* free mappings for fds the DSP reported back (0 terminates) */
	if (inbufs + outbufs + handles) {
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap, 0);
		}
	}
	/* return per-buffer crcs if the caller passed a crc pointer */
	if (ctx->crc && crclist && rpra)
		K_COPY_TO_USER(err, kernel, ctx->crc,
				crclist, M_CRCLIST*sizeof(uint32_t));

 bail:
	return err;
}
1378
/*
 * Pre-invoke cache maintenance on output buffers: flush the unaligned
 * edge cache lines of each non-coherent output buffer so dirty CPU
 * lines sharing those cache lines cannot later overwrite the DSP's
 * writes.  Buffers on the same page as the metadata are skipped (the
 * metadata flush in get_args() covers them).
 */
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		/* coherent SMMU session: no maintenance unless forced */
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		/* flush the partial cache line at the buffer start */
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
							rpra[i].buf.len);
		/* and the partial cache line at the buffer end */
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}
1414
/*
 * Post-invoke cache maintenance: invalidate CPU caches over each
 * non-coherent output buffer (and the metadata region) so the CPU
 * reads the bytes the DSP wrote rather than stale cached data.
 */
static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		/* coherent SMMU session: no maintenance unless forced */
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* buffers sharing the metadata page are covered below */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		/* ion-backed buffers invalidate via the ion handle */
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						 + rpra[i].buf.len));
	}

	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}
1454
/*
 * Build the smq message for one invoke and transmit it to the DSP over
 * the channel's glink transport.  Fails with -ECONNRESET if a
 * subsystem restart happened since the file attached, and with an
 * error if the glink port is not connected.
 */
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, NULL != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = fl->tgid;
	msg->tid = current->pid;
	/* second session of the same process is flagged in the tid */
	if (fl->sessionid)
		msg->tid |= (1 << SESSION_ID_INDEX);
	/* kernel-originated invokes carry pid 0 */
	if (kernel)
		msg->pid = 0;
	/* context pointer doubles as the reply cookie; low bit = pd */
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
		FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
 bail:
	return err;
}
1492
1493static void fastrpc_init(struct fastrpc_apps *me)
1494{
1495 int i;
1496
1497 INIT_HLIST_HEAD(&me->drivers);
1498 spin_lock_init(&me->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001499 me->channel = &gcinfo[0];
1500 for (i = 0; i < NUM_CHANNELS; i++) {
1501 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301502 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001503 me->channel[i].sesscount = 0;
c_mtharu314a4202017-11-15 22:09:17 +05301504 mutex_init(&me->channel[i].mut);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001505 }
1506}
1507
1508static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1509
/*
 * Core invoke path shared by the ioctl handler and in-kernel callers.
 *
 * Flow: resume an interrupted context if one matches (user calls only),
 * otherwise allocate a context, marshal arguments (get_args), send the
 * message, wait for the DSP's completion, invalidate caches, and copy
 * results back (put_args).  If the wait is interrupted by a signal the
 * context is parked on the interrupted list instead of being freed so
 * the retried syscall can pick it up.
 *
 * @mode is accepted but not referenced in this function body.
 * NOTE(review): on SSR err is set to positive ECONNRESET here (not
 * negated) — matches the existing callers' expectations, presumably;
 * verify before changing.
 */
static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
				   uint32_t kernel,
				   struct fastrpc_ioctl_invoke_crc *inv)
{
	struct smq_invoke_ctx *ctx = NULL;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;
	int cid = fl->cid;
	int interrupted = 0;
	int err = 0;
	struct timespec invoket = {0};

	if (fl->profile)
		getnstimeofday(&invoket);


	VERIFY(err, fl->sctx != NULL);
	if (err)
		goto bail;
	VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
	if (err)
		goto bail;

	if (!kernel) {
		/* a signal-interrupted call retried by userspace resumes
		 * its original context rather than re-sending
		 */
		VERIFY(err, 0 == context_restore_interrupted(fl, inv,
								&ctx));
		if (err)
			goto bail;
		if (fl->sctx->smmu.faults)
			err = FASTRPC_ENOSUCH;
		if (err)
			goto bail;
		if (ctx)
			goto wait;
	}

	VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
	if (err)
		goto bail;

	if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
		PERF(fl->profile, fl->perf.getargs,
		VERIFY(err, 0 == get_args(kernel, ctx));
		PERF_END);
		if (err)
			goto bail;
	}

	/* flush unaligned buffer edges before handing pages to the DSP */
	if (!fl->sctx->smmu.coherent)
		inv_args_pre(ctx);
	PERF(fl->profile, fl->perf.link,
	VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
	PERF_END);

	if (err)
		goto bail;
 wait:
	/* kernel callers must not be interruptible mid-invoke */
	if (kernel)
		wait_for_completion(&ctx->work);
	else {
		interrupted = wait_for_completion_interruptible(&ctx->work);
		VERIFY(err, 0 == (err = interrupted));
		if (err)
			goto bail;
	}

	PERF(fl->profile, fl->perf.invargs,
	if (!fl->sctx->smmu.coherent)
		inv_args(ctx);
	PERF_END);

	/* propagate the remote method's own return value */
	VERIFY(err, 0 == (err = ctx->retval));
	if (err)
		goto bail;

	PERF(fl->profile, fl->perf.putargs,
	VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
	PERF_END);
	if (err)
		goto bail;
 bail:
	/* keep the context alive across -ERESTARTSYS so it can resume */
	if (ctx && interrupted == -ERESTARTSYS)
		context_save_interrupted(ctx);
	else if (ctx)
		context_free(ctx);
	if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
		err = ECONNRESET;

	if (fl->profile && !interrupted) {
		if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
			fl->perf.invoke += getnstimediff(&invoket);
		if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX)
			fl->perf.count++;
	}
	return err;
}
1605
Sathish Ambley36849af2017-02-02 09:35:55 -08001606static int fastrpc_channel_open(struct fastrpc_file *fl);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001607static int fastrpc_init_process(struct fastrpc_file *fl,
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001608 struct fastrpc_ioctl_init_attrs *uproc)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001609{
1610 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05301611 struct fastrpc_apps *me = &gfa;
Sathish Ambleybae51902017-07-03 15:00:49 -07001612 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001613 struct fastrpc_ioctl_init *init = &uproc->init;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001614 struct smq_phy_page pages[1];
c_mtharue1a5ce12017-10-13 20:47:09 +05301615 struct fastrpc_mmap *file = NULL, *mem = NULL;
1616 char *proc_name = NULL;
1617 int srcVM[1] = {VMID_HLOS};
c_mtharu63ffc012017-11-16 15:26:56 +05301618 int destVM[1] = {me->channel[fl->cid].rhvmid};
c_mtharue1a5ce12017-10-13 20:47:09 +05301619 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
1620 int hlosVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001621
Sathish Ambley36849af2017-02-02 09:35:55 -08001622 VERIFY(err, !fastrpc_channel_open(fl));
1623 if (err)
1624 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001625 if (init->flags == FASTRPC_INIT_ATTACH) {
1626 remote_arg_t ra[1];
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301627 int tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001628
1629 ra[0].buf.pv = (void *)&tgid;
1630 ra[0].buf.len = sizeof(tgid);
1631 ioctl.inv.handle = 1;
1632 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
1633 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301634 ioctl.fds = NULL;
1635 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001636 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001637 fl->pd = 0;
1638 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1639 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1640 if (err)
1641 goto bail;
1642 } else if (init->flags == FASTRPC_INIT_CREATE) {
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001643 remote_arg_t ra[6];
1644 int fds[6];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001645 int mflags = 0;
1646 struct {
1647 int pgid;
1648 int namelen;
1649 int filelen;
1650 int pageslen;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001651 int attrs;
1652 int siglen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001653 } inbuf;
1654
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301655 inbuf.pgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001656 inbuf.namelen = strlen(current->comm) + 1;
1657 inbuf.filelen = init->filelen;
1658 fl->pd = 1;
1659 if (init->filelen) {
1660 VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
1661 init->file, init->filelen, mflags, &file));
1662 if (err)
1663 goto bail;
1664 }
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301665
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001666 inbuf.pageslen = 1;
1667 VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
1668 init->mem, init->memlen, mflags, &mem));
1669 if (err)
1670 goto bail;
1671 inbuf.pageslen = 1;
1672 ra[0].buf.pv = (void *)&inbuf;
1673 ra[0].buf.len = sizeof(inbuf);
1674 fds[0] = 0;
1675
1676 ra[1].buf.pv = (void *)current->comm;
1677 ra[1].buf.len = inbuf.namelen;
1678 fds[1] = 0;
1679
1680 ra[2].buf.pv = (void *)init->file;
1681 ra[2].buf.len = inbuf.filelen;
1682 fds[2] = init->filefd;
1683
1684 pages[0].addr = mem->phys;
1685 pages[0].size = mem->size;
1686 ra[3].buf.pv = (void *)pages;
1687 ra[3].buf.len = 1 * sizeof(*pages);
1688 fds[3] = 0;
1689
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001690 inbuf.attrs = uproc->attrs;
1691 ra[4].buf.pv = (void *)&(inbuf.attrs);
1692 ra[4].buf.len = sizeof(inbuf.attrs);
1693 fds[4] = 0;
1694
1695 inbuf.siglen = uproc->siglen;
1696 ra[5].buf.pv = (void *)&(inbuf.siglen);
1697 ra[5].buf.len = sizeof(inbuf.siglen);
1698 fds[5] = 0;
1699
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001700 ioctl.inv.handle = 1;
1701 ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001702 if (uproc->attrs)
1703 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001704 ioctl.inv.pra = ra;
1705 ioctl.fds = fds;
c_mtharue1a5ce12017-10-13 20:47:09 +05301706 ioctl.attrs = NULL;
1707 ioctl.crc = NULL;
1708 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1709 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1710 if (err)
1711 goto bail;
1712 } else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
1713 remote_arg_t ra[3];
1714 uint64_t phys = 0;
1715 ssize_t size = 0;
1716 int fds[3];
1717 struct {
1718 int pgid;
1719 int namelen;
1720 int pageslen;
1721 } inbuf;
1722
1723 if (!init->filelen)
1724 goto bail;
1725
1726 proc_name = kzalloc(init->filelen, GFP_KERNEL);
1727 VERIFY(err, !IS_ERR_OR_NULL(proc_name));
1728 if (err)
1729 goto bail;
1730 VERIFY(err, 0 == copy_from_user((void *)proc_name,
1731 (void __user *)init->file, init->filelen));
1732 if (err)
1733 goto bail;
1734
1735 inbuf.pgid = current->tgid;
c_mtharu81a0aa72017-11-07 16:13:21 +05301736 inbuf.namelen = init->filelen;
c_mtharue1a5ce12017-10-13 20:47:09 +05301737 inbuf.pageslen = 0;
1738 if (!me->staticpd_flags) {
1739 inbuf.pageslen = 1;
1740 VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
1741 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
1742 &mem));
1743 if (err)
1744 goto bail;
1745 phys = mem->phys;
1746 size = mem->size;
1747 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
1748 srcVM, 1, destVM, destVMperm, 1));
1749 if (err) {
1750 pr_err("ADSPRPC: hyp_assign_phys fail err %d",
1751 err);
1752 pr_err("map->phys %llx, map->size %d\n",
1753 phys, (int)size);
1754 goto bail;
1755 }
1756 me->staticpd_flags = 1;
1757 }
1758
1759 ra[0].buf.pv = (void *)&inbuf;
1760 ra[0].buf.len = sizeof(inbuf);
1761 fds[0] = 0;
1762
1763 ra[1].buf.pv = (void *)proc_name;
1764 ra[1].buf.len = inbuf.namelen;
1765 fds[1] = 0;
1766
1767 pages[0].addr = phys;
1768 pages[0].size = size;
1769
1770 ra[2].buf.pv = (void *)pages;
1771 ra[2].buf.len = sizeof(*pages);
1772 fds[2] = 0;
1773 ioctl.inv.handle = 1;
1774
1775 ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
1776 ioctl.inv.pra = ra;
1777 ioctl.fds = NULL;
1778 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001779 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001780 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1781 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1782 if (err)
1783 goto bail;
1784 } else {
1785 err = -ENOTTY;
1786 }
1787bail:
c_mtharud91205a2017-11-07 16:01:06 +05301788 kfree(proc_name);
c_mtharue1a5ce12017-10-13 20:47:09 +05301789 if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
1790 me->staticpd_flags = 0;
1791 if (mem && err) {
1792 if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
1793 hyp_assign_phys(mem->phys, (uint64_t)mem->size,
1794 destVM, 1, srcVM, hlosVMperm, 1);
c_mtharu7bd6a422017-10-17 18:15:37 +05301795 fastrpc_mmap_free(mem, 0);
c_mtharue1a5ce12017-10-13 20:47:09 +05301796 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001797 if (file)
c_mtharu7bd6a422017-10-17 18:15:37 +05301798 fastrpc_mmap_free(file, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001799 return err;
1800}
1801
1802static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
1803{
1804 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001805 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001806 remote_arg_t ra[1];
1807 int tgid = 0;
1808
Sathish Ambley36849af2017-02-02 09:35:55 -08001809 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1810 if (err)
1811 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05301812 VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001813 if (err)
1814 goto bail;
1815 tgid = fl->tgid;
1816 ra[0].buf.pv = (void *)&tgid;
1817 ra[0].buf.len = sizeof(tgid);
1818 ioctl.inv.handle = 1;
1819 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
1820 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301821 ioctl.fds = NULL;
1822 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001823 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001824 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1825 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1826bail:
1827 return err;
1828}
1829
1830static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
1831 struct fastrpc_mmap *map)
1832{
Sathish Ambleybae51902017-07-03 15:00:49 -07001833 struct fastrpc_ioctl_invoke_crc ioctl;
c_mtharu63ffc012017-11-16 15:26:56 +05301834 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001835 struct smq_phy_page page;
1836 int num = 1;
1837 remote_arg_t ra[3];
1838 int err = 0;
1839 struct {
1840 int pid;
1841 uint32_t flags;
1842 uintptr_t vaddrin;
1843 int num;
1844 } inargs;
1845 struct {
1846 uintptr_t vaddrout;
1847 } routargs;
1848
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301849 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001850 inargs.vaddrin = (uintptr_t)map->va;
1851 inargs.flags = flags;
1852 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
1853 ra[0].buf.pv = (void *)&inargs;
1854 ra[0].buf.len = sizeof(inargs);
1855 page.addr = map->phys;
1856 page.size = map->size;
1857 ra[1].buf.pv = (void *)&page;
1858 ra[1].buf.len = num * sizeof(page);
1859
1860 ra[2].buf.pv = (void *)&routargs;
1861 ra[2].buf.len = sizeof(routargs);
1862
1863 ioctl.inv.handle = 1;
1864 if (fl->apps->compat)
1865 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
1866 else
1867 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
1868 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301869 ioctl.fds = NULL;
1870 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001871 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001872 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1873 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1874 map->raddr = (uintptr_t)routargs.vaddrout;
c_mtharue1a5ce12017-10-13 20:47:09 +05301875 if (err)
1876 goto bail;
1877 if (flags == ADSP_MMAP_HEAP_ADDR) {
1878 struct scm_desc desc = {0};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001879
c_mtharue1a5ce12017-10-13 20:47:09 +05301880 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
1881 desc.args[1] = map->phys;
1882 desc.args[2] = map->size;
1883 desc.arginfo = SCM_ARGS(3);
1884 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
1885 TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
1886 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
1887
1888 int srcVM[1] = {VMID_HLOS};
c_mtharu63ffc012017-11-16 15:26:56 +05301889 int destVM[1] = {me->channel[fl->cid].rhvmid};
c_mtharue1a5ce12017-10-13 20:47:09 +05301890 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
1891
1892 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
1893 srcVM, 1, destVM, destVMperm, 1));
1894 if (err)
1895 goto bail;
1896 }
1897bail:
1898 return err;
1899}
1900
1901static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
1902 struct fastrpc_mmap *map)
1903{
1904 int err = 0;
c_mtharu63ffc012017-11-16 15:26:56 +05301905 struct fastrpc_apps *me = &gfa;
1906 int srcVM[1] = {me->channel[fl->cid].rhvmid};
c_mtharue1a5ce12017-10-13 20:47:09 +05301907 int destVM[1] = {VMID_HLOS};
1908 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
1909
1910 if (map->flags == ADSP_MMAP_HEAP_ADDR) {
1911 struct fastrpc_ioctl_invoke_crc ioctl;
1912 struct scm_desc desc = {0};
1913 remote_arg_t ra[1];
1914 int err = 0;
1915 struct {
1916 uint8_t skey;
1917 } routargs;
1918
1919 ra[0].buf.pv = (void *)&routargs;
1920 ra[0].buf.len = sizeof(routargs);
1921
1922 ioctl.inv.handle = 1;
1923 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
1924 ioctl.inv.pra = ra;
1925 ioctl.fds = NULL;
1926 ioctl.attrs = NULL;
1927 ioctl.crc = NULL;
1928 if (fl == NULL)
1929 goto bail;
1930
1931 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1932 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1933 if (err)
1934 goto bail;
1935 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
1936 desc.args[1] = map->phys;
1937 desc.args[2] = map->size;
1938 desc.args[3] = routargs.skey;
1939 desc.arginfo = SCM_ARGS(4);
1940 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
1941 TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
1942 } else if (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
1943 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
1944 srcVM, 1, destVM, destVMperm, 1));
1945 if (err)
1946 goto bail;
1947 }
1948
1949bail:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001950 return err;
1951}
1952
1953static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
1954 struct fastrpc_mmap *map)
1955{
Sathish Ambleybae51902017-07-03 15:00:49 -07001956 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001957 remote_arg_t ra[1];
1958 int err = 0;
1959 struct {
1960 int pid;
1961 uintptr_t vaddrout;
1962 ssize_t size;
1963 } inargs;
1964
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301965 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001966 inargs.size = map->size;
1967 inargs.vaddrout = map->raddr;
1968 ra[0].buf.pv = (void *)&inargs;
1969 ra[0].buf.len = sizeof(inargs);
1970
1971 ioctl.inv.handle = 1;
1972 if (fl->apps->compat)
1973 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
1974 else
1975 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
1976 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301977 ioctl.fds = NULL;
1978 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001979 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001980 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1981 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
c_mtharue1a5ce12017-10-13 20:47:09 +05301982 if (err)
1983 goto bail;
1984 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
1985 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
1986 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, map));
1987 if (err)
1988 goto bail;
1989 }
1990bail:
1991 return err;
1992}
1993
/*
 * SSR cleanup: drain the global remote-heap map list after a
 * subsystem restart.  Each map is detached from the list under the
 * spinlock, unprotected on the DSP/TZ side, optionally captured in an
 * ELF ramdump, and freed.  Returns 0 when the list is fully drained.
 */
static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
{
	struct fastrpc_mmap *match = NULL, *map = NULL;
	struct hlist_node *n = NULL;
	int err = 0, ret = 0;
	struct fastrpc_apps *me = &gfa;
	struct ramdump_segment *ramdump_segments_rh = NULL;

	do {
		match = NULL;
		/*
		 * Detach one entry at a time so the spinlock is never
		 * held across the sleeping teardown calls below.
		 */
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
		spin_unlock(&me->hlock);

		if (match) {
			VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match));
			if (err)
				goto bail;
			/*
			 * Snapshot the heap contents before freeing when
			 * ramdumps are enabled for channel 0.
			 */
			if (me->channel[0].ramdumpenabled) {
				ramdump_segments_rh = kcalloc(1,
				sizeof(struct ramdump_segment), GFP_KERNEL);
				if (ramdump_segments_rh) {
					ramdump_segments_rh->address =
					match->phys;
					ramdump_segments_rh->size = match->size;
					ret = do_elf_ramdump(
					 me->channel[0].remoteheap_ramdump_dev,
					 ramdump_segments_rh, 1);
					if (ret < 0)
						pr_err("ADSPRPC: unable to dump heap");
					kfree(ramdump_segments_rh);
				}
			}
			fastrpc_mmap_free(match, 0);
		}
	} while (match);
bail:
	/* on failure, put the detached entry back on the global list */
	if (err && match)
		fastrpc_mmap_add(match);
	return err;
}
2039
2040static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
2041 ssize_t len, struct fastrpc_mmap **ppmap);
2042
2043static void fastrpc_mmap_add(struct fastrpc_mmap *map);
2044
2045static int fastrpc_internal_munmap(struct fastrpc_file *fl,
2046 struct fastrpc_ioctl_munmap *ud)
2047{
2048 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302049 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002050
2051 VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
2052 if (err)
2053 goto bail;
2054 VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
2055 if (err)
2056 goto bail;
c_mtharu7bd6a422017-10-17 18:15:37 +05302057 fastrpc_mmap_free(map, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002058bail:
2059 if (err && map)
2060 fastrpc_mmap_add(map);
2061 return err;
2062}
2063
c_mtharu7bd6a422017-10-17 18:15:37 +05302064static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
2065 struct fastrpc_ioctl_munmap_fd *ud) {
2066 int err = 0;
2067 struct fastrpc_mmap *map = NULL;
2068
2069 VERIFY(err, (fl && ud));
2070 if (err)
2071 goto bail;
2072
2073 if (!fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
2074 pr_err("mapping not found to unamp %x va %llx %x\n",
2075 ud->fd, (unsigned long long)ud->va,
2076 (unsigned int)ud->len);
2077 err = -1;
2078 goto bail;
2079 }
2080 if (map)
2081 fastrpc_mmap_free(map, 0);
2082bail:
2083 return err;
2084}
2085
2086
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002087static int fastrpc_internal_mmap(struct fastrpc_file *fl,
2088 struct fastrpc_ioctl_mmap *ud)
2089{
2090
c_mtharue1a5ce12017-10-13 20:47:09 +05302091 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002092 int err = 0;
2093
c_mtharue1a5ce12017-10-13 20:47:09 +05302094 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t __user)ud->vaddrin,
2095 ud->size, ud->flags, 1, &map))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002096 return 0;
2097
2098 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
c_mtharue1a5ce12017-10-13 20:47:09 +05302099 (uintptr_t __user)ud->vaddrin, ud->size,
2100 ud->flags, &map));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002101 if (err)
2102 goto bail;
2103 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
2104 if (err)
2105 goto bail;
2106 ud->vaddrout = map->raddr;
2107 bail:
2108 if (err && map)
c_mtharu7bd6a422017-10-17 18:15:37 +05302109 fastrpc_mmap_free(map, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002110 return err;
2111}
2112
/*
 * kref release callback for a channel: closes the GLINK port and
 * drops the link-state registration.
 *
 * Invoked via kref_put_mutex() with ctx->mut held, so this function
 * is responsible for releasing that mutex before returning.
 */
static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	/* the channel id is this ctx's index in the global gcinfo table */
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = NULL;
	glink_unregister_link_state_cb(ctx->link.link_notify_handle);
	ctx->link.link_notify_handle = NULL;
	mutex_unlock(&ctx->mut);
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
		MAJOR(me->dev_no), cid);
}
2129
2130static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
2131
2132static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
2133 int secure, struct fastrpc_session_ctx **session)
2134{
2135 struct fastrpc_apps *me = &gfa;
2136 int idx = 0, err = 0;
2137
2138 if (chan->sesscount) {
2139 for (idx = 0; idx < chan->sesscount; ++idx) {
2140 if (!chan->session[idx].used &&
2141 chan->session[idx].smmu.secure == secure) {
2142 chan->session[idx].used = 1;
2143 break;
2144 }
2145 }
2146 VERIFY(err, idx < chan->sesscount);
2147 if (err)
2148 goto bail;
2149 chan->session[idx].smmu.faults = 0;
2150 } else {
2151 VERIFY(err, me->dev != NULL);
2152 if (err)
2153 goto bail;
2154 chan->session[0].dev = me->dev;
c_mtharue1a5ce12017-10-13 20:47:09 +05302155 chan->session[0].smmu.dev = me->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002156 }
2157
2158 *session = &chan->session[idx];
2159 bail:
2160 return err;
2161}
2162
c_mtharue1a5ce12017-10-13 20:47:09 +05302163static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
2164 size_t size)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002165{
2166 if (glink_queue_rx_intent(h, NULL, size))
2167 return false;
2168 return true;
2169}
2170
/* GLINK callback: transmit completion needs no action in this driver. */
static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
	const void *pkt_priv, const void *ptr)
{
}
2175
/*
 * GLINK receive callback: a response packet arrived from the DSP.
 *
 * The response carries the tagged pointer of the originating invoke
 * context; its magic is validated before the waiter is woken so a
 * corrupt or stale response cannot reach arbitrary memory.  The rx
 * buffer is always handed back to GLINK, valid or not.
 */
static void fastrpc_glink_notify_rx(void *handle, const void *priv,
	const void *pkt_priv, const void *ptr, size_t size)
{
	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
	struct smq_invoke_ctx *ctx;
	int err = 0;

	VERIFY(err, (rsp && size >= sizeof(*rsp)));
	if (err)
		goto bail;

	/*
	 * Bit 0 of the context cookie is masked off to recover the
	 * kernel pointer.  NOTE(review): the tag semantics are assumed
	 * from the mask here -- confirm against the context allocator.
	 */
	ctx = (struct smq_invoke_ctx *)(uint64_to_ptr(rsp->ctx & ~1));
	VERIFY(err, (ctx && ctx->magic == FASTRPC_CTX_MAGIC));
	if (err)
		goto bail;

	context_notify_user(ctx, rsp->retval);
bail:
	if (err)
		pr_err("adsprpc: invalid response or context\n");
	glink_rx_done(handle, ptr, true);
}
2198
/*
 * GLINK channel (port) state callback.
 *
 * priv carries the channel id stashed in the open config.  CONNECTED
 * completes the handshake that fastrpc_channel_open() waits on;
 * REMOTE_DISCONNECTED closes our end under the channel mutex so the
 * stale handle cannot be reused.
 */
static void fastrpc_glink_notify_state(void *handle, const void *priv,
				unsigned int event)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)(uintptr_t)priv;
	struct fastrpc_glink_info *link;

	if (cid < 0 || cid >= NUM_CHANNELS)
		return;
	link = &me->channel[cid].link;
	switch (event) {
	case GLINK_CONNECTED:
		link->port_state = FASTRPC_LINK_CONNECTED;
		/* wake the opener blocked in fastrpc_channel_open() */
		complete(&me->channel[cid].workport);
		break;
	case GLINK_LOCAL_DISCONNECTED:
		link->port_state = FASTRPC_LINK_DISCONNECTED;
		break;
	case GLINK_REMOTE_DISCONNECTED:
		mutex_lock(&me->channel[cid].mut);
		if (me->channel[cid].chan) {
			link->port_state = FASTRPC_LINK_REMOTE_DISCONNECTING;
			fastrpc_glink_close(me->channel[cid].chan, cid);
			me->channel[cid].chan = NULL;
		} else {
			link->port_state = FASTRPC_LINK_DISCONNECTED;
		}
		mutex_unlock(&me->channel[cid].mut);
		break;
	default:
		break;
	}
}
2232
2233static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
2234 struct fastrpc_session_ctx **session)
2235{
2236 int err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002237
c_mtharu314a4202017-11-15 22:09:17 +05302238 mutex_lock(&chan->mut);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002239 if (!*session)
2240 err = fastrpc_session_alloc_locked(chan, secure, session);
c_mtharu314a4202017-11-15 22:09:17 +05302241 mutex_unlock(&chan->mut);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002242 return err;
2243}
2244
/*
 * Return a session claimed by fastrpc_session_alloc*() to the
 * channel's free pool.  Takes the channel mutex itself.
 */
static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
				struct fastrpc_session_ctx *session)
{
	mutex_lock(&chan->mut);
	session->used = 0;
	mutex_unlock(&chan->mut);
}
2252
/*
 * Tear down all state owned by one client file: notify the DSP that
 * the process is going away, unlink from the driver list, destroy
 * contexts, buffers and mappings, and drop channel and session
 * references.  Safe to call with fl == NULL.
 */
static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n;
	struct fastrpc_mmap *map = NULL;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	/* best effort: an unreachable DSP must not block local cleanup */
	(void)fastrpc_release_current_dsp_process(fl);

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	if (!fl->sctx) {
		kfree(fl);
		return 0;
	}
	/* flag the file as closing before destroying its contexts */
	spin_lock(&fl->hlock);
	fl->file_close = 1;
	spin_unlock(&fl->hlock);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map, 1);
	}
	/* only drop the channel ref taken in this SSR epoch */
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
			fastrpc_channel_close, &fl->apps->channel[cid].mut);
	if (fl->sctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
	kfree(fl);
	return 0;
}
2291
2292static int fastrpc_device_release(struct inode *inode, struct file *file)
2293{
2294 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2295
2296 if (fl) {
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302297 if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
2298 pm_qos_remove_request(&fl->pm_qos_req);
Sathish Ambley1ca68232017-01-19 10:32:55 -08002299 if (fl->debugfs_file != NULL)
2300 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002301 fastrpc_file_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +05302302 file->private_data = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002303 }
2304 return 0;
2305}
2306
2307static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
2308 void *priv)
2309{
2310 struct fastrpc_apps *me = &gfa;
2311 int cid = (int)((uintptr_t)priv);
2312 struct fastrpc_glink_info *link;
2313
2314 if (cid < 0 || cid >= NUM_CHANNELS)
2315 return;
2316
2317 link = &me->channel[cid].link;
2318 switch (cb_info->link_state) {
2319 case GLINK_LINK_STATE_UP:
2320 link->link_state = FASTRPC_LINK_STATE_UP;
2321 complete(&me->channel[cid].work);
2322 break;
2323 case GLINK_LINK_STATE_DOWN:
2324 link->link_state = FASTRPC_LINK_STATE_DOWN;
2325 break;
2326 default:
2327 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
2328 break;
2329 }
2330}
2331
2332static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
2333{
2334 int err = 0;
2335 struct fastrpc_glink_info *link;
2336
2337 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2338 if (err)
2339 goto bail;
2340
2341 link = &me->channel[cid].link;
2342 if (link->link_notify_handle != NULL)
2343 goto bail;
2344
2345 link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
2346 link->link_notify_handle = glink_register_link_state_cb(
2347 &link->link_info,
2348 (void *)((uintptr_t)cid));
2349 VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
2350 if (err) {
2351 link->link_notify_handle = NULL;
2352 goto bail;
2353 }
2354 VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
2355 RPC_TIMEOUT));
2356bail:
2357 return err;
2358}
2359
c_mtharu314a4202017-11-15 22:09:17 +05302360static void fastrpc_glink_stop(int cid)
2361{
2362 int err = 0;
2363 struct fastrpc_glink_info *link;
2364
2365 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2366 if (err)
2367 return;
2368 link = &gfa.channel[cid].link;
2369
2370 if (link->port_state == FASTRPC_LINK_CONNECTED)
2371 link->port_state = FASTRPC_LINK_REMOTE_DISCONNECTING;
2372}
2373
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002374static void fastrpc_glink_close(void *chan, int cid)
2375{
2376 int err = 0;
2377 struct fastrpc_glink_info *link;
2378
2379 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2380 if (err)
2381 return;
2382 link = &gfa.channel[cid].link;
2383
c_mtharu314a4202017-11-15 22:09:17 +05302384 if (link->port_state == FASTRPC_LINK_CONNECTED ||
2385 link->port_state == FASTRPC_LINK_REMOTE_DISCONNECTING) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002386 link->port_state = FASTRPC_LINK_DISCONNECTING;
2387 glink_close(chan);
2388 }
2389}
2390
/*
 * Open the GLINK port for channel @cid.
 *
 * Requires the logical link to be up (see fastrpc_glink_register())
 * and the port to be fully disconnected.  The channel id is stashed
 * in cfg->priv so the rx/state callbacks can locate their channel.
 * The CONNECTED transition happens later via the state callback;
 * callers wait on the workport completion for it.
 */
static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &me->channel[cid].link.cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
	if (err)
		goto bail;

	link->port_state = FASTRPC_LINK_CONNECTING;
	cfg->priv = (void *)(uintptr_t)cid;
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err) {
		/* roll the port state back if the open never progressed */
		if (link->port_state == FASTRPC_LINK_CONNECTING)
			link->port_state = FASTRPC_LINK_DISCONNECTED;
		goto bail;
	}
	me->channel[cid].chan = handle;
bail:
	return err;
}
2432
/* debugfs open: expose the node's fastrpc_file (inode private) to read() */
static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}
2438
/*
 * debugfs read handler.
 *
 * With no client attached (fl == NULL) it dumps per-channel SMMU
 * session info; otherwise it dumps the client's ids plus its buffer,
 * mapping and pending/interrupted context lists.  Output is built in
 * a DEBUGFS_SIZE scratch buffer and copied to userspace with
 * simple_read_from_buffer().
 */
static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
					 size_t count, loff_t *position)
{
	struct fastrpc_file *fl = filp->private_data;
	struct hlist_node *n;
	struct fastrpc_buf *buf = NULL;
	struct fastrpc_mmap *map = NULL;
	struct smq_invoke_ctx *ictx = NULL;
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	unsigned int len = 0;
	int i, j, ret = 0;
	char *fileinfo = NULL;

	fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
	if (!fileinfo)
		goto bail;
	if (fl == NULL) {
		/* global view: every channel and its sessions */
		for (i = 0; i < NUM_CHANNELS; i++) {
			chan = &gcinfo[i];
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s\n\n",
					chan->name);
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s %d\n",
					"sesscount:", chan->sesscount);
			for (j = 0; j < chan->sesscount; j++) {
				sess = &chan->session[j];
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s%d\n\n", "SESSION", j);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "sid:",
						sess->smmu.cb);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "SECURE:",
						sess->smmu.secure);
			}
		}
	} else {
		/* per-client view */
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"PROCESS_ID:", fl->tgid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"CHANNEL_ID:", fl->cid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"SSRCOUNT:", fl->ssrcount);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s\n",
				"LIST OF BUFS:");
		/* fl->hlock guards the bufs/maps/context lists below */
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %pK %s %llx\n", "buf:",
					buf, "buf->virt:", buf->virt,
					"buf->phys:", buf->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF MAPS:");
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %lx %s %llx\n",
					"map:", map,
					"map->va:", map->va,
					"map->phys:", map->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF PENDING SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %u %s %u %s %u\n",
					"smqcontext:", ictx,
					"sc:", ictx->sc,
					"tid:", ictx->pid,
					"handle", ictx->rpra->h);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF INTERRUPTED SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %u %s %u %s %u\n",
					"smqcontext:", ictx,
					"sc:", ictx->sc,
					"tid:", ictx->pid,
					"handle", ictx->rpra->h);
		}
		spin_unlock(&fl->hlock);
	}
	/* scnprintf already clamps, but cap len defensively */
	if (len > DEBUGFS_SIZE)
		len = DEBUGFS_SIZE;
	ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
	kfree(fileinfo);
bail:
	return ret;
}
2541
/* read-only debugfs interface exposing driver/client state */
static const struct file_operations debugfs_fops = {
	.open = fastrpc_debugfs_open,
	.read = fastrpc_debugfs_read,
};
Sathish Ambley36849af2017-02-02 09:35:55 -08002546static int fastrpc_channel_open(struct fastrpc_file *fl)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002547{
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002548 struct fastrpc_apps *me = &gfa;
Sathish Ambley36849af2017-02-02 09:35:55 -08002549 int cid, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002550
Sathish Ambley36849af2017-02-02 09:35:55 -08002551 VERIFY(err, fl && fl->sctx);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002552 if (err)
c_mtharu314a4202017-11-15 22:09:17 +05302553 return err;
Sathish Ambley36849af2017-02-02 09:35:55 -08002554 cid = fl->cid;
c_mtharu314a4202017-11-15 22:09:17 +05302555 VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
2556 if (err)
2557 goto bail;
2558 mutex_lock(&me->channel[cid].mut);
c_mtharue1a5ce12017-10-13 20:47:09 +05302559 if (me->channel[cid].ssrcount !=
2560 me->channel[cid].prevssrcount) {
2561 if (!me->channel[cid].issubsystemup) {
2562 VERIFY(err, 0);
2563 if (err)
2564 goto bail;
2565 }
2566 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002567 fl->ssrcount = me->channel[cid].ssrcount;
2568 if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
c_mtharue1a5ce12017-10-13 20:47:09 +05302569 (me->channel[cid].chan == NULL)) {
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05302570 VERIFY(err, 0 == fastrpc_glink_register(cid, me));
2571 if (err)
2572 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002573 VERIFY(err, 0 == fastrpc_glink_open(cid));
2574 if (err)
2575 goto bail;
2576
c_mtharu314a4202017-11-15 22:09:17 +05302577 mutex_unlock(&me->channel[cid].mut);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302578 VERIFY(err,
2579 wait_for_completion_timeout(&me->channel[cid].workport,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002580 RPC_TIMEOUT));
c_mtharu314a4202017-11-15 22:09:17 +05302581 mutex_lock(&me->channel[cid].mut);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002582 if (err) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302583 me->channel[cid].chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002584 goto bail;
2585 }
2586 kref_init(&me->channel[cid].kref);
2587 pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
2588 MAJOR(me->dev_no), cid);
c_mtharu314a4202017-11-15 22:09:17 +05302589 err = glink_queue_rx_intent(me->channel[cid].chan, NULL,
2590 FASTRPC_GLINK_INTENT_LEN);
2591 err |= glink_queue_rx_intent(me->channel[cid].chan, NULL,
2592 FASTRPC_GLINK_INTENT_LEN);
Bruce Levy34c3c1c2017-07-31 17:08:58 -07002593 if (err)
Tharun Kumar Merugu88ba9252017-08-09 12:15:41 +05302594 pr_warn("adsprpc: initial intent fail for %d err %d\n",
2595 cid, err);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002596 if (me->channel[cid].ssrcount !=
2597 me->channel[cid].prevssrcount) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302598 if (fastrpc_mmap_remove_ssr(fl))
2599 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002600 me->channel[cid].prevssrcount =
2601 me->channel[cid].ssrcount;
2602 }
2603 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002604
2605bail:
c_mtharu314a4202017-11-15 22:09:17 +05302606 mutex_unlock(&me->channel[cid].mut);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002607 return err;
2608}
2609
Sathish Ambley36849af2017-02-02 09:35:55 -08002610static int fastrpc_device_open(struct inode *inode, struct file *filp)
2611{
2612 int err = 0;
Sathish Ambley567012b2017-03-06 11:55:04 -08002613 struct dentry *debugfs_file;
c_mtharue1a5ce12017-10-13 20:47:09 +05302614 struct fastrpc_file *fl = NULL;
Sathish Ambley36849af2017-02-02 09:35:55 -08002615 struct fastrpc_apps *me = &gfa;
2616
c_mtharue1a5ce12017-10-13 20:47:09 +05302617 VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
Sathish Ambley36849af2017-02-02 09:35:55 -08002618 if (err)
2619 return err;
Sathish Ambley567012b2017-03-06 11:55:04 -08002620 debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
2621 fl, &debugfs_fops);
Sathish Ambley36849af2017-02-02 09:35:55 -08002622 context_list_ctor(&fl->clst);
2623 spin_lock_init(&fl->hlock);
2624 INIT_HLIST_HEAD(&fl->maps);
2625 INIT_HLIST_HEAD(&fl->bufs);
2626 INIT_HLIST_NODE(&fl->hn);
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302627 fl->sessionid = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08002628 fl->tgid = current->tgid;
2629 fl->apps = me;
2630 fl->mode = FASTRPC_MODE_SERIAL;
2631 fl->cid = -1;
Sathish Ambley567012b2017-03-06 11:55:04 -08002632 if (debugfs_file != NULL)
2633 fl->debugfs_file = debugfs_file;
2634 memset(&fl->perf, 0, sizeof(fl->perf));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302635 fl->qos_request = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08002636 filp->private_data = fl;
2637 spin_lock(&me->hlock);
2638 hlist_add_head(&fl->hn, &me->drivers);
2639 spin_unlock(&me->hlock);
2640 return 0;
2641}
2642
/*
 * Bind the client to a channel on first use and report whether the
 * channel's session has SMMU translation enabled.
 *
 * On entry *info holds the requested channel id (only consumed the first
 * time, while fl->cid is still -1); on success *info is overwritten with
 * 1 if the allocated session uses the SMMU, 0 otherwise.
 */
static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
{
	int err = 0;
	uint32_t cid;

	VERIFY(err, fl != NULL);
	if (err)
		goto bail;
	/* One-shot channel binding: later calls keep the original cid */
	if (fl->cid == -1) {
		cid = *info;
		/* Validate user-supplied channel id before indexing */
		VERIFY(err, cid < NUM_CHANNELS);
		if (err)
			goto bail;
		fl->cid = cid;
		/* Snapshot the SSR generation so stale clients can be
		 * detected after a subsystem restart.
		 */
		fl->ssrcount = fl->apps->channel[cid].ssrcount;
		VERIFY(err, !fastrpc_session_alloc_locked(
				&fl->apps->channel[cid], 0, &fl->sctx));
		if (err)
			goto bail;
	}
	VERIFY(err, fl->sctx != NULL);
	if (err)
		goto bail;
	*info = (fl->sctx->smmu.enabled ? 1 : 0);
bail:
	return err;
}
2670
/*
 * Handle FASTRPC_IOCTL_CONTROL requests. Currently only
 * FASTRPC_CONTROL_LATENCY is supported: it adds (first call) or updates
 * (subsequent calls) a PM QoS CPU/DMA latency request for this client.
 * Returns 0 on success, -ENOTTY for unknown requests.
 */
static int fastrpc_internal_control(struct fastrpc_file *fl,
					struct fastrpc_ioctl_control *cp)
{
	int err = 0;
	int latency;

	VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(cp));
	if (err)
		goto bail;

	switch (cp->req) {
	case FASTRPC_CONTROL_LATENCY:
		/* enable -> DT-provided latency cap; disable -> default
		 * (i.e. drop the constraint without removing the request)
		 */
		latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
			fl->apps->latency : PM_QOS_DEFAULT_VALUE;
		/* latency == 0 means the DT property was absent/zero */
		VERIFY(err, latency != 0);
		if (err)
			goto bail;
		/* qos_request tracks whether pm_qos_add_request() has
		 * already run for this fl; adding twice is invalid.
		 */
		if (!fl->qos_request) {
			pm_qos_add_request(&fl->pm_qos_req,
				PM_QOS_CPU_DMA_LATENCY, latency);
			fl->qos_request = 1;
		} else
			pm_qos_update_request(&fl->pm_qos_req, latency);
		break;
	default:
		err = -ENOTTY;
		break;
	}
bail:
	return err;
}
2705
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002706static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
2707 unsigned long ioctl_param)
2708{
2709 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07002710 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002711 struct fastrpc_ioctl_mmap mmap;
2712 struct fastrpc_ioctl_munmap munmap;
c_mtharu7bd6a422017-10-17 18:15:37 +05302713 struct fastrpc_ioctl_munmap_fd munmap_fd;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002714 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002715 struct fastrpc_ioctl_perf perf;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302716 struct fastrpc_ioctl_control cp;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002717 } p;
2718 void *param = (char *)ioctl_param;
2719 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2720 int size = 0, err = 0;
2721 uint32_t info;
2722
c_mtharue1a5ce12017-10-13 20:47:09 +05302723 p.inv.fds = NULL;
2724 p.inv.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002725 p.inv.crc = NULL;
tharun kumar9f899ea2017-07-03 17:07:03 +05302726 spin_lock(&fl->hlock);
2727 if (fl->file_close == 1) {
2728 err = EBADF;
2729 pr_warn("ADSPRPC: fastrpc_device_release is happening, So not sending any new requests to DSP");
2730 spin_unlock(&fl->hlock);
2731 goto bail;
2732 }
2733 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002734
2735 switch (ioctl_num) {
2736 case FASTRPC_IOCTL_INVOKE:
2737 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07002738 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002739 case FASTRPC_IOCTL_INVOKE_FD:
2740 if (!size)
2741 size = sizeof(struct fastrpc_ioctl_invoke_fd);
2742 /* fall through */
2743 case FASTRPC_IOCTL_INVOKE_ATTRS:
2744 if (!size)
2745 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07002746 /* fall through */
2747 case FASTRPC_IOCTL_INVOKE_CRC:
2748 if (!size)
2749 size = sizeof(struct fastrpc_ioctl_invoke_crc);
c_mtharue1a5ce12017-10-13 20:47:09 +05302750 K_COPY_FROM_USER(err, 0, &p.inv, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002751 if (err)
2752 goto bail;
2753 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
2754 0, &p.inv)));
2755 if (err)
2756 goto bail;
2757 break;
2758 case FASTRPC_IOCTL_MMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05302759 K_COPY_FROM_USER(err, 0, &p.mmap, param,
2760 sizeof(p.mmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05302761 if (err)
2762 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002763 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
2764 if (err)
2765 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302766 K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002767 if (err)
2768 goto bail;
2769 break;
2770 case FASTRPC_IOCTL_MUNMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05302771 K_COPY_FROM_USER(err, 0, &p.munmap, param,
2772 sizeof(p.munmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05302773 if (err)
2774 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002775 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
2776 &p.munmap)));
2777 if (err)
2778 goto bail;
2779 break;
c_mtharu7bd6a422017-10-17 18:15:37 +05302780 case FASTRPC_IOCTL_MUNMAP_FD:
2781 K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
2782 sizeof(p.munmap_fd));
2783 if (err)
2784 goto bail;
2785 VERIFY(err, 0 == (err = fastrpc_internal_munmap_fd(fl,
2786 &p.munmap_fd)));
2787 if (err)
2788 goto bail;
2789 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002790 case FASTRPC_IOCTL_SETMODE:
2791 switch ((uint32_t)ioctl_param) {
2792 case FASTRPC_MODE_PARALLEL:
2793 case FASTRPC_MODE_SERIAL:
2794 fl->mode = (uint32_t)ioctl_param;
2795 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002796 case FASTRPC_MODE_PROFILE:
2797 fl->profile = (uint32_t)ioctl_param;
2798 break;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302799 case FASTRPC_MODE_SESSION:
2800 fl->sessionid = 1;
2801 fl->tgid |= (1 << SESSION_ID_INDEX);
2802 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002803 default:
2804 err = -ENOTTY;
2805 break;
2806 }
2807 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002808 case FASTRPC_IOCTL_GETPERF:
c_mtharue1a5ce12017-10-13 20:47:09 +05302809 K_COPY_FROM_USER(err, 0, &p.perf,
2810 param, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002811 if (err)
2812 goto bail;
2813 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
2814 if (p.perf.keys) {
2815 char *keys = PERF_KEYS;
2816
c_mtharue1a5ce12017-10-13 20:47:09 +05302817 K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
2818 keys, strlen(keys)+1);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002819 if (err)
2820 goto bail;
2821 }
2822 if (p.perf.data) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302823 K_COPY_TO_USER(err, 0, (void *)p.perf.data,
2824 &fl->perf, sizeof(fl->perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002825 }
c_mtharue1a5ce12017-10-13 20:47:09 +05302826 K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002827 if (err)
2828 goto bail;
2829 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302830 case FASTRPC_IOCTL_CONTROL:
c_mtharue1a5ce12017-10-13 20:47:09 +05302831 K_COPY_FROM_USER(err, 0, &p.cp, param,
2832 sizeof(p.cp));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302833 if (err)
2834 goto bail;
2835 VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
2836 if (err)
2837 goto bail;
2838 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002839 case FASTRPC_IOCTL_GETINFO:
c_mtharue1a5ce12017-10-13 20:47:09 +05302840 K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
Sathish Ambley36849af2017-02-02 09:35:55 -08002841 if (err)
2842 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002843 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
2844 if (err)
2845 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302846 K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002847 if (err)
2848 goto bail;
2849 break;
2850 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002851 p.init.attrs = 0;
2852 p.init.siglen = 0;
2853 size = sizeof(struct fastrpc_ioctl_init);
2854 /* fall through */
2855 case FASTRPC_IOCTL_INIT_ATTRS:
2856 if (!size)
2857 size = sizeof(struct fastrpc_ioctl_init_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +05302858 K_COPY_FROM_USER(err, 0, &p.init, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002859 if (err)
2860 goto bail;
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05302861 VERIFY(err, p.init.init.filelen >= 0 &&
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05302862 p.init.init.filelen < INIT_FILELEN_MAX);
2863 if (err)
2864 goto bail;
2865 VERIFY(err, p.init.init.memlen >= 0 &&
2866 p.init.init.memlen < INIT_MEMLEN_MAX);
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05302867 if (err)
2868 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002869 VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
2870 if (err)
2871 goto bail;
2872 break;
2873
2874 default:
2875 err = -ENOTTY;
2876 pr_info("bad ioctl: %d\n", ioctl_num);
2877 break;
2878 }
2879 bail:
2880 return err;
2881}
2882
/*
 * Subsystem-restart (SSR) notifier for one fastrpc channel.
 *
 * SUBSYS_BEFORE_SHUTDOWN: bump the SSR generation, mark the subsystem
 * down, and tear down the glink channel under the channel mutex.
 * SUBSYS_RAMDUMP_NOTIFICATION: arm remote-heap ramdump collection for
 * channel 0 if a ramdump device exists and dumps are enabled.
 * SUBSYS_AFTER_POWERUP: mark the subsystem back up.
 */
static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
					unsigned long code,
					void *data)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	struct notif_data *notifdata = data;
	int cid;

	/* Recover the channel (and its index) from the embedded nb */
	ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
	cid = ctx - &me->channel[0];
	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		mutex_lock(&ctx->mut);
		ctx->ssrcount++;
		ctx->issubsystemup = 0;
		pr_info("'restart notifier: /dev/%s c %d %d'\n",
			 gcinfo[cid].name, MAJOR(me->dev_no), cid);
		if (ctx->chan)
			fastrpc_glink_stop(cid);
		mutex_unlock(&ctx->mut);
		/* cid 0 hosts the static PD; force re-attach after SSR */
		if (cid == 0)
			me->staticpd_flags = 0;
		/* Wake clients blocked on this channel so they can bail */
		fastrpc_notify_drivers(me, cid);
	} else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
		if (me->channel[0].remoteheap_ramdump_dev &&
				notifdata->enable_ramdump) {
			me->channel[0].ramdumpenabled = 1;
		}
	} else if (code == SUBSYS_AFTER_POWERUP) {
		ctx->issubsystemup = 1;
	}

	return NOTIFY_DONE;
}
2917
/* Character-device operations for /dev/<channel> nodes */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};
2924
/* Devicetree compatibles handled by fastrpc_probe()/fastrpc_cb_probe() */
static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};
2932
/*
 * Probe one SMMU context bank ("qcom,msm-fastrpc-compute-cb" node):
 * match its "label" to a channel, create and attach an ARM IOMMU
 * mapping for a new session on that channel, and mark the session
 * SMMU-enabled. Returns 0 on success or a VERIFY-derived error.
 */
static int fastrpc_cb_probe(struct device *dev)
{
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	struct of_phandle_args iommuspec;
	const char *name;
	/* Default IOVA window base for non-secure context banks */
	unsigned int start = 0x80000000;
	int err = 0, i;
	int secure_vmid = VMID_CP_PIXEL;

	VERIFY(err, NULL != (name = of_get_property(dev->of_node,
					 "label", NULL)));
	if (err)
		goto bail;
	/* Find the channel whose name matches this CB's label */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		if (!strcmp(name, gcinfo[i].name))
			break;
	}
	VERIFY(err, i < NUM_CHANNELS);
	if (err)
		goto bail;
	chan = &gcinfo[i];
	VERIFY(err, chan->sesscount < NUM_SESSIONS);
	if (err)
		goto bail;

	VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
						"#iommu-cells", 0, &iommuspec));
	if (err)
		goto bail;
	sess = &chan->session[chan->sesscount];
	/* Low nibble of the first iommus arg is the context-bank id */
	sess->smmu.cb = iommuspec.args[0] & 0xf;
	sess->used = 0;
	sess->smmu.coherent = of_property_read_bool(dev->of_node,
						"dma-coherent");
	sess->smmu.secure = of_property_read_bool(dev->of_node,
						"qcom,secure-context-bank");
	/* Secure CBs use a lower IOVA base */
	if (sess->smmu.secure)
		start = 0x60000000;
	VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
				arm_iommu_create_mapping(&platform_bus_type,
						start, 0x78000000)));
	if (err)
		goto bail;

	if (sess->smmu.secure)
		iommu_domain_set_attr(sess->smmu.mapping->domain,
				DOMAIN_ATTR_SECURE_VMID,
				&secure_vmid);

	VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
	if (err)
		goto bail;
	sess->smmu.dev = dev;
	sess->smmu.enabled = 1;
	chan->sesscount++;
	/* NOTE(review): re-created on every CB probe; debugfs appears to
	 * tolerate the duplicate name but only the first node is used.
	 */
	debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
		NULL, &debugfs_fops);
bail:
	return err;
}
2996
/*
 * Platform-driver probe. The node's compatible string selects one of
 * three personalities:
 *  - "qcom,msm-fastrpc-compute": read channel-0 remote-heap VMID and
 *    the RPC latency budget from DT, then fall through to populate
 *    child nodes (which triggers the other probes);
 *  - "qcom,msm-fastrpc-compute-cb": an SMMU context bank, delegated to
 *    fastrpc_cb_probe();
 *  - "qcom,msm-adsprpc-mem-region": locate the ADSP ION heap's CMA
 *    region and hyp-assign it to HLOS + the three Q6 subsystems.
 */
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;


	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute")) {
		of_property_read_u32(dev->of_node, "qcom,adsp-remoteheap-vmid",
			&gcinfo[0].rhvmid);

		pr_info("ADSPRPC : vmids adsp=%d\n", gcinfo[0].rhvmid);

		of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
			&me->latency);
	}
	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;
		/* Walk the ION heap children looking for the ADSP heap's
		 * CMA base/size; range.addr stays 0 if not found.
		 */
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
						VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
						PERM_READ | PERM_WRITE | PERM_EXEC,
						PERM_READ | PERM_WRITE | PERM_EXEC,
						PERM_READ | PERM_WRITE | PERM_EXEC,
						};

			/* Share the region with modem/SSC/ADSP via the
			 * hypervisor; HLOS keeps access too.
			 */
			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
					srcVM, 1, destVM, destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					  fastrpc_match_table,
					  NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}
3071
/*
 * Tear down all channels: drop the glink channel reference (closing it
 * if this was the last ref), destroy the channel mutex, then detach and
 * release every session's IOMMU mapping. Pointers are NULLed after each
 * release so the function is safe against partially-initialised state.
 */
static void fastrpc_deinit(void)
{
	struct fastrpc_channel_ctx *chan = gcinfo;
	int i, j;

	for (i = 0; i < NUM_CHANNELS; i++, chan++) {
		if (chan->chan) {
			/* Releases chan->mut if the kref hits zero */
			kref_put_mutex(&chan->kref,
					fastrpc_channel_close, &chan->mut);
			chan->chan = NULL;
			mutex_destroy(&chan->mut);
		}
		for (j = 0; j < NUM_SESSIONS; j++) {
			struct fastrpc_session_ctx *sess = &chan->session[j];
			if (sess->smmu.dev) {
				arm_iommu_detach_device(sess->smmu.dev);
				sess->smmu.dev = NULL;
			}
			if (sess->smmu.mapping) {
				arm_iommu_release_mapping(sess->smmu.mapping);
				sess->smmu.mapping = NULL;
			}
		}
	}
}
3097
/* Platform driver; matches all fastrpc DT compatibles above */
static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};
3106
/*
 * Module init: register the platform driver, allocate the char-device
 * region, create the class and a single device node (minor 0), register
 * an SSR notifier per channel, create the ION client, and set up the
 * debugfs root. Error paths unwind in reverse order via the labelled
 * bail chain.
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	struct device *dev = NULL;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	/* NOTE(review): region reserves NUM_CHANNELS minors but only one
	 * cdev/device (minor 0) is added below — confirm intentional.
	 */
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				1));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	dev = device_create(me->class, NULL,
				MKDEV(MAJOR(me->dev_no), 0),
				NULL, gcinfo[0].name);
	VERIFY(err, !IS_ERR_OR_NULL(dev));
	if (err)
		goto device_create_bail;
	/* All channels share the single device node created above */
	for (i = 0; i < NUM_CHANNELS; i++) {
		me->channel[i].dev = dev;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].issubsystemup = 1;
		me->channel[i].ramdumpenabled = 0;
		me->channel[i].remoteheap_ramdump_dev = NULL;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	debugfs_root = debugfs_create_dir("adsprpc", NULL);
	return 0;
device_create_bail:
	/* Also reached on ION failure: unregister any SSR notifiers */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (me->channel[i].handle)
			subsys_notif_unregister_notifier(me->channel[i].handle,
							&me->channel[i].nb);
	}
	if (!IS_ERR_OR_NULL(dev))
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}
3178
/*
 * Module exit: release all client files and channel resources, then
 * tear down the device nodes, class, cdev, chrdev region, ION client,
 * and the debugfs tree.
 */
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;
	int i;

	fastrpc_file_list_dtor(me);
	fastrpc_deinit();
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		/* NOTE(review): destroys minor i per channel, but init
		 * only created minor 0 — confirm extra calls are benign.
		 */
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						&me->channel[i].nb);
	}
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
	ion_client_destroy(me->client);
	debugfs_remove_recursive(debugfs_root);
}
3199
/* late_initcall: run after subsystem-notifier and ION infrastructure */
late_initcall(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");