blob: 1a11c037dbb00d7f6d71f2d4343e9a502f70016c [file] [log] [blame]
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001/*
Sathish Ambleyae5ee542017-01-16 22:24:23 -08002 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
29#include <soc/qcom/subsystem_notif.h>
30#include <soc/qcom/subsystem_restart.h>
31#include <linux/scatterlist.h>
32#include <linux/fs.h>
33#include <linux/uaccess.h>
34#include <linux/device.h>
35#include <linux/of.h>
36#include <linux/of_address.h>
37#include <linux/of_platform.h>
38#include <linux/dma-contiguous.h>
39#include <linux/cma.h>
40#include <linux/iommu.h>
41#include <linux/kref.h>
42#include <linux/sort.h>
43#include <linux/msm_dma_iommu_mapping.h>
44#include <asm/dma-iommu.h>
c_mtharue1a5ce12017-10-13 20:47:09 +053045#include <soc/qcom/scm.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070046#include "adsprpc_compat.h"
47#include "adsprpc_shared.h"
c_mtharue1a5ce12017-10-13 20:47:09 +053048#include <soc/qcom/ramdump.h>
Sathish Ambley1ca68232017-01-19 10:32:55 -080049#include <linux/debugfs.h>
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053050#include <linux/pm_qos.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070051#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
52#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
53#define TZ_PIL_AUTH_QDSP6_PROC 1
c_mtharue1a5ce12017-10-13 20:47:09 +053054#define ADSP_MMAP_HEAP_ADDR 4
55#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
Sathish Ambley69e1ab02016-10-18 10:28:15 -070056#define FASTRPC_ENOSUCH 39
57#define VMID_SSC_Q6 5
58#define VMID_ADSP_Q6 6
Sathish Ambley1ca68232017-01-19 10:32:55 -080059#define DEBUGFS_SIZE 1024
Sathish Ambley69e1ab02016-10-18 10:28:15 -070060
61#define RPC_TIMEOUT (5 * HZ)
62#define BALIGN 128
63#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
64#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
Sathish Ambleybae51902017-07-03 15:00:49 -070065#define M_FDLIST (16)
66#define M_CRCLIST (64)
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +053067#define SESSION_ID_INDEX (30)
c_mtharufdac6892017-10-12 13:09:01 +053068#define FASTRPC_CTX_MAGIC (0xbeeddeed)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070069
70#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
71
72#define FASTRPC_LINK_STATE_DOWN (0x0)
73#define FASTRPC_LINK_STATE_UP (0x1)
74#define FASTRPC_LINK_DISCONNECTED (0x0)
75#define FASTRPC_LINK_CONNECTING (0x1)
76#define FASTRPC_LINK_CONNECTED (0x3)
77#define FASTRPC_LINK_DISCONNECTING (0x7)
78
Sathish Ambleya21b5b52017-01-11 16:11:01 -080079#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
80#define FASTRPC_STATIC_HANDLE_LISTENER (3)
81#define FASTRPC_STATIC_HANDLE_MAX (20)
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053082#define FASTRPC_LATENCY_CTRL_ENB (1)
Sathish Ambleya21b5b52017-01-11 16:11:01 -080083
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +053084#define INIT_FILELEN_MAX (2*1024*1024)
85#define INIT_MEMLEN_MAX (8*1024*1024)
86
Sathish Ambleya21b5b52017-01-11 16:11:01 -080087#define PERF_END (void)0
88
89#define PERF(enb, cnt, ff) \
90 {\
91 struct timespec startT = {0};\
92 if (enb) {\
93 getnstimeofday(&startT);\
94 } \
95 ff ;\
96 if (enb) {\
97 cnt += getnstimediff(&startT);\
98 } \
99 }
100
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700101static int fastrpc_glink_open(int cid);
102static void fastrpc_glink_close(void *chan, int cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -0800103static struct dentry *debugfs_root;
104static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700105
106static inline uint64_t buf_page_start(uint64_t buf)
107{
108 uint64_t start = (uint64_t) buf & PAGE_MASK;
109 return start;
110}
111
112static inline uint64_t buf_page_offset(uint64_t buf)
113{
114 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
115 return offset;
116}
117
118static inline int buf_num_pages(uint64_t buf, ssize_t len)
119{
120 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
121 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
122 int nPages = end - start + 1;
123 return nPages;
124}
125
126static inline uint64_t buf_page_size(uint32_t size)
127{
128 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
129
130 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
131}
132
133static inline void *uint64_to_ptr(uint64_t addr)
134{
135 void *ptr = (void *)((uintptr_t)addr);
136
137 return ptr;
138}
139
140static inline uint64_t ptr_to_uint64(void *ptr)
141{
142 uint64_t addr = (uint64_t)((uintptr_t)ptr);
143
144 return addr;
145}
146
147struct fastrpc_file;
148
149struct fastrpc_buf {
150 struct hlist_node hn;
151 struct fastrpc_file *fl;
152 void *virt;
153 uint64_t phys;
154 ssize_t size;
155};
156
157struct fastrpc_ctx_lst;
158
159struct overlap {
160 uintptr_t start;
161 uintptr_t end;
162 int raix;
163 uintptr_t mstart;
164 uintptr_t mend;
165 uintptr_t offset;
166};
167
168struct smq_invoke_ctx {
169 struct hlist_node hn;
170 struct completion work;
171 int retval;
172 int pid;
173 int tgid;
174 remote_arg_t *lpra;
175 remote_arg64_t *rpra;
176 int *fds;
177 unsigned int *attrs;
178 struct fastrpc_mmap **maps;
179 struct fastrpc_buf *buf;
180 ssize_t used;
181 struct fastrpc_file *fl;
182 uint32_t sc;
183 struct overlap *overs;
184 struct overlap **overps;
185 struct smq_msg msg;
Sathish Ambleybae51902017-07-03 15:00:49 -0700186 uint32_t *crc;
c_mtharufdac6892017-10-12 13:09:01 +0530187 unsigned int magic;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700188};
189
190struct fastrpc_ctx_lst {
191 struct hlist_head pending;
192 struct hlist_head interrupted;
193};
194
195struct fastrpc_smmu {
c_mtharue1a5ce12017-10-13 20:47:09 +0530196 struct device *dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700197 struct dma_iommu_mapping *mapping;
198 int cb;
199 int enabled;
200 int faults;
201 int secure;
202 int coherent;
203};
204
205struct fastrpc_session_ctx {
206 struct device *dev;
207 struct fastrpc_smmu smmu;
208 int used;
209};
210
211struct fastrpc_glink_info {
212 int link_state;
213 int port_state;
214 struct glink_open_config cfg;
215 struct glink_link_info link_info;
216 void *link_notify_handle;
217};
218
219struct fastrpc_channel_ctx {
220 char *name;
221 char *subsys;
222 void *chan;
223 struct device *dev;
224 struct fastrpc_session_ctx session[NUM_SESSIONS];
225 struct completion work;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +0530226 struct completion workport;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700227 struct notifier_block nb;
228 struct kref kref;
229 int sesscount;
230 int ssrcount;
231 void *handle;
232 int prevssrcount;
c_mtharue1a5ce12017-10-13 20:47:09 +0530233 int issubsystemup;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700234 int vmid;
c_mtharue1a5ce12017-10-13 20:47:09 +0530235 int ramdumpenabled;
236 void *remoteheap_ramdump_dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700237 struct fastrpc_glink_info link;
238};
239
240struct fastrpc_apps {
241 struct fastrpc_channel_ctx *channel;
242 struct cdev cdev;
243 struct class *class;
244 struct mutex smd_mutex;
245 struct smq_phy_page range;
246 struct hlist_head maps;
c_mtharue1a5ce12017-10-13 20:47:09 +0530247 uint32_t staticpd_flags;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700248 dev_t dev_no;
249 int compat;
250 struct hlist_head drivers;
251 spinlock_t hlock;
252 struct ion_client *client;
253 struct device *dev;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +0530254 unsigned int latency;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700255};
256
257struct fastrpc_mmap {
258 struct hlist_node hn;
259 struct fastrpc_file *fl;
260 struct fastrpc_apps *apps;
261 int fd;
262 uint32_t flags;
263 struct dma_buf *buf;
264 struct sg_table *table;
265 struct dma_buf_attachment *attach;
266 struct ion_handle *handle;
267 uint64_t phys;
268 ssize_t size;
c_mtharue1a5ce12017-10-13 20:47:09 +0530269 uintptr_t __user va;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700270 ssize_t len;
271 int refs;
272 uintptr_t raddr;
273 int uncached;
274 int secure;
275 uintptr_t attr;
276};
277
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800278struct fastrpc_perf {
279 int64_t count;
280 int64_t flush;
281 int64_t map;
282 int64_t copy;
283 int64_t link;
284 int64_t getargs;
285 int64_t putargs;
286 int64_t invargs;
287 int64_t invoke;
288};
289
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700290struct fastrpc_file {
291 struct hlist_node hn;
292 spinlock_t hlock;
293 struct hlist_head maps;
294 struct hlist_head bufs;
295 struct fastrpc_ctx_lst clst;
296 struct fastrpc_session_ctx *sctx;
297 struct fastrpc_session_ctx *secsctx;
298 uint32_t mode;
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800299 uint32_t profile;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +0530300 int sessionid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700301 int tgid;
302 int cid;
303 int ssrcount;
304 int pd;
tharun kumar9f899ea2017-07-03 17:07:03 +0530305 int file_close;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700306 struct fastrpc_apps *apps;
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800307 struct fastrpc_perf perf;
Sathish Ambley1ca68232017-01-19 10:32:55 -0800308 struct dentry *debugfs_file;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +0530309 struct pm_qos_request pm_qos_req;
310 int qos_request;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700311};
312
313static struct fastrpc_apps gfa;
314
315static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
316 {
317 .name = "adsprpc-smd",
318 .subsys = "adsp",
319 .link.link_info.edge = "lpass",
320 .link.link_info.transport = "smem",
321 },
322 {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700323 .name = "mdsprpc-smd",
324 .subsys = "modem",
325 .link.link_info.edge = "mpss",
326 .link.link_info.transport = "smem",
327 },
328 {
Sathish Ambley36849af2017-02-02 09:35:55 -0800329 .name = "sdsprpc-smd",
330 .subsys = "slpi",
331 .link.link_info.edge = "dsps",
332 .link.link_info.transport = "smem",
Sathish Ambley36849af2017-02-02 09:35:55 -0800333 },
334 {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700335 .name = "cdsprpc-smd",
336 .subsys = "cdsp",
337 .link.link_info.edge = "cdsp",
338 .link.link_info.transport = "smem",
339 },
340};
341
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800342static inline int64_t getnstimediff(struct timespec *start)
343{
344 int64_t ns;
345 struct timespec ts, b;
346
347 getnstimeofday(&ts);
348 b = timespec_sub(ts, *start);
349 ns = timespec_to_ns(&b);
350 return ns;
351}
352
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700353static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
354{
c_mtharue1a5ce12017-10-13 20:47:09 +0530355 struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700356 int vmid;
357
358 if (!fl)
359 return;
360 if (cache) {
361 spin_lock(&fl->hlock);
362 hlist_add_head(&buf->hn, &fl->bufs);
363 spin_unlock(&fl->hlock);
364 return;
365 }
366 if (!IS_ERR_OR_NULL(buf->virt)) {
367 int destVM[1] = {VMID_HLOS};
368 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
369
370 if (fl->sctx->smmu.cb)
371 buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
372 vmid = fl->apps->channel[fl->cid].vmid;
373 if (vmid) {
374 int srcVM[2] = {VMID_HLOS, vmid};
375
376 hyp_assign_phys(buf->phys, buf_page_size(buf->size),
377 srcVM, 2, destVM, destVMperm, 1);
378 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530379 dma_free_coherent(fl->sctx->smmu.dev, buf->size, buf->virt,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700380 buf->phys);
381 }
382 kfree(buf);
383}
384
385static void fastrpc_buf_list_free(struct fastrpc_file *fl)
386{
387 struct fastrpc_buf *buf, *free;
388
389 do {
390 struct hlist_node *n;
391
c_mtharue1a5ce12017-10-13 20:47:09 +0530392 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700393 spin_lock(&fl->hlock);
394 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
395 hlist_del_init(&buf->hn);
396 free = buf;
397 break;
398 }
399 spin_unlock(&fl->hlock);
400 if (free)
401 fastrpc_buf_free(free, 0);
402 } while (free);
403}
404
405static void fastrpc_mmap_add(struct fastrpc_mmap *map)
406{
c_mtharue1a5ce12017-10-13 20:47:09 +0530407 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
408 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
409 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700410
c_mtharue1a5ce12017-10-13 20:47:09 +0530411 spin_lock(&me->hlock);
412 hlist_add_head(&map->hn, &me->maps);
413 spin_unlock(&me->hlock);
414 } else {
415 struct fastrpc_file *fl = map->fl;
416
417 spin_lock(&fl->hlock);
418 hlist_add_head(&map->hn, &fl->maps);
419 spin_unlock(&fl->hlock);
420 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700421}
422
c_mtharue1a5ce12017-10-13 20:47:09 +0530423static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
424 uintptr_t __user va, ssize_t len, int mflags, int refs,
425 struct fastrpc_mmap **ppmap)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700426{
c_mtharue1a5ce12017-10-13 20:47:09 +0530427 struct fastrpc_apps *me = &gfa;
428 struct fastrpc_mmap *match = NULL, *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700429 struct hlist_node *n;
c_mtharue1a5ce12017-10-13 20:47:09 +0530430 if (mflags == ADSP_MMAP_HEAP_ADDR ||
431 mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
432 spin_lock(&me->hlock);
433 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
434 if (va >= map->va &&
435 va + len <= map->va + map->len &&
436 map->fd == fd) {
437 if (refs)
438 map->refs++;
439 match = map;
440 break;
441 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700442 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530443 spin_unlock(&me->hlock);
444 } else {
445 spin_lock(&fl->hlock);
446 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
447 if (va >= map->va &&
448 va + len <= map->va + map->len &&
449 map->fd == fd) {
450 if (refs)
451 map->refs++;
452 match = map;
453 break;
454 }
455 }
456 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700457 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700458 if (match) {
459 *ppmap = match;
460 return 0;
461 }
462 return -ENOTTY;
463}
464
c_mtharue1a5ce12017-10-13 20:47:09 +0530465static int dma_alloc_memory(phys_addr_t *region_start, ssize_t size)
466{
467 struct fastrpc_apps *me = &gfa;
468 void *vaddr = NULL;
469
470 if (me->dev == NULL) {
471 pr_err("device adsprpc-mem is not initialized\n");
472 return -ENODEV;
473 }
474 vaddr = dma_alloc_coherent(me->dev, size, region_start, GFP_KERNEL);
475 if (!vaddr) {
476 pr_err("ADSPRPC: Failed to allocate %x remote heap memory\n",
477 (unsigned int)size);
478 return -ENOMEM;
479 }
480 return 0;
481}
482
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700483static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
484 ssize_t len, struct fastrpc_mmap **ppmap)
485{
c_mtharue1a5ce12017-10-13 20:47:09 +0530486 struct fastrpc_mmap *match = NULL, *map;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700487 struct hlist_node *n;
488 struct fastrpc_apps *me = &gfa;
489
490 spin_lock(&me->hlock);
491 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
492 if (map->raddr == va &&
493 map->raddr + map->len == va + len &&
494 map->refs == 1) {
495 match = map;
496 hlist_del_init(&map->hn);
497 break;
498 }
499 }
500 spin_unlock(&me->hlock);
501 if (match) {
502 *ppmap = match;
503 return 0;
504 }
505 spin_lock(&fl->hlock);
506 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
507 if (map->raddr == va &&
508 map->raddr + map->len == va + len &&
509 map->refs == 1) {
510 match = map;
511 hlist_del_init(&map->hn);
512 break;
513 }
514 }
515 spin_unlock(&fl->hlock);
516 if (match) {
517 *ppmap = match;
518 return 0;
519 }
520 return -ENOTTY;
521}
522
523static void fastrpc_mmap_free(struct fastrpc_mmap *map)
524{
c_mtharue1a5ce12017-10-13 20:47:09 +0530525 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700526 struct fastrpc_file *fl;
527 int vmid;
528 struct fastrpc_session_ctx *sess;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700529
530 if (!map)
531 return;
532 fl = map->fl;
c_mtharue1a5ce12017-10-13 20:47:09 +0530533 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
534 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
535 spin_lock(&me->hlock);
536 map->refs--;
537 if (!map->refs)
538 hlist_del_init(&map->hn);
539 spin_unlock(&me->hlock);
540 } else {
541 spin_lock(&fl->hlock);
542 map->refs--;
543 if (!map->refs)
544 hlist_del_init(&map->hn);
545 spin_unlock(&fl->hlock);
546 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700547 if (map->refs > 0)
548 return;
c_mtharue1a5ce12017-10-13 20:47:09 +0530549 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
550 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700551
c_mtharue1a5ce12017-10-13 20:47:09 +0530552 if (me->dev == NULL) {
553 pr_err("failed to free remote heap allocation\n");
554 return;
555 }
556 if (map->phys) {
557 dma_free_coherent(me->dev, map->size,
558 &(map->va), map->phys);
559 }
560 } else {
561 int destVM[1] = {VMID_HLOS};
562 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
563
564 if (map->secure)
565 sess = fl->secsctx;
566 else
567 sess = fl->sctx;
568
569 if (!IS_ERR_OR_NULL(map->handle))
570 ion_free(fl->apps->client, map->handle);
571 if (sess && sess->smmu.enabled) {
572 if (map->size || map->phys)
573 msm_dma_unmap_sg(sess->smmu.dev,
574 map->table->sgl,
575 map->table->nents, DMA_BIDIRECTIONAL,
576 map->buf);
577 }
578 vmid = fl->apps->channel[fl->cid].vmid;
579 if (vmid && map->phys) {
580 int srcVM[2] = {VMID_HLOS, vmid};
581
582 hyp_assign_phys(map->phys, buf_page_size(map->size),
583 srcVM, 2, destVM, destVMperm, 1);
584 }
585
586 if (!IS_ERR_OR_NULL(map->table))
587 dma_buf_unmap_attachment(map->attach, map->table,
588 DMA_BIDIRECTIONAL);
589 if (!IS_ERR_OR_NULL(map->attach))
590 dma_buf_detach(map->buf, map->attach);
591 if (!IS_ERR_OR_NULL(map->buf))
592 dma_buf_put(map->buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700593 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700594 kfree(map);
595}
596
597static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
598 struct fastrpc_session_ctx **session);
599
600static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
c_mtharue1a5ce12017-10-13 20:47:09 +0530601 unsigned int attr, uintptr_t __user va, ssize_t len, int mflags,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700602 struct fastrpc_mmap **ppmap)
603{
c_mtharue1a5ce12017-10-13 20:47:09 +0530604 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700605 struct fastrpc_session_ctx *sess;
606 struct fastrpc_apps *apps = fl->apps;
607 int cid = fl->cid;
608 struct fastrpc_channel_ctx *chan = &apps->channel[cid];
c_mtharue1a5ce12017-10-13 20:47:09 +0530609 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700610 unsigned long attrs;
c_mtharue1a5ce12017-10-13 20:47:09 +0530611 phys_addr_t region_start = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700612 unsigned long flags;
613 int err = 0, vmid;
614
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800615 if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700616 return 0;
617 map = kzalloc(sizeof(*map), GFP_KERNEL);
618 VERIFY(err, !IS_ERR_OR_NULL(map));
619 if (err)
620 goto bail;
621 INIT_HLIST_NODE(&map->hn);
622 map->flags = mflags;
623 map->refs = 1;
624 map->fl = fl;
625 map->fd = fd;
626 map->attr = attr;
c_mtharue1a5ce12017-10-13 20:47:09 +0530627 if (mflags == ADSP_MMAP_HEAP_ADDR ||
628 mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
629 map->apps = me;
630 map->fl = NULL;
631 VERIFY(err, !dma_alloc_memory(&region_start, len));
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700632 if (err)
633 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +0530634 map->phys = (uintptr_t)region_start;
635 map->size = len;
636 map->va = (uintptr_t __user)map->phys;
637 } else {
638 VERIFY(err, !IS_ERR_OR_NULL(map->handle =
639 ion_import_dma_buf_fd(fl->apps->client, fd)));
640 if (err)
641 goto bail;
642 VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
643 &flags));
644 if (err)
645 goto bail;
646
647 map->uncached = !ION_IS_CACHED(flags);
648 if (map->attr & FASTRPC_ATTR_NOVA)
649 map->uncached = 1;
650
651 map->secure = flags & ION_FLAG_SECURE;
652 if (map->secure) {
653 if (!fl->secsctx)
654 err = fastrpc_session_alloc(chan, 1,
655 &fl->secsctx);
656 if (err)
657 goto bail;
658 }
659 if (map->secure)
660 sess = fl->secsctx;
661 else
662 sess = fl->sctx;
663 VERIFY(err, !IS_ERR_OR_NULL(sess));
664 if (err)
665 goto bail;
666 VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
667 if (err)
668 goto bail;
669 VERIFY(err, !IS_ERR_OR_NULL(map->attach =
670 dma_buf_attach(map->buf, sess->smmu.dev)));
671 if (err)
672 goto bail;
673 VERIFY(err, !IS_ERR_OR_NULL(map->table =
674 dma_buf_map_attachment(map->attach,
675 DMA_BIDIRECTIONAL)));
676 if (err)
677 goto bail;
678 if (sess->smmu.enabled) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700679 attrs = DMA_ATTR_EXEC_MAPPING;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +0530680
681 if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
682 (sess->smmu.coherent && map->uncached))
683 attrs |= DMA_ATTR_FORCE_NON_COHERENT;
684 else if (map->attr & FASTRPC_ATTR_COHERENT)
685 attrs |= DMA_ATTR_FORCE_COHERENT;
686
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700687 VERIFY(err, map->table->nents ==
c_mtharue1a5ce12017-10-13 20:47:09 +0530688 msm_dma_map_sg_attrs(sess->smmu.dev,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700689 map->table->sgl, map->table->nents,
690 DMA_BIDIRECTIONAL, map->buf, attrs));
c_mtharue1a5ce12017-10-13 20:47:09 +0530691 if (err)
692 goto bail;
693 } else {
694 VERIFY(err, map->table->nents == 1);
695 if (err)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700696 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +0530697 }
698 map->phys = sg_dma_address(map->table->sgl);
699 if (sess->smmu.cb) {
700 map->phys += ((uint64_t)sess->smmu.cb << 32);
701 map->size = sg_dma_len(map->table->sgl);
702 } else {
703 map->size = buf_page_size(len);
704 }
705 vmid = fl->apps->channel[fl->cid].vmid;
706 if (vmid) {
707 int srcVM[1] = {VMID_HLOS};
708 int destVM[2] = {VMID_HLOS, vmid};
709 int destVMperm[2] = {PERM_READ | PERM_WRITE,
710 PERM_READ | PERM_WRITE | PERM_EXEC};
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700711
c_mtharue1a5ce12017-10-13 20:47:09 +0530712 VERIFY(err, !hyp_assign_phys(map->phys,
713 buf_page_size(map->size),
714 srcVM, 1, destVM, destVMperm, 2));
715 if (err)
716 goto bail;
717 }
718 map->va = va;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700719 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700720 map->len = len;
721
722 fastrpc_mmap_add(map);
723 *ppmap = map;
724
725bail:
726 if (err && map)
727 fastrpc_mmap_free(map);
728 return err;
729}
730
731static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
732 struct fastrpc_buf **obuf)
733{
734 int err = 0, vmid;
c_mtharue1a5ce12017-10-13 20:47:09 +0530735 struct fastrpc_buf *buf = NULL, *fr = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700736 struct hlist_node *n;
737
738 VERIFY(err, size > 0);
739 if (err)
740 goto bail;
741
742 /* find the smallest buffer that fits in the cache */
743 spin_lock(&fl->hlock);
744 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
745 if (buf->size >= size && (!fr || fr->size > buf->size))
746 fr = buf;
747 }
748 if (fr)
749 hlist_del_init(&fr->hn);
750 spin_unlock(&fl->hlock);
751 if (fr) {
752 *obuf = fr;
753 return 0;
754 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530755 buf = NULL;
756 VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700757 if (err)
758 goto bail;
759 INIT_HLIST_NODE(&buf->hn);
760 buf->fl = fl;
c_mtharue1a5ce12017-10-13 20:47:09 +0530761 buf->virt = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700762 buf->phys = 0;
763 buf->size = size;
c_mtharue1a5ce12017-10-13 20:47:09 +0530764 buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700765 (void *)&buf->phys, GFP_KERNEL);
766 if (IS_ERR_OR_NULL(buf->virt)) {
767 /* free cache and retry */
768 fastrpc_buf_list_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +0530769 buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700770 (void *)&buf->phys, GFP_KERNEL);
771 VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
772 }
773 if (err)
774 goto bail;
775 if (fl->sctx->smmu.cb)
776 buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
777 vmid = fl->apps->channel[fl->cid].vmid;
778 if (vmid) {
779 int srcVM[1] = {VMID_HLOS};
780 int destVM[2] = {VMID_HLOS, vmid};
781 int destVMperm[2] = {PERM_READ | PERM_WRITE,
782 PERM_READ | PERM_WRITE | PERM_EXEC};
783
784 VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
785 srcVM, 1, destVM, destVMperm, 2));
786 if (err)
787 goto bail;
788 }
789
790 *obuf = buf;
791 bail:
792 if (err && buf)
793 fastrpc_buf_free(buf, 0);
794 return err;
795}
796
797
798static int context_restore_interrupted(struct fastrpc_file *fl,
Sathish Ambleybae51902017-07-03 15:00:49 -0700799 struct fastrpc_ioctl_invoke_crc *inv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700800 struct smq_invoke_ctx **po)
801{
802 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +0530803 struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700804 struct hlist_node *n;
805 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
806
807 spin_lock(&fl->hlock);
808 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
809 if (ictx->pid == current->pid) {
810 if (invoke->sc != ictx->sc || ictx->fl != fl)
811 err = -1;
812 else {
813 ctx = ictx;
814 hlist_del_init(&ctx->hn);
815 hlist_add_head(&ctx->hn, &fl->clst.pending);
816 }
817 break;
818 }
819 }
820 spin_unlock(&fl->hlock);
821 if (ctx)
822 *po = ctx;
823 return err;
824}
825
826#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
827static int overlap_ptr_cmp(const void *a, const void *b)
828{
829 struct overlap *pa = *((struct overlap **)a);
830 struct overlap *pb = *((struct overlap **)b);
831 /* sort with lowest starting buffer first */
832 int st = CMP(pa->start, pb->start);
833 /* sort with highest ending buffer first */
834 int ed = CMP(pb->end, pa->end);
835 return st == 0 ? ed : st;
836}
837
Sathish Ambley9466d672017-01-25 10:51:55 -0800838static int context_build_overlap(struct smq_invoke_ctx *ctx)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700839{
Sathish Ambley9466d672017-01-25 10:51:55 -0800840 int i, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700841 remote_arg_t *lpra = ctx->lpra;
842 int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
843 int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
844 int nbufs = inbufs + outbufs;
845 struct overlap max;
846
847 for (i = 0; i < nbufs; ++i) {
848 ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
849 ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
Sathish Ambley9466d672017-01-25 10:51:55 -0800850 if (lpra[i].buf.len) {
851 VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
852 if (err)
853 goto bail;
854 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700855 ctx->overs[i].raix = i;
856 ctx->overps[i] = &ctx->overs[i];
857 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530858 sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700859 max.start = 0;
860 max.end = 0;
861 for (i = 0; i < nbufs; ++i) {
862 if (ctx->overps[i]->start < max.end) {
863 ctx->overps[i]->mstart = max.end;
864 ctx->overps[i]->mend = ctx->overps[i]->end;
865 ctx->overps[i]->offset = max.end -
866 ctx->overps[i]->start;
867 if (ctx->overps[i]->end > max.end) {
868 max.end = ctx->overps[i]->end;
869 } else {
870 ctx->overps[i]->mend = 0;
871 ctx->overps[i]->mstart = 0;
872 }
873 } else {
874 ctx->overps[i]->mend = ctx->overps[i]->end;
875 ctx->overps[i]->mstart = ctx->overps[i]->start;
876 ctx->overps[i]->offset = 0;
877 max = *ctx->overps[i];
878 }
879 }
Sathish Ambley9466d672017-01-25 10:51:55 -0800880bail:
881 return err;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700882}
883
884#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
885 do {\
886 if (!(kernel))\
c_mtharue1a5ce12017-10-13 20:47:09 +0530887 VERIFY(err, 0 == copy_from_user((dst),\
888 (void const __user *)(src),\
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700889 (size)));\
890 else\
891 memmove((dst), (src), (size));\
892 } while (0)
893
894#define K_COPY_TO_USER(err, kernel, dst, src, size) \
895 do {\
896 if (!(kernel))\
c_mtharue1a5ce12017-10-13 20:47:09 +0530897 VERIFY(err, 0 == copy_to_user((void __user *)(dst), \
898 (src), (size)));\
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700899 else\
900 memmove((dst), (src), (size));\
901 } while (0)
902
903
904static void context_free(struct smq_invoke_ctx *ctx);
905
906static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -0700907 struct fastrpc_ioctl_invoke_crc *invokefd,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700908 struct smq_invoke_ctx **po)
909{
910 int err = 0, bufs, size = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +0530911 struct smq_invoke_ctx *ctx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700912 struct fastrpc_ctx_lst *clst = &fl->clst;
913 struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;
914
915 bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
916 size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
917 sizeof(*ctx->fds) * (bufs) +
918 sizeof(*ctx->attrs) * (bufs) +
919 sizeof(*ctx->overs) * (bufs) +
920 sizeof(*ctx->overps) * (bufs);
921
c_mtharue1a5ce12017-10-13 20:47:09 +0530922 VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700923 if (err)
924 goto bail;
925
926 INIT_HLIST_NODE(&ctx->hn);
927 hlist_add_fake(&ctx->hn);
928 ctx->fl = fl;
929 ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
930 ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
931 ctx->fds = (int *)(&ctx->lpra[bufs]);
932 ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
933 ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
934 ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
935
c_mtharue1a5ce12017-10-13 20:47:09 +0530936 K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700937 bufs * sizeof(*ctx->lpra));
938 if (err)
939 goto bail;
940
941 if (invokefd->fds) {
942 K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
943 bufs * sizeof(*ctx->fds));
944 if (err)
945 goto bail;
946 }
947 if (invokefd->attrs) {
948 K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
949 bufs * sizeof(*ctx->attrs));
950 if (err)
951 goto bail;
952 }
Sathish Ambleybae51902017-07-03 15:00:49 -0700953 ctx->crc = (uint32_t *)invokefd->crc;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700954 ctx->sc = invoke->sc;
Sathish Ambley9466d672017-01-25 10:51:55 -0800955 if (bufs) {
956 VERIFY(err, 0 == context_build_overlap(ctx));
957 if (err)
958 goto bail;
959 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700960 ctx->retval = -1;
961 ctx->pid = current->pid;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +0530962 ctx->tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700963 init_completion(&ctx->work);
c_mtharufdac6892017-10-12 13:09:01 +0530964 ctx->magic = FASTRPC_CTX_MAGIC;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700965
966 spin_lock(&fl->hlock);
967 hlist_add_head(&ctx->hn, &clst->pending);
968 spin_unlock(&fl->hlock);
969
970 *po = ctx;
971bail:
972 if (ctx && err)
973 context_free(ctx);
974 return err;
975}
976
/*
 * Park an in-flight invoke context whose waiter was interrupted by a
 * signal: move it from the file's pending list to its interrupted list
 * (both protected by fl->hlock) so a re-issued invoke can pick it back
 * up via context_restore_interrupted() instead of losing the RPC.
 */
static void context_save_interrupted(struct smq_invoke_ctx *ctx)
{
	struct fastrpc_ctx_lst *clst = &ctx->fl->clst;

	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	hlist_add_head(&ctx->hn, &clst->interrupted);
	spin_unlock(&ctx->fl->hlock);
	/* free the cache on power collapse */
	fastrpc_buf_list_free(ctx->fl);
}
988
/*
 * Tear down an invoke context: unlink it from its per-file list under
 * fl->hlock, drop the mmap references taken for the in/out buffer
 * arguments, free the copy buffer, invalidate the magic (so a stale
 * pointer is detectable) and free the context memory itself.
 *
 * NOTE(review): only the inbufs+outbufs maps are freed here; maps
 * created for handle arguments appear to be released elsewhere (e.g.
 * the fdlist path in put_args) — confirm against the full driver.
 */
static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	/* only buffer args carry per-argument mmap references */
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i]);
	fastrpc_buf_free(ctx->buf, 1);
	/* clear magic before freeing so use-after-free is detectable */
	ctx->magic = 0;
	kfree(ctx);
}
1003
/*
 * Deliver the remote return code to the thread blocked on this context.
 * retval must be stored before complete() so the woken waiter in
 * fastrpc_internal_invoke() reads the final value.
 */
static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}
1009
1010
1011static void fastrpc_notify_users(struct fastrpc_file *me)
1012{
1013 struct smq_invoke_ctx *ictx;
1014 struct hlist_node *n;
1015
1016 spin_lock(&me->hlock);
1017 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1018 complete(&ictx->work);
1019 }
1020 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1021 complete(&ictx->work);
1022 }
1023 spin_unlock(&me->hlock);
1024
1025}
1026
1027static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
1028{
1029 struct fastrpc_file *fl;
1030 struct hlist_node *n;
1031
1032 spin_lock(&me->hlock);
1033 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1034 if (fl->cid == cid)
1035 fastrpc_notify_users(fl);
1036 }
1037 spin_unlock(&me->hlock);
1038
1039}
1040static void context_list_ctor(struct fastrpc_ctx_lst *me)
1041{
1042 INIT_HLIST_HEAD(&me->interrupted);
1043 INIT_HLIST_HEAD(&me->pending);
1044}
1045
1046static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
1047{
1048 struct fastrpc_ctx_lst *clst = &fl->clst;
c_mtharue1a5ce12017-10-13 20:47:09 +05301049 struct smq_invoke_ctx *ictx = NULL, *ctxfree;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001050 struct hlist_node *n;
1051
1052 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301053 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001054 spin_lock(&fl->hlock);
1055 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
1056 hlist_del_init(&ictx->hn);
1057 ctxfree = ictx;
1058 break;
1059 }
1060 spin_unlock(&fl->hlock);
1061 if (ctxfree)
1062 context_free(ctxfree);
1063 } while (ctxfree);
1064 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301065 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001066 spin_lock(&fl->hlock);
1067 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
1068 hlist_del_init(&ictx->hn);
1069 ctxfree = ictx;
1070 break;
1071 }
1072 spin_unlock(&fl->hlock);
1073 if (ctxfree)
1074 context_free(ctxfree);
1075 } while (ctxfree);
1076}
1077
1078static int fastrpc_file_free(struct fastrpc_file *fl);
1079static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
1080{
1081 struct fastrpc_file *fl, *free;
1082 struct hlist_node *n;
1083
1084 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301085 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001086 spin_lock(&me->hlock);
1087 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1088 hlist_del_init(&fl->hn);
1089 free = fl;
1090 break;
1091 }
1092 spin_unlock(&me->hlock);
1093 if (free)
1094 fastrpc_file_free(free);
1095 } while (free);
1096}
1097
/*
 * Marshal the arguments of one invoke into the form the remote side
 * consumes:
 *  1. create mmaps for fd-backed (ion) buffer args and handle args;
 *  2. size a single "copy buffer" = metadata (remote args + page table
 *     + fdlist + crclist) followed by all non-ion buffer payloads,
 *     laid out using the precomputed overlap ordering (ctx->overps);
 *  3. allocate it, fill in the metadata, copy the input payloads from
 *     user space, and flush CPU caches where the SMMU is not coherent.
 *
 * @kernel: nonzero when the source buffers are kernel pointers
 *          (K_COPY_FROM_USER switches between memcpy and
 *          copy_from_user accordingly).
 * Returns 0 on success or a negative error; partially created maps are
 * released by context_free() via the caller's error path.
 */
static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
{
	remote_arg64_t *rpra;
	remote_arg_t *lpra = ctx->lpra;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages, *ipage;
	uint32_t sc = ctx->sc;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	int handles, bufs = inbufs + outbufs;
	uintptr_t args;
	ssize_t rlen = 0, copylen = 0, metalen = 0;
	int i, oix;
	int err = 0;
	int mflags = 0;
	uint64_t *fdlist;
	uint32_t *crclist;

	/* calculate size of the metadata */
	/* with rpra == NULL these helpers yield offsets from 0, i.e. sizes */
	rpra = NULL;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;

	/* map each fd-backed buffer argument; fd 0 / -1 means "no fd" */
	for (i = 0; i < bufs; ++i) {
		uintptr_t __user buf = (uintptr_t __user)lpra[i].buf.pv;
		ssize_t len = lpra[i].buf.len;

		if (ctx->fds[i] && (ctx->fds[i] != -1))
			fastrpc_mmap_create(ctx->fl, ctx->fds[i],
					ctx->attrs[i], buf, len,
					mflags, &ctx->maps[i]);
		ipage += 1;
	}
	/* handle args always get a map (no VA: FASTRPC_ATTR_NOVA) */
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	for (i = bufs; i < bufs + handles; i++) {
		VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
				FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
		if (err)
			goto bail;
		ipage += 1;
	}
	/* metadata = arg/page tables so far + fdlist + crclist */
	metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
			(sizeof(uint32_t) * M_CRCLIST);

	/* calculate len requreed for copying */
	/* non-ion args are appended after the metadata, in overlap order */
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		uintptr_t mstart, mend;
		ssize_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (ctx->maps[i])
			continue;
		/* a fresh (non-overlapping) region starts cache-aligned */
		if (ctx->overps[oix]->offset == 0)
			copylen = ALIGN(copylen, BALIGN);
		mstart = ctx->overps[oix]->mstart;
		mend = ctx->overps[oix]->mend;
		VERIFY(err, (mend - mstart) <= LONG_MAX);
		if (err)
			goto bail;
		copylen += mend - mstart;
		/* guard against signed overflow of the running total */
		VERIFY(err, copylen >= 0);
		if (err)
			goto bail;
	}
	ctx->used = copylen;

	/* allocate new buffer */
	if (copylen) {
		VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
		if (err)
			goto bail;
	}
	/*
	 * NOTE(review): ctx->buf is dereferenced unconditionally here;
	 * this relies on copylen always being > 0 (metalen includes the
	 * fdlist/crclist so it is never zero) — confirm.
	 */
	if (ctx->buf->virt && metalen <= copylen)
		memset(ctx->buf->virt, 0, metalen);

	/* copy metadata */
	rpra = ctx->buf->virt;
	ctx->rpra = rpra;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;
	/* payload area begins right after the metadata */
	args = (uintptr_t)ctx->buf->virt + metalen;
	for (i = 0; i < bufs + handles; ++i) {
		if (lpra[i].buf.len)
			list[i].num = 1;
		else
			list[i].num = 0;
		list[i].pgidx = ipage - pages;
		ipage++;
	}

	/* map ion buffers */
	PERF(ctx->fl->profile, ctx->fl->perf.map,
	for (i = 0; rpra && i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];
		uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
		ssize_t len = lpra[i].buf.len;

		rpra[i].buf.pv = 0;
		rpra[i].buf.len = len;
		if (!len)
			continue;
		if (map) {
			struct vm_area_struct *vma;
			uintptr_t offset;
			int num = buf_num_pages(buf, len);
			int idx = list[i].pgidx;

			if (map->attr & FASTRPC_ATTR_NOVA) {
				offset = 0;
			} else {
				/* offset of the user pointer within the
				 * mapped VMA, validated against map size */
				down_read(&current->mm->mmap_sem);
				VERIFY(err, NULL != (vma = find_vma(current->mm,
								map->va)));
				if (err) {
					up_read(&current->mm->mmap_sem);
					goto bail;
				}
				offset = buf_page_start(buf) - vma->vm_start;
				up_read(&current->mm->mmap_sem);
				VERIFY(err, offset < (uintptr_t)map->size);
				if (err)
					goto bail;
			}
			pages[idx].addr = map->phys + offset;
			pages[idx].size = num << PAGE_SHIFT;
		}
		rpra[i].buf.pv = buf;
	}
	PERF_END);
	/* handle args publish the whole mapped region */
	for (i = bufs; i < bufs + handles; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		pages[i].addr = map->phys;
		pages[i].size = map->size;
	}
	/* zero the fd-return list and CRC list trailing the page table */
	fdlist = (uint64_t *)&pages[bufs + handles];
	for (i = 0; i < M_FDLIST; i++)
		fdlist[i] = 0;
	crclist = (uint32_t *)&fdlist[M_FDLIST];
	memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);

	/* copy non ion buffers */
	PERF(ctx->fl->profile, ctx->fl->perf.copy,
	rlen = copylen - metalen;
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];
		ssize_t mlen;
		uint64_t buf;
		ssize_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (map)
			continue;
		if (ctx->overps[oix]->offset == 0) {
			/* keep payload placement aligned as during sizing */
			rlen -= ALIGN(args, BALIGN) - args;
			args = ALIGN(args, BALIGN);
		}
		mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
		VERIFY(err, rlen >= mlen);
		if (err)
			goto bail;
		rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
		pages[list[i].pgidx].addr = ctx->buf->phys -
					    ctx->overps[oix]->offset +
					    (copylen - rlen);
		pages[list[i].pgidx].addr =
			buf_page_start(pages[list[i].pgidx].addr);
		buf = rpra[i].buf.pv;
		pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
		/* only input buffers carry data toward the remote side */
		if (i < inbufs) {
			K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
					lpra[i].buf.pv, len);
			if (err)
				goto bail;
		}
		args = args + mlen;
		rlen -= mlen;
	}
	PERF_END);

	/* flush cached, non-coherent buffers before handing them over */
	PERF(ctx->fl->profile, ctx->fl->perf.flush,
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		if (rpra[i].buf.len && ctx->overps[oix]->mstart)
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
			uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
	}
	PERF_END);
	/* publish dma descriptors for the handle args */
	for (i = bufs; rpra && i < bufs + handles; i++) {
		rpra[i].dma.fd = ctx->fds[i];
		rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
		rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
	}

	/* finally flush the metadata + payload region itself */
	if (!ctx->fl->sctx->smmu.coherent) {
		PERF(ctx->fl->profile, ctx->fl->perf.flush,
		dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
		PERF_END);
	}
 bail:
	return err;
}
1316
/*
 * Unmarshal the results of a completed invoke back to the caller:
 * copy each non-ion output buffer to user space, drop the mmap
 * references taken for buffer args, release any maps whose fds the
 * remote side returned in the fdlist, and copy the CRC list out if the
 * caller asked for it.  The metadata layout mirrors get_args().
 *
 * @kernel: nonzero when destinations are kernel pointers
 *          (K_COPY_TO_USER switches between memcpy and copy_to_user).
 */
static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		    remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	uint32_t *crclist = NULL;

	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	/* recompute the metadata pointers exactly as get_args() laid
	 * them out: arg table, page table, then fdlist and crclist */
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	crclist = (uint32_t *)(fdlist + M_FDLIST);

	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			/* copied output: move data back to the caller */
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			/* ion-backed output: nothing to copy, drop map */
			fastrpc_mmap_free(ctx->maps[i]);
			ctx->maps[i] = NULL;
		}
	}
	/* release maps for fds the remote side says it is done with;
	 * the fdlist is zero-terminated */
	if (inbufs + outbufs + handles) {
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap);
		}
	}
	/* hand the per-arg CRCs back if the caller supplied a buffer */
	if (ctx->crc && crclist && rpra)
		K_COPY_TO_USER(err, kernel, ctx->crc,
			crclist, M_CRCLIST*sizeof(uint32_t));

 bail:
	return err;
}
1368
1369static void inv_args_pre(struct smq_invoke_ctx *ctx)
1370{
1371 int i, inbufs, outbufs;
1372 uint32_t sc = ctx->sc;
1373 remote_arg64_t *rpra = ctx->rpra;
1374 uintptr_t end;
1375
1376 inbufs = REMOTE_SCALARS_INBUFS(sc);
1377 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1378 for (i = inbufs; i < inbufs + outbufs; ++i) {
1379 struct fastrpc_mmap *map = ctx->maps[i];
1380
1381 if (map && map->uncached)
1382 continue;
1383 if (!rpra[i].buf.len)
1384 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301385 if (ctx->fl->sctx->smmu.coherent &&
1386 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1387 continue;
1388 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1389 continue;
1390
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001391 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1392 buf_page_start(rpra[i].buf.pv))
1393 continue;
1394 if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
1395 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1396 (char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
1397 end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
1398 rpra[i].buf.len);
1399 if (!IS_CACHE_ALIGNED(end))
1400 dmac_flush_range((char *)end,
1401 (char *)end + 1);
1402 }
1403}
1404
/*
 * Post-invoke cache maintenance: invalidate the CPU cache over each
 * output buffer (and the metadata region) so the CPU observes the data
 * the remote side wrote.  Skips uncached, coherent and zero-length
 * buffers, and buffers sharing a page with the metadata (covered by
 * the final metadata invalidate).
 */
static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
			buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		/* ion-backed maps go through the ion cache op; plain
		 * kernel memory is invalidated directly */
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						 + rpra[i].buf.len));
	}

	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}
1444
/*
 * Build the smq invoke message for @ctx and transmit it over the
 * channel's glink transport.  Fails fast if the channel is gone, an
 * SSR happened since the file attached (ssrcount mismatch), or the
 * glink port is not in the CONNECTED state.
 *
 * @kernel: nonzero for kernel-originated invokes; these send pid 0 so
 *          the remote side can distinguish them from user processes.
 */
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, NULL != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = fl->tgid;
	msg->tid = current->pid;
	/* second sessions mark their tid so replies route correctly */
	if (fl->sessionid)
		msg->tid |= (1 << SESSION_ID_INDEX);
	if (kernel)
		msg->pid = 0;
	/* context pointer doubles as the reply token; low bit carries
	 * the PD flag */
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
			FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
 bail:
	return err;
}
1482
1483static void fastrpc_init(struct fastrpc_apps *me)
1484{
1485 int i;
1486
1487 INIT_HLIST_HEAD(&me->drivers);
1488 spin_lock_init(&me->hlock);
1489 mutex_init(&me->smd_mutex);
1490 me->channel = &gcinfo[0];
1491 for (i = 0; i < NUM_CHANNELS; i++) {
1492 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301493 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001494 me->channel[i].sesscount = 0;
1495 }
1496}
1497
1498static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1499
1500static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
1501 uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07001502 struct fastrpc_ioctl_invoke_crc *inv)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001503{
c_mtharue1a5ce12017-10-13 20:47:09 +05301504 struct smq_invoke_ctx *ctx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001505 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
1506 int cid = fl->cid;
1507 int interrupted = 0;
1508 int err = 0;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001509 struct timespec invoket;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001510
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001511 if (fl->profile)
1512 getnstimeofday(&invoket);
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301513
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301514
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301515 VERIFY(err, fl->sctx != NULL);
1516 if (err)
1517 goto bail;
1518 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1519 if (err)
1520 goto bail;
1521
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001522 if (!kernel) {
1523 VERIFY(err, 0 == context_restore_interrupted(fl, inv,
1524 &ctx));
1525 if (err)
1526 goto bail;
1527 if (fl->sctx->smmu.faults)
1528 err = FASTRPC_ENOSUCH;
1529 if (err)
1530 goto bail;
1531 if (ctx)
1532 goto wait;
1533 }
1534
1535 VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
1536 if (err)
1537 goto bail;
1538
1539 if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001540 PERF(fl->profile, fl->perf.getargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001541 VERIFY(err, 0 == get_args(kernel, ctx));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001542 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001543 if (err)
1544 goto bail;
1545 }
1546
Sathish Ambleyc432b502017-06-05 12:03:42 -07001547 if (!fl->sctx->smmu.coherent)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001548 inv_args_pre(ctx);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001549 PERF(fl->profile, fl->perf.link,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001550 VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001551 PERF_END);
1552
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001553 if (err)
1554 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001555 wait:
1556 if (kernel)
1557 wait_for_completion(&ctx->work);
1558 else {
1559 interrupted = wait_for_completion_interruptible(&ctx->work);
1560 VERIFY(err, 0 == (err = interrupted));
1561 if (err)
1562 goto bail;
1563 }
Sathish Ambleyc432b502017-06-05 12:03:42 -07001564
1565 PERF(fl->profile, fl->perf.invargs,
1566 if (!fl->sctx->smmu.coherent)
1567 inv_args(ctx);
1568 PERF_END);
1569
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001570 VERIFY(err, 0 == (err = ctx->retval));
1571 if (err)
1572 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001573
1574 PERF(fl->profile, fl->perf.putargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001575 VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001576 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001577 if (err)
1578 goto bail;
1579 bail:
1580 if (ctx && interrupted == -ERESTARTSYS)
1581 context_save_interrupted(ctx);
1582 else if (ctx)
1583 context_free(ctx);
1584 if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
1585 err = ECONNRESET;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001586
1587 if (fl->profile && !interrupted) {
1588 if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
1589 fl->perf.invoke += getnstimediff(&invoket);
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301590 if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX)
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001591 fl->perf.count++;
1592 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001593 return err;
1594}
1595
Sathish Ambley36849af2017-02-02 09:35:55 -08001596static int fastrpc_channel_open(struct fastrpc_file *fl);
/*
 * Initialise the remote process backing this file, in one of three
 * flavors selected by init->flags:
 *   FASTRPC_INIT_ATTACH        - attach to the guest OS PD (pd = 0);
 *   FASTRPC_INIT_CREATE        - spawn a dynamic user PD (pd = 1),
 *                                passing the ELF file, a shared memory
 *                                region, and optional attrs/siglen;
 *   FASTRPC_INIT_CREATE_STATIC - bring up a named static PD, donating
 *                                a memory region to the remote heap
 *                                via hyp_assign_phys the first time.
 * Each flavor issues a remote invoke on the management handle (1).
 * On error the bail path undoes the hyp assignment (for the remote
 * heap case) and frees any maps created here.
 */
static int fastrpc_init_process(struct fastrpc_file *fl,
				struct fastrpc_ioctl_init_attrs *uproc)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_ioctl_invoke_crc ioctl;
	struct fastrpc_ioctl_init *init = &uproc->init;
	struct smq_phy_page pages[1];
	struct fastrpc_mmap *file = NULL, *mem = NULL;
	char *proc_name = NULL;
	int srcVM[1] = {VMID_HLOS};
	int destVM[1] = {VMID_ADSP_Q6};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
	int hlosVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	VERIFY(err, !fastrpc_channel_open(fl));
	if (err)
		goto bail;
	if (init->flags == FASTRPC_INIT_ATTACH) {
		remote_arg_t ra[1];
		int tgid = fl->tgid;

		/* attach: single input arg carrying our tgid */
		ra[0].buf.pv = (void *)&tgid;
		ra[0].buf.len = sizeof(tgid);
		ioctl.inv.handle = 1;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = NULL;
		ioctl.attrs = NULL;
		ioctl.crc = NULL;
		fl->pd = 0;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else if (init->flags == FASTRPC_INIT_CREATE) {
		remote_arg_t ra[6];
		int fds[6];
		int mflags = 0;
		/* in-band header the remote loader expects */
		struct {
			int pgid;
			int namelen;
			int filelen;
			int pageslen;
			int attrs;
			int siglen;
		} inbuf;

		inbuf.pgid = fl->tgid;
		inbuf.namelen = strlen(current->comm) + 1;
		inbuf.filelen = init->filelen;
		fl->pd = 1;
		if (init->filelen) {
			VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
				init->file, init->filelen, mflags, &file));
			if (err)
				goto bail;
		}

		inbuf.pageslen = 1;
		VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
				init->mem, init->memlen, mflags, &mem));
		if (err)
			goto bail;
		inbuf.pageslen = 1;
		ra[0].buf.pv = (void *)&inbuf;
		ra[0].buf.len = sizeof(inbuf);
		fds[0] = 0;

		ra[1].buf.pv = (void *)current->comm;
		ra[1].buf.len = inbuf.namelen;
		fds[1] = 0;

		ra[2].buf.pv = (void *)init->file;
		ra[2].buf.len = inbuf.filelen;
		fds[2] = init->filefd;

		/* donated memory region for the new PD */
		pages[0].addr = mem->phys;
		pages[0].size = mem->size;
		ra[3].buf.pv = (void *)pages;
		ra[3].buf.len = 1 * sizeof(*pages);
		fds[3] = 0;

		inbuf.attrs = uproc->attrs;
		ra[4].buf.pv = (void *)&(inbuf.attrs);
		ra[4].buf.len = sizeof(inbuf.attrs);
		fds[4] = 0;

		inbuf.siglen = uproc->siglen;
		ra[5].buf.pv = (void *)&(inbuf.siglen);
		ra[5].buf.len = sizeof(inbuf.siglen);
		fds[5] = 0;

		ioctl.inv.handle = 1;
		/* method 6 (4 in-args) by default; method 7 (6 in-args)
		 * when attrs are supplied */
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
		if (uproc->attrs)
			ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = fds;
		ioctl.attrs = NULL;
		ioctl.crc = NULL;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
		remote_arg_t ra[3];
		uint64_t phys = 0;
		ssize_t size = 0;
		int fds[3];
		struct {
			int pgid;
			int namelen;
			int pageslen;
		} inbuf;

		/*
		 * NOTE(review): this bails with err == 0, i.e. reports
		 * success without initialising anything when filelen is
		 * zero — confirm this is intended.
		 */
		if (!init->filelen)
			goto bail;

		proc_name = kzalloc(init->filelen, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(proc_name));
		if (err)
			goto bail;
		VERIFY(err, 0 == copy_from_user((void *)proc_name,
			(void __user *)init->file, init->filelen));
		if (err)
			goto bail;

		inbuf.pgid = current->tgid;
		inbuf.namelen = strlen(proc_name)+1;
		inbuf.pageslen = 0;
		/* donate the remote heap region only once globally */
		if (!me->staticpd_flags) {
			inbuf.pageslen = 1;
			VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
				 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
				 &mem));
			if (err)
				goto bail;
			phys = mem->phys;
			size = mem->size;
			/* hand ownership of the region to the ADSP VM */
			VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
					srcVM, 1, destVM, destVMperm, 1));
			if (err) {
				pr_err("ADSPRPC: hyp_assign_phys fail err %d",
						 err);
				pr_err("map->phys %llx, map->size %d\n",
						 phys, (int)size);
				goto bail;
			}
			me->staticpd_flags = 1;
		}

		ra[0].buf.pv = (void *)&inbuf;
		ra[0].buf.len = sizeof(inbuf);
		fds[0] = 0;

		ra[1].buf.pv = (void *)proc_name;
		ra[1].buf.len = inbuf.namelen;
		fds[1] = 0;

		pages[0].addr = phys;
		pages[0].size = size;

		ra[2].buf.pv = (void *)pages;
		ra[2].buf.len = sizeof(*pages);
		fds[2] = 0;
		ioctl.inv.handle = 1;

		ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = NULL;
		ioctl.attrs = NULL;
		ioctl.crc = NULL;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else {
		err = -ENOTTY;
	}
bail:
	kfree(proc_name);
	if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
		me->staticpd_flags = 0;
	if (mem && err) {
		/* undo the VM donation before freeing the map */
		if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
			hyp_assign_phys(mem->phys, (uint64_t)mem->size,
					destVM, 1, srcVM, hlosVMperm, 1);
		fastrpc_mmap_free(mem);
	}
	if (file)
		fastrpc_mmap_free(file);
	return err;
}
1791
1792static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
1793{
1794 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001795 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001796 remote_arg_t ra[1];
1797 int tgid = 0;
1798
Sathish Ambley36849af2017-02-02 09:35:55 -08001799 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1800 if (err)
1801 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05301802 VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001803 if (err)
1804 goto bail;
1805 tgid = fl->tgid;
1806 ra[0].buf.pv = (void *)&tgid;
1807 ra[0].buf.len = sizeof(tgid);
1808 ioctl.inv.handle = 1;
1809 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
1810 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301811 ioctl.fds = NULL;
1812 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001813 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001814 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1815 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1816bail:
1817 return err;
1818}
1819
1820static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
1821 struct fastrpc_mmap *map)
1822{
Sathish Ambleybae51902017-07-03 15:00:49 -07001823 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001824 struct smq_phy_page page;
1825 int num = 1;
1826 remote_arg_t ra[3];
1827 int err = 0;
1828 struct {
1829 int pid;
1830 uint32_t flags;
1831 uintptr_t vaddrin;
1832 int num;
1833 } inargs;
1834 struct {
1835 uintptr_t vaddrout;
1836 } routargs;
1837
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301838 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001839 inargs.vaddrin = (uintptr_t)map->va;
1840 inargs.flags = flags;
1841 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
1842 ra[0].buf.pv = (void *)&inargs;
1843 ra[0].buf.len = sizeof(inargs);
1844 page.addr = map->phys;
1845 page.size = map->size;
1846 ra[1].buf.pv = (void *)&page;
1847 ra[1].buf.len = num * sizeof(page);
1848
1849 ra[2].buf.pv = (void *)&routargs;
1850 ra[2].buf.len = sizeof(routargs);
1851
1852 ioctl.inv.handle = 1;
1853 if (fl->apps->compat)
1854 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
1855 else
1856 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
1857 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301858 ioctl.fds = NULL;
1859 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001860 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001861 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1862 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1863 map->raddr = (uintptr_t)routargs.vaddrout;
c_mtharue1a5ce12017-10-13 20:47:09 +05301864 if (err)
1865 goto bail;
1866 if (flags == ADSP_MMAP_HEAP_ADDR) {
1867 struct scm_desc desc = {0};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001868
c_mtharue1a5ce12017-10-13 20:47:09 +05301869 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
1870 desc.args[1] = map->phys;
1871 desc.args[2] = map->size;
1872 desc.arginfo = SCM_ARGS(3);
1873 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
1874 TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
1875 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
1876
1877 int srcVM[1] = {VMID_HLOS};
1878 int destVM[1] = {VMID_ADSP_Q6};
1879 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
1880
1881 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
1882 srcVM, 1, destVM, destVMperm, 1));
1883 if (err)
1884 goto bail;
1885 }
1886bail:
1887 return err;
1888}
1889
1890static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
1891 struct fastrpc_mmap *map)
1892{
1893 int err = 0;
1894 int srcVM[1] = {VMID_ADSP_Q6};
1895 int destVM[1] = {VMID_HLOS};
1896 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
1897
1898 if (map->flags == ADSP_MMAP_HEAP_ADDR) {
1899 struct fastrpc_ioctl_invoke_crc ioctl;
1900 struct scm_desc desc = {0};
1901 remote_arg_t ra[1];
1902 int err = 0;
1903 struct {
1904 uint8_t skey;
1905 } routargs;
1906
1907 ra[0].buf.pv = (void *)&routargs;
1908 ra[0].buf.len = sizeof(routargs);
1909
1910 ioctl.inv.handle = 1;
1911 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
1912 ioctl.inv.pra = ra;
1913 ioctl.fds = NULL;
1914 ioctl.attrs = NULL;
1915 ioctl.crc = NULL;
1916 if (fl == NULL)
1917 goto bail;
1918
1919 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1920 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1921 if (err)
1922 goto bail;
1923 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
1924 desc.args[1] = map->phys;
1925 desc.args[2] = map->size;
1926 desc.args[3] = routargs.skey;
1927 desc.arginfo = SCM_ARGS(4);
1928 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
1929 TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
1930 } else if (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
1931 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
1932 srcVM, 1, destVM, destVMperm, 1));
1933 if (err)
1934 goto bail;
1935 }
1936
1937bail:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001938 return err;
1939}
1940
/*
 * Ask the DSP to unmap a previously published mapping (by remote address
 * and size), then perform any remote-heap teardown (TZ unprotect or
 * hyp re-assignment) via fastrpc_munmap_on_dsp_rh().
 */
static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
				 struct fastrpc_mmap *map)
{
	struct fastrpc_ioctl_invoke_crc ioctl;
	remote_arg_t ra[1];
	int err = 0;
	struct {
		int pid;
		uintptr_t vaddrout;
		ssize_t size;
	} inargs;

	inargs.pid = fl->tgid;
	inargs.size = map->size;
	inargs.vaddrout = map->raddr;	/* remote VA returned at mmap time */
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);

	ioctl.inv.handle = 1;
	/* method index differs between compat and native ABIs */
	if (fl->apps->compat)
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
	else
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = NULL;
	ioctl.attrs = NULL;
	ioctl.crc = NULL;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	if (err)
		goto bail;
	/* remote-heap mappings need TZ / hypervisor cleanup as well */
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, map));
		if (err)
			goto bail;
	}
bail:
	return err;
}
1981
/*
 * SSR cleanup: drain the global map list one entry at a time, releasing
 * each remote-heap mapping on the DSP side, optionally capturing an ELF
 * ramdump of the region, and freeing the local bookkeeping.  Entries are
 * unlinked under me->hlock but processed outside it.  On error the last
 * unlinked entry is re-added so it is not leaked.
 */
static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
{
	struct fastrpc_mmap *match = NULL, *map = NULL;
	struct hlist_node *n = NULL;
	int err = 0, ret = 0;
	struct fastrpc_apps *me = &gfa;
	struct ramdump_segment *ramdump_segments_rh = NULL;

	do {
		match = NULL;
		spin_lock(&me->hlock);
		/* pop at most one entry per iteration; loop body breaks
		 * immediately so the hash walk never continues after a
		 * deletion
		 */
		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
		spin_unlock(&me->hlock);

		if (match) {
			VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match));
			if (err)
				goto bail;
			/* channel 0 (ADSP) owns the remote-heap ramdump dev */
			if (me->channel[0].ramdumpenabled) {
				ramdump_segments_rh = kcalloc(1,
				sizeof(struct ramdump_segment), GFP_KERNEL);
				if (ramdump_segments_rh) {
					ramdump_segments_rh->address =
					match->phys;
					ramdump_segments_rh->size = match->size;
					ret = do_elf_ramdump(
					 me->channel[0].remoteheap_ramdump_dev,
					 ramdump_segments_rh, 1);
					if (ret < 0)
						pr_err("ADSPRPC: unable to dump heap");
					kfree(ramdump_segments_rh);
				}
			}
			fastrpc_mmap_free(match);
		}
	} while (match);
bail:
	/* put the entry back on failure so it is not lost */
	if (err && match)
		fastrpc_mmap_add(match);
	return err;
}
2027
2028static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
2029 ssize_t len, struct fastrpc_mmap **ppmap);
2030
2031static void fastrpc_mmap_add(struct fastrpc_mmap *map);
2032
/*
 * Handle FASTRPC_IOCTL_MUNMAP: look up and unlink the mapping by remote
 * address/size, release it on the DSP, then free it locally.  If the DSP
 * release fails the mapping is re-added so state stays consistent.
 */
static int fastrpc_internal_munmap(struct fastrpc_file *fl,
				 struct fastrpc_ioctl_munmap *ud)
{
	int err = 0;
	struct fastrpc_mmap *map = NULL;

	VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
	if (err)
		goto bail;
	VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
	if (err)
		goto bail;
	fastrpc_mmap_free(map);
bail:
	/* undo the remove on failure to avoid leaking the mapping */
	if (err && map)
		fastrpc_mmap_add(map);
	return err;
}
2051
/*
 * Handle FASTRPC_IOCTL_MMAP: create (or reuse) a mapping for the given
 * fd/VA/size and publish it to the DSP; the remote address is returned
 * to userspace in ud->vaddrout.  On failure the freshly created mapping
 * is freed.
 */
static int fastrpc_internal_mmap(struct fastrpc_file *fl,
				 struct fastrpc_ioctl_mmap *ud)
{

	struct fastrpc_mmap *map = NULL;
	int err = 0;

	/* already mapped (refcounted find succeeded): nothing to do */
	if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t __user)ud->vaddrin,
			 ud->size, ud->flags, 1, &map))
		return 0;

	VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
			(uintptr_t __user)ud->vaddrin, ud->size,
			 ud->flags, &map));
	if (err)
		goto bail;
	VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
	if (err)
		goto bail;
	ud->vaddrout = map->raddr;
 bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}
2077
/*
 * kref release callback for a channel context.  Invoked via
 * kref_put_mutex(..., &me->smd_mutex) in fastrpc_file_free(), i.e. with
 * smd_mutex HELD — hence the bare mutex_unlock() below balances the lock
 * taken by kref_put_mutex, not one taken here.  Closes the glink channel
 * and unregisters the link-state callback.
 */
static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	/* channel id recovered from position within the global gcinfo[] */
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = NULL;
	glink_unregister_link_state_cb(ctx->link.link_notify_handle);
	ctx->link.link_notify_handle = NULL;
	mutex_unlock(&me->smd_mutex);
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
						MAJOR(me->dev_no), cid);
}
2094
2095static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
2096
/*
 * Pick a free SMMU session on @chan matching the requested security mode
 * and mark it used.  Caller must hold smd_mutex (hence "_locked").  When
 * the channel has no per-session contexts (sesscount == 0) session 0 is
 * backed by the global default device instead.  On success *session
 * points at the claimed slot.
 */
static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
			int secure, struct fastrpc_session_ctx **session)
{
	struct fastrpc_apps *me = &gfa;
	int idx = 0, err = 0;

	if (chan->sesscount) {
		for (idx = 0; idx < chan->sesscount; ++idx) {
			if (!chan->session[idx].used &&
				chan->session[idx].smmu.secure == secure) {
				chan->session[idx].used = 1;
				break;
			}
		}
		/* idx == sesscount means no free matching slot was found */
		VERIFY(err, idx < chan->sesscount);
		if (err)
			goto bail;
		chan->session[idx].smmu.faults = 0;
	} else {
		VERIFY(err, me->dev != NULL);
		if (err)
			goto bail;
		/* no SMMU sessions configured: fall back to the apps device */
		chan->session[0].dev = me->dev;
		chan->session[0].smmu.dev = me->dev;
	}

	*session = &chan->session[idx];
bail:
	return err;
}
2127
c_mtharue1a5ce12017-10-13 20:47:09 +05302128static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
2129 size_t size)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002130{
2131 if (glink_queue_rx_intent(h, NULL, size))
2132 return false;
2133 return true;
2134}
2135
/*
 * glink callback: remote side consumed a transmitted packet.
 * Intentionally a no-op — no per-send resource is released here.
 */
static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
		const void *pkt_priv, const void *ptr)
{
}
2140
/*
 * glink RX callback: a DSP response arrived.  The payload carries the
 * originating context pointer (low bit is a flag and is masked off);
 * the context is validated via its magic before waking the waiter.
 * The RX buffer is always returned to glink, even on a bad packet.
 */
static void fastrpc_glink_notify_rx(void *handle, const void *priv,
		const void *pkt_priv, const void *ptr, size_t size)
{
	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
	struct smq_invoke_ctx *ctx;
	int err = 0;

	VERIFY(err, (rsp && size >= sizeof(*rsp)));
	if (err)
		goto bail;

	/* bit 0 of the cookie is a flag, not part of the pointer */
	ctx = (struct smq_invoke_ctx *)(uint64_to_ptr(rsp->ctx & ~1));
	VERIFY(err, (ctx && ctx->magic == FASTRPC_CTX_MAGIC));
	if (err)
		goto bail;

	context_notify_user(ctx, rsp->retval);
bail:
	if (err)
		pr_err("adsprpc: invalid response or context\n");
	/* always release the rx buffer back to glink */
	glink_rx_done(handle, ptr, true);
}
2163
c_mtharue1a5ce12017-10-13 20:47:09 +05302164static void fastrpc_glink_notify_state(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002165 unsigned int event)
2166{
2167 struct fastrpc_apps *me = &gfa;
2168 int cid = (int)(uintptr_t)priv;
2169 struct fastrpc_glink_info *link;
2170
2171 if (cid < 0 || cid >= NUM_CHANNELS)
2172 return;
2173 link = &me->channel[cid].link;
2174 switch (event) {
2175 case GLINK_CONNECTED:
2176 link->port_state = FASTRPC_LINK_CONNECTED;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302177 complete(&me->channel[cid].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002178 break;
2179 case GLINK_LOCAL_DISCONNECTED:
2180 link->port_state = FASTRPC_LINK_DISCONNECTED;
2181 break;
2182 case GLINK_REMOTE_DISCONNECTED:
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05302183 if (me->channel[cid].chan) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002184 fastrpc_glink_close(me->channel[cid].chan, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05302185 me->channel[cid].chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002186 }
2187 break;
2188 default:
2189 break;
2190 }
2191}
2192
2193static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
2194 struct fastrpc_session_ctx **session)
2195{
2196 int err = 0;
2197 struct fastrpc_apps *me = &gfa;
2198
2199 mutex_lock(&me->smd_mutex);
2200 if (!*session)
2201 err = fastrpc_session_alloc_locked(chan, secure, session);
2202 mutex_unlock(&me->smd_mutex);
2203 return err;
2204}
2205
2206static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
2207 struct fastrpc_session_ctx *session)
2208{
2209 struct fastrpc_apps *me = &gfa;
2210
2211 mutex_lock(&me->smd_mutex);
2212 session->used = 0;
2213 mutex_unlock(&me->smd_mutex);
2214}
2215
/*
 * Tear down a per-fd fastrpc file: tell the DSP to release the remote
 * process (best-effort), unlink from the global driver list, destroy
 * pending contexts, buffers and maps, drop the channel reference (which
 * may close the channel via fastrpc_channel_close) and free the sessions.
 * Returns 0 always.
 */
static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n;
	struct fastrpc_mmap *map = NULL;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	/* best-effort remote teardown; failure is deliberately ignored */
	(void)fastrpc_release_current_dsp_process(fl);

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	/* no session was ever bound: nothing else to clean up */
	if (!fl->sctx) {
		kfree(fl);
		return 0;
	}
	/* block new ioctl work while we tear down (see device_ioctl) */
	spin_lock(&fl->hlock);
	fl->file_close = 1;
	spin_unlock(&fl->hlock);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map);
	}
	/* only drop the channel ref if no SSR happened since we took it */
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
				fastrpc_channel_close, &fl->apps->smd_mutex);
	if (fl->sctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
	kfree(fl);
	return 0;
}
2254
2255static int fastrpc_device_release(struct inode *inode, struct file *file)
2256{
2257 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2258
2259 if (fl) {
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302260 if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
2261 pm_qos_remove_request(&fl->pm_qos_req);
Sathish Ambley1ca68232017-01-19 10:32:55 -08002262 if (fl->debugfs_file != NULL)
2263 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002264 fastrpc_file_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +05302265 file->private_data = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002266 }
2267 return 0;
2268}
2269
2270static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
2271 void *priv)
2272{
2273 struct fastrpc_apps *me = &gfa;
2274 int cid = (int)((uintptr_t)priv);
2275 struct fastrpc_glink_info *link;
2276
2277 if (cid < 0 || cid >= NUM_CHANNELS)
2278 return;
2279
2280 link = &me->channel[cid].link;
2281 switch (cb_info->link_state) {
2282 case GLINK_LINK_STATE_UP:
2283 link->link_state = FASTRPC_LINK_STATE_UP;
2284 complete(&me->channel[cid].work);
2285 break;
2286 case GLINK_LINK_STATE_DOWN:
2287 link->link_state = FASTRPC_LINK_STATE_DOWN;
2288 break;
2289 default:
2290 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
2291 break;
2292 }
2293}
2294
/*
 * Register a glink link-state callback for channel @cid (idempotent: an
 * existing registration is kept) and wait for the link to come up.
 * Returns 0 on success, nonzero on bad cid, registration failure, or
 * timeout waiting for GLINK_LINK_STATE_UP.
 */
static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;

	link = &me->channel[cid].link;
	/* already registered: success with err == 0 */
	if (link->link_notify_handle != NULL)
		goto bail;

	link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
	link->link_notify_handle = glink_register_link_state_cb(
				&link->link_info,
				(void *)((uintptr_t)cid));
	VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
	if (err) {
		link->link_notify_handle = NULL;
		goto bail;
	}
	/* completed by fastrpc_link_state_handler() on LINK_STATE_UP */
	VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
						RPC_TIMEOUT));
bail:
	return err;
}
2322
2323static void fastrpc_glink_close(void *chan, int cid)
2324{
2325 int err = 0;
2326 struct fastrpc_glink_info *link;
2327
2328 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2329 if (err)
2330 return;
2331 link = &gfa.channel[cid].link;
2332
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302333 if (link->port_state == FASTRPC_LINK_CONNECTED) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002334 link->port_state = FASTRPC_LINK_DISCONNECTING;
2335 glink_close(chan);
2336 }
2337}
2338
/*
 * Open the glink channel for @cid.  Requires the link to be UP and the
 * port DISCONNECTED; moves the port through CONNECTING (rolled back to
 * DISCONNECTED if glink_open fails) and stores the handle on success.
 * The CONNECTED transition happens asynchronously via notify_state.
 */
static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &me->channel[cid].link.cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
	if (err)
		goto bail;

	link->port_state = FASTRPC_LINK_CONNECTING;
	/* cid travels as the priv cookie of every glink callback */
	cfg->priv = (void *)(uintptr_t)cid;
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err) {
		/* roll the port state back so a retry is possible */
		if (link->port_state == FASTRPC_LINK_CONNECTING)
			link->port_state = FASTRPC_LINK_DISCONNECTED;
		goto bail;
	}
	me->channel[cid].chan = handle;
bail:
	return err;
}
2380
/*
 * debugfs open: forward the fastrpc_file pointer stashed in i_private
 * (set by debugfs_create_file in fastrpc_device_open) to read().
 */
static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}
2386
/*
 * debugfs read: render driver state into a DEBUGFS_SIZE text buffer and
 * copy it out with simple_read_from_buffer().  With no fastrpc_file
 * bound (fl == NULL) it dumps global channel/session info; otherwise it
 * dumps the file's bufs, maps, and pending/interrupted invoke contexts
 * under fl->hlock.  Returns bytes copied, or 0 if the buffer allocation
 * failed (reads as EOF).
 */
static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
					 size_t count, loff_t *position)
{
	struct fastrpc_file *fl = filp->private_data;
	struct hlist_node *n;
	struct fastrpc_buf *buf = NULL;
	struct fastrpc_mmap *map = NULL;
	struct smq_invoke_ctx *ictx = NULL;
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	unsigned int len = 0;
	int i, j, ret = 0;
	char *fileinfo = NULL;

	fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
	if (!fileinfo)
		goto bail;
	if (fl == NULL) {
		/* global view: per-channel session table */
		for (i = 0; i < NUM_CHANNELS; i++) {
			chan = &gcinfo[i];
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s\n\n",
					chan->name);
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s %d\n",
					"sesscount:", chan->sesscount);
			for (j = 0; j < chan->sesscount; j++) {
				sess = &chan->session[j];
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s%d\n\n", "SESSION", j);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "sid:",
						sess->smmu.cb);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "SECURE:",
						sess->smmu.secure);
			}
		}
	} else {
		/* per-fd view: identity plus live object lists */
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"PROCESS_ID:", fl->tgid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"CHANNEL_ID:", fl->cid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"SSRCOUNT:", fl->ssrcount);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s\n",
				"LIST OF BUFS:");
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %pK %s %llx\n", "buf:",
					buf, "buf->virt:", buf->virt,
					"buf->phys:", buf->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF MAPS:");
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %lx %s %llx\n",
					"map:", map,
					"map->va:", map->va,
					"map->phys:", map->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF PENDING SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %u %s %u %s %u\n",
					"smqcontext:", ictx,
					"sc:", ictx->sc,
					"tid:", ictx->pid,
					"handle", ictx->rpra->h);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF INTERRUPTED SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %u %s %u %s %u\n",
					"smqcontext:", ictx,
					"sc:", ictx->sc,
					"tid:", ictx->pid,
					"handle", ictx->rpra->h);
		}
		spin_unlock(&fl->hlock);
	}
	/* scnprintf already bounds writes; clamp is belt-and-braces */
	if (len > DEBUGFS_SIZE)
		len = DEBUGFS_SIZE;
	ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
	kfree(fileinfo);
bail:
	return ret;
}
2489
/* fops for the per-process debugfs node created in fastrpc_device_open() */
static const struct file_operations debugfs_fops = {
	.open = fastrpc_debugfs_open,
	.read = fastrpc_debugfs_read,
};
Sathish Ambley36849af2017-02-02 09:35:55 -08002494static int fastrpc_channel_open(struct fastrpc_file *fl)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002495{
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002496 struct fastrpc_apps *me = &gfa;
Sathish Ambley36849af2017-02-02 09:35:55 -08002497 int cid, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002498
2499 mutex_lock(&me->smd_mutex);
2500
Sathish Ambley36849af2017-02-02 09:35:55 -08002501 VERIFY(err, fl && fl->sctx);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002502 if (err)
2503 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08002504 cid = fl->cid;
c_mtharue1a5ce12017-10-13 20:47:09 +05302505 if (me->channel[cid].ssrcount !=
2506 me->channel[cid].prevssrcount) {
2507 if (!me->channel[cid].issubsystemup) {
2508 VERIFY(err, 0);
2509 if (err)
2510 goto bail;
2511 }
2512 }
Sathish Ambley36849af2017-02-02 09:35:55 -08002513 VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
2514 if (err)
2515 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002516 fl->ssrcount = me->channel[cid].ssrcount;
2517 if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
c_mtharue1a5ce12017-10-13 20:47:09 +05302518 (me->channel[cid].chan == NULL)) {
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05302519 VERIFY(err, 0 == fastrpc_glink_register(cid, me));
2520 if (err)
2521 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002522 VERIFY(err, 0 == fastrpc_glink_open(cid));
2523 if (err)
2524 goto bail;
2525
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302526 VERIFY(err,
2527 wait_for_completion_timeout(&me->channel[cid].workport,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002528 RPC_TIMEOUT));
2529 if (err) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302530 me->channel[cid].chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002531 goto bail;
2532 }
2533 kref_init(&me->channel[cid].kref);
2534 pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
2535 MAJOR(me->dev_no), cid);
Tharun Kumar Merugu88ba9252017-08-09 12:15:41 +05302536 err = glink_queue_rx_intent(me->channel[cid].chan, NULL, 16);
2537 err |= glink_queue_rx_intent(me->channel[cid].chan, NULL, 64);
Bruce Levy34c3c1c2017-07-31 17:08:58 -07002538 if (err)
Tharun Kumar Merugu88ba9252017-08-09 12:15:41 +05302539 pr_warn("adsprpc: initial intent fail for %d err %d\n",
2540 cid, err);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002541 if (me->channel[cid].ssrcount !=
2542 me->channel[cid].prevssrcount) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302543 if (fastrpc_mmap_remove_ssr(fl))
2544 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002545 me->channel[cid].prevssrcount =
2546 me->channel[cid].ssrcount;
2547 }
2548 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002549
2550bail:
2551 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002552 return err;
2553}
2554
/*
 * open() for /dev/adsprpc-*: allocate and initialize the per-fd
 * fastrpc_file, create its debugfs node (named after the opener's comm),
 * and link it into the global driver list.  The channel id stays -1
 * until userspace selects one via FASTRPC_IOCTL_GETINFO.
 */
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	int err = 0;
	struct dentry *debugfs_file;
	struct fastrpc_file *fl = NULL;
	struct fastrpc_apps *me = &gfa;

	VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
	if (err)
		return err;
	/* fl is handed to debugfs as i_private for fastrpc_debugfs_read */
	debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
						fl, &debugfs_fops);
	context_list_ctor(&fl->clst);
	spin_lock_init(&fl->hlock);
	INIT_HLIST_HEAD(&fl->maps);
	INIT_HLIST_HEAD(&fl->bufs);
	INIT_HLIST_NODE(&fl->hn);
	fl->sessionid = 0;
	fl->tgid = current->tgid;
	fl->apps = me;
	fl->mode = FASTRPC_MODE_SERIAL;
	/* no channel bound yet; set by fastrpc_get_info() */
	fl->cid = -1;
	if (debugfs_file != NULL)
		fl->debugfs_file = debugfs_file;
	memset(&fl->perf, 0, sizeof(fl->perf));
	fl->qos_request = 0;
	filp->private_data = fl;
	spin_lock(&me->hlock);
	hlist_add_head(&fl->hn, &me->drivers);
	spin_unlock(&me->hlock);
	return 0;
}
2587
/*
 * FASTRPC_IOCTL_GETINFO backend.  On the first call *info carries the
 * requested channel id: it is validated, bound to the file, and a
 * non-secure SMMU session is allocated.  In all cases *info is rewritten
 * to report whether the bound session has SMMU enabled (1) or not (0).
 */
static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
{
	int err = 0;
	uint32_t cid;

	VERIFY(err, fl != NULL);
	if (err)
		goto bail;
	/* first call binds the channel; later calls just report */
	if (fl->cid == -1) {
		cid = *info;
		VERIFY(err, cid < NUM_CHANNELS);
		if (err)
			goto bail;
		fl->cid = cid;
		fl->ssrcount = fl->apps->channel[cid].ssrcount;
		VERIFY(err, !fastrpc_session_alloc_locked(
				&fl->apps->channel[cid], 0, &fl->sctx));
		if (err)
			goto bail;
	}
	VERIFY(err, fl->sctx != NULL);
	if (err)
		goto bail;
	*info = (fl->sctx->smmu.enabled ? 1 : 0);
bail:
	return err;
}
2615
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302616static int fastrpc_internal_control(struct fastrpc_file *fl,
2617 struct fastrpc_ioctl_control *cp)
2618{
2619 int err = 0;
2620 int latency;
2621
2622 VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
2623 if (err)
2624 goto bail;
2625 VERIFY(err, !IS_ERR_OR_NULL(cp));
2626 if (err)
2627 goto bail;
2628
2629 switch (cp->req) {
2630 case FASTRPC_CONTROL_LATENCY:
2631 latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
2632 fl->apps->latency : PM_QOS_DEFAULT_VALUE;
2633 VERIFY(err, latency != 0);
2634 if (err)
2635 goto bail;
2636 if (!fl->qos_request) {
2637 pm_qos_add_request(&fl->pm_qos_req,
2638 PM_QOS_CPU_DMA_LATENCY, latency);
2639 fl->qos_request = 1;
2640 } else
2641 pm_qos_update_request(&fl->pm_qos_req, latency);
2642 break;
2643 default:
2644 err = -ENOTTY;
2645 break;
2646 }
2647bail:
2648 return err;
2649}
2650
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002651static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
2652 unsigned long ioctl_param)
2653{
2654 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07002655 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002656 struct fastrpc_ioctl_mmap mmap;
2657 struct fastrpc_ioctl_munmap munmap;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002658 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002659 struct fastrpc_ioctl_perf perf;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302660 struct fastrpc_ioctl_control cp;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002661 } p;
2662 void *param = (char *)ioctl_param;
2663 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2664 int size = 0, err = 0;
2665 uint32_t info;
2666
c_mtharue1a5ce12017-10-13 20:47:09 +05302667 p.inv.fds = NULL;
2668 p.inv.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002669 p.inv.crc = NULL;
tharun kumar9f899ea2017-07-03 17:07:03 +05302670 spin_lock(&fl->hlock);
2671 if (fl->file_close == 1) {
2672 err = EBADF;
2673 pr_warn("ADSPRPC: fastrpc_device_release is happening, So not sending any new requests to DSP");
2674 spin_unlock(&fl->hlock);
2675 goto bail;
2676 }
2677 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002678
2679 switch (ioctl_num) {
2680 case FASTRPC_IOCTL_INVOKE:
2681 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07002682 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002683 case FASTRPC_IOCTL_INVOKE_FD:
2684 if (!size)
2685 size = sizeof(struct fastrpc_ioctl_invoke_fd);
2686 /* fall through */
2687 case FASTRPC_IOCTL_INVOKE_ATTRS:
2688 if (!size)
2689 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07002690 /* fall through */
2691 case FASTRPC_IOCTL_INVOKE_CRC:
2692 if (!size)
2693 size = sizeof(struct fastrpc_ioctl_invoke_crc);
c_mtharue1a5ce12017-10-13 20:47:09 +05302694 K_COPY_FROM_USER(err, 0, &p.inv, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002695 if (err)
2696 goto bail;
2697 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
2698 0, &p.inv)));
2699 if (err)
2700 goto bail;
2701 break;
2702 case FASTRPC_IOCTL_MMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05302703 K_COPY_FROM_USER(err, 0, &p.mmap, param,
2704 sizeof(p.mmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05302705 if (err)
2706 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002707 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
2708 if (err)
2709 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302710 K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002711 if (err)
2712 goto bail;
2713 break;
2714 case FASTRPC_IOCTL_MUNMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05302715 K_COPY_FROM_USER(err, 0, &p.munmap, param,
2716 sizeof(p.munmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05302717 if (err)
2718 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002719 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
2720 &p.munmap)));
2721 if (err)
2722 goto bail;
2723 break;
2724 case FASTRPC_IOCTL_SETMODE:
2725 switch ((uint32_t)ioctl_param) {
2726 case FASTRPC_MODE_PARALLEL:
2727 case FASTRPC_MODE_SERIAL:
2728 fl->mode = (uint32_t)ioctl_param;
2729 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002730 case FASTRPC_MODE_PROFILE:
2731 fl->profile = (uint32_t)ioctl_param;
2732 break;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302733 case FASTRPC_MODE_SESSION:
2734 fl->sessionid = 1;
2735 fl->tgid |= (1 << SESSION_ID_INDEX);
2736 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002737 default:
2738 err = -ENOTTY;
2739 break;
2740 }
2741 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002742 case FASTRPC_IOCTL_GETPERF:
c_mtharue1a5ce12017-10-13 20:47:09 +05302743 K_COPY_FROM_USER(err, 0, &p.perf,
2744 param, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002745 if (err)
2746 goto bail;
2747 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
2748 if (p.perf.keys) {
2749 char *keys = PERF_KEYS;
2750
c_mtharue1a5ce12017-10-13 20:47:09 +05302751 K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
2752 keys, strlen(keys)+1);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002753 if (err)
2754 goto bail;
2755 }
2756 if (p.perf.data) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302757 K_COPY_TO_USER(err, 0, (void *)p.perf.data,
2758 &fl->perf, sizeof(fl->perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002759 }
c_mtharue1a5ce12017-10-13 20:47:09 +05302760 K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002761 if (err)
2762 goto bail;
2763 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302764 case FASTRPC_IOCTL_CONTROL:
c_mtharue1a5ce12017-10-13 20:47:09 +05302765 K_COPY_FROM_USER(err, 0, &p.cp, param,
2766 sizeof(p.cp));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302767 if (err)
2768 goto bail;
2769 VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
2770 if (err)
2771 goto bail;
2772 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002773 case FASTRPC_IOCTL_GETINFO:
c_mtharue1a5ce12017-10-13 20:47:09 +05302774 K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
Sathish Ambley36849af2017-02-02 09:35:55 -08002775 if (err)
2776 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002777 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
2778 if (err)
2779 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302780 K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002781 if (err)
2782 goto bail;
2783 break;
2784 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002785 p.init.attrs = 0;
2786 p.init.siglen = 0;
2787 size = sizeof(struct fastrpc_ioctl_init);
2788 /* fall through */
2789 case FASTRPC_IOCTL_INIT_ATTRS:
2790 if (!size)
2791 size = sizeof(struct fastrpc_ioctl_init_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +05302792 K_COPY_FROM_USER(err, 0, &p.init, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002793 if (err)
2794 goto bail;
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05302795 VERIFY(err, p.init.init.filelen >= 0 &&
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05302796 p.init.init.filelen < INIT_FILELEN_MAX);
2797 if (err)
2798 goto bail;
2799 VERIFY(err, p.init.init.memlen >= 0 &&
2800 p.init.init.memlen < INIT_MEMLEN_MAX);
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05302801 if (err)
2802 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002803 VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
2804 if (err)
2805 goto bail;
2806 break;
2807
2808 default:
2809 err = -ENOTTY;
2810 pr_info("bad ioctl: %d\n", ioctl_num);
2811 break;
2812 }
2813 bail:
2814 return err;
2815}
2816
2817static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
2818 unsigned long code,
2819 void *data)
2820{
2821 struct fastrpc_apps *me = &gfa;
2822 struct fastrpc_channel_ctx *ctx;
c_mtharue1a5ce12017-10-13 20:47:09 +05302823 struct notif_data *notifdata = data;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002824 int cid;
2825
2826 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
2827 cid = ctx - &me->channel[0];
2828 if (code == SUBSYS_BEFORE_SHUTDOWN) {
2829 mutex_lock(&me->smd_mutex);
2830 ctx->ssrcount++;
c_mtharue1a5ce12017-10-13 20:47:09 +05302831 ctx->issubsystemup = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002832 if (ctx->chan) {
2833 fastrpc_glink_close(ctx->chan, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05302834 ctx->chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002835 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
2836 gcinfo[cid].name, MAJOR(me->dev_no), cid);
2837 }
2838 mutex_unlock(&me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302839 if (cid == 0)
2840 me->staticpd_flags = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002841 fastrpc_notify_drivers(me, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05302842 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
2843 if (me->channel[0].remoteheap_ramdump_dev &&
2844 notifdata->enable_ramdump) {
2845 me->channel[0].ramdumpenabled = 1;
2846 }
2847 } else if (code == SUBSYS_AFTER_POWERUP) {
2848 ctx->issubsystemup = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002849 }
2850
2851 return NOTIFY_DONE;
2852}
2853
/* Character-device operations for the fastrpc device node. */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};
2860
/*
 * Device-tree compatibles handled by fastrpc_probe(): the main compute
 * node, per-context-bank SMMU nodes, and the ADSP memory region node.
 */
static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};
2868
2869static int fastrpc_cb_probe(struct device *dev)
2870{
2871 struct fastrpc_channel_ctx *chan;
2872 struct fastrpc_session_ctx *sess;
2873 struct of_phandle_args iommuspec;
2874 const char *name;
2875 unsigned int start = 0x80000000;
2876 int err = 0, i;
2877 int secure_vmid = VMID_CP_PIXEL;
2878
c_mtharue1a5ce12017-10-13 20:47:09 +05302879 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
2880 "label", NULL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002881 if (err)
2882 goto bail;
2883 for (i = 0; i < NUM_CHANNELS; i++) {
2884 if (!gcinfo[i].name)
2885 continue;
2886 if (!strcmp(name, gcinfo[i].name))
2887 break;
2888 }
2889 VERIFY(err, i < NUM_CHANNELS);
2890 if (err)
2891 goto bail;
2892 chan = &gcinfo[i];
2893 VERIFY(err, chan->sesscount < NUM_SESSIONS);
2894 if (err)
2895 goto bail;
2896
2897 VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
2898 "#iommu-cells", 0, &iommuspec));
2899 if (err)
2900 goto bail;
2901 sess = &chan->session[chan->sesscount];
2902 sess->smmu.cb = iommuspec.args[0] & 0xf;
2903 sess->used = 0;
2904 sess->smmu.coherent = of_property_read_bool(dev->of_node,
2905 "dma-coherent");
2906 sess->smmu.secure = of_property_read_bool(dev->of_node,
2907 "qcom,secure-context-bank");
2908 if (sess->smmu.secure)
2909 start = 0x60000000;
2910 VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
2911 arm_iommu_create_mapping(&platform_bus_type,
Tharun Kumar Meruguca183f92017-04-27 17:43:27 +05302912 start, 0x78000000)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002913 if (err)
2914 goto bail;
2915
2916 if (sess->smmu.secure)
2917 iommu_domain_set_attr(sess->smmu.mapping->domain,
2918 DOMAIN_ATTR_SECURE_VMID,
2919 &secure_vmid);
2920
2921 VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
2922 if (err)
2923 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302924 sess->smmu.dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002925 sess->smmu.enabled = 1;
2926 chan->sesscount++;
Sathish Ambley1ca68232017-01-19 10:32:55 -08002927 debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
2928 NULL, &debugfs_fops);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002929bail:
2930 return err;
2931}
2932
/*
 * Platform probe, dispatched by compatible string:
 *  - "qcom,msm-fastrpc-compute-cb": delegate to fastrpc_cb_probe().
 *  - "qcom,msm-adsprpc-mem-region": locate the ADSP ION heap's CMA region
 *    and hyp-assign it so HLOS and the remote Q6 subsystems share access.
 *  - otherwise (main compute node): read optional RPC latency and populate
 *    the child nodes (which triggers the context-bank probes above).
 */
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			/* Find the ION child heap matching ADSP's heap id
			 * and record its CMA base/size, if any.
			 */
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
						VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				};

			/* Grant HLOS + modem/SSC/ADSP Q6 RWX on the region. */
			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
					srcVM, 1, destVM, destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	/* Optional latency hint; default to 0 when the property is absent. */
	err = of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
				&me->latency);
	if (err)
		me->latency = 0;
	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					  fastrpc_match_table,
					  NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}
3000
3001static void fastrpc_deinit(void)
3002{
3003 struct fastrpc_apps *me = &gfa;
3004 struct fastrpc_channel_ctx *chan = gcinfo;
3005 int i, j;
3006
3007 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
3008 if (chan->chan) {
3009 kref_put_mutex(&chan->kref,
3010 fastrpc_channel_close, &me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05303011 chan->chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003012 }
3013 for (j = 0; j < NUM_SESSIONS; j++) {
3014 struct fastrpc_session_ctx *sess = &chan->session[j];
c_mtharue1a5ce12017-10-13 20:47:09 +05303015 if (sess->smmu.dev) {
3016 arm_iommu_detach_device(sess->smmu.dev);
3017 sess->smmu.dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003018 }
3019 if (sess->smmu.mapping) {
3020 arm_iommu_release_mapping(sess->smmu.mapping);
c_mtharue1a5ce12017-10-13 20:47:09 +05303021 sess->smmu.mapping = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003022 }
3023 }
3024 }
3025}
3026
/* Platform driver bound to all fastrpc_match_table compatibles. */
static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};
3035
/*
 * Module init: register the platform driver, allocate the char-device
 * region, create a single device node (minor 0) shared by all channels,
 * register an SSR notifier per channel, create the ION client, and set up
 * the debugfs root.  Unwinds via the goto ladder in reverse order of
 * acquisition on any failure.
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	struct device *dev = NULL;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				1));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	/* One device node, named after channel 0, shared by every channel. */
	dev = device_create(me->class, NULL,
				MKDEV(MAJOR(me->dev_no), 0),
				NULL, gcinfo[0].name);
	VERIFY(err, !IS_ERR_OR_NULL(dev));
	if (err)
		goto device_create_bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		me->channel[i].dev = dev;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].issubsystemup = 1;
		me->channel[i].ramdumpenabled = 0;
		me->channel[i].remoteheap_ramdump_dev = NULL;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	debugfs_root = debugfs_create_dir("adsprpc", NULL);
	return 0;
device_create_bail:
	for (i = 0; i < NUM_CHANNELS; i++) {
		/*
		 * NOTE(review): subsys_notif_register_notifier() may return
		 * an ERR_PTR on failure, which this non-NULL check would not
		 * catch -- confirm against the subsystem_notif API.
		 */
		if (me->channel[i].handle)
			subsys_notif_unregister_notifier(me->channel[i].handle,
							&me->channel[i].nb);
	}
	if (!IS_ERR_OR_NULL(dev))
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}
3107
/*
 * Module exit: destroy all open fastrpc files, release channel/session
 * resources, tear down the device node(s), SSR notifiers, class, cdev,
 * chrdev region, ION client and debugfs tree.
 */
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;
	int i;

	fastrpc_file_list_dtor(me);
	fastrpc_deinit();
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		/*
		 * NOTE(review): init creates only one device (minor 0) shared
		 * by all channels, while this destroys minor i per channel;
		 * the extra device_destroy() calls look like harmless no-op
		 * lookups -- confirm against the intended device model.
		 */
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						&me->channel[i].nb);
	}
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
	ion_client_destroy(me->client);
	debugfs_remove_recursive(debugfs_root);
}
3128
/* late_initcall: presumably so glink/ION/SMMU providers are up first --
 * TODO confirm.
 */
late_initcall(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");