blob: 8a282122bd8020251415c6a9e056b95bdfcc4dfc [file] [log] [blame]
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001/*
Sathish Ambleyae5ee542017-01-16 22:24:23 -08002 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
29#include <soc/qcom/subsystem_notif.h>
30#include <soc/qcom/subsystem_restart.h>
31#include <linux/scatterlist.h>
32#include <linux/fs.h>
33#include <linux/uaccess.h>
34#include <linux/device.h>
35#include <linux/of.h>
36#include <linux/of_address.h>
37#include <linux/of_platform.h>
38#include <linux/dma-contiguous.h>
39#include <linux/cma.h>
40#include <linux/iommu.h>
41#include <linux/kref.h>
42#include <linux/sort.h>
43#include <linux/msm_dma_iommu_mapping.h>
44#include <asm/dma-iommu.h>
c_mtharue1a5ce12017-10-13 20:47:09 +053045#include <soc/qcom/scm.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070046#include "adsprpc_compat.h"
47#include "adsprpc_shared.h"
c_mtharue1a5ce12017-10-13 20:47:09 +053048#include <soc/qcom/ramdump.h>
Sathish Ambley1ca68232017-01-19 10:32:55 -080049#include <linux/debugfs.h>
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053050#include <linux/pm_qos.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070051#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
52#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
53#define TZ_PIL_AUTH_QDSP6_PROC 1
c_mtharue1a5ce12017-10-13 20:47:09 +053054#define ADSP_MMAP_HEAP_ADDR 4
55#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
Sathish Ambley69e1ab02016-10-18 10:28:15 -070056#define FASTRPC_ENOSUCH 39
57#define VMID_SSC_Q6 5
58#define VMID_ADSP_Q6 6
Sathish Ambley1ca68232017-01-19 10:32:55 -080059#define DEBUGFS_SIZE 1024
Sathish Ambley69e1ab02016-10-18 10:28:15 -070060
61#define RPC_TIMEOUT (5 * HZ)
62#define BALIGN 128
63#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
64#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
Sathish Ambleybae51902017-07-03 15:00:49 -070065#define M_FDLIST (16)
66#define M_CRCLIST (64)
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +053067#define SESSION_ID_INDEX (30)
c_mtharufdac6892017-10-12 13:09:01 +053068#define FASTRPC_CTX_MAGIC (0xbeeddeed)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070069
70#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
71
72#define FASTRPC_LINK_STATE_DOWN (0x0)
73#define FASTRPC_LINK_STATE_UP (0x1)
74#define FASTRPC_LINK_DISCONNECTED (0x0)
75#define FASTRPC_LINK_CONNECTING (0x1)
76#define FASTRPC_LINK_CONNECTED (0x3)
77#define FASTRPC_LINK_DISCONNECTING (0x7)
c_mtharu314a4202017-11-15 22:09:17 +053078#define FASTRPC_LINK_REMOTE_DISCONNECTING (0x8)
79#define FASTRPC_GLINK_INTENT_LEN (64)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070080
Sathish Ambleya21b5b52017-01-11 16:11:01 -080081#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
82#define FASTRPC_STATIC_HANDLE_LISTENER (3)
83#define FASTRPC_STATIC_HANDLE_MAX (20)
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053084#define FASTRPC_LATENCY_CTRL_ENB (1)
Sathish Ambleya21b5b52017-01-11 16:11:01 -080085
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +053086#define INIT_FILELEN_MAX (2*1024*1024)
87#define INIT_MEMLEN_MAX (8*1024*1024)
88
Sathish Ambleya21b5b52017-01-11 16:11:01 -080089#define PERF_END (void)0
90
91#define PERF(enb, cnt, ff) \
92 {\
93 struct timespec startT = {0};\
94 if (enb) {\
95 getnstimeofday(&startT);\
96 } \
97 ff ;\
98 if (enb) {\
99 cnt += getnstimediff(&startT);\
100 } \
101 }
102
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700103static int fastrpc_glink_open(int cid);
104static void fastrpc_glink_close(void *chan, int cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -0800105static struct dentry *debugfs_root;
106static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700107
108static inline uint64_t buf_page_start(uint64_t buf)
109{
110 uint64_t start = (uint64_t) buf & PAGE_MASK;
111 return start;
112}
113
114static inline uint64_t buf_page_offset(uint64_t buf)
115{
116 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
117 return offset;
118}
119
120static inline int buf_num_pages(uint64_t buf, ssize_t len)
121{
122 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
123 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
124 int nPages = end - start + 1;
125 return nPages;
126}
127
128static inline uint64_t buf_page_size(uint32_t size)
129{
130 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
131
132 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
133}
134
/* Convert a 64-bit value carried over RPC back into a kernel pointer. */
static inline void *uint64_to_ptr(uint64_t addr)
{
	return (void *)(uintptr_t)addr;
}
141
/* Widen a kernel pointer to the 64-bit representation used on the wire. */
static inline uint64_t ptr_to_uint64(void *ptr)
{
	return (uint64_t)(uintptr_t)ptr;
}
148
149struct fastrpc_file;
150
/* A DMA-coherent buffer owned by one client file. */
struct fastrpc_buf {
	struct hlist_node hn;		/* link in fl->bufs cache list */
	struct fastrpc_file *fl;	/* owning client */
	void *virt;			/* kernel virtual address from dma_alloc_coherent */
	uint64_t phys;			/* bus/physical address; SMMU cb id packed in bits 32+ */
	ssize_t size;			/* allocation size in bytes */
};
158
159struct fastrpc_ctx_lst;
160
/*
 * One invoke buffer's user address range, used by context_build_overlap()
 * to detect buffers that overlap earlier (lower-address) buffers.
 */
struct overlap {
	uintptr_t start;	/* user VA of buffer start */
	uintptr_t end;		/* user VA one past buffer end */
	int raix;		/* index back into the remote-arg array */
	uintptr_t mstart;	/* start of the portion not covered by prior buffers */
	uintptr_t mend;		/* end of that portion (0/0 when fully contained) */
	uintptr_t offset;	/* bytes at the front already covered by a prior buffer */
};
169
/* State for one in-flight remote invocation. */
struct smq_invoke_ctx {
	struct hlist_node hn;		/* link in clst->pending or ->interrupted */
	struct completion work;		/* signalled when the DSP response arrives */
	int retval;			/* remote return value; -1 until completion */
	int pid;			/* issuing thread (used to match interrupted ctxs) */
	int tgid;			/* client process id (fl->tgid) */
	remote_arg_t *lpra;		/* local (caller-format) argument array */
	remote_arg64_t *rpra;		/* remote (64-bit wire-format) argument array */
	int *fds;			/* optional per-arg ion fds */
	unsigned int *attrs;		/* optional per-arg FASTRPC_ATTR_* flags */
	struct fastrpc_mmap **maps;	/* per-arg mappings pinned for this call */
	struct fastrpc_buf *buf;	/* scratch buffer holding copied args */
	ssize_t used;			/* bytes of buf consumed */
	struct fastrpc_file *fl;	/* owning client */
	uint32_t sc;			/* scalars word: method id + buffer counts */
	struct overlap *overs;		/* per-arg overlap records */
	struct overlap **overps;	/* same records, sorted by address */
	struct smq_msg msg;		/* message posted to the channel */
	uint32_t *crc;			/* optional user pointer for CRC list */
	unsigned int magic;		/* FASTRPC_CTX_MAGIC while the ctx is live */
};
191
/* Per-client lists of invoke contexts, protected by fl->hlock. */
struct fastrpc_ctx_lst {
	struct hlist_head pending;	/* contexts awaiting a DSP response */
	struct hlist_head interrupted;	/* contexts parked by a signal, resumable */
};
196
/* SMMU (IOMMU) state for one context bank. */
struct fastrpc_smmu {
	struct device *dev;		/* device used for DMA mapping/allocation */
	struct dma_iommu_mapping *mapping;
	int cb;				/* context-bank id, packed into phys bits 32+ */
	int enabled;			/* nonzero when SMMU translation is active */
	int faults;
	int secure;			/* handles secure (CPZ) buffers */
	int coherent;			/* IO-coherent path */
};
206
/* One DSP session slot on a channel. */
struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;	/* per-session SMMU context bank */
	int used;			/* nonzero once claimed by a client */
};
212
/* G-Link transport state for one channel. */
struct fastrpc_glink_info {
	int link_state;			/* FASTRPC_LINK_STATE_{DOWN,UP} */
	int port_state;			/* FASTRPC_LINK_{DISCONNECTED,...} */
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};
220
/* Per-DSP channel state (one entry per NUM_CHANNELS subsystem). */
struct fastrpc_channel_ctx {
	char *name;			/* cdev-facing channel name */
	char *subsys;			/* subsystem-restart identifier */
	void *chan;			/* open glink channel handle */
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;		/* channel open/close handshake */
	struct completion workport;	/* port-level handshake */
	struct notifier_block nb;	/* SSR notifier */
	struct kref kref;		/* open-count of the channel */
	int sesscount;			/* sessions actually probed for this channel */
	int ssrcount;			/* incremented on each subsystem restart */
	void *handle;			/* subsystem-notif registration handle */
	int prevssrcount;
	int issubsystemup;		/* nonzero while the remote subsystem is up */
	int vmid;			/* remote VM id for hyp_assign_phys, 0 if none */
	int rhvmid;			/* VM id used for the remote heap */
	int ramdumpenabled;
	void *remoteheap_ramdump_dev;
	struct fastrpc_glink_info link;	/* transport bookkeeping */
};
242
/* Driver-global state; a single instance lives in gfa. */
struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;	/* points at gcinfo[] */
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;		/* serializes channel open/close */
	struct smq_phy_page range;
	struct hlist_head maps;		/* global (remote-heap) mappings */
	uint32_t staticpd_flags;
	dev_t dev_no;
	int compat;			/* 32-bit compat ioctl path in use */
	struct hlist_head drivers;	/* all open fastrpc_file clients */
	spinlock_t hlock;		/* protects maps and drivers lists */
	struct ion_client *client;
	struct device *dev;		/* adsprpc-mem device for remote-heap DMA */
	unsigned int latency;		/* PM QoS latency request value */
};
259
/* A buffer mapped for DSP access (ion import or remote-heap allocation). */
struct fastrpc_mmap {
	struct hlist_node hn;		/* link in fl->maps or the global me->maps */
	struct fastrpc_file *fl;	/* owner; NULL for remote-heap mappings */
	struct fastrpc_apps *apps;
	int fd;				/* ion fd the mapping was created from */
	uint32_t flags;			/* ADSP_MMAP_* kind */
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;			/* DMA address; SMMU cb id packed in bits 32+ */
	ssize_t size;			/* mapped size */
	uintptr_t __user va;		/* user VA (equals phys for remote heap) */
	ssize_t len;			/* requested length */
	int refs;			/* reference count under the list lock */
	uintptr_t raddr;		/* remote-side address after mmap on DSP */
	int uncached;
	int secure;			/* backed by an ION_FLAG_SECURE buffer */
	uintptr_t attr;			/* FASTRPC_ATTR_* bits */
};
280
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800281struct fastrpc_perf {
282 int64_t count;
283 int64_t flush;
284 int64_t map;
285 int64_t copy;
286 int64_t link;
287 int64_t getargs;
288 int64_t putargs;
289 int64_t invargs;
290 int64_t invoke;
291};
292
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700293struct fastrpc_file {
294 struct hlist_node hn;
295 spinlock_t hlock;
296 struct hlist_head maps;
297 struct hlist_head bufs;
298 struct fastrpc_ctx_lst clst;
299 struct fastrpc_session_ctx *sctx;
300 struct fastrpc_session_ctx *secsctx;
301 uint32_t mode;
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800302 uint32_t profile;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +0530303 int sessionid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700304 int tgid;
305 int cid;
306 int ssrcount;
307 int pd;
tharun kumar9f899ea2017-07-03 17:07:03 +0530308 int file_close;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700309 struct fastrpc_apps *apps;
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800310 struct fastrpc_perf perf;
Sathish Ambley1ca68232017-01-19 10:32:55 -0800311 struct dentry *debugfs_file;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +0530312 struct pm_qos_request pm_qos_req;
313 int qos_request;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700314};
315
316static struct fastrpc_apps gfa;
317
/*
 * Static channel table; index order is the channel id (cid) and must
 * match the NUM_CHANNELS comment: 0=adsp, 1=mdsp, 2=slpi, 3=cdsp.
 */
static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};
344
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800345static inline int64_t getnstimediff(struct timespec *start)
346{
347 int64_t ns;
348 struct timespec ts, b;
349
350 getnstimeofday(&ts);
351 b = timespec_sub(ts, *start);
352 ns = timespec_to_ns(&b);
353 return ns;
354}
355
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700356static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
357{
c_mtharue1a5ce12017-10-13 20:47:09 +0530358 struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700359 int vmid;
360
361 if (!fl)
362 return;
363 if (cache) {
364 spin_lock(&fl->hlock);
365 hlist_add_head(&buf->hn, &fl->bufs);
366 spin_unlock(&fl->hlock);
367 return;
368 }
369 if (!IS_ERR_OR_NULL(buf->virt)) {
370 int destVM[1] = {VMID_HLOS};
371 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
372
373 if (fl->sctx->smmu.cb)
374 buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
375 vmid = fl->apps->channel[fl->cid].vmid;
376 if (vmid) {
377 int srcVM[2] = {VMID_HLOS, vmid};
378
379 hyp_assign_phys(buf->phys, buf_page_size(buf->size),
380 srcVM, 2, destVM, destVMperm, 1);
381 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530382 dma_free_coherent(fl->sctx->smmu.dev, buf->size, buf->virt,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700383 buf->phys);
384 }
385 kfree(buf);
386}
387
388static void fastrpc_buf_list_free(struct fastrpc_file *fl)
389{
390 struct fastrpc_buf *buf, *free;
391
392 do {
393 struct hlist_node *n;
394
c_mtharue1a5ce12017-10-13 20:47:09 +0530395 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700396 spin_lock(&fl->hlock);
397 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
398 hlist_del_init(&buf->hn);
399 free = buf;
400 break;
401 }
402 spin_unlock(&fl->hlock);
403 if (free)
404 fastrpc_buf_free(free, 0);
405 } while (free);
406}
407
408static void fastrpc_mmap_add(struct fastrpc_mmap *map)
409{
c_mtharue1a5ce12017-10-13 20:47:09 +0530410 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
411 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
412 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700413
c_mtharue1a5ce12017-10-13 20:47:09 +0530414 spin_lock(&me->hlock);
415 hlist_add_head(&map->hn, &me->maps);
416 spin_unlock(&me->hlock);
417 } else {
418 struct fastrpc_file *fl = map->fl;
419
420 spin_lock(&fl->hlock);
421 hlist_add_head(&map->hn, &fl->maps);
422 spin_unlock(&fl->hlock);
423 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700424}
425
/*
 * Look up an existing mapping covering [va, va + len) created from 'fd'.
 * Remote-heap flags search the global list, everything else the client's
 * list. On a hit, optionally takes a reference and returns 0 via *ppmap;
 * returns -ENOTTY when no mapping matches.
 *
 * NOTE(review): 'va + len' is computed without an overflow check; a
 * crafted va/len pair could wrap and falsely satisfy the range test —
 * confirm callers sanitize these values.
 */
static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
	uintptr_t __user va, ssize_t len, int mflags, int refs,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_mmap *match = NULL, *map = NULL;
	struct hlist_node *n;

	if (mflags == ADSP_MMAP_HEAP_ADDR ||
		 mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
			if (va >= map->va &&
			    va + len <= map->va + map->len &&
			    map->fd == fd) {
				if (refs)
					map->refs++;
				match = map;
				break;
			}
		}
		spin_unlock(&me->hlock);
	} else {
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			if (va >= map->va &&
			    va + len <= map->va + map->len &&
			    map->fd == fd) {
				if (refs)
					map->refs++;
				match = map;
				break;
			}
		}
		spin_unlock(&fl->hlock);
	}
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}
467
/*
 * Allocate 'size' bytes of coherent memory from the adsprpc-mem device
 * for the remote heap, returning only the physical address through
 * *region_start. Returns 0 on success, -ENODEV/-ENOMEM on failure.
 *
 * NOTE(review): the kernel virtual address returned by
 * dma_alloc_coherent() is discarded here — the later free path has no
 * access to it; verify this against fastrpc_mmap_free().
 */
static int dma_alloc_memory(phys_addr_t *region_start, ssize_t size)
{
	struct fastrpc_apps *me = &gfa;
	void *vaddr = NULL;

	if (me->dev == NULL) {
		pr_err("device adsprpc-mem is not initialized\n");
		return -ENODEV;
	}
	vaddr = dma_alloc_coherent(me->dev, size, region_start, GFP_KERNEL);
	if (!vaddr) {
		pr_err("ADSPRPC: Failed to allocate %x remote heap memory\n",
						(unsigned int)size);
		return -ENOMEM;
	}
	return 0;
}
485
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700486static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
487 ssize_t len, struct fastrpc_mmap **ppmap)
488{
c_mtharue1a5ce12017-10-13 20:47:09 +0530489 struct fastrpc_mmap *match = NULL, *map;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700490 struct hlist_node *n;
491 struct fastrpc_apps *me = &gfa;
492
493 spin_lock(&me->hlock);
494 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
495 if (map->raddr == va &&
496 map->raddr + map->len == va + len &&
497 map->refs == 1) {
498 match = map;
499 hlist_del_init(&map->hn);
500 break;
501 }
502 }
503 spin_unlock(&me->hlock);
504 if (match) {
505 *ppmap = match;
506 return 0;
507 }
508 spin_lock(&fl->hlock);
509 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
510 if (map->raddr == va &&
511 map->raddr + map->len == va + len &&
512 map->refs == 1) {
513 match = map;
514 hlist_del_init(&map->hn);
515 break;
516 }
517 }
518 spin_unlock(&fl->hlock);
519 if (match) {
520 *ppmap = match;
521 return 0;
522 }
523 return -ENOTTY;
524}
525
/*
 * Drop a reference on a mapping and, when it reaches zero (or 'flags'
 * forces a free for non-heap mappings), tear it down: return pages to
 * HLOS if donated, unmap from the SMMU, and release the dma-buf/ion
 * resources. Remote-heap mappings free their coherent allocation instead.
 */
static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;

	if (!map)
		return;
	fl = map->fl;
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		/* remote-heap mappings live on the global list */
		spin_lock(&me->hlock);
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		spin_unlock(&me->hlock);
		if (map->refs > 0)
			return;
	} else {
		spin_lock(&fl->hlock);
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		spin_unlock(&fl->hlock);
		/* 'flags' forces the free even with outstanding refs */
		if (map->refs > 0 && !flags)
			return;
	}
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {

		if (me->dev == NULL) {
			pr_err("failed to free remote heap allocation\n");
			return;
		}
		if (map->phys) {
			/*
			 * NOTE(review): &(map->va) is the address of the
			 * struct field, not the mapping's kernel virtual
			 * address (dma_alloc_memory() discarded it) — this
			 * looks wrong; confirm against the DMA API contract.
			 */
			dma_free_coherent(me->dev, map->size,
				&(map->va), map->phys);
		}
	} else {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		if (!IS_ERR_OR_NULL(map->handle))
			ion_free(fl->apps->client, map->handle);
		if (sess && sess->smmu.enabled) {
			if (map->size || map->phys)
				msm_dma_unmap_sg(sess->smmu.dev,
					map->table->sgl,
					map->table->nents, DMA_BIDIRECTIONAL,
					map->buf);
		}
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid && map->phys) {
			int srcVM[2] = {VMID_HLOS, vmid};

			/* return the pages to HLOS before unmapping */
			hyp_assign_phys(map->phys, buf_page_size(map->size),
				srcVM, 2, destVM, destVMperm, 1);
		}

		if (!IS_ERR_OR_NULL(map->table))
			dma_buf_unmap_attachment(map->attach, map->table,
					DMA_BIDIRECTIONAL);
		if (!IS_ERR_OR_NULL(map->attach))
			dma_buf_detach(map->buf, map->attach);
		if (!IS_ERR_OR_NULL(map->buf))
			dma_buf_put(map->buf);
	}
	kfree(map);
}
601
602static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
603 struct fastrpc_session_ctx **session);
604
/*
 * Create (or reuse) a DSP-visible mapping for fd/va/len. Returns an
 * existing mapping via fastrpc_mmap_find() when one covers the range;
 * otherwise allocates remote-heap memory (ADSP_MMAP_*HEAP_ADDR flags) or
 * imports the ion buffer, attaches it to the session's SMMU device, maps
 * it for DMA, and donates the pages to the channel's VM if configured.
 * On success *ppmap holds the mapping (refcount already taken).
 */
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t __user va, ssize_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = NULL;
	unsigned long attrs;
	phys_addr_t region_start = 0;
	unsigned long flags;
	int err = 0, vmid;

	/* reuse an existing mapping covering the same range, if any */
	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		/* remote heap: driver-owned coherent allocation, no ion fd */
		map->apps = me;
		map->fl = NULL;
		VERIFY(err, !dma_alloc_memory(&region_start, len));
		if (err)
			goto bail;
		map->phys = (uintptr_t)region_start;
		map->size = len;
		/* no user VA for the remote heap; alias it to phys */
		map->va = (uintptr_t __user)map->phys;
	} else {
		if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
			pr_info("adsprpc: buffer mapped with persist attr %x\n",
				(unsigned int)map->attr);
			/* extra ref keeps the map alive across invokes */
			map->refs = 2;
		}
		VERIFY(err, !IS_ERR_OR_NULL(map->handle =
				ion_import_dma_buf_fd(fl->apps->client, fd)));
		if (err)
			goto bail;
		VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
		if (err)
			goto bail;

		map->secure = flags & ION_FLAG_SECURE;
		if (map->secure) {
			/* secure buffers need the dedicated CPZ session */
			if (!fl->secsctx)
				err = fastrpc_session_alloc(chan, 1,
							&fl->secsctx);
			if (err)
				goto bail;
		}
		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		VERIFY(err, !IS_ERR_OR_NULL(sess));
		if (err)
			goto bail;

		map->uncached = !ION_IS_CACHED(flags);
		if (map->attr & FASTRPC_ATTR_NOVA && !sess->smmu.coherent)
			map->uncached = 1;

		VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->attach =
				dma_buf_attach(map->buf, sess->smmu.dev)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
		if (err)
			goto bail;
		if (sess->smmu.enabled) {
			attrs = DMA_ATTR_EXEC_MAPPING;

			if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
				(sess->smmu.coherent && map->uncached))
				attrs |= DMA_ATTR_FORCE_NON_COHERENT;
			else if (map->attr & FASTRPC_ATTR_COHERENT)
				attrs |= DMA_ATTR_FORCE_COHERENT;

			VERIFY(err, map->table->nents ==
				msm_dma_map_sg_attrs(sess->smmu.dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
			if (err)
				goto bail;
		} else {
			/* without an SMMU the buffer must be contiguous */
			VERIFY(err, map->table->nents == 1);
			if (err)
				goto bail;
		}
		map->phys = sg_dma_address(map->table->sgl);
		if (sess->smmu.cb) {
			/* tag the address with the context-bank id */
			map->phys += ((uint64_t)sess->smmu.cb << 32);
			map->size = sg_dma_len(map->table->sgl);
		} else {
			map->size = buf_page_size(len);
		}
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[2] = {VMID_HLOS, vmid};
			int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

			/* share the pages with the remote VM */
			VERIFY(err, !hyp_assign_phys(map->phys,
					buf_page_size(map->size),
					srcVM, 1, destVM, destVMperm, 2));
			if (err)
				goto bail;
		}
		map->va = va;
	}
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map, 0);
	return err;
}
742
/*
 * Get a DMA-coherent buffer of at least 'size' bytes for client 'fl':
 * first try the per-file cache (smallest buffer that fits), otherwise
 * allocate fresh memory, retrying once after draining the cache. Newly
 * allocated pages are donated to the channel's VM when one is configured.
 */
static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
			     struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = NULL, *fr = NULL;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = NULL;
	VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
				       (void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
					       (void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	/* tag the address with the context-bank id */
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		/* share the new pages with the remote VM */
		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
 bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}
808
809
/*
 * If the calling thread previously had an invoke interrupted by a
 * signal, move that context back to the pending list and return it via
 * *po so the invoke can resume waiting. Returns -1 when the saved
 * context does not match the retry (different sc or file); 0 otherwise.
 */
static int context_restore_interrupted(struct fastrpc_file *fl,
				struct fastrpc_ioctl_invoke_crc *inv,
				struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		/* contexts are matched by the issuing thread's pid */
		if (ictx->pid == current->pid) {
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}
837
838#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
839static int overlap_ptr_cmp(const void *a, const void *b)
840{
841 struct overlap *pa = *((struct overlap **)a);
842 struct overlap *pb = *((struct overlap **)b);
843 /* sort with lowest starting buffer first */
844 int st = CMP(pa->start, pb->start);
845 /* sort with highest ending buffer first */
846 int ed = CMP(pb->end, pa->end);
847 return st == 0 ? ed : st;
848}
849
Sathish Ambley9466d672017-01-25 10:51:55 -0800850static int context_build_overlap(struct smq_invoke_ctx *ctx)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700851{
Sathish Ambley9466d672017-01-25 10:51:55 -0800852 int i, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700853 remote_arg_t *lpra = ctx->lpra;
854 int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
855 int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
856 int nbufs = inbufs + outbufs;
857 struct overlap max;
858
859 for (i = 0; i < nbufs; ++i) {
860 ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
861 ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
Sathish Ambley9466d672017-01-25 10:51:55 -0800862 if (lpra[i].buf.len) {
863 VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
864 if (err)
865 goto bail;
866 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700867 ctx->overs[i].raix = i;
868 ctx->overps[i] = &ctx->overs[i];
869 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530870 sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700871 max.start = 0;
872 max.end = 0;
873 for (i = 0; i < nbufs; ++i) {
874 if (ctx->overps[i]->start < max.end) {
875 ctx->overps[i]->mstart = max.end;
876 ctx->overps[i]->mend = ctx->overps[i]->end;
877 ctx->overps[i]->offset = max.end -
878 ctx->overps[i]->start;
879 if (ctx->overps[i]->end > max.end) {
880 max.end = ctx->overps[i]->end;
881 } else {
882 ctx->overps[i]->mend = 0;
883 ctx->overps[i]->mstart = 0;
884 }
885 } else {
886 ctx->overps[i]->mend = ctx->overps[i]->end;
887 ctx->overps[i]->mstart = ctx->overps[i]->start;
888 ctx->overps[i]->offset = 0;
889 max = *ctx->overps[i];
890 }
891 }
Sathish Ambley9466d672017-01-25 10:51:55 -0800892bail:
893 return err;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700894}
895
/*
 * Copy helpers that serve both user-space and in-kernel callers: when
 * 'kernel' is false the far side is a user pointer and
 * copy_{from,to}_user() is used (err set non-zero on fault); when true
 * both buffers are kernel memory and a plain memmove() suffices.
 */
#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst),\
			(void const __user *)(src),\
			(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((void __user *)(dst), \
			(src), (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)
914
915
916static void context_free(struct smq_invoke_ctx *ctx);
917
/*
 * Allocate and initialize an invoke context for one remote call. The
 * per-arg arrays (maps, lpra, fds, attrs, overs, overps) are carved out
 * of a single allocation trailing the ctx. Copies the argument metadata
 * from the caller (user or kernel per 'kernel'), builds overlap info,
 * stamps the magic, and queues the ctx on the pending list.
 */
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			 struct fastrpc_ioctl_invoke_crc *invokefd,
			 struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = NULL;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	/* one combined allocation for ctx plus all per-arg arrays */
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	/* slice the trailing storage into the per-arg arrays */
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}
	ctx->crc = (uint32_t *)invokefd->crc;
	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = fl->tgid;
	init_completion(&ctx->work);
	/* magic lets the response path sanity-check the ctx pointer */
	ctx->magic = FASTRPC_CTX_MAGIC;

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}
988
/*
 * Park a context whose waiter was interrupted by a signal: move it from
 * the pending list to the interrupted list so a retried invocation with
 * the same parameters can re-attach to it instead of re-sending.
 */
static void context_save_interrupted(struct smq_invoke_ctx *ctx)
{
	struct fastrpc_ctx_lst *clst = &ctx->fl->clst;

	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	hlist_add_head(&ctx->hn, &clst->interrupted);
	spin_unlock(&ctx->fl->hlock);
	/* free the cache on power collapse */
	fastrpc_buf_list_free(ctx->fl);
}
1000
/*
 * Release an invocation context: unlink it from whichever list holds it,
 * drop the per-buffer mmap references, free the argument copy buffer and
 * finally the context allocation itself.
 *
 * NOTE(review): only the inbuf/outbuf map slots are freed here; maps in
 * the handle slots (indices >= nbufs, created by get_args()) appear to be
 * released via the fdlist path in put_args() — confirm they cannot leak
 * on error paths that skip put_args().
 */
static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i], 0);
	fastrpc_buf_free(ctx->buf, 1);
	/* Clear the magic so a stale response cannot validate this ctx. */
	ctx->magic = 0;
	kfree(ctx);
}
1015
/*
 * Completion path: publish the remote return value, then wake the waiter.
 * retval must be stored before complete() so the woken thread reads the
 * final value.
 */
static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}
1021
1022
1023static void fastrpc_notify_users(struct fastrpc_file *me)
1024{
1025 struct smq_invoke_ctx *ictx;
1026 struct hlist_node *n;
1027
1028 spin_lock(&me->hlock);
1029 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1030 complete(&ictx->work);
1031 }
1032 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1033 complete(&ictx->work);
1034 }
1035 spin_unlock(&me->hlock);
1036
1037}
1038
1039static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
1040{
1041 struct fastrpc_file *fl;
1042 struct hlist_node *n;
1043
1044 spin_lock(&me->hlock);
1045 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1046 if (fl->cid == cid)
1047 fastrpc_notify_users(fl);
1048 }
1049 spin_unlock(&me->hlock);
1050
1051}
1052static void context_list_ctor(struct fastrpc_ctx_lst *me)
1053{
1054 INIT_HLIST_HEAD(&me->interrupted);
1055 INIT_HLIST_HEAD(&me->pending);
1056}
1057
1058static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
1059{
1060 struct fastrpc_ctx_lst *clst = &fl->clst;
c_mtharue1a5ce12017-10-13 20:47:09 +05301061 struct smq_invoke_ctx *ictx = NULL, *ctxfree;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001062 struct hlist_node *n;
1063
1064 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301065 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001066 spin_lock(&fl->hlock);
1067 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
1068 hlist_del_init(&ictx->hn);
1069 ctxfree = ictx;
1070 break;
1071 }
1072 spin_unlock(&fl->hlock);
1073 if (ctxfree)
1074 context_free(ctxfree);
1075 } while (ctxfree);
1076 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301077 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001078 spin_lock(&fl->hlock);
1079 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
1080 hlist_del_init(&ictx->hn);
1081 ctxfree = ictx;
1082 break;
1083 }
1084 spin_unlock(&fl->hlock);
1085 if (ctxfree)
1086 context_free(ctxfree);
1087 } while (ctxfree);
1088}
1089
1090static int fastrpc_file_free(struct fastrpc_file *fl);
1091static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
1092{
1093 struct fastrpc_file *fl, *free;
1094 struct hlist_node *n;
1095
1096 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301097 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001098 spin_lock(&me->hlock);
1099 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1100 hlist_del_init(&fl->hn);
1101 free = fl;
1102 break;
1103 }
1104 spin_unlock(&me->hlock);
1105 if (free)
1106 fastrpc_file_free(free);
1107 } while (free);
1108}
1109
/*
 * Marshal the invocation arguments for transport to the DSP.
 *
 * Lays out a single physically contiguous message buffer:
 *   [remote args][invoke buf list][phy pages][fdlist][crclist][arg copies]
 * ION-backed arguments (those with a valid fd) are mapped and passed by
 * reference; all other buffers are copied into the tail of the message
 * buffer.  Caches are flushed for non-coherent sessions so the DSP sees
 * the data.
 *
 * @kernel: non-zero for in-kernel invocations (affects copy direction).
 * Returns 0 on success or a negative error.
 */
static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
{
	remote_arg64_t *rpra;
	remote_arg_t *lpra = ctx->lpra;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages, *ipage;
	uint32_t sc = ctx->sc;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	int handles, bufs = inbufs + outbufs;
	uintptr_t args;
	ssize_t rlen = 0, copylen = 0, metalen = 0;
	int i, oix;
	int err = 0;
	int mflags = 0;
	uint64_t *fdlist;
	uint32_t *crclist;

	/* calculate size of the metadata */
	/*
	 * With rpra == NULL the smq_*_start() helpers yield offsets from 0,
	 * so &ipage[0] below is the metadata byte count, not a real pointer.
	 */
	rpra = NULL;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;

	/* Map ION-backed in/out buffers; fd 0 or -1 means "not ION". */
	for (i = 0; i < bufs; ++i) {
		uintptr_t __user buf = (uintptr_t __user)lpra[i].buf.pv;
		ssize_t len = lpra[i].buf.len;

		if (ctx->fds[i] && (ctx->fds[i] != -1))
			fastrpc_mmap_create(ctx->fl, ctx->fds[i],
					ctx->attrs[i], buf, len,
					mflags, &ctx->maps[i]);
		ipage += 1;
	}
	/* Handle arguments are always fd-backed; failure here is fatal. */
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	for (i = bufs; i < bufs + handles; i++) {
		VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
				FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
		if (err)
			goto bail;
		ipage += 1;
	}
	metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
				 (sizeof(uint32_t) * M_CRCLIST);

	/* calculate len requreed for copying */
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		uintptr_t mstart, mend;
		ssize_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (ctx->maps[i])
			continue;
		/* Start of a non-overlapping region: keep it cache aligned. */
		if (ctx->overps[oix]->offset == 0)
			copylen = ALIGN(copylen, BALIGN);
		mstart = ctx->overps[oix]->mstart;
		mend = ctx->overps[oix]->mend;
		VERIFY(err, (mend - mstart) <= LONG_MAX);
		if (err)
			goto bail;
		copylen += mend - mstart;
		VERIFY(err, copylen >= 0);
		if (err)
			goto bail;
	}
	ctx->used = copylen;

	/* allocate new buffer */
	if (copylen) {
		VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
		if (err)
			goto bail;
	}
	/* Zero the metadata header so stale fdlist/crclist data never leaks. */
	if (ctx->buf->virt && metalen <= copylen)
		memset(ctx->buf->virt, 0, metalen);

	/* copy metadata */
	rpra = ctx->buf->virt;
	ctx->rpra = rpra;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;
	args = (uintptr_t)ctx->buf->virt + metalen;
	for (i = 0; i < bufs + handles; ++i) {
		if (lpra[i].buf.len)
			list[i].num = 1;
		else
			list[i].num = 0;
		list[i].pgidx = ipage - pages;
		ipage++;
	}

	/* map ion buffers */
	PERF(ctx->fl->profile, ctx->fl->perf.map,
	for (i = 0; rpra && i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];
		uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
		ssize_t len = lpra[i].buf.len;

		rpra[i].buf.pv = 0;
		rpra[i].buf.len = len;
		if (!len)
			continue;
		if (map) {
			struct vm_area_struct *vma;
			uintptr_t offset;
			int num = buf_num_pages(buf, len);
			int idx = list[i].pgidx;

			if (map->attr & FASTRPC_ATTR_NOVA) {
				offset = 0;
			} else {
				/* Physical offset of the user buffer within
				 * its VMA; must stay inside the mapping.
				 */
				down_read(&current->mm->mmap_sem);
				VERIFY(err, NULL != (vma = find_vma(current->mm,
								map->va)));
				if (err) {
					up_read(&current->mm->mmap_sem);
					goto bail;
				}
				offset = buf_page_start(buf) - vma->vm_start;
				up_read(&current->mm->mmap_sem);
				VERIFY(err, offset < (uintptr_t)map->size);
				if (err)
					goto bail;
			}
			pages[idx].addr = map->phys + offset;
			pages[idx].size = num << PAGE_SHIFT;
		}
		rpra[i].buf.pv = buf;
	}
	PERF_END);
	for (i = bufs; i < bufs + handles; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		pages[i].addr = map->phys;
		pages[i].size = map->size;
	}
	fdlist = (uint64_t *)&pages[bufs + handles];
	for (i = 0; i < M_FDLIST; i++)
		fdlist[i] = 0;
	crclist = (uint32_t *)&fdlist[M_FDLIST];
	memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);

	/* copy non ion buffers */
	PERF(ctx->fl->profile, ctx->fl->perf.copy,
	rlen = copylen - metalen;
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];
		ssize_t mlen;
		uint64_t buf;
		ssize_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (map)
			continue;
		if (ctx->overps[oix]->offset == 0) {
			rlen -= ALIGN(args, BALIGN) - args;
			args = ALIGN(args, BALIGN);
		}
		mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
		VERIFY(err, rlen >= mlen);
		if (err)
			goto bail;
		rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
		pages[list[i].pgidx].addr = ctx->buf->phys -
					    ctx->overps[oix]->offset +
					    (copylen - rlen);
		pages[list[i].pgidx].addr =
			buf_page_start(pages[list[i].pgidx].addr);
		buf = rpra[i].buf.pv;
		pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
		/* Only input buffers carry data toward the DSP. */
		if (i < inbufs) {
			K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
					lpra[i].buf.pv, len);
			if (err)
				goto bail;
		}
		args = args + mlen;
		rlen -= mlen;
	}
	PERF_END);

	/* Flush CPU caches for non-coherent buffers so the DSP sees them. */
	PERF(ctx->fl->profile, ctx->fl->perf.flush,
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		if (rpra[i].buf.len && ctx->overps[oix]->mstart)
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
			uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
	}
	PERF_END);
	for (i = bufs; rpra && i < bufs + handles; i++) {
		rpra[i].dma.fd = ctx->fds[i];
		rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
		rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
	}

	if (!ctx->fl->sctx->smmu.coherent) {
		PERF(ctx->fl->profile, ctx->fl->perf.flush,
		dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
		PERF_END);
	}
 bail:
	return err;
}
1328
/*
 * Unmarshal results after the remote call completes.
 *
 * Copies output buffer contents back to the caller (unless the buffer was
 * ION-mapped, in which case the mapping is simply released), frees any
 * maps the DSP asked to release via the fdlist, and returns the optional
 * CRC list to user space.
 *
 * @kernel: non-zero for in-kernel callers (affects copy direction).
 * @upra:   caller's remote-arg array (currently unused here; outputs go
 *          through ctx->lpra pointers captured at context_alloc time).
 * Returns 0 on success or a negative error.
 */
static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		    remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	uint32_t *crclist = NULL;

	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	/* Recompute the metadata layout used by get_args(). */
	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	crclist = (uint32_t *)(fdlist + M_FDLIST);

	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			/* Copied output: move data back to the caller. */
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			/* ION output: data landed in place, drop the map. */
			fastrpc_mmap_free(ctx->maps[i], 0);
			ctx->maps[i] = NULL;
		}
	}
	/* Release any maps the DSP flagged as no longer needed. */
	if (inbufs + outbufs + handles) {
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap, 0);
		}
	}
	/* Hand the per-buffer CRCs back if the caller asked for them. */
	if (ctx->crc && crclist && rpra)
		K_COPY_TO_USER(err, kernel, ctx->crc,
			crclist, M_CRCLIST*sizeof(uint32_t));

 bail:
	return err;
}
1380
/*
 * Pre-invoke cache maintenance for output buffers.
 *
 * Before the DSP writes an output buffer, flush any CPU cache lines that
 * straddle its unaligned start or end; otherwise a later CPU writeback of
 * those partially-owned lines could clobber data the DSP produced.
 * Buffers that are uncached, coherent, or share a page with the message
 * buffer itself are skipped.
 */
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		/* Coherent SMMU session: no maintenance unless the map is
		 * explicitly marked non-coherent.
		 */
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* Buffer shares the message buffer's page; handled there. */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
							rpra[i].buf.len);
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}
1416
/*
 * Post-invoke cache invalidation.
 *
 * After the DSP has written its outputs, invalidate the CPU cache over
 * each non-coherent output buffer (and the message buffer) so subsequent
 * CPU reads see the DSP's data rather than stale cache lines.
 */
static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* Shares the message buffer's page; the final range
		 * invalidate below covers it.
		 */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						 + rpra[i].buf.len));
	}

	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}
1456
/*
 * Build the invoke message and transmit it to the DSP over glink.
 *
 * @kernel: non-zero for kernel-originated calls; these are sent with
 *          pid 0 so the DSP treats them as privileged/driver traffic.
 * @handle: remote handle the call is directed at.
 *
 * Fails with -ECONNRESET if a subsystem restart occurred since this file
 * attached, and refuses to send unless the glink port is connected.
 * Returns 0 on success or a negative error from glink_tx().
 */
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, NULL != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = fl->tgid;
	msg->tid = current->pid;
	/* Second concurrent session is distinguished by a tid flag bit. */
	if (fl->sessionid)
		msg->tid |= (1 << SESSION_ID_INDEX);
	if (kernel)
		msg->pid = 0;
	/* The ctx pointer doubles as the completion cookie; the low bit
	 * carries the PD indicator.
	 */
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
			FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
 bail:
	return err;
}
1494
1495static void fastrpc_init(struct fastrpc_apps *me)
1496{
1497 int i;
1498
1499 INIT_HLIST_HEAD(&me->drivers);
1500 spin_lock_init(&me->hlock);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05301501 mutex_init(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001502 me->channel = &gcinfo[0];
1503 for (i = 0; i < NUM_CHANNELS; i++) {
1504 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301505 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001506 me->channel[i].sesscount = 0;
1507 }
1508}
1509
static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);

/*
 * Core invoke path shared by the ioctl and in-kernel callers.
 *
 * Flow: resume an interrupted context if one matches, otherwise allocate
 * a fresh one; marshal arguments (get_args), do pre-invoke cache work,
 * send over glink, wait for the DSP's completion, invalidate caches,
 * unmarshal (put_args).  A signal while waiting parks the context on the
 * interrupted list for a later retry instead of freeing it.
 *
 * @mode:   scheduling mode (unused here beyond being part of the ABI).
 * @kernel: non-zero for kernel-originated calls; the wait is then
 *          uninterruptible.
 * Returns 0 on success, a negative error, or the DSP's retval.
 *
 * NOTE(review): the SSR check below assigns positive ECONNRESET, unlike
 * the negative errno used elsewhere in this file — looks inconsistent;
 * confirm callers treat any non-zero value as failure.
 */
static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
				   uint32_t kernel,
				   struct fastrpc_ioctl_invoke_crc *inv)
{
	struct smq_invoke_ctx *ctx = NULL;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;
	int cid = fl->cid;
	int interrupted = 0;
	int err = 0;
	struct timespec invoket = {0};

	if (fl->profile)
		getnstimeofday(&invoket);


	VERIFY(err, fl->sctx != NULL);
	if (err)
		goto bail;
	/* cid indexes the channel array below; validate it first. */
	VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
	if (err)
		goto bail;

	if (!kernel) {
		/* A retried call may re-attach to an interrupted context. */
		VERIFY(err, 0 == context_restore_interrupted(fl, inv,
								&ctx));
		if (err)
			goto bail;
		if (fl->sctx->smmu.faults)
			err = FASTRPC_ENOSUCH;
		if (err)
			goto bail;
		if (ctx)
			goto wait;
	}

	VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
	if (err)
		goto bail;

	if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
		PERF(fl->profile, fl->perf.getargs,
		VERIFY(err, 0 == get_args(kernel, ctx));
		PERF_END);
		if (err)
			goto bail;
	}

	if (!fl->sctx->smmu.coherent)
		inv_args_pre(ctx);
	PERF(fl->profile, fl->perf.link,
	VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
	PERF_END);

	if (err)
		goto bail;
 wait:
	if (kernel)
		wait_for_completion(&ctx->work);
	else {
		interrupted = wait_for_completion_interruptible(&ctx->work);
		VERIFY(err, 0 == (err = interrupted));
		if (err)
			goto bail;
	}

	PERF(fl->profile, fl->perf.invargs,
	if (!fl->sctx->smmu.coherent)
		inv_args(ctx);
	PERF_END);

	VERIFY(err, 0 == (err = ctx->retval));
	if (err)
		goto bail;

	PERF(fl->profile, fl->perf.putargs,
	VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
	PERF_END);
	if (err)
		goto bail;
 bail:
	/* Interrupted by a signal: park the context for retry. */
	if (ctx && interrupted == -ERESTARTSYS)
		context_save_interrupted(ctx);
	else if (ctx)
		context_free(ctx);
	if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
		err = ECONNRESET;

	if (fl->profile && !interrupted) {
		if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
			fl->perf.invoke += getnstimediff(&invoket);
		if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX)
			fl->perf.count++;
	}
	return err;
}
1607
static int fastrpc_channel_open(struct fastrpc_file *fl);

/*
 * Create or attach this file's process context on the DSP.
 *
 * Three modes, selected by init->flags:
 *  - FASTRPC_INIT_ATTACH: attach to the existing guest-OS PD (pd = 0).
 *  - FASTRPC_INIT_CREATE: spawn a dynamic user PD, passing the shell ELF
 *    (filefd) and a donated memory region (memfd) to the DSP loader.
 *  - FASTRPC_INIT_CREATE_STATIC: attach to a named static PD, donating a
 *    remote-heap region whose ownership is transferred to the remote VM
 *    via hyp_assign_phys() (done once, tracked by me->staticpd_flags).
 *
 * Returns 0 on success or a negative error; donated memory is returned
 * to HLOS and maps are freed on failure.
 *
 * NOTE(review): in the CREATE_STATIC branch, !init->filelen jumps to bail
 * with err == 0, silently reporting success without doing anything —
 * confirm whether that path should set an error instead.
 */
static int fastrpc_init_process(struct fastrpc_file *fl,
				struct fastrpc_ioctl_init_attrs *uproc)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_ioctl_invoke_crc ioctl;
	struct fastrpc_ioctl_init *init = &uproc->init;
	struct smq_phy_page pages[1];
	struct fastrpc_mmap *file = NULL, *mem = NULL;
	char *proc_name = NULL;
	int srcVM[1] = {VMID_HLOS};
	int destVM[1] = {me->channel[fl->cid].rhvmid};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
	int hlosVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
	if (err)
		goto bail;
	if (init->flags == FASTRPC_INIT_ATTACH) {
		remote_arg_t ra[1];
		int tgid = fl->tgid;

		/* Single input: our tgid, so the DSP can track the client. */
		ra[0].buf.pv = (void *)&tgid;
		ra[0].buf.len = sizeof(tgid);
		ioctl.inv.handle = 1;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = NULL;
		ioctl.attrs = NULL;
		ioctl.crc = NULL;
		fl->pd = 0;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else if (init->flags == FASTRPC_INIT_CREATE) {
		remote_arg_t ra[6];
		int fds[6];
		int mflags = 0;
		struct {
			int pgid;
			int namelen;
			int filelen;
			int pageslen;
			int attrs;
			int siglen;
		} inbuf;

		inbuf.pgid = fl->tgid;
		inbuf.namelen = strlen(current->comm) + 1;
		inbuf.filelen = init->filelen;
		fl->pd = 1;
		/* Map the shell ELF so the DSP loader can read it. */
		if (init->filelen) {
			VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
				init->file, init->filelen, mflags, &file));
			if (err)
				goto bail;
		}

		inbuf.pageslen = 1;
		/* Memory donated to back the new PD's heap. */
		VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
				init->mem, init->memlen, mflags, &mem));
		if (err)
			goto bail;
		inbuf.pageslen = 1;
		ra[0].buf.pv = (void *)&inbuf;
		ra[0].buf.len = sizeof(inbuf);
		fds[0] = 0;

		ra[1].buf.pv = (void *)current->comm;
		ra[1].buf.len = inbuf.namelen;
		fds[1] = 0;

		ra[2].buf.pv = (void *)init->file;
		ra[2].buf.len = inbuf.filelen;
		fds[2] = init->filefd;

		pages[0].addr = mem->phys;
		pages[0].size = mem->size;
		ra[3].buf.pv = (void *)pages;
		ra[3].buf.len = 1 * sizeof(*pages);
		fds[3] = 0;

		inbuf.attrs = uproc->attrs;
		ra[4].buf.pv = (void *)&(inbuf.attrs);
		ra[4].buf.len = sizeof(inbuf.attrs);
		fds[4] = 0;

		inbuf.siglen = uproc->siglen;
		ra[5].buf.pv = (void *)&(inbuf.siglen);
		ra[5].buf.len = sizeof(inbuf.siglen);
		fds[5] = 0;

		ioctl.inv.handle = 1;
		/* Older 4-in/4-out signature unless attrs are supplied. */
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
		if (uproc->attrs)
			ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = fds;
		ioctl.attrs = NULL;
		ioctl.crc = NULL;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
		remote_arg_t ra[3];
		uint64_t phys = 0;
		ssize_t size = 0;
		int fds[3];
		struct {
			int pgid;
			int namelen;
			int pageslen;
		} inbuf;

		if (!init->filelen)
			goto bail;

		/* init->file carries the static PD's name here. */
		proc_name = kzalloc(init->filelen, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(proc_name));
		if (err)
			goto bail;
		VERIFY(err, 0 == copy_from_user((void *)proc_name,
			(void __user *)init->file, init->filelen));
		if (err)
			goto bail;

		inbuf.pgid = current->tgid;
		inbuf.namelen = init->filelen;
		inbuf.pageslen = 0;
		/* Donate the remote heap only once per boot/SSR cycle. */
		if (!me->staticpd_flags) {
			inbuf.pageslen = 1;
			VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
				 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
				 &mem));
			if (err)
				goto bail;
			phys = mem->phys;
			size = mem->size;
			/* Hand ownership of the region to the remote VM. */
			VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
					srcVM, 1, destVM, destVMperm, 1));
			if (err) {
				pr_err("ADSPRPC: hyp_assign_phys fail err %d",
							 err);
				pr_err("map->phys %llx, map->size %d\n",
							phys, (int)size);
				goto bail;
			}
			me->staticpd_flags = 1;
		}

		ra[0].buf.pv = (void *)&inbuf;
		ra[0].buf.len = sizeof(inbuf);
		fds[0] = 0;

		ra[1].buf.pv = (void *)proc_name;
		ra[1].buf.len = inbuf.namelen;
		fds[1] = 0;

		pages[0].addr = phys;
		pages[0].size = size;

		ra[2].buf.pv = (void *)pages;
		ra[2].buf.len = sizeof(*pages);
		fds[2] = 0;
		ioctl.inv.handle = 1;

		ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = NULL;
		ioctl.attrs = NULL;
		ioctl.crc = NULL;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else {
		err = -ENOTTY;
	}
bail:
	kfree(proc_name);
	if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
		me->staticpd_flags = 0;
	if (mem && err) {
		/* Return donated memory to HLOS before freeing the map. */
		if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
			hyp_assign_phys(mem->phys, (uint64_t)mem->size,
					destVM, 1, srcVM, hlosVMperm, 1);
		fastrpc_mmap_free(mem, 0);
	}
	if (file)
		fastrpc_mmap_free(file, 0);
	return err;
}
1803
/*
 * Tell the DSP to tear down the remote process associated with this
 * file handle. Issues a remote call on the static kernel handle (1)
 * whose only input is the caller's tgid.
 *
 * Returns 0 on success, a negative/remote error otherwise. Safe to
 * call during file release; both channel checks bail out cleanly if
 * the channel was never opened or was lost to a subsystem restart.
 */
static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_crc ioctl;
	remote_arg_t ra[1];
	int tgid = 0;

	/* A valid channel id must have been set via FASTRPC_IOCTL_GETINFO. */
	VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
	if (err)
		goto bail;
	/* The glink channel must still be up (not torn down by SSR). */
	VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
	if (err)
		goto bail;
	tgid = fl->tgid;
	/* Single input buffer carrying the process group id. */
	ra[0].buf.pv = (void *)&tgid;
	ra[0].buf.len = sizeof(tgid);
	ioctl.inv.handle = 1;
	/* Method 1: one input buffer, no outputs. */
	ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = NULL;
	ioctl.attrs = NULL;
	ioctl.crc = NULL;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
bail:
	return err;
}
1831
1832static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
1833 struct fastrpc_mmap *map)
1834{
Sathish Ambleybae51902017-07-03 15:00:49 -07001835 struct fastrpc_ioctl_invoke_crc ioctl;
c_mtharu63ffc012017-11-16 15:26:56 +05301836 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001837 struct smq_phy_page page;
1838 int num = 1;
1839 remote_arg_t ra[3];
1840 int err = 0;
1841 struct {
1842 int pid;
1843 uint32_t flags;
1844 uintptr_t vaddrin;
1845 int num;
1846 } inargs;
1847 struct {
1848 uintptr_t vaddrout;
1849 } routargs;
1850
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301851 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001852 inargs.vaddrin = (uintptr_t)map->va;
1853 inargs.flags = flags;
1854 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
1855 ra[0].buf.pv = (void *)&inargs;
1856 ra[0].buf.len = sizeof(inargs);
1857 page.addr = map->phys;
1858 page.size = map->size;
1859 ra[1].buf.pv = (void *)&page;
1860 ra[1].buf.len = num * sizeof(page);
1861
1862 ra[2].buf.pv = (void *)&routargs;
1863 ra[2].buf.len = sizeof(routargs);
1864
1865 ioctl.inv.handle = 1;
1866 if (fl->apps->compat)
1867 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
1868 else
1869 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
1870 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301871 ioctl.fds = NULL;
1872 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001873 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001874 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1875 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1876 map->raddr = (uintptr_t)routargs.vaddrout;
c_mtharue1a5ce12017-10-13 20:47:09 +05301877 if (err)
1878 goto bail;
1879 if (flags == ADSP_MMAP_HEAP_ADDR) {
1880 struct scm_desc desc = {0};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001881
c_mtharue1a5ce12017-10-13 20:47:09 +05301882 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
1883 desc.args[1] = map->phys;
1884 desc.args[2] = map->size;
1885 desc.arginfo = SCM_ARGS(3);
1886 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
1887 TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
1888 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
1889
1890 int srcVM[1] = {VMID_HLOS};
c_mtharu63ffc012017-11-16 15:26:56 +05301891 int destVM[1] = {me->channel[fl->cid].rhvmid};
c_mtharue1a5ce12017-10-13 20:47:09 +05301892 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
1893
1894 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
1895 srcVM, 1, destVM, destVMperm, 1));
1896 if (err)
1897 goto bail;
1898 }
1899bail:
1900 return err;
1901}
1902
1903static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
1904 struct fastrpc_mmap *map)
1905{
1906 int err = 0;
c_mtharu63ffc012017-11-16 15:26:56 +05301907 struct fastrpc_apps *me = &gfa;
1908 int srcVM[1] = {me->channel[fl->cid].rhvmid};
c_mtharue1a5ce12017-10-13 20:47:09 +05301909 int destVM[1] = {VMID_HLOS};
1910 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
1911
1912 if (map->flags == ADSP_MMAP_HEAP_ADDR) {
1913 struct fastrpc_ioctl_invoke_crc ioctl;
1914 struct scm_desc desc = {0};
1915 remote_arg_t ra[1];
1916 int err = 0;
1917 struct {
1918 uint8_t skey;
1919 } routargs;
1920
1921 ra[0].buf.pv = (void *)&routargs;
1922 ra[0].buf.len = sizeof(routargs);
1923
1924 ioctl.inv.handle = 1;
1925 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
1926 ioctl.inv.pra = ra;
1927 ioctl.fds = NULL;
1928 ioctl.attrs = NULL;
1929 ioctl.crc = NULL;
1930 if (fl == NULL)
1931 goto bail;
1932
1933 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1934 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1935 if (err)
1936 goto bail;
1937 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
1938 desc.args[1] = map->phys;
1939 desc.args[2] = map->size;
1940 desc.args[3] = routargs.skey;
1941 desc.arginfo = SCM_ARGS(4);
1942 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
1943 TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
1944 } else if (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
1945 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
1946 srcVM, 1, destVM, destVMperm, 1));
1947 if (err)
1948 goto bail;
1949 }
1950
1951bail:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001952 return err;
1953}
1954
/*
 * Mirror of fastrpc_mmap_on_dsp(): ask the DSP to remove the remote
 * mapping at map->raddr, then for heap mappings undo the TZ/hyp
 * ownership changes via fastrpc_munmap_on_dsp_rh().
 *
 * Returns 0 on success, error code otherwise.
 */
static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
				 struct fastrpc_mmap *map)
{
	struct fastrpc_ioctl_invoke_crc ioctl;
	remote_arg_t ra[1];
	int err = 0;
	/* Input args; layout must match the remote method's signature. */
	struct {
		int pid;
		uintptr_t vaddrout;
		ssize_t size;
	} inargs;

	inargs.pid = fl->tgid;
	inargs.size = map->size;
	inargs.vaddrout = map->raddr;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);

	ioctl.inv.handle = 1;
	/* Method id differs between compat and native ABIs. */
	if (fl->apps->compat)
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
	else
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = NULL;
	ioctl.attrs = NULL;
	ioctl.crc = NULL;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	if (err)
		goto bail;
	/* Heap mappings also carry TZ/hyp state that must be undone. */
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, map));
		if (err)
			goto bail;
	}
bail:
	return err;
}
1995
/*
 * Clean up the global (remote heap) map list after a subsystem restart:
 * every entry is stale on the DSP side. Detach one map at a time under
 * the lock, then — unlocked — undo its TZ/hyp protection, optionally
 * capture a ramdump of the region, and free it.
 *
 * NOTE(review): only channel 0's ramdump device/flag is consulted here;
 * presumably the remote heap always belongs to that channel — confirm.
 *
 * Returns 0 on success; on failure the entry being processed is put
 * back on the list.
 */
static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
{
	struct fastrpc_mmap *match = NULL, *map = NULL;
	struct hlist_node *n = NULL;
	int err = 0, ret = 0;
	struct fastrpc_apps *me = &gfa;
	struct ramdump_segment *ramdump_segments_rh = NULL;

	do {
		match = NULL;
		/*
		 * Take one entry off the list per iteration so the
		 * spinlock is never held across the (sleeping) unmap
		 * and ramdump work below.
		 */
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
		spin_unlock(&me->hlock);

		if (match) {
			VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match));
			if (err)
				goto bail;
			if (me->channel[0].ramdumpenabled) {
				ramdump_segments_rh = kcalloc(1,
				sizeof(struct ramdump_segment), GFP_KERNEL);
				/* Best effort: skip the dump on OOM. */
				if (ramdump_segments_rh) {
					ramdump_segments_rh->address =
					match->phys;
					ramdump_segments_rh->size = match->size;
					ret = do_elf_ramdump(
					 me->channel[0].remoteheap_ramdump_dev,
					 ramdump_segments_rh, 1);
					if (ret < 0)
						pr_err("ADSPRPC: unable to dump heap");
					kfree(ramdump_segments_rh);
				}
			}
			fastrpc_mmap_free(match, 0);
		}
	} while (match);
bail:
	/* Re-add the detached entry if we failed to unmap it. */
	if (err && match)
		fastrpc_mmap_add(match);
	return err;
}
2041
/* Forward declarations; the definitions live elsewhere in this file. */
static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			     ssize_t len, struct fastrpc_mmap **ppmap);

static void fastrpc_mmap_add(struct fastrpc_mmap *map);
2046
/*
 * Handle FASTRPC_IOCTL_MUNMAP: detach the mapping covering
 * [ud->vaddrout, ud->vaddrout + ud->size) from the per-file list,
 * unmap it on the DSP, then free it. If the DSP-side unmap fails the
 * mapping is re-added so the file's bookkeeping stays consistent.
 */
static int fastrpc_internal_munmap(struct fastrpc_file *fl,
				   struct fastrpc_ioctl_munmap *ud)
{
	int err = 0;
	struct fastrpc_mmap *map = NULL;

	VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
	if (err)
		goto bail;
	VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
	if (err)
		goto bail;
	fastrpc_mmap_free(map, 0);
bail:
	/* Undo the remove on failure so the map is not leaked/lost. */
	if (err && map)
		fastrpc_mmap_add(map);
	return err;
}
2065
c_mtharu7bd6a422017-10-17 18:15:37 +05302066static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
2067 struct fastrpc_ioctl_munmap_fd *ud) {
2068 int err = 0;
2069 struct fastrpc_mmap *map = NULL;
2070
2071 VERIFY(err, (fl && ud));
2072 if (err)
2073 goto bail;
2074
2075 if (!fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
2076 pr_err("mapping not found to unamp %x va %llx %x\n",
2077 ud->fd, (unsigned long long)ud->va,
2078 (unsigned int)ud->len);
2079 err = -1;
2080 goto bail;
2081 }
2082 if (map)
2083 fastrpc_mmap_free(map, 0);
2084bail:
2085 return err;
2086}
2087
2088
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002089static int fastrpc_internal_mmap(struct fastrpc_file *fl,
2090 struct fastrpc_ioctl_mmap *ud)
2091{
2092
c_mtharue1a5ce12017-10-13 20:47:09 +05302093 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002094 int err = 0;
2095
c_mtharue1a5ce12017-10-13 20:47:09 +05302096 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t __user)ud->vaddrin,
2097 ud->size, ud->flags, 1, &map))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002098 return 0;
2099
2100 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
c_mtharue1a5ce12017-10-13 20:47:09 +05302101 (uintptr_t __user)ud->vaddrin, ud->size,
2102 ud->flags, &map));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002103 if (err)
2104 goto bail;
2105 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
2106 if (err)
2107 goto bail;
2108 ud->vaddrout = map->raddr;
2109 bail:
2110 if (err && map)
c_mtharu7bd6a422017-10-17 18:15:37 +05302111 fastrpc_mmap_free(map, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002112 return err;
2113}
2114
/*
 * kref release callback for a channel context. Invoked through
 * kref_put_mutex(..., &me->smd_mutex), so it runs with smd_mutex HELD
 * and is responsible for releasing it — note the asymmetric
 * mutex_unlock() below. Closes the glink port and drops the link
 * state notifier.
 */
static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	/* Recover the channel id from the context's position in gcinfo. */
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = NULL;
	glink_unregister_link_state_cb(ctx->link.link_notify_handle);
	ctx->link.link_notify_handle = NULL;
	/* Pairs with the lock taken by kref_put_mutex() in the caller. */
	mutex_unlock(&me->smd_mutex);
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
		MAJOR(me->dev_no), cid);
}
2131
2132static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
2133
/*
 * Pick a free SMMU session on @chan matching the requested security
 * domain and mark it used. Caller must serialize access to the
 * channel's session table (the "_locked" suffix; the wrapper
 * fastrpc_session_alloc() takes smd_mutex around this call).
 *
 * If the channel has no session table (sesscount == 0), session 0 is
 * bound to the global default device instead.
 *
 * On success *session points at the reserved slot; returns 0, or an
 * error when no matching free session (or no default device) exists.
 */
static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
			int secure, struct fastrpc_session_ctx **session)
{
	struct fastrpc_apps *me = &gfa;
	int idx = 0, err = 0;

	if (chan->sesscount) {
		/* First-fit search for an unused session of the right kind. */
		for (idx = 0; idx < chan->sesscount; ++idx) {
			if (!chan->session[idx].used &&
				chan->session[idx].smmu.secure == secure) {
				chan->session[idx].used = 1;
				break;
			}
		}
		VERIFY(err, idx < chan->sesscount);
		if (err)
			goto bail;
		/* Fresh session starts with a clean fault count. */
		chan->session[idx].smmu.faults = 0;
	} else {
		VERIFY(err, me->dev != NULL);
		if (err)
			goto bail;
		chan->session[0].dev = me->dev;
		chan->session[0].smmu.dev = me->dev;
	}

	*session = &chan->session[idx];
 bail:
	return err;
}
2164
c_mtharue1a5ce12017-10-13 20:47:09 +05302165static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
2166 size_t size)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002167{
2168 if (glink_queue_rx_intent(h, NULL, size))
2169 return false;
2170 return true;
2171}
2172
c_mtharue1a5ce12017-10-13 20:47:09 +05302173static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002174 const void *pkt_priv, const void *ptr)
2175{
2176}
2177
c_mtharue1a5ce12017-10-13 20:47:09 +05302178static void fastrpc_glink_notify_rx(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002179 const void *pkt_priv, const void *ptr, size_t size)
2180{
2181 struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
c_mtharufdac6892017-10-12 13:09:01 +05302182 struct smq_invoke_ctx *ctx;
2183 int err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002184
c_mtharufdac6892017-10-12 13:09:01 +05302185 VERIFY(err, (rsp && size >= sizeof(*rsp)));
2186 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302187 goto bail;
2188
c_mtharufdac6892017-10-12 13:09:01 +05302189 ctx = (struct smq_invoke_ctx *)(uint64_to_ptr(rsp->ctx & ~1));
2190 VERIFY(err, (ctx && ctx->magic == FASTRPC_CTX_MAGIC));
2191 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302192 goto bail;
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302193
c_mtharufdac6892017-10-12 13:09:01 +05302194 context_notify_user(ctx, rsp->retval);
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302195bail:
c_mtharufdac6892017-10-12 13:09:01 +05302196 if (err)
2197 pr_err("adsprpc: invalid response or context\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002198 glink_rx_done(handle, ptr, true);
2199}
2200
c_mtharue1a5ce12017-10-13 20:47:09 +05302201static void fastrpc_glink_notify_state(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002202 unsigned int event)
2203{
2204 struct fastrpc_apps *me = &gfa;
2205 int cid = (int)(uintptr_t)priv;
2206 struct fastrpc_glink_info *link;
2207
2208 if (cid < 0 || cid >= NUM_CHANNELS)
2209 return;
2210 link = &me->channel[cid].link;
2211 switch (event) {
2212 case GLINK_CONNECTED:
2213 link->port_state = FASTRPC_LINK_CONNECTED;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302214 complete(&me->channel[cid].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002215 break;
2216 case GLINK_LOCAL_DISCONNECTED:
2217 link->port_state = FASTRPC_LINK_DISCONNECTED;
2218 break;
2219 case GLINK_REMOTE_DISCONNECTED:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002220 break;
2221 default:
2222 break;
2223 }
2224}
2225
2226static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
2227 struct fastrpc_session_ctx **session)
2228{
2229 int err = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302230 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002231
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302232 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002233 if (!*session)
2234 err = fastrpc_session_alloc_locked(chan, secure, session);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302235 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002236 return err;
2237}
2238
/*
 * Return an SMMU session to @chan's pool. Clearing the used flag is
 * done under smd_mutex, the same lock that guards allocation in
 * fastrpc_session_alloc{,_locked}().
 */
static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
				struct fastrpc_session_ctx *session)
{
	struct fastrpc_apps *me = &gfa;

	mutex_lock(&me->smd_mutex);
	session->used = 0;
	mutex_unlock(&me->smd_mutex);
}
2248
/*
 * Tear down a per-fd state object. Order matters:
 *  1. tell the DSP to release the remote process (best effort),
 *  2. unlink from the global drivers list,
 *  3. mark the file closing, destroy pending contexts, buffers, maps,
 *  4. drop the channel reference (may close the channel via
 *     fastrpc_channel_close, which runs under smd_mutex),
 *  5. release SMMU sessions and free the object.
 *
 * Always returns 0; NULL input is a no-op.
 */
static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n;
	struct fastrpc_mmap *map = NULL;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	/* Best effort: result deliberately ignored during teardown. */
	(void)fastrpc_release_current_dsp_process(fl);

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	/* No session was ever allocated: nothing else to clean up. */
	if (!fl->sctx) {
		kfree(fl);
		return 0;
	}
	/* Flag closure so concurrent ioctl paths can bail out. */
	spin_lock(&fl->hlock);
	fl->file_close = 1;
	spin_unlock(&fl->hlock);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map, 1);
	}
	/* Only drop the channel ref if no SSR bumped ssrcount since open. */
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
			fastrpc_channel_close, &fl->apps->smd_mutex);
	if (fl->sctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
	kfree(fl);
	return 0;
}
2287
2288static int fastrpc_device_release(struct inode *inode, struct file *file)
2289{
2290 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2291
2292 if (fl) {
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302293 if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
2294 pm_qos_remove_request(&fl->pm_qos_req);
Sathish Ambley1ca68232017-01-19 10:32:55 -08002295 if (fl->debugfs_file != NULL)
2296 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002297 fastrpc_file_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +05302298 file->private_data = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002299 }
2300 return 0;
2301}
2302
2303static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
2304 void *priv)
2305{
2306 struct fastrpc_apps *me = &gfa;
2307 int cid = (int)((uintptr_t)priv);
2308 struct fastrpc_glink_info *link;
2309
2310 if (cid < 0 || cid >= NUM_CHANNELS)
2311 return;
2312
2313 link = &me->channel[cid].link;
2314 switch (cb_info->link_state) {
2315 case GLINK_LINK_STATE_UP:
2316 link->link_state = FASTRPC_LINK_STATE_UP;
2317 complete(&me->channel[cid].work);
2318 break;
2319 case GLINK_LINK_STATE_DOWN:
2320 link->link_state = FASTRPC_LINK_STATE_DOWN;
2321 break;
2322 default:
2323 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
2324 break;
2325 }
2326}
2327
/*
 * Register for link-state notifications on channel @cid and wait (with
 * timeout) for the transport link to come up. Idempotent: returns
 * immediately if a notify handle is already registered.
 *
 * Returns 0 on success; an error if registration fails or link-up does
 * not arrive within RPC_TIMEOUT.
 */
static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;

	link = &me->channel[cid].link;
	/* Already registered: nothing to do. */
	if (link->link_notify_handle != NULL)
		goto bail;

	link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
	link->link_notify_handle = glink_register_link_state_cb(
				&link->link_info,
				(void *)((uintptr_t)cid));
	VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
	if (err) {
		link->link_notify_handle = NULL;
		goto bail;
	}
	/* Wait for fastrpc_link_state_handler() to signal link-up. */
	VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
						RPC_TIMEOUT));
bail:
	return err;
}
2355
/*
 * Close the glink port for channel @cid if it is currently connected
 * (or mid remote-disconnect). The port state is advanced to
 * DISCONNECTING before glink_close(); the LOCAL_DISCONNECTED event in
 * fastrpc_glink_notify_state() completes the transition.
 */
static void fastrpc_glink_close(void *chan, int cid)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		return;
	link = &gfa.channel[cid].link;

	if (link->port_state == FASTRPC_LINK_CONNECTED ||
		link->port_state == FASTRPC_LINK_REMOTE_DISCONNECTING) {
		link->port_state = FASTRPC_LINK_DISCONNECTING;
		glink_close(chan);
	}
}
2372
/*
 * Open the glink port for channel @cid. Preconditions: the transport
 * link is up (see fastrpc_glink_register()) and the port is currently
 * disconnected. On success the channel handle is stored in
 * me->channel[cid].chan; the CONNECTED event arrives asynchronously
 * via fastrpc_glink_notify_state().
 *
 * Returns 0 on success, error code otherwise.
 */
static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &me->channel[cid].link.cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	/* Refuse to open a port that is not fully disconnected. */
	VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
	if (err)
		goto bail;

	link->port_state = FASTRPC_LINK_CONNECTING;
	cfg->priv = (void *)(uintptr_t)cid;
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err) {
		/* Roll back the state change if the open never happened. */
		if (link->port_state == FASTRPC_LINK_CONNECTING)
			link->port_state = FASTRPC_LINK_DISCONNECTED;
		goto bail;
	}
	me->channel[cid].chan = handle;
bail:
	return err;
}
2414
Sathish Ambley1ca68232017-01-19 10:32:55 -08002415static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
2416{
2417 filp->private_data = inode->i_private;
2418 return 0;
2419}
2420
/*
 * debugfs read handler. Two modes:
 *  - fl == NULL (the global node): dump channel/session configuration.
 *  - per-process node: dump the fd's identity plus its buffer, map and
 *    pending/interrupted context lists (lists walked under fl->hlock).
 *
 * Output is formatted into a DEBUGFS_SIZE scratch buffer with
 * scnprintf (which bounds each write), then copied to userspace via
 * simple_read_from_buffer.
 */
static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
					size_t count, loff_t *position)
{
	struct fastrpc_file *fl = filp->private_data;
	struct hlist_node *n;
	struct fastrpc_buf *buf = NULL;
	struct fastrpc_mmap *map = NULL;
	struct smq_invoke_ctx *ictx = NULL;
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	unsigned int len = 0;
	int i, j, ret = 0;
	char *fileinfo = NULL;

	fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
	if (!fileinfo)
		goto bail;
	if (fl == NULL) {
		/* Global view: per-channel session configuration. */
		for (i = 0; i < NUM_CHANNELS; i++) {
			chan = &gcinfo[i];
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s\n\n",
					chan->name);
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s %d\n",
					"sesscount:", chan->sesscount);
			for (j = 0; j < chan->sesscount; j++) {
				sess = &chan->session[j];
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s%d\n\n", "SESSION", j);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "sid:",
						sess->smmu.cb);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "SECURE:",
						sess->smmu.secure);
			}
		}
	} else {
		/* Per-fd view: identity, then lists under fl->hlock. */
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"PROCESS_ID:", fl->tgid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"CHANNEL_ID:", fl->cid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"SSRCOUNT:", fl->ssrcount);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s\n",
				"LIST OF BUFS:");
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %pK %s %llx\n", "buf:",
					buf, "buf->virt:", buf->virt,
					"buf->phys:", buf->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF MAPS:");
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
						"%s %pK %s %lx %s %llx\n",
						"map:", map,
						"map->va:", map->va,
						"map->phys:", map->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF PENDING SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
						"%s %pK %s %u %s %u %s %u\n",
						"smqcontext:", ictx,
						"sc:", ictx->sc,
						"tid:", ictx->pid,
						"handle", ictx->rpra->h);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF INTERRUPTED SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %u %s %u %s %u\n",
					"smqcontext:", ictx,
					"sc:", ictx->sc,
					"tid:", ictx->pid,
					"handle", ictx->rpra->h);
		}
		spin_unlock(&fl->hlock);
	}
	/* Defensive clamp; scnprintf already bounds each write. */
	if (len > DEBUGFS_SIZE)
		len = DEBUGFS_SIZE;
	ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
	kfree(fileinfo);
bail:
	return ret;
}
2523
/* File operations for the adsprpc debugfs nodes (read-only dump). */
static const struct file_operations debugfs_fops = {
	.open = fastrpc_debugfs_open,
	.read = fastrpc_debugfs_read,
};
Sathish Ambley36849af2017-02-02 09:35:55 -08002528static int fastrpc_channel_open(struct fastrpc_file *fl)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002529{
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002530 struct fastrpc_apps *me = &gfa;
Sathish Ambley36849af2017-02-02 09:35:55 -08002531 int cid, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002532
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302533 mutex_lock(&me->smd_mutex);
2534
Sathish Ambley36849af2017-02-02 09:35:55 -08002535 VERIFY(err, fl && fl->sctx);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002536 if (err)
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302537 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08002538 cid = fl->cid;
c_mtharu314a4202017-11-15 22:09:17 +05302539 VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
2540 if (err)
2541 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302542 if (me->channel[cid].ssrcount !=
2543 me->channel[cid].prevssrcount) {
2544 if (!me->channel[cid].issubsystemup) {
2545 VERIFY(err, 0);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302546 if (err) {
2547 err = -ENOTCONN;
c_mtharue1a5ce12017-10-13 20:47:09 +05302548 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302549 }
c_mtharue1a5ce12017-10-13 20:47:09 +05302550 }
2551 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002552 fl->ssrcount = me->channel[cid].ssrcount;
2553 if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
c_mtharue1a5ce12017-10-13 20:47:09 +05302554 (me->channel[cid].chan == NULL)) {
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05302555 VERIFY(err, 0 == fastrpc_glink_register(cid, me));
2556 if (err)
2557 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002558 VERIFY(err, 0 == fastrpc_glink_open(cid));
2559 if (err)
2560 goto bail;
2561
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302562 VERIFY(err,
2563 wait_for_completion_timeout(&me->channel[cid].workport,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002564 RPC_TIMEOUT));
2565 if (err) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302566 me->channel[cid].chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002567 goto bail;
2568 }
2569 kref_init(&me->channel[cid].kref);
2570 pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
2571 MAJOR(me->dev_no), cid);
c_mtharu314a4202017-11-15 22:09:17 +05302572 err = glink_queue_rx_intent(me->channel[cid].chan, NULL,
2573 FASTRPC_GLINK_INTENT_LEN);
2574 err |= glink_queue_rx_intent(me->channel[cid].chan, NULL,
2575 FASTRPC_GLINK_INTENT_LEN);
Bruce Levy34c3c1c2017-07-31 17:08:58 -07002576 if (err)
Tharun Kumar Merugu88ba9252017-08-09 12:15:41 +05302577 pr_warn("adsprpc: initial intent fail for %d err %d\n",
2578 cid, err);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002579 if (me->channel[cid].ssrcount !=
2580 me->channel[cid].prevssrcount) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302581 if (fastrpc_mmap_remove_ssr(fl))
2582 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002583 me->channel[cid].prevssrcount =
2584 me->channel[cid].ssrcount;
2585 }
2586 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002587
2588bail:
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302589 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002590 return err;
2591}
2592
Sathish Ambley36849af2017-02-02 09:35:55 -08002593static int fastrpc_device_open(struct inode *inode, struct file *filp)
2594{
2595 int err = 0;
Sathish Ambley567012b2017-03-06 11:55:04 -08002596 struct dentry *debugfs_file;
c_mtharue1a5ce12017-10-13 20:47:09 +05302597 struct fastrpc_file *fl = NULL;
Sathish Ambley36849af2017-02-02 09:35:55 -08002598 struct fastrpc_apps *me = &gfa;
2599
c_mtharue1a5ce12017-10-13 20:47:09 +05302600 VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
Sathish Ambley36849af2017-02-02 09:35:55 -08002601 if (err)
2602 return err;
Sathish Ambley567012b2017-03-06 11:55:04 -08002603 debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
2604 fl, &debugfs_fops);
Sathish Ambley36849af2017-02-02 09:35:55 -08002605 context_list_ctor(&fl->clst);
2606 spin_lock_init(&fl->hlock);
2607 INIT_HLIST_HEAD(&fl->maps);
2608 INIT_HLIST_HEAD(&fl->bufs);
2609 INIT_HLIST_NODE(&fl->hn);
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302610 fl->sessionid = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08002611 fl->tgid = current->tgid;
2612 fl->apps = me;
2613 fl->mode = FASTRPC_MODE_SERIAL;
2614 fl->cid = -1;
Sathish Ambley567012b2017-03-06 11:55:04 -08002615 if (debugfs_file != NULL)
2616 fl->debugfs_file = debugfs_file;
2617 memset(&fl->perf, 0, sizeof(fl->perf));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302618 fl->qos_request = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08002619 filp->private_data = fl;
2620 spin_lock(&me->hlock);
2621 hlist_add_head(&fl->hn, &me->drivers);
2622 spin_unlock(&me->hlock);
2623 return 0;
2624}
2625
/*
 * fastrpc_get_info() - bind an open fastrpc file to a remote channel and
 * report whether its SMMU session has address translation enabled.
 *
 * @fl:   open file state; must be non-NULL.
 * @info: in/out. On the first call for this file (fl->cid == -1) it carries
 *        the requested channel id; on return it is overwritten with 1 if the
 *        allocated session has SMMU enabled, 0 otherwise.
 *
 * Returns 0 on success, or the negative error recorded by VERIFY() on
 * failure.
 */
static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
{
	int err = 0;
	uint32_t cid;

	VERIFY(err, fl != NULL);
	if (err)
		goto bail;
	/* First GETINFO on this fd: latch the channel and grab a session. */
	if (fl->cid == -1) {
		cid = *info;
		VERIFY(err, cid < NUM_CHANNELS);
		if (err)
			goto bail;
		fl->cid = cid;
		/* Snapshot the restart count so later calls can detect SSR. */
		fl->ssrcount = fl->apps->channel[cid].ssrcount;
		/*
		 * NOTE(review): the "_locked" suffix suggests a lock must be
		 * held by the caller, but none is visibly taken in this
		 * function -- confirm against the allocator's contract.
		 */
		VERIFY(err, !fastrpc_session_alloc_locked(
				&fl->apps->channel[cid], 0, &fl->sctx));
		if (err)
			goto bail;
	}
	/* A session must exist by now (either just allocated or pre-set). */
	VERIFY(err, fl->sctx != NULL);
	if (err)
		goto bail;
	*info = (fl->sctx->smmu.enabled ? 1 : 0);
bail:
	return err;
}
2653
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302654static int fastrpc_internal_control(struct fastrpc_file *fl,
2655 struct fastrpc_ioctl_control *cp)
2656{
2657 int err = 0;
2658 int latency;
2659
2660 VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
2661 if (err)
2662 goto bail;
2663 VERIFY(err, !IS_ERR_OR_NULL(cp));
2664 if (err)
2665 goto bail;
2666
2667 switch (cp->req) {
2668 case FASTRPC_CONTROL_LATENCY:
2669 latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
2670 fl->apps->latency : PM_QOS_DEFAULT_VALUE;
2671 VERIFY(err, latency != 0);
2672 if (err)
2673 goto bail;
2674 if (!fl->qos_request) {
2675 pm_qos_add_request(&fl->pm_qos_req,
2676 PM_QOS_CPU_DMA_LATENCY, latency);
2677 fl->qos_request = 1;
2678 } else
2679 pm_qos_update_request(&fl->pm_qos_req, latency);
2680 break;
2681 default:
2682 err = -ENOTTY;
2683 break;
2684 }
2685bail:
2686 return err;
2687}
2688
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002689static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
2690 unsigned long ioctl_param)
2691{
2692 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07002693 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002694 struct fastrpc_ioctl_mmap mmap;
2695 struct fastrpc_ioctl_munmap munmap;
c_mtharu7bd6a422017-10-17 18:15:37 +05302696 struct fastrpc_ioctl_munmap_fd munmap_fd;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002697 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002698 struct fastrpc_ioctl_perf perf;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302699 struct fastrpc_ioctl_control cp;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002700 } p;
2701 void *param = (char *)ioctl_param;
2702 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2703 int size = 0, err = 0;
2704 uint32_t info;
2705
c_mtharue1a5ce12017-10-13 20:47:09 +05302706 p.inv.fds = NULL;
2707 p.inv.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002708 p.inv.crc = NULL;
tharun kumar9f899ea2017-07-03 17:07:03 +05302709 spin_lock(&fl->hlock);
2710 if (fl->file_close == 1) {
2711 err = EBADF;
2712 pr_warn("ADSPRPC: fastrpc_device_release is happening, So not sending any new requests to DSP");
2713 spin_unlock(&fl->hlock);
2714 goto bail;
2715 }
2716 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002717
2718 switch (ioctl_num) {
2719 case FASTRPC_IOCTL_INVOKE:
2720 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07002721 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002722 case FASTRPC_IOCTL_INVOKE_FD:
2723 if (!size)
2724 size = sizeof(struct fastrpc_ioctl_invoke_fd);
2725 /* fall through */
2726 case FASTRPC_IOCTL_INVOKE_ATTRS:
2727 if (!size)
2728 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07002729 /* fall through */
2730 case FASTRPC_IOCTL_INVOKE_CRC:
2731 if (!size)
2732 size = sizeof(struct fastrpc_ioctl_invoke_crc);
c_mtharue1a5ce12017-10-13 20:47:09 +05302733 K_COPY_FROM_USER(err, 0, &p.inv, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002734 if (err)
2735 goto bail;
2736 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
2737 0, &p.inv)));
2738 if (err)
2739 goto bail;
2740 break;
2741 case FASTRPC_IOCTL_MMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05302742 K_COPY_FROM_USER(err, 0, &p.mmap, param,
2743 sizeof(p.mmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05302744 if (err)
2745 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002746 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
2747 if (err)
2748 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302749 K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002750 if (err)
2751 goto bail;
2752 break;
2753 case FASTRPC_IOCTL_MUNMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05302754 K_COPY_FROM_USER(err, 0, &p.munmap, param,
2755 sizeof(p.munmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05302756 if (err)
2757 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002758 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
2759 &p.munmap)));
2760 if (err)
2761 goto bail;
2762 break;
c_mtharu7bd6a422017-10-17 18:15:37 +05302763 case FASTRPC_IOCTL_MUNMAP_FD:
2764 K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
2765 sizeof(p.munmap_fd));
2766 if (err)
2767 goto bail;
2768 VERIFY(err, 0 == (err = fastrpc_internal_munmap_fd(fl,
2769 &p.munmap_fd)));
2770 if (err)
2771 goto bail;
2772 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002773 case FASTRPC_IOCTL_SETMODE:
2774 switch ((uint32_t)ioctl_param) {
2775 case FASTRPC_MODE_PARALLEL:
2776 case FASTRPC_MODE_SERIAL:
2777 fl->mode = (uint32_t)ioctl_param;
2778 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002779 case FASTRPC_MODE_PROFILE:
2780 fl->profile = (uint32_t)ioctl_param;
2781 break;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302782 case FASTRPC_MODE_SESSION:
2783 fl->sessionid = 1;
2784 fl->tgid |= (1 << SESSION_ID_INDEX);
2785 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002786 default:
2787 err = -ENOTTY;
2788 break;
2789 }
2790 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002791 case FASTRPC_IOCTL_GETPERF:
c_mtharue1a5ce12017-10-13 20:47:09 +05302792 K_COPY_FROM_USER(err, 0, &p.perf,
2793 param, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002794 if (err)
2795 goto bail;
2796 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
2797 if (p.perf.keys) {
2798 char *keys = PERF_KEYS;
2799
c_mtharue1a5ce12017-10-13 20:47:09 +05302800 K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
2801 keys, strlen(keys)+1);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002802 if (err)
2803 goto bail;
2804 }
2805 if (p.perf.data) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302806 K_COPY_TO_USER(err, 0, (void *)p.perf.data,
2807 &fl->perf, sizeof(fl->perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002808 }
c_mtharue1a5ce12017-10-13 20:47:09 +05302809 K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002810 if (err)
2811 goto bail;
2812 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302813 case FASTRPC_IOCTL_CONTROL:
c_mtharue1a5ce12017-10-13 20:47:09 +05302814 K_COPY_FROM_USER(err, 0, &p.cp, param,
2815 sizeof(p.cp));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302816 if (err)
2817 goto bail;
2818 VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
2819 if (err)
2820 goto bail;
2821 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002822 case FASTRPC_IOCTL_GETINFO:
c_mtharue1a5ce12017-10-13 20:47:09 +05302823 K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
Sathish Ambley36849af2017-02-02 09:35:55 -08002824 if (err)
2825 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002826 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
2827 if (err)
2828 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302829 K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002830 if (err)
2831 goto bail;
2832 break;
2833 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002834 p.init.attrs = 0;
2835 p.init.siglen = 0;
2836 size = sizeof(struct fastrpc_ioctl_init);
2837 /* fall through */
2838 case FASTRPC_IOCTL_INIT_ATTRS:
2839 if (!size)
2840 size = sizeof(struct fastrpc_ioctl_init_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +05302841 K_COPY_FROM_USER(err, 0, &p.init, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002842 if (err)
2843 goto bail;
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05302844 VERIFY(err, p.init.init.filelen >= 0 &&
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05302845 p.init.init.filelen < INIT_FILELEN_MAX);
2846 if (err)
2847 goto bail;
2848 VERIFY(err, p.init.init.memlen >= 0 &&
2849 p.init.init.memlen < INIT_MEMLEN_MAX);
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05302850 if (err)
2851 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302852 VERIFY(err, 0 == (err = fastrpc_init_process(fl, &p.init)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002853 if (err)
2854 goto bail;
2855 break;
2856
2857 default:
2858 err = -ENOTTY;
2859 pr_info("bad ioctl: %d\n", ioctl_num);
2860 break;
2861 }
2862 bail:
2863 return err;
2864}
2865
/*
 * fastrpc_restart_notifier_cb() - subsystem-restart (SSR) notifier for one
 * remote channel.
 *
 * The channel context is recovered from the embedded notifier_block; the
 * channel id is its index within gfa.channel[].
 *
 * SUBSYS_BEFORE_SHUTDOWN: bump ssrcount, mark the subsystem down, close the
 * glink channel under smd_mutex, then wake any drivers blocked on this cid.
 * SUBSYS_RAMDUMP_NOTIFICATION: arm remote-heap ramdump collection on channel
 * 0 if a ramdump device exists and ramdumps are enabled.
 * SUBSYS_AFTER_POWERUP: mark the subsystem back up.
 */
static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
					unsigned long code,
					void *data)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	struct notif_data *notifdata = data;
	int cid;

	ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
	cid = ctx - &me->channel[0];
	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		mutex_lock(&me->smd_mutex);
		ctx->ssrcount++;
		ctx->issubsystemup = 0;
		if (ctx->chan) {
			fastrpc_glink_close(ctx->chan, cid);
			ctx->chan = NULL;
			pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
				 gcinfo[cid].name, MAJOR(me->dev_no), cid);
		}
		mutex_unlock(&me->smd_mutex);
		/* cid 0 hosts the static PD; force it to re-attach. */
		if (cid == 0)
			me->staticpd_flags = 0;
		/* Unblock waiters after the mutex is dropped. */
		fastrpc_notify_drivers(me, cid);
	} else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
		if (me->channel[0].remoteheap_ramdump_dev &&
				notifdata->enable_ramdump) {
			me->channel[0].ramdumpenabled = 1;
		}
	} else if (code == SUBSYS_AFTER_POWERUP) {
		ctx->issubsystemup = 1;
	}

	return NOTIFY_DONE;
}
2902
/* Character-device entry points for the fastrpc /dev node. */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};
2909
/*
 * Device-tree compatibles served by fastrpc_probe(): the compute node, its
 * SMMU context banks, and the ADSP shared memory region.
 */
static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};
2917
/*
 * fastrpc_cb_probe() - probe one SMMU context-bank sub-device.
 *
 * Matches the DT "label" property against a known channel name, claims the
 * channel's next free session slot, creates an ARM IOMMU mapping for it
 * (VA window base 0x80000000, or 0x60000000 for secure banks, size
 * 0x78000000), tags secure banks with VMID_CP_PIXEL, and attaches the
 * device to the mapping.
 *
 * Returns 0 on success or a negative error recorded by VERIFY().
 */
static int fastrpc_cb_probe(struct device *dev)
{
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	struct of_phandle_args iommuspec;
	const char *name;
	unsigned int start = 0x80000000;
	int err = 0, i;
	int secure_vmid = VMID_CP_PIXEL;

	VERIFY(err, NULL != (name = of_get_property(dev->of_node,
					 "label", NULL)));
	if (err)
		goto bail;
	/* Find the channel whose name matches this context bank's label. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		if (!strcmp(name, gcinfo[i].name))
			break;
	}
	VERIFY(err, i < NUM_CHANNELS);
	if (err)
		goto bail;
	chan = &gcinfo[i];
	VERIFY(err, chan->sesscount < NUM_SESSIONS);
	if (err)
		goto bail;

	VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
						"#iommu-cells", 0, &iommuspec));
	if (err)
		goto bail;
	sess = &chan->session[chan->sesscount];
	/*
	 * NOTE(review): only the low 4 bits of the first iommus cell are
	 * kept as the context-bank number -- confirm against the DT binding.
	 */
	sess->smmu.cb = iommuspec.args[0] & 0xf;
	sess->used = 0;
	sess->smmu.coherent = of_property_read_bool(dev->of_node,
						"dma-coherent");
	sess->smmu.secure = of_property_read_bool(dev->of_node,
						"qcom,secure-context-bank");
	/* Secure banks get a lower VA window base. */
	if (sess->smmu.secure)
		start = 0x60000000;
	VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
			arm_iommu_create_mapping(&platform_bus_type,
						start, 0x78000000)));
	if (err)
		goto bail;

	if (sess->smmu.secure)
		iommu_domain_set_attr(sess->smmu.mapping->domain,
				DOMAIN_ATTR_SECURE_VMID,
				&secure_vmid);

	VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
	if (err)
		goto bail;
	sess->smmu.dev = dev;
	sess->smmu.enabled = 1;
	chan->sesscount++;
	/* Shared "global" debugfs node; recreated on every CB probe. */
	debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
							NULL, &debugfs_fops);
bail:
	return err;
}
2981
/*
 * fastrpc_probe() - platform probe, dispatched on compatible string.
 *
 * "qcom,msm-fastrpc-compute": read the remote-heap VMID and RPC latency
 * tunables, then fall through to populate child nodes.
 * "qcom,msm-fastrpc-compute-cb": delegate to fastrpc_cb_probe().
 * "qcom,msm-adsprpc-mem-region": locate the ADSP ION heap's CMA region and
 * hyp-assign it to HLOS + the three remote Q6 VMs (RWX each).
 * Anything else: populate child devices from fastrpc_match_table.
 *
 * Returns 0 on success or a negative error recorded by VERIFY().
 */
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;


	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute")) {
		/* Optional tunables; on failure the fields keep 0. */
		of_property_read_u32(dev->of_node, "qcom,adsp-remoteheap-vmid",
			&gcinfo[0].rhvmid);

		pr_info("ADSPRPC : vmids adsp=%d\n", gcinfo[0].rhvmid);

		of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
			&me->latency);
	}
	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;
		/* Walk ION heaps looking for the ADSP heap's CMA area. */
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
						VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					};

			/* Share the region with the remote processors. */
			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
					srcVM, 1, destVM, destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					  fastrpc_match_table,
					  NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}
3056
/*
 * fastrpc_deinit() - tear down all channels and sessions.
 *
 * For each channel: drop the glink channel reference (kref_put_mutex closes
 * it under smd_mutex when the count hits zero), then detach and release
 * every session's IOMMU mapping.  Pointers are NULLed so the teardown is
 * idempotent (this runs both on init failure and on module exit).
 */
static void fastrpc_deinit(void)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *chan = gcinfo;
	int i, j;

	for (i = 0; i < NUM_CHANNELS; i++, chan++) {
		if (chan->chan) {
			kref_put_mutex(&chan->kref,
				fastrpc_channel_close, &me->smd_mutex);
			chan->chan = NULL;
		}
		for (j = 0; j < NUM_SESSIONS; j++) {
			struct fastrpc_session_ctx *sess = &chan->session[j];
			if (sess->smmu.dev) {
				arm_iommu_detach_device(sess->smmu.dev);
				sess->smmu.dev = NULL;
			}
			if (sess->smmu.mapping) {
				arm_iommu_release_mapping(sess->smmu.mapping);
				sess->smmu.mapping = NULL;
			}
		}
	}
}
3082
/* Platform driver; probe fans out per compatible string (see fastrpc_probe). */
static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};
3091
/*
 * fastrpc_device_init() - module init.
 *
 * Registers the platform driver, allocates the char-dev region, creates the
 * device class and node, registers an SSR notifier per channel, creates the
 * ION client and the debugfs root.  Errors unwind through the goto labels
 * in strict reverse order of acquisition; fastrpc_deinit() at the bottom is
 * safe to call even for partially-initialized state.
 *
 * Returns 0 on success or a negative error recorded by VERIFY().
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	struct device *dev = NULL;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				1));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	/* Single device node; all channels share it below. */
	dev = device_create(me->class, NULL,
				MKDEV(MAJOR(me->dev_no), 0),
				NULL, gcinfo[0].name);
	VERIFY(err, !IS_ERR_OR_NULL(dev));
	if (err)
		goto device_create_bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		me->channel[i].dev = dev;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].issubsystemup = 1;
		me->channel[i].ramdumpenabled = 0;
		me->channel[i].remoteheap_ramdump_dev = NULL;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	debugfs_root = debugfs_create_dir("adsprpc", NULL);
	return 0;
device_create_bail:
	/* Reached both before and after notifier registration; guard each. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (me->channel[i].handle)
			subsys_notif_unregister_notifier(me->channel[i].handle,
							&me->channel[i].nb);
	}
	if (!IS_ERR_OR_NULL(dev))
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}
3163
/*
 * fastrpc_device_exit() - module exit: release open-file state, tear down
 * channels/sessions, destroy per-channel device nodes and SSR notifiers,
 * then unwind the class, cdev, chrdev region, ION client and debugfs tree.
 */
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;
	int i;

	fastrpc_file_list_dtor(me);
	fastrpc_deinit();
	for (i = 0; i < NUM_CHANNELS; i++) {
		/* Unnamed channel slots were never registered; skip them. */
		if (!gcinfo[i].name)
			continue;
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						&me->channel[i].nb);
	}
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
	ion_client_destroy(me->client);
	debugfs_remove_recursive(debugfs_root);
}
3184
3185late_initcall(fastrpc_device_init);
3186module_exit(fastrpc_device_exit);
3187
3188MODULE_LICENSE("GPL v2");