blob: cd47c773a0e8a69b56cf298b8fbcedde94496e1d [file] [log] [blame]
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001/*
Sathish Ambleyae5ee542017-01-16 22:24:23 -08002 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
29#include <soc/qcom/subsystem_notif.h>
30#include <soc/qcom/subsystem_restart.h>
31#include <linux/scatterlist.h>
32#include <linux/fs.h>
33#include <linux/uaccess.h>
34#include <linux/device.h>
35#include <linux/of.h>
36#include <linux/of_address.h>
37#include <linux/of_platform.h>
38#include <linux/dma-contiguous.h>
39#include <linux/cma.h>
40#include <linux/iommu.h>
41#include <linux/kref.h>
42#include <linux/sort.h>
43#include <linux/msm_dma_iommu_mapping.h>
44#include <asm/dma-iommu.h>
45#include "adsprpc_compat.h"
46#include "adsprpc_shared.h"
Sathish Ambley1ca68232017-01-19 10:32:55 -080047#include <linux/debugfs.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070048
49#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
50#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
51#define TZ_PIL_AUTH_QDSP6_PROC 1
52#define FASTRPC_ENOSUCH 39
53#define VMID_SSC_Q6 5
54#define VMID_ADSP_Q6 6
Sathish Ambley1ca68232017-01-19 10:32:55 -080055#define DEBUGFS_SIZE 1024
Sathish Ambley69e1ab02016-10-18 10:28:15 -070056
57#define RPC_TIMEOUT (5 * HZ)
58#define BALIGN 128
59#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
60#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
Sathish Ambleybae51902017-07-03 15:00:49 -070061#define M_FDLIST (16)
62#define M_CRCLIST (64)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070063
64#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
65
66#define FASTRPC_LINK_STATE_DOWN (0x0)
67#define FASTRPC_LINK_STATE_UP (0x1)
68#define FASTRPC_LINK_DISCONNECTED (0x0)
69#define FASTRPC_LINK_CONNECTING (0x1)
70#define FASTRPC_LINK_CONNECTED (0x3)
71#define FASTRPC_LINK_DISCONNECTING (0x7)
72
Sathish Ambleya21b5b52017-01-11 16:11:01 -080073#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
74#define FASTRPC_STATIC_HANDLE_LISTENER (3)
75#define FASTRPC_STATIC_HANDLE_MAX (20)
76
/* Terminator token closing the statement-list argument of PERF(). */
#define PERF_END (void)0

/*
 * Time the statement list @ff and accumulate the elapsed nanoseconds
 * into counter @cnt when profiling is enabled (@enb non-zero).
 * Invoked as:  PERF(enb, cnt, <statements> PERF_END);
 * The statements run unconditionally; only the timing is gated.
 */
#define PERF(enb, cnt, ff) \
	{\
		struct timespec startT = {0};\
		if (enb) {\
			getnstimeofday(&startT);\
		} \
		ff ;\
		if (enb) {\
			cnt += getnstimediff(&startT);\
		} \
	}
90
Sathish Ambley69e1ab02016-10-18 10:28:15 -070091static int fastrpc_glink_open(int cid);
92static void fastrpc_glink_close(void *chan, int cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -080093static struct dentry *debugfs_root;
94static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -070095
96static inline uint64_t buf_page_start(uint64_t buf)
97{
98 uint64_t start = (uint64_t) buf & PAGE_MASK;
99 return start;
100}
101
102static inline uint64_t buf_page_offset(uint64_t buf)
103{
104 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
105 return offset;
106}
107
108static inline int buf_num_pages(uint64_t buf, ssize_t len)
109{
110 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
111 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
112 int nPages = end - start + 1;
113 return nPages;
114}
115
116static inline uint64_t buf_page_size(uint32_t size)
117{
118 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
119
120 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
121}
122
/* Convert a 64-bit wire address back into a kernel pointer. */
static inline void *uint64_to_ptr(uint64_t addr)
{
	return (void *)(uintptr_t)addr;
}
129
/* Convert a kernel pointer to its 64-bit wire representation. */
static inline uint64_t ptr_to_uint64(void *ptr)
{
	return (uint64_t)(uintptr_t)ptr;
}
136
struct fastrpc_file;

/*
 * DMA-coherent buffer used to marshal invoke arguments.  Freed buffers
 * may be parked on fl->bufs for reuse (see fastrpc_buf_free cache=1).
 */
struct fastrpc_buf {
	struct hlist_node hn;		/* link on fastrpc_file::bufs */
	struct fastrpc_file *fl;	/* owning client */
	void *virt;			/* kernel virtual address */
	uint64_t phys;			/* bus address; SMMU cb tagged in bits 32+ */
	ssize_t size;			/* allocation size in bytes */
};

struct fastrpc_ctx_lst;

/*
 * One user buffer interval for overlap analysis (context_build_overlap).
 * [mstart, mend) is the sub-range that actually needs copying.
 */
struct overlap {
	uintptr_t start;	/* buffer start address */
	uintptr_t end;		/* one past the last byte */
	int raix;		/* index into the remote-arg array */
	uintptr_t mstart;	/* start of range to copy (0 if none) */
	uintptr_t mend;		/* end of range to copy (0 if none) */
	uintptr_t offset;	/* leading bytes covered by a prior buffer */
};
157
/* One in-flight remote invocation: args, mappings and completion. */
struct smq_invoke_ctx {
	struct hlist_node hn;		/* on clst.pending or .interrupted */
	struct completion work;		/* completed when a reply arrives */
	int retval;			/* return value posted by the remote side */
	int pid;
	int tgid;
	remote_arg_t *lpra;		/* local (caller-format) args */
	remote_arg64_t *rpra;		/* remote (wire-format) args */
	int *fds;			/* per-arg ion fds (-1/0 = none) */
	unsigned int *attrs;		/* per-arg FASTRPC_ATTR_* flags */
	struct fastrpc_mmap **maps;	/* per-arg ion mappings */
	struct fastrpc_buf *buf;	/* metadata + copied-args buffer */
	ssize_t used;			/* bytes of buf consumed */
	struct fastrpc_file *fl;
	uint32_t sc;			/* scalars word describing the call */
	struct overlap *overs;		/* overlap records, one per buffer */
	struct overlap **overps;	/* the same records, sorted */
	struct smq_msg msg;
	uint32_t *crc;			/* user CRC-list pointer, or 0 */
};

/* Per-file lists of pending and signal-interrupted invocations. */
struct fastrpc_ctx_lst {
	struct hlist_head pending;
	struct hlist_head interrupted;
};

/* State of one SMMU context bank used to map buffers for the DSP. */
struct fastrpc_smmu {
	struct dma_iommu_mapping *mapping;
	int cb;			/* context bank; also tags addresses in bits 32+ */
	int enabled;
	int faults;
	int secure;		/* set for the content-protection session */
	int coherent;
};

/* One session (device + SMMU state) handed out to a client. */
struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};

/* glink transport bookkeeping for one channel. */
struct fastrpc_glink_info {
	int link_state;		/* FASTRPC_LINK_STATE_* */
	int port_state;		/* FASTRPC_LINK_* connection state */
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};
206
/* Per-DSP channel state; one entry per channel id (see gcinfo[]). */
struct fastrpc_channel_ctx {
	char *name;
	char *subsys;		/* subsystem name, e.g. "adsp" */
	void *chan;		/* transport channel handle */
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;
	struct notifier_block nb;
	struct kref kref;
	int sesscount;
	int ssrcount;		/* subsystem-restart generation counter */
	void *handle;
	int prevssrcount;
	int vmid;		/* remote VM id for hyp_assign_phys, or 0 */
	struct fastrpc_glink_info link;
};

/* Driver-global state (singleton gfa). */
struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;		/* global mappings (fastrpc_mmap_remove) */
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;	/* all open fastrpc_file instances */
	spinlock_t hlock;		/* protects maps and drivers */
	struct ion_client *client;
	struct device *dev;
};

/* One ion buffer imported and mapped for remote access. */
struct fastrpc_mmap {
	struct hlist_node hn;		/* on fl->maps or gfa.maps */
	struct fastrpc_file *fl;
	struct fastrpc_apps *apps;
	int fd;				/* ion fd the buffer was imported from */
	uint32_t flags;			/* caller-supplied map flags */
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;			/* device address; SMMU cb in bits 32+ */
	ssize_t size;			/* mapped size */
	uintptr_t va;			/* user virtual address */
	ssize_t len;			/* user-requested length */
	int refs;			/* guarded by fl->hlock */
	uintptr_t raddr;		/* address on the remote side */
	int uncached;
	int secure;			/* ION_FLAG_SECURE buffer */
	uintptr_t attr;			/* FASTRPC_ATTR_* */
};
259
/*
 * Per-file profiling accumulators filled by the PERF() macro
 * (nanoseconds; field meanings per PERF_KEYS above).
 */
struct fastrpc_perf {
	int64_t count;
	int64_t flush;
	int64_t map;
	int64_t copy;
	int64_t link;
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;
	int64_t invoke;
};

/* Per-open-fd client state. */
struct fastrpc_file {
	struct hlist_node hn;		/* on gfa.drivers */
	spinlock_t hlock;		/* protects maps, bufs and clst */
	struct hlist_head maps;		/* ion mappings owned by this client */
	struct hlist_head bufs;		/* cached argument buffers */
	struct fastrpc_ctx_lst clst;	/* pending/interrupted invokes */
	struct fastrpc_session_ctx *sctx;	/* default session */
	struct fastrpc_session_ctx *secsctx;	/* secure session, lazily set */
	uint32_t mode;
	uint32_t profile;		/* non-zero enables PERF() timing */
	int tgid;
	int cid;			/* channel (DSP) id */
	int ssrcount;			/* channel ssrcount snapshot */
	int pd;				/* process domain on the DSP */
	struct fastrpc_apps *apps;
	struct fastrpc_perf perf;
	struct dentry *debugfs_file;
};
290
static struct fastrpc_apps gfa;

/* Static channel table, indexed by channel id (NUM_CHANNELS entries). */
static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};
319
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800320static inline int64_t getnstimediff(struct timespec *start)
321{
322 int64_t ns;
323 struct timespec ts, b;
324
325 getnstimeofday(&ts);
326 b = timespec_sub(ts, *start);
327 ns = timespec_to_ns(&b);
328 return ns;
329}
330
/*
 * Release an argument buffer.  With @cache set the buffer is parked on
 * fl->bufs for reuse; otherwise its pages are returned to HLOS
 * ownership (if the channel uses a remote VM) and freed.
 */
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == 0 ? 0 : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		/* recycle: stash on the per-file cache instead of freeing */
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		/* strip the SMMU context-bank tag from the upper bits */
		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			/* reclaim the pages for HLOS before freeing them */
			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->dev, buf->size, buf->virt,
					buf->phys);
	}
	kfree(buf);
}
362
363static void fastrpc_buf_list_free(struct fastrpc_file *fl)
364{
365 struct fastrpc_buf *buf, *free;
366
367 do {
368 struct hlist_node *n;
369
370 free = 0;
371 spin_lock(&fl->hlock);
372 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
373 hlist_del_init(&buf->hn);
374 free = buf;
375 break;
376 }
377 spin_unlock(&fl->hlock);
378 if (free)
379 fastrpc_buf_free(free, 0);
380 } while (free);
381}
382
383static void fastrpc_mmap_add(struct fastrpc_mmap *map)
384{
385 struct fastrpc_file *fl = map->fl;
386
387 spin_lock(&fl->hlock);
388 hlist_add_head(&map->hn, &fl->maps);
389 spin_unlock(&fl->hlock);
390}
391
392static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800393 ssize_t len, int mflags, int refs, struct fastrpc_mmap **ppmap)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700394{
395 struct fastrpc_mmap *match = 0, *map;
396 struct hlist_node *n;
397
398 spin_lock(&fl->hlock);
399 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
400 if (va >= map->va &&
401 va + len <= map->va + map->len &&
402 map->fd == fd) {
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800403 if (refs)
404 map->refs++;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700405 match = map;
406 break;
407 }
408 }
409 spin_unlock(&fl->hlock);
410 if (match) {
411 *ppmap = match;
412 return 0;
413 }
414 return -ENOTTY;
415}
416
417static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
418 ssize_t len, struct fastrpc_mmap **ppmap)
419{
420 struct fastrpc_mmap *match = 0, *map;
421 struct hlist_node *n;
422 struct fastrpc_apps *me = &gfa;
423
424 spin_lock(&me->hlock);
425 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
426 if (map->raddr == va &&
427 map->raddr + map->len == va + len &&
428 map->refs == 1) {
429 match = map;
430 hlist_del_init(&map->hn);
431 break;
432 }
433 }
434 spin_unlock(&me->hlock);
435 if (match) {
436 *ppmap = match;
437 return 0;
438 }
439 spin_lock(&fl->hlock);
440 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
441 if (map->raddr == va &&
442 map->raddr + map->len == va + len &&
443 map->refs == 1) {
444 match = map;
445 hlist_del_init(&map->hn);
446 break;
447 }
448 }
449 spin_unlock(&fl->hlock);
450 if (match) {
451 *ppmap = match;
452 return 0;
453 }
454 return -ENOTTY;
455}
456
/*
 * Drop one reference on @map; on the last reference tear the mapping
 * down: free the ion handle, undo the SMMU/DMA mapping, reclaim the
 * pages from the remote VM if they were assigned, then detach and put
 * the dma_buf.
 */
static void fastrpc_mmap_free(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;
	int destVM[1] = {VMID_HLOS};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	if (!map)
		return;
	fl = map->fl;
	spin_lock(&fl->hlock);
	map->refs--;
	if (!map->refs)
		hlist_del_init(&map->hn);
	spin_unlock(&fl->hlock);
	/*
	 * NOTE(review): map->refs is re-read after dropping the lock —
	 * assumes no concurrent ref drop can race here; verify callers.
	 */
	if (map->refs > 0)
		return;
	/* secure buffers were mapped through the secure session */
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	if (!IS_ERR_OR_NULL(map->handle))
		ion_free(fl->apps->client, map->handle);
	if (sess->smmu.enabled) {
		if (map->size || map->phys)
			msm_dma_unmap_sg(sess->dev,
				map->table->sgl,
				map->table->nents, DMA_BIDIRECTIONAL,
				map->buf);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid && map->phys) {
		int srcVM[2] = {VMID_HLOS, vmid};

		/* return the pages to HLOS ownership */
		hyp_assign_phys(map->phys, buf_page_size(map->size),
			srcVM, 2, destVM, destVMperm, 1);
	}

	if (!IS_ERR_OR_NULL(map->table))
		dma_buf_unmap_attachment(map->attach, map->table,
				DMA_BIDIRECTIONAL);
	if (!IS_ERR_OR_NULL(map->attach))
		dma_buf_detach(map->buf, map->attach);
	if (!IS_ERR_OR_NULL(map->buf))
		dma_buf_put(map->buf);
	kfree(map);
}
506
static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
			struct fastrpc_session_ctx **session);

/*
 * Import the ion buffer behind @fd and map it for remote access on the
 * appropriate session (secure or default), producing a fastrpc_mmap
 * covering [va, va + len).  An existing mapping for the same fd/range
 * is reused with its refcount bumped.  Returns 0 with *ppmap set; on
 * failure the partially built mapping is torn down via
 * fastrpc_mmap_free().
 */
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, ssize_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = 0;
	unsigned long attrs;
	unsigned long flags;
	int err = 0, vmid;

	/* reuse (and reference) an existing mapping when possible */
	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	VERIFY(err, !IS_ERR_OR_NULL(map->handle =
			ion_import_dma_buf_fd(fl->apps->client, fd)));
	if (err)
		goto bail;
	VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
	if (err)
		goto bail;

	map->uncached = !ION_IS_CACHED(flags);
	/* buffers with no user VA are treated as uncached */
	if (map->attr & FASTRPC_ATTR_NOVA)
		map->uncached = 1;

	map->secure = flags & ION_FLAG_SECURE;
	if (map->secure) {
		/* lazily allocate the secure session on first use */
		if (!fl->secsctx)
			err = fastrpc_session_alloc(chan, 1,
						&fl->secsctx);
		if (err)
			goto bail;
	}
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->attach =
				dma_buf_attach(map->buf, sess->dev)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
	if (err)
		goto bail;
	if (sess->smmu.enabled) {
		attrs = DMA_ATTR_EXEC_MAPPING;
		VERIFY(err, map->table->nents ==
			msm_dma_map_sg_attrs(sess->dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
		if (err)
			goto bail;
	} else {
		/* no SMMU: the buffer must be physically contiguous */
		VERIFY(err, map->table->nents == 1);
		if (err)
			goto bail;
	}
	map->phys = sg_dma_address(map->table->sgl);
	if (sess->smmu.cb) {
		/* tag the address with the context bank in bits 32+ */
		map->phys += ((uint64_t)sess->smmu.cb << 32);
		map->size = sg_dma_len(map->table->sgl);
	} else {
		map->size = buf_page_size(len);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		/* share the pages with the remote subsystem's VM */
		VERIFY(err, !hyp_assign_phys(map->phys,
				buf_page_size(map->size),
				srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}
	map->va = va;
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}
617
/*
 * Obtain an argument buffer of at least @size bytes: reuse the
 * smallest cached buffer that fits, otherwise allocate a fresh
 * DMA-coherent one (draining the cache and retrying once on failure)
 * and, when the channel uses a remote VM, hyp-assign it there.
 */
static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
			struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = 0, *fr = 0;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = 0;
	VERIFY(err, buf = kzalloc(sizeof(*buf), GFP_KERNEL));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = 0;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
				       (void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
					       (void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	/* encode the SMMU context bank in the upper address bits */
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		/* share the buffer with the remote subsystem's VM */
		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}
683
684
685static int context_restore_interrupted(struct fastrpc_file *fl,
Sathish Ambleybae51902017-07-03 15:00:49 -0700686 struct fastrpc_ioctl_invoke_crc *inv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700687 struct smq_invoke_ctx **po)
688{
689 int err = 0;
690 struct smq_invoke_ctx *ctx = 0, *ictx = 0;
691 struct hlist_node *n;
692 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
693
694 spin_lock(&fl->hlock);
695 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
696 if (ictx->pid == current->pid) {
697 if (invoke->sc != ictx->sc || ictx->fl != fl)
698 err = -1;
699 else {
700 ctx = ictx;
701 hlist_del_init(&ctx->hn);
702 hlist_add_head(&ctx->hn, &fl->clst.pending);
703 }
704 break;
705 }
706 }
707 spin_unlock(&fl->hlock);
708 if (ctx)
709 *po = ctx;
710 return err;
711}
712
713#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
714static int overlap_ptr_cmp(const void *a, const void *b)
715{
716 struct overlap *pa = *((struct overlap **)a);
717 struct overlap *pb = *((struct overlap **)b);
718 /* sort with lowest starting buffer first */
719 int st = CMP(pa->start, pb->start);
720 /* sort with highest ending buffer first */
721 int ed = CMP(pb->end, pa->end);
722 return st == 0 ? ed : st;
723}
724
/*
 * Compute, for every in/out buffer, the sub-range that actually needs
 * to be copied into the shared message buffer.  Buffers are sorted by
 * start address (ties: longest first).  A buffer starting inside the
 * running maximum extent copies only its non-overlapped tail
 * ([mstart, mend), with `offset` leading bytes already covered); a
 * buffer wholly contained in an earlier one copies nothing
 * (mstart = mend = 0).  Fails (via VERIFY) if a non-empty buffer's
 * address range wraps around.
 */
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			/* reject address wrap-around */
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, 0);
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			/* overlaps the running extent: copy only the tail */
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				/* fully contained: nothing to copy */
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			/* disjoint: copy the whole buffer */
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}
770
/*
 * Copy @size bytes from @src to @dst: copy_from_user() for userspace
 * callers (@kernel == 0), plain memmove() for in-kernel requests.
 * Sets @err via VERIFY() on a faulting user copy.
 */
#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst), (src),\
							(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

/* Mirror of K_COPY_FROM_USER for the kernel-to-user direction. */
#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((dst), (src),\
						      (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)
788
789
static void context_free(struct smq_invoke_ctx *ctx);

/*
 * Allocate and populate an invoke context.  The context and all of its
 * per-buffer side arrays (maps, lpra, fds, attrs, overs, overps) are
 * carved out of a single kzalloc'd region laid out after the struct.
 * Argument descriptors, fds and attrs are copied in from the caller
 * (user or kernel), the overlap table is built, and the context is
 * queued on the file's pending list.
 */
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			struct fastrpc_ioctl_invoke_crc *invokefd,
			struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	/* trailing space for the per-buffer side arrays */
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	/* carve the side arrays out of the region after the struct */
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}
	ctx->crc = (uint32_t *)invokefd->crc;
	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = current->tgid;
	init_completion(&ctx->work);

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}
861
862static void context_save_interrupted(struct smq_invoke_ctx *ctx)
863{
864 struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
865
866 spin_lock(&ctx->fl->hlock);
867 hlist_del_init(&ctx->hn);
868 hlist_add_head(&ctx->hn, &clst->interrupted);
869 spin_unlock(&ctx->fl->hlock);
870 /* free the cache on power collapse */
871 fastrpc_buf_list_free(ctx->fl);
872}
873
874static void context_free(struct smq_invoke_ctx *ctx)
875{
876 int i;
877 int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
878 REMOTE_SCALARS_OUTBUFS(ctx->sc);
879 spin_lock(&ctx->fl->hlock);
880 hlist_del_init(&ctx->hn);
881 spin_unlock(&ctx->fl->hlock);
882 for (i = 0; i < nbufs; ++i)
883 fastrpc_mmap_free(ctx->maps[i]);
884 fastrpc_buf_free(ctx->buf, 1);
885 kfree(ctx);
886}
887
/* Post the remote return value, then wake the thread waiting on ctx->work. */
static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}
893
894
895static void fastrpc_notify_users(struct fastrpc_file *me)
896{
897 struct smq_invoke_ctx *ictx;
898 struct hlist_node *n;
899
900 spin_lock(&me->hlock);
901 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
902 complete(&ictx->work);
903 }
904 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
905 complete(&ictx->work);
906 }
907 spin_unlock(&me->hlock);
908
909}
910
911static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
912{
913 struct fastrpc_file *fl;
914 struct hlist_node *n;
915
916 spin_lock(&me->hlock);
917 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
918 if (fl->cid == cid)
919 fastrpc_notify_users(fl);
920 }
921 spin_unlock(&me->hlock);
922
923}
924static void context_list_ctor(struct fastrpc_ctx_lst *me)
925{
926 INIT_HLIST_HEAD(&me->interrupted);
927 INIT_HLIST_HEAD(&me->pending);
928}
929
930static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
931{
932 struct fastrpc_ctx_lst *clst = &fl->clst;
933 struct smq_invoke_ctx *ictx = 0, *ctxfree;
934 struct hlist_node *n;
935
936 do {
937 ctxfree = 0;
938 spin_lock(&fl->hlock);
939 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
940 hlist_del_init(&ictx->hn);
941 ctxfree = ictx;
942 break;
943 }
944 spin_unlock(&fl->hlock);
945 if (ctxfree)
946 context_free(ctxfree);
947 } while (ctxfree);
948 do {
949 ctxfree = 0;
950 spin_lock(&fl->hlock);
951 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
952 hlist_del_init(&ictx->hn);
953 ctxfree = ictx;
954 break;
955 }
956 spin_unlock(&fl->hlock);
957 if (ctxfree)
958 context_free(ctxfree);
959 } while (ctxfree);
960}
961
962static int fastrpc_file_free(struct fastrpc_file *fl);
963static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
964{
965 struct fastrpc_file *fl, *free;
966 struct hlist_node *n;
967
968 do {
969 free = 0;
970 spin_lock(&me->hlock);
971 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
972 hlist_del_init(&fl->hn);
973 free = fl;
974 break;
975 }
976 spin_unlock(&me->hlock);
977 if (free)
978 fastrpc_file_free(free);
979 } while (free);
980}
981
982static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
983{
984 remote_arg64_t *rpra;
985 remote_arg_t *lpra = ctx->lpra;
986 struct smq_invoke_buf *list;
987 struct smq_phy_page *pages, *ipage;
988 uint32_t sc = ctx->sc;
989 int inbufs = REMOTE_SCALARS_INBUFS(sc);
990 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -0800991 int handles, bufs = inbufs + outbufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700992 uintptr_t args;
993 ssize_t rlen = 0, copylen = 0, metalen = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -0800994 int i, oix;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700995 int err = 0;
996 int mflags = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -0800997 uint64_t *fdlist;
Sathish Ambleybae51902017-07-03 15:00:49 -0700998 uint32_t *crclist;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700999
1000 /* calculate size of the metadata */
1001 rpra = 0;
1002 list = smq_invoke_buf_start(rpra, sc);
1003 pages = smq_phy_page_start(sc, list);
1004 ipage = pages;
1005
1006 for (i = 0; i < bufs; ++i) {
1007 uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
1008 ssize_t len = lpra[i].buf.len;
1009
1010 if (ctx->fds[i] && (ctx->fds[i] != -1))
1011 fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1012 ctx->attrs[i], buf, len,
1013 mflags, &ctx->maps[i]);
1014 ipage += 1;
1015 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001016 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1017 for (i = bufs; i < bufs + handles; i++) {
1018 VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1019 FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
1020 if (err)
1021 goto bail;
1022 ipage += 1;
1023 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001024 metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
1025 (sizeof(uint32_t) * M_CRCLIST);
1026
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001027 /* calculate len requreed for copying */
1028 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1029 int i = ctx->overps[oix]->raix;
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001030 uintptr_t mstart, mend;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001031 ssize_t len = lpra[i].buf.len;
1032
1033 if (!len)
1034 continue;
1035 if (ctx->maps[i])
1036 continue;
1037 if (ctx->overps[oix]->offset == 0)
1038 copylen = ALIGN(copylen, BALIGN);
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001039 mstart = ctx->overps[oix]->mstart;
1040 mend = ctx->overps[oix]->mend;
1041 VERIFY(err, (mend - mstart) <= LONG_MAX);
1042 if (err)
1043 goto bail;
1044 copylen += mend - mstart;
1045 VERIFY(err, copylen >= 0);
1046 if (err)
1047 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001048 }
1049 ctx->used = copylen;
1050
1051 /* allocate new buffer */
1052 if (copylen) {
1053 VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
1054 if (err)
1055 goto bail;
1056 }
1057 /* copy metadata */
1058 rpra = ctx->buf->virt;
1059 ctx->rpra = rpra;
1060 list = smq_invoke_buf_start(rpra, sc);
1061 pages = smq_phy_page_start(sc, list);
1062 ipage = pages;
1063 args = (uintptr_t)ctx->buf->virt + metalen;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001064 for (i = 0; i < bufs + handles; ++i) {
1065 if (lpra[i].buf.len)
1066 list[i].num = 1;
1067 else
1068 list[i].num = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001069 list[i].pgidx = ipage - pages;
1070 ipage++;
1071 }
1072 /* map ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001073 PERF(ctx->fl->profile, ctx->fl->perf.map,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001074 for (i = 0; i < inbufs + outbufs; ++i) {
1075 struct fastrpc_mmap *map = ctx->maps[i];
1076 uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
1077 ssize_t len = lpra[i].buf.len;
1078
1079 rpra[i].buf.pv = 0;
1080 rpra[i].buf.len = len;
1081 if (!len)
1082 continue;
1083 if (map) {
1084 struct vm_area_struct *vma;
1085 uintptr_t offset;
1086 int num = buf_num_pages(buf, len);
1087 int idx = list[i].pgidx;
1088
1089 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001090 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001091 } else {
1092 down_read(&current->mm->mmap_sem);
1093 VERIFY(err, NULL != (vma = find_vma(current->mm,
1094 map->va)));
1095 if (err) {
1096 up_read(&current->mm->mmap_sem);
1097 goto bail;
1098 }
1099 offset = buf_page_start(buf) - vma->vm_start;
1100 up_read(&current->mm->mmap_sem);
1101 VERIFY(err, offset < (uintptr_t)map->size);
1102 if (err)
1103 goto bail;
1104 }
1105 pages[idx].addr = map->phys + offset;
1106 pages[idx].size = num << PAGE_SHIFT;
1107 }
1108 rpra[i].buf.pv = buf;
1109 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001110 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001111 for (i = bufs; i < bufs + handles; ++i) {
1112 struct fastrpc_mmap *map = ctx->maps[i];
1113
1114 pages[i].addr = map->phys;
1115 pages[i].size = map->size;
1116 }
1117 fdlist = (uint64_t *)&pages[bufs + handles];
1118 for (i = 0; i < M_FDLIST; i++)
1119 fdlist[i] = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001120 crclist = (uint32_t *)&fdlist[M_FDLIST];
1121 memset(crclist, 0, sizeof(uint32_t)*M_FDLIST);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001122
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001123 /* copy non ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001124 PERF(ctx->fl->profile, ctx->fl->perf.copy,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001125 rlen = copylen - metalen;
1126 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1127 int i = ctx->overps[oix]->raix;
1128 struct fastrpc_mmap *map = ctx->maps[i];
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001129 ssize_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001130 uint64_t buf;
1131 ssize_t len = lpra[i].buf.len;
1132
1133 if (!len)
1134 continue;
1135 if (map)
1136 continue;
1137 if (ctx->overps[oix]->offset == 0) {
1138 rlen -= ALIGN(args, BALIGN) - args;
1139 args = ALIGN(args, BALIGN);
1140 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001141 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001142 VERIFY(err, rlen >= mlen);
1143 if (err)
1144 goto bail;
1145 rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
1146 pages[list[i].pgidx].addr = ctx->buf->phys -
1147 ctx->overps[oix]->offset +
1148 (copylen - rlen);
1149 pages[list[i].pgidx].addr =
1150 buf_page_start(pages[list[i].pgidx].addr);
1151 buf = rpra[i].buf.pv;
1152 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1153 if (i < inbufs) {
1154 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1155 lpra[i].buf.pv, len);
1156 if (err)
1157 goto bail;
1158 }
1159 args = args + mlen;
1160 rlen -= mlen;
1161 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001162 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001163
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001164 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001165 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1166 int i = ctx->overps[oix]->raix;
1167 struct fastrpc_mmap *map = ctx->maps[i];
1168
1169 if (ctx->fl->sctx->smmu.coherent)
1170 continue;
1171 if (map && map->uncached)
1172 continue;
1173 if (rpra[i].buf.len && ctx->overps[oix]->mstart)
1174 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1175 uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
1176 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001177 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001178 for (i = bufs; i < bufs + handles; i++) {
1179 rpra[i].dma.fd = ctx->fds[i];
1180 rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1181 rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001182 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001183
1184 if (!ctx->fl->sctx->smmu.coherent) {
1185 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001186 dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001187 PERF_END);
1188 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001189 bail:
1190 return err;
1191}
1192
/*
 * Copy the results of a completed remote invocation back to the caller.
 *
 * Out-buffers that were staged in the kernel copy buffer are copied back
 * to the caller's memory; out-buffers backed by an ion map were written in
 * place by the DSP, so their maps are simply released.  The DSP also
 * returns a zero-terminated list of fds it is done with (fdlist); any map
 * found for such an fd is freed.  If the caller supplied a crc pointer,
 * the CRC list produced by the DSP is copied out as well.
 *
 * Returns 0 on success or the error raised by K_COPY_TO_USER.
 */
static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		    remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	uint32_t *crclist = NULL;

	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	/* fdlist and crclist live in the metadata area right after the
	 * page list, mirroring the layout produced by get_args()
	 */
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	crclist = (uint32_t *)(fdlist + M_FDLIST);

	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			/* non-ion out-buffer: copy staged data back */
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			/* ion out-buffer: DSP wrote in place, drop the map */
			fastrpc_mmap_free(ctx->maps[i]);
			ctx->maps[i] = 0;
		}
	}
	if (inbufs + outbufs + handles) {
		/* release maps for fds the DSP no longer needs;
		 * the list is zero-terminated
		 */
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap);
		}
	}
	/* hand the DSP-computed CRCs to the caller if requested */
	if (ctx->crc && crclist && rpra)
		K_COPY_TO_USER(err, kernel, (void __user *)ctx->crc,
			crclist, M_CRCLIST*sizeof(uint32_t));

 bail:
	return err;
}
1244
/*
 * Pre-invoke cache maintenance: flush the partial cache lines that straddle
 * the start and end of each cached out-buffer.  The DSP will write these
 * buffers; any dirty CPU data sharing a cache line with them must reach
 * memory before the call so a later invalidate cannot discard it.
 */
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		/* uncached maps need no cache maintenance */
		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		/* buffers staged inside the metadata page are flushed
		 * together with it
		 */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		/* flush the partial cache line at the buffer start */
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
							rpra[i].buf.len);
		/* and the partial cache line at the buffer end */
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}
1274
/*
 * Post-invoke cache maintenance: invalidate CPU caches over each cached
 * out-buffer so the CPU observes what the DSP wrote.  Ion-backed maps are
 * invalidated through the ion cache op; everything else via
 * dmac_inv_range().  The metadata area (rpra, ctx->used bytes) is
 * invalidated last since the DSP writes fdlist/crclist into it.
 */
static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		/* buffers sharing the metadata page are covered by the
		 * final rpra invalidate below
		 */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						 + rpra[i].buf.len));
	}

	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}
1308
/*
 * Package the invocation described by @ctx into an smq_msg and transmit it
 * to the DSP over glink.  Kernel-originated calls send pid 0.  Bit 0 of
 * the context cookie carries the target PD (fl->pd); the rx path masks it
 * off before resolving the context pointer.
 *
 * Fails with -ECONNRESET if the channel went through subsystem restart
 * since this file attached to it, or with a VERIFY error if the glink
 * port is not connected.
 */
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, 0 != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = current->tgid;
	msg->tid = current->pid;
	/* kernel-originated invokes are marked with pid 0 */
	if (kernel)
		msg->pid = 0;
	/* context pointer doubles as the completion cookie; low bit = PD */
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	/* channel restarted underneath us: the remote process is gone */
	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
		FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
 bail:
	return err;
}
1344
1345static void fastrpc_init(struct fastrpc_apps *me)
1346{
1347 int i;
1348
1349 INIT_HLIST_HEAD(&me->drivers);
1350 spin_lock_init(&me->hlock);
1351 mutex_init(&me->smd_mutex);
1352 me->channel = &gcinfo[0];
1353 for (i = 0; i < NUM_CHANNELS; i++) {
1354 init_completion(&me->channel[i].work);
1355 me->channel[i].sesscount = 0;
1356 }
1357}
1358
1359static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1360
1361static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
1362 uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07001363 struct fastrpc_ioctl_invoke_crc *inv)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001364{
1365 struct smq_invoke_ctx *ctx = 0;
1366 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
1367 int cid = fl->cid;
1368 int interrupted = 0;
1369 int err = 0;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001370 struct timespec invoket;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001371
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001372 if (fl->profile)
1373 getnstimeofday(&invoket);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001374 if (!kernel) {
1375 VERIFY(err, 0 == context_restore_interrupted(fl, inv,
1376 &ctx));
1377 if (err)
1378 goto bail;
1379 if (fl->sctx->smmu.faults)
1380 err = FASTRPC_ENOSUCH;
1381 if (err)
1382 goto bail;
1383 if (ctx)
1384 goto wait;
1385 }
1386
1387 VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
1388 if (err)
1389 goto bail;
1390
1391 if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001392 PERF(fl->profile, fl->perf.getargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001393 VERIFY(err, 0 == get_args(kernel, ctx));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001394 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001395 if (err)
1396 goto bail;
1397 }
1398
Sathish Ambleyc432b502017-06-05 12:03:42 -07001399 if (!fl->sctx->smmu.coherent)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001400 inv_args_pre(ctx);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001401 PERF(fl->profile, fl->perf.link,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001402 VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001403 PERF_END);
1404
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001405 if (err)
1406 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001407 wait:
1408 if (kernel)
1409 wait_for_completion(&ctx->work);
1410 else {
1411 interrupted = wait_for_completion_interruptible(&ctx->work);
1412 VERIFY(err, 0 == (err = interrupted));
1413 if (err)
1414 goto bail;
1415 }
Sathish Ambleyc432b502017-06-05 12:03:42 -07001416
1417 PERF(fl->profile, fl->perf.invargs,
1418 if (!fl->sctx->smmu.coherent)
1419 inv_args(ctx);
1420 PERF_END);
1421
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001422 VERIFY(err, 0 == (err = ctx->retval));
1423 if (err)
1424 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001425
1426 PERF(fl->profile, fl->perf.putargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001427 VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001428 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001429 if (err)
1430 goto bail;
1431 bail:
1432 if (ctx && interrupted == -ERESTARTSYS)
1433 context_save_interrupted(ctx);
1434 else if (ctx)
1435 context_free(ctx);
1436 if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
1437 err = ECONNRESET;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001438
1439 if (fl->profile && !interrupted) {
1440 if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
1441 fl->perf.invoke += getnstimediff(&invoket);
1442 if (!(invoke->handle >= 0 &&
1443 invoke->handle <= FASTRPC_STATIC_HANDLE_MAX))
1444 fl->perf.count++;
1445 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001446 return err;
1447}
1448
Sathish Ambley36849af2017-02-02 09:35:55 -08001449static int fastrpc_channel_open(struct fastrpc_file *fl);
/*
 * Initialize the remote side of a fastrpc process.
 *
 * FASTRPC_INIT_ATTACH: attach the caller to an existing (static) PD by
 * sending just its tgid.  FASTRPC_INIT_CREATE: spawn a dynamic user PD,
 * mapping the shell file and a memory region for the remote process and
 * passing name/file/pages plus optional attrs/siglen.
 *
 * Returns 0 on success, -ENOTTY for unknown flags, or the first error
 * raised along the way.  On failure the memory map is released; the file
 * map is always released.
 */
static int fastrpc_init_process(struct fastrpc_file *fl,
				struct fastrpc_ioctl_init_attrs *uproc)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_crc ioctl;
	struct fastrpc_ioctl_init *init = &uproc->init;
	struct smq_phy_page pages[1];
	struct fastrpc_mmap *file = 0, *mem = 0;

	VERIFY(err, !fastrpc_channel_open(fl));
	if (err)
		goto bail;
	if (init->flags == FASTRPC_INIT_ATTACH) {
		remote_arg_t ra[1];
		int tgid = current->tgid;

		/* attach to the static PD: single in-arg carrying our tgid */
		ra[0].buf.pv = (void *)&tgid;
		ra[0].buf.len = sizeof(tgid);
		ioctl.inv.handle = 1;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = 0;
		ioctl.attrs = 0;
		ioctl.crc = NULL;
		fl->pd = 0;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else if (init->flags == FASTRPC_INIT_CREATE) {
		remote_arg_t ra[6];
		int fds[6];
		int mflags = 0;
		/* wire format of the first in-arg; field order matters */
		struct {
			int pgid;
			int namelen;
			int filelen;
			int pageslen;
			int attrs;
			int siglen;
		} inbuf;

		inbuf.pgid = current->tgid;
		inbuf.namelen = strlen(current->comm) + 1;
		inbuf.filelen = init->filelen;
		fl->pd = 1;
		/* map the shell/ELF file supplied by the caller, if any */
		if (init->filelen) {
			VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
				init->file, init->filelen, mflags, &file));
			if (err)
				goto bail;
		}
		inbuf.pageslen = 1;
		/* map the memory region donated for the remote heap */
		VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
				init->mem, init->memlen, mflags, &mem));
		if (err)
			goto bail;
		/* NOTE(review): duplicate of the assignment above —
		 * harmless but redundant
		 */
		inbuf.pageslen = 1;
		ra[0].buf.pv = (void *)&inbuf;
		ra[0].buf.len = sizeof(inbuf);
		fds[0] = 0;

		ra[1].buf.pv = (void *)current->comm;
		ra[1].buf.len = inbuf.namelen;
		fds[1] = 0;

		ra[2].buf.pv = (void *)init->file;
		ra[2].buf.len = inbuf.filelen;
		fds[2] = init->filefd;

		pages[0].addr = mem->phys;
		pages[0].size = mem->size;
		ra[3].buf.pv = (void *)pages;
		ra[3].buf.len = 1 * sizeof(*pages);
		fds[3] = 0;

		inbuf.attrs = uproc->attrs;
		ra[4].buf.pv = (void *)&(inbuf.attrs);
		ra[4].buf.len = sizeof(inbuf.attrs);
		fds[4] = 0;

		inbuf.siglen = uproc->siglen;
		ra[5].buf.pv = (void *)&(inbuf.siglen);
		ra[5].buf.len = sizeof(inbuf.siglen);
		fds[5] = 0;

		ioctl.inv.handle = 1;
		/* legacy create method unless attrs were supplied, which
		 * selects the extended method taking attrs+siglen
		 */
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
		if (uproc->attrs)
			ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = fds;
		ioctl.attrs = 0;
		ioctl.crc = NULL;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else {
		err = -ENOTTY;
	}
bail:
	/* keep the donated memory mapped on success; the file map is only
	 * needed during init
	 */
	if (mem && err)
		fastrpc_mmap_free(mem);
	if (file)
		fastrpc_mmap_free(file);
	return err;
}
1558
/*
 * Tell the DSP to tear down the remote process belonging to this file by
 * invoking the release method with the stored tgid.  No-op (error) when
 * the channel id is invalid or the channel is not open.
 */
static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_crc ioctl;
	remote_arg_t ra[1];
	int tgid = 0;

	VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
	if (err)
		goto bail;
	VERIFY(err, fl->apps->channel[fl->cid].chan != 0);
	if (err)
		goto bail;
	tgid = fl->tgid;
	ra[0].buf.pv = (void *)&tgid;
	ra[0].buf.len = sizeof(tgid);
	ioctl.inv.handle = 1;
	ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	ioctl.crc = NULL;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
bail:
	return err;
}
1586
1587static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
1588 struct fastrpc_mmap *map)
1589{
Sathish Ambleybae51902017-07-03 15:00:49 -07001590 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001591 struct smq_phy_page page;
1592 int num = 1;
1593 remote_arg_t ra[3];
1594 int err = 0;
1595 struct {
1596 int pid;
1597 uint32_t flags;
1598 uintptr_t vaddrin;
1599 int num;
1600 } inargs;
1601 struct {
1602 uintptr_t vaddrout;
1603 } routargs;
1604
1605 inargs.pid = current->tgid;
1606 inargs.vaddrin = (uintptr_t)map->va;
1607 inargs.flags = flags;
1608 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
1609 ra[0].buf.pv = (void *)&inargs;
1610 ra[0].buf.len = sizeof(inargs);
1611 page.addr = map->phys;
1612 page.size = map->size;
1613 ra[1].buf.pv = (void *)&page;
1614 ra[1].buf.len = num * sizeof(page);
1615
1616 ra[2].buf.pv = (void *)&routargs;
1617 ra[2].buf.len = sizeof(routargs);
1618
1619 ioctl.inv.handle = 1;
1620 if (fl->apps->compat)
1621 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
1622 else
1623 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
1624 ioctl.inv.pra = ra;
1625 ioctl.fds = 0;
1626 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001627 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001628 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1629 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1630 map->raddr = (uintptr_t)routargs.vaddrout;
1631
1632 return err;
1633}
1634
1635static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
1636 struct fastrpc_mmap *map)
1637{
Sathish Ambleybae51902017-07-03 15:00:49 -07001638 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001639 remote_arg_t ra[1];
1640 int err = 0;
1641 struct {
1642 int pid;
1643 uintptr_t vaddrout;
1644 ssize_t size;
1645 } inargs;
1646
1647 inargs.pid = current->tgid;
1648 inargs.size = map->size;
1649 inargs.vaddrout = map->raddr;
1650 ra[0].buf.pv = (void *)&inargs;
1651 ra[0].buf.len = sizeof(inargs);
1652
1653 ioctl.inv.handle = 1;
1654 if (fl->apps->compat)
1655 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
1656 else
1657 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
1658 ioctl.inv.pra = ra;
1659 ioctl.fds = 0;
1660 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001661 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001662 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1663 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1664 return err;
1665}
1666
1667static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
1668 ssize_t len, struct fastrpc_mmap **ppmap);
1669
1670static void fastrpc_mmap_add(struct fastrpc_mmap *map);
1671
1672static int fastrpc_internal_munmap(struct fastrpc_file *fl,
1673 struct fastrpc_ioctl_munmap *ud)
1674{
1675 int err = 0;
1676 struct fastrpc_mmap *map = 0;
1677
1678 VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
1679 if (err)
1680 goto bail;
1681 VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
1682 if (err)
1683 goto bail;
1684 fastrpc_mmap_free(map);
1685bail:
1686 if (err && map)
1687 fastrpc_mmap_add(map);
1688 return err;
1689}
1690
1691static int fastrpc_internal_mmap(struct fastrpc_file *fl,
1692 struct fastrpc_ioctl_mmap *ud)
1693{
1694
1695 struct fastrpc_mmap *map = 0;
1696 int err = 0;
1697
1698 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin, ud->size,
Sathish Ambleyae5ee542017-01-16 22:24:23 -08001699 ud->flags, 1, &map))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001700 return 0;
1701
1702 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
1703 (uintptr_t)ud->vaddrin, ud->size, ud->flags, &map));
1704 if (err)
1705 goto bail;
1706 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
1707 if (err)
1708 goto bail;
1709 ud->vaddrout = map->raddr;
1710 bail:
1711 if (err && map)
1712 fastrpc_mmap_free(map);
1713 return err;
1714}
1715
/*
 * kref release callback for a channel: closes the glink port and clears
 * the channel handle.  Invoked via kref_put_mutex() with me->smd_mutex
 * held, so this release path is responsible for dropping that mutex.
 */
static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	/* channel id is the index of ctx within the global channel array */
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = 0;
	mutex_unlock(&me->smd_mutex);
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
		MAJOR(me->dev_no), cid);
}
1730
1731static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
1732
/*
 * Pick a free SMMU session with the requested security level from @chan.
 * Caller must hold me->smd_mutex (see fastrpc_session_alloc()).  On
 * channels without per-session contexts (sesscount == 0), session 0 is
 * backed by the global device instead.
 *
 * Returns 0 and sets *session on success; error when no session matches.
 */
static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
			int secure, struct fastrpc_session_ctx **session)
{
	struct fastrpc_apps *me = &gfa;
	int idx = 0, err = 0;

	if (chan->sesscount) {
		/* first unused session matching the security requirement */
		for (idx = 0; idx < chan->sesscount; ++idx) {
			if (!chan->session[idx].used &&
				chan->session[idx].smmu.secure == secure) {
				chan->session[idx].used = 1;
				break;
			}
		}
		VERIFY(err, idx < chan->sesscount);
		if (err)
			goto bail;
		/* fresh session starts with a clean fault count */
		chan->session[idx].smmu.faults = 0;
	} else {
		VERIFY(err, me->dev != NULL);
		if (err)
			goto bail;
		chan->session[0].dev = me->dev;
	}

	*session = &chan->session[idx];
 bail:
	return err;
}
1762
1763bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv, size_t size)
1764{
1765 if (glink_queue_rx_intent(h, NULL, size))
1766 return false;
1767 return true;
1768}
1769
/*
 * glink tx-done callback: no per-packet resources to release, so this is
 * intentionally a no-op.
 */
void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
				  const void *pkt_priv, const void *ptr)
{
}
1774
1775void fastrpc_glink_notify_rx(void *handle, const void *priv,
1776 const void *pkt_priv, const void *ptr, size_t size)
1777{
1778 struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
1779 int len = size;
1780
1781 while (len >= sizeof(*rsp) && rsp) {
1782 rsp->ctx = rsp->ctx & ~1;
1783 context_notify_user(uint64_to_ptr(rsp->ctx), rsp->retval);
1784 rsp++;
1785 len = len - sizeof(*rsp);
1786 }
1787 glink_rx_done(handle, ptr, true);
1788}
1789
1790void fastrpc_glink_notify_state(void *handle, const void *priv,
1791 unsigned int event)
1792{
1793 struct fastrpc_apps *me = &gfa;
1794 int cid = (int)(uintptr_t)priv;
1795 struct fastrpc_glink_info *link;
1796
1797 if (cid < 0 || cid >= NUM_CHANNELS)
1798 return;
1799 link = &me->channel[cid].link;
1800 switch (event) {
1801 case GLINK_CONNECTED:
1802 link->port_state = FASTRPC_LINK_CONNECTED;
1803 complete(&me->channel[cid].work);
1804 break;
1805 case GLINK_LOCAL_DISCONNECTED:
1806 link->port_state = FASTRPC_LINK_DISCONNECTED;
1807 break;
1808 case GLINK_REMOTE_DISCONNECTED:
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05301809 if (me->channel[cid].chan) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001810 fastrpc_glink_close(me->channel[cid].chan, cid);
1811 me->channel[cid].chan = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001812 }
1813 break;
1814 default:
1815 break;
1816 }
1817}
1818
1819static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
1820 struct fastrpc_session_ctx **session)
1821{
1822 int err = 0;
1823 struct fastrpc_apps *me = &gfa;
1824
1825 mutex_lock(&me->smd_mutex);
1826 if (!*session)
1827 err = fastrpc_session_alloc_locked(chan, secure, session);
1828 mutex_unlock(&me->smd_mutex);
1829 return err;
1830}
1831
/*
 * Return @session to the channel's pool by clearing its used flag, under
 * the same mutex that guards allocation.
 */
static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
				struct fastrpc_session_ctx *session)
{
	struct fastrpc_apps *me = &gfa;

	mutex_lock(&me->smd_mutex);
	session->used = 0;
	mutex_unlock(&me->smd_mutex);
}
1841
/*
 * Tear down all per-process state: unlink the file from the global driver
 * list, notify the DSP, destroy pending contexts, free buffers and maps,
 * drop the channel reference (possibly closing the channel), release the
 * SMMU sessions, and finally free the structure itself.
 *
 * A file that never obtained a session context (fl->sctx == NULL) never
 * attached to a channel, so it is freed without any of the teardown.
 */
static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n;
	struct fastrpc_mmap *map = 0;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	if (!fl->sctx) {
		kfree(fl);
		return 0;
	}
	/* best effort: ignore failures telling the DSP we are going away */
	(void)fastrpc_release_current_dsp_process(fl);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map);
	}
	/* only drop the channel ref if no SSR happened since we took it;
	 * the release callback unlocks smd_mutex itself
	 */
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
				fastrpc_channel_close, &fl->apps->smd_mutex);
	if (fl->sctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
	kfree(fl);
	return 0;
}
1876
1877static int fastrpc_device_release(struct inode *inode, struct file *file)
1878{
1879 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
1880
1881 if (fl) {
Sathish Ambley1ca68232017-01-19 10:32:55 -08001882 if (fl->debugfs_file != NULL)
1883 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001884 fastrpc_file_free(fl);
1885 file->private_data = 0;
1886 }
1887 return 0;
1888}
1889
1890static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
1891 void *priv)
1892{
1893 struct fastrpc_apps *me = &gfa;
1894 int cid = (int)((uintptr_t)priv);
1895 struct fastrpc_glink_info *link;
1896
1897 if (cid < 0 || cid >= NUM_CHANNELS)
1898 return;
1899
1900 link = &me->channel[cid].link;
1901 switch (cb_info->link_state) {
1902 case GLINK_LINK_STATE_UP:
1903 link->link_state = FASTRPC_LINK_STATE_UP;
1904 complete(&me->channel[cid].work);
1905 break;
1906 case GLINK_LINK_STATE_DOWN:
1907 link->link_state = FASTRPC_LINK_STATE_DOWN;
1908 break;
1909 default:
1910 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
1911 break;
1912 }
1913}
1914
/*
 * Register for glink link-state notifications on channel @cid (idempotent:
 * returns success if a handle is already registered) and wait for the
 * link to come up.  Returns 0 on success, an error on bad cid, failed
 * registration, or timeout.
 */
static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;

	link = &me->channel[cid].link;
	/* already registered: nothing to do */
	if (link->link_notify_handle != NULL)
		goto bail;

	link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
	link->link_notify_handle = glink_register_link_state_cb(
				&link->link_info,
				(void *)((uintptr_t)cid));
	VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
	if (err) {
		link->link_notify_handle = NULL;
		goto bail;
	}
	/* wait for fastrpc_link_state_handler() to signal link-up */
	VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
			RPC_TIMEOUT));
bail:
	return err;
}
1942
1943static void fastrpc_glink_close(void *chan, int cid)
1944{
1945 int err = 0;
1946 struct fastrpc_glink_info *link;
1947
1948 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
1949 if (err)
1950 return;
1951 link = &gfa.channel[cid].link;
1952
1953 if (link->port_state == FASTRPC_LINK_CONNECTED ||
1954 link->port_state == FASTRPC_LINK_CONNECTING) {
1955 link->port_state = FASTRPC_LINK_DISCONNECTING;
1956 glink_close(chan);
1957 }
1958}
1959
/*
 * Open the glink port for channel @cid.  Requires the transport link to
 * be up and the port to be fully disconnected; configures the rx/tx/state
 * callbacks and stores the resulting handle in the channel context.
 *
 * Returns 0 on success; the CONNECTED transition is reported later via
 * fastrpc_glink_notify_state().
 */
static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &me->channel[cid].link.cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	/* refuse to open while a previous connection is still tearing down */
	VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
	if (err)
		goto bail;

	link->port_state = FASTRPC_LINK_CONNECTING;
	cfg->priv = (void *)(uintptr_t)cid;
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err)
		goto bail;
	me->channel[cid].chan = handle;
bail:
	return err;
}
1998
/*
 * debugfs open: stash the node's private data (a fastrpc_file, or NULL
 * for the global node) for fastrpc_debugfs_read().
 */
static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}
2004
/*
 * debugfs read: render driver state into a temporary buffer and hand it
 * to userspace via simple_read_from_buffer().
 *
 * With no file bound (global node) it dumps every channel's session
 * table; with a file bound it dumps that process's ids, buffers, maps,
 * and pending/interrupted invoke contexts.
 *
 * NOTE(review): on kzalloc failure this returns 0 (EOF) rather than
 * -ENOMEM — consider reporting the error.
 * NOTE(review): "%p" prints raw kernel addresses to debugfs; consider
 * "%pK" to respect kptr_restrict.
 */
static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
					 size_t count, loff_t *position)
{
	struct fastrpc_file *fl = filp->private_data;
	struct hlist_node *n;
	struct fastrpc_buf *buf = 0;
	struct fastrpc_mmap *map = 0;
	struct smq_invoke_ctx *ictx = 0;
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	unsigned int len = 0;
	int i, j, ret = 0;
	char *fileinfo = NULL;

	fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
	if (!fileinfo)
		goto bail;
	if (fl == NULL) {
		/* global node: per-channel session inventory */
		for (i = 0; i < NUM_CHANNELS; i++) {
			chan = &gcinfo[i];
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s\n\n",
					chan->name);
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s %d\n",
					"sesscount:", chan->sesscount);
			for (j = 0; j < chan->sesscount; j++) {
				sess = &chan->session[j];
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s%d\n\n", "SESSION", j);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "sid:",
						sess->smmu.cb);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "SECURE:",
						sess->smmu.secure);
			}
		}
	} else {
		/* per-process node: ids, buffers, maps and contexts */
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"PROCESS_ID:", fl->tgid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"CHANNEL_ID:", fl->cid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"SSRCOUNT:", fl->ssrcount);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s\n",
				"LIST OF BUFS:");
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %p %s %p %s %llx\n", "buf:",
					buf, "buf->virt:", buf->virt,
					"buf->phys:", buf->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF MAPS:");
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %p %s %lx %s %llx\n",
					"map:", map,
					"map->va:", map->va,
					"map->phys:", map->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF PENDING SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %p %s %u %s %u %s %u\n",
					"smqcontext:", ictx,
					"sc:", ictx->sc,
					"tid:", ictx->pid,
					"handle", ictx->rpra->h);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF INTERRUPTED SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %p %s %u %s %u %s %u\n",
					"smqcontext:", ictx,
					"sc:", ictx->sc,
					"tid:", ictx->pid,
					"handle", ictx->rpra->h);
		}
		spin_unlock(&fl->hlock);
	}
	/* scnprintf already bounds len; this clamp is defensive */
	if (len > DEBUGFS_SIZE)
		len = DEBUGFS_SIZE;
	ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
	kfree(fileinfo);
bail:
	return ret;
}
2107
/* Per-client debugfs node operations; read dumps bufs/maps/smq contexts. */
static const struct file_operations debugfs_fops = {
	.open = fastrpc_debugfs_open,
	.read = fastrpc_debugfs_read,
};
/*
 * Lazily bring up the glink transport for the channel selected by fl->cid.
 *
 * Serialised by me->smd_mutex.  The channel is refcounted: if
 * kref_get_unless_zero() fails (count was zero) or no transport exists yet,
 * this caller registers and opens the glink link, waits for the open to
 * complete, and initialises the kref; later openers just take a reference.
 *
 * Returns 0 on success or a negative error set by VERIFY().
 */
static int fastrpc_channel_open(struct fastrpc_file *fl)
{
	struct fastrpc_apps *me = &gfa;
	int cid, err = 0;

	mutex_lock(&me->smd_mutex);

	VERIFY(err, fl && fl->sctx);
	if (err)
		goto bail;
	cid = fl->cid;
	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
	if (err)
		goto bail;
	/* Record the SSR generation this file last saw the channel at. */
	fl->ssrcount = me->channel[cid].ssrcount;
	if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
	    (me->channel[cid].chan == 0)) {
		VERIFY(err, 0 == fastrpc_glink_register(cid, me));
		if (err)
			goto bail;
		VERIFY(err, 0 == fastrpc_glink_open(cid));
		if (err)
			goto bail;

		/*
		 * Wait for the link-up completion signalled by the glink
		 * callback; a zero (timeout) return fails the open.
		 */
		VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
							RPC_TIMEOUT));
		if (err) {
			me->channel[cid].chan = 0;
			goto bail;
		}
		kref_init(&me->channel[cid].kref);
		pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
			MAJOR(me->dev_no), cid);
		/* Catch up prevssrcount after a subsystem restart. */
		if (me->channel[cid].ssrcount !=
		    me->channel[cid].prevssrcount) {
			me->channel[cid].prevssrcount =
					me->channel[cid].ssrcount;
		}
	}

bail:
	mutex_unlock(&me->smd_mutex);
	return err;
}
2156
/*
 * open() handler for the fastrpc character device.
 *
 * Allocates the per-client struct fastrpc_file, creates a per-process
 * debugfs node named after current->comm, initialises the client's
 * context/map/buf lists, and links the client into the global driver
 * list.  The RPC channel itself is bound lazily via FASTRPC_IOCTL_GETINFO
 * and opened on first invoke.
 */
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	int err = 0;
	struct dentry *debugfs_file;
	struct fastrpc_file *fl = 0;
	struct fastrpc_apps *me = &gfa;

	VERIFY(err, fl = kzalloc(sizeof(*fl), GFP_KERNEL));
	if (err)
		return err;
	/* Debugfs is best-effort: a failure here does not fail open(). */
	debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
						fl, &debugfs_fops);
	context_list_ctor(&fl->clst);
	spin_lock_init(&fl->hlock);
	INIT_HLIST_HEAD(&fl->maps);
	INIT_HLIST_HEAD(&fl->bufs);
	INIT_HLIST_NODE(&fl->hn);
	fl->tgid = current->tgid;
	fl->apps = me;
	fl->mode = FASTRPC_MODE_SERIAL;
	fl->cid = -1;	/* channel selected later by FASTRPC_IOCTL_GETINFO */
	if (debugfs_file != NULL)
		fl->debugfs_file = debugfs_file;
	memset(&fl->perf, 0, sizeof(fl->perf));
	filp->private_data = fl;
	spin_lock(&me->hlock);
	hlist_add_head(&fl->hn, &me->drivers);
	spin_unlock(&me->hlock);
	return 0;
}
2187
/*
 * FASTRPC_IOCTL_GETINFO backend.
 *
 * On the first call for a file (fl->cid == -1), *info carries the
 * requested channel id: validate it, bind the file to that channel,
 * snapshot the channel's ssrcount, and allocate an SMMU session.
 * On return *info reports whether the session's SMMU is enabled (1/0).
 */
static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
{
	int err = 0;
	uint32_t cid;

	VERIFY(err, fl != 0);
	if (err)
		goto bail;
	if (fl->cid == -1) {
		cid = *info;
		VERIFY(err, cid < NUM_CHANNELS);
		if (err)
			goto bail;
		fl->cid = cid;
		fl->ssrcount = fl->apps->channel[cid].ssrcount;
		VERIFY(err, !fastrpc_session_alloc_locked(
				&fl->apps->channel[cid], 0, &fl->sctx));
		if (err)
			goto bail;
	}
	*info = (fl->sctx->smmu.enabled ? 1 : 0);
bail:
	return err;
}
2212
2213static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
2214 unsigned long ioctl_param)
2215{
2216 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07002217 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002218 struct fastrpc_ioctl_mmap mmap;
2219 struct fastrpc_ioctl_munmap munmap;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002220 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002221 struct fastrpc_ioctl_perf perf;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002222 } p;
2223 void *param = (char *)ioctl_param;
2224 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2225 int size = 0, err = 0;
2226 uint32_t info;
2227
2228 p.inv.fds = 0;
2229 p.inv.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07002230 p.inv.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002231
2232 switch (ioctl_num) {
2233 case FASTRPC_IOCTL_INVOKE:
2234 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07002235 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002236 case FASTRPC_IOCTL_INVOKE_FD:
2237 if (!size)
2238 size = sizeof(struct fastrpc_ioctl_invoke_fd);
2239 /* fall through */
2240 case FASTRPC_IOCTL_INVOKE_ATTRS:
2241 if (!size)
2242 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07002243 /* fall through */
2244 case FASTRPC_IOCTL_INVOKE_CRC:
2245 if (!size)
2246 size = sizeof(struct fastrpc_ioctl_invoke_crc);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002247 VERIFY(err, 0 == copy_from_user(&p.inv, param, size));
2248 if (err)
2249 goto bail;
2250 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
2251 0, &p.inv)));
2252 if (err)
2253 goto bail;
2254 break;
2255 case FASTRPC_IOCTL_MMAP:
2256 VERIFY(err, 0 == copy_from_user(&p.mmap, param,
2257 sizeof(p.mmap)));
2258 if (err)
2259 goto bail;
2260 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
2261 if (err)
2262 goto bail;
2263 VERIFY(err, 0 == copy_to_user(param, &p.mmap, sizeof(p.mmap)));
2264 if (err)
2265 goto bail;
2266 break;
2267 case FASTRPC_IOCTL_MUNMAP:
2268 VERIFY(err, 0 == copy_from_user(&p.munmap, param,
2269 sizeof(p.munmap)));
2270 if (err)
2271 goto bail;
2272 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
2273 &p.munmap)));
2274 if (err)
2275 goto bail;
2276 break;
2277 case FASTRPC_IOCTL_SETMODE:
2278 switch ((uint32_t)ioctl_param) {
2279 case FASTRPC_MODE_PARALLEL:
2280 case FASTRPC_MODE_SERIAL:
2281 fl->mode = (uint32_t)ioctl_param;
2282 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002283 case FASTRPC_MODE_PROFILE:
2284 fl->profile = (uint32_t)ioctl_param;
2285 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002286 default:
2287 err = -ENOTTY;
2288 break;
2289 }
2290 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002291 case FASTRPC_IOCTL_GETPERF:
2292 VERIFY(err, 0 == copy_from_user(&p.perf,
2293 param, sizeof(p.perf)));
2294 if (err)
2295 goto bail;
2296 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
2297 if (p.perf.keys) {
2298 char *keys = PERF_KEYS;
2299
2300 VERIFY(err, 0 == copy_to_user((char *)p.perf.keys,
2301 keys, strlen(keys)+1));
2302 if (err)
2303 goto bail;
2304 }
2305 if (p.perf.data) {
2306 VERIFY(err, 0 == copy_to_user((int64_t *)p.perf.data,
2307 &fl->perf, sizeof(fl->perf)));
2308 }
2309 VERIFY(err, 0 == copy_to_user(param, &p.perf, sizeof(p.perf)));
2310 if (err)
2311 goto bail;
2312 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002313 case FASTRPC_IOCTL_GETINFO:
Sathish Ambley36849af2017-02-02 09:35:55 -08002314 VERIFY(err, 0 == copy_from_user(&info, param, sizeof(info)));
2315 if (err)
2316 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002317 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
2318 if (err)
2319 goto bail;
2320 VERIFY(err, 0 == copy_to_user(param, &info, sizeof(info)));
2321 if (err)
2322 goto bail;
2323 break;
2324 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002325 p.init.attrs = 0;
2326 p.init.siglen = 0;
2327 size = sizeof(struct fastrpc_ioctl_init);
2328 /* fall through */
2329 case FASTRPC_IOCTL_INIT_ATTRS:
2330 if (!size)
2331 size = sizeof(struct fastrpc_ioctl_init_attrs);
2332 VERIFY(err, 0 == copy_from_user(&p.init, param, size));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002333 if (err)
2334 goto bail;
2335 VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
2336 if (err)
2337 goto bail;
2338 break;
2339
2340 default:
2341 err = -ENOTTY;
2342 pr_info("bad ioctl: %d\n", ioctl_num);
2343 break;
2344 }
2345 bail:
2346 return err;
2347}
2348
2349static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
2350 unsigned long code,
2351 void *data)
2352{
2353 struct fastrpc_apps *me = &gfa;
2354 struct fastrpc_channel_ctx *ctx;
2355 int cid;
2356
2357 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
2358 cid = ctx - &me->channel[0];
2359 if (code == SUBSYS_BEFORE_SHUTDOWN) {
2360 mutex_lock(&me->smd_mutex);
2361 ctx->ssrcount++;
2362 if (ctx->chan) {
2363 fastrpc_glink_close(ctx->chan, cid);
2364 ctx->chan = 0;
2365 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
2366 gcinfo[cid].name, MAJOR(me->dev_no), cid);
2367 }
2368 mutex_unlock(&me->smd_mutex);
2369 fastrpc_notify_drivers(me, cid);
2370 }
2371
2372 return NOTIFY_DONE;
2373}
2374
/* Character-device entry points for the fastrpc device node. */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};
2381
/* Devicetree compatibles handled by this driver (platform nodes, SMMU
 * context banks, and the ADSP shared memory region).
 */
static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};
2389
/*
 * Probe one "qcom,msm-fastrpc-compute-cb" node.
 *
 * Matches the node's "label" property against a channel name, parses
 * the "iommus" phandle for the context-bank id, creates and attaches
 * an ARM IOMMU mapping for the bank, and records the new session in
 * the channel's session table.  Also (re)creates the shared "global"
 * debugfs file.
 */
static int fastrpc_cb_probe(struct device *dev)
{
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	struct of_phandle_args iommuspec;
	const char *name;
	unsigned int start = 0x80000000;	/* default IOVA window base */
	int err = 0, i;
	int secure_vmid = VMID_CP_PIXEL;

	VERIFY(err, 0 != (name = of_get_property(dev->of_node, "label", NULL)));
	if (err)
		goto bail;
	/* Find the channel whose name matches this context bank's label. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		if (!strcmp(name, gcinfo[i].name))
			break;
	}
	VERIFY(err, i < NUM_CHANNELS);
	if (err)
		goto bail;
	chan = &gcinfo[i];
	VERIFY(err, chan->sesscount < NUM_SESSIONS);
	if (err)
		goto bail;

	VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
						"#iommu-cells", 0, &iommuspec));
	if (err)
		goto bail;
	sess = &chan->session[chan->sesscount];
	/* Low nibble of the first iommus cell is the context-bank id. */
	sess->smmu.cb = iommuspec.args[0] & 0xf;
	sess->used = 0;
	sess->smmu.coherent = of_property_read_bool(dev->of_node,
						"dma-coherent");
	sess->smmu.secure = of_property_read_bool(dev->of_node,
						"qcom,secure-context-bank");
	/* Secure banks use a lower IOVA base for their mapping window. */
	if (sess->smmu.secure)
		start = 0x60000000;
	VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
			arm_iommu_create_mapping(&platform_bus_type,
						start, 0x7fffffff)));
	if (err)
		goto bail;

	if (sess->smmu.secure)
		iommu_domain_set_attr(sess->smmu.mapping->domain,
				DOMAIN_ATTR_SECURE_VMID,
				&secure_vmid);

	VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
	if (err)
		goto bail;
	sess->dev = dev;
	sess->smmu.enabled = 1;
	chan->sesscount++;
	debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
						NULL, &debugfs_fops);
bail:
	return err;
}
2452
/*
 * Platform probe; dispatches on the node's compatible string:
 *  - compute-cb nodes: per-context-bank SMMU setup (fastrpc_cb_probe);
 *  - adsprpc-mem-region: find the ADSP ION heap's CMA region and
 *    hyp-assign it so the remote subsystems can access it;
 *  - otherwise: populate child nodes so the above get probed.
 */
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;
		/* Walk the ION heaps looking for the ADSP heap's CMA area. */
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
						VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					};

			/* Share the region RWX with HLOS + remote Q6 VMs. */
			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
					srcVM, 1, destVM, destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					fastrpc_match_table,
					NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}
2516
2517static void fastrpc_deinit(void)
2518{
2519 struct fastrpc_apps *me = &gfa;
2520 struct fastrpc_channel_ctx *chan = gcinfo;
2521 int i, j;
2522
2523 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
2524 if (chan->chan) {
2525 kref_put_mutex(&chan->kref,
2526 fastrpc_channel_close, &me->smd_mutex);
2527 chan->chan = 0;
2528 }
2529 for (j = 0; j < NUM_SESSIONS; j++) {
2530 struct fastrpc_session_ctx *sess = &chan->session[j];
2531
2532 if (sess->smmu.enabled) {
2533 arm_iommu_detach_device(sess->dev);
2534 sess->dev = 0;
2535 }
2536 if (sess->smmu.mapping) {
2537 arm_iommu_release_mapping(sess->smmu.mapping);
2538 sess->smmu.mapping = 0;
2539 }
2540 }
2541 }
2542}
2543
/* Platform driver registration; probe dispatches on compatible string. */
static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};
2552
/*
 * Module init: register the platform driver, allocate the char-device
 * region and cdev, create the class and a single device node (minor 0,
 * shared by all channels), hook a subsystem-restart notifier per
 * channel, create the ION client, and set up the debugfs root.
 * Unwinds in reverse order through the labelled bail-out ladder.
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	struct device *dev = 0;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				1));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	/* One device node at minor 0, named after channel 0. */
	dev = device_create(me->class, NULL,
				MKDEV(MAJOR(me->dev_no), 0),
				NULL, gcinfo[0].name);
	VERIFY(err, !IS_ERR_OR_NULL(dev));
	if (err)
		goto device_create_bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		me->channel[i].dev = dev;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	debugfs_root = debugfs_create_dir("adsprpc", NULL);
	return 0;
device_create_bail:
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (me->channel[i].handle)
			subsys_notif_unregister_notifier(me->channel[i].handle,
							&me->channel[i].nb);
	}
	if (!IS_ERR_OR_NULL(dev))
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}
2621
/*
 * Module exit: tear down client file state and channels, unregister
 * SSR notifiers, then destroy the device/class/cdev, release the
 * chrdev region, the ION client, and the debugfs tree.
 */
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;
	int i;

	fastrpc_file_list_dtor(me);
	fastrpc_deinit();
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		/*
		 * NOTE(review): init creates only the device at minor 0;
		 * device_destroy on minors > 0 appears to be a no-op here —
		 * confirm against device_create usage in init.
		 */
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						&me->channel[i].nb);
	}
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
	ion_client_destroy(me->client);
	debugfs_remove_recursive(debugfs_root);
}
2642
/*
 * late_initcall: presumably so provider subsystems (ion, glink, smmu)
 * are initialised before this driver — confirm ordering requirement.
 */
late_initcall(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");