/*
 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/msm_ion.h>
#include <soc/qcom/secure_buffer.h>
#include <soc/qcom/glink.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>
#include <linux/scatterlist.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/sort.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <asm/dma-iommu.h>
#include "adsprpc_compat.h"
#include "adsprpc_shared.h"

#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
#define TZ_PIL_AUTH_QDSP6_PROC 1
#define FASTRPC_ENOSUCH 39
#define VMID_SSC_Q6 5
#define VMID_ADSP_Q6 6

#define RPC_TIMEOUT (5 * HZ)
#define BALIGN 128
#define NUM_CHANNELS 4	/* adsp, mdsp, slpi, cdsp */
#define NUM_SESSIONS 9	/* 8 compute, 1 cpz */
#define M_FDLIST 16

#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)

#define FASTRPC_LINK_STATE_DOWN (0x0)
#define FASTRPC_LINK_STATE_UP (0x1)
#define FASTRPC_LINK_DISCONNECTED (0x0)
#define FASTRPC_LINK_CONNECTING (0x1)
#define FASTRPC_LINK_CONNECTED (0x3)
#define FASTRPC_LINK_DISCONNECTING (0x7)

static int fastrpc_glink_open(int cid);
static void fastrpc_glink_close(void *chan, int cid);

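/*
 * Page-arithmetic helpers used when marshaling buffer arguments:
 * page-align an address, compute its offset within a page, count the
 * pages a buffer spans, and round a size up to a whole page. For
 * example, buf_num_pages(0x1ff8, 16) touches the 0x1000 and 0x2000
 * pages and so returns 2.
 */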
static inline uint64_t buf_page_start(uint64_t buf)
{
	uint64_t start = (uint64_t) buf & PAGE_MASK;
	return start;
}

static inline uint64_t buf_page_offset(uint64_t buf)
{
	uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
	return offset;
}

static inline int buf_num_pages(uint64_t buf, ssize_t len)
{
	uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
	uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
	int nPages = end - start + 1;
	return nPages;
}

static inline uint64_t buf_page_size(uint32_t size)
{
	uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;

	return sz > PAGE_SIZE ? sz : PAGE_SIZE;
}

static inline void *uint64_to_ptr(uint64_t addr)
{
	void *ptr = (void *)((uintptr_t)addr);

	return ptr;
}

static inline uint64_t ptr_to_uint64(void *ptr)
{
	uint64_t addr = (uint64_t)((uintptr_t)ptr);

	return addr;
}

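/*
 * Core driver state. A fastrpc_file is created per opened device node
 * and owns its buffer cache, memory maps and invoke contexts; a
 * fastrpc_channel_ctx describes one glink channel to a DSP subsystem;
 * a smq_invoke_ctx carries a single remote call from submission until
 * the DSP response completes it.
 */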
struct fastrpc_file;

struct fastrpc_buf {
	struct hlist_node hn;
	struct fastrpc_file *fl;
	void *virt;
	uint64_t phys;
	ssize_t size;
};

struct fastrpc_ctx_lst;

struct overlap {
	uintptr_t start;
	uintptr_t end;
	int raix;
	uintptr_t mstart;
	uintptr_t mend;
	uintptr_t offset;
};

struct smq_invoke_ctx {
	struct hlist_node hn;
	struct completion work;
	int retval;
	int pid;
	int tgid;
	remote_arg_t *lpra;
	remote_arg64_t *rpra;
	int *fds;
	unsigned int *attrs;
	struct fastrpc_mmap **maps;
	struct fastrpc_buf *buf;
	ssize_t used;
	struct fastrpc_file *fl;
	uint32_t sc;
	struct overlap *overs;
	struct overlap **overps;
	struct smq_msg msg;
};

struct fastrpc_ctx_lst {
	struct hlist_head pending;
	struct hlist_head interrupted;
};

struct fastrpc_smmu {
	struct dma_iommu_mapping *mapping;
	int cb;
	int enabled;
	int faults;
	int secure;
	int coherent;
};

struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};

struct fastrpc_glink_info {
	int link_state;
	int port_state;
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};

struct fastrpc_channel_ctx {
	char *name;
	char *subsys;
	void *chan;
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;
	struct notifier_block nb;
	struct kref kref;
	int sesscount;
	int ssrcount;
	void *handle;
	int prevssrcount;
	int vmid;
	struct fastrpc_glink_info link;
};

struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;
	spinlock_t hlock;
	struct ion_client *client;
	struct device *dev;
};

struct fastrpc_mmap {
	struct hlist_node hn;
	struct fastrpc_file *fl;
	struct fastrpc_apps *apps;
	int fd;
	uint32_t flags;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;
	ssize_t size;
	uintptr_t va;
	ssize_t len;
	int refs;
	uintptr_t raddr;
	int uncached;
	int secure;
	uintptr_t attr;
};

struct fastrpc_file {
	struct hlist_node hn;
	spinlock_t hlock;
	struct hlist_head maps;
	struct hlist_head bufs;
	struct fastrpc_ctx_lst clst;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_session_ctx *secsctx;
	uint32_t mode;
	int tgid;
	int cid;
	int ssrcount;
	int pd;
	struct fastrpc_apps *apps;
};

static struct fastrpc_apps gfa;

static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "dsps",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};

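/*
 * Free a buffer, or stash it on the file's buffer cache when @cache is
 * set so a later fastrpc_buf_alloc() can reuse it. Before the memory is
 * handed back, ownership is reassigned to HLOS on channels that run
 * with a remote vmid.
 */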
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == 0 ? 0 : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->dev, buf->size, buf->virt,
				  buf->phys);
	}
	kfree(buf);
}

static void fastrpc_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			hlist_del_init(&buf->hn);
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}

static void fastrpc_mmap_add(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl = map->fl;

	spin_lock(&fl->hlock);
	hlist_add_head(&map->hn, &fl->maps);
	spin_unlock(&fl->hlock);
}

static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
	ssize_t len, int mflags, int refs, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = 0, *map;
	struct hlist_node *n;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (va >= map->va &&
		    va + len <= map->va + map->len &&
		    map->fd == fd) {
			if (refs)
				map->refs++;
			match = map;
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       ssize_t len, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = 0, *map;
	struct hlist_node *n;
	struct fastrpc_apps *me = &gfa;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
		if (map->raddr == va &&
		    map->raddr + map->len == va + len &&
		    map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (map->raddr == va &&
		    map->raddr + map->len == va + len &&
		    map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

static void fastrpc_mmap_free(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;
	int destVM[1] = {VMID_HLOS};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	if (!map)
		return;
	fl = map->fl;
	spin_lock(&fl->hlock);
	map->refs--;
	if (!map->refs)
		hlist_del_init(&map->hn);
	spin_unlock(&fl->hlock);
	if (map->refs > 0)
		return;
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	if (!IS_ERR_OR_NULL(map->handle))
		ion_free(fl->apps->client, map->handle);
	if (sess->smmu.enabled) {
		if (map->size || map->phys)
			msm_dma_unmap_sg(sess->dev,
				map->table->sgl,
				map->table->nents, DMA_BIDIRECTIONAL,
				map->buf);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid && map->phys) {
		int srcVM[2] = {VMID_HLOS, vmid};

		hyp_assign_phys(map->phys, buf_page_size(map->size),
				srcVM, 2, destVM, destVMperm, 1);
	}

	if (!IS_ERR_OR_NULL(map->table))
		dma_buf_unmap_attachment(map->attach, map->table,
				DMA_BIDIRECTIONAL);
	if (!IS_ERR_OR_NULL(map->attach))
		dma_buf_detach(map->buf, map->attach);
	if (!IS_ERR_OR_NULL(map->buf))
		dma_buf_put(map->buf);
	kfree(map);
}

static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
				 struct fastrpc_session_ctx **session);

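/*
 * Import the ion buffer backing @fd and map it for DSP access: attach
 * the dma-buf to the (secure or default) session device, run it through
 * the SMMU when one is enabled, and hyp-assign the pages to the remote
 * vmid if the channel requires it. An existing map covering
 * [va, va + len) for the same fd is reused with its refcount bumped.
 */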
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, ssize_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = 0;
	unsigned long attrs;
	unsigned long flags;
	int err = 0, vmid;

	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	VERIFY(err, !IS_ERR_OR_NULL(map->handle =
			ion_import_dma_buf_fd(fl->apps->client, fd)));
	if (err)
		goto bail;
	VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
					  &flags));
	if (err)
		goto bail;

	map->uncached = !ION_IS_CACHED(flags);
	if (map->attr & FASTRPC_ATTR_NOVA)
		map->uncached = 1;

	map->secure = flags & ION_FLAG_SECURE;
	if (map->secure) {
		if (!fl->secsctx)
			err = fastrpc_session_alloc(chan, 1,
						    &fl->secsctx);
		if (err)
			goto bail;
	}
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->attach =
			dma_buf_attach(map->buf, sess->dev)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
	if (err)
		goto bail;
	if (sess->smmu.enabled) {
		attrs = DMA_ATTR_EXEC_MAPPING;
		VERIFY(err, map->table->nents ==
			msm_dma_map_sg_attrs(sess->dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
		if (err)
			goto bail;
	} else {
		VERIFY(err, map->table->nents == 1);
		if (err)
			goto bail;
	}
	map->phys = sg_dma_address(map->table->sgl);
	if (sess->smmu.cb) {
		map->phys += ((uint64_t)sess->smmu.cb << 32);
		map->size = sg_dma_len(map->table->sgl);
	} else {
		map->size = buf_page_size(len);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(map->phys,
				buf_page_size(map->size),
				srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}
	map->va = va;
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}

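/*
 * Allocate a DMA-coherent buffer, preferring the smallest cached buffer
 * on the file that is large enough. If a fresh allocation fails, the
 * cache is drained once and the allocation retried.
 */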
static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
			     struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = 0, *fr = 0;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = 0;
	VERIFY(err, buf = kzalloc(sizeof(*buf), GFP_KERNEL));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = 0;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
				       (void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
					       (void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}

static int context_restore_interrupted(struct fastrpc_file *fl,
				       struct fastrpc_ioctl_invoke_attrs *inv,
				       struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = 0, *ictx = 0;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int overlap_ptr_cmp(const void *a, const void *b)
{
	struct overlap *pa = *((struct overlap **)a);
	struct overlap *pb = *((struct overlap **)b);
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);
	return st == 0 ? ed : st;
}

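/*
 * Compute, for each argument buffer, the sub-range that actually needs
 * to be copied. Buffers are sorted by start address (ties broken by the
 * larger end first), then swept in order while tracking the furthest
 * end seen so far ("max"): a buffer starting below max.end is partially
 * or fully covered by an earlier buffer, so its copy window
 * [mstart, mend) is clipped to begin at max.end and its offset records
 * how far into the buffer the window starts. A fully covered buffer
 * ends up with an empty window, so overlapping arguments are copied
 * only once.
 */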
static void context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, 0);
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
}

#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst), (src),\
							(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((dst), (src),\
						      (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

static void context_free(struct smq_invoke_ctx *ctx);

static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			 struct fastrpc_ioctl_invoke_attrs *invokefd,
			 struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}

	ctx->sc = invoke->sc;
	if (bufs)
		context_build_overlap(ctx);
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = current->tgid;
	init_completion(&ctx->work);

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}

static void context_save_interrupted(struct smq_invoke_ctx *ctx)
{
	struct fastrpc_ctx_lst *clst = &ctx->fl->clst;

	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	hlist_add_head(&ctx->hn, &clst->interrupted);
	spin_unlock(&ctx->fl->hlock);
	/* free the cache on power collapse */
	fastrpc_buf_list_free(ctx->fl);
}

static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);

	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i]);
	fastrpc_buf_free(ctx->buf, 1);
	kfree(ctx);
}

static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}

static void fastrpc_notify_users(struct fastrpc_file *me)
{
	struct smq_invoke_ctx *ictx;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
		complete(&ictx->work);
	}
	hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
		complete(&ictx->work);
	}
	spin_unlock(&me->hlock);
}

static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
{
	struct fastrpc_file *fl;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		if (fl->cid == cid)
			fastrpc_notify_users(fl);
	}
	spin_unlock(&me->hlock);
}

static void context_list_ctor(struct fastrpc_ctx_lst *me)
{
	INIT_HLIST_HEAD(&me->interrupted);
	INIT_HLIST_HEAD(&me->pending);
}

static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
{
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct smq_invoke_ctx *ictx = 0, *ctxfree;
	struct hlist_node *n;

	do {
		ctxfree = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
	do {
		ctxfree = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
}

static int fastrpc_file_free(struct fastrpc_file *fl);
static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
{
	struct fastrpc_file *fl, *free;
	struct hlist_node *n;

	do {
		free = 0;
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
			hlist_del_init(&fl->hn);
			free = fl;
			break;
		}
		spin_unlock(&me->hlock);
		if (free)
			fastrpc_file_free(free);
	} while (free);
}

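/*
 * Marshal the arguments of an invocation into one shared message. The
 * metadata header (remote args, invoke-buf list, page tables and a
 * 16-entry fd list) is laid out first, followed by packed copies of
 * the non-ion input/output buffers; ion-backed arguments are passed by
 * physical page instead of being copied. Cached buffers are flushed at
 * the end so the DSP observes the writes.
 */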
static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
{
	remote_arg64_t *rpra;
	remote_arg_t *lpra = ctx->lpra;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages, *ipage;
	uint32_t sc = ctx->sc;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	int handles, bufs = inbufs + outbufs;
	uintptr_t args;
	ssize_t rlen = 0, copylen = 0, metalen = 0;
	int i, oix;
	int err = 0;
	int mflags = 0;
	uint64_t *fdlist;

	/* calculate size of the metadata */
	rpra = 0;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;

	for (i = 0; i < bufs; ++i) {
		uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
		ssize_t len = lpra[i].buf.len;

		if (ctx->fds[i] && (ctx->fds[i] != -1))
			fastrpc_mmap_create(ctx->fl, ctx->fds[i],
					ctx->attrs[i], buf, len,
					mflags, &ctx->maps[i]);
		ipage += 1;
	}
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	for (i = bufs; i < bufs + handles; i++) {
		VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
				FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
		if (err)
			goto bail;
		ipage += 1;
	}
	metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST);
	/* calculate len required for copying */
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		ssize_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (ctx->maps[i])
			continue;
		if (ctx->overps[oix]->offset == 0)
			copylen = ALIGN(copylen, BALIGN);
		copylen += ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
	}
	ctx->used = copylen;

	/* allocate new buffer */
	if (copylen) {
		VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
		if (err)
			goto bail;
	}
	/* copy metadata */
	rpra = ctx->buf->virt;
	ctx->rpra = rpra;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;
	args = (uintptr_t)ctx->buf->virt + metalen;
	for (i = 0; i < bufs + handles; ++i) {
		if (lpra[i].buf.len)
			list[i].num = 1;
		else
			list[i].num = 0;
		list[i].pgidx = ipage - pages;
		ipage++;
	}
	/* map ion buffers */
	for (i = 0; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];
		uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
		ssize_t len = lpra[i].buf.len;

		rpra[i].buf.pv = 0;
		rpra[i].buf.len = len;
		if (!len)
			continue;
		if (map) {
			struct vm_area_struct *vma;
			uintptr_t offset;
			int num = buf_num_pages(buf, len);
			int idx = list[i].pgidx;

			if (map->attr & FASTRPC_ATTR_NOVA) {
				offset = 0;
			} else {
				down_read(&current->mm->mmap_sem);
				VERIFY(err, NULL != (vma = find_vma(current->mm,
								map->va)));
				if (err) {
					up_read(&current->mm->mmap_sem);
					goto bail;
				}
				offset = buf_page_start(buf) - vma->vm_start;
				up_read(&current->mm->mmap_sem);
				VERIFY(err, offset < (uintptr_t)map->size);
				if (err)
					goto bail;
			}
			pages[idx].addr = map->phys + offset;
			pages[idx].size = num << PAGE_SHIFT;
		}
		rpra[i].buf.pv = buf;
	}
	for (i = bufs; i < bufs + handles; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		pages[i].addr = map->phys;
		pages[i].size = map->size;
	}
	fdlist = (uint64_t *)&pages[bufs + handles];
	for (i = 0; i < M_FDLIST; i++)
		fdlist[i] = 0;
	/* copy non ion buffers */
	rlen = copylen - metalen;
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];
		int mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
		uint64_t buf;
		ssize_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (map)
			continue;
		if (ctx->overps[oix]->offset == 0) {
			rlen -= ALIGN(args, BALIGN) - args;
			args = ALIGN(args, BALIGN);
		}
		VERIFY(err, rlen >= mlen);
		if (err)
			goto bail;
		rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
		pages[list[i].pgidx].addr = ctx->buf->phys -
					    ctx->overps[oix]->offset +
					    (copylen - rlen);
		pages[list[i].pgidx].addr =
			buf_page_start(pages[list[i].pgidx].addr);
		buf = rpra[i].buf.pv;
		pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
		if (i < inbufs) {
			K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
					lpra[i].buf.pv, len);
			if (err)
				goto bail;
		}
		args = args + mlen;
		rlen -= mlen;
	}

	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];

		if (ctx->fl->sctx->smmu.coherent)
			continue;
		if (map && map->uncached)
			continue;
		if (rpra[i].buf.len && ctx->overps[oix]->mstart)
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
			uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
	}
	for (i = bufs; i < bufs + handles; i++) {
		rpra[i].dma.fd = ctx->fds[i];
		rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
		rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
	}
	if (!ctx->fl->sctx->smmu.coherent)
		dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
bail:
	return err;
}

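/*
 * Unmarshal results after the DSP completes: copy each output buffer
 * that was packed into the message back to the caller, release the
 * maps that backed ion output buffers, and free any maps whose fds the
 * DSP handed back in the message's fd list.
 */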
static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		    remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			fastrpc_mmap_free(ctx->maps[i]);
			ctx->maps[i] = 0;
		}
	}
	if (inbufs + outbufs + handles) {
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap);
		}
	}
bail:
	return err;
}

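/*
 * Cache maintenance for non-coherent SMMU sessions. inv_args_pre()
 * flushes the unaligned edges of output buffers, presumably so stale
 * dirty lines cannot be written back over data the DSP produces;
 * inv_args() invalidates output buffers and the message area so the
 * CPU reads what the DSP wrote (it runs before the send in serial
 * mode and after it in parallel mode).
 */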
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
							rpra[i].buf.len);
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}

static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						+ rpra[i].buf.len));
	}

	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}

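/*
 * Hand a marshaled invocation to the DSP over glink. The context
 * pointer itself, tagged with the protection-domain bit in fl->pd, is
 * used as the message id that the response path hands back to
 * context_notify_user().
 */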
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, 0 != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = current->tgid;
	msg->tid = current->pid;
	if (kernel)
		msg->pid = 0;
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
			FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
bail:
	return err;
}

static void fastrpc_init(struct fastrpc_apps *me)
{
	int i;

	INIT_HLIST_HEAD(&me->drivers);
	spin_lock_init(&me->hlock);
	mutex_init(&me->smd_mutex);
	me->channel = &gcinfo[0];
	for (i = 0; i < NUM_CHANNELS; i++) {
		init_completion(&me->channel[i].work);
		me->channel[i].sesscount = 0;
	}
}

static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);

static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
				   uint32_t kernel,
				   struct fastrpc_ioctl_invoke_attrs *inv)
{
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;
	int cid = fl->cid;
	int interrupted = 0;
	int err = 0;

	if (!kernel) {
		VERIFY(err, 0 == context_restore_interrupted(fl, inv,
								&ctx));
		if (err)
			goto bail;
		if (fl->sctx->smmu.faults)
			err = FASTRPC_ENOSUCH;
		if (err)
			goto bail;
		if (ctx)
			goto wait;
	}

	VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
	if (err)
		goto bail;

	if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
		VERIFY(err, 0 == get_args(kernel, ctx));
		if (err)
			goto bail;
	}

	if (!fl->sctx->smmu.coherent) {
		inv_args_pre(ctx);
		if (mode == FASTRPC_MODE_SERIAL)
			inv_args(ctx);
	}
	VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
	if (err)
		goto bail;
	if (mode == FASTRPC_MODE_PARALLEL && !fl->sctx->smmu.coherent)
		inv_args(ctx);
wait:
	if (kernel)
		wait_for_completion(&ctx->work);
	else {
		interrupted = wait_for_completion_interruptible(&ctx->work);
		VERIFY(err, 0 == (err = interrupted));
		if (err)
			goto bail;
	}
	VERIFY(err, 0 == (err = ctx->retval));
	if (err)
		goto bail;
	VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
	if (err)
		goto bail;
bail:
	if (ctx && interrupted == -ERESTARTSYS)
		context_save_interrupted(ctx);
	else if (ctx)
		context_free(ctx);
	if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
		err = -ECONNRESET;
	return err;
}

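/*
 * Set up the remote end of a process. FASTRPC_INIT_ATTACH attaches the
 * caller to an already-running DSP process (fl->pd = 0), while
 * FASTRPC_INIT_CREATE appears to map the supplied ELF image and backing
 * memory and ask the DSP to spawn a new dynamic process (fl->pd = 1);
 * the pd value is later folded into the context id sent with each
 * invoke.
 */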
static int fastrpc_init_process(struct fastrpc_file *fl,
				struct fastrpc_ioctl_init *init)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_attrs ioctl;
	struct smq_phy_page pages[1];
	struct fastrpc_mmap *file = 0, *mem = 0;

	if (init->flags == FASTRPC_INIT_ATTACH) {
		remote_arg_t ra[1];
		int tgid = current->tgid;

		ra[0].buf.pv = (void *)&tgid;
		ra[0].buf.len = sizeof(tgid);
		ioctl.inv.handle = 1;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = 0;
		ioctl.attrs = 0;
		fl->pd = 0;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else if (init->flags == FASTRPC_INIT_CREATE) {
		remote_arg_t ra[4];
		int fds[4];
		int mflags = 0;
		struct {
			int pgid;
			int namelen;
			int filelen;
			int pageslen;
		} inbuf;

		inbuf.pgid = current->tgid;
		inbuf.namelen = strlen(current->comm) + 1;
		inbuf.filelen = init->filelen;
		fl->pd = 1;
		if (init->filelen) {
			VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
				init->file, init->filelen, mflags, &file));
			if (err)
				goto bail;
		}
		inbuf.pageslen = 1;
		VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
				init->mem, init->memlen, mflags, &mem));
		if (err)
			goto bail;
		inbuf.pageslen = 1;
		ra[0].buf.pv = (void *)&inbuf;
		ra[0].buf.len = sizeof(inbuf);
		fds[0] = 0;

		ra[1].buf.pv = (void *)current->comm;
		ra[1].buf.len = inbuf.namelen;
		fds[1] = 0;

		ra[2].buf.pv = (void *)init->file;
		ra[2].buf.len = inbuf.filelen;
		fds[2] = init->filefd;

		pages[0].addr = mem->phys;
		pages[0].size = mem->size;
		ra[3].buf.pv = (void *)pages;
		ra[3].buf.len = 1 * sizeof(*pages);
		fds[3] = 0;

		ioctl.inv.handle = 1;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = fds;
		ioctl.attrs = 0;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else {
		err = -ENOTTY;
	}
bail:
	if (mem && err)
		fastrpc_mmap_free(mem);
	if (file)
		fastrpc_mmap_free(file);
	return err;
}

static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_attrs ioctl;
	remote_arg_t ra[1];
	int tgid = 0;

	VERIFY(err, fl->apps->channel[fl->cid].chan != 0);
	if (err)
		goto bail;
	tgid = fl->tgid;
	ra[0].buf.pv = (void *)&tgid;
	ra[0].buf.len = sizeof(tgid);
	ioctl.inv.handle = 1;
	ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
bail:
	return err;
}

static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
			       struct fastrpc_mmap *map)
{
	struct fastrpc_ioctl_invoke_attrs ioctl;
	struct smq_phy_page page;
	int num = 1;
	remote_arg_t ra[3];
	int err = 0;
	struct {
		int pid;
		uint32_t flags;
		uintptr_t vaddrin;
		int num;
	} inargs;
	struct {
		uintptr_t vaddrout;
	} routargs;

	inargs.pid = current->tgid;
	inargs.vaddrin = (uintptr_t)map->va;
	inargs.flags = flags;
	inargs.num = fl->apps->compat ? num * sizeof(page) : num;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);
	page.addr = map->phys;
	page.size = map->size;
	ra[1].buf.pv = (void *)&page;
	ra[1].buf.len = num * sizeof(page);

	ra[2].buf.pv = (void *)&routargs;
	ra[2].buf.len = sizeof(routargs);

	ioctl.inv.handle = 1;
	if (fl->apps->compat)
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
	else
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	map->raddr = (uintptr_t)routargs.vaddrout;

	return err;
}

static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
				 struct fastrpc_mmap *map)
{
	struct fastrpc_ioctl_invoke_attrs ioctl;
	remote_arg_t ra[1];
	int err = 0;
	struct {
		int pid;
		uintptr_t vaddrout;
		ssize_t size;
	} inargs;

	inargs.pid = current->tgid;
	inargs.size = map->size;
	inargs.vaddrout = map->raddr;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);

	ioctl.inv.handle = 1;
	if (fl->apps->compat)
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
	else
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	return err;
}

static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       ssize_t len, struct fastrpc_mmap **ppmap);

static void fastrpc_mmap_add(struct fastrpc_mmap *map);

static int fastrpc_internal_munmap(struct fastrpc_file *fl,
				   struct fastrpc_ioctl_munmap *ud)
{
	int err = 0;
	struct fastrpc_mmap *map = 0;

	VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
	if (err)
		goto bail;
	VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
	if (err)
		goto bail;
	fastrpc_mmap_free(map);
bail:
	if (err && map)
		fastrpc_mmap_add(map);
	return err;
}

static int fastrpc_internal_mmap(struct fastrpc_file *fl,
				 struct fastrpc_ioctl_mmap *ud)
{
	struct fastrpc_mmap *map = 0;
	int err = 0;

	if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin, ud->size,
			       ud->flags, 1, &map))
		return 0;

	VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
			(uintptr_t)ud->vaddrin, ud->size, ud->flags, &map));
	if (err)
		goto bail;
	VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
	if (err)
		goto bail;
	ud->vaddrout = map->raddr;
bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}

static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = 0;
	mutex_unlock(&me->smd_mutex);
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
		MAJOR(me->dev_no), cid);
}

static void fastrpc_context_list_dtor(struct fastrpc_file *fl);

static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
			int secure, struct fastrpc_session_ctx **session)
{
	struct fastrpc_apps *me = &gfa;
	int idx = 0, err = 0;

	if (chan->sesscount) {
		for (idx = 0; idx < chan->sesscount; ++idx) {
			if (!chan->session[idx].used &&
				chan->session[idx].smmu.secure == secure) {
				chan->session[idx].used = 1;
				break;
			}
		}
		VERIFY(err, idx < chan->sesscount);
		if (err)
			goto bail;
		chan->session[idx].smmu.faults = 0;
	} else {
		VERIFY(err, me->dev != NULL);
		if (err)
			goto bail;
		chan->session[0].dev = me->dev;
	}

	*session = &chan->session[idx];
bail:
	return err;
}

bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv, size_t size)
{
	if (glink_queue_rx_intent(h, NULL, size))
		return false;
	return true;
}

void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
		const void *pkt_priv, const void *ptr)
{
}

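/*
 * glink receive path: each packet holds one or more smq_invoke_rsp
 * records. The low bit of the returned context (the pd tag added on
 * send) is masked off before the pointer is used to complete the
 * waiting invocation.
 */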
void fastrpc_glink_notify_rx(void *handle, const void *priv,
		const void *pkt_priv, const void *ptr, size_t size)
{
	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
	int len = size;

	while (len >= sizeof(*rsp) && rsp) {
		rsp->ctx = rsp->ctx & ~1;
		context_notify_user(uint64_to_ptr(rsp->ctx), rsp->retval);
		rsp++;
		len = len - sizeof(*rsp);
	}
	glink_rx_done(handle, ptr, true);
}

void fastrpc_glink_notify_state(void *handle, const void *priv,
				unsigned int event)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)(uintptr_t)priv;
	struct fastrpc_glink_info *link;

	if (cid < 0 || cid >= NUM_CHANNELS)
		return;
	link = &me->channel[cid].link;
	switch (event) {
	case GLINK_CONNECTED:
		link->port_state = FASTRPC_LINK_CONNECTED;
		complete(&me->channel[cid].work);
		break;
	case GLINK_LOCAL_DISCONNECTED:
		link->port_state = FASTRPC_LINK_DISCONNECTED;
		break;
	case GLINK_REMOTE_DISCONNECTED:
		if (me->channel[cid].chan &&
			link->link_state == FASTRPC_LINK_STATE_UP) {
			fastrpc_glink_close(me->channel[cid].chan, cid);
			me->channel[cid].chan = 0;
			link->port_state = FASTRPC_LINK_DISCONNECTED;
		}
		break;
	default:
		break;
	}
}

static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
				 struct fastrpc_session_ctx **session)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;

	mutex_lock(&me->smd_mutex);
	if (!*session)
		err = fastrpc_session_alloc_locked(chan, secure, session);
	mutex_unlock(&me->smd_mutex);
	return err;
}

static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
				 struct fastrpc_session_ctx *session)
{
	struct fastrpc_apps *me = &gfa;

	mutex_lock(&me->smd_mutex);
	session->used = 0;
	mutex_unlock(&me->smd_mutex);
}

static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n;
	struct fastrpc_mmap *map = 0;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	(void)fastrpc_release_current_dsp_process(fl);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map);
	}
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
				fastrpc_channel_close, &fl->apps->smd_mutex);
	if (fl->sctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
	kfree(fl);
	return 0;
}

static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;

	if (fl) {
		fastrpc_file_free(fl);
		file->private_data = 0;
	}
	return 0;
}

static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
				       void *priv)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)((uintptr_t)priv);
	struct fastrpc_glink_info *link;

	if (cid < 0 || cid >= NUM_CHANNELS)
		return;

	link = &me->channel[cid].link;
	switch (cb_info->link_state) {
	case GLINK_LINK_STATE_UP:
		link->link_state = FASTRPC_LINK_STATE_UP;
		complete(&me->channel[cid].work);
		break;
	case GLINK_LINK_STATE_DOWN:
		link->link_state = FASTRPC_LINK_STATE_DOWN;
		break;
	default:
		pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
		break;
	}
}

static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;

	link = &me->channel[cid].link;
	if (link->link_notify_handle != NULL)
		goto bail;

	link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
	link->link_notify_handle = glink_register_link_state_cb(
					&link->link_info,
					(void *)((uintptr_t)cid));
	VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
	if (err) {
		link->link_notify_handle = NULL;
		goto bail;
	}
	VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
						RPC_TIMEOUT));
bail:
	return err;
}

static void fastrpc_glink_close(void *chan, int cid)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		return;
	link = &gfa.channel[cid].link;

	if (link->port_state == FASTRPC_LINK_CONNECTED ||
		link->port_state == FASTRPC_LINK_CONNECTING) {
		link->port_state = FASTRPC_LINK_DISCONNECTING;
		glink_close(chan);
	}
}

static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &me->channel[cid].link.cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	if (link->port_state == FASTRPC_LINK_CONNECTED ||
		link->port_state == FASTRPC_LINK_CONNECTING) {
		goto bail;
	}

	link->port_state = FASTRPC_LINK_CONNECTING;
	cfg->priv = (void *)(uintptr_t)cid;
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err)
		goto bail;
	me->channel[cid].chan = handle;
bail:
	return err;
}

static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	int cid = MINOR(inode->i_rdev);
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl = 0;

	VERIFY(err, fl = kzalloc(sizeof(*fl), GFP_KERNEL));
	if (err)
		return err;

	filp->private_data = fl;

	mutex_lock(&me->smd_mutex);

	context_list_ctor(&fl->clst);
	spin_lock_init(&fl->hlock);
	INIT_HLIST_HEAD(&fl->maps);
	INIT_HLIST_HEAD(&fl->bufs);
	INIT_HLIST_NODE(&fl->hn);
	fl->tgid = current->tgid;
	fl->apps = me;
	fl->cid = cid;
	VERIFY(err, !fastrpc_session_alloc_locked(&me->channel[cid], 0,
							&fl->sctx));
	if (err)
		goto bail;
	fl->ssrcount = me->channel[cid].ssrcount;
	if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
	    (me->channel[cid].chan == 0)) {
		fastrpc_glink_register(cid, me);
		VERIFY(err, 0 == fastrpc_glink_open(cid));
		if (err)
			goto bail;

		VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
							RPC_TIMEOUT));
		if (err) {
			me->channel[cid].chan = 0;
			goto bail;
		}
		kref_init(&me->channel[cid].kref);
		pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
			MAJOR(me->dev_no), cid);
		if (me->channel[cid].ssrcount !=
				me->channel[cid].prevssrcount) {
			me->channel[cid].prevssrcount =
					me->channel[cid].ssrcount;
		}
	}
	spin_lock(&me->hlock);
	hlist_add_head(&fl->hn, &me->drivers);
	spin_unlock(&me->hlock);

bail:
	mutex_unlock(&me->smd_mutex);

	if (err)
		fastrpc_device_release(inode, filp);
	return err;
}

static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
{
	int err = 0;

	VERIFY(err, fl && fl->sctx);
	if (err)
		goto bail;
	*info = (fl->sctx->smmu.enabled ? 1 : 0);
bail:
	return err;
}

static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
				 unsigned long ioctl_param)
{
	union {
		struct fastrpc_ioctl_invoke_attrs inv;
		struct fastrpc_ioctl_mmap mmap;
		struct fastrpc_ioctl_munmap munmap;
		struct fastrpc_ioctl_init init;
	} p;
	void *param = (char *)ioctl_param;
	struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
	int size = 0, err = 0;
	uint32_t info;

	p.inv.fds = 0;
	p.inv.attrs = 0;

	switch (ioctl_num) {
	case FASTRPC_IOCTL_INVOKE:
		size = sizeof(struct fastrpc_ioctl_invoke);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_FD:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_fd);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_ATTRS:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_attrs);
		VERIFY(err, 0 == copy_from_user(&p.inv, param, size));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
						0, &p.inv)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MMAP:
		VERIFY(err, 0 == copy_from_user(&p.mmap, param,
						sizeof(p.mmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == copy_to_user(param, &p.mmap, sizeof(p.mmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MUNMAP:
		VERIFY(err, 0 == copy_from_user(&p.munmap, param,
						sizeof(p.munmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
							&p.munmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_SETMODE:
		switch ((uint32_t)ioctl_param) {
		case FASTRPC_MODE_PARALLEL:
		case FASTRPC_MODE_SERIAL:
			fl->mode = (uint32_t)ioctl_param;
			break;
		default:
			err = -ENOTTY;
			break;
		}
		break;
	case FASTRPC_IOCTL_GETINFO:
		VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
		if (err)
			goto bail;
		VERIFY(err, 0 == copy_to_user(param, &info, sizeof(info)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_INIT:
		VERIFY(err, 0 == copy_from_user(&p.init, param,
						sizeof(p.init)));
		if (err)
			goto bail;
		VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
		if (err)
			goto bail;
		break;

	default:
		err = -ENOTTY;
		pr_info("bad ioctl: %d\n", ioctl_num);
		break;
	}
bail:
	return err;
}

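/*
 * Subsystem-restart hook: when a DSP is about to shut down, bump the
 * channel's ssr count, close its glink channel and complete every
 * pending and interrupted invocation on that channel so blocked
 * callers can unwind.
 */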
static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
				       unsigned long code,
				       void *data)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
	cid = ctx - &me->channel[0];
	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		mutex_lock(&me->smd_mutex);
		ctx->ssrcount++;
		if (ctx->chan) {
			fastrpc_glink_close(ctx->chan, cid);
			ctx->chan = NULL;
			pr_info("restart notifier: closed /dev/%s c %d %d\n",
				gcinfo[cid].name, MAJOR(me->dev_no), cid);
		}
		mutex_unlock(&me->smd_mutex);
		fastrpc_notify_drivers(me, cid);
	}

	return NOTIFY_DONE;
}

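/*
 * Character-device entry points; the 32-bit compat path is provided by
 * adsprpc_compat.
 */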
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};

static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};

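/*
 * Probe one "compute-cb" context-bank node: match its "label" to a
 * channel in gcinfo[], then create an IOMMU VA window for the session
 * (base 0x60000000 for secure banks, 0x80000000 otherwise, 0x7fffffff
 * bytes), tag secure domains with VMID_CP_PIXEL, and attach the device
 * to the new mapping.
 */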
static int fastrpc_cb_probe(struct device *dev)
{
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	struct of_phandle_args iommuspec;
	const char *name;
	unsigned int start = 0x80000000;
	int err = 0, i;
	int secure_vmid = VMID_CP_PIXEL;

	VERIFY(err, 0 != (name = of_get_property(dev->of_node, "label", NULL)));
	if (err)
		goto bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		if (!strcmp(name, gcinfo[i].name))
			break;
	}
	VERIFY(err, i < NUM_CHANNELS);
	if (err)
		goto bail;
	chan = &gcinfo[i];
	VERIFY(err, chan->sesscount < NUM_SESSIONS);
	if (err)
		goto bail;

	VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
						"#iommu-cells", 0, &iommuspec));
	if (err)
		goto bail;
	sess = &chan->session[chan->sesscount];
	sess->smmu.cb = iommuspec.args[0] & 0xf;
	sess->used = 0;
	sess->smmu.coherent = of_property_read_bool(dev->of_node,
						    "dma-coherent");
	sess->smmu.secure = of_property_read_bool(dev->of_node,
						  "qcom,secure-context-bank");
	if (sess->smmu.secure)
		start = 0x60000000;
	VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
			arm_iommu_create_mapping(&platform_bus_type,
						 start, 0x7fffffff)));
	if (err)
		goto bail;

	if (sess->smmu.secure)
		iommu_domain_set_attr(sess->smmu.mapping->domain,
				      DOMAIN_ATTR_SECURE_VMID,
				      &secure_vmid);

	VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
	if (err)
		goto bail;
	sess->dev = dev;
	sess->smmu.enabled = 1;
	chan->sesscount++;
bail:
	return err;
}

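/*
 * Top-level probe, demultiplexed on the compatible string: compute-cb
 * nodes are handed to fastrpc_cb_probe(); the adsprpc-mem-region node
 * looks up the ION ADSP heap's CMA range and hyp-assigns it to the
 * remote Q6 VMIDs; the parent compute node simply populates its
 * children.
 */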
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;

	if (of_device_is_compatible(dev->of_node,
				    "qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
				    "qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
					 VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE,
					     PERM_READ | PERM_WRITE | PERM_EXEC,
					     PERM_READ | PERM_WRITE | PERM_EXEC,
					     PERM_READ | PERM_WRITE | PERM_EXEC,
					     };

			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
						     srcVM, 1, destVM,
						     destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					  fastrpc_match_table,
					  NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}

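/*
 * Undo fastrpc_device_init()/fastrpc_cb_probe(): drop each channel's
 * last reference and detach/release every SMMU session. Called both at
 * module exit and on init failure, so it must tolerate partially
 * initialized state.
 */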
static void fastrpc_deinit(void)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *chan = gcinfo;
	int i, j;

	for (i = 0; i < NUM_CHANNELS; i++, chan++) {
		if (chan->chan) {
			kref_put_mutex(&chan->kref,
				       fastrpc_channel_close, &me->smd_mutex);
			chan->chan = NULL;
		}
		for (j = 0; j < NUM_SESSIONS; j++) {
			struct fastrpc_session_ctx *sess = &chan->session[j];

			if (sess->smmu.enabled) {
				arm_iommu_detach_device(sess->dev);
				sess->dev = NULL;
			}
			if (sess->smmu.mapping) {
				arm_iommu_release_mapping(sess->smmu.mapping);
				sess->smmu.mapping = NULL;
			}
		}
	}
}

static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};

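/*
 * Module init: register the platform driver, then create one char
 * device per channel (minor number == channel id) and hook each
 * channel's subsystem-restart notifier. The bail labels unwind
 * strictly in reverse order of setup.
 */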
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					     DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				  NUM_CHANNELS));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		me->channel[i].dev = device_create(me->class, NULL,
						   MKDEV(MAJOR(me->dev_no), i),
						   NULL, gcinfo[i].name);
		VERIFY(err, !IS_ERR(me->channel[i].dev));
		if (err)
			goto device_create_bail;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	return 0;
device_create_bail:
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (IS_ERR_OR_NULL(me->channel[i].dev))
			continue;
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						 &me->channel[i].nb);
	}
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}

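/*
 * Module exit: tear down any remaining files first, then mirror the
 * init sequence in reverse.
 */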
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;
	int i;

	fastrpc_file_list_dtor(me);
	fastrpc_deinit();
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						 &me->channel[i].nb);
	}
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
	ion_client_destroy(me->client);
}

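/*
 * Registered as a late_initcall rather than module_init, presumably so
 * that the services this driver depends on (ion, glink, subsystem
 * restart) are up before the first probe on built-in kernels.
 */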
late_initcall(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");