/*
 * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/msm_ion.h>
#include <soc/qcom/secure_buffer.h>
#include <soc/qcom/glink.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>
#include <linux/scatterlist.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/sort.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <asm/dma-iommu.h>
#include "adsprpc_compat.h"
#include "adsprpc_shared.h"

#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
#define TZ_PIL_AUTH_QDSP6_PROC 1
#define FASTRPC_ENOSUCH 39
#define VMID_SSC_Q6 5
#define VMID_ADSP_Q6 6

#define RPC_TIMEOUT (5 * HZ)
#define BALIGN 128
#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp */
#define NUM_SESSIONS 9 /* 8 compute, 1 cpz */

#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)

#define FASTRPC_LINK_STATE_DOWN (0x0)
#define FASTRPC_LINK_STATE_UP (0x1)
#define FASTRPC_LINK_DISCONNECTED (0x0)
#define FASTRPC_LINK_CONNECTING (0x1)
#define FASTRPC_LINK_CONNECTED (0x3)
#define FASTRPC_LINK_DISCONNECTING (0x7)

static int fastrpc_glink_open(int cid);
static void fastrpc_glink_close(void *chan, int cid);

static inline uint64_t buf_page_start(uint64_t buf)
{
	uint64_t start = (uint64_t) buf & PAGE_MASK;
	return start;
}

static inline uint64_t buf_page_offset(uint64_t buf)
{
	uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
	return offset;
}

static inline int buf_num_pages(uint64_t buf, ssize_t len)
{
	uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
	uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
	int nPages = end - start + 1;
	return nPages;
}

static inline uint64_t buf_page_size(uint32_t size)
{
	uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;

	return sz > PAGE_SIZE ? sz : PAGE_SIZE;
}

static inline void *uint64_to_ptr(uint64_t addr)
{
	void *ptr = (void *)((uintptr_t)addr);

	return ptr;
}

static inline uint64_t ptr_to_uint64(void *ptr)
{
	uint64_t addr = (uint64_t)((uintptr_t)ptr);

	return addr;
}

struct fastrpc_file;

struct fastrpc_buf {
	struct hlist_node hn;
	struct fastrpc_file *fl;
	void *virt;
	uint64_t phys;
	ssize_t size;
};

struct fastrpc_ctx_lst;

struct overlap {
	uintptr_t start;
	uintptr_t end;
	int raix;
	uintptr_t mstart;
	uintptr_t mend;
	uintptr_t offset;
};

struct smq_invoke_ctx {
	struct hlist_node hn;
	struct completion work;
	int retval;
	int pid;
	int tgid;
	remote_arg_t *lpra;
	remote_arg64_t *rpra;
	int *fds;
	unsigned int *attrs;
	struct fastrpc_mmap **maps;
	struct fastrpc_buf *buf;
	ssize_t used;
	struct fastrpc_file *fl;
	uint32_t sc;
	struct overlap *overs;
	struct overlap **overps;
	struct smq_msg msg;
};

struct fastrpc_ctx_lst {
	struct hlist_head pending;
	struct hlist_head interrupted;
};

struct fastrpc_smmu {
	struct dma_iommu_mapping *mapping;
	int cb;
	int enabled;
	int faults;
	int secure;
	int coherent;
};

struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};

struct fastrpc_glink_info {
	int link_state;
	int port_state;
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};

struct fastrpc_channel_ctx {
	char *name;
	char *subsys;
	void *chan;
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;
	struct notifier_block nb;
	struct kref kref;
	int sesscount;
	int ssrcount;
	void *handle;
	int prevssrcount;
	int vmid;
	struct fastrpc_glink_info link;
};

struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;
	spinlock_t hlock;
	struct ion_client *client;
	struct device *dev;
};

struct fastrpc_mmap {
	struct hlist_node hn;
	struct fastrpc_file *fl;
	struct fastrpc_apps *apps;
	int fd;
	uint32_t flags;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;
	ssize_t size;
	uintptr_t va;
	ssize_t len;
	int refs;
	uintptr_t raddr;
	int uncached;
	int secure;
	uintptr_t attr;
};

struct fastrpc_file {
	struct hlist_node hn;
	spinlock_t hlock;
	struct hlist_head maps;
	struct hlist_head bufs;
	struct fastrpc_ctx_lst clst;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_session_ctx *secsctx;
	uint32_t mode;
	int tgid;
	int cid;
	int ssrcount;
	int pd;
	struct fastrpc_apps *apps;
};

static struct fastrpc_apps gfa;

static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "dsps",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};

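/*
 * Release a scratch buffer.  With @cache set the buffer is parked on
 * the file's free list for reuse; otherwise it is hyp-assigned back
 * to HLOS when the channel has a VMID and freed with
 * dma_free_coherent().
 */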
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == 0 ? 0 : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->dev, buf->size, buf->virt,
					buf->phys);
	}
	kfree(buf);
}

static void fastrpc_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			hlist_del_init(&buf->hn);
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}

static void fastrpc_mmap_add(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl = map->fl;

	spin_lock(&fl->hlock);
	hlist_add_head(&map->hn, &fl->maps);
	spin_unlock(&fl->hlock);
}

static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
			ssize_t len, int mflags, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = 0, *map;
	struct hlist_node *n;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (va >= map->va &&
		    va + len <= map->va + map->len &&
		    map->fd == fd) {
			map->refs++;
			match = map;
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       ssize_t len, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = 0, *map;
	struct hlist_node *n;
	struct fastrpc_apps *me = &gfa;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
		if (map->raddr == va &&
		    map->raddr + map->len == va + len &&
		    map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (map->raddr == va &&
		    map->raddr + map->len == va + len &&
		    map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

static void fastrpc_mmap_free(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;
	int destVM[1] = {VMID_HLOS};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	if (!map)
		return;
	fl = map->fl;
	spin_lock(&fl->hlock);
	map->refs--;
	if (!map->refs)
		hlist_del_init(&map->hn);
	spin_unlock(&fl->hlock);
	if (map->refs > 0)
		return;
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	if (!IS_ERR_OR_NULL(map->handle))
		ion_free(fl->apps->client, map->handle);
	if (sess->smmu.enabled) {
		if (map->size || map->phys)
			/* unmap on the same session device the buffer
			 * was mapped with in fastrpc_mmap_create()
			 */
			msm_dma_unmap_sg(sess->dev,
				map->table->sgl,
				map->table->nents, DMA_BIDIRECTIONAL,
				map->buf);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid && map->phys) {
		int srcVM[2] = {VMID_HLOS, vmid};

		hyp_assign_phys(map->phys, buf_page_size(map->size),
			srcVM, 2, destVM, destVMperm, 1);
	}

	if (!IS_ERR_OR_NULL(map->table))
		dma_buf_unmap_attachment(map->attach, map->table,
				DMA_BIDIRECTIONAL);
	if (!IS_ERR_OR_NULL(map->attach))
		dma_buf_detach(map->buf, map->attach);
	if (!IS_ERR_OR_NULL(map->buf))
		dma_buf_put(map->buf);
	kfree(map);
}

static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
			struct fastrpc_session_ctx **session);

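/*
 * Import a dma-buf/ion fd and make it visible to the DSP.  An
 * existing mapping covering <fd, va, len> is reused when present;
 * otherwise the buffer is attached to the secure or default SMMU
 * session device, its scatterlist is mapped, and on channels with a
 * non-HLOS VMID the pages are hyp-assigned to the remote Q6.
 * Partial failures are unwound through fastrpc_mmap_free().
 */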
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, ssize_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = 0;
	unsigned long attrs;
	unsigned long flags;
	int err = 0, vmid;

	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	VERIFY(err, !IS_ERR_OR_NULL(map->handle =
			ion_import_dma_buf_fd(fl->apps->client, fd)));
	if (err)
		goto bail;
	VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
	if (err)
		goto bail;

	map->uncached = !ION_IS_CACHED(flags);
	if (map->attr & FASTRPC_ATTR_NOVA)
		map->uncached = 1;

	map->secure = flags & ION_FLAG_SECURE;
	if (map->secure) {
		if (!fl->secsctx)
			err = fastrpc_session_alloc(chan, 1,
						&fl->secsctx);
		if (err)
			goto bail;
	}
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->attach =
			dma_buf_attach(map->buf, sess->dev)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->table =
		dma_buf_map_attachment(map->attach,
			DMA_BIDIRECTIONAL)));
	if (err)
		goto bail;
	if (sess->smmu.enabled) {
		attrs = DMA_ATTR_EXEC_MAPPING;
		VERIFY(err, map->table->nents ==
			msm_dma_map_sg_attrs(sess->dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
		if (err)
			goto bail;
	} else {
		VERIFY(err, map->table->nents == 1);
		if (err)
			goto bail;
	}
	map->phys = sg_dma_address(map->table->sgl);
	if (sess->smmu.cb) {
		map->phys += ((uint64_t)sess->smmu.cb << 32);
		map->size = sg_dma_len(map->table->sgl);
	} else {
		map->size = buf_page_size(len);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(map->phys,
				buf_page_size(map->size),
				srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}
	map->va = va;
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}

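/*
 * Allocate a DMA-coherent scratch buffer for argument marshaling,
 * preferring the smallest cached buffer that fits.  Allocation is
 * retried once after draining the cache, and the new buffer is
 * hyp-assigned to the channel's VMID when one is configured.
 */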
static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
			     struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = 0, *fr = 0;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = 0;
	VERIFY(err, buf = kzalloc(sizeof(*buf), GFP_KERNEL));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = 0;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
				       (void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
					       (void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
 bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}


static int context_restore_interrupted(struct fastrpc_file *fl,
				struct fastrpc_ioctl_invoke_attrs *inv,
				struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = 0, *ictx = 0;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int overlap_ptr_cmp(const void *a, const void *b)
{
	struct overlap *pa = *((struct overlap **)a);
	struct overlap *pb = *((struct overlap **)b);
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);
	return st == 0 ? ed : st;
}

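/*
 * Work out, per argument buffer, the sub-range that actually needs
 * copying.  Buffers are sorted by start address (ties: larger end
 * first) and overlapping ranges are trimmed so a byte shared by two
 * arguments is copied only once; mstart/mend hold the trimmed window
 * and offset is the shift of the copy within the aligned region.
 */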
static void context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, 0);
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
}

#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst), (src),\
							(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((dst), (src),\
						      (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)


static void context_free(struct smq_invoke_ctx *ctx);

static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			 struct fastrpc_ioctl_invoke_attrs *invokefd,
			 struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}

	ctx->sc = invoke->sc;
	if (bufs)
		context_build_overlap(ctx);
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = current->tgid;
	init_completion(&ctx->work);

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}

static void context_save_interrupted(struct smq_invoke_ctx *ctx)
{
	struct fastrpc_ctx_lst *clst = &ctx->fl->clst;

	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	hlist_add_head(&ctx->hn, &clst->interrupted);
	spin_unlock(&ctx->fl->hlock);
	/* free the cache on power collapse */
	fastrpc_buf_list_free(ctx->fl);
}

static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i]);
	fastrpc_buf_free(ctx->buf, 1);
	kfree(ctx);
}

static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}


static void fastrpc_notify_users(struct fastrpc_file *me)
{
	struct smq_invoke_ctx *ictx;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
		complete(&ictx->work);
	}
	hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
		complete(&ictx->work);
	}
	spin_unlock(&me->hlock);
}

static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
{
	struct fastrpc_file *fl;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		if (fl->cid == cid)
			fastrpc_notify_users(fl);
	}
	spin_unlock(&me->hlock);
}

static void context_list_ctor(struct fastrpc_ctx_lst *me)
{
	INIT_HLIST_HEAD(&me->interrupted);
	INIT_HLIST_HEAD(&me->pending);
}

static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
{
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct smq_invoke_ctx *ictx = 0, *ctxfree;
	struct hlist_node *n;

	do {
		ctxfree = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
	do {
		ctxfree = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
}

static int fastrpc_file_free(struct fastrpc_file *fl);
static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
{
	struct fastrpc_file *fl, *free;
	struct hlist_node *n;

	do {
		free = 0;
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
			hlist_del_init(&fl->hn);
			free = fl;
			break;
		}
		spin_unlock(&me->hlock);
		if (free)
			fastrpc_file_free(free);
	} while (free);
}

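/*
 * Marshal invoke arguments into the DSP-visible layout: a metadata
 * header (remote args, buffer list, physical page table) followed by
 * copies of the non-ion buffers, all inside one coherent scratch
 * buffer.  ion-backed arguments are mapped and passed by reference
 * rather than copied.
 */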
static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
{
	remote_arg64_t *rpra;
	remote_arg_t *lpra = ctx->lpra;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages, *ipage;
	uint32_t sc = ctx->sc;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	int bufs = inbufs + outbufs;
	uintptr_t args;
	ssize_t rlen = 0, copylen = 0, metalen = 0;
	int i, inh, oix;
	int err = 0;
	int mflags = 0;

	/* calculate size of the metadata */
	rpra = 0;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;

	for (i = 0; i < bufs; ++i) {
		uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
		ssize_t len = lpra[i].buf.len;

		if (ctx->fds[i] && (ctx->fds[i] != -1))
			fastrpc_mmap_create(ctx->fl, ctx->fds[i],
					ctx->attrs[i], buf, len,
					mflags, &ctx->maps[i]);
		ipage += 1;
	}
	metalen = copylen = (ssize_t)&ipage[0];
	/* calculate len required for copying */
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		ssize_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (ctx->maps[i])
			continue;
		if (ctx->overps[oix]->offset == 0)
			copylen = ALIGN(copylen, BALIGN);
		copylen += ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
	}
	ctx->used = copylen;

	/* allocate new buffer */
	if (copylen) {
		VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
		if (err)
			goto bail;
	}
	/* copy metadata */
	rpra = ctx->buf->virt;
	ctx->rpra = rpra;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;
	args = (uintptr_t)ctx->buf->virt + metalen;
	for (i = 0; i < bufs; ++i) {
		ssize_t len = lpra[i].buf.len;

		list[i].num = 0;
		list[i].pgidx = 0;
		if (!len)
			continue;
		list[i].num = 1;
		list[i].pgidx = ipage - pages;
		ipage++;
	}
	/* map ion buffers */
	for (i = 0; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];
		uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
		ssize_t len = lpra[i].buf.len;

		rpra[i].buf.pv = 0;
		rpra[i].buf.len = len;
		if (!len)
			continue;
		if (map) {
			struct vm_area_struct *vma;
			uintptr_t offset;
			int num = buf_num_pages(buf, len);
			int idx = list[i].pgidx;

			if (map->attr & FASTRPC_ATTR_NOVA) {
				offset = (uintptr_t)lpra[i].buf.pv;
			} else {
				down_read(&current->mm->mmap_sem);
				VERIFY(err, NULL != (vma = find_vma(current->mm,
								map->va)));
				if (err) {
					up_read(&current->mm->mmap_sem);
					goto bail;
				}
				offset = buf_page_start(buf) - vma->vm_start;
				up_read(&current->mm->mmap_sem);
				VERIFY(err, offset < (uintptr_t)map->size);
				if (err)
					goto bail;
			}
			pages[idx].addr = map->phys + offset;
			pages[idx].size = num << PAGE_SHIFT;
		}
		rpra[i].buf.pv = buf;
	}
	/* copy non ion buffers */
	rlen = copylen - metalen;
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];
		int mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
		uint64_t buf;
		ssize_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (map)
			continue;
		if (ctx->overps[oix]->offset == 0) {
			rlen -= ALIGN(args, BALIGN) - args;
			args = ALIGN(args, BALIGN);
		}
		VERIFY(err, rlen >= mlen);
		if (err)
			goto bail;
		rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
		pages[list[i].pgidx].addr = ctx->buf->phys -
					    ctx->overps[oix]->offset +
					    (copylen - rlen);
		pages[list[i].pgidx].addr =
			buf_page_start(pages[list[i].pgidx].addr);
		buf = rpra[i].buf.pv;
		pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
		if (i < inbufs) {
			K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
					lpra[i].buf.pv, len);
			if (err)
				goto bail;
		}
		args = args + mlen;
		rlen -= mlen;
	}

	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];

		if (ctx->fl->sctx->smmu.coherent)
			continue;
		if (map && map->uncached)
			continue;
		if (rpra[i].buf.len && ctx->overps[oix]->mstart)
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
			uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
	}
	inh = inbufs + outbufs;
	for (i = 0; i < REMOTE_SCALARS_INHANDLES(sc); i++) {
		rpra[inh + i].buf.pv = ptr_to_uint64(ctx->lpra[inh + i].buf.pv);
		rpra[inh + i].buf.len = ctx->lpra[inh + i].buf.len;
		rpra[inh + i].h = ctx->lpra[inh + i].h;
	}
	if (!ctx->fl->sctx->smmu.coherent)
		dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
 bail:
	return err;
}

static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		    remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, outh, size;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			fastrpc_mmap_free(ctx->maps[i]);
			ctx->maps[i] = 0;
		}
	}
	size = sizeof(*rpra) * REMOTE_SCALARS_OUTHANDLES(sc);
	if (size) {
		outh = inbufs + outbufs + REMOTE_SCALARS_INHANDLES(sc);
		K_COPY_TO_USER(err, kernel, &upra[outh], &rpra[outh], size);
		if (err)
			goto bail;
	}
 bail:
	return err;
}

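/*
 * Cache maintenance for non-coherent SMMU sessions: inv_args_pre()
 * flushes the unaligned edges of output buffers before the call so
 * stale dirty lines cannot overwrite DSP-written data, and
 * inv_args() invalidates output buffers after the call so the CPU
 * observes what the DSP wrote.
 */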
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
							rpra[i].buf.len);
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}

static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;
	int inv = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			inv = 1;
			continue;
		}
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						+ rpra[i].buf.len));
	}

	if (inv || REMOTE_SCALARS_OUTHANDLES(sc))
		dmac_inv_range(rpra, (char *)rpra + used);
}

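/*
 * Fill in the smq_msg for a context and send it over the glink
 * channel.  The low bit of the context cookie carries the protection
 * domain (fl->pd); the DSP echoes the cookie back so the response
 * can be matched to this context.
 */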
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, 0 != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = current->tgid;
	msg->tid = current->pid;
	if (kernel)
		msg->pid = 0;
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
			FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
 bail:
	return err;
}

static void fastrpc_init(struct fastrpc_apps *me)
{
	int i;

	INIT_HLIST_HEAD(&me->drivers);
	spin_lock_init(&me->hlock);
	mutex_init(&me->smd_mutex);
	me->channel = &gcinfo[0];
	for (i = 0; i < NUM_CHANNELS; i++) {
		init_completion(&me->channel[i].work);
		me->channel[i].sesscount = 0;
	}
}

static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);

static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
				   uint32_t kernel,
				   struct fastrpc_ioctl_invoke_attrs *inv)
{
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;
	int cid = fl->cid;
	int interrupted = 0;
	int err = 0;

	if (!kernel) {
		VERIFY(err, 0 == context_restore_interrupted(fl, inv,
								&ctx));
		if (err)
			goto bail;
		if (fl->sctx->smmu.faults)
			err = FASTRPC_ENOSUCH;
		if (err)
			goto bail;
		if (ctx)
			goto wait;
	}

	VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
	if (err)
		goto bail;

	if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
		VERIFY(err, 0 == get_args(kernel, ctx));
		if (err)
			goto bail;
	}

	if (!fl->sctx->smmu.coherent) {
		inv_args_pre(ctx);
		if (mode == FASTRPC_MODE_SERIAL)
			inv_args(ctx);
	}
	VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
	if (err)
		goto bail;
	if (mode == FASTRPC_MODE_PARALLEL && !fl->sctx->smmu.coherent)
		inv_args(ctx);
 wait:
	if (kernel)
		wait_for_completion(&ctx->work);
	else {
		interrupted = wait_for_completion_interruptible(&ctx->work);
		VERIFY(err, 0 == (err = interrupted));
		if (err)
			goto bail;
	}
	VERIFY(err, 0 == (err = ctx->retval));
	if (err)
		goto bail;
	VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
	if (err)
		goto bail;
 bail:
	if (ctx && interrupted == -ERESTARTSYS)
		context_save_interrupted(ctx);
	else if (ctx)
		context_free(ctx);
	if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
		err = ECONNRESET;
	return err;
}

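/*
 * Set up the remote process for this file.  FASTRPC_INIT_ATTACH
 * attaches to an existing process on the DSP (pd 0), while
 * FASTRPC_INIT_CREATE maps the process image and preallocated memory
 * and invokes the remote create method on handle 1 to start a new
 * dynamic process (pd 1).
 */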
static int fastrpc_init_process(struct fastrpc_file *fl,
				struct fastrpc_ioctl_init *init)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_attrs ioctl;
	struct smq_phy_page pages[1];
	struct fastrpc_mmap *file = 0, *mem = 0;

	if (init->flags == FASTRPC_INIT_ATTACH) {
		remote_arg_t ra[1];
		int tgid = current->tgid;

		ra[0].buf.pv = (void *)&tgid;
		ra[0].buf.len = sizeof(tgid);
		ioctl.inv.handle = 1;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = 0;
		ioctl.attrs = 0;
		fl->pd = 0;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else if (init->flags == FASTRPC_INIT_CREATE) {
		remote_arg_t ra[4];
		int fds[4];
		int mflags = 0;
		struct {
			int pgid;
			int namelen;
			int filelen;
			int pageslen;
		} inbuf;

		inbuf.pgid = current->tgid;
		inbuf.namelen = strlen(current->comm) + 1;
		inbuf.filelen = init->filelen;
		fl->pd = 1;
		if (init->filelen) {
			VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
				init->file, init->filelen, mflags, &file));
			if (err)
				goto bail;
		}
		inbuf.pageslen = 1;
		VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
				init->mem, init->memlen, mflags, &mem));
		if (err)
			goto bail;
		inbuf.pageslen = 1;
		ra[0].buf.pv = (void *)&inbuf;
		ra[0].buf.len = sizeof(inbuf);
		fds[0] = 0;

		ra[1].buf.pv = (void *)current->comm;
		ra[1].buf.len = inbuf.namelen;
		fds[1] = 0;

		ra[2].buf.pv = (void *)init->file;
		ra[2].buf.len = inbuf.filelen;
		fds[2] = init->filefd;

		pages[0].addr = mem->phys;
		pages[0].size = mem->size;
		ra[3].buf.pv = (void *)pages;
		ra[3].buf.len = 1 * sizeof(*pages);
		fds[3] = 0;

		ioctl.inv.handle = 1;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = fds;
		ioctl.attrs = 0;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else {
		err = -ENOTTY;
	}
bail:
	if (mem && err)
		fastrpc_mmap_free(mem);
	if (file)
		fastrpc_mmap_free(file);
	return err;
}

static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_attrs ioctl;
	remote_arg_t ra[1];
	int tgid = 0;

	VERIFY(err, fl->apps->channel[fl->cid].chan != 0);
	if (err)
		goto bail;
	tgid = fl->tgid;
	ra[0].buf.pv = (void *)&tgid;
	ra[0].buf.len = sizeof(tgid);
	ioctl.inv.handle = 1;
	ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
bail:
	return err;
}

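/*
 * Mirror a mapping onto the DSP: the map's physical pages are passed
 * to the remote service on handle 1, and the remote virtual address
 * it returns is recorded in map->raddr.  fastrpc_munmap_on_dsp()
 * below is the inverse; both use different scalar method indices for
 * compat (32-bit userspace) clients.
 */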
static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
			       struct fastrpc_mmap *map)
{
	struct fastrpc_ioctl_invoke_attrs ioctl;
	struct smq_phy_page page;
	int num = 1;
	remote_arg_t ra[3];
	int err = 0;
	struct {
		int pid;
		uint32_t flags;
		uintptr_t vaddrin;
		int num;
	} inargs;
	struct {
		uintptr_t vaddrout;
	} routargs;

	inargs.pid = current->tgid;
	inargs.vaddrin = (uintptr_t)map->va;
	inargs.flags = flags;
	inargs.num = fl->apps->compat ? num * sizeof(page) : num;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);
	page.addr = map->phys;
	page.size = map->size;
	ra[1].buf.pv = (void *)&page;
	ra[1].buf.len = num * sizeof(page);

	ra[2].buf.pv = (void *)&routargs;
	ra[2].buf.len = sizeof(routargs);

	ioctl.inv.handle = 1;
	if (fl->apps->compat)
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
	else
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	map->raddr = (uintptr_t)routargs.vaddrout;

	return err;
}

static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
				 struct fastrpc_mmap *map)
{
	struct fastrpc_ioctl_invoke_attrs ioctl;
	remote_arg_t ra[1];
	int err = 0;
	struct {
		int pid;
		uintptr_t vaddrout;
		ssize_t size;
	} inargs;

	inargs.pid = current->tgid;
	inargs.size = map->size;
	inargs.vaddrout = map->raddr;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);

	ioctl.inv.handle = 1;
	if (fl->apps->compat)
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
	else
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	return err;
}

static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       ssize_t len, struct fastrpc_mmap **ppmap);

static void fastrpc_mmap_add(struct fastrpc_mmap *map);

static int fastrpc_internal_munmap(struct fastrpc_file *fl,
				   struct fastrpc_ioctl_munmap *ud)
{
	int err = 0;
	struct fastrpc_mmap *map = 0;

	VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
	if (err)
		goto bail;
	VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
	if (err)
		goto bail;
	fastrpc_mmap_free(map);
bail:
	if (err && map)
		fastrpc_mmap_add(map);
	return err;
}

static int fastrpc_internal_mmap(struct fastrpc_file *fl,
				 struct fastrpc_ioctl_mmap *ud)
{
	struct fastrpc_mmap *map = 0;
	int err = 0;

	if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin, ud->size,
			       ud->flags, &map))
		return 0;

	VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
			(uintptr_t)ud->vaddrin, ud->size, ud->flags, &map));
	if (err)
		goto bail;
	VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
	if (err)
		goto bail;
	ud->vaddrout = map->raddr;
 bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}

static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = 0;
	mutex_unlock(&me->smd_mutex);
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
		MAJOR(me->dev_no), cid);
}

static void fastrpc_context_list_dtor(struct fastrpc_file *fl);

static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
			int secure, struct fastrpc_session_ctx **session)
{
	struct fastrpc_apps *me = &gfa;
	int idx = 0, err = 0;

	if (chan->sesscount) {
		for (idx = 0; idx < chan->sesscount; ++idx) {
			if (!chan->session[idx].used &&
				chan->session[idx].smmu.secure == secure) {
				chan->session[idx].used = 1;
				break;
			}
		}
		VERIFY(err, idx < chan->sesscount);
		if (err)
			goto bail;
		chan->session[idx].smmu.faults = 0;
	} else {
		VERIFY(err, me->dev != NULL);
		if (err)
			goto bail;
		chan->session[0].dev = me->dev;
	}

	*session = &chan->session[idx];
 bail:
	return err;
}

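/*
 * glink transport callbacks.  A received packet is a batch of
 * smq_invoke_rsp records; each completes the matching invoke context
 * (the low protection-domain bit of the echoed cookie is masked off
 * first).  Link- and port-state events drive the FASTRPC_LINK_*
 * state machine used when (re)opening a channel.
 */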
bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv, size_t size)
{
	if (glink_queue_rx_intent(h, NULL, size))
		return false;
	return true;
}

void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
		const void *pkt_priv, const void *ptr)
{
}

void fastrpc_glink_notify_rx(void *handle, const void *priv,
	const void *pkt_priv, const void *ptr, size_t size)
{
	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
	int len = size;

	while (len >= sizeof(*rsp) && rsp) {
		rsp->ctx = rsp->ctx & ~1;
		context_notify_user(uint64_to_ptr(rsp->ctx), rsp->retval);
		rsp++;
		len = len - sizeof(*rsp);
	}
	glink_rx_done(handle, ptr, true);
}

void fastrpc_glink_notify_state(void *handle, const void *priv,
				unsigned int event)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)(uintptr_t)priv;
	struct fastrpc_glink_info *link;

	if (cid < 0 || cid >= NUM_CHANNELS)
		return;
	link = &me->channel[cid].link;
	switch (event) {
	case GLINK_CONNECTED:
		link->port_state = FASTRPC_LINK_CONNECTED;
		complete(&me->channel[cid].work);
		break;
	case GLINK_LOCAL_DISCONNECTED:
		link->port_state = FASTRPC_LINK_DISCONNECTED;
		break;
	case GLINK_REMOTE_DISCONNECTED:
		if (me->channel[cid].chan &&
			link->link_state == FASTRPC_LINK_STATE_UP) {
			fastrpc_glink_close(me->channel[cid].chan, cid);
			me->channel[cid].chan = 0;
			link->port_state = FASTRPC_LINK_DISCONNECTED;
		}
		break;
	default:
		break;
	}
}

static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
				 struct fastrpc_session_ctx **session)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;

	mutex_lock(&me->smd_mutex);
	if (!*session)
		err = fastrpc_session_alloc_locked(chan, secure, session);
	mutex_unlock(&me->smd_mutex);
	return err;
}

static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
				 struct fastrpc_session_ctx *session)
{
	struct fastrpc_apps *me = &gfa;

	mutex_lock(&me->smd_mutex);
	session->used = 0;
	mutex_unlock(&me->smd_mutex);
}

static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n;
	struct fastrpc_mmap *map = 0;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	(void)fastrpc_release_current_dsp_process(fl);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map);
	}
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
				fastrpc_channel_close, &fl->apps->smd_mutex);
	if (fl->sctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
	kfree(fl);
	return 0;
}

static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;

	if (fl) {
		fastrpc_file_free(fl);
		file->private_data = 0;
	}
	return 0;
}

static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
				       void *priv)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)((uintptr_t)priv);
	struct fastrpc_glink_info *link;

	if (cid < 0 || cid >= NUM_CHANNELS)
		return;

	link = &me->channel[cid].link;
	switch (cb_info->link_state) {
	case GLINK_LINK_STATE_UP:
		link->link_state = FASTRPC_LINK_STATE_UP;
		complete(&me->channel[cid].work);
		break;
	case GLINK_LINK_STATE_DOWN:
		link->link_state = FASTRPC_LINK_STATE_DOWN;
		break;
	default:
		pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
		break;
	}
}

static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;

	link = &me->channel[cid].link;
	if (link->link_notify_handle != NULL)
		goto bail;

	link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
	link->link_notify_handle = glink_register_link_state_cb(
					&link->link_info,
					(void *)((uintptr_t)cid));
	VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
	if (err) {
		link->link_notify_handle = NULL;
		goto bail;
	}
	VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
						RPC_TIMEOUT));
bail:
	return err;
}

static void fastrpc_glink_close(void *chan, int cid)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		return;
	link = &gfa.channel[cid].link;

	if (link->port_state == FASTRPC_LINK_CONNECTED ||
		link->port_state == FASTRPC_LINK_CONNECTING) {
		link->port_state = FASTRPC_LINK_DISCONNECTING;
		glink_close(chan);
	}
}

static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &me->channel[cid].link.cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	if (link->port_state == FASTRPC_LINK_CONNECTED ||
		link->port_state == FASTRPC_LINK_CONNECTING) {
		goto bail;
	}

	link->port_state = FASTRPC_LINK_CONNECTING;
	cfg->priv = (void *)(uintptr_t)cid;
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err)
		goto bail;
	me->channel[cid].chan = handle;
bail:
	return err;
}

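/*
 * Open of /dev/adsprpc-*: allocate the per-process fastrpc_file,
 * reserve an SMMU session on the channel selected by the device
 * minor, and on first open bring the glink channel up, waiting for
 * the link and port to connect before the file is usable.
 */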
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	int cid = MINOR(inode->i_rdev);
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl = 0;

	VERIFY(err, fl = kzalloc(sizeof(*fl), GFP_KERNEL));
	if (err)
		return err;

	filp->private_data = fl;

	mutex_lock(&me->smd_mutex);

	context_list_ctor(&fl->clst);
	spin_lock_init(&fl->hlock);
	INIT_HLIST_HEAD(&fl->maps);
	INIT_HLIST_HEAD(&fl->bufs);
	INIT_HLIST_NODE(&fl->hn);
	fl->tgid = current->tgid;
	fl->apps = me;
	fl->cid = cid;
	VERIFY(err, !fastrpc_session_alloc_locked(&me->channel[cid], 0,
							&fl->sctx));
	if (err)
		goto bail;
	fl->cid = cid;
	fl->ssrcount = me->channel[cid].ssrcount;
	if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
	    (me->channel[cid].chan == 0)) {
		fastrpc_glink_register(cid, me);
		VERIFY(err, 0 == fastrpc_glink_open(cid));
		if (err)
			goto bail;

		VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
							RPC_TIMEOUT));
		if (err) {
			me->channel[cid].chan = 0;
			goto bail;
		}
		kref_init(&me->channel[cid].kref);
		pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
						MAJOR(me->dev_no), cid);
		if (me->channel[cid].ssrcount !=
				me->channel[cid].prevssrcount) {
			me->channel[cid].prevssrcount =
						me->channel[cid].ssrcount;
		}
	}
	spin_lock(&me->hlock);
	hlist_add_head(&fl->hn, &me->drivers);
	spin_unlock(&me->hlock);

bail:
	mutex_unlock(&me->smd_mutex);

	if (err)
		fastrpc_device_release(inode, filp);
	return err;
}

static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
{
	int err = 0;

	VERIFY(err, fl && fl->sctx);
	if (err)
		goto bail;
	*info = (fl->sctx->smmu.enabled ? 1 : 0);
bail:
	return err;
}

static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
				 unsigned long ioctl_param)
{
	union {
		struct fastrpc_ioctl_invoke_attrs inv;
		struct fastrpc_ioctl_mmap mmap;
		struct fastrpc_ioctl_munmap munmap;
		struct fastrpc_ioctl_init init;
	} p;
	void *param = (char *)ioctl_param;
	struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
	int size = 0, err = 0;
	uint32_t info;

	p.inv.fds = 0;
	p.inv.attrs = 0;

	switch (ioctl_num) {
	case FASTRPC_IOCTL_INVOKE:
		size = sizeof(struct fastrpc_ioctl_invoke);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_FD:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_fd);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_ATTRS:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_attrs);
		VERIFY(err, 0 == copy_from_user(&p.inv, param, size));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
						0, &p.inv)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MMAP:
		VERIFY(err, 0 == copy_from_user(&p.mmap, param,
						sizeof(p.mmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == copy_to_user(param, &p.mmap, sizeof(p.mmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MUNMAP:
		VERIFY(err, 0 == copy_from_user(&p.munmap, param,
						sizeof(p.munmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
							&p.munmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_SETMODE:
		switch ((uint32_t)ioctl_param) {
		case FASTRPC_MODE_PARALLEL:
		case FASTRPC_MODE_SERIAL:
			fl->mode = (uint32_t)ioctl_param;
			break;
		default:
			err = -ENOTTY;
			break;
		}
		break;
	case FASTRPC_IOCTL_GETINFO:
		VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
		if (err)
			goto bail;
		VERIFY(err, 0 == copy_to_user(param, &info, sizeof(info)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_INIT:
		VERIFY(err, 0 == copy_from_user(&p.init, param,
						sizeof(p.init)));
		if (err)
			goto bail;
		VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
		if (err)
			goto bail;
		break;

	default:
		err = -ENOTTY;
		pr_info("bad ioctl: %d\n", ioctl_num);
		break;
	}
 bail:
	return err;
}

static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
					unsigned long code,
					void *data)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
	cid = ctx - &me->channel[0];
	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		mutex_lock(&me->smd_mutex);
		ctx->ssrcount++;
		if (ctx->chan) {
			fastrpc_glink_close(ctx->chan, cid);
			ctx->chan = 0;
			pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
				gcinfo[cid].name, MAJOR(me->dev_no), cid);
		}
		mutex_unlock(&me->smd_mutex);
		fastrpc_notify_drivers(me, cid);
	}

	return NOTIFY_DONE;
}

static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};

static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};

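/*
 * Probe a "qcom,msm-fastrpc-compute-cb" node: match the label to a
 * channel, read the context-bank number from the "iommus" phandle,
 * create and attach an ARM IOMMU mapping (secure context banks get a
 * secure VMID and a lower VA base) and register the SMMU session.
 */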
static int fastrpc_cb_probe(struct device *dev)
{
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	struct of_phandle_args iommuspec;
	const char *name;
	unsigned int start = 0x80000000;
	int err = 0, i;
	int secure_vmid = VMID_CP_PIXEL;

	VERIFY(err, 0 != (name = of_get_property(dev->of_node, "label", NULL)));
	if (err)
		goto bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		if (!strcmp(name, gcinfo[i].name))
			break;
	}
	VERIFY(err, i < NUM_CHANNELS);
	if (err)
		goto bail;
	chan = &gcinfo[i];
	VERIFY(err, chan->sesscount < NUM_SESSIONS);
	if (err)
		goto bail;

	VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
						"#iommu-cells", 0, &iommuspec));
	if (err)
		goto bail;
	sess = &chan->session[chan->sesscount];
	sess->smmu.cb = iommuspec.args[0] & 0xf;
	sess->used = 0;
	sess->smmu.coherent = of_property_read_bool(dev->of_node,
						"dma-coherent");
	sess->smmu.secure = of_property_read_bool(dev->of_node,
						"qcom,secure-context-bank");
	if (sess->smmu.secure)
		start = 0x60000000;
	VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
			arm_iommu_create_mapping(&platform_bus_type,
						start, 0x7fffffff)));
	if (err)
		goto bail;

	if (sess->smmu.secure)
		iommu_domain_set_attr(sess->smmu.mapping->domain,
				DOMAIN_ATTR_SECURE_VMID,
				&secure_vmid);

	VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
	if (err)
		goto bail;
	sess->dev = dev;
	sess->smmu.enabled = 1;
	chan->sesscount++;
bail:
	return err;
}

static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
					VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				};

			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
					srcVM, 1, destVM, destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					  fastrpc_match_table,
					  NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}

static void fastrpc_deinit(void)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *chan = gcinfo;
	int i, j;

	for (i = 0; i < NUM_CHANNELS; i++, chan++) {
		if (chan->chan) {
			kref_put_mutex(&chan->kref,
				fastrpc_channel_close, &me->smd_mutex);
			chan->chan = 0;
		}
		for (j = 0; j < NUM_SESSIONS; j++) {
			struct fastrpc_session_ctx *sess = &chan->session[j];

			if (sess->smmu.enabled) {
				arm_iommu_detach_device(sess->dev);
				sess->dev = 0;
			}
			if (sess->smmu.mapping) {
				arm_iommu_release_mapping(sess->smmu.mapping);
				sess->smmu.mapping = 0;
			}
		}
	}
}

static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};

static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				NUM_CHANNELS));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		me->channel[i].dev = device_create(me->class, NULL,
					MKDEV(MAJOR(me->dev_no), i),
					NULL, gcinfo[i].name);
		VERIFY(err, !IS_ERR(me->channel[i].dev));
		if (err)
			goto device_create_bail;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	return 0;
device_create_bail:
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (IS_ERR_OR_NULL(me->channel[i].dev))
			continue;
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						&me->channel[i].nb);
	}
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}

static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;
	int i;

	fastrpc_file_list_dtor(me);
	fastrpc_deinit();
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						&me->channel[i].nb);
	}
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
	ion_client_destroy(me->client);
}

late_initcall(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");