/*
 * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/msm_ion.h>
#include <soc/qcom/secure_buffer.h>
#include <soc/qcom/glink.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>
#include <linux/scatterlist.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/sort.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <asm/dma-iommu.h>
#include "adsprpc_compat.h"
#include "adsprpc_shared.h"

#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
#define TZ_PIL_AUTH_QDSP6_PROC 1
#define FASTRPC_ENOSUCH 39
#define VMID_SSC_Q6 5
#define VMID_ADSP_Q6 6

#define RPC_TIMEOUT (5 * HZ)
#define BALIGN 128
#define NUM_CHANNELS 4	/* adsp, mdsp, slpi, cdsp */
#define NUM_SESSIONS 9	/* 8 compute, 1 cpz */
#define M_FDLIST 16

#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)

#define FASTRPC_LINK_STATE_DOWN (0x0)
#define FASTRPC_LINK_STATE_UP (0x1)
#define FASTRPC_LINK_DISCONNECTED (0x0)
#define FASTRPC_LINK_CONNECTING (0x1)
#define FASTRPC_LINK_CONNECTED (0x3)
#define FASTRPC_LINK_DISCONNECTING (0x7)

static int fastrpc_glink_open(int cid);
static void fastrpc_glink_close(void *chan, int cid);

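/*
 * Small helpers for page-granular arithmetic on buffer addresses:
 * start/offset of the page containing a buffer, the number of pages a
 * buffer spans, a size rounded up to whole pages, and 64-bit/pointer
 * conversions used by the shared message layout.
 */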
static inline uint64_t buf_page_start(uint64_t buf)
{
	uint64_t start = (uint64_t) buf & PAGE_MASK;
	return start;
}

static inline uint64_t buf_page_offset(uint64_t buf)
{
	uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
	return offset;
}

static inline int buf_num_pages(uint64_t buf, ssize_t len)
{
	uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
	uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
	int npages = end - start + 1;
	return npages;
}

static inline uint64_t buf_page_size(uint32_t size)
{
	uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;

	return sz > PAGE_SIZE ? sz : PAGE_SIZE;
}

static inline void *uint64_to_ptr(uint64_t addr)
{
	void *ptr = (void *)((uintptr_t)addr);

	return ptr;
}

static inline uint64_t ptr_to_uint64(void *ptr)
{
	uint64_t addr = (uint64_t)((uintptr_t)ptr);

	return addr;
}

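/*
 * Core driver state: fastrpc_buf tracks a DMA allocation owned by a
 * client file, smq_invoke_ctx carries one in-flight remote call,
 * fastrpc_mmap tracks an imported ion buffer, and fastrpc_channel_ctx/
 * fastrpc_file hold per-DSP-channel and per-client state respectively.
 */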
struct fastrpc_file;

struct fastrpc_buf {
	struct hlist_node hn;
	struct fastrpc_file *fl;
	void *virt;
	uint64_t phys;
	ssize_t size;
};

struct fastrpc_ctx_lst;

struct overlap {
	uintptr_t start;
	uintptr_t end;
	int raix;
	uintptr_t mstart;
	uintptr_t mend;
	uintptr_t offset;
};

struct smq_invoke_ctx {
	struct hlist_node hn;
	struct completion work;
	int retval;
	int pid;
	int tgid;
	remote_arg_t *lpra;
	remote_arg64_t *rpra;
	int *fds;
	unsigned int *attrs;
	struct fastrpc_mmap **maps;
	struct fastrpc_buf *buf;
	ssize_t used;
	struct fastrpc_file *fl;
	uint32_t sc;
	struct overlap *overs;
	struct overlap **overps;
	struct smq_msg msg;
};

struct fastrpc_ctx_lst {
	struct hlist_head pending;
	struct hlist_head interrupted;
};

struct fastrpc_smmu {
	struct dma_iommu_mapping *mapping;
	int cb;
	int enabled;
	int faults;
	int secure;
	int coherent;
};

struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};

struct fastrpc_glink_info {
	int link_state;
	int port_state;
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};

struct fastrpc_channel_ctx {
	char *name;
	char *subsys;
	void *chan;
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;
	struct notifier_block nb;
	struct kref kref;
	int sesscount;
	int ssrcount;
	void *handle;
	int prevssrcount;
	int vmid;
	struct fastrpc_glink_info link;
};

struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;
	spinlock_t hlock;
	struct ion_client *client;
	struct device *dev;
};

struct fastrpc_mmap {
	struct hlist_node hn;
	struct fastrpc_file *fl;
	struct fastrpc_apps *apps;
	int fd;
	uint32_t flags;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;
	ssize_t size;
	uintptr_t va;
	ssize_t len;
	int refs;
	uintptr_t raddr;
	int uncached;
	int secure;
	uintptr_t attr;
};

struct fastrpc_file {
	struct hlist_node hn;
	spinlock_t hlock;
	struct hlist_head maps;
	struct hlist_head bufs;
	struct fastrpc_ctx_lst clst;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_session_ctx *secsctx;
	uint32_t mode;
	int tgid;
	int cid;
	int ssrcount;
	int pd;
	struct fastrpc_apps *apps;
};

static struct fastrpc_apps gfa;

static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "dsps",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};

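/*
 * Release a buffer: either park it on the owner's cache list for reuse,
 * or return it to HLOS ownership (if it had been hyp-assigned to the
 * remote VM) and free the coherent memory.
 */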
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == 0 ? 0 : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->dev, buf->size, buf->virt,
					buf->phys);
	}
	kfree(buf);
}

static void fastrpc_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			hlist_del_init(&buf->hn);
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}

static void fastrpc_mmap_add(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl = map->fl;

	spin_lock(&fl->hlock);
	hlist_add_head(&map->hn, &fl->maps);
	spin_unlock(&fl->hlock);
}

static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
			     ssize_t len, int mflags, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = 0, *map;
	struct hlist_node *n;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (va >= map->va &&
		    va + len <= map->va + map->len &&
		    map->fd == fd) {
			map->refs++;
			match = map;
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       ssize_t len, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = 0, *map;
	struct hlist_node *n;
	struct fastrpc_apps *me = &gfa;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
		if (map->raddr == va &&
		    map->raddr + map->len == va + len &&
		    map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (map->raddr == va &&
		    map->raddr + map->len == va + len &&
		    map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

static void fastrpc_mmap_free(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;
	int destVM[1] = {VMID_HLOS};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	if (!map)
		return;
	fl = map->fl;
	spin_lock(&fl->hlock);
	map->refs--;
	if (!map->refs)
		hlist_del_init(&map->hn);
	spin_unlock(&fl->hlock);
	if (map->refs > 0)
		return;
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	if (!IS_ERR_OR_NULL(map->handle))
		ion_free(fl->apps->client, map->handle);
	if (sess->smmu.enabled) {
		if (map->size || map->phys)
			msm_dma_unmap_sg(sess->dev,
				map->table->sgl,
				map->table->nents, DMA_BIDIRECTIONAL,
				map->buf);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid && map->phys) {
		int srcVM[2] = {VMID_HLOS, vmid};

		hyp_assign_phys(map->phys, buf_page_size(map->size),
				srcVM, 2, destVM, destVMperm, 1);
	}

	if (!IS_ERR_OR_NULL(map->table))
		dma_buf_unmap_attachment(map->attach, map->table,
				DMA_BIDIRECTIONAL);
	if (!IS_ERR_OR_NULL(map->attach))
		dma_buf_detach(map->buf, map->attach);
	if (!IS_ERR_OR_NULL(map->buf))
		dma_buf_put(map->buf);
	kfree(map);
}

static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
				 struct fastrpc_session_ctx **session);

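/*
 * Import an ion buffer by fd, attach it to the session's device, map it
 * for DMA (through the SMMU when one is enabled) and, on channels with
 * a remote vmid, hyp-assign the pages so the DSP may access them.
 */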
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, ssize_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = 0;
	unsigned long attrs;
	unsigned long flags;
	int err = 0, vmid;

	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	VERIFY(err, !IS_ERR_OR_NULL(map->handle =
			ion_import_dma_buf_fd(fl->apps->client, fd)));
	if (err)
		goto bail;
	VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
	if (err)
		goto bail;

	map->uncached = !ION_IS_CACHED(flags);
	if (map->attr & FASTRPC_ATTR_NOVA)
		map->uncached = 1;

	map->secure = flags & ION_FLAG_SECURE;
	if (map->secure) {
		if (!fl->secsctx)
			err = fastrpc_session_alloc(chan, 1,
						&fl->secsctx);
		if (err)
			goto bail;
	}
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->attach =
			dma_buf_attach(map->buf, sess->dev)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
	if (err)
		goto bail;
	if (sess->smmu.enabled) {
		attrs = DMA_ATTR_EXEC_MAPPING;
		VERIFY(err, map->table->nents ==
			msm_dma_map_sg_attrs(sess->dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
		if (err)
			goto bail;
	} else {
		VERIFY(err, map->table->nents == 1);
		if (err)
			goto bail;
	}
	map->phys = sg_dma_address(map->table->sgl);
	if (sess->smmu.cb) {
		map->phys += ((uint64_t)sess->smmu.cb << 32);
		map->size = sg_dma_len(map->table->sgl);
	} else {
		map->size = buf_page_size(len);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(map->phys,
				buf_page_size(map->size),
				srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}
	map->va = va;
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}

static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
			     struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = 0, *fr = 0;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = 0;
	VERIFY(err, buf = kzalloc(sizeof(*buf), GFP_KERNEL));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = 0;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
				       (void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
					       (void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}

static int context_restore_interrupted(struct fastrpc_file *fl,
				struct fastrpc_ioctl_invoke_attrs *inv,
				struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = 0, *ictx = 0;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int overlap_ptr_cmp(const void *a, const void *b)
{
	struct overlap *pa = *((struct overlap **)a);
	struct overlap *pb = *((struct overlap **)b);
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);
	return st == 0 ? ed : st;
}

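/*
 * Sort the argument buffers by address and compute, for each one, the
 * sub-range (mstart..mend) not already covered by a buffer earlier in
 * the sorted order, so overlapping user buffers are copied only once
 * into the shared message.
 */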
static void context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, 0);
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
}

#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst), (src),\
							(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((dst), (src),\
						      (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

static void context_free(struct smq_invoke_ctx *ctx);

static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			 struct fastrpc_ioctl_invoke_attrs *invokefd,
			 struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}

	ctx->sc = invoke->sc;
	if (bufs)
		context_build_overlap(ctx);
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = current->tgid;
	init_completion(&ctx->work);

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}

static void context_save_interrupted(struct smq_invoke_ctx *ctx)
{
	struct fastrpc_ctx_lst *clst = &ctx->fl->clst;

	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	hlist_add_head(&ctx->hn, &clst->interrupted);
	spin_unlock(&ctx->fl->hlock);
	/* free the cache on power collapse */
	fastrpc_buf_list_free(ctx->fl);
}

static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i]);
	fastrpc_buf_free(ctx->buf, 1);
	kfree(ctx);
}

static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}

static void fastrpc_notify_users(struct fastrpc_file *me)
{
	struct smq_invoke_ctx *ictx;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
		complete(&ictx->work);
	}
	hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
		complete(&ictx->work);
	}
	spin_unlock(&me->hlock);
}

static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
{
	struct fastrpc_file *fl;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		if (fl->cid == cid)
			fastrpc_notify_users(fl);
	}
	spin_unlock(&me->hlock);
}

static void context_list_ctor(struct fastrpc_ctx_lst *me)
{
	INIT_HLIST_HEAD(&me->interrupted);
	INIT_HLIST_HEAD(&me->pending);
}

static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
{
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct smq_invoke_ctx *ictx = 0, *ctxfree;
	struct hlist_node *n;

	do {
		ctxfree = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
	do {
		ctxfree = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
}

static int fastrpc_file_free(struct fastrpc_file *fl);
static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
{
	struct fastrpc_file *fl, *free;
	struct hlist_node *n;

	do {
		free = 0;
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
			hlist_del_init(&fl->hn);
			free = fl;
			break;
		}
		spin_unlock(&me->hlock);
		if (free)
			fastrpc_file_free(free);
	} while (free);
}

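/*
 * Marshal the invocation arguments: build the metadata (buffer list,
 * physical pages, fd list) in a freshly allocated message buffer, map
 * ion-backed arguments directly, copy the remaining input buffers, and
 * flush caches so the DSP sees consistent data.
 */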
static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
{
	remote_arg64_t *rpra;
	remote_arg_t *lpra = ctx->lpra;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages, *ipage;
	uint32_t sc = ctx->sc;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	int handles, bufs = inbufs + outbufs;
	uintptr_t args;
	ssize_t rlen = 0, copylen = 0, metalen = 0;
	int i, oix;
	int err = 0;
	int mflags = 0;
	uint64_t *fdlist;

	/* calculate size of the metadata */
	rpra = 0;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;

	for (i = 0; i < bufs; ++i) {
		uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
		ssize_t len = lpra[i].buf.len;

		if (ctx->fds[i] && (ctx->fds[i] != -1))
			fastrpc_mmap_create(ctx->fl, ctx->fds[i],
					ctx->attrs[i], buf, len,
					mflags, &ctx->maps[i]);
		ipage += 1;
	}
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	for (i = bufs; i < bufs + handles; i++) {
		VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
				FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
		if (err)
			goto bail;
		ipage += 1;
	}
	metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST);
	/* calculate len required for copying */
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		ssize_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (ctx->maps[i])
			continue;
		if (ctx->overps[oix]->offset == 0)
			copylen = ALIGN(copylen, BALIGN);
		copylen += ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
	}
	ctx->used = copylen;

	/* allocate new buffer */
	if (copylen) {
		VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
		if (err)
			goto bail;
	}
	/* copy metadata */
	rpra = ctx->buf->virt;
	ctx->rpra = rpra;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;
	args = (uintptr_t)ctx->buf->virt + metalen;
	for (i = 0; i < bufs + handles; ++i) {
		if (lpra[i].buf.len)
			list[i].num = 1;
		else
			list[i].num = 0;
		list[i].pgidx = ipage - pages;
		ipage++;
	}
	/* map ion buffers */
	for (i = 0; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];
		uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
		ssize_t len = lpra[i].buf.len;

		rpra[i].buf.pv = 0;
		rpra[i].buf.len = len;
		if (!len)
			continue;
		if (map) {
			struct vm_area_struct *vma;
			uintptr_t offset;
			int num = buf_num_pages(buf, len);
			int idx = list[i].pgidx;

			if (map->attr & FASTRPC_ATTR_NOVA) {
				offset = 0;
			} else {
				down_read(&current->mm->mmap_sem);
				VERIFY(err, NULL != (vma = find_vma(current->mm,
								map->va)));
				if (err) {
					up_read(&current->mm->mmap_sem);
					goto bail;
				}
				offset = buf_page_start(buf) - vma->vm_start;
				up_read(&current->mm->mmap_sem);
				VERIFY(err, offset < (uintptr_t)map->size);
				if (err)
					goto bail;
			}
			pages[idx].addr = map->phys + offset;
			pages[idx].size = num << PAGE_SHIFT;
		}
		rpra[i].buf.pv = buf;
	}
	for (i = bufs; i < bufs + handles; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		pages[i].addr = map->phys;
		pages[i].size = map->size;
	}
	fdlist = (uint64_t *)&pages[bufs + handles];
	for (i = 0; i < M_FDLIST; i++)
		fdlist[i] = 0;
	/* copy non ion buffers */
	rlen = copylen - metalen;
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];
		int mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
		uint64_t buf;
		ssize_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (map)
			continue;
		if (ctx->overps[oix]->offset == 0) {
			rlen -= ALIGN(args, BALIGN) - args;
			args = ALIGN(args, BALIGN);
		}
		VERIFY(err, rlen >= mlen);
		if (err)
			goto bail;
		rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
		pages[list[i].pgidx].addr = ctx->buf->phys -
					    ctx->overps[oix]->offset +
					    (copylen - rlen);
		pages[list[i].pgidx].addr =
			buf_page_start(pages[list[i].pgidx].addr);
		buf = rpra[i].buf.pv;
		pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
		if (i < inbufs) {
			K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
					lpra[i].buf.pv, len);
			if (err)
				goto bail;
		}
		args = args + mlen;
		rlen -= mlen;
	}

	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];

		if (ctx->fl->sctx->smmu.coherent)
			continue;
		if (map && map->uncached)
			continue;
		if (rpra[i].buf.len && ctx->overps[oix]->mstart)
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
	}
	for (i = bufs; i < bufs + handles; i++) {
		rpra[i].dma.fd = ctx->fds[i];
		rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
		rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
	}
	if (!ctx->fl->sctx->smmu.coherent)
		dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
bail:
	return err;
}

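/*
 * Unmarshal results: copy output buffers back to the caller and release
 * any maps the DSP asked to drop via the fd list in the response.
 */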
static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		    remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			fastrpc_mmap_free(ctx->maps[i]);
			ctx->maps[i] = 0;
		}
	}
	if (inbufs + outbufs + handles) {
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, &mmap))
				fastrpc_mmap_free(mmap);
		}
	}
bail:
	return err;
}

static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
							rpra[i].buf.len);
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}

static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						 + rpra[i].buf.len));
	}

	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}

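/*
 * Queue one invoke message to the DSP over the glink channel; fails if
 * the channel went through an SSR or the link is not fully connected.
 */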
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, 0 != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = current->tgid;
	msg->tid = current->pid;
	if (kernel)
		msg->pid = 0;
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
			FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
bail:
	return err;
}

static void fastrpc_init(struct fastrpc_apps *me)
{
	int i;

	INIT_HLIST_HEAD(&me->drivers);
	spin_lock_init(&me->hlock);
	mutex_init(&me->smd_mutex);
	me->channel = &gcinfo[0];
	for (i = 0; i < NUM_CHANNELS; i++) {
		init_completion(&me->channel[i].work);
		me->channel[i].sesscount = 0;
	}
}

static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);

static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
				   uint32_t kernel,
				   struct fastrpc_ioctl_invoke_attrs *inv)
{
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;
	int cid = fl->cid;
	int interrupted = 0;
	int err = 0;

	if (!kernel) {
		VERIFY(err, 0 == context_restore_interrupted(fl, inv,
								&ctx));
		if (err)
			goto bail;
		if (fl->sctx->smmu.faults)
			err = FASTRPC_ENOSUCH;
		if (err)
			goto bail;
		if (ctx)
			goto wait;
	}

	VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
	if (err)
		goto bail;

	if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
		VERIFY(err, 0 == get_args(kernel, ctx));
		if (err)
			goto bail;
	}

	if (!fl->sctx->smmu.coherent) {
		inv_args_pre(ctx);
		if (mode == FASTRPC_MODE_SERIAL)
			inv_args(ctx);
	}
	VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
	if (err)
		goto bail;
	if (mode == FASTRPC_MODE_PARALLEL && !fl->sctx->smmu.coherent)
		inv_args(ctx);
wait:
	if (kernel)
		wait_for_completion(&ctx->work);
	else {
		interrupted = wait_for_completion_interruptible(&ctx->work);
		VERIFY(err, 0 == (err = interrupted));
		if (err)
			goto bail;
	}
	VERIFY(err, 0 == (err = ctx->retval));
	if (err)
		goto bail;
	VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
	if (err)
		goto bail;
bail:
	if (ctx && interrupted == -ERESTARTSYS)
		context_save_interrupted(ctx);
	else if (ctx)
		context_free(ctx);
	if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
		err = -ECONNRESET;
	return err;
}

static int fastrpc_init_process(struct fastrpc_file *fl,
				struct fastrpc_ioctl_init *init)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_attrs ioctl;
	struct smq_phy_page pages[1];
	struct fastrpc_mmap *file = 0, *mem = 0;

	if (init->flags == FASTRPC_INIT_ATTACH) {
		remote_arg_t ra[1];
		int tgid = current->tgid;

		ra[0].buf.pv = (void *)&tgid;
		ra[0].buf.len = sizeof(tgid);
		ioctl.inv.handle = 1;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = 0;
		ioctl.attrs = 0;
		fl->pd = 0;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else if (init->flags == FASTRPC_INIT_CREATE) {
		remote_arg_t ra[4];
		int fds[4];
		int mflags = 0;
		struct {
			int pgid;
			int namelen;
			int filelen;
			int pageslen;
		} inbuf;

		inbuf.pgid = current->tgid;
		inbuf.namelen = strlen(current->comm) + 1;
		inbuf.filelen = init->filelen;
		fl->pd = 1;
		if (init->filelen) {
			VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
				init->file, init->filelen, mflags, &file));
			if (err)
				goto bail;
		}
		inbuf.pageslen = 1;
		VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
				init->mem, init->memlen, mflags, &mem));
		if (err)
			goto bail;
		inbuf.pageslen = 1;
		ra[0].buf.pv = (void *)&inbuf;
		ra[0].buf.len = sizeof(inbuf);
		fds[0] = 0;

		ra[1].buf.pv = (void *)current->comm;
		ra[1].buf.len = inbuf.namelen;
		fds[1] = 0;

		ra[2].buf.pv = (void *)init->file;
		ra[2].buf.len = inbuf.filelen;
		fds[2] = init->filefd;

		pages[0].addr = mem->phys;
		pages[0].size = mem->size;
		ra[3].buf.pv = (void *)pages;
		ra[3].buf.len = 1 * sizeof(*pages);
		fds[3] = 0;

		ioctl.inv.handle = 1;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = fds;
		ioctl.attrs = 0;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else {
		err = -ENOTTY;
	}
bail:
	if (mem && err)
		fastrpc_mmap_free(mem);
	if (file)
		fastrpc_mmap_free(file);
	return err;
}

static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_attrs ioctl;
	remote_arg_t ra[1];
	int tgid = 0;

	VERIFY(err, fl->apps->channel[fl->cid].chan != 0);
	if (err)
		goto bail;
	tgid = fl->tgid;
	ra[0].buf.pv = (void *)&tgid;
	ra[0].buf.len = sizeof(tgid);
	ioctl.inv.handle = 1;
	ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
bail:
	return err;
}

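/*
 * Remote-map/unmap helpers: ask the DSP (via the static handle-1
 * service) to map a set of physical pages into the remote process, or
 * to drop such a mapping; the remote address comes back in routargs.
 */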
static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
			       struct fastrpc_mmap *map)
{
	struct fastrpc_ioctl_invoke_attrs ioctl;
	struct smq_phy_page page;
	int num = 1;
	remote_arg_t ra[3];
	int err = 0;
	struct {
		int pid;
		uint32_t flags;
		uintptr_t vaddrin;
		int num;
	} inargs;
	struct {
		uintptr_t vaddrout;
	} routargs;

	inargs.pid = current->tgid;
	inargs.vaddrin = (uintptr_t)map->va;
	inargs.flags = flags;
	inargs.num = fl->apps->compat ? num * sizeof(page) : num;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);
	page.addr = map->phys;
	page.size = map->size;
	ra[1].buf.pv = (void *)&page;
	ra[1].buf.len = num * sizeof(page);

	ra[2].buf.pv = (void *)&routargs;
	ra[2].buf.len = sizeof(routargs);

	ioctl.inv.handle = 1;
	if (fl->apps->compat)
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
	else
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	map->raddr = (uintptr_t)routargs.vaddrout;

	return err;
}

static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
				 struct fastrpc_mmap *map)
{
	struct fastrpc_ioctl_invoke_attrs ioctl;
	remote_arg_t ra[1];
	int err = 0;
	struct {
		int pid;
		uintptr_t vaddrout;
		ssize_t size;
	} inargs;

	inargs.pid = current->tgid;
	inargs.size = map->size;
	inargs.vaddrout = map->raddr;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);

	ioctl.inv.handle = 1;
	if (fl->apps->compat)
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
	else
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	return err;
}

static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       ssize_t len, struct fastrpc_mmap **ppmap);

static void fastrpc_mmap_add(struct fastrpc_mmap *map);

static int fastrpc_internal_munmap(struct fastrpc_file *fl,
				   struct fastrpc_ioctl_munmap *ud)
{
	int err = 0;
	struct fastrpc_mmap *map = 0;

	VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
	if (err)
		goto bail;
	VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
	if (err)
		goto bail;
	fastrpc_mmap_free(map);
bail:
	if (err && map)
		fastrpc_mmap_add(map);
	return err;
}

static int fastrpc_internal_mmap(struct fastrpc_file *fl,
				 struct fastrpc_ioctl_mmap *ud)
{
	struct fastrpc_mmap *map = 0;
	int err = 0;

	if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin, ud->size,
			       ud->flags, &map))
		return 0;

	VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
			(uintptr_t)ud->vaddrin, ud->size, ud->flags, &map));
	if (err)
		goto bail;
	VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
	if (err)
		goto bail;
	ud->vaddrout = map->raddr;
bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}

static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = 0;
	mutex_unlock(&me->smd_mutex);
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
		MAJOR(me->dev_no), cid);
}

static void fastrpc_context_list_dtor(struct fastrpc_file *fl);

static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
			int secure, struct fastrpc_session_ctx **session)
{
	struct fastrpc_apps *me = &gfa;
	int idx = 0, err = 0;

	if (chan->sesscount) {
		for (idx = 0; idx < chan->sesscount; ++idx) {
			if (!chan->session[idx].used &&
				chan->session[idx].smmu.secure == secure) {
				chan->session[idx].used = 1;
				break;
			}
		}
		VERIFY(err, idx < chan->sesscount);
		if (err)
			goto bail;
		chan->session[idx].smmu.faults = 0;
	} else {
		VERIFY(err, me->dev != NULL);
		if (err)
			goto bail;
		chan->session[0].dev = me->dev;
	}

	*session = &chan->session[idx];
bail:
	return err;
}

bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv, size_t size)
{
	if (glink_queue_rx_intent(h, NULL, size))
		return false;
	return true;
}

void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
				  const void *pkt_priv, const void *ptr)
{
}

void fastrpc_glink_notify_rx(void *handle, const void *priv,
	const void *pkt_priv, const void *ptr, size_t size)
{
	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
	int len = size;

	while (len >= sizeof(*rsp) && rsp) {
		rsp->ctx = rsp->ctx & ~1;
		context_notify_user(uint64_to_ptr(rsp->ctx), rsp->retval);
		rsp++;
		len = len - sizeof(*rsp);
	}
	glink_rx_done(handle, ptr, true);
}

void fastrpc_glink_notify_state(void *handle, const void *priv,
				unsigned int event)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)(uintptr_t)priv;
	struct fastrpc_glink_info *link;

	if (cid < 0 || cid >= NUM_CHANNELS)
		return;
	link = &me->channel[cid].link;
	switch (event) {
	case GLINK_CONNECTED:
		link->port_state = FASTRPC_LINK_CONNECTED;
		complete(&me->channel[cid].work);
		break;
	case GLINK_LOCAL_DISCONNECTED:
		link->port_state = FASTRPC_LINK_DISCONNECTED;
		break;
	case GLINK_REMOTE_DISCONNECTED:
		if (me->channel[cid].chan &&
			link->link_state == FASTRPC_LINK_STATE_UP) {
			fastrpc_glink_close(me->channel[cid].chan, cid);
			me->channel[cid].chan = 0;
			link->port_state = FASTRPC_LINK_DISCONNECTED;
		}
		break;
	default:
		break;
	}
}

static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
				 struct fastrpc_session_ctx **session)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;

	mutex_lock(&me->smd_mutex);
	if (!*session)
		err = fastrpc_session_alloc_locked(chan, secure, session);
	mutex_unlock(&me->smd_mutex);
	return err;
}

static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
				 struct fastrpc_session_ctx *session)
{
	struct fastrpc_apps *me = &gfa;

	mutex_lock(&me->smd_mutex);
	session->used = 0;
	mutex_unlock(&me->smd_mutex);
}

static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n;
	struct fastrpc_mmap *map = 0;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	(void)fastrpc_release_current_dsp_process(fl);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map);
	}
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
				fastrpc_channel_close, &fl->apps->smd_mutex);
	if (fl->sctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
	kfree(fl);
	return 0;
}

static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;

	if (fl) {
		fastrpc_file_free(fl);
		file->private_data = 0;
	}
	return 0;
}

static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
				       void *priv)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)((uintptr_t)priv);
	struct fastrpc_glink_info *link;

	if (cid < 0 || cid >= NUM_CHANNELS)
		return;

	link = &me->channel[cid].link;
	switch (cb_info->link_state) {
	case GLINK_LINK_STATE_UP:
		link->link_state = FASTRPC_LINK_STATE_UP;
		complete(&me->channel[cid].work);
		break;
	case GLINK_LINK_STATE_DOWN:
		link->link_state = FASTRPC_LINK_STATE_DOWN;
		break;
	default:
		pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
		break;
	}
}

static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;

	link = &me->channel[cid].link;
	if (link->link_notify_handle != NULL)
		goto bail;

	link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
	link->link_notify_handle = glink_register_link_state_cb(
					&link->link_info,
					(void *)((uintptr_t)cid));
	VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
	if (err) {
		link->link_notify_handle = NULL;
		goto bail;
	}
	VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
						RPC_TIMEOUT));
bail:
	return err;
}

static void fastrpc_glink_close(void *chan, int cid)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		return;
	link = &gfa.channel[cid].link;

	if (link->port_state == FASTRPC_LINK_CONNECTED ||
		link->port_state == FASTRPC_LINK_CONNECTING) {
		link->port_state = FASTRPC_LINK_DISCONNECTING;
		glink_close(chan);
	}
}

static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &me->channel[cid].link.cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	if (link->port_state == FASTRPC_LINK_CONNECTED ||
		link->port_state == FASTRPC_LINK_CONNECTING) {
		goto bail;
	}

	link->port_state = FASTRPC_LINK_CONNECTING;
	cfg->priv = (void *)(uintptr_t)cid;
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err)
		goto bail;
	me->channel[cid].chan = handle;
bail:
	return err;
}

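/*
 * open(): allocate per-client state, bind the client to the channel
 * selected by the device minor number, and bring up the glink channel
 * on first use (taking a reference for each subsequent opener).
 */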
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	int cid = MINOR(inode->i_rdev);
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl = 0;

	VERIFY(err, fl = kzalloc(sizeof(*fl), GFP_KERNEL));
	if (err)
		return err;

	filp->private_data = fl;

	mutex_lock(&me->smd_mutex);

	context_list_ctor(&fl->clst);
	spin_lock_init(&fl->hlock);
	INIT_HLIST_HEAD(&fl->maps);
	INIT_HLIST_HEAD(&fl->bufs);
	INIT_HLIST_NODE(&fl->hn);
	fl->tgid = current->tgid;
	fl->apps = me;
	fl->cid = cid;
	VERIFY(err, !fastrpc_session_alloc_locked(&me->channel[cid], 0,
							&fl->sctx));
	if (err)
		goto bail;
	fl->ssrcount = me->channel[cid].ssrcount;
	if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
	    (me->channel[cid].chan == 0)) {
		fastrpc_glink_register(cid, me);
		VERIFY(err, 0 == fastrpc_glink_open(cid));
		if (err)
			goto bail;

		VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
							RPC_TIMEOUT));
		if (err) {
			me->channel[cid].chan = 0;
			goto bail;
		}
		kref_init(&me->channel[cid].kref);
		pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
			MAJOR(me->dev_no), cid);
		if (me->channel[cid].ssrcount !=
		    me->channel[cid].prevssrcount) {
			me->channel[cid].prevssrcount =
					me->channel[cid].ssrcount;
		}
	}
	spin_lock(&me->hlock);
	hlist_add_head(&fl->hn, &me->drivers);
	spin_unlock(&me->hlock);

bail:
	mutex_unlock(&me->smd_mutex);

	if (err)
		fastrpc_device_release(inode, filp);
	return err;
}

static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
{
	int err = 0;

	VERIFY(err, fl && fl->sctx);
	if (err)
		goto bail;
	*info = (fl->sctx->smmu.enabled ? 1 : 0);
bail:
	return err;
}

static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
				 unsigned long ioctl_param)
{
	union {
		struct fastrpc_ioctl_invoke_attrs inv;
		struct fastrpc_ioctl_mmap mmap;
		struct fastrpc_ioctl_munmap munmap;
		struct fastrpc_ioctl_init init;
	} p;
	void *param = (char *)ioctl_param;
	struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
	int size = 0, err = 0;
	uint32_t info;

	p.inv.fds = 0;
	p.inv.attrs = 0;

	switch (ioctl_num) {
	case FASTRPC_IOCTL_INVOKE:
		size = sizeof(struct fastrpc_ioctl_invoke);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_FD:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_fd);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_ATTRS:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_attrs);
		VERIFY(err, 0 == copy_from_user(&p.inv, param, size));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
						0, &p.inv)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MMAP:
		VERIFY(err, 0 == copy_from_user(&p.mmap, param,
						sizeof(p.mmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == copy_to_user(param, &p.mmap, sizeof(p.mmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MUNMAP:
		VERIFY(err, 0 == copy_from_user(&p.munmap, param,
						sizeof(p.munmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
							&p.munmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_SETMODE:
		switch ((uint32_t)ioctl_param) {
		case FASTRPC_MODE_PARALLEL:
		case FASTRPC_MODE_SERIAL:
			fl->mode = (uint32_t)ioctl_param;
			break;
		default:
			err = -ENOTTY;
			break;
		}
		break;
	case FASTRPC_IOCTL_GETINFO:
		VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
		if (err)
			goto bail;
		VERIFY(err, 0 == copy_to_user(param, &info, sizeof(info)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_INIT:
		VERIFY(err, 0 == copy_from_user(&p.init, param,
						sizeof(p.init)));
		if (err)
			goto bail;
		VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
		if (err)
			goto bail;
		break;
	default:
		err = -ENOTTY;
		pr_info("bad ioctl: %d\n", ioctl_num);
		break;
	}
bail:
	return err;
}

static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
				       unsigned long code,
				       void *data)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
	cid = ctx - &me->channel[0];
	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		mutex_lock(&me->smd_mutex);
		ctx->ssrcount++;
		if (ctx->chan) {
			fastrpc_glink_close(ctx->chan, cid);
			ctx->chan = 0;
			pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
				gcinfo[cid].name, MAJOR(me->dev_no), cid);
		}
		mutex_unlock(&me->smd_mutex);
		fastrpc_notify_drivers(me, cid);
	}

	return NOTIFY_DONE;
}

static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};

static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};

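/*
 * Probe one SMMU context bank: match the "label" property against a
 * channel, create and attach an IOMMU mapping (secure banks get a
 * separate VA range and a secure VMID), and record it as a session.
 */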
static int fastrpc_cb_probe(struct device *dev)
{
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	struct of_phandle_args iommuspec;
	const char *name;
	unsigned int start = 0x80000000;
	int err = 0, i;
	int secure_vmid = VMID_CP_PIXEL;

	VERIFY(err, 0 != (name = of_get_property(dev->of_node, "label", NULL)));
	if (err)
		goto bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		if (!strcmp(name, gcinfo[i].name))
			break;
	}
	VERIFY(err, i < NUM_CHANNELS);
	if (err)
		goto bail;
	chan = &gcinfo[i];
	VERIFY(err, chan->sesscount < NUM_SESSIONS);
	if (err)
		goto bail;

	VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
						"#iommu-cells", 0, &iommuspec));
	if (err)
		goto bail;
	sess = &chan->session[chan->sesscount];
	sess->smmu.cb = iommuspec.args[0] & 0xf;
	sess->used = 0;
	sess->smmu.coherent = of_property_read_bool(dev->of_node,
						    "dma-coherent");
	sess->smmu.secure = of_property_read_bool(dev->of_node,
						  "qcom,secure-context-bank");
	if (sess->smmu.secure)
		start = 0x60000000;
	VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
			arm_iommu_create_mapping(&platform_bus_type,
						 start, 0x7fffffff)));
	if (err)
		goto bail;

	if (sess->smmu.secure)
		iommu_domain_set_attr(sess->smmu.mapping->domain,
				      DOMAIN_ATTR_SECURE_VMID,
				      &secure_vmid);

	VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
	if (err)
		goto bail;
	sess->dev = dev;
	sess->smmu.enabled = 1;
	chan->sesscount++;
bail:
	return err;
}

static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
					 VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					};

			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
					srcVM, 1, destVM, destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					  fastrpc_match_table,
					  NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}

static void fastrpc_deinit(void)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *chan = gcinfo;
	int i, j;

	for (i = 0; i < NUM_CHANNELS; i++, chan++) {
		if (chan->chan) {
			kref_put_mutex(&chan->kref,
				fastrpc_channel_close, &me->smd_mutex);
			chan->chan = 0;
		}
		for (j = 0; j < NUM_SESSIONS; j++) {
			struct fastrpc_session_ctx *sess = &chan->session[j];

			if (sess->smmu.enabled) {
				arm_iommu_detach_device(sess->dev);
				sess->dev = 0;
			}
			if (sess->smmu.mapping) {
				arm_iommu_release_mapping(sess->smmu.mapping);
				sess->smmu.mapping = 0;
			}
		}
	}
}

static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};

static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					     DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				  NUM_CHANNELS));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		me->channel[i].dev = device_create(me->class, NULL,
						   MKDEV(MAJOR(me->dev_no), i),
						   NULL, gcinfo[i].name);
		VERIFY(err, !IS_ERR(me->channel[i].dev));
		if (err)
			goto device_create_bail;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	return 0;
device_create_bail:
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (IS_ERR_OR_NULL(me->channel[i].dev))
			continue;
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						 &me->channel[i].nb);
	}
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}

static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;
	int i;

	fastrpc_file_list_dtor(me);
	fastrpc_deinit();
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						 &me->channel[i].nb);
	}
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
	ion_client_destroy(me->client);
}

late_initcall(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");