/*
 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/msm_ion.h>
#include <soc/qcom/secure_buffer.h>
#include <soc/qcom/glink.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>
#include <linux/scatterlist.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/sort.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <asm/dma-iommu.h>
#include "adsprpc_compat.h"
#include "adsprpc_shared.h"

#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
#define TZ_PIL_AUTH_QDSP6_PROC 1
#define FASTRPC_ENOSUCH 39
#define VMID_SSC_Q6	5
#define VMID_ADSP_Q6	6

#define RPC_TIMEOUT	(5 * HZ)
#define BALIGN		128
#define NUM_CHANNELS	4	/* adsp, mdsp, slpi, cdsp */
#define NUM_SESSIONS	9	/* 8 compute, 1 cpz */
#define M_FDLIST	16

#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)

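/*
 * Link vs. port state: link_state tracks the glink transport link,
 * port_state the channel opened on it. The port states appear to be
 * bit-encoded (CONNECTING 0x1, CONNECTED 0x3, DISCONNECTING 0x7) so
 * that each stage keeps the bits of the previous one set.
 */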
#define FASTRPC_LINK_STATE_DOWN (0x0)
#define FASTRPC_LINK_STATE_UP (0x1)
#define FASTRPC_LINK_DISCONNECTED (0x0)
#define FASTRPC_LINK_CONNECTING (0x1)
#define FASTRPC_LINK_CONNECTED (0x3)
#define FASTRPC_LINK_DISCONNECTING (0x7)

static int fastrpc_glink_open(int cid);
static void fastrpc_glink_close(void *chan, int cid);

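/*
 * Page arithmetic helpers used when describing user buffers to the
 * DSP: page-aligned start, offset within the first page, number of
 * pages spanned, and a page-rounded size that is at least one page.
 */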
static inline uint64_t buf_page_start(uint64_t buf)
{
	uint64_t start = (uint64_t) buf & PAGE_MASK;
	return start;
}

static inline uint64_t buf_page_offset(uint64_t buf)
{
	uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
	return offset;
}

static inline int buf_num_pages(uint64_t buf, ssize_t len)
{
	uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
	uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
	int nPages = end - start + 1;
	return nPages;
}

static inline uint64_t buf_page_size(uint32_t size)
{
	uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;

	return sz > PAGE_SIZE ? sz : PAGE_SIZE;
}

static inline void *uint64_to_ptr(uint64_t addr)
{
	void *ptr = (void *)((uintptr_t)addr);

	return ptr;
}

static inline uint64_t ptr_to_uint64(void *ptr)
{
	uint64_t addr = (uint64_t)((uintptr_t)ptr);

	return addr;
}

struct fastrpc_file;

struct fastrpc_buf {
	struct hlist_node hn;
	struct fastrpc_file *fl;
	void *virt;
	uint64_t phys;
	ssize_t size;
};

struct fastrpc_ctx_lst;

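/*
 * Per-argument overlap descriptor: [start, end) is the user buffer
 * range and raix its original argument index; after overlap analysis,
 * [mstart, mend) is the sub-range that still needs its own copy and
 * offset is how far this buffer begins inside an earlier one.
 */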
struct overlap {
	uintptr_t start;
	uintptr_t end;
	int raix;
	uintptr_t mstart;
	uintptr_t mend;
	uintptr_t offset;
};

struct smq_invoke_ctx {
	struct hlist_node hn;
	struct completion work;
	int retval;
	int pid;
	int tgid;
	remote_arg_t *lpra;
	remote_arg64_t *rpra;
	int *fds;
	unsigned int *attrs;
	struct fastrpc_mmap **maps;
	struct fastrpc_buf *buf;
	ssize_t used;
	struct fastrpc_file *fl;
	uint32_t sc;
	struct overlap *overs;
	struct overlap **overps;
	struct smq_msg msg;
};

struct fastrpc_ctx_lst {
	struct hlist_head pending;
	struct hlist_head interrupted;
};

struct fastrpc_smmu {
	struct dma_iommu_mapping *mapping;
	int cb;
	int enabled;
	int faults;
	int secure;
	int coherent;
};

struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};

struct fastrpc_glink_info {
	int link_state;
	int port_state;
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};

struct fastrpc_channel_ctx {
	char *name;
	char *subsys;
	void *chan;
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;
	struct notifier_block nb;
	struct kref kref;
	int sesscount;
	int ssrcount;
	void *handle;
	int prevssrcount;
	int vmid;
	struct fastrpc_glink_info link;
};

struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;
	spinlock_t hlock;
	struct ion_client *client;
	struct device *dev;
};

struct fastrpc_mmap {
	struct hlist_node hn;
	struct fastrpc_file *fl;
	struct fastrpc_apps *apps;
	int fd;
	uint32_t flags;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;
	ssize_t size;
	uintptr_t va;
	ssize_t len;
	int refs;
	uintptr_t raddr;
	int uncached;
	int secure;
	uintptr_t attr;
};

struct fastrpc_file {
	struct hlist_node hn;
	spinlock_t hlock;
	struct hlist_head maps;
	struct hlist_head bufs;
	struct fastrpc_ctx_lst clst;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_session_ctx *secsctx;
	uint32_t mode;
	int tgid;
	int cid;
	int ssrcount;
	int pd;
	struct fastrpc_apps *apps;
};

static struct fastrpc_apps gfa;

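/*
 * Static channel table, one entry per remote subsystem: device name,
 * subsystem-restart domain, and the glink edge/transport to open.
 */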
static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "dsps",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};

static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == 0 ? 0 : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->dev, buf->size, buf->virt,
					buf->phys);
	}
	kfree(buf);
}

static void fastrpc_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			hlist_del_init(&buf->hn);
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}

static void fastrpc_mmap_add(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl = map->fl;

	spin_lock(&fl->hlock);
	hlist_add_head(&map->hn, &fl->maps);
	spin_unlock(&fl->hlock);
}

static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
	ssize_t len, int mflags, int refs, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = 0, *map;
	struct hlist_node *n;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (va >= map->va &&
		    va + len <= map->va + map->len &&
		    map->fd == fd) {
			if (refs)
				map->refs++;
			match = map;
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       ssize_t len, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = 0, *map;
	struct hlist_node *n;
	struct fastrpc_apps *me = &gfa;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
		if (map->raddr == va &&
		    map->raddr + map->len == va + len &&
		    map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (map->raddr == va &&
		    map->raddr + map->len == va + len &&
		    map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

static void fastrpc_mmap_free(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;
	int destVM[1] = {VMID_HLOS};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	if (!map)
		return;
	fl = map->fl;
	spin_lock(&fl->hlock);
	map->refs--;
	if (!map->refs)
		hlist_del_init(&map->hn);
	spin_unlock(&fl->hlock);
	if (map->refs > 0)
		return;
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	if (!IS_ERR_OR_NULL(map->handle))
		ion_free(fl->apps->client, map->handle);
	if (sess->smmu.enabled) {
		if (map->size || map->phys)
			msm_dma_unmap_sg(sess->dev,
				map->table->sgl,
				map->table->nents, DMA_BIDIRECTIONAL,
				map->buf);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid && map->phys) {
		int srcVM[2] = {VMID_HLOS, vmid};

		hyp_assign_phys(map->phys, buf_page_size(map->size),
			srcVM, 2, destVM, destVMperm, 1);
	}

	if (!IS_ERR_OR_NULL(map->table))
		dma_buf_unmap_attachment(map->attach, map->table,
				DMA_BIDIRECTIONAL);
	if (!IS_ERR_OR_NULL(map->attach))
		dma_buf_detach(map->buf, map->attach);
	if (!IS_ERR_OR_NULL(map->buf))
		dma_buf_put(map->buf);
	kfree(map);
}

static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
			struct fastrpc_session_ctx **session);

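/*
 * Import the ion buffer backing fd and map it for DSP access: attach
 * the dma-buf to the session device, map it through the SMMU when one
 * is enabled (the context bank number is folded into bits 32+ of the
 * device address), and hyp-assign the pages on channels with a remote
 * vmid. An existing map covering the same fd/range is reused instead.
 */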
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, ssize_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = 0;
	unsigned long attrs;
	unsigned long flags;
	int err = 0, vmid;

	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	VERIFY(err, !IS_ERR_OR_NULL(map->handle =
			ion_import_dma_buf_fd(fl->apps->client, fd)));
	if (err)
		goto bail;
	VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
	if (err)
		goto bail;

	map->uncached = !ION_IS_CACHED(flags);
	if (map->attr & FASTRPC_ATTR_NOVA)
		map->uncached = 1;

	map->secure = flags & ION_FLAG_SECURE;
	if (map->secure) {
		if (!fl->secsctx)
			err = fastrpc_session_alloc(chan, 1,
						&fl->secsctx);
		if (err)
			goto bail;
	}
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->attach =
			dma_buf_attach(map->buf, sess->dev)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
	if (err)
		goto bail;
	if (sess->smmu.enabled) {
		attrs = DMA_ATTR_EXEC_MAPPING;
		VERIFY(err, map->table->nents ==
			msm_dma_map_sg_attrs(sess->dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
		if (err)
			goto bail;
	} else {
		VERIFY(err, map->table->nents == 1);
		if (err)
			goto bail;
	}
	map->phys = sg_dma_address(map->table->sgl);
	if (sess->smmu.cb) {
		map->phys += ((uint64_t)sess->smmu.cb << 32);
		map->size = sg_dma_len(map->table->sgl);
	} else {
		map->size = buf_page_size(len);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(map->phys,
				buf_page_size(map->size),
				srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}
	map->va = va;
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}

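/*
 * Allocate a dma-coherent buffer, first trying to reuse the smallest
 * cached buffer that fits; on allocation failure the cache is drained
 * once and the allocation retried before giving up.
 */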
static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
			     struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = 0, *fr = 0;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = 0;
	VERIFY(err, buf = kzalloc(sizeof(*buf), GFP_KERNEL));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = 0;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
				       (void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
					       (void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
 bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}

static int context_restore_interrupted(struct fastrpc_file *fl,
				       struct fastrpc_ioctl_invoke_attrs *inv,
				       struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = 0, *ictx = 0;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int overlap_ptr_cmp(const void *a, const void *b)
{
	struct overlap *pa = *((struct overlap **)a);
	struct overlap *pb = *((struct overlap **)b);
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);
	return st == 0 ? ed : st;
}

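/*
 * Overlap analysis for copied buffers: sort the arguments by start
 * address (ties: larger end first), then sweep in order while
 * tracking the furthest end seen, so each buffer records only the
 * sub-range not already covered by earlier buffers and the offset at
 * which to copy it. Fully contained buffers end up with an empty
 * [mstart, mend) and need no copy of their own.
 */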
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, 0);
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}

#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst), (src),\
							(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((dst), (src),\
						      (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

static void context_free(struct smq_invoke_ctx *ctx);

static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			 struct fastrpc_ioctl_invoke_attrs *invokefd,
			 struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}

	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = current->tgid;
	init_completion(&ctx->work);

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}

static void context_save_interrupted(struct smq_invoke_ctx *ctx)
{
	struct fastrpc_ctx_lst *clst = &ctx->fl->clst;

	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	hlist_add_head(&ctx->hn, &clst->interrupted);
	spin_unlock(&ctx->fl->hlock);
	/* free the cache on power collapse */
	fastrpc_buf_list_free(ctx->fl);
}

static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);

	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i]);
	fastrpc_buf_free(ctx->buf, 1);
	kfree(ctx);
}

static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}

static void fastrpc_notify_users(struct fastrpc_file *me)
{
	struct smq_invoke_ctx *ictx;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
		complete(&ictx->work);
	}
	hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
		complete(&ictx->work);
	}
	spin_unlock(&me->hlock);
}

static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
{
	struct fastrpc_file *fl;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		if (fl->cid == cid)
			fastrpc_notify_users(fl);
	}
	spin_unlock(&me->hlock);
}

static void context_list_ctor(struct fastrpc_ctx_lst *me)
{
	INIT_HLIST_HEAD(&me->interrupted);
	INIT_HLIST_HEAD(&me->pending);
}

static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
{
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct smq_invoke_ctx *ictx = 0, *ctxfree;
	struct hlist_node *n;

	do {
		ctxfree = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
	do {
		ctxfree = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
}

static int fastrpc_file_free(struct fastrpc_file *fl);

static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
{
	struct fastrpc_file *fl, *free;
	struct hlist_node *n;

	do {
		free = 0;
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
			hlist_del_init(&fl->hn);
			free = fl;
			break;
		}
		spin_unlock(&me->hlock);
		if (free)
			fastrpc_file_free(free);
	} while (free);
}

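/*
 * Marshal arguments into a single message buffer laid out as: the
 * remote_arg64_t array, the invoke buffer list, the physical page
 * entries, an M_FDLIST-slot fd array the DSP can fill in, and then
 * the copied contents of non-ion buffers (BALIGN-aligned and
 * deduplicated using the overlap data built at context_alloc time).
 */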
static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
{
	remote_arg64_t *rpra;
	remote_arg_t *lpra = ctx->lpra;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages, *ipage;
	uint32_t sc = ctx->sc;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	int handles, bufs = inbufs + outbufs;
	uintptr_t args;
	ssize_t rlen = 0, copylen = 0, metalen = 0;
	int i, oix;
	int err = 0;
	int mflags = 0;
	uint64_t *fdlist;

	/* calculate size of the metadata */
	rpra = 0;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;

	for (i = 0; i < bufs; ++i) {
		uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
		ssize_t len = lpra[i].buf.len;

		if (ctx->fds[i] && (ctx->fds[i] != -1))
			fastrpc_mmap_create(ctx->fl, ctx->fds[i],
					ctx->attrs[i], buf, len,
					mflags, &ctx->maps[i]);
		ipage += 1;
	}
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	for (i = bufs; i < bufs + handles; i++) {
		VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
				FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
		if (err)
			goto bail;
		ipage += 1;
	}
	metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST);
	/* calculate len required for copying */
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		ssize_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (ctx->maps[i])
			continue;
		if (ctx->overps[oix]->offset == 0)
			copylen = ALIGN(copylen, BALIGN);
		copylen += ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
	}
	ctx->used = copylen;

	/* allocate new buffer */
	if (copylen) {
		VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
		if (err)
			goto bail;
	}
	if (ctx->buf->virt && metalen <= copylen)
		memset(ctx->buf->virt, 0, metalen);

	/* copy metadata */
	rpra = ctx->buf->virt;
	ctx->rpra = rpra;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;
	args = (uintptr_t)ctx->buf->virt + metalen;
	for (i = 0; i < bufs + handles; ++i) {
		if (lpra[i].buf.len)
			list[i].num = 1;
		else
			list[i].num = 0;
		list[i].pgidx = ipage - pages;
		ipage++;
	}
	/* map ion buffers */
	for (i = 0; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];
		uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
		ssize_t len = lpra[i].buf.len;

		rpra[i].buf.pv = 0;
		rpra[i].buf.len = len;
		if (!len)
			continue;
		if (map) {
			struct vm_area_struct *vma;
			uintptr_t offset;
			int num = buf_num_pages(buf, len);
			int idx = list[i].pgidx;

			if (map->attr & FASTRPC_ATTR_NOVA) {
				offset = 0;
			} else {
				down_read(&current->mm->mmap_sem);
				VERIFY(err, NULL != (vma = find_vma(current->mm,
								map->va)));
				if (err) {
					up_read(&current->mm->mmap_sem);
					goto bail;
				}
				offset = buf_page_start(buf) - vma->vm_start;
				up_read(&current->mm->mmap_sem);
				VERIFY(err, offset < (uintptr_t)map->size);
				if (err)
					goto bail;
			}
			pages[idx].addr = map->phys + offset;
			pages[idx].size = num << PAGE_SHIFT;
		}
		rpra[i].buf.pv = buf;
	}
	for (i = bufs; i < bufs + handles; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		pages[i].addr = map->phys;
		pages[i].size = map->size;
	}
	fdlist = (uint64_t *)&pages[bufs + handles];
	for (i = 0; i < M_FDLIST; i++)
		fdlist[i] = 0;
	/* copy non ion buffers */
	rlen = copylen - metalen;
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];
		int mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
		uint64_t buf;
		ssize_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (map)
			continue;
		if (ctx->overps[oix]->offset == 0) {
			rlen -= ALIGN(args, BALIGN) - args;
			args = ALIGN(args, BALIGN);
		}
		VERIFY(err, rlen >= mlen);
		if (err)
			goto bail;
		rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
		pages[list[i].pgidx].addr = ctx->buf->phys -
					    ctx->overps[oix]->offset +
					    (copylen - rlen);
		pages[list[i].pgidx].addr =
			buf_page_start(pages[list[i].pgidx].addr);
		buf = rpra[i].buf.pv;
		pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
		if (i < inbufs) {
			K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
					lpra[i].buf.pv, len);
			if (err)
				goto bail;
		}
		args = args + mlen;
		rlen -= mlen;
	}

	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];

		if (ctx->fl->sctx->smmu.coherent)
			continue;
		if (map && map->uncached)
			continue;
		if (rpra[i].buf.len && ctx->overps[oix]->mstart)
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
	}
	for (i = bufs; i < bufs + handles; i++) {
		rpra[i].dma.fd = ctx->fds[i];
		rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
		rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
	}
	if (!ctx->fl->sctx->smmu.coherent)
		dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
 bail:
	return err;
}

static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		    remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			fastrpc_mmap_free(ctx->maps[i]);
			ctx->maps[i] = 0;
		}
	}
	if (inbufs + outbufs + handles) {
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap);
		}
	}
 bail:
	return err;
}

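/*
 * Cache maintenance for non-coherent sessions: inv_args_pre() flushes
 * the unaligned edges of output buffers before the call so dirty
 * lines cannot later overwrite DSP writes; inv_args() invalidates the
 * output buffers so the CPU reads what the DSP wrote (before the send
 * in serial mode, concurrently with the call in parallel mode).
 */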
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
							rpra[i].buf.len);
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}

static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						+ rpra[i].buf.len));
	}

	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}

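/*
 * Build and transmit the invoke message over glink. The context
 * pointer doubles as the message cookie, with the PD flag carried in
 * bit 0; fastrpc_glink_notify_rx() masks that bit off to recover the
 * context when the response arrives.
 */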
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, 0 != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = current->tgid;
	msg->tid = current->pid;
	if (kernel)
		msg->pid = 0;
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
			FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
 bail:
	return err;
}

static void fastrpc_init(struct fastrpc_apps *me)
{
	int i;

	INIT_HLIST_HEAD(&me->drivers);
	spin_lock_init(&me->hlock);
	mutex_init(&me->smd_mutex);
	me->channel = &gcinfo[0];
	for (i = 0; i < NUM_CHANNELS; i++) {
		init_completion(&me->channel[i].work);
		me->channel[i].sesscount = 0;
	}
}

static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);

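/*
 * Core invoke path shared by the ioctl and kernel-initiated calls:
 * restore a matching interrupted context or allocate a new one,
 * marshal arguments, send, wait (interruptibly for user calls), and
 * unmarshal the results. A context interrupted by a signal is parked
 * on the interrupted list so a retried ioctl can resume the wait.
 */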
static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
				   uint32_t kernel,
				   struct fastrpc_ioctl_invoke_attrs *inv)
{
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;
	int cid = fl->cid;
	int interrupted = 0;
	int err = 0;

	if (!kernel) {
		VERIFY(err, 0 == context_restore_interrupted(fl, inv,
								&ctx));
		if (err)
			goto bail;
		if (fl->sctx->smmu.faults)
			err = FASTRPC_ENOSUCH;
		if (err)
			goto bail;
		if (ctx)
			goto wait;
	}

	VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
	if (err)
		goto bail;

	if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
		VERIFY(err, 0 == get_args(kernel, ctx));
		if (err)
			goto bail;
	}

	if (!fl->sctx->smmu.coherent) {
		inv_args_pre(ctx);
		if (mode == FASTRPC_MODE_SERIAL)
			inv_args(ctx);
	}
	VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
	if (err)
		goto bail;
	if (mode == FASTRPC_MODE_PARALLEL && !fl->sctx->smmu.coherent)
		inv_args(ctx);
 wait:
	if (kernel)
		wait_for_completion(&ctx->work);
	else {
		interrupted = wait_for_completion_interruptible(&ctx->work);
		VERIFY(err, 0 == (err = interrupted));
		if (err)
			goto bail;
	}
	VERIFY(err, 0 == (err = ctx->retval));
	if (err)
		goto bail;
	VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
	if (err)
		goto bail;
 bail:
	if (ctx && interrupted == -ERESTARTSYS)
		context_save_interrupted(ctx);
	else if (ctx)
		context_free(ctx);
	if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
		err = ECONNRESET;
	return err;
}

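/*
 * Set up the remote process: FASTRPC_INIT_ATTACH attaches the caller
 * to the pre-existing process on the DSP (pd 0), while
 * FASTRPC_INIT_CREATE spawns a dynamic user PD (pd 1), passing the
 * process name, the shell file and a pre-mapped memory region.
 */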
static int fastrpc_init_process(struct fastrpc_file *fl,
				struct fastrpc_ioctl_init *init)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_attrs ioctl;
	struct smq_phy_page pages[1];
	struct fastrpc_mmap *file = 0, *mem = 0;

	if (init->flags == FASTRPC_INIT_ATTACH) {
		remote_arg_t ra[1];
		int tgid = current->tgid;

		ra[0].buf.pv = (void *)&tgid;
		ra[0].buf.len = sizeof(tgid);
		ioctl.inv.handle = 1;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = 0;
		ioctl.attrs = 0;
		fl->pd = 0;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else if (init->flags == FASTRPC_INIT_CREATE) {
		remote_arg_t ra[4];
		int fds[4];
		int mflags = 0;
		struct {
			int pgid;
			int namelen;
			int filelen;
			int pageslen;
		} inbuf;

		inbuf.pgid = current->tgid;
		inbuf.namelen = strlen(current->comm) + 1;
		inbuf.filelen = init->filelen;
		fl->pd = 1;
		if (init->filelen) {
			VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
				init->file, init->filelen, mflags, &file));
			if (err)
				goto bail;
		}
		inbuf.pageslen = 1;
		VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
				init->mem, init->memlen, mflags, &mem));
		if (err)
			goto bail;
		inbuf.pageslen = 1;
		ra[0].buf.pv = (void *)&inbuf;
		ra[0].buf.len = sizeof(inbuf);
		fds[0] = 0;

		ra[1].buf.pv = (void *)current->comm;
		ra[1].buf.len = inbuf.namelen;
		fds[1] = 0;

		ra[2].buf.pv = (void *)init->file;
		ra[2].buf.len = inbuf.filelen;
		fds[2] = init->filefd;

		pages[0].addr = mem->phys;
		pages[0].size = mem->size;
		ra[3].buf.pv = (void *)pages;
		ra[3].buf.len = 1 * sizeof(*pages);
		fds[3] = 0;

		ioctl.inv.handle = 1;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = fds;
		ioctl.attrs = 0;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else {
		err = -ENOTTY;
	}
bail:
	if (mem && err)
		fastrpc_mmap_free(mem);
	if (file)
		fastrpc_mmap_free(file);
	return err;
}

static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_attrs ioctl;
	remote_arg_t ra[1];
	int tgid = 0;

	VERIFY(err, fl->apps->channel[fl->cid].chan != 0);
	if (err)
		goto bail;
	tgid = fl->tgid;
	ra[0].buf.pv = (void *)&tgid;
	ra[0].buf.len = sizeof(tgid);
	ioctl.inv.handle = 1;
	ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
bail:
	return err;
}

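/*
 * Mirror a mapping onto the DSP through remote calls on the static
 * handle 1: the mmap call sends the page list and returns the remote
 * virtual address (kept in map->raddr); munmap releases it. Compat
 * (32-bit userspace) uses the alternate scalar signatures.
 */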
static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
			       struct fastrpc_mmap *map)
{
	struct fastrpc_ioctl_invoke_attrs ioctl;
	struct smq_phy_page page;
	int num = 1;
	remote_arg_t ra[3];
	int err = 0;
	struct {
		int pid;
		uint32_t flags;
		uintptr_t vaddrin;
		int num;
	} inargs;
	struct {
		uintptr_t vaddrout;
	} routargs;

	inargs.pid = current->tgid;
	inargs.vaddrin = (uintptr_t)map->va;
	inargs.flags = flags;
	inargs.num = fl->apps->compat ? num * sizeof(page) : num;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);
	page.addr = map->phys;
	page.size = map->size;
	ra[1].buf.pv = (void *)&page;
	ra[1].buf.len = num * sizeof(page);

	ra[2].buf.pv = (void *)&routargs;
	ra[2].buf.len = sizeof(routargs);

	ioctl.inv.handle = 1;
	if (fl->apps->compat)
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
	else
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	map->raddr = (uintptr_t)routargs.vaddrout;

	return err;
}

static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
				 struct fastrpc_mmap *map)
{
	struct fastrpc_ioctl_invoke_attrs ioctl;
	remote_arg_t ra[1];
	int err = 0;
	struct {
		int pid;
		uintptr_t vaddrout;
		ssize_t size;
	} inargs;

	inargs.pid = current->tgid;
	inargs.size = map->size;
	inargs.vaddrout = map->raddr;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);

	ioctl.inv.handle = 1;
	if (fl->apps->compat)
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
	else
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	return err;
}

static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       ssize_t len, struct fastrpc_mmap **ppmap);

static void fastrpc_mmap_add(struct fastrpc_mmap *map);

static int fastrpc_internal_munmap(struct fastrpc_file *fl,
				   struct fastrpc_ioctl_munmap *ud)
{
	int err = 0;
	struct fastrpc_mmap *map = 0;

	VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
	if (err)
		goto bail;
	VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
	if (err)
		goto bail;
	fastrpc_mmap_free(map);
bail:
	if (err && map)
		fastrpc_mmap_add(map);
	return err;
}

static int fastrpc_internal_mmap(struct fastrpc_file *fl,
				 struct fastrpc_ioctl_mmap *ud)
{
	struct fastrpc_mmap *map = 0;
	int err = 0;

	if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin, ud->size,
			       ud->flags, 1, &map))
		return 0;

	VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
			(uintptr_t)ud->vaddrin, ud->size, ud->flags, &map));
	if (err)
		goto bail;
	VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
	if (err)
		goto bail;
	ud->vaddrout = map->raddr;
 bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}

static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = 0;
	mutex_unlock(&me->smd_mutex);
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
		MAJOR(me->dev_no), cid);
}

static void fastrpc_context_list_dtor(struct fastrpc_file *fl);

static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
			int secure, struct fastrpc_session_ctx **session)
{
	struct fastrpc_apps *me = &gfa;
	int idx = 0, err = 0;

	if (chan->sesscount) {
		for (idx = 0; idx < chan->sesscount; ++idx) {
			if (!chan->session[idx].used &&
				chan->session[idx].smmu.secure == secure) {
				chan->session[idx].used = 1;
				break;
			}
		}
		VERIFY(err, idx < chan->sesscount);
		if (err)
			goto bail;
		chan->session[idx].smmu.faults = 0;
	} else {
		VERIFY(err, me->dev != NULL);
		if (err)
			goto bail;
		chan->session[0].dev = me->dev;
	}

	*session = &chan->session[idx];
 bail:
	return err;
}

bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv, size_t size)
{
	if (glink_queue_rx_intent(h, NULL, size))
		return false;
	return true;
}

void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
		const void *pkt_priv, const void *ptr)
{
}

void fastrpc_glink_notify_rx(void *handle, const void *priv,
	const void *pkt_priv, const void *ptr, size_t size)
{
	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
	int len = size;

	while (len >= sizeof(*rsp) && rsp) {
		rsp->ctx = rsp->ctx & ~1;
		context_notify_user(uint64_to_ptr(rsp->ctx), rsp->retval);
		rsp++;
		len = len - sizeof(*rsp);
	}
	glink_rx_done(handle, ptr, true);
}

void fastrpc_glink_notify_state(void *handle, const void *priv,
				unsigned int event)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)(uintptr_t)priv;
	struct fastrpc_glink_info *link;

	if (cid < 0 || cid >= NUM_CHANNELS)
		return;
	link = &me->channel[cid].link;
	switch (event) {
	case GLINK_CONNECTED:
		link->port_state = FASTRPC_LINK_CONNECTED;
		complete(&me->channel[cid].work);
		break;
	case GLINK_LOCAL_DISCONNECTED:
		link->port_state = FASTRPC_LINK_DISCONNECTED;
		break;
	case GLINK_REMOTE_DISCONNECTED:
		if (me->channel[cid].chan &&
			link->link_state == FASTRPC_LINK_STATE_UP) {
			fastrpc_glink_close(me->channel[cid].chan, cid);
			me->channel[cid].chan = 0;
			link->port_state = FASTRPC_LINK_DISCONNECTED;
		}
		break;
	default:
		break;
	}
}

static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
			struct fastrpc_session_ctx **session)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;

	mutex_lock(&me->smd_mutex);
	if (!*session)
		err = fastrpc_session_alloc_locked(chan, secure, session);
	mutex_unlock(&me->smd_mutex);
	return err;
}

static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
				 struct fastrpc_session_ctx *session)
{
	struct fastrpc_apps *me = &gfa;

	mutex_lock(&me->smd_mutex);
	session->used = 0;
	mutex_unlock(&me->smd_mutex);
}

static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n;
	struct fastrpc_mmap *map = 0;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	(void)fastrpc_release_current_dsp_process(fl);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map);
	}
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
				fastrpc_channel_close, &fl->apps->smd_mutex);
	if (fl->sctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
	kfree(fl);
	return 0;
}

static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;

	if (fl) {
		fastrpc_file_free(fl);
		file->private_data = 0;
	}
	return 0;
}

static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
					void *priv)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)((uintptr_t)priv);
	struct fastrpc_glink_info *link;

	if (cid < 0 || cid >= NUM_CHANNELS)
		return;

	link = &me->channel[cid].link;
	switch (cb_info->link_state) {
	case GLINK_LINK_STATE_UP:
		link->link_state = FASTRPC_LINK_STATE_UP;
		complete(&me->channel[cid].work);
		break;
	case GLINK_LINK_STATE_DOWN:
		link->link_state = FASTRPC_LINK_STATE_DOWN;
		break;
	default:
		pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
		break;
	}
}

static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;

	link = &me->channel[cid].link;
	if (link->link_notify_handle != NULL)
		goto bail;

	link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
	link->link_notify_handle = glink_register_link_state_cb(
					&link->link_info,
					(void *)((uintptr_t)cid));
	VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
	if (err) {
		link->link_notify_handle = NULL;
		goto bail;
	}
	VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
						RPC_TIMEOUT));
bail:
	return err;
}

static void fastrpc_glink_close(void *chan, int cid)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		return;
	link = &gfa.channel[cid].link;

	if (link->port_state == FASTRPC_LINK_CONNECTED ||
		link->port_state == FASTRPC_LINK_CONNECTING) {
		link->port_state = FASTRPC_LINK_DISCONNECTING;
		glink_close(chan);
	}
}

static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &me->channel[cid].link.cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	if (link->port_state == FASTRPC_LINK_CONNECTED ||
		link->port_state == FASTRPC_LINK_CONNECTING) {
		goto bail;
	}

	link->port_state = FASTRPC_LINK_CONNECTING;
	cfg->priv = (void *)(uintptr_t)cid;
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err)
		goto bail;
	me->channel[cid].chan = handle;
bail:
	return err;
}

static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	int cid = MINOR(inode->i_rdev);
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl = 0;

	VERIFY(err, fl = kzalloc(sizeof(*fl), GFP_KERNEL));
	if (err)
		return err;

	filp->private_data = fl;

	mutex_lock(&me->smd_mutex);

	context_list_ctor(&fl->clst);
	spin_lock_init(&fl->hlock);
	INIT_HLIST_HEAD(&fl->maps);
	INIT_HLIST_HEAD(&fl->bufs);
	INIT_HLIST_NODE(&fl->hn);
	fl->tgid = current->tgid;
	fl->apps = me;
	fl->cid = cid;
	VERIFY(err, !fastrpc_session_alloc_locked(&me->channel[cid], 0,
							&fl->sctx));
	if (err)
		goto bail;
	fl->cid = cid;
	fl->ssrcount = me->channel[cid].ssrcount;
	if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
	    (me->channel[cid].chan == 0)) {
		fastrpc_glink_register(cid, me);
		VERIFY(err, 0 == fastrpc_glink_open(cid));
		if (err)
			goto bail;

		VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
							RPC_TIMEOUT));
		if (err) {
			me->channel[cid].chan = 0;
			goto bail;
		}
		kref_init(&me->channel[cid].kref);
		pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
			MAJOR(me->dev_no), cid);
		if (me->channel[cid].ssrcount !=
			 me->channel[cid].prevssrcount) {
			me->channel[cid].prevssrcount =
					me->channel[cid].ssrcount;
		}
	}
	spin_lock(&me->hlock);
	hlist_add_head(&fl->hn, &me->drivers);
	spin_unlock(&me->hlock);

bail:
	mutex_unlock(&me->smd_mutex);

	if (err)
		fastrpc_device_release(inode, filp);
	return err;
}

static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
{
	int err = 0;

	VERIFY(err, fl && fl->sctx);
	if (err)
		goto bail;
	*info = (fl->sctx->smmu.enabled ? 1 : 0);
bail:
	return err;
}

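/*
 * ioctl dispatcher. The three INVOKE variants share one body: the
 * first two cases fall through and pick the copy size from whichever
 * variant matched, while the fds/attrs fields not carried by the
 * smaller variants were zeroed before the switch.
 */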
static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
				 unsigned long ioctl_param)
{
	union {
		struct fastrpc_ioctl_invoke_attrs inv;
		struct fastrpc_ioctl_mmap mmap;
		struct fastrpc_ioctl_munmap munmap;
		struct fastrpc_ioctl_init init;
	} p;
	void __user *param = (void __user *)ioctl_param;
	struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
	int size = 0, err = 0;
	uint32_t info;

	p.inv.fds = NULL;
	p.inv.attrs = NULL;

	switch (ioctl_num) {
	case FASTRPC_IOCTL_INVOKE:
		size = sizeof(struct fastrpc_ioctl_invoke);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_FD:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_fd);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_ATTRS:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_attrs);
		VERIFY(err, 0 == copy_from_user(&p.inv, param, size));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
								0, &p.inv)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MMAP:
		VERIFY(err, 0 == copy_from_user(&p.mmap, param,
						sizeof(p.mmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == copy_to_user(param, &p.mmap, sizeof(p.mmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MUNMAP:
		VERIFY(err, 0 == copy_from_user(&p.munmap, param,
						sizeof(p.munmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
								&p.munmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_SETMODE:
		/* The mode is passed by value, not by pointer. */
		switch ((uint32_t)ioctl_param) {
		case FASTRPC_MODE_PARALLEL:
		case FASTRPC_MODE_SERIAL:
			fl->mode = (uint32_t)ioctl_param;
			break;
		default:
			err = -ENOTTY;
			break;
		}
		break;
	case FASTRPC_IOCTL_GETINFO:
		VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
		if (err)
			goto bail;
		VERIFY(err, 0 == copy_to_user(param, &info, sizeof(info)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_INIT:
		VERIFY(err, 0 == copy_from_user(&p.init, param,
						sizeof(p.init)));
		if (err)
			goto bail;
		VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
		if (err)
			goto bail;
		break;

	default:
		err = -ENOTTY;
		pr_info("bad ioctl: %u\n", ioctl_num);
		break;
	}
bail:
	return err;
}

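/*
 * Minimal user-space sketch of the ioctl interface above (illustrative
 * only; the device-node name is an assumption, and the FASTRPC_IOCTL_*
 * macros come from the uapi header this driver shares with user space):
 *
 *	int fd = open("/dev/adsprpc-smd", O_NONBLOCK);	// assumed node name
 *	uint32_t info = 0;
 *	ioctl(fd, FASTRPC_IOCTL_SETMODE, FASTRPC_MODE_PARALLEL);
 *	ioctl(fd, FASTRPC_IOCTL_GETINFO, &info);	// 1 if SMMU enabled
 *	close(fd);
 */

/*
 * fastrpc_restart_notifier_cb() - subsystem-restart hook.  The channel
 * index is recovered from the embedded notifier_block; on
 * SUBSYS_BEFORE_SHUTDOWN the SSR count is bumped, the glink channel is
 * closed under smd_mutex, and attached drivers are notified so pending
 * invocations can fail out.
 */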
static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
					unsigned long code,
					void *data)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
	cid = ctx - &me->channel[0];
	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		mutex_lock(&me->smd_mutex);
		ctx->ssrcount++;
		if (ctx->chan) {
			fastrpc_glink_close(ctx->chan, cid);
			ctx->chan = NULL;
			pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
				gcinfo[cid].name, MAJOR(me->dev_no), cid);
		}
		mutex_unlock(&me->smd_mutex);
		fastrpc_notify_drivers(me, cid);
	}

	return NOTIFY_DONE;
}

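/*
 * File operations for the per-channel device nodes created in
 * fastrpc_device_init(); compat_ioctl lets 32-bit user space drive the
 * same interface.
 */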
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};

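/*
 * Compatibles this driver binds: the channel and compute devices, the
 * SMMU compute context banks, and the ADSP memory region.
 * fastrpc_probe() tells them apart, and of_platform_populate() recurses
 * over child nodes with this same table.
 */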
static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};

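/*
 * fastrpc_cb_probe() - set up one SMMU context bank as a compute
 * session.  The "label" property picks the channel, the "iommus"
 * phandle supplies the context-bank number, and an ARM IOMMU mapping is
 * created and attached before the session is marked usable.
 */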
static int fastrpc_cb_probe(struct device *dev)
{
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	struct of_phandle_args iommuspec;
	const char *name;
	unsigned int start = 0x80000000;
	int err = 0, i;
	int secure_vmid = VMID_CP_PIXEL;

	VERIFY(err, NULL != (name = of_get_property(dev->of_node,
						    "label", NULL)));
	if (err)
		goto bail;
	/* Match the context bank's label against a known channel. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		if (!strcmp(name, gcinfo[i].name))
			break;
	}
	VERIFY(err, i < NUM_CHANNELS);
	if (err)
		goto bail;
	chan = &gcinfo[i];
	VERIFY(err, chan->sesscount < NUM_SESSIONS);
	if (err)
		goto bail;

	VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
						"#iommu-cells", 0, &iommuspec));
	if (err)
		goto bail;
	sess = &chan->session[chan->sesscount];
	sess->smmu.cb = iommuspec.args[0] & 0xf;
	sess->used = 0;
	sess->smmu.coherent = of_property_read_bool(dev->of_node,
						    "dma-coherent");
	sess->smmu.secure = of_property_read_bool(dev->of_node,
						  "qcom,secure-context-bank");
	/* Secure context banks get a lower IOVA base for their window. */
	if (sess->smmu.secure)
		start = 0x60000000;
	VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
				arm_iommu_create_mapping(&platform_bus_type,
							 start, 0x7fffffff)));
	if (err)
		goto bail;

	if (sess->smmu.secure)
		iommu_domain_set_attr(sess->smmu.mapping->domain,
				      DOMAIN_ATTR_SECURE_VMID,
				      &secure_vmid);

	VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
	if (err)
		goto bail;
	sess->dev = dev;
	sess->smmu.enabled = 1;
	chan->sesscount++;
bail:
	return err;
}

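/*
 * fastrpc_probe() - top-level platform probe.  Dispatches on compatible:
 * compute context banks go to fastrpc_cb_probe(); the ADSP memory region
 * is located via the ION ADSP heap's CMA area and hyp-assigned to the
 * remote VMIDs; any other matching node just populates its children.
 */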
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;

	if (of_device_is_compatible(dev->of_node,
				    "qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
				    "qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;
		/* Find the CMA area backing the ION ADSP heap. */
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
					 VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE,
					     PERM_READ | PERM_WRITE | PERM_EXEC,
					     PERM_READ | PERM_WRITE | PERM_EXEC,
					     PERM_READ | PERM_WRITE | PERM_EXEC,
					     };

			/* Share the region with the remote subsystems. */
			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
						     srcVM, 1, destVM,
						     destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					  fastrpc_match_table,
					  NULL, &pdev->dev));
bail:
	return err;
}

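/*
 * fastrpc_deinit() - drop every channel reference and tear down the SMMU
 * sessions (detach the device, release the IOMMU mapping).  Used both on
 * module exit and on the init-failure unwind path.
 */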
static void fastrpc_deinit(void)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *chan = gcinfo;
	int i, j;

	for (i = 0; i < NUM_CHANNELS; i++, chan++) {
		if (chan->chan) {
			kref_put_mutex(&chan->kref,
				       fastrpc_channel_close, &me->smd_mutex);
			chan->chan = NULL;
		}
		for (j = 0; j < NUM_SESSIONS; j++) {
			struct fastrpc_session_ctx *sess = &chan->session[j];

			if (sess->smmu.enabled) {
				arm_iommu_detach_device(sess->dev);
				sess->dev = NULL;
			}
			if (sess->smmu.mapping) {
				arm_iommu_release_mapping(sess->smmu.mapping);
				sess->smmu.mapping = NULL;
			}
		}
	}
}

static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};

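/*
 * fastrpc_device_init() - module init.  Registers the platform driver,
 * then sets up the char-device plumbing (chrdev region, cdev, class)
 * and one device node plus one SSR notifier per named channel; errors
 * unwind in reverse order through the *_bail labels.
 */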
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					     DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				  NUM_CHANNELS));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	/* One device node and one SSR notifier per named channel. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		me->channel[i].dev = device_create(me->class, NULL,
						   MKDEV(MAJOR(me->dev_no), i),
						   NULL, gcinfo[i].name);
		VERIFY(err, !IS_ERR(me->channel[i].dev));
		if (err)
			goto device_create_bail;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	return 0;
device_create_bail:
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (IS_ERR_OR_NULL(me->channel[i].dev))
			continue;
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						 &me->channel[i].nb);
	}
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}

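/*
 * fastrpc_device_exit() - module teardown: destroy the per-channel
 * device nodes and SSR notifiers, then the class, cdev, chrdev region,
 * and ION client, mirroring fastrpc_device_init() in reverse.
 */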
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;
	int i;

	fastrpc_file_list_dtor(me);
	fastrpc_deinit();
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						 &me->channel[i].nb);
	}
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
	ion_client_destroy(me->client);
}

late_initcall(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");