/*
 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/msm_ion.h>
#include <soc/qcom/secure_buffer.h>
#include <soc/qcom/glink.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>
#include <linux/scatterlist.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/sort.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <asm/dma-iommu.h>
#include "adsprpc_compat.h"
#include "adsprpc_shared.h"

#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
#define TZ_PIL_AUTH_QDSP6_PROC 1
#define FASTRPC_ENOSUCH 39
#define VMID_SSC_Q6     5
#define VMID_ADSP_Q6    6

#define RPC_TIMEOUT	(5 * HZ)
#define BALIGN		128
#define NUM_CHANNELS	4	/* adsp, mdsp, slpi, cdsp */
#define NUM_SESSIONS	9	/* 8 compute, 1 cpz */
#define M_FDLIST	16

#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)

#define FASTRPC_LINK_STATE_DOWN   (0x0)
#define FASTRPC_LINK_STATE_UP     (0x1)
#define FASTRPC_LINK_DISCONNECTED (0x0)
#define FASTRPC_LINK_CONNECTING   (0x1)
#define FASTRPC_LINK_CONNECTED    (0x3)
#define FASTRPC_LINK_DISCONNECTING (0x7)

static int fastrpc_glink_open(int cid);
static void fastrpc_glink_close(void *chan, int cid);

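/*
 * Page-arithmetic helpers: compute the page-aligned start of a buffer,
 * its offset within the first page, the number of pages spanned by
 * [buf, buf + len), and a size rounded up to at least one full page.
 */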
static inline uint64_t buf_page_start(uint64_t buf)
{
	uint64_t start = (uint64_t) buf & PAGE_MASK;
	return start;
}

static inline uint64_t buf_page_offset(uint64_t buf)
{
	uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
	return offset;
}

static inline int buf_num_pages(uint64_t buf, ssize_t len)
{
	uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
	uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
	int npages = end - start + 1;
	return npages;
}

static inline uint64_t buf_page_size(uint32_t size)
{
	uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;

	return sz > PAGE_SIZE ? sz : PAGE_SIZE;
}

static inline void *uint64_to_ptr(uint64_t addr)
{
	void *ptr = (void *)((uintptr_t)addr);

	return ptr;
}

static inline uint64_t ptr_to_uint64(void *ptr)
{
	uint64_t addr = (uint64_t)((uintptr_t)ptr);

	return addr;
}

struct fastrpc_file;

struct fastrpc_buf {
	struct hlist_node hn;
	struct fastrpc_file *fl;
	void *virt;
	uint64_t phys;
	ssize_t size;
};

struct fastrpc_ctx_lst;

struct overlap {
	uintptr_t start;
	uintptr_t end;
	int raix;
	uintptr_t mstart;
	uintptr_t mend;
	uintptr_t offset;
};

struct smq_invoke_ctx {
	struct hlist_node hn;
	struct completion work;
	int retval;
	int pid;
	int tgid;
	remote_arg_t *lpra;
	remote_arg64_t *rpra;
	int *fds;
	unsigned int *attrs;
	struct fastrpc_mmap **maps;
	struct fastrpc_buf *buf;
	ssize_t used;
	struct fastrpc_file *fl;
	uint32_t sc;
	struct overlap *overs;
	struct overlap **overps;
	struct smq_msg msg;
};

struct fastrpc_ctx_lst {
	struct hlist_head pending;
	struct hlist_head interrupted;
};

struct fastrpc_smmu {
	struct dma_iommu_mapping *mapping;
	int cb;
	int enabled;
	int faults;
	int secure;
	int coherent;
};

struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};

struct fastrpc_glink_info {
	int link_state;
	int port_state;
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};

struct fastrpc_channel_ctx {
	char *name;
	char *subsys;
	void *chan;
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;
	struct notifier_block nb;
	struct kref kref;
	int sesscount;
	int ssrcount;
	void *handle;
	int prevssrcount;
	int vmid;
	struct fastrpc_glink_info link;
};

struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;
	spinlock_t hlock;
	struct ion_client *client;
	struct device *dev;
};

struct fastrpc_mmap {
	struct hlist_node hn;
	struct fastrpc_file *fl;
	struct fastrpc_apps *apps;
	int fd;
	uint32_t flags;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;
	ssize_t size;
	uintptr_t va;
	ssize_t len;
	int refs;
	uintptr_t raddr;
	int uncached;
	int secure;
	uintptr_t attr;
};

struct fastrpc_file {
	struct hlist_node hn;
	spinlock_t hlock;
	struct hlist_head maps;
	struct hlist_head bufs;
	struct fastrpc_ctx_lst clst;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_session_ctx *secsctx;
	uint32_t mode;
	int tgid;
	int cid;
	int ssrcount;
	int pd;
	struct fastrpc_apps *apps;
};

static struct fastrpc_apps gfa;

static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "dsps",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};

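/*
 * Release a buffer: either park it on the per-file cache list for
 * later reuse, or hand the pages back to HLOS (if they had been
 * hyp-assigned to a remote VM) and free the coherent allocation.
 */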
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == 0 ? 0 : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->dev, buf->size, buf->virt,
					buf->phys);
	}
	kfree(buf);
}

static void fastrpc_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			hlist_del_init(&buf->hn);
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}

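/*
 * Per-file mmap bookkeeping: maps live on fl->maps under fl->hlock.
 * Lookups match by fd plus the [va, va + len) range falling entirely
 * inside an existing mapping; removal additionally requires an exact
 * remote-address range and a single outstanding reference.
 */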
static void fastrpc_mmap_add(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl = map->fl;

	spin_lock(&fl->hlock);
	hlist_add_head(&map->hn, &fl->maps);
	spin_unlock(&fl->hlock);
}

static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
	ssize_t len, int mflags, int refs, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = 0, *map;
	struct hlist_node *n;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (va >= map->va &&
		    va + len <= map->va + map->len &&
		    map->fd == fd) {
			if (refs)
				map->refs++;
			match = map;
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       ssize_t len, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = 0, *map;
	struct hlist_node *n;
	struct fastrpc_apps *me = &gfa;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
		if (map->raddr == va &&
		    map->raddr + map->len == va + len &&
		    map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (map->raddr == va &&
		    map->raddr + map->len == va + len &&
		    map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

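/*
 * Drop a reference on a mapping and, on the last reference, undo the
 * setup performed by fastrpc_mmap_create() in reverse order: SMMU
 * unmap, hyp re-assignment of the pages back to HLOS, dma-buf
 * unmap/detach/put, and the ion handle free.
 */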
static void fastrpc_mmap_free(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;
	int destVM[1] = {VMID_HLOS};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	if (!map)
		return;
	fl = map->fl;
	spin_lock(&fl->hlock);
	map->refs--;
	if (!map->refs)
		hlist_del_init(&map->hn);
	spin_unlock(&fl->hlock);
	if (map->refs > 0)
		return;
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	if (!IS_ERR_OR_NULL(map->handle))
		ion_free(fl->apps->client, map->handle);
	if (sess->smmu.enabled) {
		if (map->size || map->phys)
			msm_dma_unmap_sg(sess->dev,
				map->table->sgl,
				map->table->nents, DMA_BIDIRECTIONAL,
				map->buf);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid && map->phys) {
		int srcVM[2] = {VMID_HLOS, vmid};

		hyp_assign_phys(map->phys, buf_page_size(map->size),
			srcVM, 2, destVM, destVMperm, 1);
	}

	if (!IS_ERR_OR_NULL(map->table))
		dma_buf_unmap_attachment(map->attach, map->table,
				DMA_BIDIRECTIONAL);
	if (!IS_ERR_OR_NULL(map->attach))
		dma_buf_detach(map->buf, map->attach);
	if (!IS_ERR_OR_NULL(map->buf))
		dma_buf_put(map->buf);
	kfree(map);
}

static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
				 struct fastrpc_session_ctx **session);

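/*
 * Import a shared buffer by fd and make it visible to the DSP: import
 * the ion handle, attach and map the dma-buf into the session's SMMU
 * context bank (or insist on a single, physically contiguous sg entry
 * when the SMMU is disabled), then hyp-assign the pages to the remote
 * VM if the channel has one.
 */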
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, ssize_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = 0;
	unsigned long attrs;
	unsigned long flags;
	int err = 0, vmid;

	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	VERIFY(err, !IS_ERR_OR_NULL(map->handle =
			ion_import_dma_buf_fd(fl->apps->client, fd)));
	if (err)
		goto bail;
	VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
	if (err)
		goto bail;

	map->uncached = !ION_IS_CACHED(flags);
	if (map->attr & FASTRPC_ATTR_NOVA)
		map->uncached = 1;

	map->secure = flags & ION_FLAG_SECURE;
	if (map->secure) {
		if (!fl->secsctx)
			err = fastrpc_session_alloc(chan, 1,
						&fl->secsctx);
		if (err)
			goto bail;
	}
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->attach =
			dma_buf_attach(map->buf, sess->dev)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
	if (err)
		goto bail;
	if (sess->smmu.enabled) {
		attrs = DMA_ATTR_EXEC_MAPPING;
		VERIFY(err, map->table->nents ==
			msm_dma_map_sg_attrs(sess->dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
		if (err)
			goto bail;
	} else {
		VERIFY(err, map->table->nents == 1);
		if (err)
			goto bail;
	}
	map->phys = sg_dma_address(map->table->sgl);
	if (sess->smmu.cb) {
		map->phys += ((uint64_t)sess->smmu.cb << 32);
		map->size = sg_dma_len(map->table->sgl);
	} else {
		map->size = buf_page_size(len);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(map->phys,
				buf_page_size(map->size),
				srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}
	map->va = va;
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}

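/*
 * Allocate a DMA-coherent scratch buffer, preferring the smallest
 * cached buffer on fl->bufs that is large enough; on allocation
 * failure the cache is drained once and the allocation retried.
 */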
static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
			     struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = 0, *fr = 0;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = 0;
	VERIFY(err, buf = kzalloc(sizeof(*buf), GFP_KERNEL));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = 0;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
				       (void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
					       (void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
				srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}

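/*
 * If this thread was interrupted mid-invoke, pick its saved context
 * off the interrupted list and move it back to pending so the wait
 * can be resumed for the same remote call (same scalars, same file).
 */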
static int context_restore_interrupted(struct fastrpc_file *fl,
				       struct fastrpc_ioctl_invoke_attrs *inv,
				       struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = 0, *ictx = 0;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int overlap_ptr_cmp(const void *a, const void *b)
{
	struct overlap *pa = *((struct overlap **)a);
	struct overlap *pb = *((struct overlap **)b);
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);
	return st == 0 ? ed : st;
}

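/*
 * Sort the argument buffers by start address and compute, for each
 * one, the sub-range [mstart, mend) not already covered by an earlier
 * buffer, so overlapping user buffers are copied only once. For
 * example (illustrative values): with buffers [0x1000, 0x3000) and
 * [0x2000, 0x2800), the second lies wholly inside the first and gets
 * mstart == mend == 0; a buffer [0x2000, 0x4000) would instead get
 * mstart = 0x3000, mend = 0x4000 and offset = 0x1000.
 */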
674
Sathish Ambley9466d672017-01-25 10:51:55 -0800675static int context_build_overlap(struct smq_invoke_ctx *ctx)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700676{
Sathish Ambley9466d672017-01-25 10:51:55 -0800677 int i, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700678 remote_arg_t *lpra = ctx->lpra;
679 int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
680 int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
681 int nbufs = inbufs + outbufs;
682 struct overlap max;
683
684 for (i = 0; i < nbufs; ++i) {
685 ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
686 ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
Sathish Ambley9466d672017-01-25 10:51:55 -0800687 if (lpra[i].buf.len) {
688 VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
689 if (err)
690 goto bail;
691 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700692 ctx->overs[i].raix = i;
693 ctx->overps[i] = &ctx->overs[i];
694 }
695 sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, 0);
696 max.start = 0;
697 max.end = 0;
698 for (i = 0; i < nbufs; ++i) {
699 if (ctx->overps[i]->start < max.end) {
700 ctx->overps[i]->mstart = max.end;
701 ctx->overps[i]->mend = ctx->overps[i]->end;
702 ctx->overps[i]->offset = max.end -
703 ctx->overps[i]->start;
704 if (ctx->overps[i]->end > max.end) {
705 max.end = ctx->overps[i]->end;
706 } else {
707 ctx->overps[i]->mend = 0;
708 ctx->overps[i]->mstart = 0;
709 }
710 } else {
711 ctx->overps[i]->mend = ctx->overps[i]->end;
712 ctx->overps[i]->mstart = ctx->overps[i]->start;
713 ctx->overps[i]->offset = 0;
714 max = *ctx->overps[i];
715 }
716 }
Sathish Ambley9466d672017-01-25 10:51:55 -0800717bail:
718 return err;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700719}
720
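/*
 * Copy helpers that work for both user and kernel callers: user
 * buffers go through copy_from_user()/copy_to_user(), kernel buffers
 * through a plain memmove().
 */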
#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst), (src),\
							(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((dst), (src),\
						      (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

static void context_free(struct smq_invoke_ctx *ctx);

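/*
 * Allocate an invoke context: a single kzalloc carries the context
 * plus the per-argument arrays (maps, lpra, fds, attrs, overs,
 * overps) laid out back to back behind it; the caller's remote args,
 * fds and attrs are then copied in and the overlap table is built.
 */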
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			 struct fastrpc_ioctl_invoke_attrs *invokefd,
			 struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, ctx->lpra, invoke->pra,
			 bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
				 bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
				 bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}

	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = current->tgid;
	init_completion(&ctx->work);

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}

static void context_save_interrupted(struct smq_invoke_ctx *ctx)
{
	struct fastrpc_ctx_lst *clst = &ctx->fl->clst;

	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	hlist_add_head(&ctx->hn, &clst->interrupted);
	spin_unlock(&ctx->fl->hlock);
	/* free the cache on power collapse */
	fastrpc_buf_list_free(ctx->fl);
}

static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);

	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i]);
	fastrpc_buf_free(ctx->buf, 1);
	kfree(ctx);
}

static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}

static void fastrpc_notify_users(struct fastrpc_file *me)
{
	struct smq_invoke_ctx *ictx;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
		complete(&ictx->work);
	}
	hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
		complete(&ictx->work);
	}
	spin_unlock(&me->hlock);
}

static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
{
	struct fastrpc_file *fl;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		if (fl->cid == cid)
			fastrpc_notify_users(fl);
	}
	spin_unlock(&me->hlock);
}

static void context_list_ctor(struct fastrpc_ctx_lst *me)
{
	INIT_HLIST_HEAD(&me->interrupted);
	INIT_HLIST_HEAD(&me->pending);
}

static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
{
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct smq_invoke_ctx *ictx = 0, *ctxfree;
	struct hlist_node *n;

	do {
		ctxfree = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
	do {
		ctxfree = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
}

static int fastrpc_file_free(struct fastrpc_file *fl);
static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
{
	struct fastrpc_file *fl, *free;
	struct hlist_node *n;

	do {
		free = 0;
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
			hlist_del_init(&fl->hn);
			free = fl;
			break;
		}
		spin_unlock(&me->hlock);
		if (free)
			fastrpc_file_free(free);
	} while (free);
}

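/*
 * Marshal the arguments for a remote invocation into a single scratch
 * buffer. The buffer starts with metadata (an smq_invoke_buf list,
 * then smq_phy_page entries, then an M_FDLIST fd array), followed by
 * the copied contents of any non-ion input/output buffers; ion-backed
 * buffers are passed by physical page instead of being copied.
 * Roughly:
 *
 *	[ list[0..n] | pages[0..n] | fdlist[0..15] | copied args ... ]
 */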
static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
{
	remote_arg64_t *rpra;
	remote_arg_t *lpra = ctx->lpra;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages, *ipage;
	uint32_t sc = ctx->sc;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	int handles, bufs = inbufs + outbufs;
	uintptr_t args;
	ssize_t rlen = 0, copylen = 0, metalen = 0;
	int i, oix;
	int err = 0;
	int mflags = 0;
	uint64_t *fdlist;

	/* calculate size of the metadata */
	rpra = 0;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;

	for (i = 0; i < bufs; ++i) {
		uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
		ssize_t len = lpra[i].buf.len;

		if (ctx->fds[i] && (ctx->fds[i] != -1))
			fastrpc_mmap_create(ctx->fl, ctx->fds[i],
					ctx->attrs[i], buf, len,
					mflags, &ctx->maps[i]);
		ipage += 1;
	}
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	for (i = bufs; i < bufs + handles; i++) {
		VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
				FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
		if (err)
			goto bail;
		ipage += 1;
	}
	metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST);
	/* calculate len required for copying */
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		ssize_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (ctx->maps[i])
			continue;
		if (ctx->overps[oix]->offset == 0)
			copylen = ALIGN(copylen, BALIGN);
		copylen += ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
	}
	ctx->used = copylen;

	/* allocate new buffer */
	if (copylen) {
		VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
		if (err)
			goto bail;
	}
	/* copy metadata */
	rpra = ctx->buf->virt;
	ctx->rpra = rpra;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;
	args = (uintptr_t)ctx->buf->virt + metalen;
	for (i = 0; i < bufs + handles; ++i) {
		if (lpra[i].buf.len)
			list[i].num = 1;
		else
			list[i].num = 0;
		list[i].pgidx = ipage - pages;
		ipage++;
	}
	/* map ion buffers */
	for (i = 0; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];
		uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
		ssize_t len = lpra[i].buf.len;

		rpra[i].buf.pv = 0;
		rpra[i].buf.len = len;
		if (!len)
			continue;
		if (map) {
			struct vm_area_struct *vma;
			uintptr_t offset;
			int num = buf_num_pages(buf, len);
			int idx = list[i].pgidx;

			if (map->attr & FASTRPC_ATTR_NOVA) {
				offset = 0;
			} else {
				down_read(&current->mm->mmap_sem);
				VERIFY(err, NULL != (vma = find_vma(current->mm,
								map->va)));
				if (err) {
					up_read(&current->mm->mmap_sem);
					goto bail;
				}
				offset = buf_page_start(buf) - vma->vm_start;
				up_read(&current->mm->mmap_sem);
				VERIFY(err, offset < (uintptr_t)map->size);
				if (err)
					goto bail;
			}
			pages[idx].addr = map->phys + offset;
			pages[idx].size = num << PAGE_SHIFT;
		}
		rpra[i].buf.pv = buf;
	}
	for (i = bufs; i < bufs + handles; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		pages[i].addr = map->phys;
		pages[i].size = map->size;
	}
	fdlist = (uint64_t *)&pages[bufs + handles];
	for (i = 0; i < M_FDLIST; i++)
		fdlist[i] = 0;
	/* copy non ion buffers */
	rlen = copylen - metalen;
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];
		int mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
		uint64_t buf;
		ssize_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (map)
			continue;
		if (ctx->overps[oix]->offset == 0) {
			rlen -= ALIGN(args, BALIGN) - args;
			args = ALIGN(args, BALIGN);
		}
		VERIFY(err, rlen >= mlen);
		if (err)
			goto bail;
		rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
		pages[list[i].pgidx].addr = ctx->buf->phys -
					    ctx->overps[oix]->offset +
					    (copylen - rlen);
		pages[list[i].pgidx].addr =
			buf_page_start(pages[list[i].pgidx].addr);
		buf = rpra[i].buf.pv;
		pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
		if (i < inbufs) {
			K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
					 lpra[i].buf.pv, len);
			if (err)
				goto bail;
		}
		args = args + mlen;
		rlen -= mlen;
	}

	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];

		if (ctx->fl->sctx->smmu.coherent)
			continue;
		if (map && map->uncached)
			continue;
		if (rpra[i].buf.len && ctx->overps[oix]->mstart)
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
	}
	for (i = bufs; i < bufs + handles; i++) {
		rpra[i].dma.fd = ctx->fds[i];
		rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
		rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
	}
	if (!ctx->fl->sctx->smmu.coherent)
		dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
bail:
	return err;
}

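/*
 * Unmarshal results: copy remote output buffers back to the caller
 * and release any maps the DSP asked to be freed via the returned
 * fd list.
 */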
static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		    remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			fastrpc_mmap_free(ctx->maps[i]);
			ctx->maps[i] = 0;
		}
	}
	if (inbufs + outbufs + handles) {
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap);
		}
	}
bail:
	return err;
}

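/*
 * Cache maintenance before handing buffers to the DSP: flush the
 * cache lines that straddle the (unaligned) edges of cached output
 * buffers, so dirty lines cannot later be written back over data the
 * DSP has produced.
 */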
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
							rpra[i].buf.len);
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}

static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						+ rpra[i].buf.len));
	}

	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}

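/*
 * Build the smq_msg for this context and transmit it to the DSP over
 * the channel's glink port. The low bit of the context pointer
 * carries the target protection domain (fl->pd); kernel-initiated
 * calls are sent with pid 0.
 */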
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, 0 != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = current->tgid;
	msg->tid = current->pid;
	if (kernel)
		msg->pid = 0;
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
			FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
bail:
	return err;
}

static void fastrpc_init(struct fastrpc_apps *me)
{
	int i;

	INIT_HLIST_HEAD(&me->drivers);
	spin_lock_init(&me->hlock);
	mutex_init(&me->smd_mutex);
	me->channel = &gcinfo[0];
	for (i = 0; i < NUM_CHANNELS; i++) {
		init_completion(&me->channel[i].work);
		me->channel[i].sesscount = 0;
	}
}

static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);

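/*
 * Core invoke path: restore an interrupted context if one exists for
 * this thread, otherwise allocate one, marshal the arguments, send
 * the message and wait (interruptibly for user callers) for the
 * DSP's response before unmarshaling the results.
 */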
static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
				   uint32_t kernel,
				   struct fastrpc_ioctl_invoke_attrs *inv)
{
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;
	int cid = fl->cid;
	int interrupted = 0;
	int err = 0;

	if (!kernel) {
		VERIFY(err, 0 == context_restore_interrupted(fl, inv,
								&ctx));
		if (err)
			goto bail;
		if (fl->sctx->smmu.faults)
			err = FASTRPC_ENOSUCH;
		if (err)
			goto bail;
		if (ctx)
			goto wait;
	}

	VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
	if (err)
		goto bail;

	if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
		VERIFY(err, 0 == get_args(kernel, ctx));
		if (err)
			goto bail;
	}

	if (!fl->sctx->smmu.coherent) {
		inv_args_pre(ctx);
		if (mode == FASTRPC_MODE_SERIAL)
			inv_args(ctx);
	}
	VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
	if (err)
		goto bail;
	if (mode == FASTRPC_MODE_PARALLEL && !fl->sctx->smmu.coherent)
		inv_args(ctx);
wait:
	if (kernel)
		wait_for_completion(&ctx->work);
	else {
		interrupted = wait_for_completion_interruptible(&ctx->work);
		VERIFY(err, 0 == (err = interrupted));
		if (err)
			goto bail;
	}
	VERIFY(err, 0 == (err = ctx->retval));
	if (err)
		goto bail;
	VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
	if (err)
		goto bail;
bail:
	if (ctx && interrupted == -ERESTARTSYS)
		context_save_interrupted(ctx);
	else if (ctx)
		context_free(ctx);
	if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
		err = -ECONNRESET;
	return err;
}

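/*
 * Create or attach this process's protection domain on the DSP:
 * FASTRPC_INIT_ATTACH joins the static domain (pd 0), while
 * FASTRPC_INIT_CREATE spawns a dynamic process from the supplied ELF
 * file and pre-mapped memory region (pd 1).
 */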
static int fastrpc_init_process(struct fastrpc_file *fl,
				struct fastrpc_ioctl_init *init)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_attrs ioctl;
	struct smq_phy_page pages[1];
	struct fastrpc_mmap *file = 0, *mem = 0;

	if (init->flags == FASTRPC_INIT_ATTACH) {
		remote_arg_t ra[1];
		int tgid = current->tgid;

		ra[0].buf.pv = (void *)&tgid;
		ra[0].buf.len = sizeof(tgid);
		ioctl.inv.handle = 1;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = 0;
		ioctl.attrs = 0;
		fl->pd = 0;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else if (init->flags == FASTRPC_INIT_CREATE) {
		remote_arg_t ra[4];
		int fds[4];
		int mflags = 0;
		struct {
			int pgid;
			int namelen;
			int filelen;
			int pageslen;
		} inbuf;

		inbuf.pgid = current->tgid;
		inbuf.namelen = strlen(current->comm) + 1;
		inbuf.filelen = init->filelen;
		fl->pd = 1;
		if (init->filelen) {
			VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
				init->file, init->filelen, mflags, &file));
			if (err)
				goto bail;
		}
		inbuf.pageslen = 1;
		VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
			init->mem, init->memlen, mflags, &mem));
		if (err)
			goto bail;
		inbuf.pageslen = 1;
		ra[0].buf.pv = (void *)&inbuf;
		ra[0].buf.len = sizeof(inbuf);
		fds[0] = 0;

		ra[1].buf.pv = (void *)current->comm;
		ra[1].buf.len = inbuf.namelen;
		fds[1] = 0;

		ra[2].buf.pv = (void *)init->file;
		ra[2].buf.len = inbuf.filelen;
		fds[2] = init->filefd;

		pages[0].addr = mem->phys;
		pages[0].size = mem->size;
		ra[3].buf.pv = (void *)pages;
		ra[3].buf.len = 1 * sizeof(*pages);
		fds[3] = 0;

		ioctl.inv.handle = 1;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = fds;
		ioctl.attrs = 0;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else {
		err = -ENOTTY;
	}
bail:
	if (mem && err)
		fastrpc_mmap_free(mem);
	if (file)
		fastrpc_mmap_free(file);
	return err;
}

static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_attrs ioctl;
	remote_arg_t ra[1];
	int tgid = 0;

	VERIFY(err, fl->apps->channel[fl->cid].chan != 0);
	if (err)
		goto bail;
	tgid = fl->tgid;
	ra[0].buf.pv = (void *)&tgid;
	ra[0].buf.len = sizeof(tgid);
	ioctl.inv.handle = 1;
	ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
bail:
	return err;
}

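/*
 * Ask the DSP to map (or unmap) a buffer into the remote address
 * space. Both are kernel-initiated remote calls on the static
 * handle 1; the compat path uses different method indices on the
 * same handle.
 */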
static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
			       struct fastrpc_mmap *map)
{
	struct fastrpc_ioctl_invoke_attrs ioctl;
	struct smq_phy_page page;
	int num = 1;
	remote_arg_t ra[3];
	int err = 0;
	struct {
		int pid;
		uint32_t flags;
		uintptr_t vaddrin;
		int num;
	} inargs;
	struct {
		uintptr_t vaddrout;
	} routargs;

	inargs.pid = current->tgid;
	inargs.vaddrin = (uintptr_t)map->va;
	inargs.flags = flags;
	inargs.num = fl->apps->compat ? num * sizeof(page) : num;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);
	page.addr = map->phys;
	page.size = map->size;
	ra[1].buf.pv = (void *)&page;
	ra[1].buf.len = num * sizeof(page);

	ra[2].buf.pv = (void *)&routargs;
	ra[2].buf.len = sizeof(routargs);

	ioctl.inv.handle = 1;
	if (fl->apps->compat)
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
	else
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	map->raddr = (uintptr_t)routargs.vaddrout;

	return err;
}

static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
				 struct fastrpc_mmap *map)
{
	struct fastrpc_ioctl_invoke_attrs ioctl;
	remote_arg_t ra[1];
	int err = 0;
	struct {
		int pid;
		uintptr_t vaddrout;
		ssize_t size;
	} inargs;

	inargs.pid = current->tgid;
	inargs.size = map->size;
	inargs.vaddrout = map->raddr;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);

	ioctl.inv.handle = 1;
	if (fl->apps->compat)
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
	else
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	return err;
}

static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       ssize_t len, struct fastrpc_mmap **ppmap);

static void fastrpc_mmap_add(struct fastrpc_mmap *map);

static int fastrpc_internal_munmap(struct fastrpc_file *fl,
				   struct fastrpc_ioctl_munmap *ud)
{
	int err = 0;
	struct fastrpc_mmap *map = 0;

	VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
	if (err)
		goto bail;
	VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
	if (err)
		goto bail;
	fastrpc_mmap_free(map);
bail:
	if (err && map)
		fastrpc_mmap_add(map);
	return err;
}

static int fastrpc_internal_mmap(struct fastrpc_file *fl,
				 struct fastrpc_ioctl_mmap *ud)
{
	struct fastrpc_mmap *map = 0;
	int err = 0;

	if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin, ud->size,
			       ud->flags, 1, &map))
		return 0;

	VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
			(uintptr_t)ud->vaddrin, ud->size, ud->flags, &map));
	if (err)
		goto bail;
	VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
	if (err)
		goto bail;
	ud->vaddrout = map->raddr;
bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}

static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = 0;
	mutex_unlock(&me->smd_mutex);
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
		MAJOR(me->dev_no), cid);
}

static void fastrpc_context_list_dtor(struct fastrpc_file *fl);

static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
			int secure, struct fastrpc_session_ctx **session)
{
	struct fastrpc_apps *me = &gfa;
	int idx = 0, err = 0;

	if (chan->sesscount) {
		for (idx = 0; idx < chan->sesscount; ++idx) {
			if (!chan->session[idx].used &&
			    chan->session[idx].smmu.secure == secure) {
				chan->session[idx].used = 1;
				break;
			}
		}
		VERIFY(err, idx < chan->sesscount);
		if (err)
			goto bail;
		chan->session[idx].smmu.faults = 0;
	} else {
		VERIFY(err, me->dev != NULL);
		if (err)
			goto bail;
		chan->session[0].dev = me->dev;
	}

	*session = &chan->session[idx];
bail:
	return err;
}

bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv, size_t size)
{
	if (glink_queue_rx_intent(h, NULL, size))
		return false;
	return true;
}

void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
				  const void *pkt_priv, const void *ptr)
{
}

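/*
 * glink receive callback: the payload is an array of smq_invoke_rsp
 * entries; each one completes the invoke context whose pointer (with
 * the pd bit masked off) is carried in rsp->ctx.
 */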
void fastrpc_glink_notify_rx(void *handle, const void *priv,
			     const void *pkt_priv, const void *ptr, size_t size)
{
	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
	int len = size;

	while (len >= sizeof(*rsp) && rsp) {
		rsp->ctx = rsp->ctx & ~1;
		context_notify_user(uint64_to_ptr(rsp->ctx), rsp->retval);
		rsp++;
		len = len - sizeof(*rsp);
	}
	glink_rx_done(handle, ptr, true);
}

void fastrpc_glink_notify_state(void *handle, const void *priv,
				unsigned int event)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)(uintptr_t)priv;
	struct fastrpc_glink_info *link;

	if (cid < 0 || cid >= NUM_CHANNELS)
		return;
	link = &me->channel[cid].link;
	switch (event) {
	case GLINK_CONNECTED:
		link->port_state = FASTRPC_LINK_CONNECTED;
		complete(&me->channel[cid].work);
		break;
	case GLINK_LOCAL_DISCONNECTED:
		link->port_state = FASTRPC_LINK_DISCONNECTED;
		break;
	case GLINK_REMOTE_DISCONNECTED:
		if (me->channel[cid].chan &&
		    link->link_state == FASTRPC_LINK_STATE_UP) {
			fastrpc_glink_close(me->channel[cid].chan, cid);
			me->channel[cid].chan = 0;
			link->port_state = FASTRPC_LINK_DISCONNECTED;
		}
		break;
	default:
		break;
	}
}

static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
				 struct fastrpc_session_ctx **session)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;

	mutex_lock(&me->smd_mutex);
	if (!*session)
		err = fastrpc_session_alloc_locked(chan, secure, session);
	mutex_unlock(&me->smd_mutex);
	return err;
}

static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
				 struct fastrpc_session_ctx *session)
{
	struct fastrpc_apps *me = &gfa;

	mutex_lock(&me->smd_mutex);
	session->used = 0;
	mutex_unlock(&me->smd_mutex);
}

static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n;
	struct fastrpc_mmap *map = 0;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	(void)fastrpc_release_current_dsp_process(fl);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map);
	}
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
				fastrpc_channel_close, &fl->apps->smd_mutex);
	if (fl->sctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
	kfree(fl);
	return 0;
}

static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;

	if (fl) {
		fastrpc_file_free(fl);
		file->private_data = 0;
	}
	return 0;
}

static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
				       void *priv)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)((uintptr_t)priv);
	struct fastrpc_glink_info *link;

	if (cid < 0 || cid >= NUM_CHANNELS)
		return;

	link = &me->channel[cid].link;
	switch (cb_info->link_state) {
	case GLINK_LINK_STATE_UP:
		link->link_state = FASTRPC_LINK_STATE_UP;
		complete(&me->channel[cid].work);
		break;
	case GLINK_LINK_STATE_DOWN:
		link->link_state = FASTRPC_LINK_STATE_DOWN;
		break;
	default:
		pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
		break;
	}
}

static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;

	link = &me->channel[cid].link;
	if (link->link_notify_handle != NULL)
		goto bail;

	link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
	link->link_notify_handle = glink_register_link_state_cb(
					&link->link_info,
					(void *)((uintptr_t)cid));
	VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
	if (err) {
		link->link_notify_handle = NULL;
		goto bail;
	}
	VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
						RPC_TIMEOUT));
bail:
	return err;
}

static void fastrpc_glink_close(void *chan, int cid)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		return;
	link = &gfa.channel[cid].link;

	if (link->port_state == FASTRPC_LINK_CONNECTED ||
	    link->port_state == FASTRPC_LINK_CONNECTING) {
		link->port_state = FASTRPC_LINK_DISCONNECTING;
		glink_close(chan);
	}
}

static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &me->channel[cid].link.cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	if (link->port_state == FASTRPC_LINK_CONNECTED ||
	    link->port_state == FASTRPC_LINK_CONNECTING) {
		goto bail;
	}

	link->port_state = FASTRPC_LINK_CONNECTING;
	cfg->priv = (void *)(uintptr_t)cid;
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err)
		goto bail;
	me->channel[cid].chan = handle;
bail:
	return err;
}

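/*
 * Open a channel device node: allocate the per-fd state, pick an SMMU
 * session for the channel (selected by device minor) and, on first
 * open of a channel, register for link-state callbacks and bring up
 * the glink port.
 */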
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	int cid = MINOR(inode->i_rdev);
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl = 0;

	VERIFY(err, fl = kzalloc(sizeof(*fl), GFP_KERNEL));
	if (err)
		return err;

	filp->private_data = fl;

	mutex_lock(&me->smd_mutex);

	context_list_ctor(&fl->clst);
	spin_lock_init(&fl->hlock);
	INIT_HLIST_HEAD(&fl->maps);
	INIT_HLIST_HEAD(&fl->bufs);
	INIT_HLIST_NODE(&fl->hn);
	fl->tgid = current->tgid;
	fl->apps = me;
	fl->cid = cid;
	VERIFY(err, !fastrpc_session_alloc_locked(&me->channel[cid], 0,
							&fl->sctx));
	if (err)
		goto bail;
	fl->ssrcount = me->channel[cid].ssrcount;
	if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
	    (me->channel[cid].chan == 0)) {
		fastrpc_glink_register(cid, me);
		VERIFY(err, 0 == fastrpc_glink_open(cid));
		if (err)
			goto bail;

		VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
							RPC_TIMEOUT));
		if (err) {
			me->channel[cid].chan = 0;
			goto bail;
		}
		kref_init(&me->channel[cid].kref);
		pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
			MAJOR(me->dev_no), cid);
		if (me->channel[cid].ssrcount !=
		    me->channel[cid].prevssrcount) {
			me->channel[cid].prevssrcount =
					me->channel[cid].ssrcount;
		}
	}
	spin_lock(&me->hlock);
	hlist_add_head(&fl->hn, &me->drivers);
	spin_unlock(&me->hlock);

bail:
	mutex_unlock(&me->smd_mutex);

	if (err)
		fastrpc_device_release(inode, filp);
	return err;
}

static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
{
	int err = 0;

	VERIFY(err, fl && fl->sctx);
	if (err)
		goto bail;
	*info = (fl->sctx->smmu.enabled ? 1 : 0);
bail:
	return err;
}

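/*
 * Main ioctl dispatcher. The INVOKE/INVOKE_FD/INVOKE_ATTRS cases fall
 * through so the three layouts share one copy_from_user of the right
 * size. A minimal userspace call (illustrative only, not part of this
 * file) would look like:
 *
 *	int fd = open("/dev/adsprpc-smd", O_RDWR);
 *	struct fastrpc_ioctl_invoke inv = {
 *		.handle = handle, .sc = sc, .pra = ra,
 *	};
 *	ioctl(fd, FASTRPC_IOCTL_INVOKE, &inv);
 */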
1940static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
1941 unsigned long ioctl_param)
1942{
1943 union {
1944 struct fastrpc_ioctl_invoke_attrs inv;
1945 struct fastrpc_ioctl_mmap mmap;
1946 struct fastrpc_ioctl_munmap munmap;
1947 struct fastrpc_ioctl_init init;
1948 } p;
1949 void *param = (char *)ioctl_param;
1950 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
1951 int size = 0, err = 0;
1952 uint32_t info;
1953
1954 p.inv.fds = 0;
1955 p.inv.attrs = 0;
1956
1957 switch (ioctl_num) {
1958 case FASTRPC_IOCTL_INVOKE:
1959 size = sizeof(struct fastrpc_ioctl_invoke);
1960 case FASTRPC_IOCTL_INVOKE_FD:
1961 if (!size)
1962 size = sizeof(struct fastrpc_ioctl_invoke_fd);
1963 /* fall through */
1964 case FASTRPC_IOCTL_INVOKE_ATTRS:
1965 if (!size)
1966 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
1967 VERIFY(err, 0 == copy_from_user(&p.inv, param, size));
1968 if (err)
1969 goto bail;
1970 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
1971 0, &p.inv)));
1972 if (err)
1973 goto bail;
1974 break;
1975 case FASTRPC_IOCTL_MMAP:
1976 VERIFY(err, 0 == copy_from_user(&p.mmap, param,
1977 sizeof(p.mmap)));
1978 if (err)
1979 goto bail;
1980 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
1981 if (err)
1982 goto bail;
1983 VERIFY(err, 0 == copy_to_user(param, &p.mmap, sizeof(p.mmap)));
1984 if (err)
1985 goto bail;
1986 break;
1987 case FASTRPC_IOCTL_MUNMAP:
1988 VERIFY(err, 0 == copy_from_user(&p.munmap, param,
1989 sizeof(p.munmap)));
1990 if (err)
1991 goto bail;
1992 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
1993 &p.munmap)));
1994 if (err)
1995 goto bail;
1996 break;
1997 case FASTRPC_IOCTL_SETMODE:
1998 switch ((uint32_t)ioctl_param) {
1999 case FASTRPC_MODE_PARALLEL:
2000 case FASTRPC_MODE_SERIAL:
2001 fl->mode = (uint32_t)ioctl_param;
2002 break;
2003 default:
2004 err = -ENOTTY;
2005 break;
2006 }
2007 break;
2008 case FASTRPC_IOCTL_GETINFO:
2009 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
2010 if (err)
2011 goto bail;
2012 VERIFY(err, 0 == copy_to_user(param, &info, sizeof(info)));
2013 if (err)
2014 goto bail;
2015 break;
2016 case FASTRPC_IOCTL_INIT:
2017 VERIFY(err, 0 == copy_from_user(&p.init, param,
2018 sizeof(p.init)));
2019 if (err)
2020 goto bail;
2021 VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
2022 if (err)
2023 goto bail;
2024 break;
2025
2026 default:
2027 err = -ENOTTY;
2028 pr_info("bad ioctl: %d\n", ioctl_num);
2029 break;
2030 }
2031 bail:
2032 return err;
2033}
2034
static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
					unsigned long code,
					void *data)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
	cid = ctx - &me->channel[0];
	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		mutex_lock(&me->smd_mutex);
		ctx->ssrcount++;
		if (ctx->chan) {
			fastrpc_glink_close(ctx->chan, cid);
			ctx->chan = NULL;
			pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
				gcinfo[cid].name, MAJOR(me->dev_no), cid);
		}
		mutex_unlock(&me->smd_mutex);
		fastrpc_notify_drivers(me, cid);
	}

	return NOTIFY_DONE;
}

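/*
 * Character-device entry points and the DT compatibles this driver
 * binds to. One platform driver serves three node flavours: top-level
 * compute nodes (adsp/compute), per-context-bank ("-cb") children, and
 * the ADSP shared-memory region; fastrpc_probe() tells them apart.
 */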
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};

static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};

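/*
 * Probe one SMMU context-bank ("-cb") child node: match its "label"
 * property against a known channel, read the context-bank number from
 * the "iommus" phandle, and attach the device to a fresh ARM IOMMU
 * mapping. Secure context banks get a lower VA base (0x60000000 rather
 * than 0x80000000) and have their domain tagged with VMID_CP_PIXEL via
 * DOMAIN_ATTR_SECURE_VMID.
 */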
static int fastrpc_cb_probe(struct device *dev)
{
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	struct of_phandle_args iommuspec;
	const char *name;
	unsigned int start = 0x80000000;
	int err = 0, i;
	int secure_vmid = VMID_CP_PIXEL;

	VERIFY(err, 0 != (name = of_get_property(dev->of_node, "label", NULL)));
	if (err)
		goto bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		if (!strcmp(name, gcinfo[i].name))
			break;
	}
	VERIFY(err, i < NUM_CHANNELS);
	if (err)
		goto bail;
	chan = &gcinfo[i];
	VERIFY(err, chan->sesscount < NUM_SESSIONS);
	if (err)
		goto bail;

	VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
						"#iommu-cells", 0, &iommuspec));
	if (err)
		goto bail;
	sess = &chan->session[chan->sesscount];
	sess->smmu.cb = iommuspec.args[0] & 0xf;
	sess->used = 0;
	sess->smmu.coherent = of_property_read_bool(dev->of_node,
						    "dma-coherent");
	sess->smmu.secure = of_property_read_bool(dev->of_node,
						  "qcom,secure-context-bank");
	if (sess->smmu.secure)
		start = 0x60000000;
	VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
			arm_iommu_create_mapping(&platform_bus_type,
						 start, 0x7fffffff)));
	if (err)
		goto bail;

	if (sess->smmu.secure)
		iommu_domain_set_attr(sess->smmu.mapping->domain,
				      DOMAIN_ATTR_SECURE_VMID,
				      &secure_vmid);

	VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
	if (err)
		goto bail;
	sess->dev = dev;
	sess->smmu.enabled = 1;
	chan->sesscount++;
bail:
	return err;
}

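/*
 * Top-level probe, dispatched by compatible string. Context-bank nodes
 * are handed to fastrpc_cb_probe(); the ADSP memory-region node looks
 * up the ION ADSP heap's CMA area and hyp-assigns it so that HLOS keeps
 * read/write access while the three DSP subsystems gain read/write/
 * execute; any other compatible is a top-level compute node whose
 * children are populated as platform devices.
 */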
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
						VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					};

			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
						     srcVM, 1, destVM,
						     destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					  fastrpc_match_table,
					  NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}

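/*
 * Tear down in reverse order of setup: drop the last reference on any
 * open glink channel (kref_put_mutex() serializes against openers via
 * smd_mutex), then detach and release every SMMU session mapping.
 */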
static void fastrpc_deinit(void)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *chan = gcinfo;
	int i, j;

	for (i = 0; i < NUM_CHANNELS; i++, chan++) {
		if (chan->chan) {
			kref_put_mutex(&chan->kref,
				fastrpc_channel_close, &me->smd_mutex);
			chan->chan = NULL;
		}
		for (j = 0; j < NUM_SESSIONS; j++) {
			struct fastrpc_session_ctx *sess = &chan->session[j];

			if (sess->smmu.enabled) {
				arm_iommu_detach_device(sess->dev);
				sess->dev = NULL;
			}
			if (sess->smmu.mapping) {
				arm_iommu_release_mapping(sess->smmu.mapping);
				sess->smmu.mapping = NULL;
			}
		}
	}
}

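/*
 * A single platform driver instance covers every fastrpc compatible;
 * fastrpc_probe() above dispatches per node type.
 */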
static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};

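/*
 * Module init: register the platform driver, carve out NUM_CHANNELS
 * minors on one char-device region, create a device node per named
 * channel (adsp/mdsp/slpi/cdsp), and hook each channel into subsystem-
 * restart notifications. The unwind labels run in strict reverse order
 * of the setup steps above them.
 */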
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					     DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				  NUM_CHANNELS));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		me->channel[i].dev = device_create(me->class, NULL,
						   MKDEV(MAJOR(me->dev_no), i),
						   NULL, gcinfo[i].name);
		VERIFY(err, !IS_ERR(me->channel[i].dev));
		if (err)
			goto device_create_bail;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	return 0;
device_create_bail:
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (IS_ERR_OR_NULL(me->channel[i].dev))
			continue;
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						 &me->channel[i].nb);
	}
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
	platform_driver_unregister(&fastrpc_driver);
register_bail:
	fastrpc_deinit();
	return err;
}

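/*
 * Module exit: destroy any files still open, release channels and SMMU
 * sessions via fastrpc_deinit(), then dismantle the device nodes, char
 * device, ION client, and platform driver created at init.
 */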
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;
	int i;

	fastrpc_file_list_dtor(me);
	fastrpc_deinit();
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						 &me->channel[i].nb);
	}
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
	ion_client_destroy(me->client);
	platform_driver_unregister(&fastrpc_driver);
}

late_initcall(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");