blob: 8ea7c3156456e2f3270c4deead65724bb8efb648 [file] [log] [blame]
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001/*
Sathish Ambleyae5ee542017-01-16 22:24:23 -08002 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
29#include <soc/qcom/subsystem_notif.h>
30#include <soc/qcom/subsystem_restart.h>
31#include <linux/scatterlist.h>
32#include <linux/fs.h>
33#include <linux/uaccess.h>
34#include <linux/device.h>
35#include <linux/of.h>
36#include <linux/of_address.h>
37#include <linux/of_platform.h>
38#include <linux/dma-contiguous.h>
39#include <linux/cma.h>
40#include <linux/iommu.h>
41#include <linux/kref.h>
42#include <linux/sort.h>
43#include <linux/msm_dma_iommu_mapping.h>
44#include <asm/dma-iommu.h>
45#include "adsprpc_compat.h"
46#include "adsprpc_shared.h"
Sathish Ambley1ca68232017-01-19 10:32:55 -080047#include <linux/debugfs.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070048
49#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
50#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
51#define TZ_PIL_AUTH_QDSP6_PROC 1
52#define FASTRPC_ENOSUCH 39
53#define VMID_SSC_Q6 5
54#define VMID_ADSP_Q6 6
Sathish Ambley1ca68232017-01-19 10:32:55 -080055#define DEBUGFS_SIZE 1024
Sathish Ambley69e1ab02016-10-18 10:28:15 -070056
57#define RPC_TIMEOUT (5 * HZ)
58#define BALIGN 128
59#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
60#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
Sathish Ambley58dc64d2016-11-29 17:11:53 -080061#define M_FDLIST 16
Sathish Ambley69e1ab02016-10-18 10:28:15 -070062
63#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
64
65#define FASTRPC_LINK_STATE_DOWN (0x0)
66#define FASTRPC_LINK_STATE_UP (0x1)
67#define FASTRPC_LINK_DISCONNECTED (0x0)
68#define FASTRPC_LINK_CONNECTING (0x1)
69#define FASTRPC_LINK_CONNECTED (0x3)
70#define FASTRPC_LINK_DISCONNECTING (0x7)
71
Sathish Ambleya21b5b52017-01-11 16:11:01 -080072#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
73#define FASTRPC_STATIC_HANDLE_LISTENER (3)
74#define FASTRPC_STATIC_HANDLE_MAX (20)
75
/* No-op terminator used to close a PERF()-measured statement. */
#define PERF_END (void)0

/*
 * When profiling is enabled ('enb'), time statement 'ff' and add the
 * elapsed nanoseconds into accumulator 'cnt'. Expands to a brace
 * block; the measured statement is closed with PERF_END.
 */
#define PERF(enb, cnt, ff) \
	{\
		struct timespec startT = {0};\
		if (enb) {\
			getnstimeofday(&startT);\
		} \
		ff ;\
		if (enb) {\
			cnt += getnstimediff(&startT);\
		} \
	}
89
Sathish Ambley69e1ab02016-10-18 10:28:15 -070090static int fastrpc_glink_open(int cid);
91static void fastrpc_glink_close(void *chan, int cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -080092static struct dentry *debugfs_root;
93static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -070094
95static inline uint64_t buf_page_start(uint64_t buf)
96{
97 uint64_t start = (uint64_t) buf & PAGE_MASK;
98 return start;
99}
100
101static inline uint64_t buf_page_offset(uint64_t buf)
102{
103 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
104 return offset;
105}
106
107static inline int buf_num_pages(uint64_t buf, ssize_t len)
108{
109 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
110 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
111 int nPages = end - start + 1;
112 return nPages;
113}
114
115static inline uint64_t buf_page_size(uint32_t size)
116{
117 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
118
119 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
120}
121
/* Convert a 64-bit value to a pointer, going through uintptr_t so the
 * narrowing (on 32-bit kernels) is explicit and well-defined. */
static inline void *uint64_to_ptr(uint64_t addr)
{
	return (void *)(uintptr_t)addr;
}
128
/* Convert a pointer to a 64-bit value via uintptr_t (zero-extends on
 * 32-bit kernels). */
static inline uint64_t ptr_to_uint64(void *ptr)
{
	return (uint64_t)(uintptr_t)ptr;
}
135
struct fastrpc_file;

/* One DMA-coherent scratch buffer owned by a client file; free buffers
 * are cached on fl->bufs for reuse (see fastrpc_buf_free/alloc). */
struct fastrpc_buf {
	struct hlist_node hn;		/* link on fl->bufs */
	struct fastrpc_file *fl;	/* owning client */
	void *virt;			/* kernel virtual address */
	uint64_t phys;			/* device address; SMMU context bank
					 * id encoded in bits 32+ */
	ssize_t size;
};

struct fastrpc_ctx_lst;

/* One invocation argument's address span and, after the sort in
 * context_build_overlap, the merged (de-overlapped) region to copy. */
struct overlap {
	uintptr_t start;	/* user buffer start */
	uintptr_t end;		/* user buffer end (exclusive) */
	int raix;		/* index back into the remote arg array */
	uintptr_t mstart;	/* merged region start (0 when contained) */
	uintptr_t mend;		/* merged region end (0 when contained) */
	uintptr_t offset;	/* start - mstart adjustment */
};

/* Per-invocation state: argument copies, ion mappings, completion. */
struct smq_invoke_ctx {
	struct hlist_node hn;		/* on clst->pending/interrupted */
	struct completion work;		/* signalled by context_notify_user */
	int retval;			/* remote return value */
	int pid;			/* invoking thread id */
	int tgid;			/* invoking process id */
	remote_arg_t *lpra;		/* local (caller-side) args */
	remote_arg64_t *rpra;		/* translated args inside ctx->buf */
	int *fds;			/* ion fd per argument */
	unsigned int *attrs;		/* FASTRPC_ATTR_* per argument */
	struct fastrpc_mmap **maps;	/* mapping per argument */
	struct fastrpc_buf *buf;	/* metadata + copy buffer */
	ssize_t used;			/* bytes used in buf */
	struct fastrpc_file *fl;	/* owning client */
	uint32_t sc;			/* scalars descriptor */
	struct overlap *overs;		/* per-argument overlap records */
	struct overlap **overps;	/* overs sorted by start address */
	struct smq_msg msg;		/* invocation message
					 * (see adsprpc_shared.h) */
};

/* Pending and interrupted invocation lists for one client file. */
struct fastrpc_ctx_lst {
	struct hlist_head pending;
	struct hlist_head interrupted;
};

/* SMMU state for one session / context bank. */
struct fastrpc_smmu {
	struct dma_iommu_mapping *mapping;
	int cb;			/* context bank id (0 = none) */
	int enabled;
	int faults;
	int secure;		/* secure context bank */
	int coherent;		/* IO-coherent: skip manual cache flush */
};

/* One device session on a channel; 'used' marks it claimed. */
struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};

/* glink transport state for one channel. */
struct fastrpc_glink_info {
	int link_state;		/* FASTRPC_LINK_STATE_* */
	int port_state;		/* FASTRPC_LINK_*CONNECT* */
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};

/* Per-DSP channel (adsp/mdsp/slpi/cdsp) state; configured in gcinfo[]. */
struct fastrpc_channel_ctx {
	char *name;
	char *subsys;			/* subsystem-restart name */
	void *chan;			/* transport channel handle */
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;
	struct notifier_block nb;	/* SSR notifier block */
	struct kref kref;
	int sesscount;
	int ssrcount;			/* bumped on subsystem restart */
	void *handle;
	int prevssrcount;
	int vmid;			/* remote VM id for hyp_assign_phys
					 * (0 = none) */
	struct fastrpc_glink_info link;
};

/* Driver-global state; single instance: gfa. */
struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;		/* global mappings */
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;	/* all open client files */
	spinlock_t hlock;		/* guards maps and drivers */
	struct ion_client *client;
	struct device *dev;
};

/* One ion buffer mapped for remote access. */
struct fastrpc_mmap {
	struct hlist_node hn;		/* on fl->maps or gfa.maps */
	struct fastrpc_file *fl;
	struct fastrpc_apps *apps;
	int fd;				/* ion fd it was created from */
	uint32_t flags;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;			/* device address (cb id bits 32+) */
	ssize_t size;			/* mapped size */
	uintptr_t va;			/* user virtual address */
	ssize_t len;			/* user-requested length */
	int refs;			/* guarded by fl->hlock */
	uintptr_t raddr;		/* remote-side address; matched in
					 * fastrpc_mmap_remove */
	int uncached;
	int secure;
	uintptr_t attr;			/* FASTRPC_ATTR_* */
};

/* Accumulated per-client profiling counters, in ns (see PERF_KEYS). */
struct fastrpc_perf {
	int64_t count;
	int64_t flush;
	int64_t map;
	int64_t copy;
	int64_t link;
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;
	int64_t invoke;
};

/* Per-open-fd client state. */
struct fastrpc_file {
	struct hlist_node hn;		/* on gfa.drivers */
	spinlock_t hlock;		/* guards maps, bufs, clst */
	struct hlist_head maps;
	struct hlist_head bufs;		/* cached free buffers */
	struct fastrpc_ctx_lst clst;
	struct fastrpc_session_ctx *sctx;	/* normal session */
	struct fastrpc_session_ctx *secsctx;	/* secure session */
	uint32_t mode;
	uint32_t profile;		/* non-zero: collect perf counters */
	int tgid;
	int cid;			/* channel id */
	int ssrcount;			/* channel ssrcount at open */
	int pd;
	struct fastrpc_apps *apps;
	struct fastrpc_perf perf;
	struct dentry *debugfs_file;
};
288
/* Global driver state, shared by all clients. */
static struct fastrpc_apps gfa;

/*
 * Static per-channel configuration, one entry per remote subsystem
 * (indexed by channel id, NUM_CHANNELS entries: adsp, mdsp, slpi, cdsp).
 * 'subsys' names the subsystem-restart domain; link.link_info selects
 * the glink edge/transport.
 */
static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};
317
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800318static inline int64_t getnstimediff(struct timespec *start)
319{
320 int64_t ns;
321 struct timespec ts, b;
322
323 getnstimeofday(&ts);
324 b = timespec_sub(ts, *start);
325 ns = timespec_to_ns(&b);
326 return ns;
327}
328
/*
 * Release a scratch buffer. When 'cache' is set the buffer is parked on
 * the owner's free list for reuse instead of being freed. Otherwise the
 * memory is (if it had been shared with a remote VM) reassigned back to
 * HLOS and returned to the DMA pool.
 */
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == 0 ? 0 : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		/* park on the per-file free list for reuse */
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		/* strip the context-bank id packed into the high bits so
		 * the address matches what dma_alloc_coherent returned */
		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			/* give the pages back to HLOS exclusively */
			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->dev, buf->size, buf->virt,
					buf->phys);
	}
	kfree(buf);
}
360
361static void fastrpc_buf_list_free(struct fastrpc_file *fl)
362{
363 struct fastrpc_buf *buf, *free;
364
365 do {
366 struct hlist_node *n;
367
368 free = 0;
369 spin_lock(&fl->hlock);
370 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
371 hlist_del_init(&buf->hn);
372 free = buf;
373 break;
374 }
375 spin_unlock(&fl->hlock);
376 if (free)
377 fastrpc_buf_free(free, 0);
378 } while (free);
379}
380
381static void fastrpc_mmap_add(struct fastrpc_mmap *map)
382{
383 struct fastrpc_file *fl = map->fl;
384
385 spin_lock(&fl->hlock);
386 hlist_add_head(&map->hn, &fl->maps);
387 spin_unlock(&fl->hlock);
388}
389
390static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800391 ssize_t len, int mflags, int refs, struct fastrpc_mmap **ppmap)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700392{
393 struct fastrpc_mmap *match = 0, *map;
394 struct hlist_node *n;
395
396 spin_lock(&fl->hlock);
397 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
398 if (va >= map->va &&
399 va + len <= map->va + map->len &&
400 map->fd == fd) {
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800401 if (refs)
402 map->refs++;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700403 match = map;
404 break;
405 }
406 }
407 spin_unlock(&fl->hlock);
408 if (match) {
409 *ppmap = match;
410 return 0;
411 }
412 return -ENOTTY;
413}
414
415static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
416 ssize_t len, struct fastrpc_mmap **ppmap)
417{
418 struct fastrpc_mmap *match = 0, *map;
419 struct hlist_node *n;
420 struct fastrpc_apps *me = &gfa;
421
422 spin_lock(&me->hlock);
423 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
424 if (map->raddr == va &&
425 map->raddr + map->len == va + len &&
426 map->refs == 1) {
427 match = map;
428 hlist_del_init(&map->hn);
429 break;
430 }
431 }
432 spin_unlock(&me->hlock);
433 if (match) {
434 *ppmap = match;
435 return 0;
436 }
437 spin_lock(&fl->hlock);
438 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
439 if (map->raddr == va &&
440 map->raddr + map->len == va + len &&
441 map->refs == 1) {
442 match = map;
443 hlist_del_init(&map->hn);
444 break;
445 }
446 }
447 spin_unlock(&fl->hlock);
448 if (match) {
449 *ppmap = match;
450 return 0;
451 }
452 return -ENOTTY;
453}
454
/*
 * Drop one reference on a mapping; on the last reference, unlink it and
 * tear everything down: ion handle, SMMU mapping, hypervisor VM
 * assignment, dma-buf attachment. NULL is tolerated.
 */
static void fastrpc_mmap_free(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;
	int destVM[1] = {VMID_HLOS};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	if (!map)
		return;
	fl = map->fl;
	/* decrement and unlink under the owner's lock */
	spin_lock(&fl->hlock);
	map->refs--;
	if (!map->refs)
		hlist_del_init(&map->hn);
	spin_unlock(&fl->hlock);
	/* NOTE(review): refs is re-read after the lock is dropped; this
	 * assumes no concurrent re-reference of an unlinked map — verify */
	if (map->refs > 0)
		return;
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	if (!IS_ERR_OR_NULL(map->handle))
		ion_free(fl->apps->client, map->handle);
	if (sess->smmu.enabled) {
		if (map->size || map->phys)
			msm_dma_unmap_sg(sess->dev,
				map->table->sgl,
				map->table->nents, DMA_BIDIRECTIONAL,
				map->buf);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid && map->phys) {
		int srcVM[2] = {VMID_HLOS, vmid};

		/* return the pages exclusively to HLOS */
		hyp_assign_phys(map->phys, buf_page_size(map->size),
			srcVM, 2, destVM, destVMperm, 1);
	}

	if (!IS_ERR_OR_NULL(map->table))
		dma_buf_unmap_attachment(map->attach, map->table,
				DMA_BIDIRECTIONAL);
	if (!IS_ERR_OR_NULL(map->attach))
		dma_buf_detach(map->buf, map->attach);
	if (!IS_ERR_OR_NULL(map->buf))
		dma_buf_put(map->buf);
	kfree(map);
}
504
505static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
506 struct fastrpc_session_ctx **session);
507
/*
 * Create (or find and re-reference) a mapping of ion fd 'fd' covering
 * [va, va + len) for remote access. Imports the ion buffer, attaches
 * and maps it for DMA on the session device, packs the SMMU context
 * bank id into the high bits of the device address, and (when the
 * channel has a remote vmid) shares the pages with the remote VM.
 * Returns 0 with *ppmap set, or a negative error; a partially built
 * map is released via fastrpc_mmap_free on failure.
 */
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, ssize_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = 0;
	unsigned long attrs;
	unsigned long flags;
	int err = 0, vmid;

	/* reuse an existing mapping when one already covers the range */
	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	VERIFY(err, !IS_ERR_OR_NULL(map->handle =
			ion_import_dma_buf_fd(fl->apps->client, fd)));
	if (err)
		goto bail;
	VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
	if (err)
		goto bail;

	map->uncached = !ION_IS_CACHED(flags);
	/* NOVA buffers are treated as uncached: no flush, no vma lookup */
	if (map->attr & FASTRPC_ATTR_NOVA)
		map->uncached = 1;

	map->secure = flags & ION_FLAG_SECURE;
	if (map->secure) {
		/* secure buffers need the client's secure session */
		if (!fl->secsctx)
			err = fastrpc_session_alloc(chan, 1,
						&fl->secsctx);
		if (err)
			goto bail;
	}
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->attach =
			dma_buf_attach(map->buf, sess->dev)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->table =
		dma_buf_map_attachment(map->attach,
			DMA_BIDIRECTIONAL)));
	if (err)
		goto bail;
	if (sess->smmu.enabled) {
		attrs = DMA_ATTR_EXEC_MAPPING;
		/* the SMMU must map the buffer as one contiguous IOVA:
		 * all nents must be collapsed by the IOMMU mapping */
		VERIFY(err, map->table->nents ==
			msm_dma_map_sg_attrs(sess->dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
		if (err)
			goto bail;
	} else {
		/* without an SMMU the buffer must be physically contiguous */
		VERIFY(err, map->table->nents == 1);
		if (err)
			goto bail;
	}
	map->phys = sg_dma_address(map->table->sgl);
	if (sess->smmu.cb) {
		/* tag the device address with the context bank id */
		map->phys += ((uint64_t)sess->smmu.cb << 32);
		map->size = sg_dma_len(map->table->sgl);
	} else {
		map->size = buf_page_size(len);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		/* share the pages with the remote VM (HLOS keeps access) */
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(map->phys,
				buf_page_size(map->size),
				srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}
	map->va = va;
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}
615
/*
 * Hand out a scratch buffer of at least 'size' bytes. The per-file
 * cache is searched for the smallest buffer that fits; otherwise a new
 * DMA-coherent buffer is allocated (retrying once after draining the
 * cache) and, when the channel has a remote vmid, shared with the
 * remote VM. Returns 0 with *obuf set, negative error otherwise.
 */
static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
			     struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = 0, *fr = 0;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = 0;
	VERIFY(err, buf = kzalloc(sizeof(*buf), GFP_KERNEL));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = 0;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
				       (void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
					       (void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	/* tag the device address with the SMMU context bank id */
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		/* share the pages with the remote VM (HLOS keeps access) */
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}
681
682
683static int context_restore_interrupted(struct fastrpc_file *fl,
684 struct fastrpc_ioctl_invoke_attrs *inv,
685 struct smq_invoke_ctx **po)
686{
687 int err = 0;
688 struct smq_invoke_ctx *ctx = 0, *ictx = 0;
689 struct hlist_node *n;
690 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
691
692 spin_lock(&fl->hlock);
693 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
694 if (ictx->pid == current->pid) {
695 if (invoke->sc != ictx->sc || ictx->fl != fl)
696 err = -1;
697 else {
698 ctx = ictx;
699 hlist_del_init(&ctx->hn);
700 hlist_add_head(&ctx->hn, &fl->clst.pending);
701 }
702 break;
703 }
704 }
705 spin_unlock(&fl->hlock);
706 if (ctx)
707 *po = ctx;
708 return err;
709}
710
711#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
712static int overlap_ptr_cmp(const void *a, const void *b)
713{
714 struct overlap *pa = *((struct overlap **)a);
715 struct overlap *pb = *((struct overlap **)b);
716 /* sort with lowest starting buffer first */
717 int st = CMP(pa->start, pb->start);
718 /* sort with highest ending buffer first */
719 int ed = CMP(pb->end, pa->end);
720 return st == 0 ? ed : st;
721}
722
/*
 * Compute, for every in/out buffer argument, the region that actually
 * needs its own copy space. Buffers are sorted by start address (ties:
 * longest first); a buffer overlapping an earlier one only contributes
 * the non-overlapping tail [mstart, mend), recorded together with the
 * 'offset' back to its true start. A buffer fully contained in an
 * earlier one gets mstart = mend = 0 (no extra space). Rejects
 * non-empty buffers whose end wraps around. Returns 0 or -error.
 */
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			/* guard against address-range wrap-around */
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, 0);
	/* 'max' tracks the furthest end seen so far in sorted order */
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			/* overlaps an earlier buffer: keep only the tail */
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				/* fully contained: needs no space at all */
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			/* disjoint: copy the whole buffer */
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}
768
/*
 * Copy 'size' bytes in from 'src': copy_from_user for user pointers,
 * plain memmove when the caller is in-kernel ('kernel' non-zero).
 * Sets 'err' via VERIFY when the user copy fails.
 */
#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst), (src),\
							(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

/*
 * Copy 'size' bytes out to 'dst': copy_to_user for user pointers,
 * plain memmove for in-kernel callers. Sets 'err' via VERIFY when the
 * user copy fails.
 */
#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((dst), (src),\
						      (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)
786
787
788static void context_free(struct smq_invoke_ctx *ctx);
789
/*
 * Allocate and populate an invocation context. All variable-length
 * arrays (maps, lpra, fds, attrs, overs, overps) are carved out of a
 * single allocation trailing the struct. Copies the argument
 * descriptors (and optional fds/attrs) in from the caller, builds the
 * overlap layout, and queues the context on the pending list. Returns
 * 0 with *po set, or a negative error (context freed on failure).
 */
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			 struct fastrpc_ioctl_invoke_attrs *invokefd,
			 struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	/* trailing space for all per-argument arrays */
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	/* fake-hash so context_free can unconditionally hlist_del_init */
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	/* carve the per-argument arrays out of the trailing allocation */
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}

	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = current->tgid;
	init_completion(&ctx->work);

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}
859
860static void context_save_interrupted(struct smq_invoke_ctx *ctx)
861{
862 struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
863
864 spin_lock(&ctx->fl->hlock);
865 hlist_del_init(&ctx->hn);
866 hlist_add_head(&ctx->hn, &clst->interrupted);
867 spin_unlock(&ctx->fl->hlock);
868 /* free the cache on power collapse */
869 fastrpc_buf_list_free(ctx->fl);
870}
871
872static void context_free(struct smq_invoke_ctx *ctx)
873{
874 int i;
875 int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
876 REMOTE_SCALARS_OUTBUFS(ctx->sc);
877 spin_lock(&ctx->fl->hlock);
878 hlist_del_init(&ctx->hn);
879 spin_unlock(&ctx->fl->hlock);
880 for (i = 0; i < nbufs; ++i)
881 fastrpc_mmap_free(ctx->maps[i]);
882 fastrpc_buf_free(ctx->buf, 1);
883 kfree(ctx);
884}
885
/* Record the remote return value and wake the waiting invoker. */
static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}
891
892
893static void fastrpc_notify_users(struct fastrpc_file *me)
894{
895 struct smq_invoke_ctx *ictx;
896 struct hlist_node *n;
897
898 spin_lock(&me->hlock);
899 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
900 complete(&ictx->work);
901 }
902 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
903 complete(&ictx->work);
904 }
905 spin_unlock(&me->hlock);
906
907}
908
909static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
910{
911 struct fastrpc_file *fl;
912 struct hlist_node *n;
913
914 spin_lock(&me->hlock);
915 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
916 if (fl->cid == cid)
917 fastrpc_notify_users(fl);
918 }
919 spin_unlock(&me->hlock);
920
921}
922static void context_list_ctor(struct fastrpc_ctx_lst *me)
923{
924 INIT_HLIST_HEAD(&me->interrupted);
925 INIT_HLIST_HEAD(&me->pending);
926}
927
928static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
929{
930 struct fastrpc_ctx_lst *clst = &fl->clst;
931 struct smq_invoke_ctx *ictx = 0, *ctxfree;
932 struct hlist_node *n;
933
934 do {
935 ctxfree = 0;
936 spin_lock(&fl->hlock);
937 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
938 hlist_del_init(&ictx->hn);
939 ctxfree = ictx;
940 break;
941 }
942 spin_unlock(&fl->hlock);
943 if (ctxfree)
944 context_free(ctxfree);
945 } while (ctxfree);
946 do {
947 ctxfree = 0;
948 spin_lock(&fl->hlock);
949 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
950 hlist_del_init(&ictx->hn);
951 ctxfree = ictx;
952 break;
953 }
954 spin_unlock(&fl->hlock);
955 if (ctxfree)
956 context_free(ctxfree);
957 } while (ctxfree);
958}
959
960static int fastrpc_file_free(struct fastrpc_file *fl);
961static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
962{
963 struct fastrpc_file *fl, *free;
964 struct hlist_node *n;
965
966 do {
967 free = 0;
968 spin_lock(&me->hlock);
969 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
970 hlist_del_init(&fl->hn);
971 free = fl;
972 break;
973 }
974 spin_unlock(&me->hlock);
975 if (free)
976 fastrpc_file_free(free);
977 } while (free);
978}
979
980static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
981{
982 remote_arg64_t *rpra;
983 remote_arg_t *lpra = ctx->lpra;
984 struct smq_invoke_buf *list;
985 struct smq_phy_page *pages, *ipage;
986 uint32_t sc = ctx->sc;
987 int inbufs = REMOTE_SCALARS_INBUFS(sc);
988 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -0800989 int handles, bufs = inbufs + outbufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700990 uintptr_t args;
991 ssize_t rlen = 0, copylen = 0, metalen = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -0800992 int i, oix;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700993 int err = 0;
994 int mflags = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -0800995 uint64_t *fdlist;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700996
997 /* calculate size of the metadata */
998 rpra = 0;
999 list = smq_invoke_buf_start(rpra, sc);
1000 pages = smq_phy_page_start(sc, list);
1001 ipage = pages;
1002
1003 for (i = 0; i < bufs; ++i) {
1004 uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
1005 ssize_t len = lpra[i].buf.len;
1006
1007 if (ctx->fds[i] && (ctx->fds[i] != -1))
1008 fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1009 ctx->attrs[i], buf, len,
1010 mflags, &ctx->maps[i]);
1011 ipage += 1;
1012 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001013 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1014 for (i = bufs; i < bufs + handles; i++) {
1015 VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1016 FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
1017 if (err)
1018 goto bail;
1019 ipage += 1;
1020 }
1021 metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001022 /* calculate len requreed for copying */
1023 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1024 int i = ctx->overps[oix]->raix;
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001025 uintptr_t mstart, mend;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001026 ssize_t len = lpra[i].buf.len;
1027
1028 if (!len)
1029 continue;
1030 if (ctx->maps[i])
1031 continue;
1032 if (ctx->overps[oix]->offset == 0)
1033 copylen = ALIGN(copylen, BALIGN);
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001034 mstart = ctx->overps[oix]->mstart;
1035 mend = ctx->overps[oix]->mend;
1036 VERIFY(err, (mend - mstart) <= LONG_MAX);
1037 if (err)
1038 goto bail;
1039 copylen += mend - mstart;
1040 VERIFY(err, copylen >= 0);
1041 if (err)
1042 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001043 }
1044 ctx->used = copylen;
1045
1046 /* allocate new buffer */
1047 if (copylen) {
1048 VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
1049 if (err)
1050 goto bail;
1051 }
1052 /* copy metadata */
1053 rpra = ctx->buf->virt;
1054 ctx->rpra = rpra;
1055 list = smq_invoke_buf_start(rpra, sc);
1056 pages = smq_phy_page_start(sc, list);
1057 ipage = pages;
1058 args = (uintptr_t)ctx->buf->virt + metalen;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001059 for (i = 0; i < bufs + handles; ++i) {
1060 if (lpra[i].buf.len)
1061 list[i].num = 1;
1062 else
1063 list[i].num = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001064 list[i].pgidx = ipage - pages;
1065 ipage++;
1066 }
1067 /* map ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001068 PERF(ctx->fl->profile, ctx->fl->perf.map,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001069 for (i = 0; i < inbufs + outbufs; ++i) {
1070 struct fastrpc_mmap *map = ctx->maps[i];
1071 uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
1072 ssize_t len = lpra[i].buf.len;
1073
1074 rpra[i].buf.pv = 0;
1075 rpra[i].buf.len = len;
1076 if (!len)
1077 continue;
1078 if (map) {
1079 struct vm_area_struct *vma;
1080 uintptr_t offset;
1081 int num = buf_num_pages(buf, len);
1082 int idx = list[i].pgidx;
1083
1084 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001085 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001086 } else {
1087 down_read(&current->mm->mmap_sem);
1088 VERIFY(err, NULL != (vma = find_vma(current->mm,
1089 map->va)));
1090 if (err) {
1091 up_read(&current->mm->mmap_sem);
1092 goto bail;
1093 }
1094 offset = buf_page_start(buf) - vma->vm_start;
1095 up_read(&current->mm->mmap_sem);
1096 VERIFY(err, offset < (uintptr_t)map->size);
1097 if (err)
1098 goto bail;
1099 }
1100 pages[idx].addr = map->phys + offset;
1101 pages[idx].size = num << PAGE_SHIFT;
1102 }
1103 rpra[i].buf.pv = buf;
1104 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001105 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001106 for (i = bufs; i < bufs + handles; ++i) {
1107 struct fastrpc_mmap *map = ctx->maps[i];
1108
1109 pages[i].addr = map->phys;
1110 pages[i].size = map->size;
1111 }
1112 fdlist = (uint64_t *)&pages[bufs + handles];
1113 for (i = 0; i < M_FDLIST; i++)
1114 fdlist[i] = 0;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001115
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001116 /* copy non ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001117 PERF(ctx->fl->profile, ctx->fl->perf.copy,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001118 rlen = copylen - metalen;
1119 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1120 int i = ctx->overps[oix]->raix;
1121 struct fastrpc_mmap *map = ctx->maps[i];
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001122 ssize_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001123 uint64_t buf;
1124 ssize_t len = lpra[i].buf.len;
1125
1126 if (!len)
1127 continue;
1128 if (map)
1129 continue;
1130 if (ctx->overps[oix]->offset == 0) {
1131 rlen -= ALIGN(args, BALIGN) - args;
1132 args = ALIGN(args, BALIGN);
1133 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001134 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001135 VERIFY(err, rlen >= mlen);
1136 if (err)
1137 goto bail;
1138 rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
1139 pages[list[i].pgidx].addr = ctx->buf->phys -
1140 ctx->overps[oix]->offset +
1141 (copylen - rlen);
1142 pages[list[i].pgidx].addr =
1143 buf_page_start(pages[list[i].pgidx].addr);
1144 buf = rpra[i].buf.pv;
1145 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1146 if (i < inbufs) {
1147 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1148 lpra[i].buf.pv, len);
1149 if (err)
1150 goto bail;
1151 }
1152 args = args + mlen;
1153 rlen -= mlen;
1154 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001155 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001156
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001157 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001158 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1159 int i = ctx->overps[oix]->raix;
1160 struct fastrpc_mmap *map = ctx->maps[i];
1161
1162 if (ctx->fl->sctx->smmu.coherent)
1163 continue;
1164 if (map && map->uncached)
1165 continue;
1166 if (rpra[i].buf.len && ctx->overps[oix]->mstart)
1167 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1168 uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
1169 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001170 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001171 for (i = bufs; i < bufs + handles; i++) {
1172 rpra[i].dma.fd = ctx->fds[i];
1173 rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1174 rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001175 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001176
1177 if (!ctx->fl->sctx->smmu.coherent) {
1178 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001179 dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001180 PERF_END);
1181 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001182 bail:
1183 return err;
1184}
1185
1186static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
1187 remote_arg_t *upra)
1188{
1189 uint32_t sc = ctx->sc;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001190 struct smq_invoke_buf *list;
1191 struct smq_phy_page *pages;
1192 struct fastrpc_mmap *mmap;
1193 uint64_t *fdlist;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001194 remote_arg64_t *rpra = ctx->rpra;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001195 int i, inbufs, outbufs, handles;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001196 int err = 0;
1197
1198 inbufs = REMOTE_SCALARS_INBUFS(sc);
1199 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001200 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1201 list = smq_invoke_buf_start(ctx->rpra, sc);
1202 pages = smq_phy_page_start(sc, list);
1203 fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001204 for (i = inbufs; i < inbufs + outbufs; ++i) {
1205 if (!ctx->maps[i]) {
1206 K_COPY_TO_USER(err, kernel,
1207 ctx->lpra[i].buf.pv,
1208 uint64_to_ptr(rpra[i].buf.pv),
1209 rpra[i].buf.len);
1210 if (err)
1211 goto bail;
1212 } else {
1213 fastrpc_mmap_free(ctx->maps[i]);
1214 ctx->maps[i] = 0;
1215 }
1216 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001217 if (inbufs + outbufs + handles) {
1218 for (i = 0; i < M_FDLIST; i++) {
1219 if (!fdlist[i])
1220 break;
1221 if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
Sathish Ambleyae5ee542017-01-16 22:24:23 -08001222 0, 0, &mmap))
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001223 fastrpc_mmap_free(mmap);
1224 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001225 }
1226 bail:
1227 return err;
1228}
1229
/*
 * inv_args_pre() - cache maintenance before handing buffers to the DSP.
 *
 * For each cached output buffer, flush the partial cache lines at the
 * unaligned head and tail of the buffer.  The later invalidate (see
 * inv_args()) would otherwise discard dirty CPU data that happens to
 * share those edge cache lines with neighbouring memory.
 */
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		/* Uncached mappings need no cache maintenance. */
		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		/* Buffers sharing the metadata page are handled together
		 * with the metadata flush elsewhere.
		 */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		/* Flush the cache line straddling the buffer start... */
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
							rpra[i].buf.len);
		/* ...and the one straddling the buffer end. */
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}
1259
/*
 * inv_args() - invalidate CPU caches over buffers the DSP may have written.
 *
 * Drops stale cached copies of every cached output buffer so subsequent
 * CPU reads observe the remote writes.  ION-backed mappings with a
 * handle use the ION cache op; anything else is invalidated by virtual
 * range.  The metadata region (rpra, ctx->used bytes) is invalidated as
 * a whole at the end, which also covers buffers that live on the
 * metadata page and were skipped in the loop.
 */
static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		/* Covered by the whole-metadata invalidate below. */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						 + rpra[i].buf.len));
	}

	/* rpra is 0 when the call carried no marshalled arguments. */
	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}
1293
/*
 * fastrpc_invoke_send() - package an invocation message and transmit it
 * over the glink channel for this file's channel id.
 *
 * msg->pid is zeroed for kernel-originated calls so the DSP can
 * distinguish them from user invocations.  Bit 0 of the context cookie
 * carries fl->pd (the PD selector); it is masked back off by the rx
 * callback before the cookie is dereferenced.
 *
 * Fails with -ECONNRESET when the channel went through subsystem
 * restart since this file attached, and with a VERIFY error when the
 * glink port is not CONNECTED.
 */
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, 0 != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = current->tgid;
	msg->tid = current->pid;
	if (kernel)
		msg->pid = 0;	/* marks a kernel-originated invocation */
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
		FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
 bail:
	return err;
}
1329
1330static void fastrpc_init(struct fastrpc_apps *me)
1331{
1332 int i;
1333
1334 INIT_HLIST_HEAD(&me->drivers);
1335 spin_lock_init(&me->hlock);
1336 mutex_init(&me->smd_mutex);
1337 me->channel = &gcinfo[0];
1338 for (i = 0; i < NUM_CHANNELS; i++) {
1339 init_completion(&me->channel[i].work);
1340 me->channel[i].sesscount = 0;
1341 }
1342}
1343
1344static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1345
1346static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
1347 uint32_t kernel,
1348 struct fastrpc_ioctl_invoke_attrs *inv)
1349{
1350 struct smq_invoke_ctx *ctx = 0;
1351 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
1352 int cid = fl->cid;
1353 int interrupted = 0;
1354 int err = 0;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001355 struct timespec invoket;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001356
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001357 if (fl->profile)
1358 getnstimeofday(&invoket);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001359 if (!kernel) {
1360 VERIFY(err, 0 == context_restore_interrupted(fl, inv,
1361 &ctx));
1362 if (err)
1363 goto bail;
1364 if (fl->sctx->smmu.faults)
1365 err = FASTRPC_ENOSUCH;
1366 if (err)
1367 goto bail;
1368 if (ctx)
1369 goto wait;
1370 }
1371
1372 VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
1373 if (err)
1374 goto bail;
1375
1376 if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001377 PERF(fl->profile, fl->perf.getargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001378 VERIFY(err, 0 == get_args(kernel, ctx));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001379 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001380 if (err)
1381 goto bail;
1382 }
1383
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001384 PERF(fl->profile, fl->perf.invargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001385 if (!fl->sctx->smmu.coherent) {
1386 inv_args_pre(ctx);
1387 if (mode == FASTRPC_MODE_SERIAL)
1388 inv_args(ctx);
1389 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001390 PERF_END);
1391
1392 PERF(fl->profile, fl->perf.link,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001393 VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001394 PERF_END);
1395
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001396 if (err)
1397 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001398 PERF(fl->profile, fl->perf.invargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001399 if (mode == FASTRPC_MODE_PARALLEL && !fl->sctx->smmu.coherent)
1400 inv_args(ctx);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001401 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001402 wait:
1403 if (kernel)
1404 wait_for_completion(&ctx->work);
1405 else {
1406 interrupted = wait_for_completion_interruptible(&ctx->work);
1407 VERIFY(err, 0 == (err = interrupted));
1408 if (err)
1409 goto bail;
1410 }
1411 VERIFY(err, 0 == (err = ctx->retval));
1412 if (err)
1413 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001414
1415 PERF(fl->profile, fl->perf.putargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001416 VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001417 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001418 if (err)
1419 goto bail;
1420 bail:
1421 if (ctx && interrupted == -ERESTARTSYS)
1422 context_save_interrupted(ctx);
1423 else if (ctx)
1424 context_free(ctx);
1425 if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
1426 err = ECONNRESET;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001427
1428 if (fl->profile && !interrupted) {
1429 if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
1430 fl->perf.invoke += getnstimediff(&invoket);
1431 if (!(invoke->handle >= 0 &&
1432 invoke->handle <= FASTRPC_STATIC_HANDLE_MAX))
1433 fl->perf.count++;
1434 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001435 return err;
1436}
1437
Sathish Ambley36849af2017-02-02 09:35:55 -08001438static int fastrpc_channel_open(struct fastrpc_file *fl);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001439static int fastrpc_init_process(struct fastrpc_file *fl,
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001440 struct fastrpc_ioctl_init_attrs *uproc)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001441{
1442 int err = 0;
1443 struct fastrpc_ioctl_invoke_attrs ioctl;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001444 struct fastrpc_ioctl_init *init = &uproc->init;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001445 struct smq_phy_page pages[1];
1446 struct fastrpc_mmap *file = 0, *mem = 0;
1447
Sathish Ambley36849af2017-02-02 09:35:55 -08001448 VERIFY(err, !fastrpc_channel_open(fl));
1449 if (err)
1450 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001451 if (init->flags == FASTRPC_INIT_ATTACH) {
1452 remote_arg_t ra[1];
1453 int tgid = current->tgid;
1454
1455 ra[0].buf.pv = (void *)&tgid;
1456 ra[0].buf.len = sizeof(tgid);
1457 ioctl.inv.handle = 1;
1458 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
1459 ioctl.inv.pra = ra;
1460 ioctl.fds = 0;
1461 ioctl.attrs = 0;
1462 fl->pd = 0;
1463 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1464 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1465 if (err)
1466 goto bail;
1467 } else if (init->flags == FASTRPC_INIT_CREATE) {
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001468 remote_arg_t ra[6];
1469 int fds[6];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001470 int mflags = 0;
1471 struct {
1472 int pgid;
1473 int namelen;
1474 int filelen;
1475 int pageslen;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001476 int attrs;
1477 int siglen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001478 } inbuf;
1479
1480 inbuf.pgid = current->tgid;
1481 inbuf.namelen = strlen(current->comm) + 1;
1482 inbuf.filelen = init->filelen;
1483 fl->pd = 1;
1484 if (init->filelen) {
1485 VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
1486 init->file, init->filelen, mflags, &file));
1487 if (err)
1488 goto bail;
1489 }
1490 inbuf.pageslen = 1;
1491 VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
1492 init->mem, init->memlen, mflags, &mem));
1493 if (err)
1494 goto bail;
1495 inbuf.pageslen = 1;
1496 ra[0].buf.pv = (void *)&inbuf;
1497 ra[0].buf.len = sizeof(inbuf);
1498 fds[0] = 0;
1499
1500 ra[1].buf.pv = (void *)current->comm;
1501 ra[1].buf.len = inbuf.namelen;
1502 fds[1] = 0;
1503
1504 ra[2].buf.pv = (void *)init->file;
1505 ra[2].buf.len = inbuf.filelen;
1506 fds[2] = init->filefd;
1507
1508 pages[0].addr = mem->phys;
1509 pages[0].size = mem->size;
1510 ra[3].buf.pv = (void *)pages;
1511 ra[3].buf.len = 1 * sizeof(*pages);
1512 fds[3] = 0;
1513
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001514 inbuf.attrs = uproc->attrs;
1515 ra[4].buf.pv = (void *)&(inbuf.attrs);
1516 ra[4].buf.len = sizeof(inbuf.attrs);
1517 fds[4] = 0;
1518
1519 inbuf.siglen = uproc->siglen;
1520 ra[5].buf.pv = (void *)&(inbuf.siglen);
1521 ra[5].buf.len = sizeof(inbuf.siglen);
1522 fds[5] = 0;
1523
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001524 ioctl.inv.handle = 1;
1525 ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001526 if (uproc->attrs)
1527 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001528 ioctl.inv.pra = ra;
1529 ioctl.fds = fds;
1530 ioctl.attrs = 0;
1531 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1532 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1533 if (err)
1534 goto bail;
1535 } else {
1536 err = -ENOTTY;
1537 }
1538bail:
1539 if (mem && err)
1540 fastrpc_mmap_free(mem);
1541 if (file)
1542 fastrpc_mmap_free(file);
1543 return err;
1544}
1545
1546static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
1547{
1548 int err = 0;
1549 struct fastrpc_ioctl_invoke_attrs ioctl;
1550 remote_arg_t ra[1];
1551 int tgid = 0;
1552
Sathish Ambley36849af2017-02-02 09:35:55 -08001553 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1554 if (err)
1555 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001556 VERIFY(err, fl->apps->channel[fl->cid].chan != 0);
1557 if (err)
1558 goto bail;
1559 tgid = fl->tgid;
1560 ra[0].buf.pv = (void *)&tgid;
1561 ra[0].buf.len = sizeof(tgid);
1562 ioctl.inv.handle = 1;
1563 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
1564 ioctl.inv.pra = ra;
1565 ioctl.fds = 0;
1566 ioctl.attrs = 0;
1567 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1568 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1569bail:
1570 return err;
1571}
1572
1573static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
1574 struct fastrpc_mmap *map)
1575{
1576 struct fastrpc_ioctl_invoke_attrs ioctl;
1577 struct smq_phy_page page;
1578 int num = 1;
1579 remote_arg_t ra[3];
1580 int err = 0;
1581 struct {
1582 int pid;
1583 uint32_t flags;
1584 uintptr_t vaddrin;
1585 int num;
1586 } inargs;
1587 struct {
1588 uintptr_t vaddrout;
1589 } routargs;
1590
1591 inargs.pid = current->tgid;
1592 inargs.vaddrin = (uintptr_t)map->va;
1593 inargs.flags = flags;
1594 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
1595 ra[0].buf.pv = (void *)&inargs;
1596 ra[0].buf.len = sizeof(inargs);
1597 page.addr = map->phys;
1598 page.size = map->size;
1599 ra[1].buf.pv = (void *)&page;
1600 ra[1].buf.len = num * sizeof(page);
1601
1602 ra[2].buf.pv = (void *)&routargs;
1603 ra[2].buf.len = sizeof(routargs);
1604
1605 ioctl.inv.handle = 1;
1606 if (fl->apps->compat)
1607 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
1608 else
1609 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
1610 ioctl.inv.pra = ra;
1611 ioctl.fds = 0;
1612 ioctl.attrs = 0;
1613 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1614 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1615 map->raddr = (uintptr_t)routargs.vaddrout;
1616
1617 return err;
1618}
1619
1620static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
1621 struct fastrpc_mmap *map)
1622{
1623 struct fastrpc_ioctl_invoke_attrs ioctl;
1624 remote_arg_t ra[1];
1625 int err = 0;
1626 struct {
1627 int pid;
1628 uintptr_t vaddrout;
1629 ssize_t size;
1630 } inargs;
1631
1632 inargs.pid = current->tgid;
1633 inargs.size = map->size;
1634 inargs.vaddrout = map->raddr;
1635 ra[0].buf.pv = (void *)&inargs;
1636 ra[0].buf.len = sizeof(inargs);
1637
1638 ioctl.inv.handle = 1;
1639 if (fl->apps->compat)
1640 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
1641 else
1642 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
1643 ioctl.inv.pra = ra;
1644 ioctl.fds = 0;
1645 ioctl.attrs = 0;
1646 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1647 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1648 return err;
1649}
1650
1651static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
1652 ssize_t len, struct fastrpc_mmap **ppmap);
1653
1654static void fastrpc_mmap_add(struct fastrpc_mmap *map);
1655
1656static int fastrpc_internal_munmap(struct fastrpc_file *fl,
1657 struct fastrpc_ioctl_munmap *ud)
1658{
1659 int err = 0;
1660 struct fastrpc_mmap *map = 0;
1661
1662 VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
1663 if (err)
1664 goto bail;
1665 VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
1666 if (err)
1667 goto bail;
1668 fastrpc_mmap_free(map);
1669bail:
1670 if (err && map)
1671 fastrpc_mmap_add(map);
1672 return err;
1673}
1674
/*
 * fastrpc_internal_mmap() - handler for the MMAP ioctl.
 *
 * If an identical mapping (fd/va/size/flags) already exists, the call
 * is a no-op returning 0 with the existing state left intact.
 * Otherwise a new mapping is created and registered with the DSP; on
 * success ud->vaddrout carries the remote-side address, on failure the
 * freshly created map is released.
 */
static int fastrpc_internal_mmap(struct fastrpc_file *fl,
				 struct fastrpc_ioctl_mmap *ud)
{

	struct fastrpc_mmap *map = 0;
	int err = 0;

	/* Idempotent: an already-present identical mapping is reused. */
	if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin, ud->size,
			       ud->flags, 1, &map))
		return 0;

	VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
			(uintptr_t)ud->vaddrin, ud->size, ud->flags, &map));
	if (err)
		goto bail;
	VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
	if (err)
		goto bail;
	ud->vaddrout = map->raddr;
 bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}
1699
/*
 * fastrpc_channel_close() - kref release function for a channel.
 *
 * Invoked with the apps smd_mutex HELD (callers drop the last reference
 * via kref_put_mutex(..., &apps->smd_mutex)); this function closes the
 * glink channel, clears the handle, and releases that mutex itself.
 */
static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	/* Channel id is the index into the static gcinfo table. */
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = 0;
	mutex_unlock(&me->smd_mutex);	/* taken by kref_put_mutex caller */
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
						MAJOR(me->dev_no), cid);
}
1714
1715static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
1716
/*
 * fastrpc_session_alloc_locked() - claim a free SMMU session on a channel.
 * @chan:    channel whose session table is searched.
 * @secure:  required value of the session's smmu.secure attribute.
 * @session: out parameter; receives the claimed session on success.
 *
 * Caller must hold the apps smd_mutex (the "used" flags are not
 * otherwise protected).  The first unused session matching @secure is
 * marked used and its fault count cleared.  Channels configured with no
 * sessions fall back to session 0 backed by the global device; that
 * path does not mark the session used.
 */
static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
			int secure, struct fastrpc_session_ctx **session)
{
	struct fastrpc_apps *me = &gfa;
	int idx = 0, err = 0;

	if (chan->sesscount) {
		for (idx = 0; idx < chan->sesscount; ++idx) {
			if (!chan->session[idx].used &&
				chan->session[idx].smmu.secure == secure) {
				chan->session[idx].used = 1;
				break;
			}
		}
		/* idx == sesscount means every matching session is busy. */
		VERIFY(err, idx < chan->sesscount);
		if (err)
			goto bail;
		chan->session[idx].smmu.faults = 0;
	} else {
		/* No per-channel sessions: use the global device. */
		VERIFY(err, me->dev != NULL);
		if (err)
			goto bail;
		chan->session[0].dev = me->dev;
	}

	*session = &chan->session[idx];
 bail:
	return err;
}
1746
1747bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv, size_t size)
1748{
1749 if (glink_queue_rx_intent(h, NULL, size))
1750 return false;
1751 return true;
1752}
1753
/*
 * fastrpc_glink_notify_tx_done() - glink TX-completion callback.
 *
 * Intentionally empty: no per-transmit bookkeeping is required here
 * (the message storage is owned by the invoke context, not by this
 * callback).
 */
void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
				  const void *pkt_priv, const void *ptr)
{
}
1758
/*
 * fastrpc_glink_notify_rx() - glink RX callback delivering DSP responses.
 *
 * The payload is an array of smq_invoke_rsp records.  Bit 0 of each
 * ctx cookie carries the PD flag set at send time (see
 * fastrpc_invoke_send()); it is masked off before the cookie is turned
 * back into the smq_invoke_ctx pointer and its waiter is completed.
 * The buffer is handed back to glink once all records are consumed.
 */
void fastrpc_glink_notify_rx(void *handle, const void *priv,
	const void *pkt_priv, const void *ptr, size_t size)
{
	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
	int len = size;

	while (len >= sizeof(*rsp) && rsp) {
		rsp->ctx = rsp->ctx & ~1;	/* strip the PD flag bit */
		context_notify_user(uint64_to_ptr(rsp->ctx), rsp->retval);
		rsp++;
		len = len - sizeof(*rsp);
	}
	glink_rx_done(handle, ptr, true);
}
1773
/*
 * fastrpc_glink_notify_state() - glink port state-change callback.
 *
 * priv carries the channel id chosen at glink_open() time.  CONNECTED
 * completes the channel's open waiters; LOCAL_DISCONNECTED records the
 * final port state; REMOTE_DISCONNECTED (e.g. subsystem restart on the
 * DSP side) closes and clears our side of the channel.
 */
void fastrpc_glink_notify_state(void *handle, const void *priv,
				unsigned int event)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)(uintptr_t)priv;
	struct fastrpc_glink_info *link;

	if (cid < 0 || cid >= NUM_CHANNELS)
		return;
	link = &me->channel[cid].link;
	switch (event) {
	case GLINK_CONNECTED:
		link->port_state = FASTRPC_LINK_CONNECTED;
		complete(&me->channel[cid].work);
		break;
	case GLINK_LOCAL_DISCONNECTED:
		link->port_state = FASTRPC_LINK_DISCONNECTED;
		break;
	case GLINK_REMOTE_DISCONNECTED:
		/* Remote side went away: tear down our end of the port. */
		if (me->channel[cid].chan) {
			fastrpc_glink_close(me->channel[cid].chan, cid);
			me->channel[cid].chan = 0;
		}
		break;
	default:
		break;
	}
}
1802
1803static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
1804 struct fastrpc_session_ctx **session)
1805{
1806 int err = 0;
1807 struct fastrpc_apps *me = &gfa;
1808
1809 mutex_lock(&me->smd_mutex);
1810 if (!*session)
1811 err = fastrpc_session_alloc_locked(chan, secure, session);
1812 mutex_unlock(&me->smd_mutex);
1813 return err;
1814}
1815
1816static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
1817 struct fastrpc_session_ctx *session)
1818{
1819 struct fastrpc_apps *me = &gfa;
1820
1821 mutex_lock(&me->smd_mutex);
1822 session->used = 0;
1823 mutex_unlock(&me->smd_mutex);
1824}
1825
/*
 * fastrpc_file_free() - tear down all state attached to one open fd.
 *
 * Unlinks the file from the global driver list, then — only if a
 * session was ever allocated (fl->sctx) — releases the remote process,
 * destroys pending/interrupted contexts, frees buffers and mappings,
 * drops the channel reference (unless a subsystem restart already
 * recycled the channel, detected via the ssrcount mismatch), and frees
 * both sessions.  Safe to call with fl == NULL.
 */
static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n;
	struct fastrpc_mmap *map = 0;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	/* No session means the fd never attached to a channel; cid may
	 * still be -1 here, so bail before any channel[cid] access.
	 */
	if (!fl->sctx) {
		kfree(fl);
		return 0;
	}
	(void)fastrpc_release_current_dsp_process(fl);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map);
	}
	/* Drop our channel ref only if no SSR recycled it meanwhile. */
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
				fastrpc_channel_close, &fl->apps->smd_mutex);
	if (fl->sctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
	kfree(fl);
	return 0;
}
1860
1861static int fastrpc_device_release(struct inode *inode, struct file *file)
1862{
1863 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
1864
1865 if (fl) {
Sathish Ambley1ca68232017-01-19 10:32:55 -08001866 if (fl->debugfs_file != NULL)
1867 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001868 fastrpc_file_free(fl);
1869 file->private_data = 0;
1870 }
1871 return 0;
1872}
1873
/*
 * fastrpc_link_state_handler() - glink LINK (transport) state callback.
 *
 * Distinct from the per-port callback: this tracks whether the glink
 * transport to the channel's remote edge is up at all.  LINK_STATE_UP
 * wakes anyone blocked in fastrpc_glink_register().
 */
static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
					void *priv)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)((uintptr_t)priv);
	struct fastrpc_glink_info *link;

	if (cid < 0 || cid >= NUM_CHANNELS)
		return;

	link = &me->channel[cid].link;
	switch (cb_info->link_state) {
	case GLINK_LINK_STATE_UP:
		link->link_state = FASTRPC_LINK_STATE_UP;
		complete(&me->channel[cid].work);
		break;
	case GLINK_LINK_STATE_DOWN:
		link->link_state = FASTRPC_LINK_STATE_DOWN;
		break;
	default:
		pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
		break;
	}
}
1898
/*
 * fastrpc_glink_register() - register for link-state notifications on a
 * channel and wait for the transport to come up.
 *
 * Idempotent: if a link-state handle already exists the function
 * returns success without re-registering or waiting.  Otherwise it
 * installs fastrpc_link_state_handler() and blocks (up to RPC_TIMEOUT)
 * for the GLINK_LINK_STATE_UP completion.
 */
static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;

	link = &me->channel[cid].link;
	/* Already registered for this channel: nothing to do. */
	if (link->link_notify_handle != NULL)
		goto bail;

	link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
	link->link_notify_handle = glink_register_link_state_cb(
					&link->link_info,
					(void *)((uintptr_t)cid));
	VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
	if (err) {
		link->link_notify_handle = NULL;
		goto bail;
	}
	/* err is set when the completion times out (returns 0). */
	VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
						RPC_TIMEOUT));
bail:
	return err;
}
1926
/*
 * fastrpc_glink_close() - close a channel's glink port if it is open.
 *
 * Only acts when the port is CONNECTED or mid-CONNECTING; the state is
 * moved to DISCONNECTING before glink_close() so the state callback can
 * tell a deliberate close from a remote disconnect.
 */
static void fastrpc_glink_close(void *chan, int cid)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		return;
	link = &gfa.channel[cid].link;

	if (link->port_state == FASTRPC_LINK_CONNECTED ||
		link->port_state == FASTRPC_LINK_CONNECTING) {
		link->port_state = FASTRPC_LINK_DISCONNECTING;
		glink_close(chan);
	}
}
1943
/*
 * fastrpc_glink_open() - open the glink port for one channel.
 *
 * Requires the transport link to be UP (see fastrpc_glink_register())
 * and the port to be fully DISCONNECTED; the port state is advanced to
 * CONNECTING before glink_open() so the notify_state callback observes
 * a consistent transition.  The connection completes asynchronously
 * via GLINK_CONNECTED; callers wait on the channel's completion.
 */
static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &me->channel[cid].link.cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	/* Refuse to open while a previous open/close is in flight. */
	VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
	if (err)
		goto bail;

	link->port_state = FASTRPC_LINK_CONNECTING;
	cfg->priv = (void *)(uintptr_t)cid;	/* echoed back in callbacks */
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err)
		goto bail;
	me->channel[cid].chan = handle;
bail:
	return err;
}
1982
Sathish Ambley1ca68232017-01-19 10:32:55 -08001983static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
1984{
1985 filp->private_data = inode->i_private;
1986 return 0;
1987}
1988
/*
 * fastrpc_debugfs_read() - render driver state into a debugfs node.
 *
 * With a NULL private fastrpc_file (the global node) it dumps every
 * channel's session table; with a per-process file it dumps that
 * process's buffers, mappings, and pending/interrupted invoke contexts
 * under the file's hlock.  Output is built with scnprintf into a
 * DEBUGFS_SIZE scratch buffer and copied out with
 * simple_read_from_buffer().
 */
static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
					 size_t count, loff_t *position)
{
	struct fastrpc_file *fl = filp->private_data;
	struct hlist_node *n;
	struct fastrpc_buf *buf = 0;
	struct fastrpc_mmap *map = 0;
	struct smq_invoke_ctx *ictx = 0;
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	unsigned int len = 0;
	int i, j, ret = 0;
	char *fileinfo = NULL;

	fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
	if (!fileinfo)
		goto bail;	/* returns 0; read sees EOF */
	if (fl == NULL) {
		/* Global node: per-channel session summary. */
		for (i = 0; i < NUM_CHANNELS; i++) {
			chan = &gcinfo[i];
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s\n\n",
					chan->name);
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s %d\n",
					"sesscount:", chan->sesscount);
			for (j = 0; j < chan->sesscount; j++) {
				sess = &chan->session[j];
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s%d\n\n", "SESSION", j);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "sid:",
						sess->smmu.cb);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "SECURE:",
						sess->smmu.secure);
			}
		}
	} else {
		/* Per-process node: identity, buffers, maps, contexts. */
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"PROCESS_ID:", fl->tgid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"CHANNEL_ID:", fl->cid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"SSRCOUNT:", fl->ssrcount);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s\n",
				"LIST OF BUFS:");
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %p %s %p %s %llx\n", "buf:",
					buf, "buf->virt:", buf->virt,
					"buf->phys:", buf->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF MAPS:");
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
						"%s %p %s %lx %s %llx\n",
						"map:", map,
						"map->va:", map->va,
						"map->phys:", map->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF PENDING SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
						"%s %p %s %u %s %u %s %u\n",
						"smqcontext:", ictx,
						"sc:", ictx->sc,
						"tid:", ictx->pid,
						"handle", ictx->rpra->h);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF INTERRUPTED SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %p %s %u %s %u %s %u\n",
					"smqcontext:", ictx,
					"sc:", ictx->sc,
					"tid:", ictx->pid,
					"handle", ictx->rpra->h);
		}
		spin_unlock(&fl->hlock);
	}
	if (len > DEBUGFS_SIZE)
		len = DEBUGFS_SIZE;
	ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
	kfree(fileinfo);
bail:
	return ret;
}
2091
/* Read-only file operations for the driver's debugfs nodes. */
static const struct file_operations debugfs_fops = {
	.open = fastrpc_debugfs_open,
	.read = fastrpc_debugfs_read,
};
/*
 * fastrpc_channel_open() - take a reference on (and if needed bring up)
 * the glink channel selected by fl->cid.
 *
 * All work happens under the apps smd_mutex.  If the channel already
 * holds a live kref and an open glink handle, only the reference and
 * the file's ssrcount snapshot are taken.  Otherwise the transport is
 * registered, the port opened, and the function waits (bounded by
 * RPC_TIMEOUT) for the GLINK_CONNECTED completion before initializing
 * the channel kref.
 */
static int fastrpc_channel_open(struct fastrpc_file *fl)
{
	struct fastrpc_apps *me = &gfa;
	int cid, err = 0;

	mutex_lock(&me->smd_mutex);

	VERIFY(err, fl && fl->sctx);
	if (err)
		goto bail;
	cid = fl->cid;
	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
	if (err)
		goto bail;
	/* Snapshot the restart count so later SSRs are detectable. */
	fl->ssrcount = me->channel[cid].ssrcount;
	if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
	    (me->channel[cid].chan == 0)) {
		VERIFY(err, 0 == fastrpc_glink_register(cid, me));
		if (err)
			goto bail;
		VERIFY(err, 0 == fastrpc_glink_open(cid));
		if (err)
			goto bail;

		/* Wait for the notify_state GLINK_CONNECTED completion. */
		VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
							RPC_TIMEOUT));
		if (err) {
			me->channel[cid].chan = 0;
			goto bail;
		}
		kref_init(&me->channel[cid].kref);
		pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
			MAJOR(me->dev_no), cid);
		if (me->channel[cid].ssrcount !=
		    me->channel[cid].prevssrcount) {
			me->channel[cid].prevssrcount =
					me->channel[cid].ssrcount;
		}
	}

bail:
	mutex_unlock(&me->smd_mutex);
	return err;
}
2140
Sathish Ambley36849af2017-02-02 09:35:55 -08002141static int fastrpc_device_open(struct inode *inode, struct file *filp)
2142{
2143 int err = 0;
Sathish Ambley567012b2017-03-06 11:55:04 -08002144 struct dentry *debugfs_file;
Sathish Ambley36849af2017-02-02 09:35:55 -08002145 struct fastrpc_file *fl = 0;
2146 struct fastrpc_apps *me = &gfa;
2147
2148 VERIFY(err, fl = kzalloc(sizeof(*fl), GFP_KERNEL));
2149 if (err)
2150 return err;
Sathish Ambley567012b2017-03-06 11:55:04 -08002151 debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
2152 fl, &debugfs_fops);
Sathish Ambley36849af2017-02-02 09:35:55 -08002153 context_list_ctor(&fl->clst);
2154 spin_lock_init(&fl->hlock);
2155 INIT_HLIST_HEAD(&fl->maps);
2156 INIT_HLIST_HEAD(&fl->bufs);
2157 INIT_HLIST_NODE(&fl->hn);
2158 fl->tgid = current->tgid;
2159 fl->apps = me;
2160 fl->mode = FASTRPC_MODE_SERIAL;
2161 fl->cid = -1;
Sathish Ambley567012b2017-03-06 11:55:04 -08002162 if (debugfs_file != NULL)
2163 fl->debugfs_file = debugfs_file;
2164 memset(&fl->perf, 0, sizeof(fl->perf));
Sathish Ambley36849af2017-02-02 09:35:55 -08002165 filp->private_data = fl;
2166 spin_lock(&me->hlock);
2167 hlist_add_head(&fl->hn, &me->drivers);
2168 spin_unlock(&me->hlock);
2169 return 0;
2170}
2171
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002172static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
2173{
2174 int err = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08002175 uint32_t cid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002176
Sathish Ambley36849af2017-02-02 09:35:55 -08002177 VERIFY(err, fl != 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002178 if (err)
2179 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08002180 if (fl->cid == -1) {
2181 cid = *info;
2182 VERIFY(err, cid < NUM_CHANNELS);
2183 if (err)
2184 goto bail;
2185 fl->cid = cid;
2186 fl->ssrcount = fl->apps->channel[cid].ssrcount;
2187 VERIFY(err, !fastrpc_session_alloc_locked(
2188 &fl->apps->channel[cid], 0, &fl->sctx));
2189 if (err)
2190 goto bail;
2191 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002192 *info = (fl->sctx->smmu.enabled ? 1 : 0);
2193bail:
2194 return err;
2195}
2196
2197static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
2198 unsigned long ioctl_param)
2199{
2200 union {
2201 struct fastrpc_ioctl_invoke_attrs inv;
2202 struct fastrpc_ioctl_mmap mmap;
2203 struct fastrpc_ioctl_munmap munmap;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002204 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002205 struct fastrpc_ioctl_perf perf;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002206 } p;
2207 void *param = (char *)ioctl_param;
2208 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2209 int size = 0, err = 0;
2210 uint32_t info;
2211
2212 p.inv.fds = 0;
2213 p.inv.attrs = 0;
2214
2215 switch (ioctl_num) {
2216 case FASTRPC_IOCTL_INVOKE:
2217 size = sizeof(struct fastrpc_ioctl_invoke);
2218 case FASTRPC_IOCTL_INVOKE_FD:
2219 if (!size)
2220 size = sizeof(struct fastrpc_ioctl_invoke_fd);
2221 /* fall through */
2222 case FASTRPC_IOCTL_INVOKE_ATTRS:
2223 if (!size)
2224 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
2225 VERIFY(err, 0 == copy_from_user(&p.inv, param, size));
2226 if (err)
2227 goto bail;
2228 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
2229 0, &p.inv)));
2230 if (err)
2231 goto bail;
2232 break;
2233 case FASTRPC_IOCTL_MMAP:
2234 VERIFY(err, 0 == copy_from_user(&p.mmap, param,
2235 sizeof(p.mmap)));
2236 if (err)
2237 goto bail;
2238 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
2239 if (err)
2240 goto bail;
2241 VERIFY(err, 0 == copy_to_user(param, &p.mmap, sizeof(p.mmap)));
2242 if (err)
2243 goto bail;
2244 break;
2245 case FASTRPC_IOCTL_MUNMAP:
2246 VERIFY(err, 0 == copy_from_user(&p.munmap, param,
2247 sizeof(p.munmap)));
2248 if (err)
2249 goto bail;
2250 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
2251 &p.munmap)));
2252 if (err)
2253 goto bail;
2254 break;
2255 case FASTRPC_IOCTL_SETMODE:
2256 switch ((uint32_t)ioctl_param) {
2257 case FASTRPC_MODE_PARALLEL:
2258 case FASTRPC_MODE_SERIAL:
2259 fl->mode = (uint32_t)ioctl_param;
2260 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002261 case FASTRPC_MODE_PROFILE:
2262 fl->profile = (uint32_t)ioctl_param;
2263 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002264 default:
2265 err = -ENOTTY;
2266 break;
2267 }
2268 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002269 case FASTRPC_IOCTL_GETPERF:
2270 VERIFY(err, 0 == copy_from_user(&p.perf,
2271 param, sizeof(p.perf)));
2272 if (err)
2273 goto bail;
2274 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
2275 if (p.perf.keys) {
2276 char *keys = PERF_KEYS;
2277
2278 VERIFY(err, 0 == copy_to_user((char *)p.perf.keys,
2279 keys, strlen(keys)+1));
2280 if (err)
2281 goto bail;
2282 }
2283 if (p.perf.data) {
2284 VERIFY(err, 0 == copy_to_user((int64_t *)p.perf.data,
2285 &fl->perf, sizeof(fl->perf)));
2286 }
2287 VERIFY(err, 0 == copy_to_user(param, &p.perf, sizeof(p.perf)));
2288 if (err)
2289 goto bail;
2290 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002291 case FASTRPC_IOCTL_GETINFO:
Sathish Ambley36849af2017-02-02 09:35:55 -08002292 VERIFY(err, 0 == copy_from_user(&info, param, sizeof(info)));
2293 if (err)
2294 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002295 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
2296 if (err)
2297 goto bail;
2298 VERIFY(err, 0 == copy_to_user(param, &info, sizeof(info)));
2299 if (err)
2300 goto bail;
2301 break;
2302 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002303 p.init.attrs = 0;
2304 p.init.siglen = 0;
2305 size = sizeof(struct fastrpc_ioctl_init);
2306 /* fall through */
2307 case FASTRPC_IOCTL_INIT_ATTRS:
2308 if (!size)
2309 size = sizeof(struct fastrpc_ioctl_init_attrs);
2310 VERIFY(err, 0 == copy_from_user(&p.init, param, size));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002311 if (err)
2312 goto bail;
2313 VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
2314 if (err)
2315 goto bail;
2316 break;
2317
2318 default:
2319 err = -ENOTTY;
2320 pr_info("bad ioctl: %d\n", ioctl_num);
2321 break;
2322 }
2323 bail:
2324 return err;
2325}
2326
2327static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
2328 unsigned long code,
2329 void *data)
2330{
2331 struct fastrpc_apps *me = &gfa;
2332 struct fastrpc_channel_ctx *ctx;
2333 int cid;
2334
2335 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
2336 cid = ctx - &me->channel[0];
2337 if (code == SUBSYS_BEFORE_SHUTDOWN) {
2338 mutex_lock(&me->smd_mutex);
2339 ctx->ssrcount++;
2340 if (ctx->chan) {
2341 fastrpc_glink_close(ctx->chan, cid);
2342 ctx->chan = 0;
2343 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
2344 gcinfo[cid].name, MAJOR(me->dev_no), cid);
2345 }
2346 mutex_unlock(&me->smd_mutex);
2347 fastrpc_notify_drivers(me, cid);
2348 }
2349
2350 return NOTIFY_DONE;
2351}
2352
/* Character-device operations for the fastrpc device node. */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};
2359
/* Device-tree compatibles dispatched by fastrpc_probe(). */
static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};
2367
/*
 * Probe one SMMU context-bank sub-device.
 *
 * Matches the DT "label" against a known channel, parses the "iommus"
 * phandle for the context-bank number, creates an ARM IOMMU mapping
 * (lower VA base for secure banks) and attaches the device, then records
 * the session in the channel's session table.
 */
static int fastrpc_cb_probe(struct device *dev)
{
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	struct of_phandle_args iommuspec;
	const char *name;
	unsigned int start = 0x80000000;	/* default IOVA window base */
	int err = 0, i;
	int secure_vmid = VMID_CP_PIXEL;

	/* The "label" property names the channel this bank belongs to. */
	VERIFY(err, 0 != (name = of_get_property(dev->of_node, "label", NULL)));
	if (err)
		goto bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		if (!strcmp(name, gcinfo[i].name))
			break;
	}
	VERIFY(err, i < NUM_CHANNELS);
	if (err)
		goto bail;
	chan = &gcinfo[i];
	VERIFY(err, chan->sesscount < NUM_SESSIONS);
	if (err)
		goto bail;

	VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
						"#iommu-cells", 0, &iommuspec));
	if (err)
		goto bail;
	sess = &chan->session[chan->sesscount];
	/* Low nibble of the first iommus arg is the context-bank id. */
	sess->smmu.cb = iommuspec.args[0] & 0xf;
	sess->used = 0;
	sess->smmu.coherent = of_property_read_bool(dev->of_node,
						"dma-coherent");
	sess->smmu.secure = of_property_read_bool(dev->of_node,
						"qcom,secure-context-bank");
	if (sess->smmu.secure)
		start = 0x60000000;	/* wider window for secure banks */
	VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
				arm_iommu_create_mapping(&platform_bus_type,
						start, 0x7fffffff)));
	if (err)
		goto bail;

	if (sess->smmu.secure)
		iommu_domain_set_attr(sess->smmu.mapping->domain,
				DOMAIN_ATTR_SECURE_VMID,
				&secure_vmid);

	VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
	if (err)
		goto bail;
	sess->dev = dev;
	sess->smmu.enabled = 1;
	chan->sesscount++;
	/* NOTE(review): this runs once per context bank, so the "global"
	 * debugfs file is re-created on every CB probe — confirm intended.
	 */
	debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
					NULL, &debugfs_fops);
bail:
	return err;
}
2430
/*
 * Platform probe, dispatched by compatible string:
 *  - compute-cb nodes are handed to fastrpc_cb_probe();
 *  - the adsprpc-mem-region node locates the ADSP ION heap's CMA area and
 *    hyp-assigns it so HLOS and the Q6 subsystems can all access it;
 *  - any other match populates child devices from the DT.
 */
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			/* Find the ADSP heap child and read its CMA extent. */
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
						VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				};

			/* Share the region with modem/SSC/ADSP Q6 VMs. */
			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
					srcVM, 1, destVM, destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					  fastrpc_match_table,
					  NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}
2494
2495static void fastrpc_deinit(void)
2496{
2497 struct fastrpc_apps *me = &gfa;
2498 struct fastrpc_channel_ctx *chan = gcinfo;
2499 int i, j;
2500
2501 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
2502 if (chan->chan) {
2503 kref_put_mutex(&chan->kref,
2504 fastrpc_channel_close, &me->smd_mutex);
2505 chan->chan = 0;
2506 }
2507 for (j = 0; j < NUM_SESSIONS; j++) {
2508 struct fastrpc_session_ctx *sess = &chan->session[j];
2509
2510 if (sess->smmu.enabled) {
2511 arm_iommu_detach_device(sess->dev);
2512 sess->dev = 0;
2513 }
2514 if (sess->smmu.mapping) {
2515 arm_iommu_release_mapping(sess->smmu.mapping);
2516 sess->smmu.mapping = 0;
2517 }
2518 }
2519 }
2520}
2521
/* Platform driver registered at module init; see fastrpc_match_table. */
static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};
2530
/*
 * Module init: register the platform driver, allocate the char-device
 * region, create the class and a single device node (minor 0), hook a
 * subsystem-restart notifier per channel, create the ION client and the
 * debugfs root.  Error labels unwind in reverse order of setup.
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	struct device *dev = 0;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				1));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	/* One node (minor 0) serves all channels; they share 'dev'. */
	dev = device_create(me->class, NULL,
				MKDEV(MAJOR(me->dev_no), 0),
				NULL, gcinfo[0].name);
	VERIFY(err, !IS_ERR_OR_NULL(dev));
	if (err)
		goto device_create_bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		me->channel[i].dev = dev;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	/* ION-client failure reuses device_create_bail: it unregisters the
	 * notifiers registered above and destroys the device and class.
	 */
	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	debugfs_root = debugfs_create_dir("adsprpc", NULL);
	return 0;
device_create_bail:
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (me->channel[i].handle)
			subsys_notif_unregister_notifier(me->channel[i].handle,
							&me->channel[i].nb);
	}
	if (!IS_ERR_OR_NULL(dev))
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}
2599
/*
 * Module exit: free all per-process files, tear down channels/sessions,
 * then unwind the device, class, cdev, chrdev region, ION client and
 * debugfs tree created in fastrpc_device_init().
 */
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;
	int i;

	fastrpc_file_list_dtor(me);
	fastrpc_deinit();
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		/* NOTE(review): init created only minor 0, yet this destroys
		 * minor i per channel — looks like a leftover from a
		 * device-per-channel scheme; confirm against init path.
		 */
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						&me->channel[i].nb);
	}
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
	ion_client_destroy(me->client);
	debugfs_remove_recursive(debugfs_root);
}
2620
/* late_initcall: run after normal device initcalls so dependencies
 * (ion, glink, debugfs) have had a chance to initialize first.
 */
late_initcall(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");