blob: 521a9746713176a605ce36a80f777b5f0af80d56 [file] [log] [blame]
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001/*
Sathish Ambleyae5ee542017-01-16 22:24:23 -08002 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
29#include <soc/qcom/subsystem_notif.h>
30#include <soc/qcom/subsystem_restart.h>
31#include <linux/scatterlist.h>
32#include <linux/fs.h>
33#include <linux/uaccess.h>
34#include <linux/device.h>
35#include <linux/of.h>
36#include <linux/of_address.h>
37#include <linux/of_platform.h>
38#include <linux/dma-contiguous.h>
39#include <linux/cma.h>
40#include <linux/iommu.h>
41#include <linux/kref.h>
42#include <linux/sort.h>
43#include <linux/msm_dma_iommu_mapping.h>
44#include <asm/dma-iommu.h>
45#include "adsprpc_compat.h"
46#include "adsprpc_shared.h"
Sathish Ambley1ca68232017-01-19 10:32:55 -080047#include <linux/debugfs.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070048
/* SCM/TZ service ids used to protect/unprotect subsystem memory */
#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
#define TZ_PIL_AUTH_QDSP6_PROC 1
#define FASTRPC_ENOSUCH 39
#define VMID_SSC_Q6 5
#define VMID_ADSP_Q6 6
#define DEBUGFS_SIZE 1024

#define RPC_TIMEOUT (5 * HZ)
#define BALIGN 128		/* alignment for args copied into the shared buffer */
#define NUM_CHANNELS 4	/* adsp, mdsp, slpi, cdsp*/
#define NUM_SESSIONS 9	/*8 compute, 1 cpz*/
#define M_FDLIST 16		/* slots in the fd list trailing the metadata */

#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)

/* glink link state and per-channel connection state values */
#define FASTRPC_LINK_STATE_DOWN (0x0)
#define FASTRPC_LINK_STATE_UP (0x1)
#define FASTRPC_LINK_DISCONNECTED (0x0)
#define FASTRPC_LINK_CONNECTING (0x1)
#define FASTRPC_LINK_CONNECTED (0x3)
#define FASTRPC_LINK_DISCONNECTING (0x7)

/* key names exported for the struct fastrpc_perf counters */
#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
#define FASTRPC_STATIC_HANDLE_LISTENER (3)
#define FASTRPC_STATIC_HANDLE_MAX (20)

#define PERF_END (void)0

/*
 * Time the statement(s) ff and accumulate the elapsed nanoseconds into
 * cnt when profiling (enb) is enabled.  Callers terminate the timed
 * region with PERF_END inside the macro invocation.
 */
#define PERF(enb, cnt, ff) \
	{\
		struct timespec startT = {0};\
		if (enb) {\
			getnstimeofday(&startT);\
		} \
		ff ;\
		if (enb) {\
			cnt += getnstimediff(&startT);\
		} \
	}

static int fastrpc_glink_open(int cid);
static void fastrpc_glink_close(void *chan, int cid);
/* debugfs nodes created at module init; per-file nodes hang off these */
static struct dentry *debugfs_root;
static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -070094
95static inline uint64_t buf_page_start(uint64_t buf)
96{
97 uint64_t start = (uint64_t) buf & PAGE_MASK;
98 return start;
99}
100
101static inline uint64_t buf_page_offset(uint64_t buf)
102{
103 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
104 return offset;
105}
106
107static inline int buf_num_pages(uint64_t buf, ssize_t len)
108{
109 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
110 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
111 int nPages = end - start + 1;
112 return nPages;
113}
114
115static inline uint64_t buf_page_size(uint32_t size)
116{
117 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
118
119 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
120}
121
/* Convert a 64-bit value back to a pointer, narrowing via uintptr_t. */
static inline void *uint64_to_ptr(uint64_t addr)
{
	return (void *)(uintptr_t)addr;
}
128
/* Widen a pointer to 64 bits (via uintptr_t, so 32-bit builds are safe). */
static inline uint64_t ptr_to_uint64(void *ptr)
{
	return (uint64_t)(uintptr_t)ptr;
}
135
struct fastrpc_file;

/* One DMA-coherent scratch buffer owned by a client file. */
struct fastrpc_buf {
	struct hlist_node hn;		/* link in fl->bufs free-cache list */
	struct fastrpc_file *fl;	/* owning client */
	void *virt;			/* kernel virtual address */
	uint64_t phys;			/* device address; SMMU cb id packed in bits 32+ */
	ssize_t size;
};

struct fastrpc_ctx_lst;

/*
 * Overlap bookkeeping for one invoke buffer: [start, end) is the user
 * range, [mstart, mend) the portion this buffer must itself copy after
 * earlier (sorted) buffers are accounted for, and offset the number of
 * leading bytes already covered by a previous overlapping buffer.
 */
struct overlap {
	uintptr_t start;
	uintptr_t end;
	int raix;		/* index back into the remote-arg array */
	uintptr_t mstart;
	uintptr_t mend;
	uintptr_t offset;
};
156
/* State for one in-flight remote invocation. */
struct smq_invoke_ctx {
	struct hlist_node hn;		/* on clst.pending or clst.interrupted */
	struct completion work;		/* completed when the reply arrives */
	int retval;			/* remote return code (init -1) */
	int pid;
	int tgid;
	remote_arg_t *lpra;		/* local (caller-supplied) args */
	remote_arg64_t *rpra;		/* packed args shared with the DSP */
	int *fds;			/* per-arg ion fds; 0/-1 means none */
	unsigned int *attrs;		/* per-arg FASTRPC_ATTR_* flags */
	struct fastrpc_mmap **maps;	/* per-arg mappings */
	struct fastrpc_buf *buf;	/* metadata + non-ion copy buffer */
	ssize_t used;			/* bytes of buf consumed by get_args */
	struct fastrpc_file *fl;
	uint32_t sc;			/* scalars word encoding arg counts */
	struct overlap *overs;		/* per-buffer overlap records */
	struct overlap **overps;	/* same records, sorted by start address */
	struct smq_msg msg;
};

/* Per-file lists of invocations awaiting a reply or interrupted by a signal. */
struct fastrpc_ctx_lst {
	struct hlist_head pending;
	struct hlist_head interrupted;
};
181
/* One SMMU context bank used to map client buffers for a session. */
struct fastrpc_smmu {
	struct dma_iommu_mapping *mapping;
	int cb;		/* context bank id; packed into phys addr bits 32+ */
	int enabled;
	int faults;
	int secure;	/* bank handles ION_FLAG_SECURE buffers */
	int coherent;	/* IO-coherent: cache maintenance can be skipped */
};

struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};

/* glink transport state for one DSP channel. */
struct fastrpc_glink_info {
	int link_state;		/* FASTRPC_LINK_STATE_* */
	int port_state;		/* FASTRPC_LINK_{DIS,}CONNECT* */
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};
204
/* Per-DSP channel state; one entry in gcinfo per remote processor. */
struct fastrpc_channel_ctx {
	char *name;			/* device node name */
	char *subsys;			/* subsystem-restart identifier */
	void *chan;			/* open glink channel handle */
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;
	struct notifier_block nb;	/* subsystem restart notifier */
	struct kref kref;
	int sesscount;
	int ssrcount;			/* bumped on each subsystem restart */
	void *handle;
	int prevssrcount;
	int vmid;			/* remote VM id; 0 = DSP shares HLOS memory */
	struct fastrpc_glink_info link;
};

/* Driver-global state; single instance gfa. */
struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;		/* global (cross-file) mappings */
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;	/* open fastrpc_file instances */
	spinlock_t hlock;		/* protects drivers and maps lists */
	struct ion_client *client;
	struct device *dev;
};
236
/* An ion buffer imported from a client fd and mapped for DSP access. */
struct fastrpc_mmap {
	struct hlist_node hn;		/* on fl->maps or gfa.maps */
	struct fastrpc_file *fl;
	struct fastrpc_apps *apps;
	int fd;				/* client-supplied ion/dma-buf fd */
	uint32_t flags;			/* mmap flags from the caller */
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;			/* dma address; SMMU cb id in bits 32+ */
	ssize_t size;			/* mapped size */
	uintptr_t va;			/* client virtual address */
	ssize_t len;			/* client-requested length */
	int refs;			/* lookup refcount, guarded by fl->hlock */
	uintptr_t raddr;		/* remote-side address once mapped */
	int uncached;
	int secure;			/* backed by ION_FLAG_SECURE memory */
	uintptr_t attr;			/* FASTRPC_ATTR_* */
};
257
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800258struct fastrpc_perf {
259 int64_t count;
260 int64_t flush;
261 int64_t map;
262 int64_t copy;
263 int64_t link;
264 int64_t getargs;
265 int64_t putargs;
266 int64_t invargs;
267 int64_t invoke;
268};
269
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700270struct fastrpc_file {
271 struct hlist_node hn;
272 spinlock_t hlock;
273 struct hlist_head maps;
274 struct hlist_head bufs;
275 struct fastrpc_ctx_lst clst;
276 struct fastrpc_session_ctx *sctx;
277 struct fastrpc_session_ctx *secsctx;
278 uint32_t mode;
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800279 uint32_t profile;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700280 int tgid;
281 int cid;
282 int ssrcount;
283 int pd;
284 struct fastrpc_apps *apps;
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800285 struct fastrpc_perf perf;
Sathish Ambley1ca68232017-01-19 10:32:55 -0800286 struct dentry *debugfs_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700287};
288
static struct fastrpc_apps gfa;

/* Static description of the four DSP channels, indexed by channel id. */
static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
		/* SLPI memory lives in its own VM; see hyp_assign_phys calls */
		.vmid = VMID_SSC_Q6,
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};
318
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800319static inline int64_t getnstimediff(struct timespec *start)
320{
321 int64_t ns;
322 struct timespec ts, b;
323
324 getnstimeofday(&ts);
325 b = timespec_sub(ts, *start);
326 ns = timespec_to_ns(&b);
327 return ns;
328}
329
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700330static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
331{
332 struct fastrpc_file *fl = buf == 0 ? 0 : buf->fl;
333 int vmid;
334
335 if (!fl)
336 return;
337 if (cache) {
338 spin_lock(&fl->hlock);
339 hlist_add_head(&buf->hn, &fl->bufs);
340 spin_unlock(&fl->hlock);
341 return;
342 }
343 if (!IS_ERR_OR_NULL(buf->virt)) {
344 int destVM[1] = {VMID_HLOS};
345 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
346
347 if (fl->sctx->smmu.cb)
348 buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
349 vmid = fl->apps->channel[fl->cid].vmid;
350 if (vmid) {
351 int srcVM[2] = {VMID_HLOS, vmid};
352
353 hyp_assign_phys(buf->phys, buf_page_size(buf->size),
354 srcVM, 2, destVM, destVMperm, 1);
355 }
356 dma_free_coherent(fl->sctx->dev, buf->size, buf->virt,
357 buf->phys);
358 }
359 kfree(buf);
360}
361
362static void fastrpc_buf_list_free(struct fastrpc_file *fl)
363{
364 struct fastrpc_buf *buf, *free;
365
366 do {
367 struct hlist_node *n;
368
369 free = 0;
370 spin_lock(&fl->hlock);
371 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
372 hlist_del_init(&buf->hn);
373 free = buf;
374 break;
375 }
376 spin_unlock(&fl->hlock);
377 if (free)
378 fastrpc_buf_free(free, 0);
379 } while (free);
380}
381
382static void fastrpc_mmap_add(struct fastrpc_mmap *map)
383{
384 struct fastrpc_file *fl = map->fl;
385
386 spin_lock(&fl->hlock);
387 hlist_add_head(&map->hn, &fl->maps);
388 spin_unlock(&fl->hlock);
389}
390
391static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800392 ssize_t len, int mflags, int refs, struct fastrpc_mmap **ppmap)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700393{
394 struct fastrpc_mmap *match = 0, *map;
395 struct hlist_node *n;
396
397 spin_lock(&fl->hlock);
398 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
399 if (va >= map->va &&
400 va + len <= map->va + map->len &&
401 map->fd == fd) {
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800402 if (refs)
403 map->refs++;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700404 match = map;
405 break;
406 }
407 }
408 spin_unlock(&fl->hlock);
409 if (match) {
410 *ppmap = match;
411 return 0;
412 }
413 return -ENOTTY;
414}
415
416static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
417 ssize_t len, struct fastrpc_mmap **ppmap)
418{
419 struct fastrpc_mmap *match = 0, *map;
420 struct hlist_node *n;
421 struct fastrpc_apps *me = &gfa;
422
423 spin_lock(&me->hlock);
424 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
425 if (map->raddr == va &&
426 map->raddr + map->len == va + len &&
427 map->refs == 1) {
428 match = map;
429 hlist_del_init(&map->hn);
430 break;
431 }
432 }
433 spin_unlock(&me->hlock);
434 if (match) {
435 *ppmap = match;
436 return 0;
437 }
438 spin_lock(&fl->hlock);
439 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
440 if (map->raddr == va &&
441 map->raddr + map->len == va + len &&
442 map->refs == 1) {
443 match = map;
444 hlist_del_init(&map->hn);
445 break;
446 }
447 }
448 spin_unlock(&fl->hlock);
449 if (match) {
450 *ppmap = match;
451 return 0;
452 }
453 return -ENOTTY;
454}
455
/*
 * Drop one reference on @map; on the last reference, unlink it and tear
 * down the whole import chain (ion handle, SMMU mapping, VM assignment,
 * dma-buf attachment) in reverse acquisition order.
 */
static void fastrpc_mmap_free(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;
	int destVM[1] = {VMID_HLOS};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	if (!map)
		return;
	fl = map->fl;
	spin_lock(&fl->hlock);
	map->refs--;
	if (!map->refs)
		hlist_del_init(&map->hn);
	spin_unlock(&fl->hlock);
	if (map->refs > 0)
		return;
	/* pick the session the buffer was mapped on */
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	if (!IS_ERR_OR_NULL(map->handle))
		ion_free(fl->apps->client, map->handle);
	if (sess->smmu.enabled) {
		if (map->size || map->phys)
			msm_dma_unmap_sg(sess->dev,
				map->table->sgl,
				map->table->nents, DMA_BIDIRECTIONAL,
				map->buf);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid && map->phys) {
		int srcVM[2] = {VMID_HLOS, vmid};

		/* hand the pages back to HLOS-only ownership */
		hyp_assign_phys(map->phys, buf_page_size(map->size),
			srcVM, 2, destVM, destVMperm, 1);
	}

	if (!IS_ERR_OR_NULL(map->table))
		dma_buf_unmap_attachment(map->attach, map->table,
				DMA_BIDIRECTIONAL);
	if (!IS_ERR_OR_NULL(map->attach))
		dma_buf_detach(map->buf, map->attach);
	if (!IS_ERR_OR_NULL(map->buf))
		dma_buf_put(map->buf);
	kfree(map);
}
505
static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
			struct fastrpc_session_ctx **session);

/*
 * Import the ion buffer behind @fd and map it for DSP access: reuse an
 * existing mapping when one covers [va, va + len), otherwise build a
 * new one (ion import -> dma-buf attach -> SMMU map -> optional VM
 * share) and add it to the file's map list.  Returns 0 and fills
 * *ppmap; on error every partially acquired resource is released via
 * fastrpc_mmap_free.
 */
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, ssize_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = 0;
	unsigned long attrs;
	unsigned long flags;
	int err = 0, vmid;

	/* fast path: an existing mapping already covers this range */
	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	VERIFY(err, !IS_ERR_OR_NULL(map->handle =
			ion_import_dma_buf_fd(fl->apps->client, fd)));
	if (err)
		goto bail;
	VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
	if (err)
		goto bail;

	map->uncached = !ION_IS_CACHED(flags);
	if (map->attr & FASTRPC_ATTR_NOVA)
		map->uncached = 1;

	map->secure = flags & ION_FLAG_SECURE;
	if (map->secure) {
		/* secure buffers need a secure context bank, set up lazily */
		if (!fl->secsctx)
			err = fastrpc_session_alloc(chan, 1,
						&fl->secsctx);
		if (err)
			goto bail;
	}
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->attach =
			dma_buf_attach(map->buf, sess->dev)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->table =
		dma_buf_map_attachment(map->attach,
			DMA_BIDIRECTIONAL)));
	if (err)
		goto bail;
	if (sess->smmu.enabled) {
		attrs = DMA_ATTR_EXEC_MAPPING;
		VERIFY(err, map->table->nents ==
			msm_dma_map_sg_attrs(sess->dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
		if (err)
			goto bail;
	} else {
		/* without an SMMU the buffer must be physically contiguous */
		VERIFY(err, map->table->nents == 1);
		if (err)
			goto bail;
	}
	map->phys = sg_dma_address(map->table->sgl);
	if (sess->smmu.cb) {
		/* tag the dma address with the context bank in the high bits */
		map->phys += ((uint64_t)sess->smmu.cb << 32);
		map->size = sg_dma_len(map->table->sgl);
	} else {
		map->size = buf_page_size(len);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
			PERM_READ | PERM_WRITE | PERM_EXEC};

		/* share the pages with the remote VM */
		VERIFY(err, !hyp_assign_phys(map->phys,
				buf_page_size(map->size),
				srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}
	map->va = va;
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}
616
/*
 * Provide a DMA-coherent buffer of at least @size bytes for @fl,
 * preferring the smallest adequate buffer from the per-file cache.  On
 * a cache miss a new buffer is allocated (retrying once after dropping
 * the cache) and, when the channel has a remote VM, shared with it.
 */
static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
			     struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = 0, *fr = 0;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = 0;
	VERIFY(err, buf = kzalloc(sizeof(*buf), GFP_KERNEL));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = 0;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
				       (void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
					       (void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	/* pack the context-bank id into the high bits of the address */
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
			PERM_READ | PERM_WRITE | PERM_EXEC};

		/* share the new buffer with the remote VM */
		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
 bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}
682
683
/*
 * If the calling thread had a previous invocation interrupted by a
 * signal, move that context back to the pending list so the retry
 * resumes waiting on it.  Returns 0 with *po set on success, 0 with
 * *po untouched when the thread had no interrupted context, and -1
 * when an interrupted context exists but does not match the retried
 * call (different scalars or file).
 */
static int context_restore_interrupted(struct fastrpc_file *fl,
				struct fastrpc_ioctl_invoke_attrs *inv,
				struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = 0, *ictx = 0;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			/* the retried call must match the interrupted one */
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}
711
712#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
713static int overlap_ptr_cmp(const void *a, const void *b)
714{
715 struct overlap *pa = *((struct overlap **)a);
716 struct overlap *pb = *((struct overlap **)b);
717 /* sort with lowest starting buffer first */
718 int st = CMP(pa->start, pb->start);
719 /* sort with highest ending buffer first */
720 int ed = CMP(pb->end, pa->end);
721 return st == 0 ? ed : st;
722}
723
Sathish Ambley9466d672017-01-25 10:51:55 -0800724static int context_build_overlap(struct smq_invoke_ctx *ctx)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700725{
Sathish Ambley9466d672017-01-25 10:51:55 -0800726 int i, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700727 remote_arg_t *lpra = ctx->lpra;
728 int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
729 int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
730 int nbufs = inbufs + outbufs;
731 struct overlap max;
732
733 for (i = 0; i < nbufs; ++i) {
734 ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
735 ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
Sathish Ambley9466d672017-01-25 10:51:55 -0800736 if (lpra[i].buf.len) {
737 VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
738 if (err)
739 goto bail;
740 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700741 ctx->overs[i].raix = i;
742 ctx->overps[i] = &ctx->overs[i];
743 }
744 sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, 0);
745 max.start = 0;
746 max.end = 0;
747 for (i = 0; i < nbufs; ++i) {
748 if (ctx->overps[i]->start < max.end) {
749 ctx->overps[i]->mstart = max.end;
750 ctx->overps[i]->mend = ctx->overps[i]->end;
751 ctx->overps[i]->offset = max.end -
752 ctx->overps[i]->start;
753 if (ctx->overps[i]->end > max.end) {
754 max.end = ctx->overps[i]->end;
755 } else {
756 ctx->overps[i]->mend = 0;
757 ctx->overps[i]->mstart = 0;
758 }
759 } else {
760 ctx->overps[i]->mend = ctx->overps[i]->end;
761 ctx->overps[i]->mstart = ctx->overps[i]->start;
762 ctx->overps[i]->offset = 0;
763 max = *ctx->overps[i];
764 }
765 }
Sathish Ambley9466d672017-01-25 10:51:55 -0800766bail:
767 return err;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700768}
769
/*
 * Copy @size bytes between kernel and user memory.  With @kernel set
 * both pointers are kernel addresses and a plain memmove is used;
 * otherwise copy_{from,to}_user validates the user pointer and @err is
 * set through VERIFY on failure.
 */
#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst), (src),\
							(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((dst), (src),\
						(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)


static void context_free(struct smq_invoke_ctx *ctx);
790
/*
 * Allocate and initialise an invocation context for @invokefd.  The
 * context and all its variable-size arrays (maps, lpra, fds, attrs,
 * overlap records) are carved from one kzalloc'd block laid out after
 * the struct.  Argument descriptors, fds and attrs are copied in from
 * user space (or memmoved when @kernel), overlaps are computed, and
 * the context is queued on the file's pending list.
 */
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			 struct fastrpc_ioctl_invoke_attrs *invokefd,
			 struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	/* slice the trailing allocation into the per-arg arrays */
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}

	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = current->tgid;
	init_completion(&ctx->work);

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}
860
861static void context_save_interrupted(struct smq_invoke_ctx *ctx)
862{
863 struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
864
865 spin_lock(&ctx->fl->hlock);
866 hlist_del_init(&ctx->hn);
867 hlist_add_head(&ctx->hn, &clst->interrupted);
868 spin_unlock(&ctx->fl->hlock);
869 /* free the cache on power collapse */
870 fastrpc_buf_list_free(ctx->fl);
871}
872
/*
 * Unlink and destroy a context: drop every per-buffer mapping, cache
 * the scratch buffer for reuse and free the allocation.
 */
static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	/*
	 * NOTE(review): only the inbuf/outbuf maps are released here;
	 * ctx->maps also holds entries for in/out handles created by
	 * get_args — presumably those are released on the reply path.
	 * Verify there is no leak when a call is aborted before then.
	 */
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i]);
	fastrpc_buf_free(ctx->buf, 1);
	kfree(ctx);
}
886
887static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
888{
889 ctx->retval = retval;
890 complete(&ctx->work);
891}
892
893
894static void fastrpc_notify_users(struct fastrpc_file *me)
895{
896 struct smq_invoke_ctx *ictx;
897 struct hlist_node *n;
898
899 spin_lock(&me->hlock);
900 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
901 complete(&ictx->work);
902 }
903 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
904 complete(&ictx->work);
905 }
906 spin_unlock(&me->hlock);
907
908}
909
910static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
911{
912 struct fastrpc_file *fl;
913 struct hlist_node *n;
914
915 spin_lock(&me->hlock);
916 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
917 if (fl->cid == cid)
918 fastrpc_notify_users(fl);
919 }
920 spin_unlock(&me->hlock);
921
922}
923static void context_list_ctor(struct fastrpc_ctx_lst *me)
924{
925 INIT_HLIST_HEAD(&me->interrupted);
926 INIT_HLIST_HEAD(&me->pending);
927}
928
929static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
930{
931 struct fastrpc_ctx_lst *clst = &fl->clst;
932 struct smq_invoke_ctx *ictx = 0, *ctxfree;
933 struct hlist_node *n;
934
935 do {
936 ctxfree = 0;
937 spin_lock(&fl->hlock);
938 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
939 hlist_del_init(&ictx->hn);
940 ctxfree = ictx;
941 break;
942 }
943 spin_unlock(&fl->hlock);
944 if (ctxfree)
945 context_free(ctxfree);
946 } while (ctxfree);
947 do {
948 ctxfree = 0;
949 spin_lock(&fl->hlock);
950 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
951 hlist_del_init(&ictx->hn);
952 ctxfree = ictx;
953 break;
954 }
955 spin_unlock(&fl->hlock);
956 if (ctxfree)
957 context_free(ctxfree);
958 } while (ctxfree);
959}
960
961static int fastrpc_file_free(struct fastrpc_file *fl);
962static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
963{
964 struct fastrpc_file *fl, *free;
965 struct hlist_node *n;
966
967 do {
968 free = 0;
969 spin_lock(&me->hlock);
970 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
971 hlist_del_init(&fl->hn);
972 free = fl;
973 break;
974 }
975 spin_unlock(&me->hlock);
976 if (free)
977 fastrpc_file_free(free);
978 } while (free);
979}
980
981static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
982{
983 remote_arg64_t *rpra;
984 remote_arg_t *lpra = ctx->lpra;
985 struct smq_invoke_buf *list;
986 struct smq_phy_page *pages, *ipage;
987 uint32_t sc = ctx->sc;
988 int inbufs = REMOTE_SCALARS_INBUFS(sc);
989 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -0800990 int handles, bufs = inbufs + outbufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700991 uintptr_t args;
992 ssize_t rlen = 0, copylen = 0, metalen = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -0800993 int i, oix;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700994 int err = 0;
995 int mflags = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -0800996 uint64_t *fdlist;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700997
998 /* calculate size of the metadata */
999 rpra = 0;
1000 list = smq_invoke_buf_start(rpra, sc);
1001 pages = smq_phy_page_start(sc, list);
1002 ipage = pages;
1003
1004 for (i = 0; i < bufs; ++i) {
1005 uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
1006 ssize_t len = lpra[i].buf.len;
1007
1008 if (ctx->fds[i] && (ctx->fds[i] != -1))
1009 fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1010 ctx->attrs[i], buf, len,
1011 mflags, &ctx->maps[i]);
1012 ipage += 1;
1013 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001014 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1015 for (i = bufs; i < bufs + handles; i++) {
1016 VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1017 FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
1018 if (err)
1019 goto bail;
1020 ipage += 1;
1021 }
1022 metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001023 /* calculate len requreed for copying */
1024 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1025 int i = ctx->overps[oix]->raix;
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001026 uintptr_t mstart, mend;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001027 ssize_t len = lpra[i].buf.len;
1028
1029 if (!len)
1030 continue;
1031 if (ctx->maps[i])
1032 continue;
1033 if (ctx->overps[oix]->offset == 0)
1034 copylen = ALIGN(copylen, BALIGN);
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001035 mstart = ctx->overps[oix]->mstart;
1036 mend = ctx->overps[oix]->mend;
1037 VERIFY(err, (mend - mstart) <= LONG_MAX);
1038 if (err)
1039 goto bail;
1040 copylen += mend - mstart;
1041 VERIFY(err, copylen >= 0);
1042 if (err)
1043 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001044 }
1045 ctx->used = copylen;
1046
1047 /* allocate new buffer */
1048 if (copylen) {
1049 VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
1050 if (err)
1051 goto bail;
1052 }
1053 /* copy metadata */
1054 rpra = ctx->buf->virt;
1055 ctx->rpra = rpra;
1056 list = smq_invoke_buf_start(rpra, sc);
1057 pages = smq_phy_page_start(sc, list);
1058 ipage = pages;
1059 args = (uintptr_t)ctx->buf->virt + metalen;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001060 for (i = 0; i < bufs + handles; ++i) {
1061 if (lpra[i].buf.len)
1062 list[i].num = 1;
1063 else
1064 list[i].num = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001065 list[i].pgidx = ipage - pages;
1066 ipage++;
1067 }
1068 /* map ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001069 PERF(ctx->fl->profile, ctx->fl->perf.map,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001070 for (i = 0; i < inbufs + outbufs; ++i) {
1071 struct fastrpc_mmap *map = ctx->maps[i];
1072 uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
1073 ssize_t len = lpra[i].buf.len;
1074
1075 rpra[i].buf.pv = 0;
1076 rpra[i].buf.len = len;
1077 if (!len)
1078 continue;
1079 if (map) {
1080 struct vm_area_struct *vma;
1081 uintptr_t offset;
1082 int num = buf_num_pages(buf, len);
1083 int idx = list[i].pgidx;
1084
1085 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001086 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001087 } else {
1088 down_read(&current->mm->mmap_sem);
1089 VERIFY(err, NULL != (vma = find_vma(current->mm,
1090 map->va)));
1091 if (err) {
1092 up_read(&current->mm->mmap_sem);
1093 goto bail;
1094 }
1095 offset = buf_page_start(buf) - vma->vm_start;
1096 up_read(&current->mm->mmap_sem);
1097 VERIFY(err, offset < (uintptr_t)map->size);
1098 if (err)
1099 goto bail;
1100 }
1101 pages[idx].addr = map->phys + offset;
1102 pages[idx].size = num << PAGE_SHIFT;
1103 }
1104 rpra[i].buf.pv = buf;
1105 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001106 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001107 for (i = bufs; i < bufs + handles; ++i) {
1108 struct fastrpc_mmap *map = ctx->maps[i];
1109
1110 pages[i].addr = map->phys;
1111 pages[i].size = map->size;
1112 }
1113 fdlist = (uint64_t *)&pages[bufs + handles];
1114 for (i = 0; i < M_FDLIST; i++)
1115 fdlist[i] = 0;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001116
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001117 /* copy non ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001118 PERF(ctx->fl->profile, ctx->fl->perf.copy,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001119 rlen = copylen - metalen;
1120 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1121 int i = ctx->overps[oix]->raix;
1122 struct fastrpc_mmap *map = ctx->maps[i];
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001123 ssize_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001124 uint64_t buf;
1125 ssize_t len = lpra[i].buf.len;
1126
1127 if (!len)
1128 continue;
1129 if (map)
1130 continue;
1131 if (ctx->overps[oix]->offset == 0) {
1132 rlen -= ALIGN(args, BALIGN) - args;
1133 args = ALIGN(args, BALIGN);
1134 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001135 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001136 VERIFY(err, rlen >= mlen);
1137 if (err)
1138 goto bail;
1139 rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
1140 pages[list[i].pgidx].addr = ctx->buf->phys -
1141 ctx->overps[oix]->offset +
1142 (copylen - rlen);
1143 pages[list[i].pgidx].addr =
1144 buf_page_start(pages[list[i].pgidx].addr);
1145 buf = rpra[i].buf.pv;
1146 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1147 if (i < inbufs) {
1148 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1149 lpra[i].buf.pv, len);
1150 if (err)
1151 goto bail;
1152 }
1153 args = args + mlen;
1154 rlen -= mlen;
1155 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001156 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001157
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001158 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001159 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1160 int i = ctx->overps[oix]->raix;
1161 struct fastrpc_mmap *map = ctx->maps[i];
1162
1163 if (ctx->fl->sctx->smmu.coherent)
1164 continue;
1165 if (map && map->uncached)
1166 continue;
1167 if (rpra[i].buf.len && ctx->overps[oix]->mstart)
1168 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1169 uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
1170 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001171 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001172 for (i = bufs; i < bufs + handles; i++) {
1173 rpra[i].dma.fd = ctx->fds[i];
1174 rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1175 rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001176 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001177
1178 if (!ctx->fl->sctx->smmu.coherent) {
1179 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001180 dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001181 PERF_END);
1182 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001183 bail:
1184 return err;
1185}
1186
/*
 * put_args() - propagate results of a completed remote call back to the user.
 * @kernel: nonzero when the caller is in-kernel (K_COPY_TO_USER then memcpys).
 * @ctx:    invoke context holding rpra/lpra and the per-argument mmaps.
 * @upra:   user remote-arg array (currently unused; results are written to
 *          the user pointers recorded in ctx->lpra).
 *
 * Copies non-ion output buffers back to user memory, drops the per-call ion
 * map references, and frees any maps the DSP asked to release through the
 * fd list that trails the page array in the metadata region.
 * Returns 0 or a negative errno from the user copy.
 */
static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		    remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	/* the fd list sits immediately after the smq_phy_page array */
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			/* copied (non-ion) buffer: move the result out */
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			/* ion buffer: data is already in place, drop the ref */
			fastrpc_mmap_free(ctx->maps[i]);
			ctx->maps[i] = 0;
		}
	}
	/* free maps the remote side marked done; 0 terminates the list */
	if (inbufs + outbufs + handles) {
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap);
		}
	}
 bail:
	return err;
}
1230
/*
 * inv_args_pre() - pre-invoke cache maintenance for output buffers.
 *
 * For each cached output buffer, flush the partial cache lines at the
 * buffer's unaligned head and tail so that dirty CPU lines covering
 * neighbouring data cannot be evicted over DSP-written memory while the
 * call is in flight.  Buffers that share a page with the metadata (rpra)
 * are skipped here; the caller flushes the whole metadata region.
 */
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		/* uncached mappings need no cache maintenance */
		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		/* skip buffers that live on the metadata page */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		/* flush the line straddling the (unaligned) buffer start */
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
							rpra[i].buf.len);
		/* flush the line straddling the (unaligned) buffer end */
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}
1260
/*
 * inv_args() - post-invoke cache invalidation for output buffers.
 *
 * After the DSP has produced results, invalidate the CPU cache over every
 * cached output buffer (through ion when the map has a handle, as a raw
 * range otherwise) so the CPU observes the DSP's writes, then invalidate
 * the metadata region (rpra) itself.
 */
static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		/* uncached mappings need no invalidation */
		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		/* buffers on the metadata page are covered by the final
		 * dmac_inv_range() over rpra below
		 */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
			buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						+ rpra[i].buf.len));
	}

	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}
1294
/*
 * fastrpc_invoke_send() - queue the invoke message for @ctx to the DSP.
 * @ctx:    prepared invoke context (sc, buf and used already populated).
 * @kernel: nonzero for kernel-originated calls; pid 0 is sent so the DSP
 *          attributes the call to the kernel.
 * @handle: remote handle the call targets.
 *
 * Fails with -ECONNRESET when a subsystem restart occurred since this
 * file attached to the channel.  Returns 0 or a negative errno
 * (including the glink_tx() result).
 */
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, 0 != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = current->tgid;
	msg->tid = current->pid;
	if (kernel)
		msg->pid = 0;
	/* bit 0 of the context cookie carries the PD flag; the rx path
	 * (fastrpc_glink_notify_rx) masks it off again
	 */
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
		FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
 bail:
	return err;
}
1330
1331static void fastrpc_init(struct fastrpc_apps *me)
1332{
1333 int i;
1334
1335 INIT_HLIST_HEAD(&me->drivers);
1336 spin_lock_init(&me->hlock);
1337 mutex_init(&me->smd_mutex);
1338 me->channel = &gcinfo[0];
1339 for (i = 0; i < NUM_CHANNELS; i++) {
1340 init_completion(&me->channel[i].work);
1341 me->channel[i].sesscount = 0;
1342 }
1343}
1344
1345static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1346
1347static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
1348 uint32_t kernel,
1349 struct fastrpc_ioctl_invoke_attrs *inv)
1350{
1351 struct smq_invoke_ctx *ctx = 0;
1352 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
1353 int cid = fl->cid;
1354 int interrupted = 0;
1355 int err = 0;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001356 struct timespec invoket;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001357
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001358 if (fl->profile)
1359 getnstimeofday(&invoket);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001360 if (!kernel) {
1361 VERIFY(err, 0 == context_restore_interrupted(fl, inv,
1362 &ctx));
1363 if (err)
1364 goto bail;
1365 if (fl->sctx->smmu.faults)
1366 err = FASTRPC_ENOSUCH;
1367 if (err)
1368 goto bail;
1369 if (ctx)
1370 goto wait;
1371 }
1372
1373 VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
1374 if (err)
1375 goto bail;
1376
1377 if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001378 PERF(fl->profile, fl->perf.getargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001379 VERIFY(err, 0 == get_args(kernel, ctx));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001380 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001381 if (err)
1382 goto bail;
1383 }
1384
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001385 PERF(fl->profile, fl->perf.invargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001386 if (!fl->sctx->smmu.coherent) {
1387 inv_args_pre(ctx);
1388 if (mode == FASTRPC_MODE_SERIAL)
1389 inv_args(ctx);
1390 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001391 PERF_END);
1392
1393 PERF(fl->profile, fl->perf.link,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001394 VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001395 PERF_END);
1396
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001397 if (err)
1398 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001399 PERF(fl->profile, fl->perf.invargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001400 if (mode == FASTRPC_MODE_PARALLEL && !fl->sctx->smmu.coherent)
1401 inv_args(ctx);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001402 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001403 wait:
1404 if (kernel)
1405 wait_for_completion(&ctx->work);
1406 else {
1407 interrupted = wait_for_completion_interruptible(&ctx->work);
1408 VERIFY(err, 0 == (err = interrupted));
1409 if (err)
1410 goto bail;
1411 }
1412 VERIFY(err, 0 == (err = ctx->retval));
1413 if (err)
1414 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001415
1416 PERF(fl->profile, fl->perf.putargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001417 VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001418 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001419 if (err)
1420 goto bail;
1421 bail:
1422 if (ctx && interrupted == -ERESTARTSYS)
1423 context_save_interrupted(ctx);
1424 else if (ctx)
1425 context_free(ctx);
1426 if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
1427 err = ECONNRESET;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001428
1429 if (fl->profile && !interrupted) {
1430 if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
1431 fl->perf.invoke += getnstimediff(&invoket);
1432 if (!(invoke->handle >= 0 &&
1433 invoke->handle <= FASTRPC_STATIC_HANDLE_MAX))
1434 fl->perf.count++;
1435 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001436 return err;
1437}
1438
Sathish Ambley36849af2017-02-02 09:35:55 -08001439static int fastrpc_channel_open(struct fastrpc_file *fl);
/*
 * fastrpc_init_process() - create or attach the remote process for @fl.
 * @fl:    per-fd state; fl->pd becomes 0 (attach) or 1 (dynamic user PD).
 * @uproc: init parameters from user space (flags, ELF file, memory, attrs).
 *
 * FASTRPC_INIT_ATTACH attaches to the pre-existing process on the DSP;
 * FASTRPC_INIT_CREATE spawns a dynamic user PD, handing the DSP the
 * process ELF and a pre-mapped memory region.  The scalar encoding is
 * REMOTE_SCALARS_MAKE(method, in, out).  Returns 0 or a negative errno.
 */
static int fastrpc_init_process(struct fastrpc_file *fl,
				struct fastrpc_ioctl_init_attrs *uproc)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_attrs ioctl;
	struct fastrpc_ioctl_init *init = &uproc->init;
	struct smq_phy_page pages[1];
	struct fastrpc_mmap *file = 0, *mem = 0;

	VERIFY(err, !fastrpc_channel_open(fl));
	if (err)
		goto bail;
	if (init->flags == FASTRPC_INIT_ATTACH) {
		remote_arg_t ra[1];
		int tgid = current->tgid;

		/* method 0, one input buffer carrying our tgid */
		ra[0].buf.pv = (void *)&tgid;
		ra[0].buf.len = sizeof(tgid);
		ioctl.inv.handle = 1;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = 0;
		ioctl.attrs = 0;
		fl->pd = 0;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else if (init->flags == FASTRPC_INIT_CREATE) {
		remote_arg_t ra[6];
		int fds[6];
		int mflags = 0;
		/* layout mirrors the DSP-side create-process method ABI */
		struct {
			int pgid;
			int namelen;
			int filelen;
			int pageslen;
			int attrs;
			int siglen;
		} inbuf;

		inbuf.pgid = current->tgid;
		inbuf.namelen = strlen(current->comm) + 1;
		inbuf.filelen = init->filelen;
		fl->pd = 1;
		if (init->filelen) {
			/* map the process ELF supplied by user space */
			VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
				init->file, init->filelen, mflags, &file));
			if (err)
				goto bail;
		}
		inbuf.pageslen = 1;
		/* map the memory region donated to the new PD */
		VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
				init->mem, init->memlen, mflags, &mem));
		if (err)
			goto bail;
		/* NOTE(review): duplicate of the assignment above */
		inbuf.pageslen = 1;
		ra[0].buf.pv = (void *)&inbuf;
		ra[0].buf.len = sizeof(inbuf);
		fds[0] = 0;

		ra[1].buf.pv = (void *)current->comm;
		ra[1].buf.len = inbuf.namelen;
		fds[1] = 0;

		ra[2].buf.pv = (void *)init->file;
		ra[2].buf.len = inbuf.filelen;
		fds[2] = init->filefd;

		/* physical page(s) backing the donated memory region */
		pages[0].addr = mem->phys;
		pages[0].size = mem->size;
		ra[3].buf.pv = (void *)pages;
		ra[3].buf.len = 1 * sizeof(*pages);
		fds[3] = 0;

		inbuf.attrs = uproc->attrs;
		ra[4].buf.pv = (void *)&(inbuf.attrs);
		ra[4].buf.len = sizeof(inbuf.attrs);
		fds[4] = 0;

		inbuf.siglen = uproc->siglen;
		ra[5].buf.pv = (void *)&(inbuf.siglen);
		ra[5].buf.len = sizeof(inbuf.siglen);
		fds[5] = 0;

		ioctl.inv.handle = 1;
		/* method 6 with 4 inputs by default, method 7 with 6 inputs
		 * when attributes/signature length are supplied
		 */
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
		if (uproc->attrs)
			ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = fds;
		ioctl.attrs = 0;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else {
		err = -ENOTTY;
	}
bail:
	/* on success the mem map is owned by the remote process; the ELF
	 * map is always released
	 */
	if (mem && err)
		fastrpc_mmap_free(mem);
	if (file)
		fastrpc_mmap_free(file);
	return err;
}
1546
1547static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
1548{
1549 int err = 0;
1550 struct fastrpc_ioctl_invoke_attrs ioctl;
1551 remote_arg_t ra[1];
1552 int tgid = 0;
1553
Sathish Ambley36849af2017-02-02 09:35:55 -08001554 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1555 if (err)
1556 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001557 VERIFY(err, fl->apps->channel[fl->cid].chan != 0);
1558 if (err)
1559 goto bail;
1560 tgid = fl->tgid;
1561 ra[0].buf.pv = (void *)&tgid;
1562 ra[0].buf.len = sizeof(tgid);
1563 ioctl.inv.handle = 1;
1564 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
1565 ioctl.inv.pra = ra;
1566 ioctl.fds = 0;
1567 ioctl.attrs = 0;
1568 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1569 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1570bail:
1571 return err;
1572}
1573
/*
 * fastrpc_mmap_on_dsp() - register a mapping with the remote process.
 * @fl:    per-fd state.
 * @flags: mmap flags forwarded to the DSP.
 * @map:   local mapping; on success map->raddr holds the remote address.
 *
 * Sends (pid, flags, vaddrin, num-pages) plus the physical page array and
 * receives the remote virtual address.  Returns 0 or a negative errno.
 * NOTE(review): map->raddr is written even when the invoke fails; callers
 * must check the return value before trusting it.
 */
static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
			       struct fastrpc_mmap *map)
{
	struct fastrpc_ioctl_invoke_attrs ioctl;
	struct smq_phy_page page;
	int num = 1;
	remote_arg_t ra[3];
	int err = 0;
	/* layout mirrors the DSP-side mmap method ABI */
	struct {
		int pid;
		uint32_t flags;
		uintptr_t vaddrin;
		int num;
	} inargs;
	struct {
		uintptr_t vaddrout;
	} routargs;

	inargs.pid = current->tgid;
	inargs.vaddrin = (uintptr_t)map->va;
	inargs.flags = flags;
	/* compat path scales by sizeof(page) — presumably the 32-bit DSP
	 * interface expects a byte count here; confirm against the DSP side
	 */
	inargs.num = fl->apps->compat ? num * sizeof(page) : num;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);
	page.addr = map->phys;
	page.size = map->size;
	ra[1].buf.pv = (void *)&page;
	ra[1].buf.len = num * sizeof(page);

	ra[2].buf.pv = (void *)&routargs;
	ra[2].buf.len = sizeof(routargs);

	ioctl.inv.handle = 1;
	/* method 4 (compat) or 2, each with 2 inputs and 1 output */
	if (fl->apps->compat)
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
	else
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	map->raddr = (uintptr_t)routargs.vaddrout;

	return err;
}
1620
1621static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
1622 struct fastrpc_mmap *map)
1623{
1624 struct fastrpc_ioctl_invoke_attrs ioctl;
1625 remote_arg_t ra[1];
1626 int err = 0;
1627 struct {
1628 int pid;
1629 uintptr_t vaddrout;
1630 ssize_t size;
1631 } inargs;
1632
1633 inargs.pid = current->tgid;
1634 inargs.size = map->size;
1635 inargs.vaddrout = map->raddr;
1636 ra[0].buf.pv = (void *)&inargs;
1637 ra[0].buf.len = sizeof(inargs);
1638
1639 ioctl.inv.handle = 1;
1640 if (fl->apps->compat)
1641 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
1642 else
1643 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
1644 ioctl.inv.pra = ra;
1645 ioctl.fds = 0;
1646 ioctl.attrs = 0;
1647 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1648 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1649 return err;
1650}
1651
1652static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
1653 ssize_t len, struct fastrpc_mmap **ppmap);
1654
1655static void fastrpc_mmap_add(struct fastrpc_mmap *map);
1656
1657static int fastrpc_internal_munmap(struct fastrpc_file *fl,
1658 struct fastrpc_ioctl_munmap *ud)
1659{
1660 int err = 0;
1661 struct fastrpc_mmap *map = 0;
1662
1663 VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
1664 if (err)
1665 goto bail;
1666 VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
1667 if (err)
1668 goto bail;
1669 fastrpc_mmap_free(map);
1670bail:
1671 if (err && map)
1672 fastrpc_mmap_add(map);
1673 return err;
1674}
1675
1676static int fastrpc_internal_mmap(struct fastrpc_file *fl,
1677 struct fastrpc_ioctl_mmap *ud)
1678{
1679
1680 struct fastrpc_mmap *map = 0;
1681 int err = 0;
1682
1683 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin, ud->size,
Sathish Ambleyae5ee542017-01-16 22:24:23 -08001684 ud->flags, 1, &map))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001685 return 0;
1686
1687 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
1688 (uintptr_t)ud->vaddrin, ud->size, ud->flags, &map));
1689 if (err)
1690 goto bail;
1691 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
1692 if (err)
1693 goto bail;
1694 ud->vaddrout = map->raddr;
1695 bail:
1696 if (err && map)
1697 fastrpc_mmap_free(map);
1698 return err;
1699}
1700
/*
 * fastrpc_channel_close() - kref release callback for a channel.
 *
 * Invoked through kref_put_mutex() with gfa.smd_mutex already held, so
 * this function is responsible for releasing that mutex after closing
 * the glink channel.
 */
static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	/* the channel id is this ctx's index in the global channel table */
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = 0;
	mutex_unlock(&me->smd_mutex);
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
		MAJOR(me->dev_no), cid);
}
1715
1716static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
1717
1718static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
1719 int secure, struct fastrpc_session_ctx **session)
1720{
1721 struct fastrpc_apps *me = &gfa;
1722 int idx = 0, err = 0;
1723
1724 if (chan->sesscount) {
1725 for (idx = 0; idx < chan->sesscount; ++idx) {
1726 if (!chan->session[idx].used &&
1727 chan->session[idx].smmu.secure == secure) {
1728 chan->session[idx].used = 1;
1729 break;
1730 }
1731 }
1732 VERIFY(err, idx < chan->sesscount);
1733 if (err)
1734 goto bail;
1735 chan->session[idx].smmu.faults = 0;
1736 } else {
1737 VERIFY(err, me->dev != NULL);
1738 if (err)
1739 goto bail;
1740 chan->session[0].dev = me->dev;
1741 }
1742
1743 *session = &chan->session[idx];
1744 bail:
1745 return err;
1746}
1747
/* glink callback: queue an rx intent of the requested size and report
 * whether the remote side may proceed with the transmit.
 */
bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv, size_t size)
{
	return glink_queue_rx_intent(h, NULL, size) == 0;
}
1754
/*
 * fastrpc_glink_notify_tx_done() - glink tx-done callback.
 * Intentionally empty: the driver has nothing to reclaim when a transmit
 * completes (the message lives in the invoke context).
 */
void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
				  const void *pkt_priv, const void *ptr)
{
}
1759
/*
 * fastrpc_glink_notify_rx() - glink rx callback; completes waiting calls.
 *
 * The payload is an array of smq_invoke_rsp.  Bit 0 of the context cookie
 * carries the PD flag set by fastrpc_invoke_send(), so it is masked off
 * before the cookie is converted back to a context pointer.
 */
void fastrpc_glink_notify_rx(void *handle, const void *priv,
	const void *pkt_priv, const void *ptr, size_t size)
{
	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
	int len = size;

	while (len >= sizeof(*rsp) && rsp) {
		rsp->ctx = rsp->ctx & ~1;
		context_notify_user(uint64_to_ptr(rsp->ctx), rsp->retval);
		rsp++;
		len = len - sizeof(*rsp);
	}
	/* hand the rx buffer back to glink for reuse */
	glink_rx_done(handle, ptr, true);
}
1774
/*
 * fastrpc_glink_notify_state() - glink channel (port) state callback.
 *
 * Tracks the port state for the channel identified by @priv and wakes
 * fastrpc_channel_open() once GLINK_CONNECTED arrives.  On a remote
 * disconnect while the link itself is still up, the local end is closed
 * and the channel handle cleared.
 */
void fastrpc_glink_notify_state(void *handle, const void *priv,
				unsigned int event)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)(uintptr_t)priv;
	struct fastrpc_glink_info *link;

	if (cid < 0 || cid >= NUM_CHANNELS)
		return;
	link = &me->channel[cid].link;
	switch (event) {
	case GLINK_CONNECTED:
		link->port_state = FASTRPC_LINK_CONNECTED;
		complete(&me->channel[cid].work);
		break;
	case GLINK_LOCAL_DISCONNECTED:
		link->port_state = FASTRPC_LINK_DISCONNECTED;
		break;
	case GLINK_REMOTE_DISCONNECTED:
		if (me->channel[cid].chan &&
			link->link_state == FASTRPC_LINK_STATE_UP) {
			fastrpc_glink_close(me->channel[cid].chan, cid);
			me->channel[cid].chan = 0;
			link->port_state = FASTRPC_LINK_DISCONNECTED;
		}
		break;
	default:
		break;
	}
}
1805
1806static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
1807 struct fastrpc_session_ctx **session)
1808{
1809 int err = 0;
1810 struct fastrpc_apps *me = &gfa;
1811
1812 mutex_lock(&me->smd_mutex);
1813 if (!*session)
1814 err = fastrpc_session_alloc_locked(chan, secure, session);
1815 mutex_unlock(&me->smd_mutex);
1816 return err;
1817}
1818
1819static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
1820 struct fastrpc_session_ctx *session)
1821{
1822 struct fastrpc_apps *me = &gfa;
1823
1824 mutex_lock(&me->smd_mutex);
1825 session->used = 0;
1826 mutex_unlock(&me->smd_mutex);
1827}
1828
/*
 * fastrpc_file_free() - tear down all per-fd state.
 *
 * Order matters: the fd is unhashed first so no new lookups see it, the
 * remote process is asked to exit (best effort), then contexts, buffers
 * and maps are released before the channel reference and SMMU sessions
 * are dropped.  Returns 0 (also for a NULL @fl).
 */
static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n;
	struct fastrpc_mmap *map = 0;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	/* a fd that never got a session has no remote state to clean up */
	if (!fl->sctx) {
		kfree(fl);
		return 0;
	}
	(void)fastrpc_release_current_dsp_process(fl);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map);
	}
	/* only drop the channel ref if no SSR bumped ssrcount meanwhile */
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
			fastrpc_channel_close, &fl->apps->smd_mutex);
	if (fl->sctx)	/* always true here, given the early return above */
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
	kfree(fl);
	return 0;
}
1863
1864static int fastrpc_device_release(struct inode *inode, struct file *file)
1865{
1866 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
1867
1868 if (fl) {
Sathish Ambley1ca68232017-01-19 10:32:55 -08001869 if (fl->debugfs_file != NULL)
1870 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001871 fastrpc_file_free(fl);
1872 file->private_data = 0;
1873 }
1874 return 0;
1875}
1876
1877static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
1878 void *priv)
1879{
1880 struct fastrpc_apps *me = &gfa;
1881 int cid = (int)((uintptr_t)priv);
1882 struct fastrpc_glink_info *link;
1883
1884 if (cid < 0 || cid >= NUM_CHANNELS)
1885 return;
1886
1887 link = &me->channel[cid].link;
1888 switch (cb_info->link_state) {
1889 case GLINK_LINK_STATE_UP:
1890 link->link_state = FASTRPC_LINK_STATE_UP;
1891 complete(&me->channel[cid].work);
1892 break;
1893 case GLINK_LINK_STATE_DOWN:
1894 link->link_state = FASTRPC_LINK_STATE_DOWN;
1895 break;
1896 default:
1897 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
1898 break;
1899 }
1900}
1901
/*
 * fastrpc_glink_register() - subscribe to link-state events for @cid.
 *
 * Idempotent: returns early (success) if a notifier handle already
 * exists.  Otherwise registers fastrpc_link_state_handler() and waits,
 * with a timeout, until the link is reported up.  Returns 0 or -errno.
 */
static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;

	link = &me->channel[cid].link;
	if (link->link_notify_handle != NULL)
		goto bail;

	link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
	link->link_notify_handle = glink_register_link_state_cb(
					&link->link_info,
					(void *)((uintptr_t)cid));
	VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
	if (err) {
		link->link_notify_handle = NULL;
		goto bail;
	}
	/* wait for GLINK_LINK_STATE_UP via fastrpc_link_state_handler() */
	VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
						RPC_TIMEOUT));
bail:
	return err;
}
1929
1930static void fastrpc_glink_close(void *chan, int cid)
1931{
1932 int err = 0;
1933 struct fastrpc_glink_info *link;
1934
1935 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
1936 if (err)
1937 return;
1938 link = &gfa.channel[cid].link;
1939
1940 if (link->port_state == FASTRPC_LINK_CONNECTED ||
1941 link->port_state == FASTRPC_LINK_CONNECTING) {
1942 link->port_state = FASTRPC_LINK_DISCONNECTING;
1943 glink_close(chan);
1944 }
1945}
1946
/*
 * fastrpc_glink_open() - open the glink port for channel @cid.
 *
 * Requires the link to be up (see fastrpc_glink_register()).  A port
 * that is already connected or mid-connect is left alone.  On success
 * the handle is stored in me->channel[cid].chan; the GLINK_CONNECTED
 * event arrives later through fastrpc_glink_notify_state().
 */
static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &me->channel[cid].link.cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	/* already connected or connecting: nothing to do */
	if (link->port_state == FASTRPC_LINK_CONNECTED ||
		link->port_state == FASTRPC_LINK_CONNECTING) {
		goto bail;
	}

	link->port_state = FASTRPC_LINK_CONNECTING;
	cfg->priv = (void *)(uintptr_t)cid;
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err)
		goto bail;
	me->channel[cid].chan = handle;
bail:
	return err;
}
1986
/* debugfs open: hand the node's private pointer (a fastrpc_file for the
 * per-fd node, NULL for the global node) to the read handler.
 */
static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}
1992
/*
 * fastrpc_debugfs_read() - dump driver state for debugfs.
 *
 * With no per-fd context (global node) every channel and its sessions
 * are listed; with a fastrpc_file (per-fd node) that file's buffers,
 * maps and pending/interrupted contexts are listed.  Output is built in
 * a DEBUGFS_SIZE scratch buffer and copied out with
 * simple_read_from_buffer().
 */
static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
				    size_t count, loff_t *position)
{
	struct fastrpc_file *fl = filp->private_data;
	struct hlist_node *n;
	struct fastrpc_buf *buf = 0;
	struct fastrpc_mmap *map = 0;
	struct smq_invoke_ctx *ictx = 0;
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	unsigned int len = 0;
	int i, j, ret = 0;
	char *fileinfo = NULL;

	fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
	if (!fileinfo)
		goto bail;
	if (fl == NULL) {
		/* global node: walk every channel and its sessions */
		for (i = 0; i < NUM_CHANNELS; i++) {
			chan = &gcinfo[i];
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s\n\n",
					chan->name);
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s %d\n",
					"sesscount:", chan->sesscount);
			for (j = 0; j < chan->sesscount; j++) {
				sess = &chan->session[j];
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s%d\n\n", "SESSION", j);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "sid:",
						sess->smmu.cb);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "SECURE:",
						sess->smmu.secure);
			}
		}
	} else {
		/* per-fd node: dump this file's state under its lock */
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"PROCESS_ID:", fl->tgid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"CHANNEL_ID:", fl->cid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"SSRCOUNT:", fl->ssrcount);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s\n",
				"LIST OF BUFS:");
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %p %s %p %s %llx\n", "buf:",
					buf, "buf->virt:", buf->virt,
					"buf->phys:", buf->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF MAPS:");
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
						"%s %p %s %lx %s %llx\n",
						"map:", map,
						"map->va:", map->va,
						"map->phys:", map->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF PENDING SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
						"%s %p %s %u %s %u %s %u\n",
						"smqcontext:", ictx,
						"sc:", ictx->sc,
						"tid:", ictx->pid,
						"handle", ictx->rpra->h);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF INTERRUPTED SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
						"%s %p %s %u %s %u %s %u\n",
						"smqcontext:", ictx,
						"sc:", ictx->sc,
						"tid:", ictx->pid,
						"handle", ictx->rpra->h);
		}
		spin_unlock(&fl->hlock);
	}
	/* clamp in case the last scnprintf pushed len past the buffer */
	if (len > DEBUGFS_SIZE)
		len = DEBUGFS_SIZE;
	ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
	kfree(fileinfo);
bail:
	return ret;
}
2095
/* debugfs file operations shared by the global and per-fd nodes */
static const struct file_operations debugfs_fops = {
	.open = fastrpc_debugfs_open,
	.read = fastrpc_debugfs_read,
};
/*
 * fastrpc_channel_open() - ensure the glink channel for fl->cid is up.
 *
 * Under gfa.smd_mutex: records the current ssrcount (so later calls can
 * detect a subsystem restart), takes a channel reference, and on first
 * use registers for link events, opens the port and waits for the
 * GLINK_CONNECTED completion.  Returns 0 or a negative errno.
 */
static int fastrpc_channel_open(struct fastrpc_file *fl)
{
	struct fastrpc_apps *me = &gfa;
	int cid, err = 0;

	mutex_lock(&me->smd_mutex);

	VERIFY(err, fl && fl->sctx);
	if (err)
		goto bail;
	cid = fl->cid;
	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
	if (err)
		goto bail;
	fl->ssrcount = me->channel[cid].ssrcount;
	/* open only when nobody holds a live reference to the channel */
	if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
	    (me->channel[cid].chan == 0)) {
		fastrpc_glink_register(cid, me);
		VERIFY(err, 0 == fastrpc_glink_open(cid));
		if (err)
			goto bail;

		/* wait for GLINK_CONNECTED via fastrpc_glink_notify_state */
		VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
							RPC_TIMEOUT));
		if (err) {
			me->channel[cid].chan = 0;
			goto bail;
		}
		kref_init(&me->channel[cid].kref);
		pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
			MAJOR(me->dev_no), cid);
		if (me->channel[cid].ssrcount !=
			 me->channel[cid].prevssrcount) {
			me->channel[cid].prevssrcount =
					me->channel[cid].ssrcount;
		}
	}

bail:
	mutex_unlock(&me->smd_mutex);
	return err;
}
2142
Sathish Ambley36849af2017-02-02 09:35:55 -08002143static int fastrpc_device_open(struct inode *inode, struct file *filp)
2144{
2145 int err = 0;
2146 struct fastrpc_file *fl = 0;
2147 struct fastrpc_apps *me = &gfa;
2148
2149 VERIFY(err, fl = kzalloc(sizeof(*fl), GFP_KERNEL));
2150 if (err)
2151 return err;
2152
2153 context_list_ctor(&fl->clst);
2154 spin_lock_init(&fl->hlock);
2155 INIT_HLIST_HEAD(&fl->maps);
2156 INIT_HLIST_HEAD(&fl->bufs);
2157 INIT_HLIST_NODE(&fl->hn);
2158 fl->tgid = current->tgid;
2159 fl->apps = me;
2160 fl->mode = FASTRPC_MODE_SERIAL;
2161 fl->cid = -1;
2162 filp->private_data = fl;
2163 spin_lock(&me->hlock);
2164 hlist_add_head(&fl->hn, &me->drivers);
2165 spin_unlock(&me->hlock);
2166 return 0;
2167}
2168
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002169static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
2170{
2171 int err = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08002172 uint32_t cid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002173
Sathish Ambley36849af2017-02-02 09:35:55 -08002174 VERIFY(err, fl != 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002175 if (err)
2176 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08002177 if (fl->cid == -1) {
2178 cid = *info;
2179 VERIFY(err, cid < NUM_CHANNELS);
2180 if (err)
2181 goto bail;
2182 fl->cid = cid;
2183 fl->ssrcount = fl->apps->channel[cid].ssrcount;
2184 VERIFY(err, !fastrpc_session_alloc_locked(
2185 &fl->apps->channel[cid], 0, &fl->sctx));
2186 if (err)
2187 goto bail;
2188 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002189 *info = (fl->sctx->smmu.enabled ? 1 : 0);
2190bail:
2191 return err;
2192}
2193
/*
 * unlocked_ioctl handler — dispatch on ioctl_num, copying the argument
 * struct in/out of user space as needed.
 *
 * The three INVOKE variants share one handler: the cases intentionally
 * fall through, each setting `size` to its (smaller) struct size so that
 * only the fields the caller actually passed are copied; p.inv.fds and
 * p.inv.attrs are pre-zeroed to cover the shorter variants.
 */
static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
				 unsigned long ioctl_param)
{
	union {
		struct fastrpc_ioctl_invoke_attrs inv;
		struct fastrpc_ioctl_mmap mmap;
		struct fastrpc_ioctl_munmap munmap;
		struct fastrpc_ioctl_init_attrs init;
		struct fastrpc_ioctl_perf perf;
	} p;
	/* NOTE(review): user pointer; ideally typed `void __user *`. */
	void *param = (char *)ioctl_param;
	struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
	int size = 0, err = 0;
	uint32_t info;

	/* Zero optional fields so the shorter INVOKE variants are safe. */
	p.inv.fds = 0;
	p.inv.attrs = 0;

	switch (ioctl_num) {
	case FASTRPC_IOCTL_INVOKE:
		size = sizeof(struct fastrpc_ioctl_invoke);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_FD:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_fd);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_ATTRS:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_attrs);
		VERIFY(err, 0 == copy_from_user(&p.inv, param, size));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
						0, &p.inv)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MMAP:
		VERIFY(err, 0 == copy_from_user(&p.mmap, param,
						sizeof(p.mmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
		if (err)
			goto bail;
		/* Copy back so the caller sees the assigned vaddrout. */
		VERIFY(err, 0 == copy_to_user(param, &p.mmap, sizeof(p.mmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MUNMAP:
		VERIFY(err, 0 == copy_from_user(&p.munmap, param,
						sizeof(p.munmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
							&p.munmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_SETMODE:
		/* Mode is passed by value in ioctl_param, not via a struct. */
		switch ((uint32_t)ioctl_param) {
		case FASTRPC_MODE_PARALLEL:
		case FASTRPC_MODE_SERIAL:
			fl->mode = (uint32_t)ioctl_param;
			break;
		case FASTRPC_MODE_PROFILE:
			fl->profile = (uint32_t)ioctl_param;
			break;
		default:
			err = -ENOTTY;
			break;
		}
		break;
	case FASTRPC_IOCTL_GETPERF:
		VERIFY(err, 0 == copy_from_user(&p.perf,
					param, sizeof(p.perf)));
		if (err)
			goto bail;
		p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
		if (p.perf.keys) {
			char *keys = PERF_KEYS;

			/* assumes the caller sized the keys buffer to hold
			 * PERF_KEYS — TODO confirm against user library */
			VERIFY(err, 0 == copy_to_user((char *)p.perf.keys,
						 keys, strlen(keys)+1));
			if (err)
				goto bail;
		}
		if (p.perf.data) {
			/* NOTE(review): err from this copy_to_user is not
			 * checked before the next copy below — verify this
			 * best-effort behavior is intended. */
			VERIFY(err, 0 == copy_to_user((int64_t *)p.perf.data,
						 &fl->perf, sizeof(fl->perf)));
		}
		VERIFY(err, 0 == copy_to_user(param, &p.perf, sizeof(p.perf)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_GETINFO:
		VERIFY(err, 0 == copy_from_user(&info, param, sizeof(info)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
		if (err)
			goto bail;
		VERIFY(err, 0 == copy_to_user(param, &info, sizeof(info)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_INIT:
		/* Plain INIT has no attrs/siglen; zero them and fall through
		 * with the smaller struct size. */
		p.init.attrs = 0;
		p.init.siglen = 0;
		size = sizeof(struct fastrpc_ioctl_init);
		/* fall through */
	case FASTRPC_IOCTL_INIT_ATTRS:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_init_attrs);
		VERIFY(err, 0 == copy_from_user(&p.init, param, size));
		if (err)
			goto bail;
		VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
		if (err)
			goto bail;
		break;

	default:
		err = -ENOTTY;
		pr_info("bad ioctl: %d\n", ioctl_num);
		break;
	}
 bail:
	return err;
}
2323
2324static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
2325 unsigned long code,
2326 void *data)
2327{
2328 struct fastrpc_apps *me = &gfa;
2329 struct fastrpc_channel_ctx *ctx;
2330 int cid;
2331
2332 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
2333 cid = ctx - &me->channel[0];
2334 if (code == SUBSYS_BEFORE_SHUTDOWN) {
2335 mutex_lock(&me->smd_mutex);
2336 ctx->ssrcount++;
2337 if (ctx->chan) {
2338 fastrpc_glink_close(ctx->chan, cid);
2339 ctx->chan = 0;
2340 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
2341 gcinfo[cid].name, MAJOR(me->dev_no), cid);
2342 }
2343 mutex_unlock(&me->smd_mutex);
2344 fastrpc_notify_drivers(me, cid);
2345 }
2346
2347 return NOTIFY_DONE;
2348}
2349
/* Character-device file operations for /dev/<fastrpc>. */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};
2356
/* Device-tree compatibles handled by fastrpc_probe(). */
static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};
2364
/*
 * Probe a compute-context-bank device node: match its "label" against a
 * known channel, parse its iommus phandle, create and attach an ARM IOMMU
 * mapping for the session, and register the session with the channel.
 *
 * Secure context banks get a different VA window start and a secure VMID
 * domain attribute.
 */
static int fastrpc_cb_probe(struct device *dev)
{
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	struct of_phandle_args iommuspec;
	const char *name;
	unsigned int start = 0x80000000;	/* default IOVA window base */
	int err = 0, i;
	int secure_vmid = VMID_CP_PIXEL;

	VERIFY(err, 0 != (name = of_get_property(dev->of_node, "label", NULL)));
	if (err)
		goto bail;
	/* Find the channel whose name matches the DT label. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		if (!strcmp(name, gcinfo[i].name))
			break;
	}
	VERIFY(err, i < NUM_CHANNELS);
	if (err)
		goto bail;
	chan = &gcinfo[i];
	VERIFY(err, chan->sesscount < NUM_SESSIONS);
	if (err)
		goto bail;

	VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
						"#iommu-cells", 0, &iommuspec));
	if (err)
		goto bail;
	sess = &chan->session[chan->sesscount];
	/* Low nibble of the first iommus cell is the context-bank number. */
	sess->smmu.cb = iommuspec.args[0] & 0xf;
	sess->used = 0;
	sess->smmu.coherent = of_property_read_bool(dev->of_node,
						"dma-coherent");
	sess->smmu.secure = of_property_read_bool(dev->of_node,
						"qcom,secure-context-bank");
	/* Secure banks use a lower IOVA window base. */
	if (sess->smmu.secure)
		start = 0x60000000;
	VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
				arm_iommu_create_mapping(&platform_bus_type,
						start, 0x7fffffff)));
	if (err)
		goto bail;

	if (sess->smmu.secure)
		iommu_domain_set_attr(sess->smmu.mapping->domain,
				DOMAIN_ATTR_SECURE_VMID,
				&secure_vmid);

	VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
	if (err)
		goto bail;
	sess->dev = dev;
	sess->smmu.enabled = 1;
	chan->sesscount++;
	/* NOTE(review): runs once per context bank, overwriting the previous
	 * handle each time — confirm a single global file is intended. */
	debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
					NULL, &debugfs_fops);
bail:
	return err;
}
2427
/*
 * Platform-driver probe, dispatching on the DT compatible:
 *  - compute-cb nodes are handled by fastrpc_cb_probe();
 *  - the adsprpc-mem-region node locates the ADSP ION/CMA heap and
 *    hyp-assigns it to the remote VMIDs;
 *  - any other match populates its child nodes so the cb devices probe.
 */
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;		/* stays 0 if no ADSP heap is found */
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			/* Walk ION heap children looking for the ADSP heap. */
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
						VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE,
			PERM_READ | PERM_WRITE | PERM_EXEC,
			PERM_READ | PERM_WRITE | PERM_EXEC,
			PERM_READ | PERM_WRITE | PERM_EXEC,
			};

			/* Share the heap with the remote subsystems. */
			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
					srcVM, 1, destVM, destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					  fastrpc_match_table,
					  NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}
2491
2492static void fastrpc_deinit(void)
2493{
2494 struct fastrpc_apps *me = &gfa;
2495 struct fastrpc_channel_ctx *chan = gcinfo;
2496 int i, j;
2497
2498 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
2499 if (chan->chan) {
2500 kref_put_mutex(&chan->kref,
2501 fastrpc_channel_close, &me->smd_mutex);
2502 chan->chan = 0;
2503 }
2504 for (j = 0; j < NUM_SESSIONS; j++) {
2505 struct fastrpc_session_ctx *sess = &chan->session[j];
2506
2507 if (sess->smmu.enabled) {
2508 arm_iommu_detach_device(sess->dev);
2509 sess->dev = 0;
2510 }
2511 if (sess->smmu.mapping) {
2512 arm_iommu_release_mapping(sess->smmu.mapping);
2513 sess->smmu.mapping = 0;
2514 }
2515 }
2516 }
2517}
2518
/* Platform driver binding fastrpc_probe() to the DT match table above. */
static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};
2527
/*
 * Module init: register the platform driver, allocate the char-device
 * region, add the cdev, create the class and a single device node
 * (minor 0, named after channel 0), register SSR notifiers for every
 * channel, create the ION client, and finally the debugfs root.
 *
 * Errors unwind through the goto ladder in reverse order of setup.
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	struct device *dev = 0;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	/* One cdev at minor 0 serves every channel. */
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				1));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	dev = device_create(me->class, NULL,
				MKDEV(MAJOR(me->dev_no), 0),
				NULL, gcinfo[0].name);
	VERIFY(err, !IS_ERR_OR_NULL(dev));
	if (err)
		goto device_create_bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		/* All channels share the single device node created above. */
		me->channel[i].dev = dev;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	debugfs_root = debugfs_create_dir("adsprpc", NULL);
	return 0;
device_create_bail:
	for (i = 0; i < NUM_CHANNELS; i++) {
		/* NOTE(review): subsys_notif_register_notifier may return an
		 * ERR_PTR, which this NULL check would not catch — verify. */
		if (me->channel[i].handle)
			subsys_notif_unregister_notifier(me->channel[i].handle,
							&me->channel[i].nb);
	}
	if (!IS_ERR_OR_NULL(dev))
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}
2596
2597static void __exit fastrpc_device_exit(void)
2598{
2599 struct fastrpc_apps *me = &gfa;
2600 int i;
2601
2602 fastrpc_file_list_dtor(me);
2603 fastrpc_deinit();
2604 for (i = 0; i < NUM_CHANNELS; i++) {
2605 if (!gcinfo[i].name)
2606 continue;
2607 device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
2608 subsys_notif_unregister_notifier(me->channel[i].handle,
2609 &me->channel[i].nb);
2610 }
2611 class_destroy(me->class);
2612 cdev_del(&me->cdev);
2613 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
2614 ion_client_destroy(me->client);
Sathish Ambley1ca68232017-01-19 10:32:55 -08002615 debugfs_remove_recursive(debugfs_root);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002616}
2617
/* Init late so dependencies (glink, ION, IOMMU, subsystem-restart) are up. */
late_initcall(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");