blob: 1f605b4d26ab54780c8541eec7f2fc940282a4ea [file] [log] [blame]
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001/*
Sathish Ambleyae5ee542017-01-16 22:24:23 -08002 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
29#include <soc/qcom/subsystem_notif.h>
30#include <soc/qcom/subsystem_restart.h>
31#include <linux/scatterlist.h>
32#include <linux/fs.h>
33#include <linux/uaccess.h>
34#include <linux/device.h>
35#include <linux/of.h>
36#include <linux/of_address.h>
37#include <linux/of_platform.h>
38#include <linux/dma-contiguous.h>
39#include <linux/cma.h>
40#include <linux/iommu.h>
41#include <linux/kref.h>
42#include <linux/sort.h>
43#include <linux/msm_dma_iommu_mapping.h>
44#include <asm/dma-iommu.h>
45#include "adsprpc_compat.h"
46#include "adsprpc_shared.h"
Sathish Ambley1ca68232017-01-19 10:32:55 -080047#include <linux/debugfs.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070048
49#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
50#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
51#define TZ_PIL_AUTH_QDSP6_PROC 1
52#define FASTRPC_ENOSUCH 39
53#define VMID_SSC_Q6 5
54#define VMID_ADSP_Q6 6
Sathish Ambley1ca68232017-01-19 10:32:55 -080055#define DEBUGFS_SIZE 1024
Sathish Ambley69e1ab02016-10-18 10:28:15 -070056
57#define RPC_TIMEOUT (5 * HZ)
58#define BALIGN 128
59#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
60#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
Sathish Ambleybae51902017-07-03 15:00:49 -070061#define M_FDLIST (16)
62#define M_CRCLIST (64)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070063
64#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
65
66#define FASTRPC_LINK_STATE_DOWN (0x0)
67#define FASTRPC_LINK_STATE_UP (0x1)
68#define FASTRPC_LINK_DISCONNECTED (0x0)
69#define FASTRPC_LINK_CONNECTING (0x1)
70#define FASTRPC_LINK_CONNECTED (0x3)
71#define FASTRPC_LINK_DISCONNECTING (0x7)
72
Sathish Ambleya21b5b52017-01-11 16:11:01 -080073#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
74#define FASTRPC_STATIC_HANDLE_LISTENER (3)
75#define FASTRPC_STATIC_HANDLE_MAX (20)
76
#define PERF_END (void)0

/*
 * PERF(enb, cnt, ff): execute the statement(s) @ff and, when profiling is
 * enabled (@enb non-zero), add the elapsed wall-clock nanoseconds to the
 * accumulator @cnt (one of the struct fastrpc_perf counters).
 *
 * Wrapped in do { } while (0) so the expansion is a single statement and
 * remains safe inside un-braced if/else bodies; the original bare-brace
 * form left a stray ';' after the block at every call site.
 */
#define PERF(enb, cnt, ff) \
	do {\
		struct timespec startT = {0};\
		if (enb) {\
			getnstimeofday(&startT);\
		} \
		ff ;\
		if (enb) {\
			cnt += getnstimediff(&startT);\
		} \
	} while (0)
90
Sathish Ambley69e1ab02016-10-18 10:28:15 -070091static int fastrpc_glink_open(int cid);
92static void fastrpc_glink_close(void *chan, int cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -080093static struct dentry *debugfs_root;
94static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -070095
96static inline uint64_t buf_page_start(uint64_t buf)
97{
98 uint64_t start = (uint64_t) buf & PAGE_MASK;
99 return start;
100}
101
102static inline uint64_t buf_page_offset(uint64_t buf)
103{
104 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
105 return offset;
106}
107
108static inline int buf_num_pages(uint64_t buf, ssize_t len)
109{
110 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
111 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
112 int nPages = end - start + 1;
113 return nPages;
114}
115
116static inline uint64_t buf_page_size(uint32_t size)
117{
118 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
119
120 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
121}
122
/* Widen-safe conversion of a 64-bit wire address to a kernel pointer. */
static inline void *uint64_to_ptr(uint64_t addr)
{
	return (void *)(uintptr_t)addr;
}
129
/* Convert a kernel pointer to the 64-bit representation sent over RPC. */
static inline uint64_t ptr_to_uint64(void *ptr)
{
	return (uint64_t)(uintptr_t)ptr;
}
136
struct fastrpc_file;

/*
 * A DMA-coherent scratch buffer owned by one fastrpc_file; allocated in
 * fastrpc_buf_alloc() and either cached on fl->bufs or freed in
 * fastrpc_buf_free().
 */
struct fastrpc_buf {
	struct hlist_node hn;		/* link on fl->bufs cache list */
	struct fastrpc_file *fl;	/* owning file */
	void *virt;			/* kernel VA from dma_alloc_coherent() */
	uint64_t phys;			/* bus address; may carry SMMU cb in bits 63:32 */
	ssize_t size;
};

struct fastrpc_ctx_lst;

/*
 * Per-argument buffer span used by context_build_overlap() to detect and
 * coalesce user buffers that overlap each other in the address space.
 */
struct overlap {
	uintptr_t start;	/* user VA of the buffer */
	uintptr_t end;		/* start + length */
	int raix;		/* original remote-arg index before sorting */
	uintptr_t mstart;	/* merged (non-overlapped) start to copy */
	uintptr_t mend;		/* merged end; 0 if fully covered by a prior span */
	uintptr_t offset;	/* bytes already covered by the previous span */
};
157
/*
 * State for one in-flight remote invocation, allocated in context_alloc()
 * as a single kzalloc carrying the trailing maps/lpra/fds/attrs/overs/
 * overps arrays.
 */
struct smq_invoke_ctx {
	struct hlist_node hn;		/* on clst->pending or clst->interrupted */
	struct completion work;		/* completed by context_notify_user() */
	int retval;			/* remote return value; -1 until completion */
	int pid;			/* current->pid at alloc; matched on restore */
	int tgid;
	remote_arg_t *lpra;		/* local (user-layout) args, copied in */
	remote_arg64_t *rpra;		/* remote 64-bit args inside ctx->buf */
	int *fds;			/* optional per-arg ion fds */
	unsigned int *attrs;		/* optional per-arg FASTRPC_ATTR_* flags */
	struct fastrpc_mmap **maps;	/* per-arg mappings created in get_args() */
	struct fastrpc_buf *buf;	/* metadata + copy buffer */
	ssize_t used;			/* bytes of copylen consumed */
	struct fastrpc_file *fl;
	uint32_t sc;			/* scalars word describing the method */
	struct overlap *overs;		/* overlap records, one per buffer */
	struct overlap **overps;	/* sorted pointers into overs */
	struct smq_msg msg;
	uint32_t *crc;			/* user CRC-list pointer, may be NULL */
};

/* Per-file lists of invocation contexts. */
struct fastrpc_ctx_lst {
	struct hlist_head pending;	/* issued, awaiting completion */
	struct hlist_head interrupted;	/* saved across an interrupted wait */
};
183
/* SMMU attachment state for one session. */
struct fastrpc_smmu {
	struct dma_iommu_mapping *mapping;
	int cb;			/* context bank; folded into phys bits 63:32 */
	int enabled;		/* map through msm_dma_map_sg_attrs() when set */
	int faults;
	int secure;		/* session usable for ION_FLAG_SECURE buffers */
	int coherent;		/* IO-coherent bank; affects DMA attrs selection */
};

/* One DSP session: the platform device plus its SMMU state. */
struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};

/* glink link/port bookkeeping for a channel. */
struct fastrpc_glink_info {
	int link_state;		/* FASTRPC_LINK_STATE_* */
	int port_state;		/* FASTRPC_LINK_DISCONNECTED/CONNECTING/... */
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};
206
/* Per-DSP channel (adsp/mdsp/slpi/cdsp); statically described in gcinfo[]. */
struct fastrpc_channel_ctx {
	char *name;			/* glink channel name */
	char *subsys;			/* subsystem-restart identifier */
	void *chan;			/* open glink handle */
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;
	struct completion workport;
	struct notifier_block nb;	/* subsystem restart notifier */
	struct kref kref;		/* channel open/close refcount */
	int sesscount;
	int ssrcount;			/* bumped on subsystem restart */
	void *handle;
	int prevssrcount;
	int vmid;			/* non-zero: hyp_assign buffers to this VM */
	struct fastrpc_glink_info link;
};

/* Driver-wide state; a single instance lives in the static 'gfa'. */
struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;		/* global mappings (see fastrpc_mmap_remove) */
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;	/* all open fastrpc_file instances */
	spinlock_t hlock;		/* protects maps and drivers lists */
	struct ion_client *client;
	struct device *dev;
};
239
/*
 * One ION/DMA-BUF buffer mapped for the DSP; created by
 * fastrpc_mmap_create(), refcounted, torn down in fastrpc_mmap_free().
 */
struct fastrpc_mmap {
	struct hlist_node hn;		/* on fl->maps (or gfa.maps) */
	struct fastrpc_file *fl;
	struct fastrpc_apps *apps;
	int fd;				/* user fd the buffer was imported from */
	uint32_t flags;			/* mflags supplied by the caller */
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;			/* dma addr; SMMU cb folded into bits 63:32 */
	ssize_t size;			/* mapped size */
	uintptr_t va;			/* user VA covered by this mapping */
	ssize_t len;			/* user length */
	int refs;			/* lookup refs; freed when it reaches 0 */
	uintptr_t raddr;		/* remote-side address */
	int uncached;
	int secure;			/* ION_FLAG_SECURE buffer */
	uintptr_t attr;			/* FASTRPC_ATTR_* bits */
};

/* Nanosecond accumulators matching PERF_KEYS, filled via the PERF() macro. */
struct fastrpc_perf {
	int64_t count;
	int64_t flush;
	int64_t map;
	int64_t copy;
	int64_t link;
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;
	int64_t invoke;
};
272
/* Per-open-fd driver state; linked on gfa.drivers. */
struct fastrpc_file {
	struct hlist_node hn;		/* on gfa.drivers */
	spinlock_t hlock;		/* protects maps, bufs and clst lists */
	struct hlist_head maps;		/* fastrpc_mmap instances */
	struct hlist_head bufs;		/* cached fastrpc_buf instances */
	struct fastrpc_ctx_lst clst;	/* pending/interrupted invocations */
	struct fastrpc_session_ctx *sctx;	/* default session */
	struct fastrpc_session_ctx *secsctx;	/* lazily-allocated secure session */
	uint32_t mode;
	uint32_t profile;		/* enables the PERF() accounting */
	int tgid;
	int cid;			/* channel index into apps->channel[] */
	int ssrcount;
	int pd;
	struct fastrpc_apps *apps;
	struct fastrpc_perf perf;
	struct dentry *debugfs_file;
};
291
/* The single driver-wide state instance. */
static struct fastrpc_apps gfa;

/*
 * Static descriptors for the NUM_CHANNELS DSP channels.  Each entry names
 * the subsystem-restart handle and the glink edge/transport used to reach
 * that processor; the remaining fields are filled at runtime.
 */
static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};
320
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800321static inline int64_t getnstimediff(struct timespec *start)
322{
323 int64_t ns;
324 struct timespec ts, b;
325
326 getnstimeofday(&ts);
327 b = timespec_sub(ts, *start);
328 ns = timespec_to_ns(&b);
329 return ns;
330}
331
/*
 * Release a scratch buffer.  When @cache is set the buffer is parked on
 * the owner's fl->bufs list for reuse instead of being freed.  Otherwise
 * ownership is (if needed) reassigned back to HLOS and the DMA memory and
 * the descriptor are freed.
 */
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == 0 ? 0 : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		/* park on the per-file cache under fl->hlock */
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		/* strip the SMMU context bank folded into phys bits 63:32 */
		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			/* return the pages to HLOS before freeing them */
			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->dev, buf->size, buf->virt,
					buf->phys);
	}
	kfree(buf);
}
363
364static void fastrpc_buf_list_free(struct fastrpc_file *fl)
365{
366 struct fastrpc_buf *buf, *free;
367
368 do {
369 struct hlist_node *n;
370
371 free = 0;
372 spin_lock(&fl->hlock);
373 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
374 hlist_del_init(&buf->hn);
375 free = buf;
376 break;
377 }
378 spin_unlock(&fl->hlock);
379 if (free)
380 fastrpc_buf_free(free, 0);
381 } while (free);
382}
383
384static void fastrpc_mmap_add(struct fastrpc_mmap *map)
385{
386 struct fastrpc_file *fl = map->fl;
387
388 spin_lock(&fl->hlock);
389 hlist_add_head(&map->hn, &fl->maps);
390 spin_unlock(&fl->hlock);
391}
392
393static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800394 ssize_t len, int mflags, int refs, struct fastrpc_mmap **ppmap)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700395{
396 struct fastrpc_mmap *match = 0, *map;
397 struct hlist_node *n;
398
399 spin_lock(&fl->hlock);
400 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
401 if (va >= map->va &&
402 va + len <= map->va + map->len &&
403 map->fd == fd) {
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800404 if (refs)
405 map->refs++;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700406 match = map;
407 break;
408 }
409 }
410 spin_unlock(&fl->hlock);
411 if (match) {
412 *ppmap = match;
413 return 0;
414 }
415 return -ENOTTY;
416}
417
418static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
419 ssize_t len, struct fastrpc_mmap **ppmap)
420{
421 struct fastrpc_mmap *match = 0, *map;
422 struct hlist_node *n;
423 struct fastrpc_apps *me = &gfa;
424
425 spin_lock(&me->hlock);
426 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
427 if (map->raddr == va &&
428 map->raddr + map->len == va + len &&
429 map->refs == 1) {
430 match = map;
431 hlist_del_init(&map->hn);
432 break;
433 }
434 }
435 spin_unlock(&me->hlock);
436 if (match) {
437 *ppmap = match;
438 return 0;
439 }
440 spin_lock(&fl->hlock);
441 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
442 if (map->raddr == va &&
443 map->raddr + map->len == va + len &&
444 map->refs == 1) {
445 match = map;
446 hlist_del_init(&map->hn);
447 break;
448 }
449 }
450 spin_unlock(&fl->hlock);
451 if (match) {
452 *ppmap = match;
453 return 0;
454 }
455 return -ENOTTY;
456}
457
/*
 * Drop one reference on @map and, when it hits zero, tear the mapping
 * down: release the ION handle, undo the SMMU mapping, reassign the pages
 * back to HLOS if a channel VM owned them, then detach and put the
 * dma-buf.  Teardown order mirrors the acquire order in
 * fastrpc_mmap_create() in reverse.
 */
static void fastrpc_mmap_free(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;
	int destVM[1] = {VMID_HLOS};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	if (!map)
		return;
	fl = map->fl;
	/* refcount drop and unlink are done atomically under fl->hlock */
	spin_lock(&fl->hlock);
	map->refs--;
	if (!map->refs)
		hlist_del_init(&map->hn);
	spin_unlock(&fl->hlock);
	if (map->refs > 0)
		return;
	/* secure buffers were mapped through the secure session */
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	if (!IS_ERR_OR_NULL(map->handle))
		ion_free(fl->apps->client, map->handle);
	if (sess->smmu.enabled) {
		if (map->size || map->phys)
			msm_dma_unmap_sg(sess->dev,
				map->table->sgl,
				map->table->nents, DMA_BIDIRECTIONAL,
				map->buf);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid && map->phys) {
		int srcVM[2] = {VMID_HLOS, vmid};

		/* return ownership of the pages to HLOS */
		hyp_assign_phys(map->phys, buf_page_size(map->size),
				srcVM, 2, destVM, destVMperm, 1);
	}

	if (!IS_ERR_OR_NULL(map->table))
		dma_buf_unmap_attachment(map->attach, map->table,
				DMA_BIDIRECTIONAL);
	if (!IS_ERR_OR_NULL(map->attach))
		dma_buf_detach(map->buf, map->attach);
	if (!IS_ERR_OR_NULL(map->buf))
		dma_buf_put(map->buf);
	kfree(map);
}
507
508static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
509 struct fastrpc_session_ctx **session);
510
/*
 * Create (or reuse) a DSP mapping for the ION buffer behind @fd covering
 * the user range [va, va + len).  On success *ppmap points at a mapping
 * holding one reference.  Sequence: reuse an existing map if one covers
 * the range; otherwise import the ION handle, attach and map the dma-buf
 * for the chosen session (secure or default), map through the SMMU when
 * enabled, and hand the pages to the channel VM when one is configured.
 * On any failure the partially built map is released via
 * fastrpc_mmap_free() and a negative/err value is returned.
 */
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, ssize_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = 0;
	unsigned long attrs;
	unsigned long flags;
	int err = 0, vmid;

	/* fast path: an existing map already covers this fd/range */
	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	VERIFY(err, !IS_ERR_OR_NULL(map->handle =
			ion_import_dma_buf_fd(fl->apps->client, fd)));
	if (err)
		goto bail;
	VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
	if (err)
		goto bail;

	map->uncached = !ION_IS_CACHED(flags);
	/* buffers without a user VA are treated as uncached */
	if (map->attr & FASTRPC_ATTR_NOVA)
		map->uncached = 1;

	map->secure = flags & ION_FLAG_SECURE;
	if (map->secure) {
		/* lazily create the secure session on first secure buffer */
		if (!fl->secsctx)
			err = fastrpc_session_alloc(chan, 1,
					&fl->secsctx);
		if (err)
			goto bail;
	}
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->attach =
			dma_buf_attach(map->buf, sess->dev)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
	if (err)
		goto bail;
	if (sess->smmu.enabled) {
		attrs = DMA_ATTR_EXEC_MAPPING;

		/* pick coherency attrs from buffer attr + session coherence */
		if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
			(sess->smmu.coherent && map->uncached))
			attrs |= DMA_ATTR_FORCE_NON_COHERENT;
		else if (map->attr & FASTRPC_ATTR_COHERENT)
			attrs |= DMA_ATTR_FORCE_COHERENT;

		VERIFY(err, map->table->nents ==
			msm_dma_map_sg_attrs(sess->dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
		if (err)
			goto bail;
	} else {
		/* without an SMMU the buffer must be physically contiguous */
		VERIFY(err, map->table->nents == 1);
		if (err)
			goto bail;
	}
	map->phys = sg_dma_address(map->table->sgl);
	if (sess->smmu.cb) {
		/* tag the address with the context bank in bits 63:32 */
		map->phys += ((uint64_t)sess->smmu.cb << 32);
		map->size = sg_dma_len(map->table->sgl);
	} else {
		map->size = buf_page_size(len);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		/* share the pages with the channel VM */
		VERIFY(err, !hyp_assign_phys(map->phys,
				buf_page_size(map->size),
				srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}
	map->va = va;
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}
625
/*
 * Get a DMA-coherent buffer of at least @size bytes for @fl.  Reuses the
 * smallest adequate buffer from the per-file cache when possible;
 * otherwise allocates fresh memory (retrying once after flushing the
 * cache) and, when the channel has a VM configured, shares the pages with
 * it via hyp_assign_phys().  Returns 0 and sets *obuf on success.
 */
static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
			     struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = 0, *fr = 0;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = 0;
	VERIFY(err, buf = kzalloc(sizeof(*buf), GFP_KERNEL));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = 0;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
				       (void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
					       (void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	/* tag the address with the SMMU context bank in bits 63:32 */
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		/* share the new buffer with the channel VM */
		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
				srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
 bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}
691
692
693static int context_restore_interrupted(struct fastrpc_file *fl,
Sathish Ambleybae51902017-07-03 15:00:49 -0700694 struct fastrpc_ioctl_invoke_crc *inv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700695 struct smq_invoke_ctx **po)
696{
697 int err = 0;
698 struct smq_invoke_ctx *ctx = 0, *ictx = 0;
699 struct hlist_node *n;
700 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
701
702 spin_lock(&fl->hlock);
703 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
704 if (ictx->pid == current->pid) {
705 if (invoke->sc != ictx->sc || ictx->fl != fl)
706 err = -1;
707 else {
708 ctx = ictx;
709 hlist_del_init(&ctx->hn);
710 hlist_add_head(&ctx->hn, &fl->clst.pending);
711 }
712 break;
713 }
714 }
715 spin_unlock(&fl->hlock);
716 if (ctx)
717 *po = ctx;
718 return err;
719}
720
721#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
722static int overlap_ptr_cmp(const void *a, const void *b)
723{
724 struct overlap *pa = *((struct overlap **)a);
725 struct overlap *pb = *((struct overlap **)b);
726 /* sort with lowest starting buffer first */
727 int st = CMP(pa->start, pb->start);
728 /* sort with highest ending buffer first */
729 int ed = CMP(pb->end, pa->end);
730 return st == 0 ? ed : st;
731}
732
/*
 * Build the overlap table for the context's input/output buffers.  Each
 * buffer gets an [start, end) span; the spans are sorted (lowest start
 * first, larger span first on ties) and swept so that overlapping regions
 * are only copied once: mstart/mend describe the portion of each buffer
 * not already covered by an earlier span, and offset is how far into the
 * buffer that portion begins.  Returns non-zero if any buffer's length
 * wraps its address (end <= start).
 */
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			/* reject ranges that wrap the address space */
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, 0);
	/* sweep: 'max' tracks the furthest end seen so far */
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			/* overlaps an earlier span: copy only the tail */
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				/* fully contained: nothing extra to copy */
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			/* disjoint: copy the whole buffer */
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}
778
/*
 * Copy @size bytes user->kernel: copy_from_user() for userspace callers,
 * plain memmove() when @kernel is set (in-kernel invocation).  Sets @err
 * non-zero via VERIFY on a failed user copy.
 */
#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst), (src),\
							(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

/*
 * Mirror of K_COPY_FROM_USER for the kernel->user direction, using
 * copy_to_user() for userspace callers.
 */
#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((dst), (src),\
						      (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)
796
797
798static void context_free(struct smq_invoke_ctx *ctx);
799
/*
 * Allocate and initialise an invocation context for @invokefd.  The
 * context and all its per-argument arrays (maps, lpra, fds, attrs, overs,
 * overps) live in one kzalloc'd slab, carved out by the pointer
 * arithmetic below; 'bufs' counts every remote arg (buffers and handles,
 * via REMOTE_SCALARS_LENGTH).  User arguments, and optional fd/attr
 * arrays, are copied in with K_COPY_FROM_USER honouring @kernel.  The new
 * context is placed on the file's pending list.  On error the partially
 * built context is released via context_free().
 */
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			 struct fastrpc_ioctl_invoke_crc *invokefd,
			 struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	/* one slab holds the ctx plus all per-argument arrays */
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	/* carve the trailing arrays out of the single allocation */
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}
	/* user pointer; written back (if at all) after the invocation */
	ctx->crc = (uint32_t *)invokefd->crc;
	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = current->tgid;
	init_completion(&ctx->work);

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}
869
870static void context_save_interrupted(struct smq_invoke_ctx *ctx)
871{
872 struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
873
874 spin_lock(&ctx->fl->hlock);
875 hlist_del_init(&ctx->hn);
876 hlist_add_head(&ctx->hn, &clst->interrupted);
877 spin_unlock(&ctx->fl->hlock);
878 /* free the cache on power collapse */
879 fastrpc_buf_list_free(ctx->fl);
880}
881
/*
 * Unlink and destroy an invocation context: drop the per-argument buffer
 * mappings, cache the metadata buffer, and free the slab.
 *
 * NOTE(review): only the inbufs+outbufs mappings are released here, while
 * ctx->maps has REMOTE_SCALARS_LENGTH() slots and get_args() also creates
 * maps for the handle slots [bufs, bufs+handles) — presumably those are
 * released elsewhere (e.g. via the fd list on completion); confirm no
 * leak on the error path before an invocation completes.
 */
static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i]);
	/* keep the metadata buffer cached for reuse */
	fastrpc_buf_free(ctx->buf, 1);
	kfree(ctx);
}
895
/* Record the remote return value and wake the thread waiting on ctx. */
static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}
901
902
903static void fastrpc_notify_users(struct fastrpc_file *me)
904{
905 struct smq_invoke_ctx *ictx;
906 struct hlist_node *n;
907
908 spin_lock(&me->hlock);
909 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
910 complete(&ictx->work);
911 }
912 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
913 complete(&ictx->work);
914 }
915 spin_unlock(&me->hlock);
916
917}
918
919static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
920{
921 struct fastrpc_file *fl;
922 struct hlist_node *n;
923
924 spin_lock(&me->hlock);
925 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
926 if (fl->cid == cid)
927 fastrpc_notify_users(fl);
928 }
929 spin_unlock(&me->hlock);
930
931}
932static void context_list_ctor(struct fastrpc_ctx_lst *me)
933{
934 INIT_HLIST_HEAD(&me->interrupted);
935 INIT_HLIST_HEAD(&me->pending);
936}
937
938static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
939{
940 struct fastrpc_ctx_lst *clst = &fl->clst;
941 struct smq_invoke_ctx *ictx = 0, *ctxfree;
942 struct hlist_node *n;
943
944 do {
945 ctxfree = 0;
946 spin_lock(&fl->hlock);
947 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
948 hlist_del_init(&ictx->hn);
949 ctxfree = ictx;
950 break;
951 }
952 spin_unlock(&fl->hlock);
953 if (ctxfree)
954 context_free(ctxfree);
955 } while (ctxfree);
956 do {
957 ctxfree = 0;
958 spin_lock(&fl->hlock);
959 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
960 hlist_del_init(&ictx->hn);
961 ctxfree = ictx;
962 break;
963 }
964 spin_unlock(&fl->hlock);
965 if (ctxfree)
966 context_free(ctxfree);
967 } while (ctxfree);
968}
969
970static int fastrpc_file_free(struct fastrpc_file *fl);
971static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
972{
973 struct fastrpc_file *fl, *free;
974 struct hlist_node *n;
975
976 do {
977 free = 0;
978 spin_lock(&me->hlock);
979 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
980 hlist_del_init(&fl->hn);
981 free = fl;
982 break;
983 }
984 spin_unlock(&me->hlock);
985 if (free)
986 fastrpc_file_free(free);
987 } while (free);
988}
989
990static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
991{
992 remote_arg64_t *rpra;
993 remote_arg_t *lpra = ctx->lpra;
994 struct smq_invoke_buf *list;
995 struct smq_phy_page *pages, *ipage;
996 uint32_t sc = ctx->sc;
997 int inbufs = REMOTE_SCALARS_INBUFS(sc);
998 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -0800999 int handles, bufs = inbufs + outbufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001000 uintptr_t args;
1001 ssize_t rlen = 0, copylen = 0, metalen = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001002 int i, oix;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001003 int err = 0;
1004 int mflags = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001005 uint64_t *fdlist;
Sathish Ambleybae51902017-07-03 15:00:49 -07001006 uint32_t *crclist;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001007
1008 /* calculate size of the metadata */
1009 rpra = 0;
1010 list = smq_invoke_buf_start(rpra, sc);
1011 pages = smq_phy_page_start(sc, list);
1012 ipage = pages;
1013
1014 for (i = 0; i < bufs; ++i) {
1015 uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
1016 ssize_t len = lpra[i].buf.len;
1017
1018 if (ctx->fds[i] && (ctx->fds[i] != -1))
1019 fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1020 ctx->attrs[i], buf, len,
1021 mflags, &ctx->maps[i]);
1022 ipage += 1;
1023 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001024 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1025 for (i = bufs; i < bufs + handles; i++) {
1026 VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1027 FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
1028 if (err)
1029 goto bail;
1030 ipage += 1;
1031 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001032 metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
1033 (sizeof(uint32_t) * M_CRCLIST);
1034
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001035 /* calculate len requreed for copying */
1036 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1037 int i = ctx->overps[oix]->raix;
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001038 uintptr_t mstart, mend;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001039 ssize_t len = lpra[i].buf.len;
1040
1041 if (!len)
1042 continue;
1043 if (ctx->maps[i])
1044 continue;
1045 if (ctx->overps[oix]->offset == 0)
1046 copylen = ALIGN(copylen, BALIGN);
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001047 mstart = ctx->overps[oix]->mstart;
1048 mend = ctx->overps[oix]->mend;
1049 VERIFY(err, (mend - mstart) <= LONG_MAX);
1050 if (err)
1051 goto bail;
1052 copylen += mend - mstart;
1053 VERIFY(err, copylen >= 0);
1054 if (err)
1055 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001056 }
1057 ctx->used = copylen;
1058
1059 /* allocate new buffer */
1060 if (copylen) {
1061 VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
1062 if (err)
1063 goto bail;
1064 }
Tharun Kumar Merugue3361f92017-06-22 10:45:43 +05301065 if (ctx->buf->virt && metalen <= copylen)
1066 memset(ctx->buf->virt, 0, metalen);
1067
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001068 /* copy metadata */
1069 rpra = ctx->buf->virt;
1070 ctx->rpra = rpra;
1071 list = smq_invoke_buf_start(rpra, sc);
1072 pages = smq_phy_page_start(sc, list);
1073 ipage = pages;
1074 args = (uintptr_t)ctx->buf->virt + metalen;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001075 for (i = 0; i < bufs + handles; ++i) {
1076 if (lpra[i].buf.len)
1077 list[i].num = 1;
1078 else
1079 list[i].num = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001080 list[i].pgidx = ipage - pages;
1081 ipage++;
1082 }
1083 /* map ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001084 PERF(ctx->fl->profile, ctx->fl->perf.map,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001085 for (i = 0; i < inbufs + outbufs; ++i) {
1086 struct fastrpc_mmap *map = ctx->maps[i];
1087 uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
1088 ssize_t len = lpra[i].buf.len;
1089
1090 rpra[i].buf.pv = 0;
1091 rpra[i].buf.len = len;
1092 if (!len)
1093 continue;
1094 if (map) {
1095 struct vm_area_struct *vma;
1096 uintptr_t offset;
1097 int num = buf_num_pages(buf, len);
1098 int idx = list[i].pgidx;
1099
1100 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001101 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001102 } else {
1103 down_read(&current->mm->mmap_sem);
1104 VERIFY(err, NULL != (vma = find_vma(current->mm,
1105 map->va)));
1106 if (err) {
1107 up_read(&current->mm->mmap_sem);
1108 goto bail;
1109 }
1110 offset = buf_page_start(buf) - vma->vm_start;
1111 up_read(&current->mm->mmap_sem);
1112 VERIFY(err, offset < (uintptr_t)map->size);
1113 if (err)
1114 goto bail;
1115 }
1116 pages[idx].addr = map->phys + offset;
1117 pages[idx].size = num << PAGE_SHIFT;
1118 }
1119 rpra[i].buf.pv = buf;
1120 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001121 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001122 for (i = bufs; i < bufs + handles; ++i) {
1123 struct fastrpc_mmap *map = ctx->maps[i];
1124
1125 pages[i].addr = map->phys;
1126 pages[i].size = map->size;
1127 }
1128 fdlist = (uint64_t *)&pages[bufs + handles];
1129 for (i = 0; i < M_FDLIST; i++)
1130 fdlist[i] = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001131 crclist = (uint32_t *)&fdlist[M_FDLIST];
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301132 memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001133
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001134 /* copy non ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001135 PERF(ctx->fl->profile, ctx->fl->perf.copy,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001136 rlen = copylen - metalen;
1137 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1138 int i = ctx->overps[oix]->raix;
1139 struct fastrpc_mmap *map = ctx->maps[i];
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001140 ssize_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001141 uint64_t buf;
1142 ssize_t len = lpra[i].buf.len;
1143
1144 if (!len)
1145 continue;
1146 if (map)
1147 continue;
1148 if (ctx->overps[oix]->offset == 0) {
1149 rlen -= ALIGN(args, BALIGN) - args;
1150 args = ALIGN(args, BALIGN);
1151 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001152 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001153 VERIFY(err, rlen >= mlen);
1154 if (err)
1155 goto bail;
1156 rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
1157 pages[list[i].pgidx].addr = ctx->buf->phys -
1158 ctx->overps[oix]->offset +
1159 (copylen - rlen);
1160 pages[list[i].pgidx].addr =
1161 buf_page_start(pages[list[i].pgidx].addr);
1162 buf = rpra[i].buf.pv;
1163 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1164 if (i < inbufs) {
1165 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1166 lpra[i].buf.pv, len);
1167 if (err)
1168 goto bail;
1169 }
1170 args = args + mlen;
1171 rlen -= mlen;
1172 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001173 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001174
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001175 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001176 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1177 int i = ctx->overps[oix]->raix;
1178 struct fastrpc_mmap *map = ctx->maps[i];
1179
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001180 if (map && map->uncached)
1181 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301182 if (ctx->fl->sctx->smmu.coherent &&
1183 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1184 continue;
1185 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1186 continue;
1187
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001188 if (rpra[i].buf.len && ctx->overps[oix]->mstart)
1189 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1190 uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
1191 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001192 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001193 for (i = bufs; i < bufs + handles; i++) {
1194 rpra[i].dma.fd = ctx->fds[i];
1195 rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1196 rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001197 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001198
1199 if (!ctx->fl->sctx->smmu.coherent) {
1200 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001201 dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001202 PERF_END);
1203 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001204 bail:
1205 return err;
1206}
1207
/*
 * put_args() - propagate results of a remote invocation back to the caller.
 *
 * For each output buffer that was not backed by an ion mapping, copy the
 * data the DSP wrote into the kernel-side buffer back to the caller's
 * buffer; mapped output buffers need no copy, so their maps are released.
 * Afterwards, free any maps the DSP asked to be released via the fd list
 * it wrote into the metadata area, and, if the caller requested CRCs,
 * copy the CRC list out to the user-supplied address.
 *
 * Returns 0 on success, or a nonzero error from the copy-out macros.
 */
static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		    remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	uint32_t *crclist = NULL;

	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	/* fd list and crc list live after the page array in the metadata. */
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	crclist = (uint32_t *)(fdlist + M_FDLIST);

	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			/* Non-mapped outbuf: copy the result back. */
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			/* Mapped outbuf: data is shared, just drop the map. */
			fastrpc_mmap_free(ctx->maps[i]);
			ctx->maps[i] = 0;
		}
	}
	if (inbufs + outbufs + handles) {
		/* Release maps the DSP flagged for teardown (0-terminated). */
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap);
		}
	}
	/* Hand the CRC list to the caller if one was requested. */
	if (ctx->crc && crclist && rpra)
		K_COPY_TO_USER(err, kernel, (void __user *)ctx->crc,
			crclist, M_CRCLIST*sizeof(uint32_t));

 bail:
	return err;
}
1259
/*
 * inv_args_pre() - cache maintenance on output buffers before invocation.
 *
 * For every output buffer that needs explicit cache handling, flush the
 * (possibly dirty) cache lines that straddle the unaligned start and end
 * of the buffer, so later invalidation cannot discard adjacent data that
 * shares those cache lines. Buffers are skipped when they are uncached,
 * empty, covered by the SMMU/attr coherency rules below, or live on the
 * same page as the metadata (which is flushed wholesale elsewhere).
 */
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		/* Coherent session: skip unless the map opts out. */
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* Same page as the metadata: handled by the rpra flush. */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		/* Flush the partial cache line at the buffer start. */
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		/* Flush the partial cache line at the buffer end. */
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
			rpra[i].buf.len);
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}
1295
/*
 * inv_args() - invalidate caches over output buffers after the remote call.
 *
 * Once the DSP has written its results, stale cache lines covering the
 * output buffers (and the metadata area itself) must be invalidated so
 * the CPU reads the freshly written data. The skip conditions mirror
 * inv_args_pre(): uncached maps, empty buffers, coherent sessions/maps,
 * and buffers sharing the metadata page (covered by the final rpra
 * invalidate) need no per-buffer work.
 */
static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		/* Coherent session: skip unless the map opts out. */
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
			buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		/* ion-backed maps go through the ion cache op helper. */
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						 + rpra[i].buf.len));
	}

	/* Invalidate the metadata region the DSP wrote into (fds, crcs). */
	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}
1335
1336static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
1337 uint32_t kernel, uint32_t handle)
1338{
1339 struct smq_msg *msg = &ctx->msg;
1340 struct fastrpc_file *fl = ctx->fl;
1341 struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
1342 int err = 0;
1343
1344 VERIFY(err, 0 != channel_ctx->chan);
1345 if (err)
1346 goto bail;
1347 msg->pid = current->tgid;
1348 msg->tid = current->pid;
1349 if (kernel)
1350 msg->pid = 0;
1351 msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
1352 msg->invoke.header.handle = handle;
1353 msg->invoke.header.sc = ctx->sc;
1354 msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
1355 msg->invoke.page.size = buf_page_size(ctx->used);
1356
1357 if (fl->ssrcount != channel_ctx->ssrcount) {
1358 err = -ECONNRESET;
1359 goto bail;
1360 }
1361 VERIFY(err, channel_ctx->link.port_state ==
1362 FASTRPC_LINK_CONNECTED);
1363 if (err)
1364 goto bail;
1365 err = glink_tx(channel_ctx->chan,
1366 (void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
1367 GLINK_TX_REQ_INTENT);
1368 bail:
1369 return err;
1370}
1371
1372static void fastrpc_init(struct fastrpc_apps *me)
1373{
1374 int i;
1375
1376 INIT_HLIST_HEAD(&me->drivers);
1377 spin_lock_init(&me->hlock);
1378 mutex_init(&me->smd_mutex);
1379 me->channel = &gcinfo[0];
1380 for (i = 0; i < NUM_CHANNELS; i++) {
1381 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301382 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001383 me->channel[i].sesscount = 0;
1384 }
1385}
1386
1387static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1388
1389static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
1390 uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07001391 struct fastrpc_ioctl_invoke_crc *inv)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001392{
1393 struct smq_invoke_ctx *ctx = 0;
1394 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
1395 int cid = fl->cid;
1396 int interrupted = 0;
1397 int err = 0;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001398 struct timespec invoket;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001399
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001400 if (fl->profile)
1401 getnstimeofday(&invoket);
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301402
1403 VERIFY(err, fl->sctx != NULL);
1404 if (err)
1405 goto bail;
1406 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1407 if (err)
1408 goto bail;
1409
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001410 if (!kernel) {
1411 VERIFY(err, 0 == context_restore_interrupted(fl, inv,
1412 &ctx));
1413 if (err)
1414 goto bail;
1415 if (fl->sctx->smmu.faults)
1416 err = FASTRPC_ENOSUCH;
1417 if (err)
1418 goto bail;
1419 if (ctx)
1420 goto wait;
1421 }
1422
1423 VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
1424 if (err)
1425 goto bail;
1426
1427 if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001428 PERF(fl->profile, fl->perf.getargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001429 VERIFY(err, 0 == get_args(kernel, ctx));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001430 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001431 if (err)
1432 goto bail;
1433 }
1434
Sathish Ambleyc432b502017-06-05 12:03:42 -07001435 if (!fl->sctx->smmu.coherent)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001436 inv_args_pre(ctx);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001437 PERF(fl->profile, fl->perf.link,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001438 VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001439 PERF_END);
1440
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001441 if (err)
1442 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001443 wait:
1444 if (kernel)
1445 wait_for_completion(&ctx->work);
1446 else {
1447 interrupted = wait_for_completion_interruptible(&ctx->work);
1448 VERIFY(err, 0 == (err = interrupted));
1449 if (err)
1450 goto bail;
1451 }
Sathish Ambleyc432b502017-06-05 12:03:42 -07001452
1453 PERF(fl->profile, fl->perf.invargs,
1454 if (!fl->sctx->smmu.coherent)
1455 inv_args(ctx);
1456 PERF_END);
1457
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001458 VERIFY(err, 0 == (err = ctx->retval));
1459 if (err)
1460 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001461
1462 PERF(fl->profile, fl->perf.putargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001463 VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001464 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001465 if (err)
1466 goto bail;
1467 bail:
1468 if (ctx && interrupted == -ERESTARTSYS)
1469 context_save_interrupted(ctx);
1470 else if (ctx)
1471 context_free(ctx);
1472 if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
1473 err = ECONNRESET;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001474
1475 if (fl->profile && !interrupted) {
1476 if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
1477 fl->perf.invoke += getnstimediff(&invoket);
1478 if (!(invoke->handle >= 0 &&
1479 invoke->handle <= FASTRPC_STATIC_HANDLE_MAX))
1480 fl->perf.count++;
1481 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001482 return err;
1483}
1484
Sathish Ambley36849af2017-02-02 09:35:55 -08001485static int fastrpc_channel_open(struct fastrpc_file *fl);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001486static int fastrpc_init_process(struct fastrpc_file *fl,
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001487 struct fastrpc_ioctl_init_attrs *uproc)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001488{
1489 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001490 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001491 struct fastrpc_ioctl_init *init = &uproc->init;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001492 struct smq_phy_page pages[1];
1493 struct fastrpc_mmap *file = 0, *mem = 0;
1494
Sathish Ambley36849af2017-02-02 09:35:55 -08001495 VERIFY(err, !fastrpc_channel_open(fl));
1496 if (err)
1497 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001498 if (init->flags == FASTRPC_INIT_ATTACH) {
1499 remote_arg_t ra[1];
1500 int tgid = current->tgid;
1501
1502 ra[0].buf.pv = (void *)&tgid;
1503 ra[0].buf.len = sizeof(tgid);
1504 ioctl.inv.handle = 1;
1505 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
1506 ioctl.inv.pra = ra;
1507 ioctl.fds = 0;
1508 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001509 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001510 fl->pd = 0;
1511 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1512 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1513 if (err)
1514 goto bail;
1515 } else if (init->flags == FASTRPC_INIT_CREATE) {
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001516 remote_arg_t ra[6];
1517 int fds[6];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001518 int mflags = 0;
1519 struct {
1520 int pgid;
1521 int namelen;
1522 int filelen;
1523 int pageslen;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001524 int attrs;
1525 int siglen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001526 } inbuf;
1527
1528 inbuf.pgid = current->tgid;
1529 inbuf.namelen = strlen(current->comm) + 1;
1530 inbuf.filelen = init->filelen;
1531 fl->pd = 1;
1532 if (init->filelen) {
1533 VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
1534 init->file, init->filelen, mflags, &file));
1535 if (err)
1536 goto bail;
1537 }
1538 inbuf.pageslen = 1;
1539 VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
1540 init->mem, init->memlen, mflags, &mem));
1541 if (err)
1542 goto bail;
1543 inbuf.pageslen = 1;
1544 ra[0].buf.pv = (void *)&inbuf;
1545 ra[0].buf.len = sizeof(inbuf);
1546 fds[0] = 0;
1547
1548 ra[1].buf.pv = (void *)current->comm;
1549 ra[1].buf.len = inbuf.namelen;
1550 fds[1] = 0;
1551
1552 ra[2].buf.pv = (void *)init->file;
1553 ra[2].buf.len = inbuf.filelen;
1554 fds[2] = init->filefd;
1555
1556 pages[0].addr = mem->phys;
1557 pages[0].size = mem->size;
1558 ra[3].buf.pv = (void *)pages;
1559 ra[3].buf.len = 1 * sizeof(*pages);
1560 fds[3] = 0;
1561
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001562 inbuf.attrs = uproc->attrs;
1563 ra[4].buf.pv = (void *)&(inbuf.attrs);
1564 ra[4].buf.len = sizeof(inbuf.attrs);
1565 fds[4] = 0;
1566
1567 inbuf.siglen = uproc->siglen;
1568 ra[5].buf.pv = (void *)&(inbuf.siglen);
1569 ra[5].buf.len = sizeof(inbuf.siglen);
1570 fds[5] = 0;
1571
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001572 ioctl.inv.handle = 1;
1573 ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001574 if (uproc->attrs)
1575 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001576 ioctl.inv.pra = ra;
1577 ioctl.fds = fds;
1578 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001579 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001580 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1581 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1582 if (err)
1583 goto bail;
1584 } else {
1585 err = -ENOTTY;
1586 }
1587bail:
1588 if (mem && err)
1589 fastrpc_mmap_free(mem);
1590 if (file)
1591 fastrpc_mmap_free(file);
1592 return err;
1593}
1594
1595static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
1596{
1597 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001598 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001599 remote_arg_t ra[1];
1600 int tgid = 0;
1601
Sathish Ambley36849af2017-02-02 09:35:55 -08001602 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1603 if (err)
1604 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001605 VERIFY(err, fl->apps->channel[fl->cid].chan != 0);
1606 if (err)
1607 goto bail;
1608 tgid = fl->tgid;
1609 ra[0].buf.pv = (void *)&tgid;
1610 ra[0].buf.len = sizeof(tgid);
1611 ioctl.inv.handle = 1;
1612 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
1613 ioctl.inv.pra = ra;
1614 ioctl.fds = 0;
1615 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001616 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001617 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1618 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1619bail:
1620 return err;
1621}
1622
/*
 * fastrpc_mmap_on_dsp() - register a mapping with the remote process.
 *
 * Sends the caller's pid, the local virtual address/flags, and the
 * physical page describing the mapping; the DSP returns the remote
 * virtual address, which is stored in map->raddr. Compat (32-bit)
 * callers use a different remote method index and express num in bytes.
 *
 * Note: the inargs/routargs struct layouts are part of the remote wire
 * format; do not reorder their members.
 */
static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
			       struct fastrpc_mmap *map)
{
	struct fastrpc_ioctl_invoke_crc ioctl;
	struct smq_phy_page page;
	int num = 1;
	remote_arg_t ra[3];
	int err = 0;
	struct {
		int pid;
		uint32_t flags;
		uintptr_t vaddrin;
		int num;
	} inargs;
	struct {
		uintptr_t vaddrout;
	} routargs;

	inargs.pid = current->tgid;
	inargs.vaddrin = (uintptr_t)map->va;
	inargs.flags = flags;
	inargs.num = fl->apps->compat ? num * sizeof(page) : num;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);
	page.addr = map->phys;
	page.size = map->size;
	ra[1].buf.pv = (void *)&page;
	ra[1].buf.len = num * sizeof(page);

	ra[2].buf.pv = (void *)&routargs;
	ra[2].buf.len = sizeof(routargs);

	ioctl.inv.handle = 1;
	if (fl->apps->compat)
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
	else
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	ioctl.crc = NULL;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	/* Remember where the DSP mapped the buffer in its address space. */
	map->raddr = (uintptr_t)routargs.vaddrout;

	return err;
}
1670
1671static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
1672 struct fastrpc_mmap *map)
1673{
Sathish Ambleybae51902017-07-03 15:00:49 -07001674 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001675 remote_arg_t ra[1];
1676 int err = 0;
1677 struct {
1678 int pid;
1679 uintptr_t vaddrout;
1680 ssize_t size;
1681 } inargs;
1682
1683 inargs.pid = current->tgid;
1684 inargs.size = map->size;
1685 inargs.vaddrout = map->raddr;
1686 ra[0].buf.pv = (void *)&inargs;
1687 ra[0].buf.len = sizeof(inargs);
1688
1689 ioctl.inv.handle = 1;
1690 if (fl->apps->compat)
1691 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
1692 else
1693 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
1694 ioctl.inv.pra = ra;
1695 ioctl.fds = 0;
1696 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001697 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001698 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1699 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1700 return err;
1701}
1702
1703static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
1704 ssize_t len, struct fastrpc_mmap **ppmap);
1705
1706static void fastrpc_mmap_add(struct fastrpc_mmap *map);
1707
/*
 * fastrpc_internal_munmap() - unmap a buffer from the remote process.
 *
 * The mapping is first detached from the local list, then the DSP is
 * told to drop it, and only then is the local mapping freed. If any
 * step fails, the mapping is re-added to the list so local state stays
 * consistent with the remote side.
 */
static int fastrpc_internal_munmap(struct fastrpc_file *fl,
				   struct fastrpc_ioctl_munmap *ud)
{
	int err = 0;
	struct fastrpc_mmap *map = 0;

	VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
	if (err)
		goto bail;
	VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
	if (err)
		goto bail;
	fastrpc_mmap_free(map);
bail:
	/* Roll back the removal on failure. */
	if (err && map)
		fastrpc_mmap_add(map);
	return err;
}
1726
1727static int fastrpc_internal_mmap(struct fastrpc_file *fl,
1728 struct fastrpc_ioctl_mmap *ud)
1729{
1730
1731 struct fastrpc_mmap *map = 0;
1732 int err = 0;
1733
1734 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin, ud->size,
Sathish Ambleyae5ee542017-01-16 22:24:23 -08001735 ud->flags, 1, &map))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001736 return 0;
1737
1738 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
1739 (uintptr_t)ud->vaddrin, ud->size, ud->flags, &map));
1740 if (err)
1741 goto bail;
1742 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
1743 if (err)
1744 goto bail;
1745 ud->vaddrout = map->raddr;
1746 bail:
1747 if (err && map)
1748 fastrpc_mmap_free(map);
1749 return err;
1750}
1751
/*
 * fastrpc_channel_close() - kref release callback for a channel.
 *
 * Invoked via kref_put_mutex() with me->smd_mutex HELD; this function is
 * responsible for releasing that mutex after tearing down the glink
 * channel and its link-state callback. Do not call directly.
 */
static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	/* Channel id is the index of this ctx within the global array. */
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = 0;
	glink_unregister_link_state_cb(ctx->link.link_notify_handle);
	ctx->link.link_notify_handle = NULL;
	/* Pairs with the lock taken by kref_put_mutex() in the caller. */
	mutex_unlock(&me->smd_mutex);
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
		MAJOR(me->dev_no), cid);
}
1768
1769static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
1770
1771static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
1772 int secure, struct fastrpc_session_ctx **session)
1773{
1774 struct fastrpc_apps *me = &gfa;
1775 int idx = 0, err = 0;
1776
1777 if (chan->sesscount) {
1778 for (idx = 0; idx < chan->sesscount; ++idx) {
1779 if (!chan->session[idx].used &&
1780 chan->session[idx].smmu.secure == secure) {
1781 chan->session[idx].used = 1;
1782 break;
1783 }
1784 }
1785 VERIFY(err, idx < chan->sesscount);
1786 if (err)
1787 goto bail;
1788 chan->session[idx].smmu.faults = 0;
1789 } else {
1790 VERIFY(err, me->dev != NULL);
1791 if (err)
1792 goto bail;
1793 chan->session[0].dev = me->dev;
1794 }
1795
1796 *session = &chan->session[idx];
1797 bail:
1798 return err;
1799}
1800
1801bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv, size_t size)
1802{
1803 if (glink_queue_rx_intent(h, NULL, size))
1804 return false;
1805 return true;
1806}
1807
/*
 * fastrpc_glink_notify_tx_done() - glink tx-done callback.
 *
 * Intentionally empty: the transmitted message is the embedded ctx->msg
 * of the invoke context (see fastrpc_invoke_send()), so there is no
 * per-transmit buffer to reclaim here.
 */
void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
	const void *pkt_priv, const void *ptr)
{
}
1812
1813void fastrpc_glink_notify_rx(void *handle, const void *priv,
1814 const void *pkt_priv, const void *ptr, size_t size)
1815{
1816 struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
1817 int len = size;
1818
1819 while (len >= sizeof(*rsp) && rsp) {
1820 rsp->ctx = rsp->ctx & ~1;
1821 context_notify_user(uint64_to_ptr(rsp->ctx), rsp->retval);
1822 rsp++;
1823 len = len - sizeof(*rsp);
1824 }
1825 glink_rx_done(handle, ptr, true);
1826}
1827
1828void fastrpc_glink_notify_state(void *handle, const void *priv,
1829 unsigned int event)
1830{
1831 struct fastrpc_apps *me = &gfa;
1832 int cid = (int)(uintptr_t)priv;
1833 struct fastrpc_glink_info *link;
1834
1835 if (cid < 0 || cid >= NUM_CHANNELS)
1836 return;
1837 link = &me->channel[cid].link;
1838 switch (event) {
1839 case GLINK_CONNECTED:
1840 link->port_state = FASTRPC_LINK_CONNECTED;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301841 complete(&me->channel[cid].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001842 break;
1843 case GLINK_LOCAL_DISCONNECTED:
1844 link->port_state = FASTRPC_LINK_DISCONNECTED;
1845 break;
1846 case GLINK_REMOTE_DISCONNECTED:
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05301847 if (me->channel[cid].chan) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001848 fastrpc_glink_close(me->channel[cid].chan, cid);
1849 me->channel[cid].chan = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001850 }
1851 break;
1852 default:
1853 break;
1854 }
1855}
1856
1857static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
1858 struct fastrpc_session_ctx **session)
1859{
1860 int err = 0;
1861 struct fastrpc_apps *me = &gfa;
1862
1863 mutex_lock(&me->smd_mutex);
1864 if (!*session)
1865 err = fastrpc_session_alloc_locked(chan, secure, session);
1866 mutex_unlock(&me->smd_mutex);
1867 return err;
1868}
1869
1870static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
1871 struct fastrpc_session_ctx *session)
1872{
1873 struct fastrpc_apps *me = &gfa;
1874
1875 mutex_lock(&me->smd_mutex);
1876 session->used = 0;
1877 mutex_unlock(&me->smd_mutex);
1878}
1879
/*
 * fastrpc_file_free() - tear down all per-file state.
 *
 * Removes the file from the global driver list, releases the remote
 * process, destroys pending invoke contexts, buffers and mappings, and
 * drops the channel reference (which may close the channel; the kref
 * release path unlocks smd_mutex). A file that never acquired a session
 * context is freed immediately after list removal.
 */
static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n;
	struct fastrpc_mmap *map = 0;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	/* No session was ever set up: nothing remote to release. */
	if (!fl->sctx) {
		kfree(fl);
		return 0;
	}
	(void)fastrpc_release_current_dsp_process(fl);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map);
	}
	/* Only drop the channel ref if no subsystem restart intervened. */
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
				fastrpc_channel_close, &fl->apps->smd_mutex);
	if (fl->sctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
	kfree(fl);
	return 0;
}
1914
1915static int fastrpc_device_release(struct inode *inode, struct file *file)
1916{
1917 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
1918
1919 if (fl) {
Sathish Ambley1ca68232017-01-19 10:32:55 -08001920 if (fl->debugfs_file != NULL)
1921 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001922 fastrpc_file_free(fl);
1923 file->private_data = 0;
1924 }
1925 return 0;
1926}
1927
1928static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
1929 void *priv)
1930{
1931 struct fastrpc_apps *me = &gfa;
1932 int cid = (int)((uintptr_t)priv);
1933 struct fastrpc_glink_info *link;
1934
1935 if (cid < 0 || cid >= NUM_CHANNELS)
1936 return;
1937
1938 link = &me->channel[cid].link;
1939 switch (cb_info->link_state) {
1940 case GLINK_LINK_STATE_UP:
1941 link->link_state = FASTRPC_LINK_STATE_UP;
1942 complete(&me->channel[cid].work);
1943 break;
1944 case GLINK_LINK_STATE_DOWN:
1945 link->link_state = FASTRPC_LINK_STATE_DOWN;
1946 break;
1947 default:
1948 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
1949 break;
1950 }
1951}
1952
/*
 * fastrpc_glink_register() - register the link-state callback for @cid
 * and wait (bounded by RPC_TIMEOUT) for the link to come up.
 *
 * Idempotent: if a notify handle is already registered, returns 0
 * without waiting. Returns nonzero on bad cid, registration failure, or
 * timeout (wait_for_completion_timeout() returns 0 on timeout, which
 * VERIFY turns into an error).
 */
static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;

	link = &me->channel[cid].link;
	/* Already registered: nothing to do. */
	if (link->link_notify_handle != NULL)
		goto bail;

	link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
	link->link_notify_handle = glink_register_link_state_cb(
				&link->link_info,
				(void *)((uintptr_t)cid));
	VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
	if (err) {
		link->link_notify_handle = NULL;
		goto bail;
	}
	/* Wait for fastrpc_link_state_handler() to signal link-up. */
	VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
						RPC_TIMEOUT));
bail:
	return err;
}
1980
1981static void fastrpc_glink_close(void *chan, int cid)
1982{
1983 int err = 0;
1984 struct fastrpc_glink_info *link;
1985
1986 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
1987 if (err)
1988 return;
1989 link = &gfa.channel[cid].link;
1990
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301991 if (link->port_state == FASTRPC_LINK_CONNECTED) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001992 link->port_state = FASTRPC_LINK_DISCONNECTING;
1993 glink_close(chan);
1994 }
1995}
1996
/*
 * fastrpc_glink_open() - open the glink port for channel @cid.
 *
 * Preconditions enforced here: valid cid, link state UP (set by the
 * link-state callback), and port currently DISCONNECTED. On success the
 * port moves to CONNECTING and the open handle is stored in the channel;
 * GLINK_CONNECTED is reported later via fastrpc_glink_notify_state().
 */
static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &me->channel[cid].link.cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
	if (err)
		goto bail;

	link->port_state = FASTRPC_LINK_CONNECTING;
	/* priv carries the channel id back into the glink callbacks. */
	cfg->priv = (void *)(uintptr_t)cid;
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err)
		goto bail;
	me->channel[cid].chan = handle;
bail:
	return err;
}
2035
Sathish Ambley1ca68232017-01-19 10:32:55 -08002036static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
2037{
2038 filp->private_data = inode->i_private;
2039 return 0;
2040}
2041
2042static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
2043 size_t count, loff_t *position)
2044{
2045 struct fastrpc_file *fl = filp->private_data;
2046 struct hlist_node *n;
2047 struct fastrpc_buf *buf = 0;
2048 struct fastrpc_mmap *map = 0;
2049 struct smq_invoke_ctx *ictx = 0;
2050 struct fastrpc_channel_ctx *chan;
2051 struct fastrpc_session_ctx *sess;
2052 unsigned int len = 0;
2053 int i, j, ret = 0;
2054 char *fileinfo = NULL;
2055
2056 fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
2057 if (!fileinfo)
2058 goto bail;
2059 if (fl == NULL) {
2060 for (i = 0; i < NUM_CHANNELS; i++) {
2061 chan = &gcinfo[i];
2062 len += scnprintf(fileinfo + len,
2063 DEBUGFS_SIZE - len, "%s\n\n",
2064 chan->name);
2065 len += scnprintf(fileinfo + len,
2066 DEBUGFS_SIZE - len, "%s %d\n",
2067 "sesscount:", chan->sesscount);
2068 for (j = 0; j < chan->sesscount; j++) {
2069 sess = &chan->session[j];
2070 len += scnprintf(fileinfo + len,
2071 DEBUGFS_SIZE - len,
2072 "%s%d\n\n", "SESSION", j);
2073 len += scnprintf(fileinfo + len,
2074 DEBUGFS_SIZE - len,
2075 "%s %d\n", "sid:",
2076 sess->smmu.cb);
2077 len += scnprintf(fileinfo + len,
2078 DEBUGFS_SIZE - len,
2079 "%s %d\n", "SECURE:",
2080 sess->smmu.secure);
2081 }
2082 }
2083 } else {
2084 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2085 "%s %d\n\n",
2086 "PROCESS_ID:", fl->tgid);
2087 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2088 "%s %d\n\n",
2089 "CHANNEL_ID:", fl->cid);
2090 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2091 "%s %d\n\n",
2092 "SSRCOUNT:", fl->ssrcount);
2093 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2094 "%s\n",
2095 "LIST OF BUFS:");
2096 spin_lock(&fl->hlock);
2097 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
2098 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2099 "%s %p %s %p %s %llx\n", "buf:",
2100 buf, "buf->virt:", buf->virt,
2101 "buf->phys:", buf->phys);
2102 }
2103 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2104 "\n%s\n",
2105 "LIST OF MAPS:");
2106 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
2107 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2108 "%s %p %s %lx %s %llx\n",
2109 "map:", map,
2110 "map->va:", map->va,
2111 "map->phys:", map->phys);
2112 }
2113 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2114 "\n%s\n",
2115 "LIST OF PENDING SMQCONTEXTS:");
2116 hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
2117 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2118 "%s %p %s %u %s %u %s %u\n",
2119 "smqcontext:", ictx,
2120 "sc:", ictx->sc,
2121 "tid:", ictx->pid,
2122 "handle", ictx->rpra->h);
2123 }
2124 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2125 "\n%s\n",
2126 "LIST OF INTERRUPTED SMQCONTEXTS:");
2127 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
2128 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2129 "%s %p %s %u %s %u %s %u\n",
2130 "smqcontext:", ictx,
2131 "sc:", ictx->sc,
2132 "tid:", ictx->pid,
2133 "handle", ictx->rpra->h);
2134 }
2135 spin_unlock(&fl->hlock);
2136 }
2137 if (len > DEBUGFS_SIZE)
2138 len = DEBUGFS_SIZE;
2139 ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
2140 kfree(fileinfo);
2141bail:
2142 return ret;
2143}
2144
/* File operations for the adsprpc debugfs nodes (read-only dumps). */
static const struct file_operations debugfs_fops = {
	.open = fastrpc_debugfs_open,
	.read = fastrpc_debugfs_read,
};
Sathish Ambley36849af2017-02-02 09:35:55 -08002149static int fastrpc_channel_open(struct fastrpc_file *fl)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002150{
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002151 struct fastrpc_apps *me = &gfa;
Sathish Ambley36849af2017-02-02 09:35:55 -08002152 int cid, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002153
2154 mutex_lock(&me->smd_mutex);
2155
Sathish Ambley36849af2017-02-02 09:35:55 -08002156 VERIFY(err, fl && fl->sctx);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002157 if (err)
2158 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08002159 cid = fl->cid;
2160 VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
2161 if (err)
2162 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002163 fl->ssrcount = me->channel[cid].ssrcount;
2164 if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
2165 (me->channel[cid].chan == 0)) {
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05302166 VERIFY(err, 0 == fastrpc_glink_register(cid, me));
2167 if (err)
2168 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002169 VERIFY(err, 0 == fastrpc_glink_open(cid));
2170 if (err)
2171 goto bail;
2172
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302173 VERIFY(err,
2174 wait_for_completion_timeout(&me->channel[cid].workport,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002175 RPC_TIMEOUT));
2176 if (err) {
2177 me->channel[cid].chan = 0;
2178 goto bail;
2179 }
2180 kref_init(&me->channel[cid].kref);
2181 pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
2182 MAJOR(me->dev_no), cid);
2183 if (me->channel[cid].ssrcount !=
2184 me->channel[cid].prevssrcount) {
2185 me->channel[cid].prevssrcount =
2186 me->channel[cid].ssrcount;
2187 }
2188 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002189
2190bail:
2191 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002192 return err;
2193}
2194
Sathish Ambley36849af2017-02-02 09:35:55 -08002195static int fastrpc_device_open(struct inode *inode, struct file *filp)
2196{
2197 int err = 0;
Sathish Ambley567012b2017-03-06 11:55:04 -08002198 struct dentry *debugfs_file;
Sathish Ambley36849af2017-02-02 09:35:55 -08002199 struct fastrpc_file *fl = 0;
2200 struct fastrpc_apps *me = &gfa;
2201
2202 VERIFY(err, fl = kzalloc(sizeof(*fl), GFP_KERNEL));
2203 if (err)
2204 return err;
Sathish Ambley567012b2017-03-06 11:55:04 -08002205 debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
2206 fl, &debugfs_fops);
Sathish Ambley36849af2017-02-02 09:35:55 -08002207 context_list_ctor(&fl->clst);
2208 spin_lock_init(&fl->hlock);
2209 INIT_HLIST_HEAD(&fl->maps);
2210 INIT_HLIST_HEAD(&fl->bufs);
2211 INIT_HLIST_NODE(&fl->hn);
2212 fl->tgid = current->tgid;
2213 fl->apps = me;
2214 fl->mode = FASTRPC_MODE_SERIAL;
2215 fl->cid = -1;
Sathish Ambley567012b2017-03-06 11:55:04 -08002216 if (debugfs_file != NULL)
2217 fl->debugfs_file = debugfs_file;
2218 memset(&fl->perf, 0, sizeof(fl->perf));
Sathish Ambley36849af2017-02-02 09:35:55 -08002219 filp->private_data = fl;
2220 spin_lock(&me->hlock);
2221 hlist_add_head(&fl->hn, &me->drivers);
2222 spin_unlock(&me->hlock);
2223 return 0;
2224}
2225
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002226static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
2227{
2228 int err = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08002229 uint32_t cid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002230
Sathish Ambley36849af2017-02-02 09:35:55 -08002231 VERIFY(err, fl != 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002232 if (err)
2233 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08002234 if (fl->cid == -1) {
2235 cid = *info;
2236 VERIFY(err, cid < NUM_CHANNELS);
2237 if (err)
2238 goto bail;
2239 fl->cid = cid;
2240 fl->ssrcount = fl->apps->channel[cid].ssrcount;
2241 VERIFY(err, !fastrpc_session_alloc_locked(
2242 &fl->apps->channel[cid], 0, &fl->sctx));
2243 if (err)
2244 goto bail;
2245 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002246 *info = (fl->sctx->smmu.enabled ? 1 : 0);
2247bail:
2248 return err;
2249}
2250
/*
 * Main ioctl dispatcher for /dev/adsprpc-*.
 *
 * The four INVOKE variants share one handler via deliberate switch
 * fall-through: each case only sets `size` to its own (smaller)
 * struct size, so copy_from_user reads exactly the fields that
 * variant defines, while the pre-zeroed fds/attrs/crc members cover
 * the rest of the union. INIT/INIT_ATTRS use the same pattern.
 */
static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
				 unsigned long ioctl_param)
{
	union {
		struct fastrpc_ioctl_invoke_crc inv;
		struct fastrpc_ioctl_mmap mmap;
		struct fastrpc_ioctl_munmap munmap;
		struct fastrpc_ioctl_init_attrs init;
		struct fastrpc_ioctl_perf perf;
	} p;
	void *param = (char *)ioctl_param;
	struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
	int size = 0, err = 0;
	uint32_t info;

	/* Defaults for the INVOKE union members not present in every
	 * variant; a partial copy_from_user below leaves these intact.
	 */
	p.inv.fds = 0;
	p.inv.attrs = 0;
	p.inv.crc = NULL;

	switch (ioctl_num) {
	case FASTRPC_IOCTL_INVOKE:
		size = sizeof(struct fastrpc_ioctl_invoke);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_FD:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_fd);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_ATTRS:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_attrs);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_CRC:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_crc);
		VERIFY(err, 0 == copy_from_user(&p.inv, param, size));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
						0, &p.inv)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MMAP:
		VERIFY(err, 0 == copy_from_user(&p.mmap, param,
						sizeof(p.mmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
		if (err)
			goto bail;
		/* Copy back so userspace learns the assigned vaddrout. */
		VERIFY(err, 0 == copy_to_user(param, &p.mmap, sizeof(p.mmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MUNMAP:
		VERIFY(err, 0 == copy_from_user(&p.munmap, param,
						sizeof(p.munmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
							&p.munmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_SETMODE:
		/* Mode is passed by value, not via a user pointer. */
		switch ((uint32_t)ioctl_param) {
		case FASTRPC_MODE_PARALLEL:
		case FASTRPC_MODE_SERIAL:
			fl->mode = (uint32_t)ioctl_param;
			break;
		case FASTRPC_MODE_PROFILE:
			fl->profile = (uint32_t)ioctl_param;
			break;
		default:
			err = -ENOTTY;
			break;
		}
		break;
	case FASTRPC_IOCTL_GETPERF:
		VERIFY(err, 0 == copy_from_user(&p.perf,
					param, sizeof(p.perf)));
		if (err)
			goto bail;
		p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
		if (p.perf.keys) {
			char *keys = PERF_KEYS;

			/*
			 * NOTE(review): copies strlen(keys)+1 bytes into a
			 * user-supplied pointer with no user-side length —
			 * confirm the ABI guarantees the buffer is large
			 * enough.
			 */
			VERIFY(err, 0 == copy_to_user((char *)p.perf.keys,
						 keys, strlen(keys)+1));
			if (err)
				goto bail;
		}
		if (p.perf.data) {
			VERIFY(err, 0 == copy_to_user((int64_t *)p.perf.data,
						 &fl->perf, sizeof(fl->perf)));
		}
		VERIFY(err, 0 == copy_to_user(param, &p.perf, sizeof(p.perf)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_GETINFO:
		VERIFY(err, 0 == copy_from_user(&info, param, sizeof(info)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
		if (err)
			goto bail;
		VERIFY(err, 0 == copy_to_user(param, &info, sizeof(info)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_INIT:
		/* Plain INIT has no attrs/siglen fields; zero them. */
		p.init.attrs = 0;
		p.init.siglen = 0;
		size = sizeof(struct fastrpc_ioctl_init);
		/* fall through */
	case FASTRPC_IOCTL_INIT_ATTRS:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_init_attrs);
		VERIFY(err, 0 == copy_from_user(&p.init, param, size));
		if (err)
			goto bail;
		VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
		if (err)
			goto bail;
		break;

	default:
		err = -ENOTTY;
		pr_info("bad ioctl: %d\n", ioctl_num);
		break;
	}
 bail:
	return err;
}
2386
2387static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
2388 unsigned long code,
2389 void *data)
2390{
2391 struct fastrpc_apps *me = &gfa;
2392 struct fastrpc_channel_ctx *ctx;
2393 int cid;
2394
2395 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
2396 cid = ctx - &me->channel[0];
2397 if (code == SUBSYS_BEFORE_SHUTDOWN) {
2398 mutex_lock(&me->smd_mutex);
2399 ctx->ssrcount++;
2400 if (ctx->chan) {
2401 fastrpc_glink_close(ctx->chan, cid);
2402 ctx->chan = 0;
2403 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
2404 gcinfo[cid].name, MAJOR(me->dev_no), cid);
2405 }
2406 mutex_unlock(&me->smd_mutex);
2407 fastrpc_notify_drivers(me, cid);
2408 }
2409
2410 return NOTIFY_DONE;
2411}
2412
/* Character-device file operations for /dev/adsprpc-*. */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};
2419
/* Device-tree compatibles handled by fastrpc_probe()/fastrpc_cb_probe(). */
static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};
2427
/*
 * Probe one SMMU context bank ("qcom,msm-fastrpc-compute-cb" node).
 *
 * Matches the node's "label" against a known channel, parses its
 * "iommus" phandle for the context-bank id, creates and attaches an
 * ARM IOMMU mapping (secure banks get a different IOVA base and a
 * secure VMID attribute), and registers the session on the channel.
 */
static int fastrpc_cb_probe(struct device *dev)
{
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	struct of_phandle_args iommuspec;
	const char *name;
	unsigned int start = 0x80000000;
	int err = 0, i;
	int secure_vmid = VMID_CP_PIXEL;

	VERIFY(err, 0 != (name = of_get_property(dev->of_node, "label", NULL)));
	if (err)
		goto bail;
	/* Find the channel whose name matches this context bank's label. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		if (!strcmp(name, gcinfo[i].name))
			break;
	}
	VERIFY(err, i < NUM_CHANNELS);
	if (err)
		goto bail;
	chan = &gcinfo[i];
	VERIFY(err, chan->sesscount < NUM_SESSIONS);
	if (err)
		goto bail;

	VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
						"#iommu-cells", 0, &iommuspec));
	if (err)
		goto bail;
	sess = &chan->session[chan->sesscount];
	/* Low nibble of the first iommus arg is the context-bank id. */
	sess->smmu.cb = iommuspec.args[0] & 0xf;
	sess->used = 0;
	sess->smmu.coherent = of_property_read_bool(dev->of_node,
						"dma-coherent");
	sess->smmu.secure = of_property_read_bool(dev->of_node,
						"qcom,secure-context-bank");
	/* Secure banks use a lower IOVA base. */
	if (sess->smmu.secure)
		start = 0x60000000;
	VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
				arm_iommu_create_mapping(&platform_bus_type,
						start, 0x7fffffff)));
	if (err)
		goto bail;

	if (sess->smmu.secure)
		iommu_domain_set_attr(sess->smmu.mapping->domain,
				DOMAIN_ATTR_SECURE_VMID,
				&secure_vmid);

	VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
	if (err)
		goto bail;
	sess->dev = dev;
	sess->smmu.enabled = 1;
	chan->sesscount++;
	/*
	 * NOTE(review): this recreates the "global" debugfs node on every
	 * context-bank probe — confirm whether a single creation at init
	 * time was intended.
	 */
	debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
					NULL, &debugfs_fops);
bail:
	return err;
}
2490
/*
 * Platform-driver probe. Dispatches on the node's compatible:
 *  - "qcom,msm-fastrpc-compute-cb": one SMMU context bank, handled by
 *    fastrpc_cb_probe();
 *  - "qcom,msm-adsprpc-mem-region": locate the ADSP ION heap's CMA
 *    region and hyp-assign it so the remote processors (MSS/SSC/ADSP)
 *    share access with HLOS;
 *  - otherwise: populate child nodes so the above get probed.
 */
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			/* Walk ION heaps looking for the ADSP heap's CMA. */
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
						VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					};

			/* Grant the region to all four VMIDs at once. */
			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
					srcVM, 1, destVM, destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					 fastrpc_match_table,
					 NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}
2554
2555static void fastrpc_deinit(void)
2556{
2557 struct fastrpc_apps *me = &gfa;
2558 struct fastrpc_channel_ctx *chan = gcinfo;
2559 int i, j;
2560
2561 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
2562 if (chan->chan) {
2563 kref_put_mutex(&chan->kref,
2564 fastrpc_channel_close, &me->smd_mutex);
2565 chan->chan = 0;
2566 }
2567 for (j = 0; j < NUM_SESSIONS; j++) {
2568 struct fastrpc_session_ctx *sess = &chan->session[j];
2569
2570 if (sess->smmu.enabled) {
2571 arm_iommu_detach_device(sess->dev);
2572 sess->dev = 0;
2573 }
2574 if (sess->smmu.mapping) {
2575 arm_iommu_release_mapping(sess->smmu.mapping);
2576 sess->smmu.mapping = 0;
2577 }
2578 }
2579 }
2580}
2581
/* Platform driver matching all fastrpc device-tree compatibles. */
static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};
2590
/*
 * Module init: register the platform driver, allocate the char-device
 * region, create the device class/node, hook each channel into
 * subsystem-restart notifications, create the ION client and the
 * debugfs root. Errors unwind through the labelled goto chain in
 * reverse order of acquisition.
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	struct device *dev = 0;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				1));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	/* One device node is shared by all channels (minor 0). */
	dev = device_create(me->class, NULL,
				MKDEV(MAJOR(me->dev_no), 0),
				NULL, gcinfo[0].name);
	VERIFY(err, !IS_ERR_OR_NULL(dev));
	if (err)
		goto device_create_bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		me->channel[i].dev = dev;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	debugfs_root = debugfs_create_dir("adsprpc", NULL);
	return 0;
device_create_bail:
	/* Also reached from ION failure: notifiers may be registered. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (me->channel[i].handle)
			subsys_notif_unregister_notifier(me->channel[i].handle,
							&me->channel[i].nb);
	}
	if (!IS_ERR_OR_NULL(dev))
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}
2659
/*
 * Module exit: tear down all open files and channels, unregister the
 * per-channel SSR notifiers, then release the device class, cdev,
 * chrdev region, ION client and debugfs tree.
 */
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;
	int i;

	fastrpc_file_list_dtor(me);
	fastrpc_deinit();
	for (i = 0; i < NUM_CHANNELS; i++) {
		/* Skip channel slots that were never configured. */
		if (!gcinfo[i].name)
			continue;
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						&me->channel[i].nb);
	}
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
	ion_client_destroy(me->client);
	debugfs_remove_recursive(debugfs_root);
}
2680
2681late_initcall(fastrpc_device_init);
2682module_exit(fastrpc_device_exit);
2683
2684MODULE_LICENSE("GPL v2");