blob: 4e0dfd186343569cd7bdde903a92fa83ec97919a [file] [log] [blame]
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001/*
Sathish Ambleyae5ee542017-01-16 22:24:23 -08002 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
29#include <soc/qcom/subsystem_notif.h>
30#include <soc/qcom/subsystem_restart.h>
31#include <linux/scatterlist.h>
32#include <linux/fs.h>
33#include <linux/uaccess.h>
34#include <linux/device.h>
35#include <linux/of.h>
36#include <linux/of_address.h>
37#include <linux/of_platform.h>
38#include <linux/dma-contiguous.h>
39#include <linux/cma.h>
40#include <linux/iommu.h>
41#include <linux/kref.h>
42#include <linux/sort.h>
43#include <linux/msm_dma_iommu_mapping.h>
44#include <asm/dma-iommu.h>
45#include "adsprpc_compat.h"
46#include "adsprpc_shared.h"
Sathish Ambley1ca68232017-01-19 10:32:55 -080047#include <linux/debugfs.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070048
49#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
50#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
51#define TZ_PIL_AUTH_QDSP6_PROC 1
52#define FASTRPC_ENOSUCH 39
53#define VMID_SSC_Q6 5
54#define VMID_ADSP_Q6 6
Sathish Ambley1ca68232017-01-19 10:32:55 -080055#define DEBUGFS_SIZE 1024
Sathish Ambley69e1ab02016-10-18 10:28:15 -070056
57#define RPC_TIMEOUT (5 * HZ)
58#define BALIGN 128
59#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
60#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
Sathish Ambleybae51902017-07-03 15:00:49 -070061#define M_FDLIST (16)
62#define M_CRCLIST (64)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070063
64#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
65
66#define FASTRPC_LINK_STATE_DOWN (0x0)
67#define FASTRPC_LINK_STATE_UP (0x1)
68#define FASTRPC_LINK_DISCONNECTED (0x0)
69#define FASTRPC_LINK_CONNECTING (0x1)
70#define FASTRPC_LINK_CONNECTED (0x3)
71#define FASTRPC_LINK_DISCONNECTING (0x7)
72
Sathish Ambleya21b5b52017-01-11 16:11:01 -080073#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
74#define FASTRPC_STATIC_HANDLE_LISTENER (3)
75#define FASTRPC_STATIC_HANDLE_MAX (20)
76
77#define PERF_END (void)0
78
79#define PERF(enb, cnt, ff) \
80 {\
81 struct timespec startT = {0};\
82 if (enb) {\
83 getnstimeofday(&startT);\
84 } \
85 ff ;\
86 if (enb) {\
87 cnt += getnstimediff(&startT);\
88 } \
89 }
90
Sathish Ambley69e1ab02016-10-18 10:28:15 -070091static int fastrpc_glink_open(int cid);
92static void fastrpc_glink_close(void *chan, int cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -080093static struct dentry *debugfs_root;
94static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -070095
96static inline uint64_t buf_page_start(uint64_t buf)
97{
98 uint64_t start = (uint64_t) buf & PAGE_MASK;
99 return start;
100}
101
102static inline uint64_t buf_page_offset(uint64_t buf)
103{
104 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
105 return offset;
106}
107
108static inline int buf_num_pages(uint64_t buf, ssize_t len)
109{
110 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
111 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
112 int nPages = end - start + 1;
113 return nPages;
114}
115
116static inline uint64_t buf_page_size(uint32_t size)
117{
118 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
119
120 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
121}
122
/* Narrow a 64-bit address value back into a kernel pointer. */
static inline void *uint64_to_ptr(uint64_t addr)
{
	return (void *)(uintptr_t)addr;
}
129
/* Widen a kernel pointer to uint64_t without sign extension. */
static inline uint64_t ptr_to_uint64(void *ptr)
{
	return (uint64_t)(uintptr_t)ptr;
}
136
137struct fastrpc_file;
138
/* One DMA-coherent buffer owned by a client fd; cached on fl->bufs for reuse. */
struct fastrpc_buf {
	struct hlist_node hn;		/* link in fl->bufs reuse cache */
	struct fastrpc_file *fl;	/* owning client */
	void *virt;			/* kernel virtual address */
	uint64_t phys;			/* device address; SMMU cb id encoded in bits 32+ */
	ssize_t size;			/* allocation size in bytes */
};
146
147struct fastrpc_ctx_lst;
148
/*
 * Span of one invoke buffer in user VA space plus the de-overlapped
 * sub-range actually copied; filled in by context_build_overlap().
 */
struct overlap {
	uintptr_t start;	/* buffer start VA */
	uintptr_t end;		/* buffer end VA (start + len) */
	int raix;		/* index back into the remote-arg array */
	uintptr_t mstart;	/* start of the portion not covered earlier */
	uintptr_t mend;		/* end of that portion; 0 if fully contained */
	uintptr_t offset;	/* offset of start inside an earlier overlapping span */
};
157
/* Per-invocation context: arguments, mappings and completion state. */
struct smq_invoke_ctx {
	struct hlist_node hn;		/* link in fl->clst pending/interrupted */
	struct completion work;		/* signalled when the remote call returns */
	int retval;			/* remote return value; -1 until completed */
	int pid;			/* invoking thread id */
	int tgid;			/* invoking process id */
	remote_arg_t *lpra;		/* local (native-sized) argument array */
	remote_arg64_t *rpra;		/* 64-bit argument array inside ctx->buf */
	int *fds;			/* optional per-buffer ion fds */
	unsigned int *attrs;		/* optional per-buffer FASTRPC_ATTR_* flags */
	struct fastrpc_mmap **maps;	/* per-buffer ion mappings */
	struct fastrpc_buf *buf;	/* metadata + copy buffer for this call */
	ssize_t used;			/* bytes of ctx->buf consumed for copies */
	struct fastrpc_file *fl;	/* owning client */
	uint32_t sc;			/* scalars word (method + buffer counts) */
	struct overlap *overs;		/* per-buffer overlap records */
	struct overlap **overps;	/* overlap records sorted by start VA */
	struct smq_msg msg;		/* outgoing invoke message (sent by code outside this chunk) */
	uint32_t *crc;			/* user pointer for the returned CRC list, may be 0 */
};
178
/* Per-file lists of in-flight and interrupted invocation contexts. */
struct fastrpc_ctx_lst {
	struct hlist_head pending;	/* contexts awaiting a remote reply */
	struct hlist_head interrupted;	/* contexts parked across an interrupted wait */
};
183
/* SMMU context-bank state for one session. */
struct fastrpc_smmu {
	struct dma_iommu_mapping *mapping;
	int cb;		/* context-bank id, encoded into device addrs as (cb << 32) */
	int enabled;	/* nonzero when buffers are mapped through the SMMU */
	int faults;
	int secure;	/* session handles secure (content-protected) buffers */
	int coherent;	/* context bank is IO-coherent */
};
192
/* One device session (context bank) on a channel. */
struct fastrpc_session_ctx {
	struct device *dev;	/* device used for dma-buf attach / DMA mapping */
	struct fastrpc_smmu smmu;
	int used;		/* NOTE(review): presumably marks the session as claimed — confirm against fastrpc_session_alloc() */
};
198
/* glink link/port bookkeeping for one channel. */
struct fastrpc_glink_info {
	int link_state;		/* FASTRPC_LINK_STATE_* */
	int port_state;		/* FASTRPC_LINK_* connection state */
	struct glink_open_config cfg;
	struct glink_link_info link_info;	/* edge/transport, set in gcinfo[] */
	void *link_notify_handle;
};
206
/* Per-DSP channel state; one entry per NUM_CHANNELS in gcinfo[]. */
struct fastrpc_channel_ctx {
	char *name;		/* device node name, e.g. "adsprpc-smd" */
	char *subsys;		/* subsystem-restart identity, e.g. "adsp" */
	void *chan;		/* glink channel handle */
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;
	struct completion workport;
	struct notifier_block nb;	/* subsystem restart notifier */
	struct kref kref;		/* channel open refcount */
	int sesscount;
	int ssrcount;		/* NOTE(review): presumably bumped per subsystem restart — confirm */
	void *handle;
	int prevssrcount;
	int vmid;		/* remote VMID for hyp_assign_phys(); 0 = no assignment */
	struct fastrpc_glink_info link;
};
224
/* Global driver state (singleton gfa). */
struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;	/* NUM_CHANNELS channel array */
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;		/* global mappings, scanned by fastrpc_mmap_remove() */
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;	/* all open fastrpc_file clients */
	spinlock_t hlock;		/* protects the drivers and maps lists */
	struct ion_client *client;	/* ion client used for fd imports */
	struct device *dev;
};
239
/* An ion buffer mapped for remote (DSP) access. */
struct fastrpc_mmap {
	struct hlist_node hn;		/* link in fl->maps or gfa.maps */
	struct fastrpc_file *fl;	/* owning client */
	struct fastrpc_apps *apps;
	int fd;				/* ion fd the mapping was created from */
	uint32_t flags;			/* caller-supplied mmap flags */
	struct dma_buf *buf;		/* dma_buf_get() reference */
	struct sg_table *table;		/* from dma_buf_map_attachment() */
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;	/* ion_import_dma_buf_fd() handle */
	uint64_t phys;			/* device address; SMMU cb id in bits 32+ */
	ssize_t size;			/* mapped size in bytes */
	uintptr_t va;			/* user virtual address of the buffer */
	ssize_t len;			/* user buffer length */
	int refs;			/* reference count, guarded by fl->hlock */
	uintptr_t raddr;		/* remote-side address, matched on unmap */
	int uncached;			/* buffer is not CPU-cached */
	int secure;			/* buffer carries ION_FLAG_SECURE */
	uintptr_t attr;			/* FASTRPC_ATTR_* attributes */
};
260
/*
 * Per-file profiling counters in nanoseconds; field order matches the
 * PERF_KEYS reporting string.
 */
struct fastrpc_perf {
	int64_t count;
	int64_t flush;
	int64_t map;		/* time spent mapping ion buffers (get_args) */
	int64_t copy;		/* time spent copying non-ion arguments */
	int64_t link;		/* glink transfer time */
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;
	int64_t invoke;
};
272
/* Per-open-fd client state. */
struct fastrpc_file {
	struct hlist_node hn;		/* link in gfa.drivers */
	spinlock_t hlock;		/* protects maps, bufs and clst lists */
	struct hlist_head maps;		/* this client's fastrpc_mmap objects */
	struct hlist_head bufs;		/* cached DMA buffers for reuse */
	struct fastrpc_ctx_lst clst;	/* pending/interrupted invoke contexts */
	struct fastrpc_session_ctx *sctx;	/* default (non-secure) session */
	struct fastrpc_session_ctx *secsctx;	/* lazily allocated secure session */
	uint32_t mode;
	uint32_t profile;		/* nonzero enables PERF() accounting */
	int tgid;			/* owning process id */
	int cid;			/* channel id, index into gfa.channel[] */
	int ssrcount;
	int pd;
	struct fastrpc_apps *apps;	/* back-pointer to the global state */
	struct fastrpc_perf perf;
	struct dentry *debugfs_file;
};
291
292static struct fastrpc_apps gfa;
293
/*
 * Static description of the four DSP channels (adsp, mdsp, slpi, cdsp):
 * device-node name, subsystem-restart identity, and glink edge/transport.
 */
static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};
320
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800321static inline int64_t getnstimediff(struct timespec *start)
322{
323 int64_t ns;
324 struct timespec ts, b;
325
326 getnstimeofday(&ts);
327 b = timespec_sub(ts, *start);
328 ns = timespec_to_ns(&b);
329 return ns;
330}
331
/*
 * Release a DMA buffer: either park it on the owner's reuse cache
 * (@cache != 0) or free it for real, first undoing any hypervisor
 * page assignment made when the buffer was allocated.
 */
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == 0 ? 0 : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		/* park on the free list for fastrpc_buf_alloc() to reuse */
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		/* strip the SMMU context-bank id encoded in the high bits */
		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			/* return page ownership to HLOS before freeing */
			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->dev, buf->size, buf->virt,
					buf->phys);
	}
	kfree(buf);
}
363
364static void fastrpc_buf_list_free(struct fastrpc_file *fl)
365{
366 struct fastrpc_buf *buf, *free;
367
368 do {
369 struct hlist_node *n;
370
371 free = 0;
372 spin_lock(&fl->hlock);
373 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
374 hlist_del_init(&buf->hn);
375 free = buf;
376 break;
377 }
378 spin_unlock(&fl->hlock);
379 if (free)
380 fastrpc_buf_free(free, 0);
381 } while (free);
382}
383
384static void fastrpc_mmap_add(struct fastrpc_mmap *map)
385{
386 struct fastrpc_file *fl = map->fl;
387
388 spin_lock(&fl->hlock);
389 hlist_add_head(&map->hn, &fl->maps);
390 spin_unlock(&fl->hlock);
391}
392
393static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800394 ssize_t len, int mflags, int refs, struct fastrpc_mmap **ppmap)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700395{
396 struct fastrpc_mmap *match = 0, *map;
397 struct hlist_node *n;
398
399 spin_lock(&fl->hlock);
400 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
401 if (va >= map->va &&
402 va + len <= map->va + map->len &&
403 map->fd == fd) {
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800404 if (refs)
405 map->refs++;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700406 match = map;
407 break;
408 }
409 }
410 spin_unlock(&fl->hlock);
411 if (match) {
412 *ppmap = match;
413 return 0;
414 }
415 return -ENOTTY;
416}
417
418static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
419 ssize_t len, struct fastrpc_mmap **ppmap)
420{
421 struct fastrpc_mmap *match = 0, *map;
422 struct hlist_node *n;
423 struct fastrpc_apps *me = &gfa;
424
425 spin_lock(&me->hlock);
426 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
427 if (map->raddr == va &&
428 map->raddr + map->len == va + len &&
429 map->refs == 1) {
430 match = map;
431 hlist_del_init(&map->hn);
432 break;
433 }
434 }
435 spin_unlock(&me->hlock);
436 if (match) {
437 *ppmap = match;
438 return 0;
439 }
440 spin_lock(&fl->hlock);
441 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
442 if (map->raddr == va &&
443 map->raddr + map->len == va + len &&
444 map->refs == 1) {
445 match = map;
446 hlist_del_init(&map->hn);
447 break;
448 }
449 }
450 spin_unlock(&fl->hlock);
451 if (match) {
452 *ppmap = match;
453 return 0;
454 }
455 return -ENOTTY;
456}
457
/*
 * Drop one reference on @map; on the final reference unlink it and tear
 * the mapping down: ion handle, SMMU DMA mapping, hypervisor assignment
 * and dma-buf attachment, in that order.
 */
static void fastrpc_mmap_free(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;
	int destVM[1] = {VMID_HLOS};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	if (!map)
		return;
	fl = map->fl;
	spin_lock(&fl->hlock);
	map->refs--;
	if (!map->refs)
		hlist_del_init(&map->hn);
	spin_unlock(&fl->hlock);
	if (map->refs > 0)
		return;
	/* secure buffers were mapped through the secure session */
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	if (!IS_ERR_OR_NULL(map->handle))
		ion_free(fl->apps->client, map->handle);
	if (sess->smmu.enabled) {
		if (map->size || map->phys)
			msm_dma_unmap_sg(sess->dev,
				map->table->sgl,
				map->table->nents, DMA_BIDIRECTIONAL,
				map->buf);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid && map->phys) {
		int srcVM[2] = {VMID_HLOS, vmid};

		/* give the pages back to HLOS */
		hyp_assign_phys(map->phys, buf_page_size(map->size),
			srcVM, 2, destVM, destVMperm, 1);
	}

	if (!IS_ERR_OR_NULL(map->table))
		dma_buf_unmap_attachment(map->attach, map->table,
				DMA_BIDIRECTIONAL);
	if (!IS_ERR_OR_NULL(map->attach))
		dma_buf_detach(map->buf, map->attach);
	if (!IS_ERR_OR_NULL(map->buf))
		dma_buf_put(map->buf);
	kfree(map);
}
507
508static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
509 struct fastrpc_session_ctx **session);
510
/*
 * Map the ion buffer behind @fd for remote access, reusing an existing
 * mapping (with a new reference) when one already covers [va, va + len).
 * On success *ppmap holds the mapping; on failure the partially built
 * mapping is torn down via fastrpc_mmap_free().
 */
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, ssize_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = 0;
	unsigned long attrs;
	unsigned long flags;
	int err = 0, vmid;

	/* reuse (and reference) an existing mapping when possible */
	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	VERIFY(err, !IS_ERR_OR_NULL(map->handle =
			ion_import_dma_buf_fd(fl->apps->client, fd)));
	if (err)
		goto bail;
	VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
	if (err)
		goto bail;

	map->uncached = !ION_IS_CACHED(flags);
	/* NOVA buffers have no user VA; treat them as uncached */
	if (map->attr & FASTRPC_ATTR_NOVA)
		map->uncached = 1;

	map->secure = flags & ION_FLAG_SECURE;
	if (map->secure) {
		/* secure buffers need the lazily created secure session */
		if (!fl->secsctx)
			err = fastrpc_session_alloc(chan, 1,
						&fl->secsctx);
		if (err)
			goto bail;
	}
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->attach =
			dma_buf_attach(map->buf, sess->dev)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->table =
		dma_buf_map_attachment(map->attach,
			DMA_BIDIRECTIONAL)));
	if (err)
		goto bail;
	if (sess->smmu.enabled) {
		attrs = DMA_ATTR_EXEC_MAPPING;

		/* choose coherency from buffer attrs vs. context-bank config */
		if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
			(sess->smmu.coherent && map->uncached))
			attrs |= DMA_ATTR_FORCE_NON_COHERENT;
		else if (map->attr & FASTRPC_ATTR_COHERENT)
			attrs |= DMA_ATTR_FORCE_COHERENT;

		VERIFY(err, map->table->nents ==
			msm_dma_map_sg_attrs(sess->dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
		if (err)
			goto bail;
	} else {
		/* without an SMMU the buffer must be physically contiguous */
		VERIFY(err, map->table->nents == 1);
		if (err)
			goto bail;
	}
	map->phys = sg_dma_address(map->table->sgl);
	if (sess->smmu.cb) {
		/* tag the device address with the context-bank id */
		map->phys += ((uint64_t)sess->smmu.cb << 32);
		map->size = sg_dma_len(map->table->sgl);
	} else {
		map->size = buf_page_size(len);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

		/* share the pages with the remote subsystem's VM */
		VERIFY(err, !hyp_assign_phys(map->phys,
				buf_page_size(map->size),
				srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}
	map->va = va;
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}
625
/*
 * Obtain a DMA-coherent buffer of at least @size bytes for @fl.  The
 * smallest adequate buffer on the per-file cache is reused when one
 * exists; otherwise a fresh buffer is allocated (retrying once after
 * draining the cache) and, on channels with a remote VMID, the pages
 * are hyp-assigned so the remote subsystem can access them.
 */
static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
			struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = 0, *fr = 0;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = 0;
	VERIFY(err, buf = kzalloc(sizeof(*buf), GFP_KERNEL));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = 0;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
					(void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
					(void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	/* tag the device address with the SMMU context-bank id */
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

		/* share the pages with the remote subsystem's VM */
		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}
691
692
/*
 * Resume an invocation previously interrupted for this thread: find the
 * saved context on the interrupted list, move it back to pending and
 * return it via *po.  Returns -1 when the saved context belongs to a
 * different file or its scalars do not match @inv.
 */
static int context_restore_interrupted(struct fastrpc_file *fl,
			struct fastrpc_ioctl_invoke_crc *inv,
			struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = 0, *ictx = 0;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		/* one saved context per thread, keyed by pid */
		if (ictx->pid == current->pid) {
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}
720
721#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
722static int overlap_ptr_cmp(const void *a, const void *b)
723{
724 struct overlap *pa = *((struct overlap **)a);
725 struct overlap *pb = *((struct overlap **)b);
726 /* sort with lowest starting buffer first */
727 int st = CMP(pa->start, pb->start);
728 /* sort with highest ending buffer first */
729 int ed = CMP(pb->end, pa->end);
730 return st == 0 ? ed : st;
731}
732
/*
 * For every in/out buffer of @ctx compute the portion not already
 * covered by a lower-starting buffer, so overlapping user buffers are
 * copied into the args buffer only once.  mstart/mend bound the bytes
 * to copy (both 0 when fully contained); offset locates the buffer
 * inside an earlier overlapping span.  Fails for buffers whose VA
 * range wraps.
 */
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			/* reject buffers that wrap the address space */
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, 0);
	/* sweep in start order, tracking the furthest end seen so far */
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			/* overlaps an earlier buffer: copy only the tail */
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				/* fully contained: nothing extra to copy */
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			/* disjoint: copy the whole buffer */
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}
778
779#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
780 do {\
781 if (!(kernel))\
782 VERIFY(err, 0 == copy_from_user((dst), (src),\
783 (size)));\
784 else\
785 memmove((dst), (src), (size));\
786 } while (0)
787
788#define K_COPY_TO_USER(err, kernel, dst, src, size) \
789 do {\
790 if (!(kernel))\
791 VERIFY(err, 0 == copy_to_user((dst), (src),\
792 (size)));\
793 else\
794 memmove((dst), (src), (size));\
795 } while (0)
796
797
798static void context_free(struct smq_invoke_ctx *ctx);
799
/*
 * Build a new invocation context for @invokefd.  A single allocation
 * holds the context plus its maps/lpra/fds/attrs/overlap arrays; the
 * argument descriptors and optional fd/attr arrays are copied in from
 * user space (memmoved for kernel callers), the overlap table is
 * computed, and the context is queued on the file's pending list.
 */
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			struct fastrpc_ioctl_invoke_crc *invokefd,
			struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	/* trailing storage for the per-buffer bookkeeping arrays */
	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	/* carve the trailing storage into the individual arrays */
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
					bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
					bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}
	ctx->crc = (uint32_t *)invokefd->crc;
	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = current->tgid;
	init_completion(&ctx->work);

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}
869
870static void context_save_interrupted(struct smq_invoke_ctx *ctx)
871{
872 struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
873
874 spin_lock(&ctx->fl->hlock);
875 hlist_del_init(&ctx->hn);
876 hlist_add_head(&ctx->hn, &clst->interrupted);
877 spin_unlock(&ctx->fl->hlock);
878 /* free the cache on power collapse */
879 fastrpc_buf_list_free(ctx->fl);
880}
881
882static void context_free(struct smq_invoke_ctx *ctx)
883{
884 int i;
885 int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
886 REMOTE_SCALARS_OUTBUFS(ctx->sc);
887 spin_lock(&ctx->fl->hlock);
888 hlist_del_init(&ctx->hn);
889 spin_unlock(&ctx->fl->hlock);
890 for (i = 0; i < nbufs; ++i)
891 fastrpc_mmap_free(ctx->maps[i]);
892 fastrpc_buf_free(ctx->buf, 1);
893 kfree(ctx);
894}
895
/* Record the remote return value and wake the waiting invoker. */
static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	/* retval must be stored before the completion is signalled */
	ctx->retval = retval;
	complete(&ctx->work);
}
901
902
903static void fastrpc_notify_users(struct fastrpc_file *me)
904{
905 struct smq_invoke_ctx *ictx;
906 struct hlist_node *n;
907
908 spin_lock(&me->hlock);
909 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
910 complete(&ictx->work);
911 }
912 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
913 complete(&ictx->work);
914 }
915 spin_unlock(&me->hlock);
916
917}
918
919static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
920{
921 struct fastrpc_file *fl;
922 struct hlist_node *n;
923
924 spin_lock(&me->hlock);
925 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
926 if (fl->cid == cid)
927 fastrpc_notify_users(fl);
928 }
929 spin_unlock(&me->hlock);
930
931}
932static void context_list_ctor(struct fastrpc_ctx_lst *me)
933{
934 INIT_HLIST_HEAD(&me->interrupted);
935 INIT_HLIST_HEAD(&me->pending);
936}
937
938static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
939{
940 struct fastrpc_ctx_lst *clst = &fl->clst;
941 struct smq_invoke_ctx *ictx = 0, *ctxfree;
942 struct hlist_node *n;
943
944 do {
945 ctxfree = 0;
946 spin_lock(&fl->hlock);
947 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
948 hlist_del_init(&ictx->hn);
949 ctxfree = ictx;
950 break;
951 }
952 spin_unlock(&fl->hlock);
953 if (ctxfree)
954 context_free(ctxfree);
955 } while (ctxfree);
956 do {
957 ctxfree = 0;
958 spin_lock(&fl->hlock);
959 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
960 hlist_del_init(&ictx->hn);
961 ctxfree = ictx;
962 break;
963 }
964 spin_unlock(&fl->hlock);
965 if (ctxfree)
966 context_free(ctxfree);
967 } while (ctxfree);
968}
969
970static int fastrpc_file_free(struct fastrpc_file *fl);
971static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
972{
973 struct fastrpc_file *fl, *free;
974 struct hlist_node *n;
975
976 do {
977 free = 0;
978 spin_lock(&me->hlock);
979 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
980 hlist_del_init(&fl->hn);
981 free = fl;
982 break;
983 }
984 spin_unlock(&me->hlock);
985 if (free)
986 fastrpc_file_free(free);
987 } while (free);
988}
989
990static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
991{
992 remote_arg64_t *rpra;
993 remote_arg_t *lpra = ctx->lpra;
994 struct smq_invoke_buf *list;
995 struct smq_phy_page *pages, *ipage;
996 uint32_t sc = ctx->sc;
997 int inbufs = REMOTE_SCALARS_INBUFS(sc);
998 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -0800999 int handles, bufs = inbufs + outbufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001000 uintptr_t args;
1001 ssize_t rlen = 0, copylen = 0, metalen = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001002 int i, oix;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001003 int err = 0;
1004 int mflags = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001005 uint64_t *fdlist;
Sathish Ambleybae51902017-07-03 15:00:49 -07001006 uint32_t *crclist;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001007
1008 /* calculate size of the metadata */
1009 rpra = 0;
1010 list = smq_invoke_buf_start(rpra, sc);
1011 pages = smq_phy_page_start(sc, list);
1012 ipage = pages;
1013
1014 for (i = 0; i < bufs; ++i) {
1015 uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
1016 ssize_t len = lpra[i].buf.len;
1017
1018 if (ctx->fds[i] && (ctx->fds[i] != -1))
1019 fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1020 ctx->attrs[i], buf, len,
1021 mflags, &ctx->maps[i]);
1022 ipage += 1;
1023 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001024 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1025 for (i = bufs; i < bufs + handles; i++) {
1026 VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1027 FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
1028 if (err)
1029 goto bail;
1030 ipage += 1;
1031 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001032 metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
1033 (sizeof(uint32_t) * M_CRCLIST);
1034
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001035 /* calculate len requreed for copying */
1036 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1037 int i = ctx->overps[oix]->raix;
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001038 uintptr_t mstart, mend;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001039 ssize_t len = lpra[i].buf.len;
1040
1041 if (!len)
1042 continue;
1043 if (ctx->maps[i])
1044 continue;
1045 if (ctx->overps[oix]->offset == 0)
1046 copylen = ALIGN(copylen, BALIGN);
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001047 mstart = ctx->overps[oix]->mstart;
1048 mend = ctx->overps[oix]->mend;
1049 VERIFY(err, (mend - mstart) <= LONG_MAX);
1050 if (err)
1051 goto bail;
1052 copylen += mend - mstart;
1053 VERIFY(err, copylen >= 0);
1054 if (err)
1055 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001056 }
1057 ctx->used = copylen;
1058
1059 /* allocate new buffer */
1060 if (copylen) {
1061 VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
1062 if (err)
1063 goto bail;
1064 }
Tharun Kumar Merugue3361f92017-06-22 10:45:43 +05301065 if (ctx->buf->virt && metalen <= copylen)
1066 memset(ctx->buf->virt, 0, metalen);
1067
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001068 /* copy metadata */
1069 rpra = ctx->buf->virt;
1070 ctx->rpra = rpra;
1071 list = smq_invoke_buf_start(rpra, sc);
1072 pages = smq_phy_page_start(sc, list);
1073 ipage = pages;
1074 args = (uintptr_t)ctx->buf->virt + metalen;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001075 for (i = 0; i < bufs + handles; ++i) {
1076 if (lpra[i].buf.len)
1077 list[i].num = 1;
1078 else
1079 list[i].num = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001080 list[i].pgidx = ipage - pages;
1081 ipage++;
1082 }
1083 /* map ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001084 PERF(ctx->fl->profile, ctx->fl->perf.map,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001085 for (i = 0; i < inbufs + outbufs; ++i) {
1086 struct fastrpc_mmap *map = ctx->maps[i];
1087 uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
1088 ssize_t len = lpra[i].buf.len;
1089
1090 rpra[i].buf.pv = 0;
1091 rpra[i].buf.len = len;
1092 if (!len)
1093 continue;
1094 if (map) {
1095 struct vm_area_struct *vma;
1096 uintptr_t offset;
1097 int num = buf_num_pages(buf, len);
1098 int idx = list[i].pgidx;
1099
1100 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001101 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001102 } else {
1103 down_read(&current->mm->mmap_sem);
1104 VERIFY(err, NULL != (vma = find_vma(current->mm,
1105 map->va)));
1106 if (err) {
1107 up_read(&current->mm->mmap_sem);
1108 goto bail;
1109 }
1110 offset = buf_page_start(buf) - vma->vm_start;
1111 up_read(&current->mm->mmap_sem);
1112 VERIFY(err, offset < (uintptr_t)map->size);
1113 if (err)
1114 goto bail;
1115 }
1116 pages[idx].addr = map->phys + offset;
1117 pages[idx].size = num << PAGE_SHIFT;
1118 }
1119 rpra[i].buf.pv = buf;
1120 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001121 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001122 for (i = bufs; i < bufs + handles; ++i) {
1123 struct fastrpc_mmap *map = ctx->maps[i];
1124
1125 pages[i].addr = map->phys;
1126 pages[i].size = map->size;
1127 }
1128 fdlist = (uint64_t *)&pages[bufs + handles];
1129 for (i = 0; i < M_FDLIST; i++)
1130 fdlist[i] = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001131 crclist = (uint32_t *)&fdlist[M_FDLIST];
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301132 memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001133
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001134 /* copy non ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001135 PERF(ctx->fl->profile, ctx->fl->perf.copy,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001136 rlen = copylen - metalen;
1137 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1138 int i = ctx->overps[oix]->raix;
1139 struct fastrpc_mmap *map = ctx->maps[i];
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001140 ssize_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001141 uint64_t buf;
1142 ssize_t len = lpra[i].buf.len;
1143
1144 if (!len)
1145 continue;
1146 if (map)
1147 continue;
1148 if (ctx->overps[oix]->offset == 0) {
1149 rlen -= ALIGN(args, BALIGN) - args;
1150 args = ALIGN(args, BALIGN);
1151 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001152 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001153 VERIFY(err, rlen >= mlen);
1154 if (err)
1155 goto bail;
1156 rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
1157 pages[list[i].pgidx].addr = ctx->buf->phys -
1158 ctx->overps[oix]->offset +
1159 (copylen - rlen);
1160 pages[list[i].pgidx].addr =
1161 buf_page_start(pages[list[i].pgidx].addr);
1162 buf = rpra[i].buf.pv;
1163 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1164 if (i < inbufs) {
1165 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1166 lpra[i].buf.pv, len);
1167 if (err)
1168 goto bail;
1169 }
1170 args = args + mlen;
1171 rlen -= mlen;
1172 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001173 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001174
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001175 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001176 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1177 int i = ctx->overps[oix]->raix;
1178 struct fastrpc_mmap *map = ctx->maps[i];
1179
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001180 if (map && map->uncached)
1181 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301182 if (ctx->fl->sctx->smmu.coherent &&
1183 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1184 continue;
1185 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1186 continue;
1187
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001188 if (rpra[i].buf.len && ctx->overps[oix]->mstart)
1189 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1190 uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
1191 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001192 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001193 for (i = bufs; i < bufs + handles; i++) {
1194 rpra[i].dma.fd = ctx->fds[i];
1195 rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1196 rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001197 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001198
1199 if (!ctx->fl->sctx->smmu.coherent) {
1200 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001201 dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001202 PERF_END);
1203 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001204 bail:
1205 return err;
1206}
1207
/*
 * Copy invoke results back to the caller after the DSP has completed.
 *
 * Walks the out-buffers of ctx->rpra: non-mapped (copied) buffers are
 * copied back to the user's original pointers, ion-mapped buffers are
 * simply released.  Then any fds the DSP listed in the fdlist region of
 * the metadata buffer are unmapped, and the CRC list is copied out if
 * the caller asked for it.
 *
 * @kernel: nonzero when the caller is the kernel (K_COPY_TO_USER then
 *          does a plain memcpy instead of copy_to_user).
 * @ctx:    completed invoke context holding rpra/lpra/maps.
 * @upra:   caller's remote-arg array (unused here; results are copied
 *          via ctx->lpra which records the original user pointers).
 * Returns 0 on success or a negative/VERIFY error.
 */
static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		    remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	uint32_t *crclist = NULL;

	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	/* fdlist and crclist live immediately after the page table in the
	 * metadata buffer; this layout must match get_args().
	 */
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	crclist = (uint32_t *)(fdlist + M_FDLIST);

	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			/* Copied buffer: move the DSP-written bytes back to
			 * the caller's original buffer.
			 */
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			/* Mapped buffer: DSP wrote in place, just release. */
			fastrpc_mmap_free(ctx->maps[i]);
			ctx->maps[i] = 0;
		}
	}
	if (inbufs + outbufs + handles) {
		/* The DSP reports fds it is done with via fdlist;
		 * a zero entry terminates the list.
		 */
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap);
		}
	}
	/* Copy CRCs out only if userspace supplied a crc pointer. */
	if (ctx->crc && crclist && rpra)
		K_COPY_TO_USER(err, kernel, (void __user *)ctx->crc,
			crclist, M_CRCLIST*sizeof(uint32_t));

 bail:
	return err;
}
1259
/*
 * Pre-invoke cache maintenance for out-buffers.
 *
 * Before handing buffers to the DSP, flush the cache lines that straddle
 * the unaligned start/end of each out-buffer so that later invalidation
 * (inv_args) cannot discard adjacent dirty data sharing those lines.
 * Buffers that are uncached, zero-length, coherent, or that live in the
 * same page as the metadata (rpra) are skipped.
 */
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		/* On an io-coherent SMMU, only buffers explicitly marked
		 * non-coherent need maintenance.
		 */
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* Buffer shares the metadata page: handled with rpra itself. */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		/* Flush the partial cache line at the buffer start... */
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		/* ...and the partial cache line at the buffer end. */
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
			rpra[i].buf.len);
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}
1295
/*
 * Post-invoke cache invalidation for out-buffers.
 *
 * After the DSP has written results, invalidate the CPU caches covering
 * each out-buffer so the CPU reads the DSP-written data rather than
 * stale cached lines.  Skip rules mirror inv_args_pre(); ion-backed
 * mappings are invalidated via the ion cache-op API, plain memory via
 * dmac_inv_range().  Finally the metadata region itself is invalidated.
 */
static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		/* io-coherent SMMU: only explicitly non-coherent buffers
		 * need invalidation.
		 */
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* Shares the metadata page; covered by the final inv below. */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						 + rpra[i].buf.len));
	}

	/* Invalidate the metadata buffer the DSP may have updated
	 * (e.g. fdlist/crclist regions).
	 */
	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}
1335
/*
 * Build and transmit the invoke message for @ctx over the channel's
 * glink port.
 *
 * The context pointer is packed into the message header (low bit ORed
 * with fl->pd to select the protection domain) and later recovered in
 * fastrpc_glink_notify_rx().  Fails with -ECONNRESET if the channel has
 * been through subsystem restart since this file opened it, and with a
 * VERIFY error if the glink port is not in the CONNECTED state.
 *
 * @kernel: nonzero for kernel-originated invokes (msg->pid forced to 0).
 * Returns 0 on success, negative error otherwise.
 */
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, 0 != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = current->tgid;
	msg->tid = current->pid;
	if (kernel)
		msg->pid = 0;
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	/* Channel restarted since this client attached: refuse to send. */
	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
		FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
 bail:
	return err;
}
1371
1372static void fastrpc_init(struct fastrpc_apps *me)
1373{
1374 int i;
1375
1376 INIT_HLIST_HEAD(&me->drivers);
1377 spin_lock_init(&me->hlock);
1378 mutex_init(&me->smd_mutex);
1379 me->channel = &gcinfo[0];
1380 for (i = 0; i < NUM_CHANNELS; i++) {
1381 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301382 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001383 me->channel[i].sesscount = 0;
1384 }
1385}
1386
1387static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1388
1389static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
1390 uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07001391 struct fastrpc_ioctl_invoke_crc *inv)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001392{
1393 struct smq_invoke_ctx *ctx = 0;
1394 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
1395 int cid = fl->cid;
1396 int interrupted = 0;
1397 int err = 0;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001398 struct timespec invoket;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001399
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001400 if (fl->profile)
1401 getnstimeofday(&invoket);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001402 if (!kernel) {
1403 VERIFY(err, 0 == context_restore_interrupted(fl, inv,
1404 &ctx));
1405 if (err)
1406 goto bail;
1407 if (fl->sctx->smmu.faults)
1408 err = FASTRPC_ENOSUCH;
1409 if (err)
1410 goto bail;
1411 if (ctx)
1412 goto wait;
1413 }
1414
1415 VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
1416 if (err)
1417 goto bail;
1418
1419 if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001420 PERF(fl->profile, fl->perf.getargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001421 VERIFY(err, 0 == get_args(kernel, ctx));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001422 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001423 if (err)
1424 goto bail;
1425 }
1426
Sathish Ambleyc432b502017-06-05 12:03:42 -07001427 if (!fl->sctx->smmu.coherent)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001428 inv_args_pre(ctx);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001429 PERF(fl->profile, fl->perf.link,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001430 VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001431 PERF_END);
1432
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001433 if (err)
1434 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001435 wait:
1436 if (kernel)
1437 wait_for_completion(&ctx->work);
1438 else {
1439 interrupted = wait_for_completion_interruptible(&ctx->work);
1440 VERIFY(err, 0 == (err = interrupted));
1441 if (err)
1442 goto bail;
1443 }
Sathish Ambleyc432b502017-06-05 12:03:42 -07001444
1445 PERF(fl->profile, fl->perf.invargs,
1446 if (!fl->sctx->smmu.coherent)
1447 inv_args(ctx);
1448 PERF_END);
1449
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001450 VERIFY(err, 0 == (err = ctx->retval));
1451 if (err)
1452 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001453
1454 PERF(fl->profile, fl->perf.putargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001455 VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001456 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001457 if (err)
1458 goto bail;
1459 bail:
1460 if (ctx && interrupted == -ERESTARTSYS)
1461 context_save_interrupted(ctx);
1462 else if (ctx)
1463 context_free(ctx);
1464 if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
1465 err = ECONNRESET;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001466
1467 if (fl->profile && !interrupted) {
1468 if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
1469 fl->perf.invoke += getnstimediff(&invoket);
1470 if (!(invoke->handle >= 0 &&
1471 invoke->handle <= FASTRPC_STATIC_HANDLE_MAX))
1472 fl->perf.count++;
1473 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001474 return err;
1475}
1476
Sathish Ambley36849af2017-02-02 09:35:55 -08001477static int fastrpc_channel_open(struct fastrpc_file *fl);
/*
 * Initialize the DSP-side process for this client.
 *
 * FASTRPC_INIT_ATTACH: attach the caller to an existing remote process
 * (pd 0) by sending its tgid.  FASTRPC_INIT_CREATE: spawn a new dynamic
 * process (pd 1) by shipping the process name, the shell/file image,
 * a pre-allocated memory region, and optional attrs/siglen.  Any other
 * flag value is rejected with -ENOTTY.
 */
static int fastrpc_init_process(struct fastrpc_file *fl,
				struct fastrpc_ioctl_init_attrs *uproc)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_crc ioctl;
	struct fastrpc_ioctl_init *init = &uproc->init;
	struct smq_phy_page pages[1];
	struct fastrpc_mmap *file = 0, *mem = 0;

	VERIFY(err, !fastrpc_channel_open(fl));
	if (err)
		goto bail;
	if (init->flags == FASTRPC_INIT_ATTACH) {
		remote_arg_t ra[1];
		int tgid = current->tgid;

		/* Single in-arg: the caller's tgid identifies the process. */
		ra[0].buf.pv = (void *)&tgid;
		ra[0].buf.len = sizeof(tgid);
		ioctl.inv.handle = 1;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = 0;
		ioctl.attrs = 0;
		ioctl.crc = NULL;
		fl->pd = 0;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else if (init->flags == FASTRPC_INIT_CREATE) {
		remote_arg_t ra[6];
		int fds[6];
		int mflags = 0;
		/* Layout is part of the wire protocol with the DSP. */
		struct {
			int pgid;
			int namelen;
			int filelen;
			int pageslen;
			int attrs;
			int siglen;
		} inbuf;

		inbuf.pgid = current->tgid;
		inbuf.namelen = strlen(current->comm) + 1;
		inbuf.filelen = init->filelen;
		fl->pd = 1;
		/* Map the executable image supplied by userspace, if any. */
		if (init->filelen) {
			VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
				init->file, init->filelen, mflags, &file));
			if (err)
				goto bail;
		}
		inbuf.pageslen = 1;
		/* Map the pre-allocated memory region for the new process. */
		VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
			init->mem, init->memlen, mflags, &mem));
		if (err)
			goto bail;
		/* NOTE(review): duplicate of the assignment above —
		 * harmless but redundant.
		 */
		inbuf.pageslen = 1;
		ra[0].buf.pv = (void *)&inbuf;
		ra[0].buf.len = sizeof(inbuf);
		fds[0] = 0;

		ra[1].buf.pv = (void *)current->comm;
		ra[1].buf.len = inbuf.namelen;
		fds[1] = 0;

		ra[2].buf.pv = (void *)init->file;
		ra[2].buf.len = inbuf.filelen;
		fds[2] = init->filefd;

		pages[0].addr = mem->phys;
		pages[0].size = mem->size;
		ra[3].buf.pv = (void *)pages;
		ra[3].buf.len = 1 * sizeof(*pages);
		fds[3] = 0;

		inbuf.attrs = uproc->attrs;
		ra[4].buf.pv = (void *)&(inbuf.attrs);
		ra[4].buf.len = sizeof(inbuf.attrs);
		fds[4] = 0;

		inbuf.siglen = uproc->siglen;
		ra[5].buf.pv = (void *)&(inbuf.siglen);
		ra[5].buf.len = sizeof(inbuf.siglen);
		fds[5] = 0;

		ioctl.inv.handle = 1;
		/* The extended (7,6) method is used only when attrs are set. */
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
		if (uproc->attrs)
			ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = fds;
		ioctl.attrs = 0;
		ioctl.crc = NULL;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else {
		err = -ENOTTY;
	}
bail:
	/* The memory region stays mapped on success; the file image map
	 * is always released.
	 */
	if (mem && err)
		fastrpc_mmap_free(mem);
	if (file)
		fastrpc_mmap_free(file);
	return err;
}
1586
1587static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
1588{
1589 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001590 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001591 remote_arg_t ra[1];
1592 int tgid = 0;
1593
Sathish Ambley36849af2017-02-02 09:35:55 -08001594 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1595 if (err)
1596 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001597 VERIFY(err, fl->apps->channel[fl->cid].chan != 0);
1598 if (err)
1599 goto bail;
1600 tgid = fl->tgid;
1601 ra[0].buf.pv = (void *)&tgid;
1602 ra[0].buf.len = sizeof(tgid);
1603 ioctl.inv.handle = 1;
1604 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
1605 ioctl.inv.pra = ra;
1606 ioctl.fds = 0;
1607 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001608 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001609 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1610 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1611bail:
1612 return err;
1613}
1614
1615static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
1616 struct fastrpc_mmap *map)
1617{
Sathish Ambleybae51902017-07-03 15:00:49 -07001618 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001619 struct smq_phy_page page;
1620 int num = 1;
1621 remote_arg_t ra[3];
1622 int err = 0;
1623 struct {
1624 int pid;
1625 uint32_t flags;
1626 uintptr_t vaddrin;
1627 int num;
1628 } inargs;
1629 struct {
1630 uintptr_t vaddrout;
1631 } routargs;
1632
1633 inargs.pid = current->tgid;
1634 inargs.vaddrin = (uintptr_t)map->va;
1635 inargs.flags = flags;
1636 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
1637 ra[0].buf.pv = (void *)&inargs;
1638 ra[0].buf.len = sizeof(inargs);
1639 page.addr = map->phys;
1640 page.size = map->size;
1641 ra[1].buf.pv = (void *)&page;
1642 ra[1].buf.len = num * sizeof(page);
1643
1644 ra[2].buf.pv = (void *)&routargs;
1645 ra[2].buf.len = sizeof(routargs);
1646
1647 ioctl.inv.handle = 1;
1648 if (fl->apps->compat)
1649 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
1650 else
1651 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
1652 ioctl.inv.pra = ra;
1653 ioctl.fds = 0;
1654 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001655 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001656 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1657 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1658 map->raddr = (uintptr_t)routargs.vaddrout;
1659
1660 return err;
1661}
1662
1663static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
1664 struct fastrpc_mmap *map)
1665{
Sathish Ambleybae51902017-07-03 15:00:49 -07001666 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001667 remote_arg_t ra[1];
1668 int err = 0;
1669 struct {
1670 int pid;
1671 uintptr_t vaddrout;
1672 ssize_t size;
1673 } inargs;
1674
1675 inargs.pid = current->tgid;
1676 inargs.size = map->size;
1677 inargs.vaddrout = map->raddr;
1678 ra[0].buf.pv = (void *)&inargs;
1679 ra[0].buf.len = sizeof(inargs);
1680
1681 ioctl.inv.handle = 1;
1682 if (fl->apps->compat)
1683 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
1684 else
1685 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
1686 ioctl.inv.pra = ra;
1687 ioctl.fds = 0;
1688 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001689 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001690 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1691 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1692 return err;
1693}
1694
1695static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
1696 ssize_t len, struct fastrpc_mmap **ppmap);
1697
1698static void fastrpc_mmap_add(struct fastrpc_mmap *map);
1699
1700static int fastrpc_internal_munmap(struct fastrpc_file *fl,
1701 struct fastrpc_ioctl_munmap *ud)
1702{
1703 int err = 0;
1704 struct fastrpc_mmap *map = 0;
1705
1706 VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
1707 if (err)
1708 goto bail;
1709 VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
1710 if (err)
1711 goto bail;
1712 fastrpc_mmap_free(map);
1713bail:
1714 if (err && map)
1715 fastrpc_mmap_add(map);
1716 return err;
1717}
1718
1719static int fastrpc_internal_mmap(struct fastrpc_file *fl,
1720 struct fastrpc_ioctl_mmap *ud)
1721{
1722
1723 struct fastrpc_mmap *map = 0;
1724 int err = 0;
1725
1726 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin, ud->size,
Sathish Ambleyae5ee542017-01-16 22:24:23 -08001727 ud->flags, 1, &map))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001728 return 0;
1729
1730 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
1731 (uintptr_t)ud->vaddrin, ud->size, ud->flags, &map));
1732 if (err)
1733 goto bail;
1734 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
1735 if (err)
1736 goto bail;
1737 ud->vaddrout = map->raddr;
1738 bail:
1739 if (err && map)
1740 fastrpc_mmap_free(map);
1741 return err;
1742}
1743
/*
 * kref release callback for a channel: close the glink port and drop
 * the link-state notifier.
 *
 * NOTE: this is invoked via kref_put_mutex(..., &me->smd_mutex), which
 * calls the release function with smd_mutex HELD — hence the
 * mutex_unlock() here rather than a lock/unlock pair.  Do not call this
 * directly.
 */
static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	/* Recover the channel id from the context's position in gcinfo. */
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = 0;
	glink_unregister_link_state_cb(ctx->link.link_notify_handle);
	ctx->link.link_notify_handle = NULL;
	mutex_unlock(&me->smd_mutex);
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
		MAJOR(me->dev_no), cid);
}
1760
1761static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
1762
1763static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
1764 int secure, struct fastrpc_session_ctx **session)
1765{
1766 struct fastrpc_apps *me = &gfa;
1767 int idx = 0, err = 0;
1768
1769 if (chan->sesscount) {
1770 for (idx = 0; idx < chan->sesscount; ++idx) {
1771 if (!chan->session[idx].used &&
1772 chan->session[idx].smmu.secure == secure) {
1773 chan->session[idx].used = 1;
1774 break;
1775 }
1776 }
1777 VERIFY(err, idx < chan->sesscount);
1778 if (err)
1779 goto bail;
1780 chan->session[idx].smmu.faults = 0;
1781 } else {
1782 VERIFY(err, me->dev != NULL);
1783 if (err)
1784 goto bail;
1785 chan->session[0].dev = me->dev;
1786 }
1787
1788 *session = &chan->session[idx];
1789 bail:
1790 return err;
1791}
1792
1793bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv, size_t size)
1794{
1795 if (glink_queue_rx_intent(h, NULL, size))
1796 return false;
1797 return true;
1798}
1799
/*
 * GLINK tx-done callback: intentionally empty — no per-transmit
 * bookkeeping is performed on this channel.
 */
void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
				  const void *pkt_priv, const void *ptr)
{
}
1804
/*
 * GLINK rx callback: the packet is an array of smq_invoke_rsp entries.
 * Each response carries the context pointer packed by
 * fastrpc_invoke_send() (low bit holds the pd and is masked off) and
 * the remote return value; each is dispatched to the sleeping invoker.
 *
 * NOTE(review): @size (size_t) is narrowed into `int len` — fine for
 * the small control messages seen here, but would misbehave for
 * payloads over INT_MAX; confirm against the transport's limits.
 */
void fastrpc_glink_notify_rx(void *handle, const void *priv,
	const void *pkt_priv, const void *ptr, size_t size)
{
	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
	int len = size;

	while (len >= sizeof(*rsp) && rsp) {
		/* Mask off the pd bit to recover the context pointer. */
		rsp->ctx = rsp->ctx & ~1;
		context_notify_user(uint64_to_ptr(rsp->ctx), rsp->retval);
		rsp++;
		len = len - sizeof(*rsp);
	}
	glink_rx_done(handle, ptr, true);
}
1819
/*
 * GLINK port-state callback: track the connect state of the channel's
 * port.  On CONNECTED, wake the opener waiting on workport; on remote
 * disconnect, close and drop the local channel handle so the next open
 * starts clean.  @priv carries the channel id set in fastrpc_glink_open().
 */
void fastrpc_glink_notify_state(void *handle, const void *priv,
				unsigned int event)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)(uintptr_t)priv;
	struct fastrpc_glink_info *link;

	if (cid < 0 || cid >= NUM_CHANNELS)
		return;
	link = &me->channel[cid].link;
	switch (event) {
	case GLINK_CONNECTED:
		link->port_state = FASTRPC_LINK_CONNECTED;
		complete(&me->channel[cid].workport);
		break;
	case GLINK_LOCAL_DISCONNECTED:
		link->port_state = FASTRPC_LINK_DISCONNECTED;
		break;
	case GLINK_REMOTE_DISCONNECTED:
		/* Remote went away: tear down our side of the channel. */
		if (me->channel[cid].chan) {
			fastrpc_glink_close(me->channel[cid].chan, cid);
			me->channel[cid].chan = 0;
		}
		break;
	default:
		break;
	}
}
1848
1849static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
1850 struct fastrpc_session_ctx **session)
1851{
1852 int err = 0;
1853 struct fastrpc_apps *me = &gfa;
1854
1855 mutex_lock(&me->smd_mutex);
1856 if (!*session)
1857 err = fastrpc_session_alloc_locked(chan, secure, session);
1858 mutex_unlock(&me->smd_mutex);
1859 return err;
1860}
1861
1862static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
1863 struct fastrpc_session_ctx *session)
1864{
1865 struct fastrpc_apps *me = &gfa;
1866
1867 mutex_lock(&me->smd_mutex);
1868 session->used = 0;
1869 mutex_unlock(&me->smd_mutex);
1870}
1871
/*
 * Release all state owned by a client file: unlink from the global
 * driver list, tear down the remote process, free contexts, buffers and
 * mappings, drop the channel reference (unless an SSR already bumped
 * ssrcount) and return the SMMU sessions.  Teardown order matters:
 * the remote release must precede freeing local resources.
 */
static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n;
	struct fastrpc_mmap *map = 0;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	/* No session means the device was opened but never initialized;
	 * there is nothing remote or mapped to clean up.
	 */
	if (!fl->sctx) {
		kfree(fl);
		return 0;
	}
	/* Best-effort: the file goes away regardless of remote errors. */
	(void)fastrpc_release_current_dsp_process(fl);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map);
	}
	/* Only drop the channel ref if no SSR invalidated it meanwhile. */
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
			fastrpc_channel_close, &fl->apps->smd_mutex);
	if (fl->sctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
	kfree(fl);
	return 0;
}
1906
1907static int fastrpc_device_release(struct inode *inode, struct file *file)
1908{
1909 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
1910
1911 if (fl) {
Sathish Ambley1ca68232017-01-19 10:32:55 -08001912 if (fl->debugfs_file != NULL)
1913 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001914 fastrpc_file_free(fl);
1915 file->private_data = 0;
1916 }
1917 return 0;
1918}
1919
1920static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
1921 void *priv)
1922{
1923 struct fastrpc_apps *me = &gfa;
1924 int cid = (int)((uintptr_t)priv);
1925 struct fastrpc_glink_info *link;
1926
1927 if (cid < 0 || cid >= NUM_CHANNELS)
1928 return;
1929
1930 link = &me->channel[cid].link;
1931 switch (cb_info->link_state) {
1932 case GLINK_LINK_STATE_UP:
1933 link->link_state = FASTRPC_LINK_STATE_UP;
1934 complete(&me->channel[cid].work);
1935 break;
1936 case GLINK_LINK_STATE_DOWN:
1937 link->link_state = FASTRPC_LINK_STATE_DOWN;
1938 break;
1939 default:
1940 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
1941 break;
1942 }
1943}
1944
/*
 * Register a glink link-state notifier for channel @cid and wait
 * (bounded by RPC_TIMEOUT) for the link to come up.  Idempotent: if a
 * notifier is already registered the function returns success without
 * waiting.
 */
static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;

	link = &me->channel[cid].link;
	/* Already registered: nothing to do. */
	if (link->link_notify_handle != NULL)
		goto bail;

	link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
	link->link_notify_handle = glink_register_link_state_cb(
			&link->link_info,
			(void *)((uintptr_t)cid));
	VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
	if (err) {
		link->link_notify_handle = NULL;
		goto bail;
	}
	/* Wait for fastrpc_link_state_handler() to signal link-up. */
	VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
						RPC_TIMEOUT));
bail:
	return err;
}
1972
1973static void fastrpc_glink_close(void *chan, int cid)
1974{
1975 int err = 0;
1976 struct fastrpc_glink_info *link;
1977
1978 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
1979 if (err)
1980 return;
1981 link = &gfa.channel[cid].link;
1982
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301983 if (link->port_state == FASTRPC_LINK_CONNECTED) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001984 link->port_state = FASTRPC_LINK_DISCONNECTING;
1985 glink_close(chan);
1986 }
1987}
1988
/*
 * Open the glink port for channel @cid.  Requires the link to be UP and
 * the port DISCONNECTED; fills in the open config (callbacks, edge,
 * transport) and stores the returned handle in the channel context.
 * Port state transitions to CONNECTING here and to CONNECTED in
 * fastrpc_glink_notify_state().
 */
static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &me->channel[cid].link.cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
	if (err)
		goto bail;

	link->port_state = FASTRPC_LINK_CONNECTING;
	/* cid travels through the callbacks as the priv cookie. */
	cfg->priv = (void *)(uintptr_t)cid;
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err)
		goto bail;
	me->channel[cid].chan = handle;
bail:
	return err;
}
2027
/*
 * debugfs open: stash the fastrpc_file stored in i_private so
 * fastrpc_debugfs_read() can find it.
 */
static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}
2033
2034static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
2035 size_t count, loff_t *position)
2036{
2037 struct fastrpc_file *fl = filp->private_data;
2038 struct hlist_node *n;
2039 struct fastrpc_buf *buf = 0;
2040 struct fastrpc_mmap *map = 0;
2041 struct smq_invoke_ctx *ictx = 0;
2042 struct fastrpc_channel_ctx *chan;
2043 struct fastrpc_session_ctx *sess;
2044 unsigned int len = 0;
2045 int i, j, ret = 0;
2046 char *fileinfo = NULL;
2047
2048 fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
2049 if (!fileinfo)
2050 goto bail;
2051 if (fl == NULL) {
2052 for (i = 0; i < NUM_CHANNELS; i++) {
2053 chan = &gcinfo[i];
2054 len += scnprintf(fileinfo + len,
2055 DEBUGFS_SIZE - len, "%s\n\n",
2056 chan->name);
2057 len += scnprintf(fileinfo + len,
2058 DEBUGFS_SIZE - len, "%s %d\n",
2059 "sesscount:", chan->sesscount);
2060 for (j = 0; j < chan->sesscount; j++) {
2061 sess = &chan->session[j];
2062 len += scnprintf(fileinfo + len,
2063 DEBUGFS_SIZE - len,
2064 "%s%d\n\n", "SESSION", j);
2065 len += scnprintf(fileinfo + len,
2066 DEBUGFS_SIZE - len,
2067 "%s %d\n", "sid:",
2068 sess->smmu.cb);
2069 len += scnprintf(fileinfo + len,
2070 DEBUGFS_SIZE - len,
2071 "%s %d\n", "SECURE:",
2072 sess->smmu.secure);
2073 }
2074 }
2075 } else {
2076 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2077 "%s %d\n\n",
2078 "PROCESS_ID:", fl->tgid);
2079 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2080 "%s %d\n\n",
2081 "CHANNEL_ID:", fl->cid);
2082 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2083 "%s %d\n\n",
2084 "SSRCOUNT:", fl->ssrcount);
2085 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2086 "%s\n",
2087 "LIST OF BUFS:");
2088 spin_lock(&fl->hlock);
2089 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
2090 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2091 "%s %p %s %p %s %llx\n", "buf:",
2092 buf, "buf->virt:", buf->virt,
2093 "buf->phys:", buf->phys);
2094 }
2095 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2096 "\n%s\n",
2097 "LIST OF MAPS:");
2098 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
2099 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2100 "%s %p %s %lx %s %llx\n",
2101 "map:", map,
2102 "map->va:", map->va,
2103 "map->phys:", map->phys);
2104 }
2105 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2106 "\n%s\n",
2107 "LIST OF PENDING SMQCONTEXTS:");
2108 hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
2109 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2110 "%s %p %s %u %s %u %s %u\n",
2111 "smqcontext:", ictx,
2112 "sc:", ictx->sc,
2113 "tid:", ictx->pid,
2114 "handle", ictx->rpra->h);
2115 }
2116 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2117 "\n%s\n",
2118 "LIST OF INTERRUPTED SMQCONTEXTS:");
2119 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
2120 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2121 "%s %p %s %u %s %u %s %u\n",
2122 "smqcontext:", ictx,
2123 "sc:", ictx->sc,
2124 "tid:", ictx->pid,
2125 "handle", ictx->rpra->h);
2126 }
2127 spin_unlock(&fl->hlock);
2128 }
2129 if (len > DEBUGFS_SIZE)
2130 len = DEBUGFS_SIZE;
2131 ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
2132 kfree(fileinfo);
2133bail:
2134 return ret;
2135}
2136
/* Shared file operations for the global and per-process debugfs nodes. */
static const struct file_operations debugfs_fops = {
	.open = fastrpc_debugfs_open,
	.read = fastrpc_debugfs_read,
};
/*
 * fastrpc_channel_open() - ensure the glink channel for fl->cid is open.
 *
 * Under me->smd_mutex: take a reference on an already-open channel, or
 * register + open the glink port and wait (with timeout) for the
 * port-open completion signalled by the glink notify callback.
 *
 * Returns 0 on success, non-zero on failure.
 */
static int fastrpc_channel_open(struct fastrpc_file *fl)
{
	struct fastrpc_apps *me = &gfa;
	int cid, err = 0;

	mutex_lock(&me->smd_mutex);

	VERIFY(err, fl && fl->sctx);
	if (err)
		goto bail;
	cid = fl->cid;
	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
	if (err)
		goto bail;
	/* Record the SSR generation this fd is bound to. */
	fl->ssrcount = me->channel[cid].ssrcount;
	/* Open the channel if nobody holds it or it was torn down by SSR. */
	if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
	    (me->channel[cid].chan == 0)) {
		VERIFY(err, 0 == fastrpc_glink_register(cid, me));
		if (err)
			goto bail;
		VERIFY(err, 0 == fastrpc_glink_open(cid));
		if (err)
			goto bail;

		/* wait_for_completion_timeout() returns 0 on timeout. */
		VERIFY(err,
			 wait_for_completion_timeout(&me->channel[cid].workport,
						RPC_TIMEOUT));
		if (err) {
			/*
			 * NOTE(review): on timeout the glink handle is only
			 * forgotten, not glink_close()d -- looks like a
			 * handle leak; confirm against the glink lifecycle.
			 */
			me->channel[cid].chan = 0;
			goto bail;
		}
		kref_init(&me->channel[cid].kref);
		pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
						MAJOR(me->dev_no), cid);
		/* Sync the SSR generation counters after a (re)open. */
		if (me->channel[cid].ssrcount !=
			 me->channel[cid].prevssrcount) {
			me->channel[cid].prevssrcount =
					me->channel[cid].ssrcount;
		}
	}

bail:
	mutex_unlock(&me->smd_mutex);
	return err;
}
2186
Sathish Ambley36849af2017-02-02 09:35:55 -08002187static int fastrpc_device_open(struct inode *inode, struct file *filp)
2188{
2189 int err = 0;
Sathish Ambley567012b2017-03-06 11:55:04 -08002190 struct dentry *debugfs_file;
Sathish Ambley36849af2017-02-02 09:35:55 -08002191 struct fastrpc_file *fl = 0;
2192 struct fastrpc_apps *me = &gfa;
2193
2194 VERIFY(err, fl = kzalloc(sizeof(*fl), GFP_KERNEL));
2195 if (err)
2196 return err;
Sathish Ambley567012b2017-03-06 11:55:04 -08002197 debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
2198 fl, &debugfs_fops);
Sathish Ambley36849af2017-02-02 09:35:55 -08002199 context_list_ctor(&fl->clst);
2200 spin_lock_init(&fl->hlock);
2201 INIT_HLIST_HEAD(&fl->maps);
2202 INIT_HLIST_HEAD(&fl->bufs);
2203 INIT_HLIST_NODE(&fl->hn);
2204 fl->tgid = current->tgid;
2205 fl->apps = me;
2206 fl->mode = FASTRPC_MODE_SERIAL;
2207 fl->cid = -1;
Sathish Ambley567012b2017-03-06 11:55:04 -08002208 if (debugfs_file != NULL)
2209 fl->debugfs_file = debugfs_file;
2210 memset(&fl->perf, 0, sizeof(fl->perf));
Sathish Ambley36849af2017-02-02 09:35:55 -08002211 filp->private_data = fl;
2212 spin_lock(&me->hlock);
2213 hlist_add_head(&fl->hn, &me->drivers);
2214 spin_unlock(&me->hlock);
2215 return 0;
2216}
2217
/*
 * fastrpc_get_info() - backend for FASTRPC_IOCTL_GETINFO.
 *
 * First call: @*info carries the requested channel id; the fd is bound
 * to that channel and an SMMU session is allocated.  On return @*info
 * reports whether the session's SMMU is enabled (1) or not (0).
 */
static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
{
	int err = 0;
	uint32_t cid;

	VERIFY(err, fl != 0);
	if (err)
		goto bail;
	if (fl->cid == -1) {
		cid = *info;
		/* cid is unsigned, so this also rejects "negative" ids. */
		VERIFY(err, cid < NUM_CHANNELS);
		if (err)
			goto bail;
		fl->cid = cid;
		fl->ssrcount = fl->apps->channel[cid].ssrcount;
		VERIFY(err, !fastrpc_session_alloc_locked(
				&fl->apps->channel[cid], 0, &fl->sctx));
		if (err)
			goto bail;
	}
	/* Session may still be absent if a previous alloc failed. */
	VERIFY(err, fl->sctx != NULL);
	if (err)
		goto bail;
	*info = (fl->sctx->smmu.enabled ? 1 : 0);
bail:
	return err;
}
2245
2246static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
2247 unsigned long ioctl_param)
2248{
2249 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07002250 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002251 struct fastrpc_ioctl_mmap mmap;
2252 struct fastrpc_ioctl_munmap munmap;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002253 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002254 struct fastrpc_ioctl_perf perf;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002255 } p;
2256 void *param = (char *)ioctl_param;
2257 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2258 int size = 0, err = 0;
2259 uint32_t info;
2260
2261 p.inv.fds = 0;
2262 p.inv.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07002263 p.inv.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002264
2265 switch (ioctl_num) {
2266 case FASTRPC_IOCTL_INVOKE:
2267 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07002268 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002269 case FASTRPC_IOCTL_INVOKE_FD:
2270 if (!size)
2271 size = sizeof(struct fastrpc_ioctl_invoke_fd);
2272 /* fall through */
2273 case FASTRPC_IOCTL_INVOKE_ATTRS:
2274 if (!size)
2275 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07002276 /* fall through */
2277 case FASTRPC_IOCTL_INVOKE_CRC:
2278 if (!size)
2279 size = sizeof(struct fastrpc_ioctl_invoke_crc);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002280 VERIFY(err, 0 == copy_from_user(&p.inv, param, size));
2281 if (err)
2282 goto bail;
2283 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
2284 0, &p.inv)));
2285 if (err)
2286 goto bail;
2287 break;
2288 case FASTRPC_IOCTL_MMAP:
2289 VERIFY(err, 0 == copy_from_user(&p.mmap, param,
2290 sizeof(p.mmap)));
2291 if (err)
2292 goto bail;
2293 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
2294 if (err)
2295 goto bail;
2296 VERIFY(err, 0 == copy_to_user(param, &p.mmap, sizeof(p.mmap)));
2297 if (err)
2298 goto bail;
2299 break;
2300 case FASTRPC_IOCTL_MUNMAP:
2301 VERIFY(err, 0 == copy_from_user(&p.munmap, param,
2302 sizeof(p.munmap)));
2303 if (err)
2304 goto bail;
2305 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
2306 &p.munmap)));
2307 if (err)
2308 goto bail;
2309 break;
2310 case FASTRPC_IOCTL_SETMODE:
2311 switch ((uint32_t)ioctl_param) {
2312 case FASTRPC_MODE_PARALLEL:
2313 case FASTRPC_MODE_SERIAL:
2314 fl->mode = (uint32_t)ioctl_param;
2315 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002316 case FASTRPC_MODE_PROFILE:
2317 fl->profile = (uint32_t)ioctl_param;
2318 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002319 default:
2320 err = -ENOTTY;
2321 break;
2322 }
2323 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002324 case FASTRPC_IOCTL_GETPERF:
2325 VERIFY(err, 0 == copy_from_user(&p.perf,
2326 param, sizeof(p.perf)));
2327 if (err)
2328 goto bail;
2329 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
2330 if (p.perf.keys) {
2331 char *keys = PERF_KEYS;
2332
2333 VERIFY(err, 0 == copy_to_user((char *)p.perf.keys,
2334 keys, strlen(keys)+1));
2335 if (err)
2336 goto bail;
2337 }
2338 if (p.perf.data) {
2339 VERIFY(err, 0 == copy_to_user((int64_t *)p.perf.data,
2340 &fl->perf, sizeof(fl->perf)));
2341 }
2342 VERIFY(err, 0 == copy_to_user(param, &p.perf, sizeof(p.perf)));
2343 if (err)
2344 goto bail;
2345 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002346 case FASTRPC_IOCTL_GETINFO:
Sathish Ambley36849af2017-02-02 09:35:55 -08002347 VERIFY(err, 0 == copy_from_user(&info, param, sizeof(info)));
2348 if (err)
2349 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002350 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
2351 if (err)
2352 goto bail;
2353 VERIFY(err, 0 == copy_to_user(param, &info, sizeof(info)));
2354 if (err)
2355 goto bail;
2356 break;
2357 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002358 p.init.attrs = 0;
2359 p.init.siglen = 0;
2360 size = sizeof(struct fastrpc_ioctl_init);
2361 /* fall through */
2362 case FASTRPC_IOCTL_INIT_ATTRS:
2363 if (!size)
2364 size = sizeof(struct fastrpc_ioctl_init_attrs);
2365 VERIFY(err, 0 == copy_from_user(&p.init, param, size));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002366 if (err)
2367 goto bail;
2368 VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
2369 if (err)
2370 goto bail;
2371 break;
2372
2373 default:
2374 err = -ENOTTY;
2375 pr_info("bad ioctl: %d\n", ioctl_num);
2376 break;
2377 }
2378 bail:
2379 return err;
2380}
2381
/*
 * fastrpc_restart_notifier_cb() - subsystem-restart (SSR) callback.
 *
 * On SUBSYS_BEFORE_SHUTDOWN: bump the channel's SSR generation, close
 * its glink port under smd_mutex, then wake every client blocked on
 * that channel so pending invokes can fail out.
 */
static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
					unsigned long code,
					void *data)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
	/* Recover the channel index from ctx's position in the array. */
	cid = ctx - &me->channel[0];
	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		mutex_lock(&me->smd_mutex);
		ctx->ssrcount++;
		if (ctx->chan) {
			fastrpc_glink_close(ctx->chan, cid);
			ctx->chan = 0;
			pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
				 gcinfo[cid].name, MAJOR(me->dev_no), cid);
		}
		mutex_unlock(&me->smd_mutex);
		/* Notify outside the mutex to avoid blocking clients on it. */
		fastrpc_notify_drivers(me, cid);
	}

	return NOTIFY_DONE;
}
2407
/* Character-device file operations for the fastrpc device node. */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};
2414
/*
 * Device-tree matches: top-level driver node, compute channels, SMMU
 * context banks, and the reserved ADSP memory region.  fastrpc_probe()
 * dispatches on these compatibles.
 */
static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};
2422
2423static int fastrpc_cb_probe(struct device *dev)
2424{
2425 struct fastrpc_channel_ctx *chan;
2426 struct fastrpc_session_ctx *sess;
2427 struct of_phandle_args iommuspec;
2428 const char *name;
2429 unsigned int start = 0x80000000;
2430 int err = 0, i;
2431 int secure_vmid = VMID_CP_PIXEL;
2432
2433 VERIFY(err, 0 != (name = of_get_property(dev->of_node, "label", NULL)));
2434 if (err)
2435 goto bail;
2436 for (i = 0; i < NUM_CHANNELS; i++) {
2437 if (!gcinfo[i].name)
2438 continue;
2439 if (!strcmp(name, gcinfo[i].name))
2440 break;
2441 }
2442 VERIFY(err, i < NUM_CHANNELS);
2443 if (err)
2444 goto bail;
2445 chan = &gcinfo[i];
2446 VERIFY(err, chan->sesscount < NUM_SESSIONS);
2447 if (err)
2448 goto bail;
2449
2450 VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
2451 "#iommu-cells", 0, &iommuspec));
2452 if (err)
2453 goto bail;
2454 sess = &chan->session[chan->sesscount];
2455 sess->smmu.cb = iommuspec.args[0] & 0xf;
2456 sess->used = 0;
2457 sess->smmu.coherent = of_property_read_bool(dev->of_node,
2458 "dma-coherent");
2459 sess->smmu.secure = of_property_read_bool(dev->of_node,
2460 "qcom,secure-context-bank");
2461 if (sess->smmu.secure)
2462 start = 0x60000000;
2463 VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
2464 arm_iommu_create_mapping(&platform_bus_type,
2465 start, 0x7fffffff)));
2466 if (err)
2467 goto bail;
2468
2469 if (sess->smmu.secure)
2470 iommu_domain_set_attr(sess->smmu.mapping->domain,
2471 DOMAIN_ATTR_SECURE_VMID,
2472 &secure_vmid);
2473
2474 VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
2475 if (err)
2476 goto bail;
2477 sess->dev = dev;
2478 sess->smmu.enabled = 1;
2479 chan->sesscount++;
Sathish Ambley1ca68232017-01-19 10:32:55 -08002480 debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
2481 NULL, &debugfs_fops);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002482bail:
2483 return err;
2484}
2485
/*
 * fastrpc_probe() - platform driver probe; dispatches on compatible.
 *
 * Context-bank nodes go to fastrpc_cb_probe().  The ADSP mem-region
 * node locates the ION ADSP heap's CMA area and hyp-assigns it to the
 * DSP VMIDs.  The top-level node just populates its DT children.
 */
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;
		/* Find the ADSP ION heap's CMA region, if present. */
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
						VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				};

			/* Share the region with HLOS plus the DSP VMIDs. */
			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
					srcVM, 1, destVM, destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					  fastrpc_match_table,
					  NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}
2549
/*
 * fastrpc_deinit() - release all channels and SMMU sessions.
 *
 * Drops each open channel's kref (fastrpc_channel_close runs once the
 * count hits zero, under smd_mutex) and detaches/releases every enabled
 * IOMMU session mapping.
 */
static void fastrpc_deinit(void)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *chan = gcinfo;
	int i, j;

	for (i = 0; i < NUM_CHANNELS; i++, chan++) {
		if (chan->chan) {
			kref_put_mutex(&chan->kref,
				fastrpc_channel_close, &me->smd_mutex);
			chan->chan = 0;
		}
		for (j = 0; j < NUM_SESSIONS; j++) {
			struct fastrpc_session_ctx *sess = &chan->session[j];

			if (sess->smmu.enabled) {
				arm_iommu_detach_device(sess->dev);
				sess->dev = 0;
			}
			if (sess->smmu.mapping) {
				arm_iommu_release_mapping(sess->smmu.mapping);
				sess->smmu.mapping = 0;
			}
		}
	}
}
2576
/* Platform driver; probe fans out by compatible string (see match table). */
static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};
2585
/*
 * fastrpc_device_init() - module init.
 *
 * Registers the platform driver, char-device region, class and a single
 * device node (minor 0), hooks a subsystem-restart notifier for every
 * channel, creates the ION client and the debugfs root directory.
 * Unwinds in reverse order through the goto ladder on failure.
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	struct device *dev = 0;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				1));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	/* All channels share one device node, named after gcinfo[0]. */
	dev = device_create(me->class, NULL,
				MKDEV(MAJOR(me->dev_no), 0),
				NULL, gcinfo[0].name);
	VERIFY(err, !IS_ERR_OR_NULL(dev));
	if (err)
		goto device_create_bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		me->channel[i].dev = dev;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	debugfs_root = debugfs_create_dir("adsprpc", NULL);
	return 0;
device_create_bail:
	/* Reached both before and after device creation; guard each step. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (me->channel[i].handle)
			subsys_notif_unregister_notifier(me->channel[i].handle,
							&me->channel[i].nb);
	}
	if (!IS_ERR_OR_NULL(dev))
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}
2654
/*
 * fastrpc_device_exit() - module exit: tear down open files, channels,
 * device nodes, SSR notifiers, the ION client and the debugfs tree.
 */
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;
	int i;

	fastrpc_file_list_dtor(me);
	fastrpc_deinit();
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		/*
		 * NOTE(review): init created only the minor-0 device node,
		 * yet this destroys minor i per channel -- confirm whether
		 * the extra device_destroy calls are intentional.
		 */
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						&me->channel[i].nb);
	}
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
	ion_client_destroy(me->client);
	debugfs_remove_recursive(debugfs_root);
}
2675
/* late_initcall: glink, ION and subsystem-notif must be up first. */
late_initcall(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");