blob: bd2155a58b339cd534c1f01dc21100508729c637 [file] [log] [blame]
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001/*
Sathish Ambleyae5ee542017-01-16 22:24:23 -08002 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
29#include <soc/qcom/subsystem_notif.h>
30#include <soc/qcom/subsystem_restart.h>
31#include <linux/scatterlist.h>
32#include <linux/fs.h>
33#include <linux/uaccess.h>
34#include <linux/device.h>
35#include <linux/of.h>
36#include <linux/of_address.h>
37#include <linux/of_platform.h>
38#include <linux/dma-contiguous.h>
39#include <linux/cma.h>
40#include <linux/iommu.h>
41#include <linux/kref.h>
42#include <linux/sort.h>
43#include <linux/msm_dma_iommu_mapping.h>
44#include <asm/dma-iommu.h>
45#include "adsprpc_compat.h"
46#include "adsprpc_shared.h"
Sathish Ambley1ca68232017-01-19 10:32:55 -080047#include <linux/debugfs.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070048
49#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
50#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
51#define TZ_PIL_AUTH_QDSP6_PROC 1
52#define FASTRPC_ENOSUCH 39
53#define VMID_SSC_Q6 5
54#define VMID_ADSP_Q6 6
Sathish Ambley1ca68232017-01-19 10:32:55 -080055#define DEBUGFS_SIZE 1024
Sathish Ambley69e1ab02016-10-18 10:28:15 -070056
57#define RPC_TIMEOUT (5 * HZ)
58#define BALIGN 128
59#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
60#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
Sathish Ambleybae51902017-07-03 15:00:49 -070061#define M_FDLIST (16)
62#define M_CRCLIST (64)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070063
64#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
65
66#define FASTRPC_LINK_STATE_DOWN (0x0)
67#define FASTRPC_LINK_STATE_UP (0x1)
68#define FASTRPC_LINK_DISCONNECTED (0x0)
69#define FASTRPC_LINK_CONNECTING (0x1)
70#define FASTRPC_LINK_CONNECTED (0x3)
71#define FASTRPC_LINK_DISCONNECTING (0x7)
72
Sathish Ambleya21b5b52017-01-11 16:11:01 -080073#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
74#define FASTRPC_STATIC_HANDLE_LISTENER (3)
75#define FASTRPC_STATIC_HANDLE_MAX (20)
76
/*
 * PERF()/PERF_END bracket a code region for optional profiling.
 * Usage: PERF(enable, counter, <statements> PERF_END); — the
 * statements plus the trailing PERF_END no-op form the third macro
 * argument.  When "enb" is true, the elapsed time of "ff" is added
 * to the per-file perf counter "cnt"; otherwise only "ff" runs.
 */
#define PERF_END (void)0

#define PERF(enb, cnt, ff) \
	{\
		struct timespec startT = {0};\
		if (enb) {\
			getnstimeofday(&startT);\
		} \
		ff ;\
		if (enb) {\
			cnt += getnstimediff(&startT);\
		} \
	}
90
Sathish Ambley69e1ab02016-10-18 10:28:15 -070091static int fastrpc_glink_open(int cid);
92static void fastrpc_glink_close(void *chan, int cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -080093static struct dentry *debugfs_root;
94static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -070095
96static inline uint64_t buf_page_start(uint64_t buf)
97{
98 uint64_t start = (uint64_t) buf & PAGE_MASK;
99 return start;
100}
101
102static inline uint64_t buf_page_offset(uint64_t buf)
103{
104 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
105 return offset;
106}
107
108static inline int buf_num_pages(uint64_t buf, ssize_t len)
109{
110 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
111 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
112 int nPages = end - start + 1;
113 return nPages;
114}
115
116static inline uint64_t buf_page_size(uint32_t size)
117{
118 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
119
120 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
121}
122
/* Convert a 64-bit value to a pointer, narrowing via uintptr_t so the
 * conversion is well defined on 32-bit builds.
 */
static inline void *uint64_to_ptr(uint64_t addr)
{
	return (void *)(uintptr_t)addr;
}
129
/* Convert a pointer to a 64-bit value, widening via uintptr_t so the
 * conversion is well defined on 32-bit builds.
 */
static inline uint64_t ptr_to_uint64(void *ptr)
{
	return (uint64_t)(uintptr_t)ptr;
}
136
struct fastrpc_file;

/* One DMA-coherent buffer owned by a client fd; cached on fl->bufs. */
struct fastrpc_buf {
	struct hlist_node hn;		/* link in fl->bufs cache list */
	struct fastrpc_file *fl;	/* owning client */
	void *virt;			/* kernel virtual address */
	uint64_t phys;			/* device address; SMMU cb may be encoded in bits 32+ */
	ssize_t size;			/* allocation size in bytes */
};
146
struct fastrpc_ctx_lst;

/*
 * Address range of one invoke buffer and, after context_build_overlap(),
 * the sub-range (mstart/mend) that must actually be copied once regions
 * covered by earlier buffers are removed.
 */
struct overlap {
	uintptr_t start;	/* buffer start VA */
	uintptr_t end;		/* buffer end VA (exclusive) */
	int raix;		/* index into the remote-arg array */
	uintptr_t mstart;	/* start of the region to copy */
	uintptr_t mend;		/* end of the region to copy (0,0 = fully covered) */
	uintptr_t offset;	/* bytes of this buffer covered by earlier ones */
};
157
/* Per-invocation state for one remote call; allocated in context_alloc(). */
struct smq_invoke_ctx {
	struct hlist_node hn;		/* link in clst->pending or ->interrupted */
	struct completion work;		/* completed when the DSP replies */
	int retval;			/* remote return value */
	int pid;			/* invoking thread */
	int tgid;			/* invoking process */
	remote_arg_t *lpra;		/* local (caller-sized) argument array */
	remote_arg64_t *rpra;		/* packed 64-bit args shared with the DSP */
	int *fds;			/* per-arg ion fds (0/-1 = none) */
	unsigned int *attrs;		/* per-arg FASTRPC_ATTR_* flags */
	struct fastrpc_mmap **maps;	/* per-arg mappings (bufs then handles) */
	struct fastrpc_buf *buf;	/* metadata + copy buffer */
	ssize_t used;			/* bytes of buf consumed by copies */
	struct fastrpc_file *fl;	/* owning client */
	uint32_t sc;			/* scalars word describing the method */
	struct overlap *overs;		/* per-buffer overlap records */
	struct overlap **overps;	/* overs sorted by start address */
	struct smq_msg msg;		/* message sent over the transport */
	uint32_t *crc;			/* optional user buffer for the CRC list */
};
178
/* Outstanding invocation contexts of one client fd. */
struct fastrpc_ctx_lst {
	struct hlist_head pending;	/* issued (or being issued) to the DSP */
	struct hlist_head interrupted;	/* invocations interrupted by a signal */
};
183
/* SMMU configuration of one context bank. */
struct fastrpc_smmu {
	struct dma_iommu_mapping *mapping;
	int cb;		/* context bank number; 0 when no SMMU is used */
	int enabled;	/* nonzero when mappings go through the SMMU */
	int faults;
	int secure;	/* bank serves secure (CPZ) buffers */
	int coherent;	/* bank is IO-coherent */
};

/* One DSP session: a device plus its SMMU context bank. */
struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;	/* session already handed out to a client */
};
198
/* glink transport state for one channel. */
struct fastrpc_glink_info {
	int link_state;		/* FASTRPC_LINK_STATE_* */
	int port_state;		/* FASTRPC_LINK_* connection state */
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};
206
/* Per-DSP (adsp/mdsp/slpi/cdsp) channel state; one entry of gcinfo[]. */
struct fastrpc_channel_ctx {
	char *name;			/* device node name */
	char *subsys;			/* subsystem-restart name */
	void *chan;			/* glink channel handle */
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;		/* link-up notification */
	struct completion workport;	/* port-open notification */
	struct notifier_block nb;	/* SSR notifier block */
	struct kref kref;		/* open-channel reference count */
	int sesscount;
	int ssrcount;			/* bumped on each subsystem restart */
	void *handle;			/* SSR notifier handle */
	int prevssrcount;
	int vmid;			/* hyp VMID for memory assignment; 0 if none */
	struct fastrpc_glink_info link;
};
224
/* Global driver state (the gfa singleton). */
struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;	/* points at gcinfo[] */
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;		/* global (cross-client) mappings */
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;	/* all open fastrpc_file clients */
	spinlock_t hlock;		/* protects drivers and maps lists */
	struct ion_client *client;
	struct device *dev;
};
239
/* One ion buffer mapped for remote access. */
struct fastrpc_mmap {
	struct hlist_node hn;		/* link in fl->maps or gfa.maps */
	struct fastrpc_file *fl;	/* owning client */
	struct fastrpc_apps *apps;
	int fd;				/* ion fd the mapping was created from */
	uint32_t flags;			/* mmap flags passed by the caller */
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;			/* device address; SMMU cb encoded in bits 32+ */
	ssize_t size;			/* mapped size */
	uintptr_t va;			/* user virtual address */
	ssize_t len;			/* caller-requested length */
	int refs;			/* reference count, guarded by fl->hlock */
	uintptr_t raddr;		/* remote (DSP-side) address */
	int uncached;			/* buffer is not CPU-cached */
	int secure;			/* buffer carries ION_FLAG_SECURE */
	uintptr_t attr;			/* FASTRPC_ATTR_* bits */
};
260
/* Per-file nanosecond profiling counters, reported in PERF_KEYS order. */
struct fastrpc_perf {
	int64_t count;		/* number of profiled invocations */
	int64_t flush;
	int64_t map;
	int64_t copy;
	int64_t link;		/* transport transmit time */
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;	/* cache invalidate time */
	int64_t invoke;		/* total invoke time */
};
272
/* Per-open-fd client state. */
struct fastrpc_file {
	struct hlist_node hn;		/* link in gfa.drivers */
	spinlock_t hlock;		/* protects maps, bufs and clst lists */
	struct hlist_head maps;		/* this client's fastrpc_mmaps */
	struct hlist_head bufs;		/* cached fastrpc_bufs for reuse */
	struct fastrpc_ctx_lst clst;	/* pending/interrupted invocations */
	struct fastrpc_session_ctx *sctx;	/* non-secure session */
	struct fastrpc_session_ctx *secsctx;	/* secure session, lazily allocated */
	uint32_t mode;			/* FASTRPC_MODE_* */
	uint32_t profile;		/* nonzero enables PERF() accounting */
	int tgid;
	int cid;			/* channel id, index into gfa.channel */
	int ssrcount;			/* channel ssrcount at open time */
	int pd;				/* process domain on the DSP */
	int file_close;			/* set while the fd is being released */
	struct fastrpc_apps *apps;
	struct fastrpc_perf perf;
	struct dentry *debugfs_file;
};
292
/* Singleton driver state. */
static struct fastrpc_apps gfa;

/* Static channel table, indexed by channel id (cid). */
static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};
321
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800322static inline int64_t getnstimediff(struct timespec *start)
323{
324 int64_t ns;
325 struct timespec ts, b;
326
327 getnstimeofday(&ts);
328 b = timespec_sub(ts, *start);
329 ns = timespec_to_ns(&b);
330 return ns;
331}
332
/*
 * Release a buffer.  With "cache" set the buffer is parked on the
 * owner's free list for reuse; otherwise it is returned to HLOS-only
 * hyp ownership (when the channel has a VMID), freed back to the DMA
 * pool and its struct released.
 */
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == 0 ? 0 : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		/* park on the per-file cache instead of freeing */
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		/* strip the context-bank id encoded in the upper 32 bits */
		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			/* hand the pages back to HLOS-only ownership */
			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->dev, buf->size, buf->virt,
					buf->phys);
	}
	kfree(buf);
}
364
365static void fastrpc_buf_list_free(struct fastrpc_file *fl)
366{
367 struct fastrpc_buf *buf, *free;
368
369 do {
370 struct hlist_node *n;
371
372 free = 0;
373 spin_lock(&fl->hlock);
374 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
375 hlist_del_init(&buf->hn);
376 free = buf;
377 break;
378 }
379 spin_unlock(&fl->hlock);
380 if (free)
381 fastrpc_buf_free(free, 0);
382 } while (free);
383}
384
385static void fastrpc_mmap_add(struct fastrpc_mmap *map)
386{
387 struct fastrpc_file *fl = map->fl;
388
389 spin_lock(&fl->hlock);
390 hlist_add_head(&map->hn, &fl->maps);
391 spin_unlock(&fl->hlock);
392}
393
394static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800395 ssize_t len, int mflags, int refs, struct fastrpc_mmap **ppmap)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700396{
397 struct fastrpc_mmap *match = 0, *map;
398 struct hlist_node *n;
399
400 spin_lock(&fl->hlock);
401 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
402 if (va >= map->va &&
403 va + len <= map->va + map->len &&
404 map->fd == fd) {
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800405 if (refs)
406 map->refs++;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700407 match = map;
408 break;
409 }
410 }
411 spin_unlock(&fl->hlock);
412 if (match) {
413 *ppmap = match;
414 return 0;
415 }
416 return -ENOTTY;
417}
418
419static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
420 ssize_t len, struct fastrpc_mmap **ppmap)
421{
422 struct fastrpc_mmap *match = 0, *map;
423 struct hlist_node *n;
424 struct fastrpc_apps *me = &gfa;
425
426 spin_lock(&me->hlock);
427 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
428 if (map->raddr == va &&
429 map->raddr + map->len == va + len &&
430 map->refs == 1) {
431 match = map;
432 hlist_del_init(&map->hn);
433 break;
434 }
435 }
436 spin_unlock(&me->hlock);
437 if (match) {
438 *ppmap = match;
439 return 0;
440 }
441 spin_lock(&fl->hlock);
442 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
443 if (map->raddr == va &&
444 map->raddr + map->len == va + len &&
445 map->refs == 1) {
446 match = map;
447 hlist_del_init(&map->hn);
448 break;
449 }
450 }
451 spin_unlock(&fl->hlock);
452 if (match) {
453 *ppmap = match;
454 return 0;
455 }
456 return -ENOTTY;
457}
458
/*
 * Drop one reference on a mapping; on the last reference unlink it and
 * tear everything down: ion handle, IOMMU mapping, hyp ownership
 * (returned to HLOS), dma-buf attachment and finally the struct.
 */
static void fastrpc_mmap_free(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;
	int destVM[1] = {VMID_HLOS};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	if (!map)
		return;
	fl = map->fl;
	spin_lock(&fl->hlock);
	map->refs--;
	if (!map->refs)
		hlist_del_init(&map->hn);
	spin_unlock(&fl->hlock);
	if (map->refs > 0)
		return;
	/* secure buffers were mapped through the secure session */
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	if (!IS_ERR_OR_NULL(map->handle))
		ion_free(fl->apps->client, map->handle);
	if (sess->smmu.enabled) {
		if (map->size || map->phys)
			msm_dma_unmap_sg(sess->dev,
				map->table->sgl,
				map->table->nents, DMA_BIDIRECTIONAL,
				map->buf);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid && map->phys) {
		int srcVM[2] = {VMID_HLOS, vmid};

		/* hand the pages back to HLOS-only ownership */
		hyp_assign_phys(map->phys, buf_page_size(map->size),
			srcVM, 2, destVM, destVMperm, 1);
	}

	if (!IS_ERR_OR_NULL(map->table))
		dma_buf_unmap_attachment(map->attach, map->table,
				DMA_BIDIRECTIONAL);
	if (!IS_ERR_OR_NULL(map->attach))
		dma_buf_detach(map->buf, map->attach);
	if (!IS_ERR_OR_NULL(map->buf))
		dma_buf_put(map->buf);
	kfree(map);
}
508
509static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
510 struct fastrpc_session_ctx **session);
511
/*
 * Create (or reuse) a mapping of ion buffer "fd" covering the user
 * range [va, va + len).  On success a referenced map is returned in
 * *ppmap; an existing map of the same fd/range just gains a ref.
 * On any failure the partially built map is torn down via
 * fastrpc_mmap_free().
 */
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, ssize_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = 0;
	unsigned long attrs;
	unsigned long flags;
	int err = 0, vmid;

	/* fast path: already mapped, just take a reference */
	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	VERIFY(err, !IS_ERR_OR_NULL(map->handle =
			ion_import_dma_buf_fd(fl->apps->client, fd)));
	if (err)
		goto bail;
	VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
	if (err)
		goto bail;

	map->uncached = !ION_IS_CACHED(flags);
	if (map->attr & FASTRPC_ATTR_NOVA)
		map->uncached = 1;

	map->secure = flags & ION_FLAG_SECURE;
	if (map->secure) {
		/* secure buffers need the (lazily created) secure session */
		if (!fl->secsctx)
			err = fastrpc_session_alloc(chan, 1,
						&fl->secsctx);
		if (err)
			goto bail;
	}
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->attach =
			dma_buf_attach(map->buf, sess->dev)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->table =
		dma_buf_map_attachment(map->attach,
			DMA_BIDIRECTIONAL)));
	if (err)
		goto bail;
	if (sess->smmu.enabled) {
		attrs = DMA_ATTR_EXEC_MAPPING;

		/* choose coherency attribute from buffer/session state */
		if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
			(sess->smmu.coherent && map->uncached))
			attrs |= DMA_ATTR_FORCE_NON_COHERENT;
		else if (map->attr & FASTRPC_ATTR_COHERENT)
			attrs |= DMA_ATTR_FORCE_COHERENT;

		VERIFY(err, map->table->nents ==
			msm_dma_map_sg_attrs(sess->dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
		if (err)
			goto bail;
	} else {
		/* without an SMMU the buffer must be one contiguous chunk */
		VERIFY(err, map->table->nents == 1);
		if (err)
			goto bail;
	}
	map->phys = sg_dma_address(map->table->sgl);
	if (sess->smmu.cb) {
		/* tag the device address with the context-bank number */
		map->phys += ((uint64_t)sess->smmu.cb << 32);
		map->size = sg_dma_len(map->table->sgl);
	} else {
		map->size = buf_page_size(len);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

		/* share the pages with the remote subsystem's VM */
		VERIFY(err, !hyp_assign_phys(map->phys,
				buf_page_size(map->size),
				srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}
	map->va = va;
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}
626
/*
 * Get a DMA buffer of at least "size" bytes for client "fl".  The
 * per-file cache is searched for the smallest buffer that fits; on a
 * miss a new dma_alloc_coherent() allocation is made (retried once
 * after draining the cache) and, when the channel has a VMID, shared
 * with the remote VM via hyp_assign_phys().
 */
static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
			     struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = 0, *fr = 0;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = 0;
	VERIFY(err, buf = kzalloc(sizeof(*buf), GFP_KERNEL));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = 0;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
				       (void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
					       (void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	/* encode the context bank in the upper bits of the device address */
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

		/* share the new buffer with the remote subsystem's VM */
		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
 bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}
692
693
/*
 * If the calling thread has an earlier invocation that was interrupted
 * by a signal, move it back to the pending list and return it so the
 * wait can be resumed instead of re-issuing the call.  Returns -1 when
 * an interrupted context exists for this thread but does not match the
 * requested method (sc) or file; 0 otherwise (with *po set only when a
 * context was actually restored).
 */
static int context_restore_interrupted(struct fastrpc_file *fl,
				struct fastrpc_ioctl_invoke_crc *inv,
				struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = 0, *ictx = 0;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			/* the same thread must retry the identical call */
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}
721
722#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
723static int overlap_ptr_cmp(const void *a, const void *b)
724{
725 struct overlap *pa = *((struct overlap **)a);
726 struct overlap *pb = *((struct overlap **)b);
727 /* sort with lowest starting buffer first */
728 int st = CMP(pa->start, pb->start);
729 /* sort with highest ending buffer first */
730 int ed = CMP(pb->end, pa->end);
731 return st == 0 ? ed : st;
732}
733
/*
 * Compute, for every invoke buffer, the sub-range that actually needs
 * to be copied.  Buffers are sorted by start address (widest first on
 * ties); a buffer wholly inside an earlier one copies nothing, while a
 * partially overlapping one copies only the tail past the already
 * covered region.  Fails when a non-empty buffer wraps the address
 * space.
 */
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			/* reject ranges that wrap around the address space */
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, 0);
	/* "max" tracks the highest address covered so far */
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			/* overlaps an earlier buffer: copy only the tail */
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				/* fully contained: nothing to copy */
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			/* disjoint: copy the whole buffer */
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}
779
/*
 * Copy helpers that honour the "kernel" flag of an invocation: user
 * invocations go through copy_{from,to}_user() with the result checked
 * into "err" via VERIFY(); in-kernel invocations use a plain memmove.
 */
#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst), (src),\
							(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((dst), (src),\
						      (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)
797
798
799static void context_free(struct smq_invoke_ctx *ctx);
800
/*
 * Allocate and initialise an invocation context.  The local-arg, map,
 * fd, attr and overlap arrays are carved out of one allocation placed
 * directly behind the struct.  Arguments/fds/attrs are copied in from
 * user space unless "kernel" is set.  The new context is queued on the
 * client's pending list before being returned in *po.
 */
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			struct fastrpc_ioctl_invoke_crc *invokefd,
			struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	/* REMOTE_SCALARS_LENGTH counts buffers and handles together */
	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	/* carve the trailing allocation into the per-arg arrays */
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}
	ctx->crc = (uint32_t *)invokefd->crc;
	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = current->tgid;
	init_completion(&ctx->work);

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}
870
871static void context_save_interrupted(struct smq_invoke_ctx *ctx)
872{
873 struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
874
875 spin_lock(&ctx->fl->hlock);
876 hlist_del_init(&ctx->hn);
877 hlist_add_head(&ctx->hn, &clst->interrupted);
878 spin_unlock(&ctx->fl->hlock);
879 /* free the cache on power collapse */
880 fastrpc_buf_list_free(ctx->fl);
881}
882
/*
 * Unlink an invocation context and release its resources: the per-arg
 * mappings, the metadata/copy buffer (parked back on the owner's
 * cache) and the context allocation itself.
 * NOTE(review): only the inbuf/outbuf maps are released here, not any
 * handle maps at indices >= nbufs — confirm those are dropped on the
 * put_args()/fdlist path.
 */
static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i]);
	fastrpc_buf_free(ctx->buf, 1);
	kfree(ctx);
}
896
/* Record the remote return value, then wake the waiting invoker. */
static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}
902
903
904static void fastrpc_notify_users(struct fastrpc_file *me)
905{
906 struct smq_invoke_ctx *ictx;
907 struct hlist_node *n;
908
909 spin_lock(&me->hlock);
910 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
911 complete(&ictx->work);
912 }
913 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
914 complete(&ictx->work);
915 }
916 spin_unlock(&me->hlock);
917
918}
919
920static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
921{
922 struct fastrpc_file *fl;
923 struct hlist_node *n;
924
925 spin_lock(&me->hlock);
926 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
927 if (fl->cid == cid)
928 fastrpc_notify_users(fl);
929 }
930 spin_unlock(&me->hlock);
931
932}
933static void context_list_ctor(struct fastrpc_ctx_lst *me)
934{
935 INIT_HLIST_HEAD(&me->interrupted);
936 INIT_HLIST_HEAD(&me->pending);
937}
938
939static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
940{
941 struct fastrpc_ctx_lst *clst = &fl->clst;
942 struct smq_invoke_ctx *ictx = 0, *ctxfree;
943 struct hlist_node *n;
944
945 do {
946 ctxfree = 0;
947 spin_lock(&fl->hlock);
948 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
949 hlist_del_init(&ictx->hn);
950 ctxfree = ictx;
951 break;
952 }
953 spin_unlock(&fl->hlock);
954 if (ctxfree)
955 context_free(ctxfree);
956 } while (ctxfree);
957 do {
958 ctxfree = 0;
959 spin_lock(&fl->hlock);
960 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
961 hlist_del_init(&ictx->hn);
962 ctxfree = ictx;
963 break;
964 }
965 spin_unlock(&fl->hlock);
966 if (ctxfree)
967 context_free(ctxfree);
968 } while (ctxfree);
969}
970
971static int fastrpc_file_free(struct fastrpc_file *fl);
972static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
973{
974 struct fastrpc_file *fl, *free;
975 struct hlist_node *n;
976
977 do {
978 free = 0;
979 spin_lock(&me->hlock);
980 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
981 hlist_del_init(&fl->hn);
982 free = fl;
983 break;
984 }
985 spin_unlock(&me->hlock);
986 if (free)
987 fastrpc_file_free(free);
988 } while (free);
989}
990
991static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
992{
993 remote_arg64_t *rpra;
994 remote_arg_t *lpra = ctx->lpra;
995 struct smq_invoke_buf *list;
996 struct smq_phy_page *pages, *ipage;
997 uint32_t sc = ctx->sc;
998 int inbufs = REMOTE_SCALARS_INBUFS(sc);
999 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001000 int handles, bufs = inbufs + outbufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001001 uintptr_t args;
1002 ssize_t rlen = 0, copylen = 0, metalen = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001003 int i, oix;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001004 int err = 0;
1005 int mflags = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001006 uint64_t *fdlist;
Sathish Ambleybae51902017-07-03 15:00:49 -07001007 uint32_t *crclist;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001008
1009 /* calculate size of the metadata */
1010 rpra = 0;
1011 list = smq_invoke_buf_start(rpra, sc);
1012 pages = smq_phy_page_start(sc, list);
1013 ipage = pages;
1014
1015 for (i = 0; i < bufs; ++i) {
1016 uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
1017 ssize_t len = lpra[i].buf.len;
1018
1019 if (ctx->fds[i] && (ctx->fds[i] != -1))
1020 fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1021 ctx->attrs[i], buf, len,
1022 mflags, &ctx->maps[i]);
1023 ipage += 1;
1024 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001025 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1026 for (i = bufs; i < bufs + handles; i++) {
1027 VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1028 FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
1029 if (err)
1030 goto bail;
1031 ipage += 1;
1032 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001033 metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
1034 (sizeof(uint32_t) * M_CRCLIST);
1035
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001036 /* calculate len requreed for copying */
1037 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1038 int i = ctx->overps[oix]->raix;
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001039 uintptr_t mstart, mend;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001040 ssize_t len = lpra[i].buf.len;
1041
1042 if (!len)
1043 continue;
1044 if (ctx->maps[i])
1045 continue;
1046 if (ctx->overps[oix]->offset == 0)
1047 copylen = ALIGN(copylen, BALIGN);
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001048 mstart = ctx->overps[oix]->mstart;
1049 mend = ctx->overps[oix]->mend;
1050 VERIFY(err, (mend - mstart) <= LONG_MAX);
1051 if (err)
1052 goto bail;
1053 copylen += mend - mstart;
1054 VERIFY(err, copylen >= 0);
1055 if (err)
1056 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001057 }
1058 ctx->used = copylen;
1059
1060 /* allocate new buffer */
1061 if (copylen) {
1062 VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
1063 if (err)
1064 goto bail;
1065 }
Tharun Kumar Merugue3361f92017-06-22 10:45:43 +05301066 if (ctx->buf->virt && metalen <= copylen)
1067 memset(ctx->buf->virt, 0, metalen);
1068
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001069 /* copy metadata */
1070 rpra = ctx->buf->virt;
1071 ctx->rpra = rpra;
1072 list = smq_invoke_buf_start(rpra, sc);
1073 pages = smq_phy_page_start(sc, list);
1074 ipage = pages;
1075 args = (uintptr_t)ctx->buf->virt + metalen;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001076 for (i = 0; i < bufs + handles; ++i) {
1077 if (lpra[i].buf.len)
1078 list[i].num = 1;
1079 else
1080 list[i].num = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001081 list[i].pgidx = ipage - pages;
1082 ipage++;
1083 }
1084 /* map ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001085 PERF(ctx->fl->profile, ctx->fl->perf.map,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001086 for (i = 0; i < inbufs + outbufs; ++i) {
1087 struct fastrpc_mmap *map = ctx->maps[i];
1088 uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
1089 ssize_t len = lpra[i].buf.len;
1090
1091 rpra[i].buf.pv = 0;
1092 rpra[i].buf.len = len;
1093 if (!len)
1094 continue;
1095 if (map) {
1096 struct vm_area_struct *vma;
1097 uintptr_t offset;
1098 int num = buf_num_pages(buf, len);
1099 int idx = list[i].pgidx;
1100
1101 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001102 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001103 } else {
1104 down_read(&current->mm->mmap_sem);
1105 VERIFY(err, NULL != (vma = find_vma(current->mm,
1106 map->va)));
1107 if (err) {
1108 up_read(&current->mm->mmap_sem);
1109 goto bail;
1110 }
1111 offset = buf_page_start(buf) - vma->vm_start;
1112 up_read(&current->mm->mmap_sem);
1113 VERIFY(err, offset < (uintptr_t)map->size);
1114 if (err)
1115 goto bail;
1116 }
1117 pages[idx].addr = map->phys + offset;
1118 pages[idx].size = num << PAGE_SHIFT;
1119 }
1120 rpra[i].buf.pv = buf;
1121 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001122 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001123 for (i = bufs; i < bufs + handles; ++i) {
1124 struct fastrpc_mmap *map = ctx->maps[i];
1125
1126 pages[i].addr = map->phys;
1127 pages[i].size = map->size;
1128 }
1129 fdlist = (uint64_t *)&pages[bufs + handles];
1130 for (i = 0; i < M_FDLIST; i++)
1131 fdlist[i] = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001132 crclist = (uint32_t *)&fdlist[M_FDLIST];
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301133 memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001134
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001135 /* copy non ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001136 PERF(ctx->fl->profile, ctx->fl->perf.copy,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001137 rlen = copylen - metalen;
1138 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1139 int i = ctx->overps[oix]->raix;
1140 struct fastrpc_mmap *map = ctx->maps[i];
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001141 ssize_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001142 uint64_t buf;
1143 ssize_t len = lpra[i].buf.len;
1144
1145 if (!len)
1146 continue;
1147 if (map)
1148 continue;
1149 if (ctx->overps[oix]->offset == 0) {
1150 rlen -= ALIGN(args, BALIGN) - args;
1151 args = ALIGN(args, BALIGN);
1152 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001153 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001154 VERIFY(err, rlen >= mlen);
1155 if (err)
1156 goto bail;
1157 rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
1158 pages[list[i].pgidx].addr = ctx->buf->phys -
1159 ctx->overps[oix]->offset +
1160 (copylen - rlen);
1161 pages[list[i].pgidx].addr =
1162 buf_page_start(pages[list[i].pgidx].addr);
1163 buf = rpra[i].buf.pv;
1164 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1165 if (i < inbufs) {
1166 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1167 lpra[i].buf.pv, len);
1168 if (err)
1169 goto bail;
1170 }
1171 args = args + mlen;
1172 rlen -= mlen;
1173 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001174 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001175
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001176 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001177 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1178 int i = ctx->overps[oix]->raix;
1179 struct fastrpc_mmap *map = ctx->maps[i];
1180
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001181 if (map && map->uncached)
1182 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301183 if (ctx->fl->sctx->smmu.coherent &&
1184 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1185 continue;
1186 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1187 continue;
1188
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001189 if (rpra[i].buf.len && ctx->overps[oix]->mstart)
1190 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1191 uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
1192 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001193 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001194 for (i = bufs; i < bufs + handles; i++) {
1195 rpra[i].dma.fd = ctx->fds[i];
1196 rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1197 rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001198 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001199
1200 if (!ctx->fl->sctx->smmu.coherent) {
1201 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001202 dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001203 PERF_END);
1204 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001205 bail:
1206 return err;
1207}
1208
/*
 * put_args() - unmarshal results after a remote invocation completes.
 *
 * For each output buffer that was staged in the copy buffer (no mmap),
 * copy the DSP-written data back to the caller's buffer; for mapped
 * output buffers the DSP wrote in place, so just drop the per-call map.
 * Then free local maps for every non-zero fd found in the metadata fd
 * list (entries are zeroed before send in get_args, so non-zero values
 * were presumably written by the remote side), and finally copy the CRC
 * list out if the caller asked for it.
 *
 * @kernel: non-zero when destination buffers are in kernel space
 *          (K_COPY_TO_USER selects memcpy vs copy_to_user accordingly).
 * @ctx:    the completed invoke context.
 * @upra:   caller's remote-arg array; currently unused — copies target
 *          ctx->lpra instead.
 *
 * Returns 0 on success or the error from a failed copy-out.
 */
static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		    remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	uint32_t *crclist = NULL;

	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	/*
	 * Metadata layout in the shared buffer: invoke-buf list, then
	 * page descriptors, then M_FDLIST fds, then M_CRCLIST crcs.
	 * Must stay in sync with the layout built in get_args().
	 */
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	crclist = (uint32_t *)(fdlist + M_FDLIST);

	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			/* Staged buffer: copy the result back out. */
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			/* Mapped buffer: nothing to copy, release map. */
			fastrpc_mmap_free(ctx->maps[i]);
			ctx->maps[i] = 0;
		}
	}
	if (inbufs + outbufs + handles) {
		/* Release maps for fds listed in the returned fd list;
		 * the list is terminated by the first zero entry.
		 */
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap);
		}
	}
	/* Copy out per-buffer CRCs if userspace supplied a crc pointer. */
	if (ctx->crc && crclist && rpra)
		K_COPY_TO_USER(err, kernel, (void __user *)ctx->crc,
			crclist, M_CRCLIST*sizeof(uint32_t));

 bail:
	return err;
}
1260
/*
 * inv_args_pre() - pre-invoke cache maintenance on output buffers.
 *
 * Before the DSP is allowed to write into the output buffers, flush the
 * partial cache lines at any unaligned head and tail of each cached
 * output buffer (the usual reason: so a later write-back of a dirty
 * boundary line cannot clobber DSP-written data — NOTE(review): intent
 * inferred from the edge-only flushes, confirm against design docs).
 *
 * Skipped entirely for buffers that are uncached, zero-length, marked
 * coherent (or on a coherent SMMU without the NON_COHERENT override),
 * or that live on the same page as the rpra metadata.
 */
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	/* Only output buffers need protection; inputs are read-only
	 * to the DSP.
	 */
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* Buffer shares a page with the metadata: handled by the
		 * whole-metadata flush elsewhere.
		 */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		/* Flush the cache line straddling the buffer start... */
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		/* ...and the line straddling the buffer end. */
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
							rpra[i].buf.len);
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}
1296
/*
 * inv_args() - post-invoke cache invalidation on output buffers.
 *
 * After the DSP has completed the call, invalidate CPU cache lines
 * covering each cached output buffer so the CPU observes the data the
 * DSP wrote.  ION-backed maps use the ION cache op on their handle;
 * everything else gets a raw dmac_inv_range().  Finally the whole
 * metadata/copy region (rpra .. rpra + used) is invalidated.
 *
 * The skip conditions mirror inv_args_pre(): uncached maps,
 * zero-length buffers, coherent attributes / coherent SMMU, and
 * buffers sharing a page with the metadata (covered by the final
 * whole-region invalidate).
 */
static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						 + rpra[i].buf.len));
	}

	/* Pick up results the DSP wrote into the metadata/copy buffer. */
	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}
1336
1337static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
1338 uint32_t kernel, uint32_t handle)
1339{
1340 struct smq_msg *msg = &ctx->msg;
1341 struct fastrpc_file *fl = ctx->fl;
1342 struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
1343 int err = 0;
1344
1345 VERIFY(err, 0 != channel_ctx->chan);
1346 if (err)
1347 goto bail;
1348 msg->pid = current->tgid;
1349 msg->tid = current->pid;
1350 if (kernel)
1351 msg->pid = 0;
1352 msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
1353 msg->invoke.header.handle = handle;
1354 msg->invoke.header.sc = ctx->sc;
1355 msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
1356 msg->invoke.page.size = buf_page_size(ctx->used);
1357
1358 if (fl->ssrcount != channel_ctx->ssrcount) {
1359 err = -ECONNRESET;
1360 goto bail;
1361 }
1362 VERIFY(err, channel_ctx->link.port_state ==
1363 FASTRPC_LINK_CONNECTED);
1364 if (err)
1365 goto bail;
1366 err = glink_tx(channel_ctx->chan,
1367 (void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
1368 GLINK_TX_REQ_INTENT);
1369 bail:
1370 return err;
1371}
1372
1373static void fastrpc_init(struct fastrpc_apps *me)
1374{
1375 int i;
1376
1377 INIT_HLIST_HEAD(&me->drivers);
1378 spin_lock_init(&me->hlock);
1379 mutex_init(&me->smd_mutex);
1380 me->channel = &gcinfo[0];
1381 for (i = 0; i < NUM_CHANNELS; i++) {
1382 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301383 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001384 me->channel[i].sesscount = 0;
1385 }
1386}
1387
1388static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1389
1390static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
1391 uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07001392 struct fastrpc_ioctl_invoke_crc *inv)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001393{
1394 struct smq_invoke_ctx *ctx = 0;
1395 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
1396 int cid = fl->cid;
1397 int interrupted = 0;
1398 int err = 0;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001399 struct timespec invoket;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001400
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001401 if (fl->profile)
1402 getnstimeofday(&invoket);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001403 if (!kernel) {
1404 VERIFY(err, 0 == context_restore_interrupted(fl, inv,
1405 &ctx));
1406 if (err)
1407 goto bail;
1408 if (fl->sctx->smmu.faults)
1409 err = FASTRPC_ENOSUCH;
1410 if (err)
1411 goto bail;
1412 if (ctx)
1413 goto wait;
1414 }
1415
1416 VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
1417 if (err)
1418 goto bail;
1419
1420 if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001421 PERF(fl->profile, fl->perf.getargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001422 VERIFY(err, 0 == get_args(kernel, ctx));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001423 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001424 if (err)
1425 goto bail;
1426 }
1427
Sathish Ambleyc432b502017-06-05 12:03:42 -07001428 if (!fl->sctx->smmu.coherent)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001429 inv_args_pre(ctx);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001430 PERF(fl->profile, fl->perf.link,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001431 VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001432 PERF_END);
1433
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001434 if (err)
1435 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001436 wait:
1437 if (kernel)
1438 wait_for_completion(&ctx->work);
1439 else {
1440 interrupted = wait_for_completion_interruptible(&ctx->work);
1441 VERIFY(err, 0 == (err = interrupted));
1442 if (err)
1443 goto bail;
1444 }
Sathish Ambleyc432b502017-06-05 12:03:42 -07001445
1446 PERF(fl->profile, fl->perf.invargs,
1447 if (!fl->sctx->smmu.coherent)
1448 inv_args(ctx);
1449 PERF_END);
1450
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001451 VERIFY(err, 0 == (err = ctx->retval));
1452 if (err)
1453 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001454
1455 PERF(fl->profile, fl->perf.putargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001456 VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001457 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001458 if (err)
1459 goto bail;
1460 bail:
1461 if (ctx && interrupted == -ERESTARTSYS)
1462 context_save_interrupted(ctx);
1463 else if (ctx)
1464 context_free(ctx);
1465 if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
1466 err = ECONNRESET;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001467
1468 if (fl->profile && !interrupted) {
1469 if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
1470 fl->perf.invoke += getnstimediff(&invoket);
1471 if (!(invoke->handle >= 0 &&
1472 invoke->handle <= FASTRPC_STATIC_HANDLE_MAX))
1473 fl->perf.count++;
1474 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001475 return err;
1476}
1477
Sathish Ambley36849af2017-02-02 09:35:55 -08001478static int fastrpc_channel_open(struct fastrpc_file *fl);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001479static int fastrpc_init_process(struct fastrpc_file *fl,
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001480 struct fastrpc_ioctl_init_attrs *uproc)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001481{
1482 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001483 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001484 struct fastrpc_ioctl_init *init = &uproc->init;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001485 struct smq_phy_page pages[1];
1486 struct fastrpc_mmap *file = 0, *mem = 0;
1487
Sathish Ambley36849af2017-02-02 09:35:55 -08001488 VERIFY(err, !fastrpc_channel_open(fl));
1489 if (err)
1490 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001491 if (init->flags == FASTRPC_INIT_ATTACH) {
1492 remote_arg_t ra[1];
1493 int tgid = current->tgid;
1494
1495 ra[0].buf.pv = (void *)&tgid;
1496 ra[0].buf.len = sizeof(tgid);
1497 ioctl.inv.handle = 1;
1498 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
1499 ioctl.inv.pra = ra;
1500 ioctl.fds = 0;
1501 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001502 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001503 fl->pd = 0;
1504 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1505 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1506 if (err)
1507 goto bail;
1508 } else if (init->flags == FASTRPC_INIT_CREATE) {
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001509 remote_arg_t ra[6];
1510 int fds[6];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001511 int mflags = 0;
1512 struct {
1513 int pgid;
1514 int namelen;
1515 int filelen;
1516 int pageslen;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001517 int attrs;
1518 int siglen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001519 } inbuf;
1520
1521 inbuf.pgid = current->tgid;
1522 inbuf.namelen = strlen(current->comm) + 1;
1523 inbuf.filelen = init->filelen;
1524 fl->pd = 1;
1525 if (init->filelen) {
1526 VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
1527 init->file, init->filelen, mflags, &file));
1528 if (err)
1529 goto bail;
1530 }
1531 inbuf.pageslen = 1;
1532 VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
1533 init->mem, init->memlen, mflags, &mem));
1534 if (err)
1535 goto bail;
1536 inbuf.pageslen = 1;
1537 ra[0].buf.pv = (void *)&inbuf;
1538 ra[0].buf.len = sizeof(inbuf);
1539 fds[0] = 0;
1540
1541 ra[1].buf.pv = (void *)current->comm;
1542 ra[1].buf.len = inbuf.namelen;
1543 fds[1] = 0;
1544
1545 ra[2].buf.pv = (void *)init->file;
1546 ra[2].buf.len = inbuf.filelen;
1547 fds[2] = init->filefd;
1548
1549 pages[0].addr = mem->phys;
1550 pages[0].size = mem->size;
1551 ra[3].buf.pv = (void *)pages;
1552 ra[3].buf.len = 1 * sizeof(*pages);
1553 fds[3] = 0;
1554
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001555 inbuf.attrs = uproc->attrs;
1556 ra[4].buf.pv = (void *)&(inbuf.attrs);
1557 ra[4].buf.len = sizeof(inbuf.attrs);
1558 fds[4] = 0;
1559
1560 inbuf.siglen = uproc->siglen;
1561 ra[5].buf.pv = (void *)&(inbuf.siglen);
1562 ra[5].buf.len = sizeof(inbuf.siglen);
1563 fds[5] = 0;
1564
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001565 ioctl.inv.handle = 1;
1566 ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001567 if (uproc->attrs)
1568 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001569 ioctl.inv.pra = ra;
1570 ioctl.fds = fds;
1571 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001572 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001573 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1574 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1575 if (err)
1576 goto bail;
1577 } else {
1578 err = -ENOTTY;
1579 }
1580bail:
1581 if (mem && err)
1582 fastrpc_mmap_free(mem);
1583 if (file)
1584 fastrpc_mmap_free(file);
1585 return err;
1586}
1587
1588static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
1589{
1590 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001591 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001592 remote_arg_t ra[1];
1593 int tgid = 0;
1594
Sathish Ambley36849af2017-02-02 09:35:55 -08001595 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1596 if (err)
1597 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001598 VERIFY(err, fl->apps->channel[fl->cid].chan != 0);
1599 if (err)
1600 goto bail;
1601 tgid = fl->tgid;
1602 ra[0].buf.pv = (void *)&tgid;
1603 ra[0].buf.len = sizeof(tgid);
1604 ioctl.inv.handle = 1;
1605 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
1606 ioctl.inv.pra = ra;
1607 ioctl.fds = 0;
1608 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001609 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001610 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1611 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1612bail:
1613 return err;
1614}
1615
1616static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
1617 struct fastrpc_mmap *map)
1618{
Sathish Ambleybae51902017-07-03 15:00:49 -07001619 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001620 struct smq_phy_page page;
1621 int num = 1;
1622 remote_arg_t ra[3];
1623 int err = 0;
1624 struct {
1625 int pid;
1626 uint32_t flags;
1627 uintptr_t vaddrin;
1628 int num;
1629 } inargs;
1630 struct {
1631 uintptr_t vaddrout;
1632 } routargs;
1633
1634 inargs.pid = current->tgid;
1635 inargs.vaddrin = (uintptr_t)map->va;
1636 inargs.flags = flags;
1637 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
1638 ra[0].buf.pv = (void *)&inargs;
1639 ra[0].buf.len = sizeof(inargs);
1640 page.addr = map->phys;
1641 page.size = map->size;
1642 ra[1].buf.pv = (void *)&page;
1643 ra[1].buf.len = num * sizeof(page);
1644
1645 ra[2].buf.pv = (void *)&routargs;
1646 ra[2].buf.len = sizeof(routargs);
1647
1648 ioctl.inv.handle = 1;
1649 if (fl->apps->compat)
1650 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
1651 else
1652 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
1653 ioctl.inv.pra = ra;
1654 ioctl.fds = 0;
1655 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001656 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001657 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1658 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1659 map->raddr = (uintptr_t)routargs.vaddrout;
1660
1661 return err;
1662}
1663
1664static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
1665 struct fastrpc_mmap *map)
1666{
Sathish Ambleybae51902017-07-03 15:00:49 -07001667 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001668 remote_arg_t ra[1];
1669 int err = 0;
1670 struct {
1671 int pid;
1672 uintptr_t vaddrout;
1673 ssize_t size;
1674 } inargs;
1675
1676 inargs.pid = current->tgid;
1677 inargs.size = map->size;
1678 inargs.vaddrout = map->raddr;
1679 ra[0].buf.pv = (void *)&inargs;
1680 ra[0].buf.len = sizeof(inargs);
1681
1682 ioctl.inv.handle = 1;
1683 if (fl->apps->compat)
1684 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
1685 else
1686 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
1687 ioctl.inv.pra = ra;
1688 ioctl.fds = 0;
1689 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001690 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001691 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1692 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1693 return err;
1694}
1695
1696static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
1697 ssize_t len, struct fastrpc_mmap **ppmap);
1698
1699static void fastrpc_mmap_add(struct fastrpc_mmap *map);
1700
1701static int fastrpc_internal_munmap(struct fastrpc_file *fl,
1702 struct fastrpc_ioctl_munmap *ud)
1703{
1704 int err = 0;
1705 struct fastrpc_mmap *map = 0;
1706
1707 VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
1708 if (err)
1709 goto bail;
1710 VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
1711 if (err)
1712 goto bail;
1713 fastrpc_mmap_free(map);
1714bail:
1715 if (err && map)
1716 fastrpc_mmap_add(map);
1717 return err;
1718}
1719
1720static int fastrpc_internal_mmap(struct fastrpc_file *fl,
1721 struct fastrpc_ioctl_mmap *ud)
1722{
1723
1724 struct fastrpc_mmap *map = 0;
1725 int err = 0;
1726
1727 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin, ud->size,
Sathish Ambleyae5ee542017-01-16 22:24:23 -08001728 ud->flags, 1, &map))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001729 return 0;
1730
1731 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
1732 (uintptr_t)ud->vaddrin, ud->size, ud->flags, &map));
1733 if (err)
1734 goto bail;
1735 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
1736 if (err)
1737 goto bail;
1738 ud->vaddrout = map->raddr;
1739 bail:
1740 if (err && map)
1741 fastrpc_mmap_free(map);
1742 return err;
1743}
1744
/*
 * fastrpc_channel_close() - kref release callback for a channel.
 *
 * Invoked via kref_put_mutex() in fastrpc_file_free() with
 * me->smd_mutex already held; that contract is why this function
 * unlocks a mutex it never visibly takes.  Closes the glink channel
 * and unregisters the link-state callback.
 */
static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	/* Channel id == index of this ctx within the global table. */
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = 0;
	glink_unregister_link_state_cb(ctx->link.link_notify_handle);
	ctx->link.link_notify_handle = NULL;
	/* Paired with the lock taken by kref_put_mutex() in the caller. */
	mutex_unlock(&me->smd_mutex);
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
		MAJOR(me->dev_no), cid);
}
1761
1762static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
1763
1764static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
1765 int secure, struct fastrpc_session_ctx **session)
1766{
1767 struct fastrpc_apps *me = &gfa;
1768 int idx = 0, err = 0;
1769
1770 if (chan->sesscount) {
1771 for (idx = 0; idx < chan->sesscount; ++idx) {
1772 if (!chan->session[idx].used &&
1773 chan->session[idx].smmu.secure == secure) {
1774 chan->session[idx].used = 1;
1775 break;
1776 }
1777 }
1778 VERIFY(err, idx < chan->sesscount);
1779 if (err)
1780 goto bail;
1781 chan->session[idx].smmu.faults = 0;
1782 } else {
1783 VERIFY(err, me->dev != NULL);
1784 if (err)
1785 goto bail;
1786 chan->session[0].dev = me->dev;
1787 }
1788
1789 *session = &chan->session[idx];
1790 bail:
1791 return err;
1792}
1793
1794bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv, size_t size)
1795{
1796 if (glink_queue_rx_intent(h, NULL, size))
1797 return false;
1798 return true;
1799}
1800
/*
 * Glink TX-done callback.  Intentionally a no-op: the transmitted
 * message is the invoke context's embedded smq_msg (see
 * fastrpc_invoke_send), so there is nothing to reclaim here.
 */
void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
				const void *pkt_priv, const void *ptr)
{
}
1805
1806void fastrpc_glink_notify_rx(void *handle, const void *priv,
1807 const void *pkt_priv, const void *ptr, size_t size)
1808{
1809 struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
1810 int len = size;
1811
1812 while (len >= sizeof(*rsp) && rsp) {
1813 rsp->ctx = rsp->ctx & ~1;
1814 context_notify_user(uint64_to_ptr(rsp->ctx), rsp->retval);
1815 rsp++;
1816 len = len - sizeof(*rsp);
1817 }
1818 glink_rx_done(handle, ptr, true);
1819}
1820
/*
 * fastrpc_glink_notify_state() - glink port (channel) state callback.
 *
 * @priv encodes the channel id set at glink_open() time.  Tracks the
 * port state in the channel's link info; on connect it wakes waiters
 * on the channel's workport completion, and on a remote disconnect it
 * initiates a local close and clears the channel handle.
 */
void fastrpc_glink_notify_state(void *handle, const void *priv,
				unsigned int event)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)(uintptr_t)priv;
	struct fastrpc_glink_info *link;

	if (cid < 0 || cid >= NUM_CHANNELS)
		return;
	link = &me->channel[cid].link;
	switch (event) {
	case GLINK_CONNECTED:
		link->port_state = FASTRPC_LINK_CONNECTED;
		complete(&me->channel[cid].workport);
		break;
	case GLINK_LOCAL_DISCONNECTED:
		link->port_state = FASTRPC_LINK_DISCONNECTED;
		break;
	case GLINK_REMOTE_DISCONNECTED:
		/* Remote side dropped (e.g. during SSR): close our end. */
		if (me->channel[cid].chan) {
			fastrpc_glink_close(me->channel[cid].chan, cid);
			me->channel[cid].chan = 0;
		}
		break;
	default:
		break;
	}
}
1849
1850static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
1851 struct fastrpc_session_ctx **session)
1852{
1853 int err = 0;
1854 struct fastrpc_apps *me = &gfa;
1855
1856 mutex_lock(&me->smd_mutex);
1857 if (!*session)
1858 err = fastrpc_session_alloc_locked(chan, secure, session);
1859 mutex_unlock(&me->smd_mutex);
1860 return err;
1861}
1862
1863static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
1864 struct fastrpc_session_ctx *session)
1865{
1866 struct fastrpc_apps *me = &gfa;
1867
1868 mutex_lock(&me->smd_mutex);
1869 session->used = 0;
1870 mutex_unlock(&me->smd_mutex);
1871}
1872
/*
 * fastrpc_file_free() - tear down all per-file state on close.
 *
 * Order matters: unlink from the global driver list first, then tell
 * the DSP to release the remote process, mark the file closing, free
 * pending contexts/buffers/maps, and finally drop the channel kref
 * and SMMU sessions before freeing the struct itself.
 */
static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n;
	struct fastrpc_mmap *map = 0;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	/* No session was ever set up: nothing on the DSP to release. */
	if (!fl->sctx) {
		kfree(fl);
		return 0;
	}
	/* Best-effort: failure here must not abort local cleanup. */
	(void)fastrpc_release_current_dsp_process(fl);
	spin_lock(&fl->hlock);
	fl->file_close = 1;
	spin_unlock(&fl->hlock);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map);
	}
	/*
	 * Only drop the channel reference if no SSR happened since we
	 * took it; fastrpc_channel_close runs with smd_mutex held.
	 */
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
				fastrpc_channel_close, &fl->apps->smd_mutex);
	if (fl->sctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
	kfree(fl);
	return 0;
}
1910
1911static int fastrpc_device_release(struct inode *inode, struct file *file)
1912{
1913 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
1914
1915 if (fl) {
Sathish Ambley1ca68232017-01-19 10:32:55 -08001916 if (fl->debugfs_file != NULL)
1917 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001918 fastrpc_file_free(fl);
1919 file->private_data = 0;
1920 }
1921 return 0;
1922}
1923
1924static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
1925 void *priv)
1926{
1927 struct fastrpc_apps *me = &gfa;
1928 int cid = (int)((uintptr_t)priv);
1929 struct fastrpc_glink_info *link;
1930
1931 if (cid < 0 || cid >= NUM_CHANNELS)
1932 return;
1933
1934 link = &me->channel[cid].link;
1935 switch (cb_info->link_state) {
1936 case GLINK_LINK_STATE_UP:
1937 link->link_state = FASTRPC_LINK_STATE_UP;
1938 complete(&me->channel[cid].work);
1939 break;
1940 case GLINK_LINK_STATE_DOWN:
1941 link->link_state = FASTRPC_LINK_STATE_DOWN;
1942 break;
1943 default:
1944 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
1945 break;
1946 }
1947}
1948
1949static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
1950{
1951 int err = 0;
1952 struct fastrpc_glink_info *link;
1953
1954 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
1955 if (err)
1956 goto bail;
1957
1958 link = &me->channel[cid].link;
1959 if (link->link_notify_handle != NULL)
1960 goto bail;
1961
1962 link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
1963 link->link_notify_handle = glink_register_link_state_cb(
1964 &link->link_info,
1965 (void *)((uintptr_t)cid));
1966 VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
1967 if (err) {
1968 link->link_notify_handle = NULL;
1969 goto bail;
1970 }
1971 VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
1972 RPC_TIMEOUT));
1973bail:
1974 return err;
1975}
1976
1977static void fastrpc_glink_close(void *chan, int cid)
1978{
1979 int err = 0;
1980 struct fastrpc_glink_info *link;
1981
1982 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
1983 if (err)
1984 return;
1985 link = &gfa.channel[cid].link;
1986
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301987 if (link->port_state == FASTRPC_LINK_CONNECTED) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001988 link->port_state = FASTRPC_LINK_DISCONNECTING;
1989 glink_close(chan);
1990 }
1991}
1992
1993static int fastrpc_glink_open(int cid)
1994{
1995 int err = 0;
1996 void *handle = NULL;
1997 struct fastrpc_apps *me = &gfa;
1998 struct glink_open_config *cfg;
1999 struct fastrpc_glink_info *link;
2000
2001 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2002 if (err)
2003 goto bail;
2004 link = &me->channel[cid].link;
2005 cfg = &me->channel[cid].link.cfg;
2006 VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
2007 if (err)
2008 goto bail;
2009
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05302010 VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
2011 if (err)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002012 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002013
2014 link->port_state = FASTRPC_LINK_CONNECTING;
2015 cfg->priv = (void *)(uintptr_t)cid;
2016 cfg->edge = gcinfo[cid].link.link_info.edge;
2017 cfg->transport = gcinfo[cid].link.link_info.transport;
2018 cfg->name = FASTRPC_GLINK_GUID;
2019 cfg->notify_rx = fastrpc_glink_notify_rx;
2020 cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
2021 cfg->notify_state = fastrpc_glink_notify_state;
2022 cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
2023 handle = glink_open(cfg);
2024 VERIFY(err, !IS_ERR_OR_NULL(handle));
2025 if (err)
2026 goto bail;
2027 me->channel[cid].chan = handle;
2028bail:
2029 return err;
2030}
2031
/*
 * debugfs open: stash the fastrpc_file (or NULL for the "global" node)
 * that was attached at debugfs_create_file() time so the read handler
 * can find it in filp->private_data.
 */
static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}
2037
2038static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
2039 size_t count, loff_t *position)
2040{
2041 struct fastrpc_file *fl = filp->private_data;
2042 struct hlist_node *n;
2043 struct fastrpc_buf *buf = 0;
2044 struct fastrpc_mmap *map = 0;
2045 struct smq_invoke_ctx *ictx = 0;
2046 struct fastrpc_channel_ctx *chan;
2047 struct fastrpc_session_ctx *sess;
2048 unsigned int len = 0;
2049 int i, j, ret = 0;
2050 char *fileinfo = NULL;
2051
2052 fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
2053 if (!fileinfo)
2054 goto bail;
2055 if (fl == NULL) {
2056 for (i = 0; i < NUM_CHANNELS; i++) {
2057 chan = &gcinfo[i];
2058 len += scnprintf(fileinfo + len,
2059 DEBUGFS_SIZE - len, "%s\n\n",
2060 chan->name);
2061 len += scnprintf(fileinfo + len,
2062 DEBUGFS_SIZE - len, "%s %d\n",
2063 "sesscount:", chan->sesscount);
2064 for (j = 0; j < chan->sesscount; j++) {
2065 sess = &chan->session[j];
2066 len += scnprintf(fileinfo + len,
2067 DEBUGFS_SIZE - len,
2068 "%s%d\n\n", "SESSION", j);
2069 len += scnprintf(fileinfo + len,
2070 DEBUGFS_SIZE - len,
2071 "%s %d\n", "sid:",
2072 sess->smmu.cb);
2073 len += scnprintf(fileinfo + len,
2074 DEBUGFS_SIZE - len,
2075 "%s %d\n", "SECURE:",
2076 sess->smmu.secure);
2077 }
2078 }
2079 } else {
2080 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2081 "%s %d\n\n",
2082 "PROCESS_ID:", fl->tgid);
2083 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2084 "%s %d\n\n",
2085 "CHANNEL_ID:", fl->cid);
2086 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2087 "%s %d\n\n",
2088 "SSRCOUNT:", fl->ssrcount);
2089 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2090 "%s\n",
2091 "LIST OF BUFS:");
2092 spin_lock(&fl->hlock);
2093 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
2094 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2095 "%s %p %s %p %s %llx\n", "buf:",
2096 buf, "buf->virt:", buf->virt,
2097 "buf->phys:", buf->phys);
2098 }
2099 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2100 "\n%s\n",
2101 "LIST OF MAPS:");
2102 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
2103 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2104 "%s %p %s %lx %s %llx\n",
2105 "map:", map,
2106 "map->va:", map->va,
2107 "map->phys:", map->phys);
2108 }
2109 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2110 "\n%s\n",
2111 "LIST OF PENDING SMQCONTEXTS:");
2112 hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
2113 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2114 "%s %p %s %u %s %u %s %u\n",
2115 "smqcontext:", ictx,
2116 "sc:", ictx->sc,
2117 "tid:", ictx->pid,
2118 "handle", ictx->rpra->h);
2119 }
2120 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2121 "\n%s\n",
2122 "LIST OF INTERRUPTED SMQCONTEXTS:");
2123 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
2124 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2125 "%s %p %s %u %s %u %s %u\n",
2126 "smqcontext:", ictx,
2127 "sc:", ictx->sc,
2128 "tid:", ictx->pid,
2129 "handle", ictx->rpra->h);
2130 }
2131 spin_unlock(&fl->hlock);
2132 }
2133 if (len > DEBUGFS_SIZE)
2134 len = DEBUGFS_SIZE;
2135 ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
2136 kfree(fileinfo);
2137bail:
2138 return ret;
2139}
2140
/* File operations for the adsprpc debugfs nodes (read-only state dump). */
static const struct file_operations debugfs_fops = {
	.open = fastrpc_debugfs_open,
	.read = fastrpc_debugfs_read,
};
/*
 * Bring up (or take a reference on) the remote channel associated with
 * the client's cid.  Serialized by me->smd_mutex.
 *
 * If the channel is already up, kref_get_unless_zero() takes a ref and
 * the open is skipped.  Otherwise the glink link is registered/opened
 * and we wait (bounded by RPC_TIMEOUT) for the port-open completion
 * signalled from the glink state callback; on timeout the half-open
 * channel pointer is cleared.  Returns 0 on success, non-zero on error.
 */
static int fastrpc_channel_open(struct fastrpc_file *fl)
{
	struct fastrpc_apps *me = &gfa;
	int cid, err = 0;

	mutex_lock(&me->smd_mutex);

	VERIFY(err, fl && fl->sctx);
	if (err)
		goto bail;
	cid = fl->cid;
	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
	if (err)
		goto bail;
	/* remember the SSR generation this client attached at */
	fl->ssrcount = me->channel[cid].ssrcount;
	if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
	    (me->channel[cid].chan == 0)) {
		VERIFY(err, 0 == fastrpc_glink_register(cid, me));
		if (err)
			goto bail;
		VERIFY(err, 0 == fastrpc_glink_open(cid));
		if (err)
			goto bail;

		VERIFY(err,
			wait_for_completion_timeout(&me->channel[cid].workport,
						RPC_TIMEOUT));
		if (err) {
			/* port never came up: drop the stale handle */
			me->channel[cid].chan = 0;
			goto bail;
		}
		kref_init(&me->channel[cid].kref);
		pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
						MAJOR(me->dev_no), cid);
		/* first open after a subsystem restart: resync the count */
		if (me->channel[cid].ssrcount !=
			 me->channel[cid].prevssrcount) {
			me->channel[cid].prevssrcount =
					me->channel[cid].ssrcount;
		}
	}

bail:
	mutex_unlock(&me->smd_mutex);
	return err;
}
2190
Sathish Ambley36849af2017-02-02 09:35:55 -08002191static int fastrpc_device_open(struct inode *inode, struct file *filp)
2192{
2193 int err = 0;
Sathish Ambley567012b2017-03-06 11:55:04 -08002194 struct dentry *debugfs_file;
Sathish Ambley36849af2017-02-02 09:35:55 -08002195 struct fastrpc_file *fl = 0;
2196 struct fastrpc_apps *me = &gfa;
2197
2198 VERIFY(err, fl = kzalloc(sizeof(*fl), GFP_KERNEL));
2199 if (err)
2200 return err;
Sathish Ambley567012b2017-03-06 11:55:04 -08002201 debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
2202 fl, &debugfs_fops);
Sathish Ambley36849af2017-02-02 09:35:55 -08002203 context_list_ctor(&fl->clst);
2204 spin_lock_init(&fl->hlock);
2205 INIT_HLIST_HEAD(&fl->maps);
2206 INIT_HLIST_HEAD(&fl->bufs);
2207 INIT_HLIST_NODE(&fl->hn);
2208 fl->tgid = current->tgid;
2209 fl->apps = me;
2210 fl->mode = FASTRPC_MODE_SERIAL;
2211 fl->cid = -1;
Sathish Ambley567012b2017-03-06 11:55:04 -08002212 if (debugfs_file != NULL)
2213 fl->debugfs_file = debugfs_file;
2214 memset(&fl->perf, 0, sizeof(fl->perf));
Sathish Ambley36849af2017-02-02 09:35:55 -08002215 filp->private_data = fl;
2216 spin_lock(&me->hlock);
2217 hlist_add_head(&fl->hn, &me->drivers);
2218 spin_unlock(&me->hlock);
2219 return 0;
2220}
2221
/*
 * FASTRPC_IOCTL_GETINFO backend.  On the first call for this fd, *info
 * carries the requested channel id: it is validated, bound to the
 * client, and an SMMU session is allocated on that channel.  In all
 * cases *info is rewritten to 1/0 depending on whether the session's
 * SMMU is enabled.
 *
 * NOTE(review): fastrpc_session_alloc_locked() is called here without
 * any lock visibly held in this function — confirm the "_locked"
 * contract against its definition.
 */
static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
{
	int err = 0;
	uint32_t cid;

	VERIFY(err, fl != 0);
	if (err)
		goto bail;
	if (fl->cid == -1) {
		cid = *info;
		/* cid is unsigned, so this also rejects negative values */
		VERIFY(err, cid < NUM_CHANNELS);
		if (err)
			goto bail;
		fl->cid = cid;
		fl->ssrcount = fl->apps->channel[cid].ssrcount;
		VERIFY(err, !fastrpc_session_alloc_locked(
				&fl->apps->channel[cid], 0, &fl->sctx));
		if (err)
			goto bail;
	}
	*info = (fl->sctx->smmu.enabled ? 1 : 0);
bail:
	return err;
}
2246
2247static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
2248 unsigned long ioctl_param)
2249{
2250 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07002251 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002252 struct fastrpc_ioctl_mmap mmap;
2253 struct fastrpc_ioctl_munmap munmap;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002254 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002255 struct fastrpc_ioctl_perf perf;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002256 } p;
2257 void *param = (char *)ioctl_param;
2258 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2259 int size = 0, err = 0;
2260 uint32_t info;
2261
2262 p.inv.fds = 0;
2263 p.inv.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07002264 p.inv.crc = NULL;
tharun kumar9f899ea2017-07-03 17:07:03 +05302265 spin_lock(&fl->hlock);
2266 if (fl->file_close == 1) {
2267 err = EBADF;
2268 pr_warn("ADSPRPC: fastrpc_device_release is happening, So not sending any new requests to DSP");
2269 spin_unlock(&fl->hlock);
2270 goto bail;
2271 }
2272 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002273
2274 switch (ioctl_num) {
2275 case FASTRPC_IOCTL_INVOKE:
2276 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07002277 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002278 case FASTRPC_IOCTL_INVOKE_FD:
2279 if (!size)
2280 size = sizeof(struct fastrpc_ioctl_invoke_fd);
2281 /* fall through */
2282 case FASTRPC_IOCTL_INVOKE_ATTRS:
2283 if (!size)
2284 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07002285 /* fall through */
2286 case FASTRPC_IOCTL_INVOKE_CRC:
2287 if (!size)
2288 size = sizeof(struct fastrpc_ioctl_invoke_crc);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002289 VERIFY(err, 0 == copy_from_user(&p.inv, param, size));
2290 if (err)
2291 goto bail;
2292 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
2293 0, &p.inv)));
2294 if (err)
2295 goto bail;
2296 break;
2297 case FASTRPC_IOCTL_MMAP:
2298 VERIFY(err, 0 == copy_from_user(&p.mmap, param,
2299 sizeof(p.mmap)));
2300 if (err)
2301 goto bail;
2302 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
2303 if (err)
2304 goto bail;
2305 VERIFY(err, 0 == copy_to_user(param, &p.mmap, sizeof(p.mmap)));
2306 if (err)
2307 goto bail;
2308 break;
2309 case FASTRPC_IOCTL_MUNMAP:
2310 VERIFY(err, 0 == copy_from_user(&p.munmap, param,
2311 sizeof(p.munmap)));
2312 if (err)
2313 goto bail;
2314 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
2315 &p.munmap)));
2316 if (err)
2317 goto bail;
2318 break;
2319 case FASTRPC_IOCTL_SETMODE:
2320 switch ((uint32_t)ioctl_param) {
2321 case FASTRPC_MODE_PARALLEL:
2322 case FASTRPC_MODE_SERIAL:
2323 fl->mode = (uint32_t)ioctl_param;
2324 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002325 case FASTRPC_MODE_PROFILE:
2326 fl->profile = (uint32_t)ioctl_param;
2327 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002328 default:
2329 err = -ENOTTY;
2330 break;
2331 }
2332 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002333 case FASTRPC_IOCTL_GETPERF:
2334 VERIFY(err, 0 == copy_from_user(&p.perf,
2335 param, sizeof(p.perf)));
2336 if (err)
2337 goto bail;
2338 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
2339 if (p.perf.keys) {
2340 char *keys = PERF_KEYS;
2341
2342 VERIFY(err, 0 == copy_to_user((char *)p.perf.keys,
2343 keys, strlen(keys)+1));
2344 if (err)
2345 goto bail;
2346 }
2347 if (p.perf.data) {
2348 VERIFY(err, 0 == copy_to_user((int64_t *)p.perf.data,
2349 &fl->perf, sizeof(fl->perf)));
2350 }
2351 VERIFY(err, 0 == copy_to_user(param, &p.perf, sizeof(p.perf)));
2352 if (err)
2353 goto bail;
2354 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002355 case FASTRPC_IOCTL_GETINFO:
Sathish Ambley36849af2017-02-02 09:35:55 -08002356 VERIFY(err, 0 == copy_from_user(&info, param, sizeof(info)));
2357 if (err)
2358 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002359 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
2360 if (err)
2361 goto bail;
2362 VERIFY(err, 0 == copy_to_user(param, &info, sizeof(info)));
2363 if (err)
2364 goto bail;
2365 break;
2366 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002367 p.init.attrs = 0;
2368 p.init.siglen = 0;
2369 size = sizeof(struct fastrpc_ioctl_init);
2370 /* fall through */
2371 case FASTRPC_IOCTL_INIT_ATTRS:
2372 if (!size)
2373 size = sizeof(struct fastrpc_ioctl_init_attrs);
2374 VERIFY(err, 0 == copy_from_user(&p.init, param, size));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002375 if (err)
2376 goto bail;
2377 VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
2378 if (err)
2379 goto bail;
2380 break;
2381
2382 default:
2383 err = -ENOTTY;
2384 pr_info("bad ioctl: %d\n", ioctl_num);
2385 break;
2386 }
2387 bail:
2388 return err;
2389}
2390
2391static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
2392 unsigned long code,
2393 void *data)
2394{
2395 struct fastrpc_apps *me = &gfa;
2396 struct fastrpc_channel_ctx *ctx;
2397 int cid;
2398
2399 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
2400 cid = ctx - &me->channel[0];
2401 if (code == SUBSYS_BEFORE_SHUTDOWN) {
2402 mutex_lock(&me->smd_mutex);
2403 ctx->ssrcount++;
2404 if (ctx->chan) {
2405 fastrpc_glink_close(ctx->chan, cid);
2406 ctx->chan = 0;
2407 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
2408 gcinfo[cid].name, MAJOR(me->dev_no), cid);
2409 }
2410 mutex_unlock(&me->smd_mutex);
2411 fastrpc_notify_drivers(me, cid);
2412 }
2413
2414 return NOTIFY_DONE;
2415}
2416
/* Character-device file operations for the fastrpc device nodes. */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};
2423
/* Device-tree compatibles handled by this driver (see fastrpc_probe). */
static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};
2431
/*
 * Probe one SMMU context bank ("qcom,msm-fastrpc-compute-cb" node).
 *
 * Matches the node's "label" against a known channel, then creates and
 * attaches an ARM IOMMU mapping for a new session on that channel.
 * Secure context banks get a lower VA base and the CP_PIXEL secure
 * VMID attribute.  Also (re)creates the "global" debugfs node.
 *
 * NOTE(review): the 0x80000000/0x60000000 VA bases and 0x7fffffff size
 * are platform constants carried over from the DT bring-up — confirm
 * against the SMMU configuration before changing.
 */
static int fastrpc_cb_probe(struct device *dev)
{
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	struct of_phandle_args iommuspec;
	const char *name;
	unsigned int start = 0x80000000;
	int err = 0, i;
	int secure_vmid = VMID_CP_PIXEL;

	VERIFY(err, 0 != (name = of_get_property(dev->of_node, "label", NULL)));
	if (err)
		goto bail;
	/* find the channel this context bank belongs to, by label */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		if (!strcmp(name, gcinfo[i].name))
			break;
	}
	VERIFY(err, i < NUM_CHANNELS);
	if (err)
		goto bail;
	chan = &gcinfo[i];
	VERIFY(err, chan->sesscount < NUM_SESSIONS);
	if (err)
		goto bail;

	VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
						"#iommu-cells", 0, &iommuspec));
	if (err)
		goto bail;
	sess = &chan->session[chan->sesscount];
	/* low nibble of the iommu spec is the context-bank number */
	sess->smmu.cb = iommuspec.args[0] & 0xf;
	sess->used = 0;
	sess->smmu.coherent = of_property_read_bool(dev->of_node,
						"dma-coherent");
	sess->smmu.secure = of_property_read_bool(dev->of_node,
						"qcom,secure-context-bank");
	if (sess->smmu.secure)
		start = 0x60000000;
	VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
				arm_iommu_create_mapping(&platform_bus_type,
						start, 0x7fffffff)));
	if (err)
		goto bail;

	if (sess->smmu.secure)
		iommu_domain_set_attr(sess->smmu.mapping->domain,
				DOMAIN_ATTR_SECURE_VMID,
				&secure_vmid);

	VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
	if (err)
		goto bail;
	sess->dev = dev;
	sess->smmu.enabled = 1;
	chan->sesscount++;
	debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
							NULL, &debugfs_fops);
bail:
	return err;
}
2494
/*
 * Platform-driver probe.  Three node flavours:
 *  - compute-cb nodes are delegated to fastrpc_cb_probe();
 *  - the adsprpc-mem-region node locates the ADSP ION heap's CMA area
 *    and hyp-assigns it so HLOS plus the MSS/SSC/ADSP Q6 VMs can all
 *    access it (RWX permissions for each destination VM);
 *  - anything else populates its DT children (the cb nodes).
 */
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			/* walk the ion heaps looking for the ADSP heap id */
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
						VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					};

			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
					srcVM, 1, destVM, destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					fastrpc_match_table,
					NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}
2558
2559static void fastrpc_deinit(void)
2560{
2561 struct fastrpc_apps *me = &gfa;
2562 struct fastrpc_channel_ctx *chan = gcinfo;
2563 int i, j;
2564
2565 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
2566 if (chan->chan) {
2567 kref_put_mutex(&chan->kref,
2568 fastrpc_channel_close, &me->smd_mutex);
2569 chan->chan = 0;
2570 }
2571 for (j = 0; j < NUM_SESSIONS; j++) {
2572 struct fastrpc_session_ctx *sess = &chan->session[j];
2573
2574 if (sess->smmu.enabled) {
2575 arm_iommu_detach_device(sess->dev);
2576 sess->dev = 0;
2577 }
2578 if (sess->smmu.mapping) {
2579 arm_iommu_release_mapping(sess->smmu.mapping);
2580 sess->smmu.mapping = 0;
2581 }
2582 }
2583 }
2584}
2585
/* Platform driver glue; probe dispatches on the DT compatible string. */
static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};
2594
/*
 * Module init: register the platform driver, allocate the char-dev
 * region (one minor per channel, but a single cdev/device node at
 * minor 0), create the class/device, hook each channel up to its
 * subsystem-restart notifier, create the ION client, and finally the
 * debugfs root.  Error paths unwind in reverse registration order via
 * the labelled bail-out chain.
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	struct device *dev = 0;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				1));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	/* single device node, named after the first channel */
	dev = device_create(me->class, NULL,
				MKDEV(MAJOR(me->dev_no), 0),
				NULL, gcinfo[0].name);
	VERIFY(err, !IS_ERR_OR_NULL(dev));
	if (err)
		goto device_create_bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		me->channel[i].dev = dev;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	debugfs_root = debugfs_create_dir("adsprpc", NULL);
	return 0;
device_create_bail:
	/* also reached on ION-client failure: notifiers are registered */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (me->channel[i].handle)
			subsys_notif_unregister_notifier(me->channel[i].handle,
							&me->channel[i].nb);
	}
	if (!IS_ERR_OR_NULL(dev))
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}
2663
2664static void __exit fastrpc_device_exit(void)
2665{
2666 struct fastrpc_apps *me = &gfa;
2667 int i;
2668
2669 fastrpc_file_list_dtor(me);
2670 fastrpc_deinit();
2671 for (i = 0; i < NUM_CHANNELS; i++) {
2672 if (!gcinfo[i].name)
2673 continue;
2674 device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
2675 subsys_notif_unregister_notifier(me->channel[i].handle,
2676 &me->channel[i].nb);
2677 }
2678 class_destroy(me->class);
2679 cdev_del(&me->cdev);
2680 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
2681 ion_client_destroy(me->client);
Sathish Ambley1ca68232017-01-19 10:32:55 -08002682 debugfs_remove_recursive(debugfs_root);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002683}
2684
2685late_initcall(fastrpc_device_init);
2686module_exit(fastrpc_device_exit);
2687
2688MODULE_LICENSE("GPL v2");