blob: cdbcb4ceb91907b5c4b1becf7881f39f02c5f387 [file] [log] [blame]
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001/*
Sathish Ambleyae5ee542017-01-16 22:24:23 -08002 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
29#include <soc/qcom/subsystem_notif.h>
30#include <soc/qcom/subsystem_restart.h>
31#include <linux/scatterlist.h>
32#include <linux/fs.h>
33#include <linux/uaccess.h>
34#include <linux/device.h>
35#include <linux/of.h>
36#include <linux/of_address.h>
37#include <linux/of_platform.h>
38#include <linux/dma-contiguous.h>
39#include <linux/cma.h>
40#include <linux/iommu.h>
41#include <linux/kref.h>
42#include <linux/sort.h>
43#include <linux/msm_dma_iommu_mapping.h>
44#include <asm/dma-iommu.h>
45#include "adsprpc_compat.h"
46#include "adsprpc_shared.h"
Sathish Ambley1ca68232017-01-19 10:32:55 -080047#include <linux/debugfs.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070048
49#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
50#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
51#define TZ_PIL_AUTH_QDSP6_PROC 1
52#define FASTRPC_ENOSUCH 39
53#define VMID_SSC_Q6 5
54#define VMID_ADSP_Q6 6
Sathish Ambley1ca68232017-01-19 10:32:55 -080055#define DEBUGFS_SIZE 1024
Sathish Ambley69e1ab02016-10-18 10:28:15 -070056
57#define RPC_TIMEOUT (5 * HZ)
58#define BALIGN 128
59#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
60#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
Sathish Ambleybae51902017-07-03 15:00:49 -070061#define M_FDLIST (16)
62#define M_CRCLIST (64)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070063
64#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
65
66#define FASTRPC_LINK_STATE_DOWN (0x0)
67#define FASTRPC_LINK_STATE_UP (0x1)
68#define FASTRPC_LINK_DISCONNECTED (0x0)
69#define FASTRPC_LINK_CONNECTING (0x1)
70#define FASTRPC_LINK_CONNECTED (0x3)
71#define FASTRPC_LINK_DISCONNECTING (0x7)
72
Sathish Ambleya21b5b52017-01-11 16:11:01 -080073#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
74#define FASTRPC_STATIC_HANDLE_LISTENER (3)
75#define FASTRPC_STATIC_HANDLE_MAX (20)
76
/*
 * PERF(enb, cnt, ff): when profiling is enabled (enb non-zero), snapshot the
 * time with getnstimeofday(), execute the statement list ff, and accumulate
 * the elapsed nanoseconds into the counter cnt via getnstimediff().
 * PERF_END is the terminator token callers append to the ff argument.
 */
#define PERF_END (void)0

#define PERF(enb, cnt, ff) \
	{\
		struct timespec startT = {0};\
		if (enb) {\
			getnstimeofday(&startT);\
		} \
		ff ;\
		if (enb) {\
			cnt += getnstimediff(&startT);\
		} \
	}
90
Sathish Ambley69e1ab02016-10-18 10:28:15 -070091static int fastrpc_glink_open(int cid);
92static void fastrpc_glink_close(void *chan, int cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -080093static struct dentry *debugfs_root;
94static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -070095
96static inline uint64_t buf_page_start(uint64_t buf)
97{
98 uint64_t start = (uint64_t) buf & PAGE_MASK;
99 return start;
100}
101
102static inline uint64_t buf_page_offset(uint64_t buf)
103{
104 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
105 return offset;
106}
107
108static inline int buf_num_pages(uint64_t buf, ssize_t len)
109{
110 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
111 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
112 int nPages = end - start + 1;
113 return nPages;
114}
115
116static inline uint64_t buf_page_size(uint32_t size)
117{
118 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
119
120 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
121}
122
/* Convert a 64-bit address (as carried over RPC) back to a kernel pointer. */
static inline void *uint64_to_ptr(uint64_t addr)
{
	return (void *)(uintptr_t)addr;
}
129
/* Widen a kernel pointer to the 64-bit representation used on the wire. */
static inline uint64_t ptr_to_uint64(void *ptr)
{
	return (uint64_t)(uintptr_t)ptr;
}
136
137struct fastrpc_file;
138
/* A DMA-coherent scratch buffer owned by one fastrpc file instance. */
struct fastrpc_buf {
	struct hlist_node hn;		/* link on fl->bufs free-cache list */
	struct fastrpc_file *fl;	/* owning file instance */
	void *virt;			/* kernel virtual address */
	uint64_t phys;			/* device/phys address (cb id in bits 32+ when SMMU in use) */
	ssize_t size;			/* allocation size in bytes */
};
146
147struct fastrpc_ctx_lst;
148
/*
 * Describes one invoke buffer's address range and, after overlap analysis
 * in context_build_overlap(), the sub-range that must actually be copied.
 */
struct overlap {
	uintptr_t start;	/* buffer start VA */
	uintptr_t end;		/* buffer end VA (start + len) */
	int raix;		/* index of this arg in the original remote-arg order */
	uintptr_t mstart;	/* start of the non-overlapped portion to copy */
	uintptr_t mend;		/* end of the non-overlapped portion (0 if fully contained) */
	uintptr_t offset;	/* offset of start into an earlier overlapping buffer */
};
157
/* Per-invocation state: one remote call in flight (or interrupted). */
struct smq_invoke_ctx {
	struct hlist_node hn;		/* link on clst->pending / interrupted */
	struct completion work;		/* signalled when the DSP responds */
	int retval;			/* remote return value (-1 until completed) */
	int pid;			/* invoking thread id (used to re-match interrupted ctx) */
	int tgid;			/* invoking process id */
	remote_arg_t *lpra;		/* local (caller-provided) args */
	remote_arg64_t *rpra;		/* 64-bit args laid out in the shared buffer */
	int *fds;			/* per-arg ion fds, -1/0 if none */
	unsigned int *attrs;		/* per-arg FASTRPC_ATTR_* flags */
	struct fastrpc_mmap **maps;	/* per-arg ion mappings */
	struct fastrpc_buf *buf;	/* metadata + copy buffer for this call */
	ssize_t used;			/* bytes of buf consumed (copylen) */
	struct fastrpc_file *fl;	/* owning file instance */
	uint32_t sc;			/* scalars word: method + buffer counts */
	struct overlap *overs;		/* per-arg overlap records */
	struct overlap **overps;	/* overs sorted for overlap analysis */
	struct smq_msg msg;		/* message sent over glink */
	uint32_t *crc;			/* user pointer for returned CRC list, may be 0 */
};
178
/* Per-file lists of in-flight and interrupted invoke contexts. */
struct fastrpc_ctx_lst {
	struct hlist_head pending;	/* contexts awaiting a DSP response */
	struct hlist_head interrupted;	/* contexts parked by a signal, resumable */
};
183
/* SMMU (IOMMU) configuration for one session's context bank. */
struct fastrpc_smmu {
	struct dma_iommu_mapping *mapping;	/* arm iommu mapping handle */
	int cb;			/* context bank id (folded into phys addrs) */
	int enabled;		/* non-zero when SMMU translation is active */
	int faults;		/* fault accounting -- TODO confirm writer */
	int secure;		/* session handles secure (CPZ) buffers */
	int coherent;		/* IO-coherent context bank */
};
192
/* One DSP session: the device used for DMA mapping plus its SMMU state. */
struct fastrpc_session_ctx {
	struct device *dev;		/* device to attach/map dma-bufs against */
	struct fastrpc_smmu smmu;	/* SMMU config for this session */
	int used;			/* non-zero once allocated to a client */
};
198
/* G-link transport state for one channel (see FASTRPC_LINK_* constants). */
struct fastrpc_glink_info {
	int link_state;			/* FASTRPC_LINK_STATE_UP/DOWN */
	int port_state;			/* FASTRPC_LINK_DISCONNECTED..DISCONNECTING */
	struct glink_open_config cfg;	/* open parameters for the channel */
	struct glink_link_info link_info;	/* edge/transport identification */
	void *link_notify_handle;	/* handle from glink link-state registration */
};
206
/* Per-DSP channel (adsp/mdsp/slpi/cdsp) state, one entry of gcinfo[]. */
struct fastrpc_channel_ctx {
	char *name;		/* device node name, e.g. "adsprpc-smd" */
	char *subsys;		/* subsystem-restart name, e.g. "adsp" */
	void *chan;		/* open glink channel handle */
	struct device *dev;	/* channel's platform device */
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;		/* channel open/close handshake */
	struct completion workport;	/* port (local open ack) handshake */
	struct notifier_block nb;	/* subsystem-restart notifier */
	struct kref kref;	/* open-channel reference count */
	int sesscount;		/* sessions configured from DT */
	int ssrcount;		/* subsystem restarts observed */
	void *handle;		/* subsys_notif registration handle */
	int prevssrcount;	/* ssrcount at last successful init */
	int vmid;		/* remote VM id for hyp_assign_phys, 0 if none */
	struct fastrpc_glink_info link;	/* transport state */
};
224
/* Global driver state (singleton gfa). */
struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;	/* -> gcinfo[] */
	struct cdev cdev;		/* character device */
	struct class *class;		/* device class for /dev nodes */
	struct mutex smd_mutex;		/* serializes channel open/close */
	struct smq_phy_page range;	/* shared memory range -- TODO confirm use */
	struct hlist_head maps;		/* global (persistent) mappings */
	dev_t dev_no;			/* allocated char-dev number */
	int compat;			/* 32-bit compat ioctl in use */
	struct hlist_head drivers;	/* all open fastrpc_file instances */
	spinlock_t hlock;		/* protects drivers and maps lists */
	struct ion_client *client;	/* ion client for buffer import */
	struct device *dev;		/* default (non-SMMU) DMA device */
};
239
/* A mapped ion/dma-buf region shared with the DSP. */
struct fastrpc_mmap {
	struct hlist_node hn;		/* link on fl->maps or gfa.maps */
	struct fastrpc_file *fl;	/* owning file instance */
	struct fastrpc_apps *apps;	/* back-pointer to global state */
	int fd;				/* ion/dma-buf fd supplied by userspace */
	uint32_t flags;			/* mmap flags passed at creation */
	struct dma_buf *buf;		/* dma-buf handle */
	struct sg_table *table;		/* mapped scatter-gather table */
	struct dma_buf_attachment *attach;	/* attachment to session dev */
	struct ion_handle *handle;	/* ion import handle */
	uint64_t phys;			/* device address (cb id in bits 32+ with SMMU) */
	ssize_t size;			/* mapped size */
	uintptr_t va;			/* userspace virtual address */
	ssize_t len;			/* requested length */
	int refs;			/* reference count, guarded by fl->hlock */
	uintptr_t raddr;		/* remote-side address after mmap on DSP */
	int uncached;			/* buffer is not CPU-cached */
	int secure;			/* ION_FLAG_SECURE buffer */
	uintptr_t attr;			/* FASTRPC_ATTR_* attributes */
};
260
/*
 * Per-file profiling accumulators, in nanoseconds, filled by the PERF()
 * macro when fl->profile is enabled (reported via PERF_KEYS order).
 */
struct fastrpc_perf {
	int64_t count;		/* number of profiled invocations */
	int64_t flush;		/* cache flush time */
	int64_t map;		/* ion map time */
	int64_t copy;		/* argument copy time */
	int64_t link;		/* glink transfer time */
	int64_t getargs;	/* get_args() time */
	int64_t putargs;	/* put_args() time */
	int64_t invargs;	/* cache invalidate time */
	int64_t invoke;		/* total invoke time */
};
272
/* Per-open-fd state: one userspace client of the driver. */
struct fastrpc_file {
	struct hlist_node hn;		/* link on gfa.drivers */
	spinlock_t hlock;		/* protects maps, bufs, clst lists */
	struct hlist_head maps;		/* this client's ion mappings */
	struct hlist_head bufs;		/* cached scratch buffers */
	struct fastrpc_ctx_lst clst;	/* pending/interrupted invokes */
	struct fastrpc_session_ctx *sctx;	/* non-secure session */
	struct fastrpc_session_ctx *secsctx;	/* secure session, lazily allocated */
	uint32_t mode;			/* FASTRPC_MODE_* set via ioctl */
	uint32_t profile;		/* non-zero enables PERF() accounting */
	int tgid;			/* owning process id */
	int cid;			/* channel id (index into gcinfo) */
	int ssrcount;			/* channel ssrcount at open, detects restarts */
	int pd;				/* process domain on the DSP -- TODO confirm values */
	int file_close;			/* set while the fd is being closed */
	struct fastrpc_apps *apps;	/* back-pointer to global state */
	struct fastrpc_perf perf;	/* profiling counters */
	struct dentry *debugfs_file;	/* per-client debugfs node */
};
292
293static struct fastrpc_apps gfa;
294
/*
 * Static channel table, one entry per remote DSP subsystem; index is the
 * channel id (cid). Edge/transport names select the glink endpoint.
 */
static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};
321
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800322static inline int64_t getnstimediff(struct timespec *start)
323{
324 int64_t ns;
325 struct timespec ts, b;
326
327 getnstimeofday(&ts);
328 b = timespec_sub(ts, *start);
329 ns = timespec_to_ns(&b);
330 return ns;
331}
332
/*
 * Release a scratch buffer. With cache != 0 the buffer is parked on the
 * owner's free-cache list for reuse; otherwise it is re-assigned back to
 * HLOS (if the channel has a remote vmid), freed and its struct released.
 */
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == 0 ? 0 : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		/* keep for reuse by fastrpc_buf_alloc() */
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		/* strip the context-bank id folded into the upper 32 bits */
		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			/* return the pages to HLOS-only ownership */
			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->dev, buf->size, buf->virt,
					buf->phys);
	}
	kfree(buf);
}
364
/*
 * Drain and free every cached buffer on fl->bufs. Buffers are popped one
 * at a time under the lock and freed outside it, since fastrpc_buf_free()
 * may sleep (dma_free_coherent / hyp_assign_phys).
 */
static void fastrpc_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			hlist_del_init(&buf->hn);
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}
384
/* Insert a new mapping at the head of its owner's maps list. */
static void fastrpc_mmap_add(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl = map->fl;

	spin_lock(&fl->hlock);
	hlist_add_head(&map->hn, &fl->maps);
	spin_unlock(&fl->hlock);
}
393
/*
 * Look up an existing mapping of fd that fully covers [va, va + len).
 * When refs is non-zero the match's reference count is bumped under the
 * lock. Returns 0 and sets *ppmap on success, -ENOTTY when not found.
 */
static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
	ssize_t len, int mflags, int refs, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = 0, *map;
	struct hlist_node *n;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		/* range must be contained in the existing mapping of same fd */
		if (va >= map->va &&
		    va + len <= map->va + map->len &&
		    map->fd == fd) {
			if (refs)
				map->refs++;
			match = map;
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}
418
/*
 * Unlink (but do not free) the mapping whose remote address range is
 * exactly [va, va + len) and whose refcount is 1 (sole owner). The global
 * list is searched first, then the per-file list. Returns 0 and sets
 * *ppmap on success, -ENOTTY when no removable match exists.
 */
static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       ssize_t len, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = 0, *map;
	struct hlist_node *n;
	struct fastrpc_apps *me = &gfa;

	/* first try the global (persistent) mappings */
	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	/* then this client's own mappings */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}
458
/*
 * Drop one reference on a mapping; on the final reference, unlink it and
 * tear down the DMA mapping, ion handle, hypervisor assignment and dma-buf
 * attachment in reverse order of creation (mirrors fastrpc_mmap_create()).
 */
static void fastrpc_mmap_free(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;
	int destVM[1] = {VMID_HLOS};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	if (!map)
		return;
	fl = map->fl;
	spin_lock(&fl->hlock);
	map->refs--;
	if (!map->refs)
		hlist_del_init(&map->hn);
	spin_unlock(&fl->hlock);
	/*
	 * NOTE(review): refs is re-read outside the lock here; looks safe
	 * only if concurrent ref gains are impossible at this point - confirm.
	 */
	if (map->refs > 0)
		return;
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	if (!IS_ERR_OR_NULL(map->handle))
		ion_free(fl->apps->client, map->handle);
	/* sess may be NULL if secure session allocation failed earlier */
	if (sess && sess->smmu.enabled) {
		if (map->size || map->phys)
			msm_dma_unmap_sg(sess->dev,
				map->table->sgl,
				map->table->nents, DMA_BIDIRECTIONAL,
				map->buf);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid && map->phys) {
		int srcVM[2] = {VMID_HLOS, vmid};

		/* give the pages back to HLOS-only ownership */
		hyp_assign_phys(map->phys, buf_page_size(map->size),
			srcVM, 2, destVM, destVMperm, 1);
	}

	if (!IS_ERR_OR_NULL(map->table))
		dma_buf_unmap_attachment(map->attach, map->table,
				DMA_BIDIRECTIONAL);
	if (!IS_ERR_OR_NULL(map->attach))
		dma_buf_detach(map->buf, map->attach);
	if (!IS_ERR_OR_NULL(map->buf))
		dma_buf_put(map->buf);
	kfree(map);
}
508
509static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
510 struct fastrpc_session_ctx **session);
511
/*
 * Create (or reuse) a mapping of the ion buffer behind fd covering
 * [va, va + len). On reuse an existing map's refcount is bumped and
 * returned. Otherwise: import the ion handle, attach and map the dma-buf
 * against the session device (secure session for ION_FLAG_SECURE buffers),
 * optionally hyp-assign the pages to the remote VM, and register the map
 * on the file's list. Any failure unwinds through fastrpc_mmap_free().
 * Returns 0 on success with *ppmap set.
 */
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, ssize_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = 0;
	unsigned long attrs;
	unsigned long flags;
	int err = 0, vmid;

	/* fast path: an existing covering map takes a new reference */
	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	VERIFY(err, !IS_ERR_OR_NULL(map->handle =
			ion_import_dma_buf_fd(fl->apps->client, fd)));
	if (err)
		goto bail;
	VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
	if (err)
		goto bail;

	map->uncached = !ION_IS_CACHED(flags);
	if (map->attr & FASTRPC_ATTR_NOVA)
		map->uncached = 1;

	map->secure = flags & ION_FLAG_SECURE;
	if (map->secure) {
		/* secure buffers need the (lazily created) secure session */
		if (!fl->secsctx)
			err = fastrpc_session_alloc(chan, 1,
						&fl->secsctx);
		if (err)
			goto bail;
	}
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;
	VERIFY(err, !IS_ERR_OR_NULL(sess));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->attach =
			dma_buf_attach(map->buf, sess->dev)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
	if (err)
		goto bail;
	if (sess->smmu.enabled) {
		attrs = DMA_ATTR_EXEC_MAPPING;

		/* coherency override from buffer attrs vs. session default */
		if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
			(sess->smmu.coherent && map->uncached))
			attrs |= DMA_ATTR_FORCE_NON_COHERENT;
		else if (map->attr & FASTRPC_ATTR_COHERENT)
			attrs |= DMA_ATTR_FORCE_COHERENT;

		VERIFY(err, map->table->nents ==
			msm_dma_map_sg_attrs(sess->dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
		if (err)
			goto bail;
	} else {
		/* without an SMMU the buffer must be physically contiguous */
		VERIFY(err, map->table->nents == 1);
		if (err)
			goto bail;
	}
	map->phys = sg_dma_address(map->table->sgl);
	if (sess->smmu.cb) {
		/* fold the context-bank id into the upper address bits */
		map->phys += ((uint64_t)sess->smmu.cb << 32);
		map->size = sg_dma_len(map->table->sgl);
	} else {
		map->size = buf_page_size(len);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		/* share the pages with the remote VM */
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(map->phys,
				buf_page_size(map->size),
				srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}
	map->va = va;
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}
628
/*
 * Get a DMA-coherent buffer of at least `size` bytes: reuse the smallest
 * adequate cached buffer from fl->bufs, or allocate a new one (retrying
 * once after draining the cache on allocation failure). Newly allocated
 * buffers are hyp-assigned to the remote VM when the channel has one.
 * Returns 0 with *obuf set, or a negative error.
 */
static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
			     struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = 0, *fr = 0;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = 0;
	VERIFY(err, buf = kzalloc(sizeof(*buf), GFP_KERNEL));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = 0;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
				       (void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
					       (void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	/* fold the context-bank id into the upper address bits */
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		/* share the pages with the remote VM */
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
				srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
 bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}
694
695
/*
 * If the calling thread has an invoke context parked on the interrupted
 * list (from a signal during a previous invoke), and its scalars match the
 * new request, move it back to pending and return it via *po so the wait
 * can resume. Returns -1 when the thread's parked context does not match.
 */
static int context_restore_interrupted(struct fastrpc_file *fl,
				       struct fastrpc_ioctl_invoke_crc *inv,
				       struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = 0, *ictx = 0;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			/* same thread must re-issue the identical call */
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}
723
724#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
725static int overlap_ptr_cmp(const void *a, const void *b)
726{
727 struct overlap *pa = *((struct overlap **)a);
728 struct overlap *pb = *((struct overlap **)b);
729 /* sort with lowest starting buffer first */
730 int st = CMP(pa->start, pb->start);
731 /* sort with highest ending buffer first */
732 int ed = CMP(pb->end, pa->end);
733 return st == 0 ? ed : st;
734}
735
/*
 * Compute, for every input/output buffer of the call, the sub-range that
 * actually needs copying (mstart/mend) after accounting for buffers that
 * overlap earlier ones in address order. Rejects buffers whose end wraps
 * around (end <= start with non-zero length). Returns 0 on success.
 */
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			/* guard against address-range wrap-around */
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, 0);
	/* sweep in address order, tracking the furthest range end seen */
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			/* overlaps an earlier buffer: copy only the tail */
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				/* fully contained: nothing extra to copy */
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			/* disjoint: copy the whole buffer */
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}
781
/*
 * Copy helpers usable for both user and in-kernel callers: when `kernel`
 * is false the pointer crossing the boundary is a userspace address and
 * copy_{from,to}_user() is used (failure recorded in `err` via VERIFY);
 * otherwise both sides are kernel memory and a plain memmove() suffices.
 */
#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst), (src),\
							(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((dst), (src),\
						      (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)
799
800
801static void context_free(struct smq_invoke_ctx *ctx);
802
/*
 * Allocate and initialise an invoke context for one remote call. The ctx
 * struct and all of its per-argument arrays (maps, lpra, fds, attrs,
 * overs, overps) live in a single kzalloc'd block, carved out in that
 * order right after the struct. Argument descriptors, fds and attrs are
 * copied in from the caller (user or kernel per `kernel`), overlap
 * analysis is run when there are buffers, and the context is queued on
 * the pending list. Returns 0 with *po set, unwinding via context_free()
 * on any failure.
 */
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			 struct fastrpc_ioctl_invoke_crc *invokefd,
			 struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	/* one slot of every per-arg array, all in the same allocation */
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	/* carve the trailing block into the per-argument arrays */
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}
	ctx->crc = (uint32_t *)invokefd->crc;
	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = current->tgid;
	init_completion(&ctx->work);

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}
872
/*
 * Park an in-flight context on the interrupted list (the invoking thread
 * took a signal); context_restore_interrupted() can later resume it.
 */
static void context_save_interrupted(struct smq_invoke_ctx *ctx)
{
	struct fastrpc_ctx_lst *clst = &ctx->fl->clst;

	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	hlist_add_head(&ctx->hn, &clst->interrupted);
	spin_unlock(&ctx->fl->hlock);
	/* free the cache on power collapse */
	fastrpc_buf_list_free(ctx->fl);
}
884
/*
 * Tear down an invoke context: unlink it, drop the references it holds on
 * per-argument mappings, return its scratch buffer to the cache and free
 * the single allocation holding the ctx and its arrays.
 */
static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i]);
	/* cache = 1: keep the buffer for reuse by later invokes */
	fastrpc_buf_free(ctx->buf, 1);
	kfree(ctx);
}
898
/* Record the remote return value and wake the thread waiting on ctx->work. */
static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}
904
905
/*
 * Wake every pending and interrupted invoke of one client. Note this
 * completes without setting retval - presumably used on subsystem restart
 * so waiters re-evaluate channel state; confirm against callers.
 */
static void fastrpc_notify_users(struct fastrpc_file *me)
{
	struct smq_invoke_ctx *ictx;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
		complete(&ictx->work);
	}
	hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
		complete(&ictx->work);
	}
	spin_unlock(&me->hlock);

}
921
/* Wake the waiters of every client attached to channel `cid`. */
static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
{
	struct fastrpc_file *fl;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		if (fl->cid == cid)
			fastrpc_notify_users(fl);
	}
	spin_unlock(&me->hlock);

}
935static void context_list_ctor(struct fastrpc_ctx_lst *me)
936{
937 INIT_HLIST_HEAD(&me->interrupted);
938 INIT_HLIST_HEAD(&me->pending);
939}
940
/*
 * Free every interrupted, then every pending, invoke context of a client.
 * Contexts are popped one at a time under the lock and freed outside it,
 * since context_free() takes the same lock and may sleep.
 */
static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
{
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct smq_invoke_ctx *ictx = 0, *ctxfree;
	struct hlist_node *n;

	do {
		ctxfree = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
	do {
		ctxfree = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
}
972
973static int fastrpc_file_free(struct fastrpc_file *fl);
/*
 * Free every open client of the driver (driver teardown path). Files are
 * unlinked one at a time under the global lock and freed outside it, as
 * fastrpc_file_free() may sleep.
 */
static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
{
	struct fastrpc_file *fl, *free;
	struct hlist_node *n;

	do {
		free = 0;
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
			hlist_del_init(&fl->hn);
			free = fl;
			break;
		}
		spin_unlock(&me->hlock);
		if (free)
			fastrpc_file_free(free);
	} while (free);
}
992
993static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
994{
995 remote_arg64_t *rpra;
996 remote_arg_t *lpra = ctx->lpra;
997 struct smq_invoke_buf *list;
998 struct smq_phy_page *pages, *ipage;
999 uint32_t sc = ctx->sc;
1000 int inbufs = REMOTE_SCALARS_INBUFS(sc);
1001 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001002 int handles, bufs = inbufs + outbufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001003 uintptr_t args;
1004 ssize_t rlen = 0, copylen = 0, metalen = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001005 int i, oix;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001006 int err = 0;
1007 int mflags = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001008 uint64_t *fdlist;
Sathish Ambleybae51902017-07-03 15:00:49 -07001009 uint32_t *crclist;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001010
1011 /* calculate size of the metadata */
1012 rpra = 0;
1013 list = smq_invoke_buf_start(rpra, sc);
1014 pages = smq_phy_page_start(sc, list);
1015 ipage = pages;
1016
1017 for (i = 0; i < bufs; ++i) {
1018 uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
1019 ssize_t len = lpra[i].buf.len;
1020
1021 if (ctx->fds[i] && (ctx->fds[i] != -1))
1022 fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1023 ctx->attrs[i], buf, len,
1024 mflags, &ctx->maps[i]);
1025 ipage += 1;
1026 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001027 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1028 for (i = bufs; i < bufs + handles; i++) {
1029 VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1030 FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
1031 if (err)
1032 goto bail;
1033 ipage += 1;
1034 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001035 metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
1036 (sizeof(uint32_t) * M_CRCLIST);
1037
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001038 /* calculate len requreed for copying */
1039 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1040 int i = ctx->overps[oix]->raix;
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001041 uintptr_t mstart, mend;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001042 ssize_t len = lpra[i].buf.len;
1043
1044 if (!len)
1045 continue;
1046 if (ctx->maps[i])
1047 continue;
1048 if (ctx->overps[oix]->offset == 0)
1049 copylen = ALIGN(copylen, BALIGN);
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001050 mstart = ctx->overps[oix]->mstart;
1051 mend = ctx->overps[oix]->mend;
1052 VERIFY(err, (mend - mstart) <= LONG_MAX);
1053 if (err)
1054 goto bail;
1055 copylen += mend - mstart;
1056 VERIFY(err, copylen >= 0);
1057 if (err)
1058 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001059 }
1060 ctx->used = copylen;
1061
1062 /* allocate new buffer */
1063 if (copylen) {
1064 VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
1065 if (err)
1066 goto bail;
1067 }
Tharun Kumar Merugue3361f92017-06-22 10:45:43 +05301068 if (ctx->buf->virt && metalen <= copylen)
1069 memset(ctx->buf->virt, 0, metalen);
1070
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001071 /* copy metadata */
1072 rpra = ctx->buf->virt;
1073 ctx->rpra = rpra;
1074 list = smq_invoke_buf_start(rpra, sc);
1075 pages = smq_phy_page_start(sc, list);
1076 ipage = pages;
1077 args = (uintptr_t)ctx->buf->virt + metalen;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001078 for (i = 0; i < bufs + handles; ++i) {
1079 if (lpra[i].buf.len)
1080 list[i].num = 1;
1081 else
1082 list[i].num = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001083 list[i].pgidx = ipage - pages;
1084 ipage++;
1085 }
1086 /* map ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001087 PERF(ctx->fl->profile, ctx->fl->perf.map,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001088 for (i = 0; i < inbufs + outbufs; ++i) {
1089 struct fastrpc_mmap *map = ctx->maps[i];
1090 uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
1091 ssize_t len = lpra[i].buf.len;
1092
1093 rpra[i].buf.pv = 0;
1094 rpra[i].buf.len = len;
1095 if (!len)
1096 continue;
1097 if (map) {
1098 struct vm_area_struct *vma;
1099 uintptr_t offset;
1100 int num = buf_num_pages(buf, len);
1101 int idx = list[i].pgidx;
1102
1103 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001104 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001105 } else {
1106 down_read(&current->mm->mmap_sem);
1107 VERIFY(err, NULL != (vma = find_vma(current->mm,
1108 map->va)));
1109 if (err) {
1110 up_read(&current->mm->mmap_sem);
1111 goto bail;
1112 }
1113 offset = buf_page_start(buf) - vma->vm_start;
1114 up_read(&current->mm->mmap_sem);
1115 VERIFY(err, offset < (uintptr_t)map->size);
1116 if (err)
1117 goto bail;
1118 }
1119 pages[idx].addr = map->phys + offset;
1120 pages[idx].size = num << PAGE_SHIFT;
1121 }
1122 rpra[i].buf.pv = buf;
1123 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001124 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001125 for (i = bufs; i < bufs + handles; ++i) {
1126 struct fastrpc_mmap *map = ctx->maps[i];
1127
1128 pages[i].addr = map->phys;
1129 pages[i].size = map->size;
1130 }
1131 fdlist = (uint64_t *)&pages[bufs + handles];
1132 for (i = 0; i < M_FDLIST; i++)
1133 fdlist[i] = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001134 crclist = (uint32_t *)&fdlist[M_FDLIST];
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301135 memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001136
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001137 /* copy non ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001138 PERF(ctx->fl->profile, ctx->fl->perf.copy,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001139 rlen = copylen - metalen;
1140 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1141 int i = ctx->overps[oix]->raix;
1142 struct fastrpc_mmap *map = ctx->maps[i];
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001143 ssize_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001144 uint64_t buf;
1145 ssize_t len = lpra[i].buf.len;
1146
1147 if (!len)
1148 continue;
1149 if (map)
1150 continue;
1151 if (ctx->overps[oix]->offset == 0) {
1152 rlen -= ALIGN(args, BALIGN) - args;
1153 args = ALIGN(args, BALIGN);
1154 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001155 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001156 VERIFY(err, rlen >= mlen);
1157 if (err)
1158 goto bail;
1159 rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
1160 pages[list[i].pgidx].addr = ctx->buf->phys -
1161 ctx->overps[oix]->offset +
1162 (copylen - rlen);
1163 pages[list[i].pgidx].addr =
1164 buf_page_start(pages[list[i].pgidx].addr);
1165 buf = rpra[i].buf.pv;
1166 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1167 if (i < inbufs) {
1168 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1169 lpra[i].buf.pv, len);
1170 if (err)
1171 goto bail;
1172 }
1173 args = args + mlen;
1174 rlen -= mlen;
1175 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001176 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001177
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001178 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001179 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1180 int i = ctx->overps[oix]->raix;
1181 struct fastrpc_mmap *map = ctx->maps[i];
1182
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001183 if (map && map->uncached)
1184 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301185 if (ctx->fl->sctx->smmu.coherent &&
1186 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1187 continue;
1188 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1189 continue;
1190
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001191 if (rpra[i].buf.len && ctx->overps[oix]->mstart)
1192 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1193 uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
1194 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001195 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001196 for (i = bufs; i < bufs + handles; i++) {
1197 rpra[i].dma.fd = ctx->fds[i];
1198 rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1199 rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001200 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001201
1202 if (!ctx->fl->sctx->smmu.coherent) {
1203 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001204 dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001205 PERF_END);
1206 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001207 bail:
1208 return err;
1209}
1210
/*
 * Copy results back to the caller after a remote invocation completes.
 *
 * Output buffers that were staged in the copy buffer (no ion mapping)
 * are copied back to the caller's destination in ctx->lpra; mapped
 * output buffers only have their local mappings released.  The DSP may
 * also return a list of fds it is done with (fdlist) and per-buffer
 * CRCs (crclist) in the metadata area.
 *
 * @kernel: nonzero when the destination pointers are kernel pointers.
 * Returns 0 on success or a copy error from K_COPY_TO_USER.
 */
static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		 remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	uint32_t *crclist = NULL;

	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	/* fdlist and crclist sit in the metadata right after pages[] */
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	crclist = (uint32_t *)(fdlist + M_FDLIST);

	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			/* copied (non-ion) output: copy data back */
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			/* mapped output: nothing to copy, drop the map */
			fastrpc_mmap_free(ctx->maps[i]);
			ctx->maps[i] = 0;
		}
	}
	if (inbufs + outbufs + handles) {
		/* release local mappings for fds the DSP reported done */
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap);
		}
	}
	/* hand per-buffer CRCs back to userspace if requested */
	if (ctx->crc && crclist && rpra)
		K_COPY_TO_USER(err, kernel, (void __user *)ctx->crc,
			crclist, M_CRCLIST*sizeof(uint32_t));

 bail:
	return err;
}
1262
/*
 * Pre-invoke cache maintenance: flush the (at most one) CPU cache line
 * at each unaligned edge of every output buffer.  The DSP writes these
 * buffers during the call; a dirty CPU cache line straddling a buffer
 * edge could be evicted later and clobber the DSP's writes.
 */
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		/* uncached mappings need no CPU cache maintenance */
		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		/* io-coherent SMMU keeps caches in sync unless the map
		 * explicitly opted out via FASTRPC_ATTR_NON_COHERENT
		 */
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* buffers on the same page as the metadata are covered by
		 * the flush of the metadata buffer itself
		 */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
							rpra[i].buf.len);
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}
1298
/*
 * Post-invoke cache maintenance: invalidate CPU caches over every
 * output buffer so subsequent CPU reads see what the DSP wrote.  The
 * same skip rules as inv_args_pre() apply (uncached maps, io-coherent
 * SMMU, explicitly coherent maps).  The metadata buffer itself is
 * invalidated last.
 */
static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* buffers sharing a page with the metadata are covered by
		 * the final metadata invalidate below
		 */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		/* ion-backed maps go through the ion cache op; otherwise
		 * invalidate the virtual range directly
		 */
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						 + rpra[i].buf.len));
	}

	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}
1338
/*
 * Package the invocation described by `ctx` into an smq_msg and send it
 * to the DSP over the channel's glink handle.
 *
 * The context cookie is tagged with fl->pd in its low bit (masked off
 * again in fastrpc_glink_notify_rx()); kernel-originated calls send
 * pid 0.  Fails with -ECONNRESET if the channel was restarted (SSR)
 * since this file attached to it.
 */
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, 0 != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = current->tgid;
	msg->tid = current->pid;
	if (kernel)
		msg->pid = 0;
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	/* channel restarted since this fd attached: tell the caller */
	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
			FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
 bail:
	return err;
}
1374
1375static void fastrpc_init(struct fastrpc_apps *me)
1376{
1377 int i;
1378
1379 INIT_HLIST_HEAD(&me->drivers);
1380 spin_lock_init(&me->hlock);
1381 mutex_init(&me->smd_mutex);
1382 me->channel = &gcinfo[0];
1383 for (i = 0; i < NUM_CHANNELS; i++) {
1384 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301385 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001386 me->channel[i].sesscount = 0;
1387 }
1388}
1389
1390static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1391
/*
 * Core invocation path: marshal arguments, send the message to the DSP,
 * wait for the response, then unmarshal results back to the caller.
 *
 * @mode:   not referenced in this function body (kept for API symmetry).
 * @kernel: nonzero for driver-originated calls; copies stay in kernel
 *          space and the completion wait is non-interruptible.
 * @inv:    invocation descriptor (handle, scalars, args, optional crc).
 *
 * A user call interrupted by a signal saves its context
 * (context_save_interrupted) so a re-issued ioctl can resume waiting
 * via context_restore_interrupted().
 */
static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
				   uint32_t kernel,
				   struct fastrpc_ioctl_invoke_crc *inv)
{
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;
	int cid = fl->cid;
	int interrupted = 0;
	int err = 0;
	struct timespec invoket;

	if (fl->profile)
		getnstimeofday(&invoket);

	VERIFY(err, fl->sctx != NULL);
	if (err)
		goto bail;
	VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
	if (err)
		goto bail;

	if (!kernel) {
		/* resume a previously signal-interrupted invocation */
		VERIFY(err, 0 == context_restore_interrupted(fl, inv,
								&ctx));
		if (err)
			goto bail;
		if (fl->sctx->smmu.faults)
			err = FASTRPC_ENOSUCH;
		if (err)
			goto bail;
		if (ctx)
			goto wait;
	}

	VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
	if (err)
		goto bail;

	if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
		PERF(fl->profile, fl->perf.getargs,
		VERIFY(err, 0 == get_args(kernel, ctx));
		PERF_END);
		if (err)
			goto bail;
	}

	/* flush unaligned output-buffer edges before the DSP writes them */
	if (!fl->sctx->smmu.coherent)
		inv_args_pre(ctx);
	PERF(fl->profile, fl->perf.link,
	VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
	PERF_END);

	if (err)
		goto bail;
 wait:
	if (kernel)
		wait_for_completion(&ctx->work);
	else {
		interrupted = wait_for_completion_interruptible(&ctx->work);
		VERIFY(err, 0 == (err = interrupted));
		if (err)
			goto bail;
	}

	PERF(fl->profile, fl->perf.invargs,
	if (!fl->sctx->smmu.coherent)
		inv_args(ctx);
	PERF_END);

	VERIFY(err, 0 == (err = ctx->retval));
	if (err)
		goto bail;

	PERF(fl->profile, fl->perf.putargs,
	VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
	PERF_END);
	if (err)
		goto bail;
 bail:
	/* a signal-interrupted context is parked for later resumption */
	if (ctx && interrupted == -ERESTARTSYS)
		context_save_interrupted(ctx);
	else if (ctx)
		context_free(ctx);
	/* NOTE(review): positive ECONNRESET here, while
	 * fastrpc_invoke_send() uses -ECONNRESET — confirm the sign is
	 * intended; a positive return propagates differently to callers.
	 */
	if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
		err = ECONNRESET;

	if (fl->profile && !interrupted) {
		if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
			fl->perf.invoke += getnstimediff(&invoket);
		if (!(invoke->handle >= 0 &&
			invoke->handle <= FASTRPC_STATIC_HANDLE_MAX))
			fl->perf.count++;
	}
	return err;
}
1487
Sathish Ambley36849af2017-02-02 09:35:55 -08001488static int fastrpc_channel_open(struct fastrpc_file *fl);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001489static int fastrpc_init_process(struct fastrpc_file *fl,
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001490 struct fastrpc_ioctl_init_attrs *uproc)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001491{
1492 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001493 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001494 struct fastrpc_ioctl_init *init = &uproc->init;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001495 struct smq_phy_page pages[1];
1496 struct fastrpc_mmap *file = 0, *mem = 0;
1497
Sathish Ambley36849af2017-02-02 09:35:55 -08001498 VERIFY(err, !fastrpc_channel_open(fl));
1499 if (err)
1500 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001501 if (init->flags == FASTRPC_INIT_ATTACH) {
1502 remote_arg_t ra[1];
1503 int tgid = current->tgid;
1504
1505 ra[0].buf.pv = (void *)&tgid;
1506 ra[0].buf.len = sizeof(tgid);
1507 ioctl.inv.handle = 1;
1508 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
1509 ioctl.inv.pra = ra;
1510 ioctl.fds = 0;
1511 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001512 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001513 fl->pd = 0;
1514 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1515 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1516 if (err)
1517 goto bail;
1518 } else if (init->flags == FASTRPC_INIT_CREATE) {
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001519 remote_arg_t ra[6];
1520 int fds[6];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001521 int mflags = 0;
1522 struct {
1523 int pgid;
1524 int namelen;
1525 int filelen;
1526 int pageslen;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001527 int attrs;
1528 int siglen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001529 } inbuf;
1530
1531 inbuf.pgid = current->tgid;
1532 inbuf.namelen = strlen(current->comm) + 1;
1533 inbuf.filelen = init->filelen;
1534 fl->pd = 1;
1535 if (init->filelen) {
1536 VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
1537 init->file, init->filelen, mflags, &file));
1538 if (err)
1539 goto bail;
1540 }
1541 inbuf.pageslen = 1;
1542 VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
1543 init->mem, init->memlen, mflags, &mem));
1544 if (err)
1545 goto bail;
1546 inbuf.pageslen = 1;
1547 ra[0].buf.pv = (void *)&inbuf;
1548 ra[0].buf.len = sizeof(inbuf);
1549 fds[0] = 0;
1550
1551 ra[1].buf.pv = (void *)current->comm;
1552 ra[1].buf.len = inbuf.namelen;
1553 fds[1] = 0;
1554
1555 ra[2].buf.pv = (void *)init->file;
1556 ra[2].buf.len = inbuf.filelen;
1557 fds[2] = init->filefd;
1558
1559 pages[0].addr = mem->phys;
1560 pages[0].size = mem->size;
1561 ra[3].buf.pv = (void *)pages;
1562 ra[3].buf.len = 1 * sizeof(*pages);
1563 fds[3] = 0;
1564
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001565 inbuf.attrs = uproc->attrs;
1566 ra[4].buf.pv = (void *)&(inbuf.attrs);
1567 ra[4].buf.len = sizeof(inbuf.attrs);
1568 fds[4] = 0;
1569
1570 inbuf.siglen = uproc->siglen;
1571 ra[5].buf.pv = (void *)&(inbuf.siglen);
1572 ra[5].buf.len = sizeof(inbuf.siglen);
1573 fds[5] = 0;
1574
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001575 ioctl.inv.handle = 1;
1576 ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001577 if (uproc->attrs)
1578 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001579 ioctl.inv.pra = ra;
1580 ioctl.fds = fds;
1581 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001582 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001583 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1584 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1585 if (err)
1586 goto bail;
1587 } else {
1588 err = -ENOTTY;
1589 }
1590bail:
1591 if (mem && err)
1592 fastrpc_mmap_free(mem);
1593 if (file)
1594 fastrpc_mmap_free(file);
1595 return err;
1596}
1597
/*
 * Tell the DSP to tear down the remote process associated with this fd
 * (identified by fl->tgid).  Invoked during file teardown; requires a
 * valid channel id and an open glink channel.
 */
static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_crc ioctl;
	remote_arg_t ra[1];
	int tgid = 0;

	VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
	if (err)
		goto bail;
	VERIFY(err, fl->apps->channel[fl->cid].chan != 0);
	if (err)
		goto bail;
	tgid = fl->tgid;
	ra[0].buf.pv = (void *)&tgid;
	ra[0].buf.len = sizeof(tgid);
	ioctl.inv.handle = 1;
	ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	ioctl.crc = NULL;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
bail:
	return err;
}
1625
1626static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
1627 struct fastrpc_mmap *map)
1628{
Sathish Ambleybae51902017-07-03 15:00:49 -07001629 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001630 struct smq_phy_page page;
1631 int num = 1;
1632 remote_arg_t ra[3];
1633 int err = 0;
1634 struct {
1635 int pid;
1636 uint32_t flags;
1637 uintptr_t vaddrin;
1638 int num;
1639 } inargs;
1640 struct {
1641 uintptr_t vaddrout;
1642 } routargs;
1643
1644 inargs.pid = current->tgid;
1645 inargs.vaddrin = (uintptr_t)map->va;
1646 inargs.flags = flags;
1647 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
1648 ra[0].buf.pv = (void *)&inargs;
1649 ra[0].buf.len = sizeof(inargs);
1650 page.addr = map->phys;
1651 page.size = map->size;
1652 ra[1].buf.pv = (void *)&page;
1653 ra[1].buf.len = num * sizeof(page);
1654
1655 ra[2].buf.pv = (void *)&routargs;
1656 ra[2].buf.len = sizeof(routargs);
1657
1658 ioctl.inv.handle = 1;
1659 if (fl->apps->compat)
1660 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
1661 else
1662 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
1663 ioctl.inv.pra = ra;
1664 ioctl.fds = 0;
1665 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001666 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001667 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1668 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1669 map->raddr = (uintptr_t)routargs.vaddrout;
1670
1671 return err;
1672}
1673
1674static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
1675 struct fastrpc_mmap *map)
1676{
Sathish Ambleybae51902017-07-03 15:00:49 -07001677 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001678 remote_arg_t ra[1];
1679 int err = 0;
1680 struct {
1681 int pid;
1682 uintptr_t vaddrout;
1683 ssize_t size;
1684 } inargs;
1685
1686 inargs.pid = current->tgid;
1687 inargs.size = map->size;
1688 inargs.vaddrout = map->raddr;
1689 ra[0].buf.pv = (void *)&inargs;
1690 ra[0].buf.len = sizeof(inargs);
1691
1692 ioctl.inv.handle = 1;
1693 if (fl->apps->compat)
1694 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
1695 else
1696 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
1697 ioctl.inv.pra = ra;
1698 ioctl.fds = 0;
1699 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001700 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001701 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1702 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1703 return err;
1704}
1705
1706static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
1707 ssize_t len, struct fastrpc_mmap **ppmap);
1708
1709static void fastrpc_mmap_add(struct fastrpc_mmap *map);
1710
/*
 * Undo a fastrpc_internal_mmap(): remove the mapping (looked up by
 * DSP address and size) from the file's list, tear it down on the DSP,
 * then free it locally.  If the remote teardown fails the mapping is
 * re-added so the bookkeeping stays consistent for a retry.
 */
static int fastrpc_internal_munmap(struct fastrpc_file *fl,
				   struct fastrpc_ioctl_munmap *ud)
{
	int err = 0;
	struct fastrpc_mmap *map = 0;

	VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
	if (err)
		goto bail;
	VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
	if (err)
		goto bail;
	fastrpc_mmap_free(map);
bail:
	/* remote teardown failed: restore the mapping to the list */
	if (err && map)
		fastrpc_mmap_add(map);
	return err;
}
1729
/*
 * Map a user buffer (ud->fd / vaddrin / size) onto the DSP.  If an
 * identical mapping already exists it is reused and the call succeeds
 * immediately; otherwise a new local mapping is created and registered
 * with the DSP.  The DSP-side address is returned in ud->vaddrout.
 */
static int fastrpc_internal_mmap(struct fastrpc_file *fl,
				 struct fastrpc_ioctl_mmap *ud)
{

	struct fastrpc_mmap *map = 0;
	int err = 0;

	/* reuse an existing identical mapping if one is found; the `1`
	 * presumably asks find to take a reference — confirm against
	 * fastrpc_mmap_find()
	 */
	if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin, ud->size,
			       ud->flags, 1, &map))
		return 0;

	VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
			(uintptr_t)ud->vaddrin, ud->size, ud->flags, &map));
	if (err)
		goto bail;
	VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
	if (err)
		goto bail;
	ud->vaddrout = map->raddr;
 bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}
1754
/*
 * kref release callback for a channel.  Invoked via kref_put_mutex()
 * (see fastrpc_file_free) with gfa.smd_mutex held; this function is
 * responsible for dropping that mutex.  Closes the glink channel and
 * unregisters the link-state callback.
 */
static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	/* channel id is the offset into the global channel table */
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = 0;
	glink_unregister_link_state_cb(ctx->link.link_notify_handle);
	ctx->link.link_notify_handle = NULL;
	/* kref_put_mutex() acquired smd_mutex; release it here */
	mutex_unlock(&me->smd_mutex);
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
						MAJOR(me->dev_no), cid);
}
1771
1772static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
1773
/*
 * Claim a free session on `chan` whose smmu.secure flag matches
 * `secure`.  Caller must hold gfa.smd_mutex (hence the _locked suffix).
 * On channels with no SMMU sessions (sesscount == 0) session 0 is used
 * with the global device instead.
 */
static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
			int secure, struct fastrpc_session_ctx **session)
{
	struct fastrpc_apps *me = &gfa;
	int idx = 0, err = 0;

	if (chan->sesscount) {
		for (idx = 0; idx < chan->sesscount; ++idx) {
			if (!chan->session[idx].used &&
				chan->session[idx].smmu.secure == secure) {
				chan->session[idx].used = 1;
				break;
			}
		}
		/* no free matching session */
		VERIFY(err, idx < chan->sesscount);
		if (err)
			goto bail;
		chan->session[idx].smmu.faults = 0;
	} else {
		/* no SMMU sessions on this channel: fall back to the
		 * global device for session 0
		 */
		VERIFY(err, me->dev != NULL);
		if (err)
			goto bail;
		chan->session[0].dev = me->dev;
	}

	*session = &chan->session[idx];
 bail:
	return err;
}
1803
1804bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv, size_t size)
1805{
1806 if (glink_queue_rx_intent(h, NULL, size))
1807 return false;
1808 return true;
1809}
1810
/*
 * Glink callback invoked when a queued tx completes.  Intentionally
 * empty: the driver does not track tx completions.
 */
void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
				const void *pkt_priv, const void *ptr)
{
}
1815
1816void fastrpc_glink_notify_rx(void *handle, const void *priv,
1817 const void *pkt_priv, const void *ptr, size_t size)
1818{
1819 struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
1820 int len = size;
1821
1822 while (len >= sizeof(*rsp) && rsp) {
1823 rsp->ctx = rsp->ctx & ~1;
1824 context_notify_user(uint64_to_ptr(rsp->ctx), rsp->retval);
1825 rsp++;
1826 len = len - sizeof(*rsp);
1827 }
1828 glink_rx_done(handle, ptr, true);
1829}
1830
/*
 * Glink channel (port) state callback.  The channel index is smuggled
 * through `priv`.  Tracks port state and, on connect, wakes the opener
 * waiting on channel[cid].workport.
 */
void fastrpc_glink_notify_state(void *handle, const void *priv,
				unsigned int event)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)(uintptr_t)priv;
	struct fastrpc_glink_info *link;

	if (cid < 0 || cid >= NUM_CHANNELS)
		return;
	link = &me->channel[cid].link;
	switch (event) {
	case GLINK_CONNECTED:
		link->port_state = FASTRPC_LINK_CONNECTED;
		complete(&me->channel[cid].workport);
		break;
	case GLINK_LOCAL_DISCONNECTED:
		link->port_state = FASTRPC_LINK_DISCONNECTED;
		break;
	case GLINK_REMOTE_DISCONNECTED:
		/* remote went down: drop our handle so the channel can be
		 * reopened after recovery
		 */
		if (me->channel[cid].chan) {
			fastrpc_glink_close(me->channel[cid].chan, cid);
			me->channel[cid].chan = 0;
		}
		break;
	default:
		break;
	}
}
1859
1860static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
1861 struct fastrpc_session_ctx **session)
1862{
1863 int err = 0;
1864 struct fastrpc_apps *me = &gfa;
1865
1866 mutex_lock(&me->smd_mutex);
1867 if (!*session)
1868 err = fastrpc_session_alloc_locked(chan, secure, session);
1869 mutex_unlock(&me->smd_mutex);
1870 return err;
1871}
1872
/*
 * Return an SMMU session to the channel's pool.  Takes smd_mutex to
 * serialise with fastrpc_session_alloc_locked().
 */
static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
				struct fastrpc_session_ctx *session)
{
	struct fastrpc_apps *me = &gfa;

	mutex_lock(&me->smd_mutex);
	session->used = 0;
	mutex_unlock(&me->smd_mutex);
}
1882
/*
 * Tear down all per-fd state: unlink from the driver list, release the
 * remote DSP process, free contexts, buffers and mappings, then drop
 * the channel reference and SMMU sessions.  Safe to call with NULL.
 */
static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n;
	struct fastrpc_mmap *map = 0;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	/* a file that never attached a session has nothing else to free */
	if (!fl->sctx) {
		kfree(fl);
		return 0;
	}
	(void)fastrpc_release_current_dsp_process(fl);
	/* mark the file as closing — presumably read by the invoke path
	 * to reject new calls; confirm against callers of file_close
	 */
	spin_lock(&fl->hlock);
	fl->file_close = 1;
	spin_unlock(&fl->hlock);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map);
	}
	/* only drop the channel ref if no SSR recycled it since we took it;
	 * fastrpc_channel_close releases smd_mutex on the final put
	 */
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
				fastrpc_channel_close, &fl->apps->smd_mutex);
	/* NOTE(review): fl->sctx is always non-NULL here (early return
	 * above), so this check is redundant
	 */
	if (fl->sctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
	kfree(fl);
	return 0;
}
1920
1921static int fastrpc_device_release(struct inode *inode, struct file *file)
1922{
1923 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
1924
1925 if (fl) {
Sathish Ambley1ca68232017-01-19 10:32:55 -08001926 if (fl->debugfs_file != NULL)
1927 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001928 fastrpc_file_free(fl);
1929 file->private_data = 0;
1930 }
1931 return 0;
1932}
1933
/*
 * Glink link-state callback (whole link, not per-port).  Caches the new
 * state and wakes anyone waiting in fastrpc_glink_register() when the
 * link comes up.  The channel index is smuggled through `priv`.
 */
static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
				void *priv)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)((uintptr_t)priv);
	struct fastrpc_glink_info *link;

	if (cid < 0 || cid >= NUM_CHANNELS)
		return;

	link = &me->channel[cid].link;
	switch (cb_info->link_state) {
	case GLINK_LINK_STATE_UP:
		link->link_state = FASTRPC_LINK_STATE_UP;
		complete(&me->channel[cid].work);
		break;
	case GLINK_LINK_STATE_DOWN:
		link->link_state = FASTRPC_LINK_STATE_DOWN;
		break;
	default:
		pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
		break;
	}
}
1958
/*
 * Register for glink link-state notifications on channel `cid` and wait
 * (with timeout) for the link to come up.  Idempotent: returns early if
 * a notification handle is already registered for this channel.
 */
static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;

	link = &me->channel[cid].link;
	/* already registered: nothing to do */
	if (link->link_notify_handle != NULL)
		goto bail;

	link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
	link->link_notify_handle = glink_register_link_state_cb(
					&link->link_info,
					(void *)((uintptr_t)cid));
	VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
	if (err) {
		link->link_notify_handle = NULL;
		goto bail;
	}
	/* wait for fastrpc_link_state_handler() to signal link-up */
	VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
						RPC_TIMEOUT));
bail:
	return err;
}
1986
/*
 * Close the glink channel handle for `cid`, but only when the port is
 * currently connected; the port is marked DISCONNECTING before the
 * close is issued.
 */
static void fastrpc_glink_close(void *chan, int cid)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		return;
	link = &gfa.channel[cid].link;

	if (link->port_state == FASTRPC_LINK_CONNECTED) {
		link->port_state = FASTRPC_LINK_DISCONNECTING;
		glink_close(chan);
	}
}
2002
/*
 * Open the glink channel for `cid`.  Requires the link to be UP and the
 * port DISCONNECTED; moves the port to CONNECTING and stores the new
 * handle in channel[cid].chan.  The connect completes asynchronously
 * via fastrpc_glink_notify_state() (GLINK_CONNECTED).
 */
static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &me->channel[cid].link.cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	/* refuse to open twice: port must be fully disconnected */
	VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
	if (err)
		goto bail;

	link->port_state = FASTRPC_LINK_CONNECTING;
	cfg->priv = (void *)(uintptr_t)cid;
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err)
		goto bail;
	me->channel[cid].chan = handle;
bail:
	return err;
}
2041
Sathish Ambley1ca68232017-01-19 10:32:55 -08002042static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
2043{
2044 filp->private_data = inode->i_private;
2045 return 0;
2046}
2047
/*
 * debugfs read handler, shared by two views:
 *  - the "global" node (fl == NULL): dumps every channel's session table
 *    (context-bank id and secure flag per session);
 *  - a per-process node: dumps the process/channel/SSR ids plus the
 *    file's buffer, mmap, and pending/interrupted rpc-context lists.
 *
 * The report is rendered into a DEBUGFS_SIZE scratch buffer and copied
 * out with simple_read_from_buffer().
 */
static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
					 size_t count, loff_t *position)
{
	struct fastrpc_file *fl = filp->private_data;
	struct hlist_node *n;
	struct fastrpc_buf *buf = 0;
	struct fastrpc_mmap *map = 0;
	struct smq_invoke_ctx *ictx = 0;
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	unsigned int len = 0;
	int i, j, ret = 0;
	char *fileinfo = NULL;

	fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
	if (!fileinfo)
		/* NOTE(review): returns ret == 0 (EOF) rather than -ENOMEM
		 * when the scratch buffer cannot be allocated — confirm. */
		goto bail;
	if (fl == NULL) {
		/* Global view: walk every channel's session table. */
		for (i = 0; i < NUM_CHANNELS; i++) {
			chan = &gcinfo[i];
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s\n\n",
					chan->name);
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s %d\n",
					"sesscount:", chan->sesscount);
			for (j = 0; j < chan->sesscount; j++) {
				sess = &chan->session[j];
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s%d\n\n", "SESSION", j);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "sid:",
						sess->smmu.cb);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "SECURE:",
						sess->smmu.secure);
			}
		}
	} else {
		/* Per-process view: ids, then the file's object lists. */
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"PROCESS_ID:", fl->tgid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"CHANNEL_ID:", fl->cid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"SSRCOUNT:", fl->ssrcount);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s\n",
				"LIST OF BUFS:");
		/* The hlock spinlock keeps the lists stable while dumping. */
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %p %s %p %s %llx\n", "buf:",
					buf, "buf->virt:", buf->virt,
					"buf->phys:", buf->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF MAPS:");
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %p %s %lx %s %llx\n",
					"map:", map,
					"map->va:", map->va,
					"map->phys:", map->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF PENDING SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
			/* NOTE(review): dereferences ictx->rpra — it is not
			 * obvious from this file that rpra is always non-NULL
			 * for a pending context; confirm before relying on
			 * this dump during early marshalling. */
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %p %s %u %s %u %s %u\n",
					"smqcontext:", ictx,
					"sc:", ictx->sc,
					"tid:", ictx->pid,
					"handle", ictx->rpra->h);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF INTERRUPTED SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %p %s %u %s %u %s %u\n",
					"smqcontext:", ictx,
					"sc:", ictx->sc,
					"tid:", ictx->pid,
					"handle", ictx->rpra->h);
		}
		spin_unlock(&fl->hlock);
	}
	/* Defensive clamp; scnprintf already bounds each write to the
	 * remaining space, so len should never exceed DEBUGFS_SIZE. */
	if (len > DEBUGFS_SIZE)
		len = DEBUGFS_SIZE;
	ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
	kfree(fileinfo);
bail:
	return ret;
}
2150
/*
 * File operations shared by the per-process debugfs nodes (created in
 * fastrpc_device_open()) and the "global" node (fastrpc_cb_probe()).
 */
static const struct file_operations debugfs_fops = {
	.open = fastrpc_debugfs_open,
	.read = fastrpc_debugfs_read,
};
/*
 * Ensure the glink channel for fl->cid is open, opening it on first use.
 *
 * Serialized by me->smd_mutex.  If the channel is already open and
 * referenced, this just takes another kref.  Otherwise it registers for
 * link-state callbacks, opens the port, and waits (with RPC_TIMEOUT)
 * for the port-open completion signalled from the glink notify path,
 * then queues an initial rx intent.
 *
 * Returns 0 on success, non-zero on failure.
 */
static int fastrpc_channel_open(struct fastrpc_file *fl)
{
	struct fastrpc_apps *me = &gfa;
	int cid, err = 0;

	mutex_lock(&me->smd_mutex);

	VERIFY(err, fl && fl->sctx);
	if (err)
		goto bail;
	cid = fl->cid;
	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
	if (err)
		goto bail;
	/* Snapshot the SSR generation this file will operate against. */
	fl->ssrcount = me->channel[cid].ssrcount;
	if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
	    (me->channel[cid].chan == 0)) {
		VERIFY(err, 0 == fastrpc_glink_register(cid, me));
		if (err)
			goto bail;
		VERIFY(err, 0 == fastrpc_glink_open(cid));
		if (err)
			goto bail;

		/* Wait for the glink state callback to complete workport,
		 * i.e. for the remote side to open the port. */
		VERIFY(err,
			wait_for_completion_timeout(&me->channel[cid].workport,
						RPC_TIMEOUT));
		if (err) {
			/* NOTE(review): on timeout the handle is dropped from
			 * our bookkeeping without a glink_close() here —
			 * confirm the link-state callbacks clean up the port. */
			me->channel[cid].chan = 0;
			goto bail;
		}
		kref_init(&me->channel[cid].kref);
		pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
						MAJOR(me->dev_no), cid);
		/* NOTE(review): if the initial intent fails, err stays set
		 * and the whole open reports failure even though only a
		 * warning is logged — confirm this is intended. */
		err = glink_queue_rx_intent(me->channel[cid].chan, NULL, 64);
		if (err)
			pr_info("adsprpc: initial intent failed for %d\n", cid);
		if (me->channel[cid].ssrcount !=
				 me->channel[cid].prevssrcount) {
			me->channel[cid].prevssrcount =
						me->channel[cid].ssrcount;
		}
	}

bail:
	mutex_unlock(&me->smd_mutex);
	return err;
}
2203
Sathish Ambley36849af2017-02-02 09:35:55 -08002204static int fastrpc_device_open(struct inode *inode, struct file *filp)
2205{
2206 int err = 0;
Sathish Ambley567012b2017-03-06 11:55:04 -08002207 struct dentry *debugfs_file;
Sathish Ambley36849af2017-02-02 09:35:55 -08002208 struct fastrpc_file *fl = 0;
2209 struct fastrpc_apps *me = &gfa;
2210
2211 VERIFY(err, fl = kzalloc(sizeof(*fl), GFP_KERNEL));
2212 if (err)
2213 return err;
Sathish Ambley567012b2017-03-06 11:55:04 -08002214 debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
2215 fl, &debugfs_fops);
Sathish Ambley36849af2017-02-02 09:35:55 -08002216 context_list_ctor(&fl->clst);
2217 spin_lock_init(&fl->hlock);
2218 INIT_HLIST_HEAD(&fl->maps);
2219 INIT_HLIST_HEAD(&fl->bufs);
2220 INIT_HLIST_NODE(&fl->hn);
2221 fl->tgid = current->tgid;
2222 fl->apps = me;
2223 fl->mode = FASTRPC_MODE_SERIAL;
2224 fl->cid = -1;
Sathish Ambley567012b2017-03-06 11:55:04 -08002225 if (debugfs_file != NULL)
2226 fl->debugfs_file = debugfs_file;
2227 memset(&fl->perf, 0, sizeof(fl->perf));
Sathish Ambley36849af2017-02-02 09:35:55 -08002228 filp->private_data = fl;
2229 spin_lock(&me->hlock);
2230 hlist_add_head(&fl->hn, &me->drivers);
2231 spin_unlock(&me->hlock);
2232 return 0;
2233}
2234
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002235static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
2236{
2237 int err = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08002238 uint32_t cid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002239
Sathish Ambley36849af2017-02-02 09:35:55 -08002240 VERIFY(err, fl != 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002241 if (err)
2242 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08002243 if (fl->cid == -1) {
2244 cid = *info;
2245 VERIFY(err, cid < NUM_CHANNELS);
2246 if (err)
2247 goto bail;
2248 fl->cid = cid;
2249 fl->ssrcount = fl->apps->channel[cid].ssrcount;
2250 VERIFY(err, !fastrpc_session_alloc_locked(
2251 &fl->apps->channel[cid], 0, &fl->sctx));
2252 if (err)
2253 goto bail;
2254 }
Tharun Kumar Merugu80be7d62017-08-02 11:03:22 +05302255 VERIFY(err, fl->sctx != NULL);
2256 if (err)
2257 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002258 *info = (fl->sctx->smmu.enabled ? 1 : 0);
2259bail:
2260 return err;
2261}
2262
/*
 * Main ioctl dispatcher for the fastrpc char device.
 *
 * The INVOKE family (plain/FD/ATTRS/CRC) shares one handler: the cases
 * deliberately fall through, each setting @size to its own struct size
 * only if an earlier case has not already done so, and a single
 * copy_from_user() then pulls in exactly the bytes that variant
 * defines.  The optional trailing members (fds/attrs/crc) are cleared
 * up front so the shorter variants leave well-defined values.
 */
static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
				 unsigned long ioctl_param)
{
	union {
		struct fastrpc_ioctl_invoke_crc inv;
		struct fastrpc_ioctl_mmap mmap;
		struct fastrpc_ioctl_munmap munmap;
		struct fastrpc_ioctl_init_attrs init;
		struct fastrpc_ioctl_perf perf;
	} p;
	void *param = (char *)ioctl_param;
	struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
	int size = 0, err = 0;
	uint32_t info;

	/* Pre-clear the members the short INVOKE variants do not copy in. */
	p.inv.fds = 0;
	p.inv.attrs = 0;
	p.inv.crc = NULL;
	/* Refuse new work once release has started on this fd. */
	spin_lock(&fl->hlock);
	if (fl->file_close == 1) {
		/* NOTE(review): positive EBADF is returned here, unlike the
		 * negative-errno convention used elsewhere — confirm. */
		err = EBADF;
		pr_warn("ADSPRPC: fastrpc_device_release is happening, So not sending any new requests to DSP");
		spin_unlock(&fl->hlock);
		goto bail;
	}
	spin_unlock(&fl->hlock);

	switch (ioctl_num) {
	case FASTRPC_IOCTL_INVOKE:
		size = sizeof(struct fastrpc_ioctl_invoke);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_FD:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_fd);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_ATTRS:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_attrs);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_CRC:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_crc);
		VERIFY(err, 0 == copy_from_user(&p.inv, param, size));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
						0, &p.inv)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MMAP:
		VERIFY(err, 0 == copy_from_user(&p.mmap, param,
						sizeof(p.mmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
		if (err)
			goto bail;
		/* Copy back so the caller sees the assigned vaddrout. */
		VERIFY(err, 0 == copy_to_user(param, &p.mmap, sizeof(p.mmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MUNMAP:
		VERIFY(err, 0 == copy_from_user(&p.munmap, param,
						sizeof(p.munmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
							&p.munmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_SETMODE:
		/* The mode is passed by value, not through a user struct. */
		switch ((uint32_t)ioctl_param) {
		case FASTRPC_MODE_PARALLEL:
		case FASTRPC_MODE_SERIAL:
			fl->mode = (uint32_t)ioctl_param;
			break;
		case FASTRPC_MODE_PROFILE:
			fl->profile = (uint32_t)ioctl_param;
			break;
		default:
			err = -ENOTTY;
			break;
		}
		break;
	case FASTRPC_IOCTL_GETPERF:
		VERIFY(err, 0 == copy_from_user(&p.perf,
					param, sizeof(p.perf)));
		if (err)
			goto bail;
		p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
		if (p.perf.keys) {
			char *keys = PERF_KEYS;

			/* Export the key-name string to the caller's buffer. */
			VERIFY(err, 0 == copy_to_user((char *)p.perf.keys,
						 keys, strlen(keys)+1));
			if (err)
				goto bail;
		}
		if (p.perf.data) {
			VERIFY(err, 0 == copy_to_user((int64_t *)p.perf.data,
						 &fl->perf, sizeof(fl->perf)));
		}
		VERIFY(err, 0 == copy_to_user(param, &p.perf, sizeof(p.perf)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_GETINFO:
		VERIFY(err, 0 == copy_from_user(&info, param, sizeof(info)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
		if (err)
			goto bail;
		VERIFY(err, 0 == copy_to_user(param, &info, sizeof(info)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_INIT:
		/* Plain INIT: default the attrs-only fields, then share the
		 * INIT_ATTRS path via fall-through, like the INVOKE family. */
		p.init.attrs = 0;
		p.init.siglen = 0;
		size = sizeof(struct fastrpc_ioctl_init);
		/* fall through */
	case FASTRPC_IOCTL_INIT_ATTRS:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_init_attrs);
		VERIFY(err, 0 == copy_from_user(&p.init, param, size));
		if (err)
			goto bail;
		/* NOTE(review): if filelen/memlen are unsigned in
		 * adsprpc_shared.h this check is always true — confirm the
		 * field types and intended upper bounds. */
		VERIFY(err, p.init.init.filelen >= 0 &&
			p.init.init.memlen >= 0);
		if (err)
			goto bail;
		VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
		if (err)
			goto bail;
		break;

	default:
		err = -ENOTTY;
		pr_info("bad ioctl: %d\n", ioctl_num);
		break;
	}
 bail:
	return err;
}
2410
2411static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
2412 unsigned long code,
2413 void *data)
2414{
2415 struct fastrpc_apps *me = &gfa;
2416 struct fastrpc_channel_ctx *ctx;
2417 int cid;
2418
2419 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
2420 cid = ctx - &me->channel[0];
2421 if (code == SUBSYS_BEFORE_SHUTDOWN) {
2422 mutex_lock(&me->smd_mutex);
2423 ctx->ssrcount++;
2424 if (ctx->chan) {
2425 fastrpc_glink_close(ctx->chan, cid);
2426 ctx->chan = 0;
2427 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
2428 gcinfo[cid].name, MAJOR(me->dev_no), cid);
2429 }
2430 mutex_unlock(&me->smd_mutex);
2431 fastrpc_notify_drivers(me, cid);
2432 }
2433
2434 return NOTIFY_DONE;
2435}
2436
/* Character-device entry points registered via cdev_init(). */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};
2443
/*
 * Device-tree compatibles served by this driver.  "compute-cb" and
 * "adsprpc-mem-region" nodes get dedicated handling in fastrpc_probe().
 */
static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};
2451
/*
 * Probe one SMMU context bank ("qcom,msm-fastrpc-compute-cb" node):
 * match its "label" property against a known channel, parse the iommus
 * phandle for the context-bank id, create and attach an ARM IOMMU
 * mapping, and record the result as the channel's next session.
 *
 * Returns 0 on success, non-zero on any VERIFY failure.
 */
static int fastrpc_cb_probe(struct device *dev)
{
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	struct of_phandle_args iommuspec;
	const char *name;
	unsigned int start = 0x80000000;
	int err = 0, i;
	int secure_vmid = VMID_CP_PIXEL;

	VERIFY(err, 0 != (name = of_get_property(dev->of_node, "label", NULL)));
	if (err)
		goto bail;
	/* Find the channel whose name matches this bank's label. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		if (!strcmp(name, gcinfo[i].name))
			break;
	}
	VERIFY(err, i < NUM_CHANNELS);
	if (err)
		goto bail;
	chan = &gcinfo[i];
	VERIFY(err, chan->sesscount < NUM_SESSIONS);
	if (err)
		goto bail;

	VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
						"#iommu-cells", 0, &iommuspec));
	if (err)
		goto bail;
	sess = &chan->session[chan->sesscount];
	/* NOTE(review): only the low nibble of the iommu spec is kept as
	 * the context-bank id — confirm against the SMMU binding in use. */
	sess->smmu.cb = iommuspec.args[0] & 0xf;
	sess->used = 0;
	sess->smmu.coherent = of_property_read_bool(dev->of_node,
						"dma-coherent");
	sess->smmu.secure = of_property_read_bool(dev->of_node,
						"qcom,secure-context-bank");
	/* Secure banks get a lower VA window base. */
	if (sess->smmu.secure)
		start = 0x60000000;
	VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
				arm_iommu_create_mapping(&platform_bus_type,
						start, 0x78000000)));
	if (err)
		goto bail;

	if (sess->smmu.secure)
		iommu_domain_set_attr(sess->smmu.mapping->domain,
				DOMAIN_ATTR_SECURE_VMID,
				&secure_vmid);

	VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
	if (err)
		goto bail;
	sess->dev = dev;
	sess->smmu.enabled = 1;
	chan->sesscount++;
	/* NOTE(review): the "global" debugfs node is re-created on every
	 * context-bank probe; last creation wins — confirm harmless. */
	debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
							NULL, &debugfs_fops);
bail:
	return err;
}
2514
/*
 * Platform-driver probe.  Dispatches on the node's compatible string:
 *  - "qcom,msm-fastrpc-compute-cb": an SMMU context bank, delegated to
 *    fastrpc_cb_probe();
 *  - "qcom,msm-adsprpc-mem-region": locate the ADSP ion heap's CMA
 *    region and hyp-assign it to the remote subsystem VMs;
 *  - anything else: populate child nodes so the context banks probe.
 */
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;
		/* NOTE(review): ion_node/ion_pdev references are never
		 * of_node_put()/put_device()'d — confirm whether the
		 * refcount leak matters for this one-shot probe. */
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
						VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					};

			/* Share the region with HLOS plus the three remote
			 * processors' VMs in one hypervisor call. */
			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
					srcVM, 1, destVM, destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					  fastrpc_match_table,
					  NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}
2578
2579static void fastrpc_deinit(void)
2580{
2581 struct fastrpc_apps *me = &gfa;
2582 struct fastrpc_channel_ctx *chan = gcinfo;
2583 int i, j;
2584
2585 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
2586 if (chan->chan) {
2587 kref_put_mutex(&chan->kref,
2588 fastrpc_channel_close, &me->smd_mutex);
2589 chan->chan = 0;
2590 }
2591 for (j = 0; j < NUM_SESSIONS; j++) {
2592 struct fastrpc_session_ctx *sess = &chan->session[j];
2593
2594 if (sess->smmu.enabled) {
2595 arm_iommu_detach_device(sess->dev);
2596 sess->dev = 0;
2597 }
2598 if (sess->smmu.mapping) {
2599 arm_iommu_release_mapping(sess->smmu.mapping);
2600 sess->smmu.mapping = 0;
2601 }
2602 }
2603 }
2604}
2605
/* Platform driver covering every fastrpc device-tree node. */
static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};
2614
/*
 * Module init: register the platform driver, reserve NUM_CHANNELS char
 * device minors (only one device node, minor 0, is actually created),
 * hook a subsystem-restart notifier for every channel, create the ion
 * client and the debugfs root directory.
 *
 * Unwinds partially-built state through the *_bail labels on failure.
 * NOTE(review): platform_driver_register() is never unregistered on the
 * failure paths below — confirm whether that is intentional.
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	struct device *dev = 0;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				1));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	/* Single shared device node (minor 0), named after channel 0. */
	dev = device_create(me->class, NULL,
				MKDEV(MAJOR(me->dev_no), 0),
				NULL, gcinfo[0].name);
	VERIFY(err, !IS_ERR_OR_NULL(dev));
	if (err)
		goto device_create_bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		me->channel[i].dev = dev;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	debugfs_root = debugfs_create_dir("adsprpc", NULL);
	return 0;
device_create_bail:
	for (i = 0; i < NUM_CHANNELS; i++) {
		/* Handles are NULL for channels whose registration was
		 * never reached, so the unconditional loop is safe. */
		if (me->channel[i].handle)
			subsys_notif_unregister_notifier(me->channel[i].handle,
							&me->channel[i].nb);
	}
	if (!IS_ERR_OR_NULL(dev))
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}
2683
/*
 * Module exit: tear down all per-process files, channels and sessions,
 * the char device, the ion client, and the whole debugfs tree.
 */
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;
	int i;

	fastrpc_file_list_dtor(me);
	fastrpc_deinit();
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		/* NOTE(review): init only device_create()s minor 0, yet this
		 * destroys minor i for every named channel — confirm the
		 * extra device_destroy() calls are intended/harmless. */
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						&me->channel[i].nb);
	}
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
	ion_client_destroy(me->client);
	debugfs_remove_recursive(debugfs_root);
}
2704
/* Module entry/exit points.  NOTE(review): init is deferred to
 * late_initcall, presumably so glink/ion/subsystem_notif come up
 * first — confirm the ordering requirement. */
late_initcall(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");