blob: 1064e997b5a57c9244900e6f4511b9a4c6c6fa07 [file] [log] [blame]
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001/*
Sathish Ambleyae5ee542017-01-16 22:24:23 -08002 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
29#include <soc/qcom/subsystem_notif.h>
30#include <soc/qcom/subsystem_restart.h>
31#include <linux/scatterlist.h>
32#include <linux/fs.h>
33#include <linux/uaccess.h>
34#include <linux/device.h>
35#include <linux/of.h>
36#include <linux/of_address.h>
37#include <linux/of_platform.h>
38#include <linux/dma-contiguous.h>
39#include <linux/cma.h>
40#include <linux/iommu.h>
41#include <linux/kref.h>
42#include <linux/sort.h>
43#include <linux/msm_dma_iommu_mapping.h>
44#include <asm/dma-iommu.h>
45#include "adsprpc_compat.h"
46#include "adsprpc_shared.h"
Sathish Ambley1ca68232017-01-19 10:32:55 -080047#include <linux/debugfs.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070048
49#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
50#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
51#define TZ_PIL_AUTH_QDSP6_PROC 1
52#define FASTRPC_ENOSUCH 39
53#define VMID_SSC_Q6 5
54#define VMID_ADSP_Q6 6
Sathish Ambley1ca68232017-01-19 10:32:55 -080055#define DEBUGFS_SIZE 1024
Sathish Ambley69e1ab02016-10-18 10:28:15 -070056
57#define RPC_TIMEOUT (5 * HZ)
58#define BALIGN 128
59#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
60#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
Sathish Ambleybae51902017-07-03 15:00:49 -070061#define M_FDLIST (16)
62#define M_CRCLIST (64)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070063
64#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
65
66#define FASTRPC_LINK_STATE_DOWN (0x0)
67#define FASTRPC_LINK_STATE_UP (0x1)
68#define FASTRPC_LINK_DISCONNECTED (0x0)
69#define FASTRPC_LINK_CONNECTING (0x1)
70#define FASTRPC_LINK_CONNECTED (0x3)
71#define FASTRPC_LINK_DISCONNECTING (0x7)
72
Sathish Ambleya21b5b52017-01-11 16:11:01 -080073#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
74#define FASTRPC_STATIC_HANDLE_LISTENER (3)
75#define FASTRPC_STATIC_HANDLE_MAX (20)
76
77#define PERF_END (void)0
78
79#define PERF(enb, cnt, ff) \
80 {\
81 struct timespec startT = {0};\
82 if (enb) {\
83 getnstimeofday(&startT);\
84 } \
85 ff ;\
86 if (enb) {\
87 cnt += getnstimediff(&startT);\
88 } \
89 }
90
Sathish Ambley69e1ab02016-10-18 10:28:15 -070091static int fastrpc_glink_open(int cid);
92static void fastrpc_glink_close(void *chan, int cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -080093static struct dentry *debugfs_root;
94static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -070095
96static inline uint64_t buf_page_start(uint64_t buf)
97{
98 uint64_t start = (uint64_t) buf & PAGE_MASK;
99 return start;
100}
101
102static inline uint64_t buf_page_offset(uint64_t buf)
103{
104 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
105 return offset;
106}
107
108static inline int buf_num_pages(uint64_t buf, ssize_t len)
109{
110 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
111 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
112 int nPages = end - start + 1;
113 return nPages;
114}
115
116static inline uint64_t buf_page_size(uint32_t size)
117{
118 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
119
120 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
121}
122
/* Widened-integer form of a kernel pointer back to void *. */
static inline void *uint64_to_ptr(uint64_t addr)
{
	return (void *)(uintptr_t)addr;
}
129
/* Pointer to a 64-bit integer suitable for sharing with the remote DSP. */
static inline uint64_t ptr_to_uint64(void *ptr)
{
	return (uint64_t)(uintptr_t)ptr;
}
136
137struct fastrpc_file;
138
/* A DMA-coherent scratch buffer owned by one client file (fastrpc_file). */
struct fastrpc_buf {
	struct hlist_node hn;		/* link in fastrpc_file->bufs cache */
	struct fastrpc_file *fl;	/* owning client */
	void *virt;			/* kernel virtual address */
	uint64_t phys;			/* device address; SMMU cb id packed into bits 32+ */
	ssize_t size;			/* allocation size in bytes */
};
146
147struct fastrpc_ctx_lst;
148
/*
 * Per-buffer overlap descriptor used when packing invoke arguments:
 * records how much of a user buffer is already covered by buffers that
 * sort before it, so only the uncovered tail is copied.
 */
struct overlap {
	uintptr_t start;	/* user VA of buffer start */
	uintptr_t end;		/* user VA one past the buffer end */
	int raix;		/* index into the original remote-arg array */
	uintptr_t mstart;	/* start of region this buffer must materialize */
	uintptr_t mend;		/* end of that region (0,0 if fully covered) */
	uintptr_t offset;	/* bytes at the front already covered by earlier bufs */
};
157
/* State for one in-flight remote invocation on a channel. */
struct smq_invoke_ctx {
	struct hlist_node hn;		/* link in clst->pending or ->interrupted */
	struct completion work;		/* signalled when the DSP replies */
	int retval;			/* remote return value (-1 until reply) */
	int pid;			/* invoking task pid (used to re-match interrupted ctx) */
	int tgid;			/* invoking process tgid */
	remote_arg_t *lpra;		/* local (caller-side) argument descriptors */
	remote_arg64_t *rpra;		/* packed 64-bit args inside ctx->buf */
	int *fds;			/* per-arg ion fds (or -1/0 for plain memory) */
	unsigned int *attrs;		/* per-arg FASTRPC_ATTR_* flags */
	struct fastrpc_mmap **maps;	/* per-arg ion mappings, NULL for copied args */
	struct fastrpc_buf *buf;	/* metadata + copy staging buffer */
	ssize_t used;			/* bytes of staging buffer consumed */
	struct fastrpc_file *fl;	/* owning client */
	uint32_t sc;			/* scalars word: method id + buffer counts */
	struct overlap *overs;		/* per-arg overlap records */
	struct overlap **overps;	/* same records, sorted by start address */
	struct smq_msg msg;		/* message sent over glink */
	uint32_t *crc;			/* user pointer for returned CRC list (may be 0) */
};
178
/* Per-file lists of invoke contexts, protected by fastrpc_file->hlock. */
struct fastrpc_ctx_lst {
	struct hlist_head pending;	/* contexts awaiting a DSP reply */
	struct hlist_head interrupted;	/* contexts parked after a signal */
};
183
/* SMMU configuration for one session's context bank. */
struct fastrpc_smmu {
	struct dma_iommu_mapping *mapping;
	int cb;		/* context bank id; also packed into phys addresses */
	int enabled;	/* non-zero when buffers must be mapped via the SMMU */
	int faults;
	int secure;	/* session handles ION_FLAG_SECURE buffers */
	int coherent;	/* context bank is IO-coherent */
};
192
/* One DSP session (device + SMMU context) that a client can bind to. */
struct fastrpc_session_ctx {
	struct device *dev;		/* device used for DMA mapping/allocation */
	struct fastrpc_smmu smmu;
	int used;			/* session already claimed by a client */
};
198
/* glink transport bookkeeping for one channel (see FASTRPC_LINK_* states). */
struct fastrpc_glink_info {
	int link_state;			/* FASTRPC_LINK_STATE_{DOWN,UP} */
	int port_state;			/* FASTRPC_LINK_{DISCONNECTED,CONNECTING,...} */
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};
206
/* Per-DSP channel state (one of NUM_CHANNELS: adsp/mdsp/slpi/cdsp). */
struct fastrpc_channel_ctx {
	char *name;			/* device node name, e.g. "adsprpc-smd" */
	char *subsys;			/* subsystem-restart notifier name */
	void *chan;			/* glink channel handle */
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;
	struct completion workport;	/* signalled on glink port state change */
	struct notifier_block nb;	/* subsystem restart notifier */
	struct kref kref;		/* channel open refcount */
	int sesscount;
	int ssrcount;			/* bumped on each subsystem restart */
	void *handle;			/* subsystem notif handle */
	int prevssrcount;
	int vmid;			/* remote VMID for hyp_assign_phys, 0 if none */
	struct fastrpc_glink_info link;
};
224
/* Driver-global state (single instance: gfa). */
struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;	/* points at gcinfo[] */
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;			/* serializes channel open/close */
	struct smq_phy_page range;
	struct hlist_head maps;			/* global (cross-file) mappings */
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;		/* all open fastrpc_file clients */
	spinlock_t hlock;			/* protects drivers/maps lists */
	struct ion_client *client;
	struct device *dev;
};
239
/* A dma-buf/ion buffer mapped for remote access, refcounted via ->refs. */
struct fastrpc_mmap {
	struct hlist_node hn;		/* link in fl->maps (or gfa.maps) */
	struct fastrpc_file *fl;	/* owning client */
	struct fastrpc_apps *apps;
	int fd;				/* ion/dma-buf fd this map was created from */
	uint32_t flags;			/* mmap flags supplied at create time */
	struct dma_buf *buf;
	struct sg_table *table;		/* from dma_buf_map_attachment() */
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;			/* device address; SMMU cb packed in bits 32+ */
	ssize_t size;			/* mapped size */
	uintptr_t va;			/* user VA the buffer backs */
	ssize_t len;			/* user-visible length */
	int refs;			/* lookup refcount, guarded by fl->hlock */
	uintptr_t raddr;		/* remote-side address after mmap to DSP */
	int uncached;
	int secure;			/* ION_FLAG_SECURE buffer */
	uintptr_t attr;			/* FASTRPC_ATTR_* bits */
};
260
/* Cumulative per-file timing counters (ns); keys listed in PERF_KEYS. */
struct fastrpc_perf {
	int64_t count;		/* number of profiled invokes */
	int64_t flush;		/* cache flush time */
	int64_t map;		/* ion buffer mapping time */
	int64_t copy;		/* non-ion argument copy time */
	int64_t link;		/* glink transfer time */
	int64_t getargs;	/* get_args() time */
	int64_t putargs;	/* put_args() time */
	int64_t invargs;	/* cache invalidate time */
	int64_t invoke;		/* end-to-end invoke time */
};
272
/* Per-open-fd client state. */
struct fastrpc_file {
	struct hlist_node hn;		/* link in gfa.drivers */
	spinlock_t hlock;		/* protects maps/bufs/clst lists */
	struct hlist_head maps;		/* this client's fastrpc_mmap entries */
	struct hlist_head bufs;		/* cached free fastrpc_buf entries */
	struct fastrpc_ctx_lst clst;	/* pending/interrupted invoke contexts */
	struct fastrpc_session_ctx *sctx;	/* non-secure session */
	struct fastrpc_session_ctx *secsctx;	/* lazily allocated secure session */
	uint32_t mode;			/* FASTRPC_MODE_* set via ioctl */
	uint32_t profile;		/* non-zero enables PERF() accounting */
	int tgid;			/* owning process tgid */
	int cid;			/* channel index into apps->channel[] */
	int ssrcount;			/* channel ssrcount snapshot at open */
	int pd;				/* process domain on the DSP */
	struct fastrpc_apps *apps;
	struct fastrpc_perf perf;
	struct dentry *debugfs_file;
};
291
292static struct fastrpc_apps gfa;
293
/*
 * Static channel table, one entry per DSP; index order must match the
 * channel ids userspace opens (adsp, mdsp, slpi, cdsp — see NUM_CHANNELS).
 */
static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};
320
/* Nanoseconds elapsed since *start, sampled with getnstimeofday(). */
static inline int64_t getnstimediff(struct timespec *start)
{
	int64_t ns;
	struct timespec ts, b;

	getnstimeofday(&ts);
	b = timespec_sub(ts, *start);
	ns = timespec_to_ns(&b);
	return ns;
}
331
/*
 * Release a scratch buffer. When @cache is set, park it on the owner's
 * free list for reuse instead of freeing. Otherwise un-assign it from the
 * remote VM (if the channel has one), strip the SMMU cb bits from the
 * device address, and return the memory to the DMA allocator.
 */
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == 0 ? 0 : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		/* phys had the context-bank id packed in the high word */
		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			/* give the pages back to HLOS before freeing them */
			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->dev, buf->size, buf->virt,
					buf->phys);
	}
	kfree(buf);
}
363
/*
 * Drain and free every cached buffer on @fl->bufs. Buffers are popped one
 * at a time under the lock and freed outside it, since fastrpc_buf_free()
 * may sleep (dma_free_coherent / hyp_assign_phys).
 */
static void fastrpc_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			hlist_del_init(&buf->hn);
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}
383
384static void fastrpc_mmap_add(struct fastrpc_mmap *map)
385{
386 struct fastrpc_file *fl = map->fl;
387
388 spin_lock(&fl->hlock);
389 hlist_add_head(&map->hn, &fl->maps);
390 spin_unlock(&fl->hlock);
391}
392
393static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800394 ssize_t len, int mflags, int refs, struct fastrpc_mmap **ppmap)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700395{
396 struct fastrpc_mmap *match = 0, *map;
397 struct hlist_node *n;
398
399 spin_lock(&fl->hlock);
400 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
401 if (va >= map->va &&
402 va + len <= map->va + map->len &&
403 map->fd == fd) {
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800404 if (refs)
405 map->refs++;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700406 match = map;
407 break;
408 }
409 }
410 spin_unlock(&fl->hlock);
411 if (match) {
412 *ppmap = match;
413 return 0;
414 }
415 return -ENOTTY;
416}
417
/*
 * Find and unlink (but do not free) the mapping whose remote address range
 * exactly matches [va, va + len). The global list (gfa.maps) is searched
 * first, then the per-file list; only entries with a single reference are
 * eligible, so a mapping still in use by an invoke cannot be removed.
 * Returns 0 with *ppmap set, or -ENOTTY if nothing matched.
 */
static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       ssize_t len, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = 0, *map;
	struct hlist_node *n;
	struct fastrpc_apps *me = &gfa;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}
457
/*
 * Drop one reference on @map and, when it reaches zero, tear the mapping
 * down: ion handle, SMMU DMA mapping, hypervisor VM assignment, dma-buf
 * attachment, and finally the struct itself. Teardown mirrors the setup
 * order of fastrpc_mmap_create() in reverse, so it is also safe to call
 * on a partially-constructed map (each step is IS_ERR_OR_NULL-guarded).
 */
static void fastrpc_mmap_free(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;
	int destVM[1] = {VMID_HLOS};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	if (!map)
		return;
	fl = map->fl;
	spin_lock(&fl->hlock);
	map->refs--;
	if (!map->refs)
		hlist_del_init(&map->hn);
	spin_unlock(&fl->hlock);
	/* NOTE(review): this re-read of refs is outside the lock — relies on
	 * callers not racing frees of the same map; confirm against callers.
	 */
	if (map->refs > 0)
		return;
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	if (!IS_ERR_OR_NULL(map->handle))
		ion_free(fl->apps->client, map->handle);
	if (sess && sess->smmu.enabled) {
		if (map->size || map->phys)
			msm_dma_unmap_sg(sess->dev,
				map->table->sgl,
				map->table->nents, DMA_BIDIRECTIONAL,
				map->buf);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid && map->phys) {
		int srcVM[2] = {VMID_HLOS, vmid};

		/* return the pages to HLOS-only ownership */
		hyp_assign_phys(map->phys, buf_page_size(map->size),
			srcVM, 2, destVM, destVMperm, 1);
	}

	if (!IS_ERR_OR_NULL(map->table))
		dma_buf_unmap_attachment(map->attach, map->table,
				DMA_BIDIRECTIONAL);
	if (!IS_ERR_OR_NULL(map->attach))
		dma_buf_detach(map->buf, map->attach);
	if (!IS_ERR_OR_NULL(map->buf))
		dma_buf_put(map->buf);
	kfree(map);
}
507
508static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
509 struct fastrpc_session_ctx **session);
510
/*
 * Create (or find) a mapping of ion fd @fd covering user range
 * [va, va + len) for client @fl.
 *
 * Steps: reuse a cached map if one already covers the range; otherwise
 * import the ion handle, pick the secure or normal session based on the
 * buffer's ION flags, attach and map the dma-buf, install the sg list in
 * the session's SMMU (when enabled), hyp-assign the pages to the channel's
 * VM (when it has one), and publish the map on fl->maps.
 *
 * Returns 0 with *ppmap set, negative error otherwise; on error any
 * partially-built map is released via fastrpc_mmap_free().
 */
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, ssize_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = 0;
	unsigned long attrs;
	unsigned long flags;
	int err = 0, vmid;

	/* fast path: an existing map already covers this range (takes a ref) */
	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	VERIFY(err, !IS_ERR_OR_NULL(map->handle =
			ion_import_dma_buf_fd(fl->apps->client, fd)));
	if (err)
		goto bail;
	VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
	if (err)
		goto bail;

	map->uncached = !ION_IS_CACHED(flags);
	if (map->attr & FASTRPC_ATTR_NOVA)
		map->uncached = 1;

	map->secure = flags & ION_FLAG_SECURE;
	/* secure buffers need the (lazily created) secure session */
	if (map->secure) {
		if (!fl->secsctx)
			err = fastrpc_session_alloc(chan, 1,
						&fl->secsctx);
		if (err)
			goto bail;
	}
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;
	VERIFY(err, !IS_ERR_OR_NULL(sess));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->attach =
			dma_buf_attach(map->buf, sess->dev)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
	if (err)
		goto bail;
	if (sess->smmu.enabled) {
		attrs = DMA_ATTR_EXEC_MAPPING;

		/* coherency attrs: explicit per-buffer attr wins over session */
		if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
			(sess->smmu.coherent && map->uncached))
			attrs |= DMA_ATTR_FORCE_NON_COHERENT;
		else if (map->attr & FASTRPC_ATTR_COHERENT)
			attrs |= DMA_ATTR_FORCE_COHERENT;

		VERIFY(err, map->table->nents ==
			msm_dma_map_sg_attrs(sess->dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
		if (err)
			goto bail;
	} else {
		/* without an SMMU the buffer must be physically contiguous */
		VERIFY(err, map->table->nents == 1);
		if (err)
			goto bail;
	}
	map->phys = sg_dma_address(map->table->sgl);
	if (sess->smmu.cb) {
		/* pack the context bank id into the high word of phys */
		map->phys += ((uint64_t)sess->smmu.cb << 32);
		map->size = sg_dma_len(map->table->sgl);
	} else {
		map->size = buf_page_size(len);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		/* share the pages with the remote VM */
		VERIFY(err, !hyp_assign_phys(map->phys,
				buf_page_size(map->size),
				srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}
	map->va = va;
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}
627
/*
 * Get a scratch buffer of at least @size bytes for @fl: reuse the smallest
 * cached buffer that fits, otherwise allocate DMA-coherent memory (freeing
 * the whole cache and retrying once on failure), pack the SMMU cb id into
 * the device address, and share the pages with the channel's VM if any.
 * Returns 0 with *obuf set, negative error otherwise.
 */
static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
			     struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = 0, *fr = 0;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = 0;
	VERIFY(err, buf = kzalloc(sizeof(*buf), GFP_KERNEL));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = 0;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
				       (void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
					       (void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
 bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}
693
694
/*
 * If the calling task previously had an invoke interrupted by a signal,
 * move that parked context back to the pending list and return it via
 * *po so the invoke can resume waiting. A parked context belonging to
 * this pid but with different scalars (or a different file) is an API
 * misuse and fails with -1. Returns 0 when nothing was parked.
 */
static int context_restore_interrupted(struct fastrpc_file *fl,
				       struct fastrpc_ioctl_invoke_crc *inv,
				       struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = 0, *ictx = 0;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}
722
723#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
724static int overlap_ptr_cmp(const void *a, const void *b)
725{
726 struct overlap *pa = *((struct overlap **)a);
727 struct overlap *pb = *((struct overlap **)b);
728 /* sort with lowest starting buffer first */
729 int st = CMP(pa->start, pb->start);
730 /* sort with highest ending buffer first */
731 int ed = CMP(pb->end, pa->end);
732 return st == 0 ? ed : st;
733}
734
/*
 * Compute, for every in/out buffer of @ctx, the sub-range that actually
 * needs copying: buffers are sorted by start address (highest end first
 * on ties), then swept with a running high-water mark so that a buffer
 * fully contained in an earlier one gets mstart = mend = 0, and a
 * partially covered one copies only [max.end, end) with ->offset telling
 * how far into the buffer that region begins. Fails with -1 if a nonzero
 * length wraps (end <= start).
 */
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, 0);
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			/* overlaps the covered region: copy only the tail */
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				/* fully contained: nothing to copy */
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			/* disjoint: copy the whole buffer, reset the mark */
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}
780
/*
 * Copy helpers usable from both user-initiated and kernel-initiated
 * invokes: when @kernel is false they cross the user/kernel boundary and
 * set @err on fault; when true the source and destination are both kernel
 * memory and a plain memmove suffices.
 */
#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst), (src),\
							(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((dst), (src),\
						      (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)
798
799
800static void context_free(struct smq_invoke_ctx *ctx);
801
/*
 * Allocate and initialize an invoke context for @invokefd. The context
 * and all of its per-argument arrays (maps, lpra, fds, attrs, overs,
 * overps) live in one kzalloc'd slab, carved up by the pointer
 * arithmetic below. Argument descriptors, fds and attrs are copied in
 * from user space (or memmove'd when @kernel), overlap info is computed,
 * and the context is queued on the file's pending list. Returns 0 with
 * *po set; on error the partial context is freed.
 */
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			 struct fastrpc_ioctl_invoke_crc *invokefd,
			 struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	/* carve the per-argument arrays out of the tail of the allocation */
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}
	ctx->crc = (uint32_t *)invokefd->crc;
	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = current->tgid;
	init_completion(&ctx->work);

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}
871
/*
 * Park an invoke context interrupted by a signal: move it from the
 * pending to the interrupted list so the same task can resume it later
 * via context_restore_interrupted(). Also drops the buffer cache, since
 * the system may power-collapse while the invoke is parked.
 */
static void context_save_interrupted(struct smq_invoke_ctx *ctx)
{
	struct fastrpc_ctx_lst *clst = &ctx->fl->clst;

	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	hlist_add_head(&ctx->hn, &clst->interrupted);
	spin_unlock(&ctx->fl->hlock);
	/* free the cache on power collapse */
	fastrpc_buf_list_free(ctx->fl);
}
883
/*
 * Destroy an invoke context: unlink it from whichever list holds it,
 * drop the references on every per-argument mapping, return the staging
 * buffer to the file's cache, and free the context slab.
 */
static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i]);
	fastrpc_buf_free(ctx->buf, 1);	/* cache the staging buffer */
	kfree(ctx);
}
897
/* Record the DSP's return value and wake the waiter (retval set first). */
static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}
903
904
/*
 * Wake every waiter on @me's pending and interrupted contexts. Used on
 * subsystem restart so blocked invokes return instead of hanging; the
 * contexts' retvals are left untouched.
 */
static void fastrpc_notify_users(struct fastrpc_file *me)
{
	struct smq_invoke_ctx *ictx;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
		complete(&ictx->work);
	}
	hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
		complete(&ictx->work);
	}
	spin_unlock(&me->hlock);

}
920
/* Wake all waiters of every client bound to channel @cid (SSR path). */
static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
{
	struct fastrpc_file *fl;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		if (fl->cid == cid)
			fastrpc_notify_users(fl);
	}
	spin_unlock(&me->hlock);

}
/* Initialize a file's (empty) pending and interrupted context lists. */
static void context_list_ctor(struct fastrpc_ctx_lst *me)
{
	INIT_HLIST_HEAD(&me->interrupted);
	INIT_HLIST_HEAD(&me->pending);
}
939
/*
 * Free every invoke context (interrupted first, then pending) belonging
 * to @fl. Contexts are unlinked one at a time under the lock and freed
 * outside it, because context_free() may sleep and itself takes hlock.
 */
static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
{
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct smq_invoke_ctx *ictx = 0, *ctxfree;
	struct hlist_node *n;

	do {
		ctxfree = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
	do {
		ctxfree = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
}
971
972static int fastrpc_file_free(struct fastrpc_file *fl);
/*
 * Tear down every open client: pop each fastrpc_file off the global
 * drivers list under the lock and free it outside the lock (the free
 * path sleeps). Used on driver unload.
 */
static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
{
	struct fastrpc_file *fl, *free;
	struct hlist_node *n;

	do {
		free = 0;
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
			hlist_del_init(&fl->hn);
			free = fl;
			break;
		}
		spin_unlock(&me->hlock);
		if (free)
			fastrpc_file_free(free);
	} while (free);
}
991
992static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
993{
994 remote_arg64_t *rpra;
995 remote_arg_t *lpra = ctx->lpra;
996 struct smq_invoke_buf *list;
997 struct smq_phy_page *pages, *ipage;
998 uint32_t sc = ctx->sc;
999 int inbufs = REMOTE_SCALARS_INBUFS(sc);
1000 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001001 int handles, bufs = inbufs + outbufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001002 uintptr_t args;
1003 ssize_t rlen = 0, copylen = 0, metalen = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001004 int i, oix;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001005 int err = 0;
1006 int mflags = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001007 uint64_t *fdlist;
Sathish Ambleybae51902017-07-03 15:00:49 -07001008 uint32_t *crclist;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001009
1010 /* calculate size of the metadata */
1011 rpra = 0;
1012 list = smq_invoke_buf_start(rpra, sc);
1013 pages = smq_phy_page_start(sc, list);
1014 ipage = pages;
1015
1016 for (i = 0; i < bufs; ++i) {
1017 uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
1018 ssize_t len = lpra[i].buf.len;
1019
1020 if (ctx->fds[i] && (ctx->fds[i] != -1))
1021 fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1022 ctx->attrs[i], buf, len,
1023 mflags, &ctx->maps[i]);
1024 ipage += 1;
1025 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001026 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1027 for (i = bufs; i < bufs + handles; i++) {
1028 VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1029 FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
1030 if (err)
1031 goto bail;
1032 ipage += 1;
1033 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001034 metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
1035 (sizeof(uint32_t) * M_CRCLIST);
1036
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001037 /* calculate len requreed for copying */
1038 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1039 int i = ctx->overps[oix]->raix;
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001040 uintptr_t mstart, mend;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001041 ssize_t len = lpra[i].buf.len;
1042
1043 if (!len)
1044 continue;
1045 if (ctx->maps[i])
1046 continue;
1047 if (ctx->overps[oix]->offset == 0)
1048 copylen = ALIGN(copylen, BALIGN);
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001049 mstart = ctx->overps[oix]->mstart;
1050 mend = ctx->overps[oix]->mend;
1051 VERIFY(err, (mend - mstart) <= LONG_MAX);
1052 if (err)
1053 goto bail;
1054 copylen += mend - mstart;
1055 VERIFY(err, copylen >= 0);
1056 if (err)
1057 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001058 }
1059 ctx->used = copylen;
1060
1061 /* allocate new buffer */
1062 if (copylen) {
1063 VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
1064 if (err)
1065 goto bail;
1066 }
Tharun Kumar Merugue3361f92017-06-22 10:45:43 +05301067 if (ctx->buf->virt && metalen <= copylen)
1068 memset(ctx->buf->virt, 0, metalen);
1069
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001070 /* copy metadata */
1071 rpra = ctx->buf->virt;
1072 ctx->rpra = rpra;
1073 list = smq_invoke_buf_start(rpra, sc);
1074 pages = smq_phy_page_start(sc, list);
1075 ipage = pages;
1076 args = (uintptr_t)ctx->buf->virt + metalen;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001077 for (i = 0; i < bufs + handles; ++i) {
1078 if (lpra[i].buf.len)
1079 list[i].num = 1;
1080 else
1081 list[i].num = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001082 list[i].pgidx = ipage - pages;
1083 ipage++;
1084 }
1085 /* map ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001086 PERF(ctx->fl->profile, ctx->fl->perf.map,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001087 for (i = 0; i < inbufs + outbufs; ++i) {
1088 struct fastrpc_mmap *map = ctx->maps[i];
1089 uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
1090 ssize_t len = lpra[i].buf.len;
1091
1092 rpra[i].buf.pv = 0;
1093 rpra[i].buf.len = len;
1094 if (!len)
1095 continue;
1096 if (map) {
1097 struct vm_area_struct *vma;
1098 uintptr_t offset;
1099 int num = buf_num_pages(buf, len);
1100 int idx = list[i].pgidx;
1101
1102 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001103 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001104 } else {
1105 down_read(&current->mm->mmap_sem);
1106 VERIFY(err, NULL != (vma = find_vma(current->mm,
1107 map->va)));
1108 if (err) {
1109 up_read(&current->mm->mmap_sem);
1110 goto bail;
1111 }
1112 offset = buf_page_start(buf) - vma->vm_start;
1113 up_read(&current->mm->mmap_sem);
1114 VERIFY(err, offset < (uintptr_t)map->size);
1115 if (err)
1116 goto bail;
1117 }
1118 pages[idx].addr = map->phys + offset;
1119 pages[idx].size = num << PAGE_SHIFT;
1120 }
1121 rpra[i].buf.pv = buf;
1122 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001123 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001124 for (i = bufs; i < bufs + handles; ++i) {
1125 struct fastrpc_mmap *map = ctx->maps[i];
1126
1127 pages[i].addr = map->phys;
1128 pages[i].size = map->size;
1129 }
1130 fdlist = (uint64_t *)&pages[bufs + handles];
1131 for (i = 0; i < M_FDLIST; i++)
1132 fdlist[i] = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001133 crclist = (uint32_t *)&fdlist[M_FDLIST];
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301134 memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001135
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001136 /* copy non ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001137 PERF(ctx->fl->profile, ctx->fl->perf.copy,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001138 rlen = copylen - metalen;
1139 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1140 int i = ctx->overps[oix]->raix;
1141 struct fastrpc_mmap *map = ctx->maps[i];
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001142 ssize_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001143 uint64_t buf;
1144 ssize_t len = lpra[i].buf.len;
1145
1146 if (!len)
1147 continue;
1148 if (map)
1149 continue;
1150 if (ctx->overps[oix]->offset == 0) {
1151 rlen -= ALIGN(args, BALIGN) - args;
1152 args = ALIGN(args, BALIGN);
1153 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001154 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001155 VERIFY(err, rlen >= mlen);
1156 if (err)
1157 goto bail;
1158 rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
1159 pages[list[i].pgidx].addr = ctx->buf->phys -
1160 ctx->overps[oix]->offset +
1161 (copylen - rlen);
1162 pages[list[i].pgidx].addr =
1163 buf_page_start(pages[list[i].pgidx].addr);
1164 buf = rpra[i].buf.pv;
1165 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1166 if (i < inbufs) {
1167 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1168 lpra[i].buf.pv, len);
1169 if (err)
1170 goto bail;
1171 }
1172 args = args + mlen;
1173 rlen -= mlen;
1174 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001175 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001176
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001177 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001178 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1179 int i = ctx->overps[oix]->raix;
1180 struct fastrpc_mmap *map = ctx->maps[i];
1181
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001182 if (map && map->uncached)
1183 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301184 if (ctx->fl->sctx->smmu.coherent &&
1185 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1186 continue;
1187 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1188 continue;
1189
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001190 if (rpra[i].buf.len && ctx->overps[oix]->mstart)
1191 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1192 uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
1193 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001194 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001195 for (i = bufs; i < bufs + handles; i++) {
1196 rpra[i].dma.fd = ctx->fds[i];
1197 rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1198 rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001199 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001200
1201 if (!ctx->fl->sctx->smmu.coherent) {
1202 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001203 dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001204 PERF_END);
1205 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001206 bail:
1207 return err;
1208}
1209
/*
 * put_args() - copy results of a completed remote invocation back to the
 * caller and release per-call mappings.
 * @kernel: nonzero when destination buffers are kernel pointers; presumably
 *          switches K_COPY_TO_USER between memcpy and copy_to_user (macro
 *          defined elsewhere in this file) -- TODO confirm.
 * @ctx:    invocation context holding the remote arg array and metadata.
 * @upra:   caller's remote arg array (unused here; copies go via ctx->lpra).
 *
 * Returns 0 on success or the (negative) error from the copy-out.
 */
static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		 remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	uint32_t *crclist = NULL;

	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	/* Recompute the metadata layout (buf list, page list, fd list, crc
	 * list) inside the invoke buffer; must match the layout built by
	 * get_args().
	 */
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	crclist = (uint32_t *)(fdlist + M_FDLIST);

	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			/* Non-ion output buffer: copy the DSP-written data
			 * back to the caller's buffer.
			 */
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			/* Mapped (ion) buffer: DSP wrote in place; just drop
			 * the per-call mapping reference.
			 */
			fastrpc_mmap_free(ctx->maps[i]);
			ctx->maps[i] = 0;
		}
	}
	if (inbufs + outbufs + handles) {
		/* The DSP returns fds it no longer needs in the fd list
		 * (zero-terminated); free the corresponding mappings.
		 */
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap);
		}
	}
	/* Hand the per-buffer CRCs to the caller when a crc buffer was
	 * supplied with the ioctl.
	 */
	if (ctx->crc && crclist && rpra)
		K_COPY_TO_USER(err, kernel, (void __user *)ctx->crc,
			crclist, M_CRCLIST*sizeof(uint32_t));

 bail:
	return err;
}
1261
/*
 * inv_args_pre() - pre-invoke cache maintenance for output buffers.
 *
 * Before handing buffers to the DSP, flush the cache lines that straddle
 * the unaligned start/end of each output buffer, so that dirty CPU data
 * sharing a cache line with the buffer edges cannot later be evicted on
 * top of DSP-written data.  Buffers that are uncached, io-coherent, or
 * that share a page with the metadata buffer are skipped.
 */
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		/* uncached mappings need no CPU cache maintenance */
		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		/* on io-coherent SMMUs, skip unless the map explicitly
		 * opted out of coherency
		 */
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* buffers sharing the metadata page are handled with it */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		/* flush the cache line overlapping an unaligned start */
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
							rpra[i].buf.len);
		/* flush the cache line overlapping an unaligned end */
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}
1297
/*
 * inv_args() - post-invoke cache invalidation for output buffers.
 *
 * After the DSP completes, invalidate the CPU caches covering every
 * cacheable, non-coherent output buffer so the CPU observes the data the
 * DSP wrote, then invalidate the metadata buffer itself.  Ion-backed
 * buffers with a handle go through msm_ion_do_cache_op(); others use
 * dmac_inv_range() directly.
 */
static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		/* uncached mappings need no invalidation */
		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		/* skip io-coherent paths, same policy as inv_args_pre() */
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* buffers on the metadata page are covered by the final
		 * metadata invalidate below
		 */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						 + rpra[i].buf.len));
	}

	/* pick up DSP-written metadata (fd list, crc list, out lengths) */
	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}
1337
/*
 * fastrpc_invoke_send() - build the smq invoke message for @ctx and
 * transmit it to the DSP over the channel's glink port.
 * @kernel: nonzero for kernel-originated calls; the remote side presumably
 *          uses pid==0 to distinguish them -- TODO confirm with DSP docs.
 * @handle: remote handle the scalars in ctx->sc apply to.
 *
 * Fails with -ECONNRESET if a subsystem restart happened since this file
 * opened the channel, and with a VERIFY error if the glink port is not in
 * the CONNECTED state.  Returns the glink_tx() result otherwise.
 */
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, 0 != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = current->tgid;
	msg->tid = current->pid;
	if (kernel)
		msg->pid = 0;
	/* context pointer doubles as the completion cookie; the low bit
	 * carries the PD flag and is masked off on the response path
	 */
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	/* bail out if an SSR invalidated this session's channel */
	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
			FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
 bail:
	return err;
}
1373
1374static void fastrpc_init(struct fastrpc_apps *me)
1375{
1376 int i;
1377
1378 INIT_HLIST_HEAD(&me->drivers);
1379 spin_lock_init(&me->hlock);
1380 mutex_init(&me->smd_mutex);
1381 me->channel = &gcinfo[0];
1382 for (i = 0; i < NUM_CHANNELS; i++) {
1383 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301384 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001385 me->channel[i].sesscount = 0;
1386 }
1387}
1388
1389static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1390
1391static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
1392 uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07001393 struct fastrpc_ioctl_invoke_crc *inv)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001394{
1395 struct smq_invoke_ctx *ctx = 0;
1396 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
1397 int cid = fl->cid;
1398 int interrupted = 0;
1399 int err = 0;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001400 struct timespec invoket;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001401
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001402 if (fl->profile)
1403 getnstimeofday(&invoket);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001404 if (!kernel) {
1405 VERIFY(err, 0 == context_restore_interrupted(fl, inv,
1406 &ctx));
1407 if (err)
1408 goto bail;
1409 if (fl->sctx->smmu.faults)
1410 err = FASTRPC_ENOSUCH;
1411 if (err)
1412 goto bail;
1413 if (ctx)
1414 goto wait;
1415 }
1416
1417 VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
1418 if (err)
1419 goto bail;
1420
1421 if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001422 PERF(fl->profile, fl->perf.getargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001423 VERIFY(err, 0 == get_args(kernel, ctx));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001424 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001425 if (err)
1426 goto bail;
1427 }
1428
Sathish Ambleyc432b502017-06-05 12:03:42 -07001429 if (!fl->sctx->smmu.coherent)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001430 inv_args_pre(ctx);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001431 PERF(fl->profile, fl->perf.link,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001432 VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001433 PERF_END);
1434
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001435 if (err)
1436 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001437 wait:
1438 if (kernel)
1439 wait_for_completion(&ctx->work);
1440 else {
1441 interrupted = wait_for_completion_interruptible(&ctx->work);
1442 VERIFY(err, 0 == (err = interrupted));
1443 if (err)
1444 goto bail;
1445 }
Sathish Ambleyc432b502017-06-05 12:03:42 -07001446
1447 PERF(fl->profile, fl->perf.invargs,
1448 if (!fl->sctx->smmu.coherent)
1449 inv_args(ctx);
1450 PERF_END);
1451
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001452 VERIFY(err, 0 == (err = ctx->retval));
1453 if (err)
1454 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001455
1456 PERF(fl->profile, fl->perf.putargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001457 VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001458 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001459 if (err)
1460 goto bail;
1461 bail:
1462 if (ctx && interrupted == -ERESTARTSYS)
1463 context_save_interrupted(ctx);
1464 else if (ctx)
1465 context_free(ctx);
1466 if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
1467 err = ECONNRESET;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001468
1469 if (fl->profile && !interrupted) {
1470 if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
1471 fl->perf.invoke += getnstimediff(&invoket);
1472 if (!(invoke->handle >= 0 &&
1473 invoke->handle <= FASTRPC_STATIC_HANDLE_MAX))
1474 fl->perf.count++;
1475 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001476 return err;
1477}
1478
Sathish Ambley36849af2017-02-02 09:35:55 -08001479static int fastrpc_channel_open(struct fastrpc_file *fl);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001480static int fastrpc_init_process(struct fastrpc_file *fl,
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001481 struct fastrpc_ioctl_init_attrs *uproc)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001482{
1483 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001484 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001485 struct fastrpc_ioctl_init *init = &uproc->init;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001486 struct smq_phy_page pages[1];
1487 struct fastrpc_mmap *file = 0, *mem = 0;
1488
Sathish Ambley36849af2017-02-02 09:35:55 -08001489 VERIFY(err, !fastrpc_channel_open(fl));
1490 if (err)
1491 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001492 if (init->flags == FASTRPC_INIT_ATTACH) {
1493 remote_arg_t ra[1];
1494 int tgid = current->tgid;
1495
1496 ra[0].buf.pv = (void *)&tgid;
1497 ra[0].buf.len = sizeof(tgid);
1498 ioctl.inv.handle = 1;
1499 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
1500 ioctl.inv.pra = ra;
1501 ioctl.fds = 0;
1502 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001503 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001504 fl->pd = 0;
1505 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1506 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1507 if (err)
1508 goto bail;
1509 } else if (init->flags == FASTRPC_INIT_CREATE) {
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001510 remote_arg_t ra[6];
1511 int fds[6];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001512 int mflags = 0;
1513 struct {
1514 int pgid;
1515 int namelen;
1516 int filelen;
1517 int pageslen;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001518 int attrs;
1519 int siglen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001520 } inbuf;
1521
1522 inbuf.pgid = current->tgid;
1523 inbuf.namelen = strlen(current->comm) + 1;
1524 inbuf.filelen = init->filelen;
1525 fl->pd = 1;
1526 if (init->filelen) {
1527 VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
1528 init->file, init->filelen, mflags, &file));
1529 if (err)
1530 goto bail;
1531 }
1532 inbuf.pageslen = 1;
1533 VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
1534 init->mem, init->memlen, mflags, &mem));
1535 if (err)
1536 goto bail;
1537 inbuf.pageslen = 1;
1538 ra[0].buf.pv = (void *)&inbuf;
1539 ra[0].buf.len = sizeof(inbuf);
1540 fds[0] = 0;
1541
1542 ra[1].buf.pv = (void *)current->comm;
1543 ra[1].buf.len = inbuf.namelen;
1544 fds[1] = 0;
1545
1546 ra[2].buf.pv = (void *)init->file;
1547 ra[2].buf.len = inbuf.filelen;
1548 fds[2] = init->filefd;
1549
1550 pages[0].addr = mem->phys;
1551 pages[0].size = mem->size;
1552 ra[3].buf.pv = (void *)pages;
1553 ra[3].buf.len = 1 * sizeof(*pages);
1554 fds[3] = 0;
1555
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001556 inbuf.attrs = uproc->attrs;
1557 ra[4].buf.pv = (void *)&(inbuf.attrs);
1558 ra[4].buf.len = sizeof(inbuf.attrs);
1559 fds[4] = 0;
1560
1561 inbuf.siglen = uproc->siglen;
1562 ra[5].buf.pv = (void *)&(inbuf.siglen);
1563 ra[5].buf.len = sizeof(inbuf.siglen);
1564 fds[5] = 0;
1565
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001566 ioctl.inv.handle = 1;
1567 ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001568 if (uproc->attrs)
1569 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001570 ioctl.inv.pra = ra;
1571 ioctl.fds = fds;
1572 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001573 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001574 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1575 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1576 if (err)
1577 goto bail;
1578 } else {
1579 err = -ENOTTY;
1580 }
1581bail:
1582 if (mem && err)
1583 fastrpc_mmap_free(mem);
1584 if (file)
1585 fastrpc_mmap_free(file);
1586 return err;
1587}
1588
/*
 * fastrpc_release_current_dsp_process() - tell the DSP that this file's
 * remote process is going away.
 *
 * Invoked from fastrpc_file_free(); sends the stored tgid to remote
 * method 1 on the static handle.  Bails out early (without error
 * propagation mattering to the caller) when the channel id is invalid or
 * the channel was never opened.
 */
static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_crc ioctl;
	remote_arg_t ra[1];
	int tgid = 0;

	VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
	if (err)
		goto bail;
	VERIFY(err, fl->apps->channel[fl->cid].chan != 0);
	if (err)
		goto bail;
	tgid = fl->tgid;
	ra[0].buf.pv = (void *)&tgid;
	ra[0].buf.len = sizeof(tgid);
	ioctl.inv.handle = 1;
	/* method 1 (release), 1 inbuf, 0 outbufs */
	ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	ioctl.crc = NULL;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
bail:
	return err;
}
1616
/*
 * fastrpc_mmap_on_dsp() - ask the DSP to map @map into the remote process.
 * @flags: mmap flags forwarded to the remote side.
 * @map:   local mapping whose physical page is shared; on success
 *         map->raddr receives the remote virtual address.
 *
 * Uses remote method 4 (compat) or 2 with (pid, flags, vaddrin, num) in,
 * the page list in, and vaddrout back.  Note: map->raddr is written even
 * when the invoke fails; callers check err before using it.
 */
static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
			       struct fastrpc_mmap *map)
{
	struct fastrpc_ioctl_invoke_crc ioctl;
	struct smq_phy_page page;
	int num = 1;
	remote_arg_t ra[3];
	int err = 0;
	/* input args wire format -- ABI with the remote side */
	struct {
		int pid;
		uint32_t flags;
		uintptr_t vaddrin;
		int num;
	} inargs;
	/* output args wire format */
	struct {
		uintptr_t vaddrout;
	} routargs;

	inargs.pid = current->tgid;
	inargs.vaddrin = (uintptr_t)map->va;
	inargs.flags = flags;
	/* compat (32-bit) remote expects a byte count here, not a page
	 * count -- presumably a quirk of the older interface; TODO confirm
	 */
	inargs.num = fl->apps->compat ? num * sizeof(page) : num;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);
	page.addr = map->phys;
	page.size = map->size;
	ra[1].buf.pv = (void *)&page;
	ra[1].buf.len = num * sizeof(page);

	ra[2].buf.pv = (void *)&routargs;
	ra[2].buf.len = sizeof(routargs);

	ioctl.inv.handle = 1;
	if (fl->apps->compat)
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
	else
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	ioctl.crc = NULL;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	map->raddr = (uintptr_t)routargs.vaddrout;

	return err;
}
1664
/*
 * fastrpc_munmap_on_dsp() - ask the DSP to unmap @map from the remote
 * process.  Counterpart of fastrpc_mmap_on_dsp(): sends (pid, vaddrout,
 * size) to remote method 5 (compat) or 3.  Returns the invoke result.
 */
static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
				 struct fastrpc_mmap *map)
{
	struct fastrpc_ioctl_invoke_crc ioctl;
	remote_arg_t ra[1];
	int err = 0;
	/* input args wire format -- ABI with the remote side */
	struct {
		int pid;
		uintptr_t vaddrout;
		ssize_t size;
	} inargs;

	inargs.pid = current->tgid;
	inargs.size = map->size;
	/* raddr is the remote address returned by the earlier map call */
	inargs.vaddrout = map->raddr;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);

	ioctl.inv.handle = 1;
	if (fl->apps->compat)
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
	else
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	ioctl.crc = NULL;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	return err;
}
1696
1697static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
1698 ssize_t len, struct fastrpc_mmap **ppmap);
1699
1700static void fastrpc_mmap_add(struct fastrpc_mmap *map);
1701
/*
 * fastrpc_internal_munmap() - FASTRPC_IOCTL_MUNMAP backend.
 *
 * Detaches the mapping matching (vaddrout, size) from the file's map
 * list, asks the DSP to unmap it, then frees it locally.  If the remote
 * unmap fails, the mapping is re-added to the list so state stays
 * consistent with the DSP's view.
 */
static int fastrpc_internal_munmap(struct fastrpc_file *fl,
				   struct fastrpc_ioctl_munmap *ud)
{
	int err = 0;
	struct fastrpc_mmap *map = 0;

	VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
	if (err)
		goto bail;
	VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
	if (err)
		goto bail;
	fastrpc_mmap_free(map);
bail:
	/* roll back the list removal on failure */
	if (err && map)
		fastrpc_mmap_add(map);
	return err;
}
1720
/*
 * fastrpc_internal_mmap() - FASTRPC_IOCTL_MMAP backend.
 *
 * If an equivalent mapping already exists for this file, succeeds
 * immediately (the existing remote address is presumably already in
 * ud->vaddrout from the earlier call -- TODO confirm).  Otherwise creates
 * a new mapping, registers it with the DSP, and returns the remote
 * address in ud->vaddrout.  The local mapping is freed on remote failure.
 */
static int fastrpc_internal_mmap(struct fastrpc_file *fl,
				 struct fastrpc_ioctl_mmap *ud)
{

	struct fastrpc_mmap *map = 0;
	int err = 0;

	/* dedupe: refcount-bump an existing mapping instead of remapping */
	if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin, ud->size,
			       ud->flags, 1, &map))
		return 0;

	VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
			(uintptr_t)ud->vaddrin, ud->size, ud->flags, &map));
	if (err)
		goto bail;
	VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
	if (err)
		goto bail;
	ud->vaddrout = map->raddr;
 bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}
1745
/*
 * fastrpc_channel_close() - kref release callback for a channel.
 *
 * Called via kref_put_mutex() with me->smd_mutex HELD; closes the glink
 * port, unregisters the link-state callback, and then releases the mutex
 * on behalf of the caller (this asymmetric unlock is part of the
 * kref_put_mutex() contract).
 */
static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	/* channel id is the index into the static gcinfo[] array */
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = 0;
	glink_unregister_link_state_cb(ctx->link.link_notify_handle);
	ctx->link.link_notify_handle = NULL;
	mutex_unlock(&me->smd_mutex);
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
						MAJOR(me->dev_no), cid);
}
1762
1763static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
1764
/*
 * fastrpc_session_alloc_locked() - pick a free SMMU session on @chan.
 * @secure:  required security attribute of the session's SMMU context.
 * @session: out parameter; receives the chosen session.
 *
 * Caller must hold me->smd_mutex (see fastrpc_session_alloc()).  Scans
 * the channel's session table for an unused entry with a matching secure
 * flag and marks it used.  Channels with no session table (sesscount == 0)
 * fall back to session 0 backed by the global device.
 */
static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
			int secure, struct fastrpc_session_ctx **session)
{
	struct fastrpc_apps *me = &gfa;
	int idx = 0, err = 0;

	if (chan->sesscount) {
		for (idx = 0; idx < chan->sesscount; ++idx) {
			if (!chan->session[idx].used &&
				chan->session[idx].smmu.secure == secure) {
				chan->session[idx].used = 1;
				break;
			}
		}
		/* all sessions busy (or no secure match) -> error */
		VERIFY(err, idx < chan->sesscount);
		if (err)
			goto bail;
		/* reset the fault count for the fresh user */
		chan->session[idx].smmu.faults = 0;
	} else {
		VERIFY(err, me->dev != NULL);
		if (err)
			goto bail;
		chan->session[0].dev = me->dev;
	}

	*session = &chan->session[idx];
 bail:
	return err;
}
1794
1795bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv, size_t size)
1796{
1797 if (glink_queue_rx_intent(h, NULL, size))
1798 return false;
1799 return true;
1800}
1801
/*
 * fastrpc_glink_notify_tx_done() - glink tx-done callback.  Intentionally
 * empty: the message buffer lives in the invoke context, so nothing needs
 * to be released when transmission completes.
 */
void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
				const void *pkt_priv, const void *ptr)
{
}
1806
/*
 * fastrpc_glink_notify_rx() - glink rx callback delivering DSP responses.
 *
 * A packet may contain several smq_invoke_rsp records; walk them all,
 * mask off the PD bit that was OR'd into the context cookie on send (see
 * fastrpc_invoke_send()), wake the waiting invoker with the retval, and
 * return the packet to glink.
 */
void fastrpc_glink_notify_rx(void *handle, const void *priv,
	const void *pkt_priv, const void *ptr, size_t size)
{
	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
	int len = size;

	while (len >= sizeof(*rsp) && rsp) {
		/* clear the PD flag to recover the context pointer */
		rsp->ctx = rsp->ctx & ~1;
		context_notify_user(uint64_to_ptr(rsp->ctx), rsp->retval);
		rsp++;
		len = len - sizeof(*rsp);
	}
	/* true: glink may immediately reuse the rx intent */
	glink_rx_done(handle, ptr, true);
}
1821
/*
 * fastrpc_glink_notify_state() - glink channel (port) state callback.
 *
 * priv carries the channel id encoded as a pointer.  Tracks the port
 * state machine: CONNECTED wakes anyone blocked in channel open
 * (workport); REMOTE_DISCONNECTED (e.g. on subsystem restart) tears the
 * local handle down.
 */
void fastrpc_glink_notify_state(void *handle, const void *priv,
				unsigned int event)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)(uintptr_t)priv;
	struct fastrpc_glink_info *link;

	if (cid < 0 || cid >= NUM_CHANNELS)
		return;
	link = &me->channel[cid].link;
	switch (event) {
	case GLINK_CONNECTED:
		link->port_state = FASTRPC_LINK_CONNECTED;
		complete(&me->channel[cid].workport);
		break;
	case GLINK_LOCAL_DISCONNECTED:
		link->port_state = FASTRPC_LINK_DISCONNECTED;
		break;
	case GLINK_REMOTE_DISCONNECTED:
		/* remote side went away: close our end if still open */
		if (me->channel[cid].chan) {
			fastrpc_glink_close(me->channel[cid].chan, cid);
			me->channel[cid].chan = 0;
		}
		break;
	default:
		break;
	}
}
1850
1851static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
1852 struct fastrpc_session_ctx **session)
1853{
1854 int err = 0;
1855 struct fastrpc_apps *me = &gfa;
1856
1857 mutex_lock(&me->smd_mutex);
1858 if (!*session)
1859 err = fastrpc_session_alloc_locked(chan, secure, session);
1860 mutex_unlock(&me->smd_mutex);
1861 return err;
1862}
1863
1864static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
1865 struct fastrpc_session_ctx *session)
1866{
1867 struct fastrpc_apps *me = &gfa;
1868
1869 mutex_lock(&me->smd_mutex);
1870 session->used = 0;
1871 mutex_unlock(&me->smd_mutex);
1872}
1873
/*
 * fastrpc_file_free() - tear down all per-fd state.
 *
 * Unlinks the file from the global driver list, notifies the DSP that the
 * remote process is exiting, frees contexts, buffers, and mappings, drops
 * the channel reference (only if no SSR happened since open -- an SSR
 * already reset the channel), and releases the SMMU sessions.  Files that
 * never acquired a session (fl->sctx == NULL) are freed directly.
 */
static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n;
	struct fastrpc_mmap *map = 0;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	/* no session means nothing DSP-side was ever set up */
	if (!fl->sctx) {
		kfree(fl);
		return 0;
	}
	/* best effort: ignore failures releasing the remote process */
	(void)fastrpc_release_current_dsp_process(fl);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map);
	}
	/* skip the kref drop if an SSR already recycled the channel */
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
				fastrpc_channel_close, &fl->apps->smd_mutex);
	if (fl->sctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
	kfree(fl);
	return 0;
}
1908
1909static int fastrpc_device_release(struct inode *inode, struct file *file)
1910{
1911 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
1912
1913 if (fl) {
Sathish Ambley1ca68232017-01-19 10:32:55 -08001914 if (fl->debugfs_file != NULL)
1915 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001916 fastrpc_file_free(fl);
1917 file->private_data = 0;
1918 }
1919 return 0;
1920}
1921
1922static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
1923 void *priv)
1924{
1925 struct fastrpc_apps *me = &gfa;
1926 int cid = (int)((uintptr_t)priv);
1927 struct fastrpc_glink_info *link;
1928
1929 if (cid < 0 || cid >= NUM_CHANNELS)
1930 return;
1931
1932 link = &me->channel[cid].link;
1933 switch (cb_info->link_state) {
1934 case GLINK_LINK_STATE_UP:
1935 link->link_state = FASTRPC_LINK_STATE_UP;
1936 complete(&me->channel[cid].work);
1937 break;
1938 case GLINK_LINK_STATE_DOWN:
1939 link->link_state = FASTRPC_LINK_STATE_DOWN;
1940 break;
1941 default:
1942 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
1943 break;
1944 }
1945}
1946
/*
 * fastrpc_glink_register() - register for link-state notifications on the
 * channel's glink transport and wait (with RPC_TIMEOUT) for the link to
 * come up.  Idempotent: returns immediately if a callback is already
 * registered.  Returns 0 on success (link up) or an error/timeout.
 */
static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;

	link = &me->channel[cid].link;
	/* already registered for this channel */
	if (link->link_notify_handle != NULL)
		goto bail;

	link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
	link->link_notify_handle = glink_register_link_state_cb(
					&link->link_info,
					(void *)((uintptr_t)cid));
	VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
	if (err) {
		link->link_notify_handle = NULL;
		goto bail;
	}
	/* completed by fastrpc_link_state_handler() on LINK_STATE_UP */
	VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
						RPC_TIMEOUT));
bail:
	return err;
}
1974
/*
 * fastrpc_glink_close() - close the glink port for channel @cid.
 *
 * Only acts when the port is currently CONNECTED; moves the state to
 * DISCONNECTING before calling glink_close() so concurrent observers of
 * port_state do not treat the port as usable mid-teardown.
 */
static void fastrpc_glink_close(void *chan, int cid)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		return;
	link = &gfa.channel[cid].link;

	if (link->port_state == FASTRPC_LINK_CONNECTED) {
		link->port_state = FASTRPC_LINK_DISCONNECTING;
		glink_close(chan);
	}
}
1990
/*
 * fastrpc_glink_open() - open the glink port for channel @cid.
 *
 * Preconditions (enforced with VERIFY): valid cid, link transport is UP,
 * and the port is currently DISCONNECTED.  Fills the channel's glink
 * open-config with the per-channel edge/transport and the shared fastrpc
 * callbacks, then opens the port; the resulting handle is stored in the
 * channel.  The CONNECTED transition arrives asynchronously via
 * fastrpc_glink_notify_state().
 */
static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &me->channel[cid].link.cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	/* refuse to open while a previous open/close is still in flight */
	VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
	if (err)
		goto bail;

	link->port_state = FASTRPC_LINK_CONNECTING;
	cfg->priv = (void *)(uintptr_t)cid;
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err)
		goto bail;
	me->channel[cid].chan = handle;
bail:
	return err;
}
2029
Sathish Ambley1ca68232017-01-19 10:32:55 -08002030static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
2031{
2032 filp->private_data = inode->i_private;
2033 return 0;
2034}
2035
/*
 * debugfs read handler.
 *
 * With no private data (the "global" node) it dumps every channel's
 * session/SMMU summary; for a per-process node it dumps that process's
 * buffers, maps and pending/interrupted invoke contexts.  Output is
 * rendered into a DEBUGFS_SIZE scratch buffer and handed to
 * simple_read_from_buffer(), so seeking/partial reads work.
 */
static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
					 size_t count, loff_t *position)
{
	struct fastrpc_file *fl = filp->private_data;
	struct hlist_node *n;
	struct fastrpc_buf *buf = 0;
	struct fastrpc_mmap *map = 0;
	struct smq_invoke_ctx *ictx = 0;
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	unsigned int len = 0;
	int i, j, ret = 0;
	char *fileinfo = NULL;

	fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
	if (!fileinfo)
		goto bail;
	if (fl == NULL) {
		/* Global node: per-channel session table. */
		for (i = 0; i < NUM_CHANNELS; i++) {
			chan = &gcinfo[i];
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s\n\n",
					chan->name);
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s %d\n",
					"sesscount:", chan->sesscount);
			for (j = 0; j < chan->sesscount; j++) {
				sess = &chan->session[j];
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s%d\n\n", "SESSION", j);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "sid:",
						sess->smmu.cb);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "SECURE:",
						sess->smmu.secure);
			}
		}
	} else {
		/* Per-process node: identity plus resource lists. */
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"PROCESS_ID:", fl->tgid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"CHANNEL_ID:", fl->cid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"SSRCOUNT:", fl->ssrcount);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s\n",
				"LIST OF BUFS:");
		/* hlock guards the bufs/maps/context lists while we walk. */
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %p %s %p %s %llx\n", "buf:",
					buf, "buf->virt:", buf->virt,
					"buf->phys:", buf->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF MAPS:");
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
						"%s %p %s %lx %s %llx\n",
						"map:", map,
						"map->va:", map->va,
						"map->phys:", map->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF PENDING SMQCONTEXTS:");
		/* NOTE(review): ictx->rpra is dereferenced unconditionally;
		 * assumes every listed context has rpra set up -- confirm.
		 */
		hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
						"%s %p %s %u %s %u %s %u\n",
						"smqcontext:", ictx,
						"sc:", ictx->sc,
						"tid:", ictx->pid,
						"handle", ictx->rpra->h);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF INTERRUPTED SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %p %s %u %s %u %s %u\n",
					"smqcontext:", ictx,
					"sc:", ictx->sc,
					"tid:", ictx->pid,
					"handle", ictx->rpra->h);
		}
		spin_unlock(&fl->hlock);
	}
	/* scnprintf already bounds len; this is a belt-and-braces clamp. */
	if (len > DEBUGFS_SIZE)
		len = DEBUGFS_SIZE;
	ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
	kfree(fileinfo);
bail:
	return ret;
}
2138
/* File operations shared by the per-process and "global" debugfs nodes. */
static const struct file_operations debugfs_fops = {
	.open = fastrpc_debugfs_open,
	.read = fastrpc_debugfs_read,
};
/*
 * Ensure the glink channel for fl->cid is open, taking a reference on it.
 *
 * Under smd_mutex: if the channel is already alive, kref_get_unless_zero()
 * takes the reference and nothing more is done.  Otherwise the glink port
 * is registered and opened, and we wait (bounded by RPC_TIMEOUT) for the
 * link-up completion before initialising the kref.  The file's ssrcount
 * is snapshotted so later invokes can detect a subsystem restart.
 * Returns 0 on success, non-zero VERIFY error otherwise.
 */
static int fastrpc_channel_open(struct fastrpc_file *fl)
{
	struct fastrpc_apps *me = &gfa;
	int cid, err = 0;

	mutex_lock(&me->smd_mutex);

	VERIFY(err, fl && fl->sctx);
	if (err)
		goto bail;
	cid = fl->cid;
	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
	if (err)
		goto bail;
	fl->ssrcount = me->channel[cid].ssrcount;
	/*
	 * kref_get_unless_zero() returns 0 when there are no users yet (or
	 * the channel was torn down); in that case open it from scratch.
	 */
	if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
	    (me->channel[cid].chan == 0)) {
		VERIFY(err, 0 == fastrpc_glink_register(cid, me));
		if (err)
			goto bail;
		VERIFY(err, 0 == fastrpc_glink_open(cid));
		if (err)
			goto bail;

		/* Wait for the glink state callback to signal link-up;
		 * wait_for_completion_timeout() returns 0 on timeout.
		 */
		VERIFY(err,
			 wait_for_completion_timeout(&me->channel[cid].workport,
						RPC_TIMEOUT));
		if (err) {
			me->channel[cid].chan = 0;
			goto bail;
		}
		kref_init(&me->channel[cid].kref);
		pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
						MAJOR(me->dev_no), cid);
		/* Record the SSR generation this open happened in. */
		if (me->channel[cid].ssrcount !=
		    me->channel[cid].prevssrcount) {
			me->channel[cid].prevssrcount =
					me->channel[cid].ssrcount;
		}
	}

bail:
	mutex_unlock(&me->smd_mutex);
	return err;
}
2188
Sathish Ambley36849af2017-02-02 09:35:55 -08002189static int fastrpc_device_open(struct inode *inode, struct file *filp)
2190{
2191 int err = 0;
Sathish Ambley567012b2017-03-06 11:55:04 -08002192 struct dentry *debugfs_file;
Sathish Ambley36849af2017-02-02 09:35:55 -08002193 struct fastrpc_file *fl = 0;
2194 struct fastrpc_apps *me = &gfa;
2195
2196 VERIFY(err, fl = kzalloc(sizeof(*fl), GFP_KERNEL));
2197 if (err)
2198 return err;
Sathish Ambley567012b2017-03-06 11:55:04 -08002199 debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
2200 fl, &debugfs_fops);
Sathish Ambley36849af2017-02-02 09:35:55 -08002201 context_list_ctor(&fl->clst);
2202 spin_lock_init(&fl->hlock);
2203 INIT_HLIST_HEAD(&fl->maps);
2204 INIT_HLIST_HEAD(&fl->bufs);
2205 INIT_HLIST_NODE(&fl->hn);
2206 fl->tgid = current->tgid;
2207 fl->apps = me;
2208 fl->mode = FASTRPC_MODE_SERIAL;
2209 fl->cid = -1;
Sathish Ambley567012b2017-03-06 11:55:04 -08002210 if (debugfs_file != NULL)
2211 fl->debugfs_file = debugfs_file;
2212 memset(&fl->perf, 0, sizeof(fl->perf));
Sathish Ambley36849af2017-02-02 09:35:55 -08002213 filp->private_data = fl;
2214 spin_lock(&me->hlock);
2215 hlist_add_head(&fl->hn, &me->drivers);
2216 spin_unlock(&me->hlock);
2217 return 0;
2218}
2219
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002220static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
2221{
2222 int err = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08002223 uint32_t cid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002224
Sathish Ambley36849af2017-02-02 09:35:55 -08002225 VERIFY(err, fl != 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002226 if (err)
2227 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08002228 if (fl->cid == -1) {
2229 cid = *info;
2230 VERIFY(err, cid < NUM_CHANNELS);
2231 if (err)
2232 goto bail;
2233 fl->cid = cid;
2234 fl->ssrcount = fl->apps->channel[cid].ssrcount;
2235 VERIFY(err, !fastrpc_session_alloc_locked(
2236 &fl->apps->channel[cid], 0, &fl->sctx));
2237 if (err)
2238 goto bail;
2239 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002240 *info = (fl->sctx->smmu.enabled ? 1 : 0);
2241bail:
2242 return err;
2243}
2244
/*
 * Main ioctl dispatcher for the fastrpc character device.
 *
 * The invoke family (INVOKE / INVOKE_FD / INVOKE_ATTRS / INVOKE_CRC)
 * shares one union member; those cases deliberately fall through, each
 * setting @size to its own (growing) struct size so a single
 * copy_from_user() reads exactly the fields the caller's variant
 * provides.  Optional fields not covered by older variants (fds, attrs,
 * crc) are pre-cleared before the switch.  INIT/INIT_ATTRS use the same
 * pattern.  Returns 0 on success or a negative errno / VERIFY error.
 */
static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
				 unsigned long ioctl_param)
{
	union {
		struct fastrpc_ioctl_invoke_crc inv;
		struct fastrpc_ioctl_mmap mmap;
		struct fastrpc_ioctl_munmap munmap;
		struct fastrpc_ioctl_init_attrs init;
		struct fastrpc_ioctl_perf perf;
	} p;
	void *param = (char *)ioctl_param;
	struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
	int size = 0, err = 0;
	uint32_t info;

	/* Defaults for fields absent from the older invoke variants. */
	p.inv.fds = 0;
	p.inv.attrs = 0;
	p.inv.crc = NULL;

	switch (ioctl_num) {
	case FASTRPC_IOCTL_INVOKE:
		size = sizeof(struct fastrpc_ioctl_invoke);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_FD:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_fd);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_ATTRS:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_attrs);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_CRC:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_crc);
		VERIFY(err, 0 == copy_from_user(&p.inv, param, size));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
						0, &p.inv)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MMAP:
		VERIFY(err, 0 == copy_from_user(&p.mmap, param,
						sizeof(p.mmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
		if (err)
			goto bail;
		/* Copy back the result (device-side address) to the user. */
		VERIFY(err, 0 == copy_to_user(param, &p.mmap, sizeof(p.mmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MUNMAP:
		VERIFY(err, 0 == copy_from_user(&p.munmap, param,
						sizeof(p.munmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
							&p.munmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_SETMODE:
		/* Mode is passed by value in ioctl_param, not via pointer. */
		switch ((uint32_t)ioctl_param) {
		case FASTRPC_MODE_PARALLEL:
		case FASTRPC_MODE_SERIAL:
			fl->mode = (uint32_t)ioctl_param;
			break;
		case FASTRPC_MODE_PROFILE:
			fl->profile = (uint32_t)ioctl_param;
			break;
		default:
			err = -ENOTTY;
			break;
		}
		break;
	case FASTRPC_IOCTL_GETPERF:
		VERIFY(err, 0 == copy_from_user(&p.perf,
					param, sizeof(p.perf)));
		if (err)
			goto bail;
		p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
		if (p.perf.keys) {
			char *keys = PERF_KEYS;

			/*
			 * NOTE(review): writes strlen(keys)+1 bytes to the
			 * user-supplied pointer; assumes userspace sized its
			 * buffer for PERF_KEYS -- verify against the caller
			 * contract.
			 */
			VERIFY(err, 0 == copy_to_user((char *)p.perf.keys,
						 keys, strlen(keys)+1));
			if (err)
				goto bail;
		}
		if (p.perf.data) {
			VERIFY(err, 0 == copy_to_user((int64_t *)p.perf.data,
						 &fl->perf, sizeof(fl->perf)));
		}
		VERIFY(err, 0 == copy_to_user(param, &p.perf, sizeof(p.perf)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_GETINFO:
		VERIFY(err, 0 == copy_from_user(&info, param, sizeof(info)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
		if (err)
			goto bail;
		VERIFY(err, 0 == copy_to_user(param, &info, sizeof(info)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_INIT:
		/* Defaults for fields the legacy init struct lacks. */
		p.init.attrs = 0;
		p.init.siglen = 0;
		size = sizeof(struct fastrpc_ioctl_init);
		/* fall through */
	case FASTRPC_IOCTL_INIT_ATTRS:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_init_attrs);
		VERIFY(err, 0 == copy_from_user(&p.init, param, size));
		if (err)
			goto bail;
		VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
		if (err)
			goto bail;
		break;

	default:
		err = -ENOTTY;
		pr_info("bad ioctl: %d\n", ioctl_num);
		break;
	}
 bail:
	return err;
}
2380
2381static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
2382 unsigned long code,
2383 void *data)
2384{
2385 struct fastrpc_apps *me = &gfa;
2386 struct fastrpc_channel_ctx *ctx;
2387 int cid;
2388
2389 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
2390 cid = ctx - &me->channel[0];
2391 if (code == SUBSYS_BEFORE_SHUTDOWN) {
2392 mutex_lock(&me->smd_mutex);
2393 ctx->ssrcount++;
2394 if (ctx->chan) {
2395 fastrpc_glink_close(ctx->chan, cid);
2396 ctx->chan = 0;
2397 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
2398 gcinfo[cid].name, MAJOR(me->dev_no), cid);
2399 }
2400 mutex_unlock(&me->smd_mutex);
2401 fastrpc_notify_drivers(me, cid);
2402 }
2403
2404 return NOTIFY_DONE;
2405}
2406
/* Character-device file operations for the fastrpc device node. */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};
2413
/* Device-tree compatibles handled by fastrpc_probe() (and populated
 * as children of the top-level node).
 */
static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};
2421
2422static int fastrpc_cb_probe(struct device *dev)
2423{
2424 struct fastrpc_channel_ctx *chan;
2425 struct fastrpc_session_ctx *sess;
2426 struct of_phandle_args iommuspec;
2427 const char *name;
2428 unsigned int start = 0x80000000;
2429 int err = 0, i;
2430 int secure_vmid = VMID_CP_PIXEL;
2431
2432 VERIFY(err, 0 != (name = of_get_property(dev->of_node, "label", NULL)));
2433 if (err)
2434 goto bail;
2435 for (i = 0; i < NUM_CHANNELS; i++) {
2436 if (!gcinfo[i].name)
2437 continue;
2438 if (!strcmp(name, gcinfo[i].name))
2439 break;
2440 }
2441 VERIFY(err, i < NUM_CHANNELS);
2442 if (err)
2443 goto bail;
2444 chan = &gcinfo[i];
2445 VERIFY(err, chan->sesscount < NUM_SESSIONS);
2446 if (err)
2447 goto bail;
2448
2449 VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
2450 "#iommu-cells", 0, &iommuspec));
2451 if (err)
2452 goto bail;
2453 sess = &chan->session[chan->sesscount];
2454 sess->smmu.cb = iommuspec.args[0] & 0xf;
2455 sess->used = 0;
2456 sess->smmu.coherent = of_property_read_bool(dev->of_node,
2457 "dma-coherent");
2458 sess->smmu.secure = of_property_read_bool(dev->of_node,
2459 "qcom,secure-context-bank");
2460 if (sess->smmu.secure)
2461 start = 0x60000000;
2462 VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
2463 arm_iommu_create_mapping(&platform_bus_type,
2464 start, 0x7fffffff)));
2465 if (err)
2466 goto bail;
2467
2468 if (sess->smmu.secure)
2469 iommu_domain_set_attr(sess->smmu.mapping->domain,
2470 DOMAIN_ATTR_SECURE_VMID,
2471 &secure_vmid);
2472
2473 VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
2474 if (err)
2475 goto bail;
2476 sess->dev = dev;
2477 sess->smmu.enabled = 1;
2478 chan->sesscount++;
Sathish Ambley1ca68232017-01-19 10:32:55 -08002479 debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
2480 NULL, &debugfs_fops);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002481bail:
2482 return err;
2483}
2484
/*
 * Platform probe, dispatched by DT compatible:
 *  - "qcom,msm-fastrpc-compute-cb": probe an SMMU context-bank session.
 *  - "qcom,msm-adsprpc-mem-region": locate the ADSP ION heap's CMA region
 *    and hyp-assign it to HLOS plus the three DSP subsystems.
 *  - otherwise: populate child nodes so the cases above get probed.
 * Returns 0 on success, non-zero error otherwise.
 */
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			/* Find the ADSP heap child and its CMA region. */
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
						VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					};

			/* Share the region with the modem/SLPI/ADSP VMs. */
			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
					srcVM, 1, destVM, destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					  fastrpc_match_table,
					  NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}
2548
2549static void fastrpc_deinit(void)
2550{
2551 struct fastrpc_apps *me = &gfa;
2552 struct fastrpc_channel_ctx *chan = gcinfo;
2553 int i, j;
2554
2555 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
2556 if (chan->chan) {
2557 kref_put_mutex(&chan->kref,
2558 fastrpc_channel_close, &me->smd_mutex);
2559 chan->chan = 0;
2560 }
2561 for (j = 0; j < NUM_SESSIONS; j++) {
2562 struct fastrpc_session_ctx *sess = &chan->session[j];
2563
2564 if (sess->smmu.enabled) {
2565 arm_iommu_detach_device(sess->dev);
2566 sess->dev = 0;
2567 }
2568 if (sess->smmu.mapping) {
2569 arm_iommu_release_mapping(sess->smmu.mapping);
2570 sess->smmu.mapping = 0;
2571 }
2572 }
2573 }
2574}
2575
/* Platform driver; fastrpc_probe() dispatches on the DT compatible. */
static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};
2584
/*
 * Module init: register the platform driver, create the character device,
 * register a subsystem-restart notifier per channel, create the ION
 * client and the debugfs root.  Error paths unwind in reverse order via
 * the *_bail labels.
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	struct device *dev = 0;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	/* NOTE(review): NUM_CHANNELS minors are reserved above but only one
	 * cdev minor (0) is added and one device node created -- confirm
	 * all channels are meant to share minor 0.
	 */
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				1));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	dev = device_create(me->class, NULL,
				MKDEV(MAJOR(me->dev_no), 0),
				NULL, gcinfo[0].name);
	VERIFY(err, !IS_ERR_OR_NULL(dev));
	if (err)
		goto device_create_bail;
	/* Every channel shares the one device and gets an SSR notifier. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		me->channel[i].dev = dev;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	debugfs_root = debugfs_create_dir("adsprpc", NULL);
	return 0;
device_create_bail:
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (me->channel[i].handle)
			subsys_notif_unregister_notifier(me->channel[i].handle,
							&me->channel[i].nb);
	}
	if (!IS_ERR_OR_NULL(dev))
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}
2653
/*
 * Module exit: tear down open files and channels, then device nodes,
 * notifiers, the cdev/class/region, the ION client and debugfs.
 */
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;
	int i;

	fastrpc_file_list_dtor(me);
	fastrpc_deinit();
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		/* NOTE(review): init created only minor 0, yet this destroys
		 * minor i per named channel -- confirm intent.
		 */
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						&me->channel[i].nb);
	}
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
	ion_client_destroy(me->client);
	debugfs_remove_recursive(debugfs_root);
}
2674
2675late_initcall(fastrpc_device_init);
2676module_exit(fastrpc_device_exit);
2677
2678MODULE_LICENSE("GPL v2");