/*
 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/msm_ion.h>
#include <soc/qcom/secure_buffer.h>
#include <soc/qcom/glink.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>
#include <linux/scatterlist.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/sort.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <asm/dma-iommu.h>
#include "adsprpc_compat.h"
#include "adsprpc_shared.h"

#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
#define TZ_PIL_AUTH_QDSP6_PROC 1
#define FASTRPC_ENOSUCH 39
#define VMID_SSC_Q6 5
#define VMID_ADSP_Q6 6

#define RPC_TIMEOUT (5 * HZ)
#define BALIGN 128
#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp */
#define NUM_SESSIONS 9 /* 8 compute, 1 cpz */
#define M_FDLIST 16

#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)

#define FASTRPC_LINK_STATE_DOWN (0x0)
#define FASTRPC_LINK_STATE_UP (0x1)
#define FASTRPC_LINK_DISCONNECTED (0x0)
#define FASTRPC_LINK_CONNECTING (0x1)
#define FASTRPC_LINK_CONNECTED (0x3)
#define FASTRPC_LINK_DISCONNECTING (0x7)

#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
#define FASTRPC_STATIC_HANDLE_LISTENER (3)
#define FASTRPC_STATIC_HANDLE_MAX (20)

#define PERF_END (void)0

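/*
 * PERF() wraps a statement list: when profiling is enabled (@enb) it
 * timestamps the statements with getnstimeofday() and accumulates the
 * elapsed nanoseconds into the per-file counter @cnt. PERF_END closes
 * the wrapped statement list.
 */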
#define PERF(enb, cnt, ff) \
	{\
		struct timespec startT = {0};\
		if (enb) {\
			getnstimeofday(&startT);\
		} \
		ff ;\
		if (enb) {\
			cnt += getnstimediff(&startT);\
		} \
	}

static int fastrpc_glink_open(int cid);
static void fastrpc_glink_close(void *chan, int cid);

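/*
 * Page arithmetic helpers: compute the page-aligned start, in-page
 * offset, page count, and page-rounded size of the (address, length)
 * buffers that get described to the DSP.
 */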
static inline uint64_t buf_page_start(uint64_t buf)
{
	uint64_t start = (uint64_t) buf & PAGE_MASK;
	return start;
}

static inline uint64_t buf_page_offset(uint64_t buf)
{
	uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
	return offset;
}

static inline int buf_num_pages(uint64_t buf, ssize_t len)
{
	uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
	uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
	int nPages = end - start + 1;
	return nPages;
}

static inline uint64_t buf_page_size(uint32_t size)
{
	uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;

	return sz > PAGE_SIZE ? sz : PAGE_SIZE;
}

static inline void *uint64_to_ptr(uint64_t addr)
{
	void *ptr = (void *)((uintptr_t)addr);

	return ptr;
}

static inline uint64_t ptr_to_uint64(void *ptr)
{
	uint64_t addr = (uint64_t)((uintptr_t)ptr);

	return addr;
}

struct fastrpc_file;

struct fastrpc_buf {
	struct hlist_node hn;
	struct fastrpc_file *fl;
	void *virt;
	uint64_t phys;
	ssize_t size;
};

struct fastrpc_ctx_lst;

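/*
 * One overlap record per remote buffer argument: [start, end) is the
 * user VA range and raix the original argument index; [mstart, mend)
 * is the de-overlapped sub-range that still needs copying once the
 * arguments are sorted (see context_build_overlap()).
 */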
struct overlap {
	uintptr_t start;
	uintptr_t end;
	int raix;
	uintptr_t mstart;
	uintptr_t mend;
	uintptr_t offset;
};

struct smq_invoke_ctx {
	struct hlist_node hn;
	struct completion work;
	int retval;
	int pid;
	int tgid;
	remote_arg_t *lpra;
	remote_arg64_t *rpra;
	int *fds;
	unsigned int *attrs;
	struct fastrpc_mmap **maps;
	struct fastrpc_buf *buf;
	ssize_t used;
	struct fastrpc_file *fl;
	uint32_t sc;
	struct overlap *overs;
	struct overlap **overps;
	struct smq_msg msg;
};

struct fastrpc_ctx_lst {
	struct hlist_head pending;
	struct hlist_head interrupted;
};

struct fastrpc_smmu {
	struct dma_iommu_mapping *mapping;
	int cb;
	int enabled;
	int faults;
	int secure;
	int coherent;
};

struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};

struct fastrpc_glink_info {
	int link_state;
	int port_state;
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};

struct fastrpc_channel_ctx {
	char *name;
	char *subsys;
	void *chan;
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;
	struct notifier_block nb;
	struct kref kref;
	int sesscount;
	int ssrcount;
	void *handle;
	int prevssrcount;
	int vmid;
	struct fastrpc_glink_info link;
};

struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;
	spinlock_t hlock;
	struct ion_client *client;
	struct device *dev;
};

struct fastrpc_mmap {
	struct hlist_node hn;
	struct fastrpc_file *fl;
	struct fastrpc_apps *apps;
	int fd;
	uint32_t flags;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;
	ssize_t size;
	uintptr_t va;
	ssize_t len;
	int refs;
	uintptr_t raddr;
	int uncached;
	int secure;
	uintptr_t attr;
};

struct fastrpc_perf {
	int64_t count;
	int64_t flush;
	int64_t map;
	int64_t copy;
	int64_t link;
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;
	int64_t invoke;
};

struct fastrpc_file {
	struct hlist_node hn;
	spinlock_t hlock;
	struct hlist_head maps;
	struct hlist_head bufs;
	struct fastrpc_ctx_lst clst;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_session_ctx *secsctx;
	uint32_t mode;
	uint32_t profile;
	int tgid;
	int cid;
	int ssrcount;
	int pd;
	struct fastrpc_apps *apps;
	struct fastrpc_perf perf;
};

static struct fastrpc_apps gfa;

static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};

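/* Nanoseconds elapsed since @start, used by the PERF() profiling hooks. */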
static inline int64_t getnstimediff(struct timespec *start)
{
	int64_t ns;
	struct timespec ts, b;

	getnstimeofday(&ts);
	b = timespec_sub(ts, *start);
	ns = timespec_to_ns(&b);
	return ns;
}

static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == 0 ? 0 : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->dev, buf->size, buf->virt,
					buf->phys);
	}
	kfree(buf);
}

static void fastrpc_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			hlist_del_init(&buf->hn);
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}

static void fastrpc_mmap_add(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl = map->fl;

	spin_lock(&fl->hlock);
	hlist_add_head(&map->hn, &fl->maps);
	spin_unlock(&fl->hlock);
}

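/*
 * Look up a cached mapping of @fd that covers [va, va + len) in this
 * file's map list, taking a reference when @refs is set; returns
 * -ENOTTY when no matching map exists.
 */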
static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
	ssize_t len, int mflags, int refs, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = 0, *map;
	struct hlist_node *n;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (va >= map->va &&
		    va + len <= map->va + map->len &&
		    map->fd == fd) {
			if (refs)
				map->refs++;
			match = map;
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       ssize_t len, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = 0, *map;
	struct hlist_node *n;
	struct fastrpc_apps *me = &gfa;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
		if (map->raddr == va &&
		    map->raddr + map->len == va + len &&
		    map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (map->raddr == va &&
		    map->raddr + map->len == va + len &&
		    map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

static void fastrpc_mmap_free(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;
	int destVM[1] = {VMID_HLOS};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	if (!map)
		return;
	fl = map->fl;
	spin_lock(&fl->hlock);
	map->refs--;
	if (!map->refs)
		hlist_del_init(&map->hn);
	spin_unlock(&fl->hlock);
	if (map->refs > 0)
		return;
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	if (!IS_ERR_OR_NULL(map->handle))
		ion_free(fl->apps->client, map->handle);
	if (sess->smmu.enabled) {
		if (map->size || map->phys)
			msm_dma_unmap_sg(sess->dev,
				map->table->sgl,
				map->table->nents, DMA_BIDIRECTIONAL,
				map->buf);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid && map->phys) {
		int srcVM[2] = {VMID_HLOS, vmid};

		hyp_assign_phys(map->phys, buf_page_size(map->size),
				srcVM, 2, destVM, destVMperm, 1);
	}

	if (!IS_ERR_OR_NULL(map->table))
		dma_buf_unmap_attachment(map->attach, map->table,
				DMA_BIDIRECTIONAL);
	if (!IS_ERR_OR_NULL(map->attach))
		dma_buf_detach(map->buf, map->attach);
	if (!IS_ERR_OR_NULL(map->buf))
		dma_buf_put(map->buf);
	kfree(map);
}

static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
			struct fastrpc_session_ctx **session);

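/*
 * Import the ion buffer behind @fd, attach and DMA-map it on the
 * session device, and, on channels that specify a vmid, hyp_assign the
 * pages to that VM. Reuses a cached mapping when fastrpc_mmap_find()
 * hits.
 */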
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, ssize_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = 0;
	unsigned long attrs;
	unsigned long flags;
	int err = 0, vmid;

	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	VERIFY(err, !IS_ERR_OR_NULL(map->handle =
			ion_import_dma_buf_fd(fl->apps->client, fd)));
	if (err)
		goto bail;
	VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
	if (err)
		goto bail;

	map->uncached = !ION_IS_CACHED(flags);
	if (map->attr & FASTRPC_ATTR_NOVA)
		map->uncached = 1;

	map->secure = flags & ION_FLAG_SECURE;
	if (map->secure) {
		if (!fl->secsctx)
			err = fastrpc_session_alloc(chan, 1,
						&fl->secsctx);
		if (err)
			goto bail;
	}
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->attach =
			dma_buf_attach(map->buf, sess->dev)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
	if (err)
		goto bail;
	if (sess->smmu.enabled) {
		attrs = DMA_ATTR_EXEC_MAPPING;
		VERIFY(err, map->table->nents ==
			msm_dma_map_sg_attrs(sess->dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
		if (err)
			goto bail;
	} else {
		VERIFY(err, map->table->nents == 1);
		if (err)
			goto bail;
	}
	map->phys = sg_dma_address(map->table->sgl);
	if (sess->smmu.cb) {
		map->phys += ((uint64_t)sess->smmu.cb << 32);
		map->size = sg_dma_len(map->table->sgl);
	} else {
		map->size = buf_page_size(len);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(map->phys,
				buf_page_size(map->size),
				srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}
	map->va = va;
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}

static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
			     struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = 0, *fr = 0;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = 0;
	VERIFY(err, buf = kzalloc(sizeof(*buf), GFP_KERNEL));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = 0;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
				       (void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
					       (void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
 bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}

static int context_restore_interrupted(struct fastrpc_file *fl,
				struct fastrpc_ioctl_invoke_attrs *inv,
				struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = 0, *ictx = 0;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int overlap_ptr_cmp(const void *a, const void *b)
{
	struct overlap *pa = *((struct overlap **)a);
	struct overlap *pb = *((struct overlap **)b);
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);
	return st == 0 ? ed : st;
}

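/*
 * Sort the buffer arguments by start address and compute, for each one,
 * the sub-range not already covered by an earlier (lower-starting,
 * longer) buffer, so overlapping user buffers are copied only once.
 */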
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, 0);
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}

#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst), (src),\
							(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((dst), (src),\
						      (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

static void context_free(struct smq_invoke_ctx *ctx);

static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			 struct fastrpc_ioctl_invoke_attrs *invokefd,
			 struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}

	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = current->tgid;
	init_completion(&ctx->work);

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}

static void context_save_interrupted(struct smq_invoke_ctx *ctx)
{
	struct fastrpc_ctx_lst *clst = &ctx->fl->clst;

	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	hlist_add_head(&ctx->hn, &clst->interrupted);
	spin_unlock(&ctx->fl->hlock);
	/* free the cache on power collapse */
	fastrpc_buf_list_free(ctx->fl);
}

static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i]);
	fastrpc_buf_free(ctx->buf, 1);
	kfree(ctx);
}

static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}

static void fastrpc_notify_users(struct fastrpc_file *me)
{
	struct smq_invoke_ctx *ictx;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
		complete(&ictx->work);
	}
	hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
		complete(&ictx->work);
	}
	spin_unlock(&me->hlock);
}

static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
{
	struct fastrpc_file *fl;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		if (fl->cid == cid)
			fastrpc_notify_users(fl);
	}
	spin_unlock(&me->hlock);
}

static void context_list_ctor(struct fastrpc_ctx_lst *me)
{
	INIT_HLIST_HEAD(&me->interrupted);
	INIT_HLIST_HEAD(&me->pending);
}

static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
{
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct smq_invoke_ctx *ictx = 0, *ctxfree;
	struct hlist_node *n;

	do {
		ctxfree = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
	do {
		ctxfree = 0;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
}

static int fastrpc_file_free(struct fastrpc_file *fl);
static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
{
	struct fastrpc_file *fl, *free;
	struct hlist_node *n;

	do {
		free = 0;
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
			hlist_del_init(&fl->hn);
			free = fl;
			break;
		}
		spin_unlock(&me->hlock);
		if (free)
			fastrpc_file_free(free);
	} while (free);
}

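/*
 * Marshal an invocation: map ion-backed arguments, lay out the metadata
 * (smq_invoke_buf list, physical page table, fd list) followed by the
 * copied non-ion buffers in one DMA buffer, then flush caches so the
 * DSP sees a coherent view.
 */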
static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
{
	remote_arg64_t *rpra;
	remote_arg_t *lpra = ctx->lpra;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages, *ipage;
	uint32_t sc = ctx->sc;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	int handles, bufs = inbufs + outbufs;
	uintptr_t args;
	ssize_t rlen = 0, copylen = 0, metalen = 0;
	int i, oix;
	int err = 0;
	int mflags = 0;
	uint64_t *fdlist;

	/* calculate size of the metadata */
	rpra = 0;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;

	for (i = 0; i < bufs; ++i) {
		uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
		ssize_t len = lpra[i].buf.len;

		if (ctx->fds[i] && (ctx->fds[i] != -1))
			fastrpc_mmap_create(ctx->fl, ctx->fds[i],
					ctx->attrs[i], buf, len,
					mflags, &ctx->maps[i]);
		ipage += 1;
	}
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	for (i = bufs; i < bufs + handles; i++) {
		VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
				FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
		if (err)
			goto bail;
		ipage += 1;
	}
	metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST);
	/* calculate len required for copying */
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		ssize_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (ctx->maps[i])
			continue;
		if (ctx->overps[oix]->offset == 0)
			copylen = ALIGN(copylen, BALIGN);
		copylen += ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
	}
	ctx->used = copylen;

	/* allocate new buffer */
	if (copylen) {
		VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
		if (err)
			goto bail;
	}
	/* copy metadata */
	rpra = ctx->buf->virt;
	ctx->rpra = rpra;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;
	args = (uintptr_t)ctx->buf->virt + metalen;
	for (i = 0; i < bufs + handles; ++i) {
		if (lpra[i].buf.len)
			list[i].num = 1;
		else
			list[i].num = 0;
		list[i].pgidx = ipage - pages;
		ipage++;
	}
	/* map ion buffers */
	PERF(ctx->fl->profile, ctx->fl->perf.map,
	for (i = 0; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];
		uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
		ssize_t len = lpra[i].buf.len;

		rpra[i].buf.pv = 0;
		rpra[i].buf.len = len;
		if (!len)
			continue;
		if (map) {
			struct vm_area_struct *vma;
			uintptr_t offset;
			int num = buf_num_pages(buf, len);
			int idx = list[i].pgidx;

			if (map->attr & FASTRPC_ATTR_NOVA) {
				offset = 0;
			} else {
				down_read(&current->mm->mmap_sem);
				VERIFY(err, NULL != (vma = find_vma(current->mm,
								map->va)));
				if (err) {
					up_read(&current->mm->mmap_sem);
					goto bail;
				}
				offset = buf_page_start(buf) - vma->vm_start;
				up_read(&current->mm->mmap_sem);
				VERIFY(err, offset < (uintptr_t)map->size);
				if (err)
					goto bail;
			}
			pages[idx].addr = map->phys + offset;
			pages[idx].size = num << PAGE_SHIFT;
		}
		rpra[i].buf.pv = buf;
	}
	PERF_END);
	for (i = bufs; i < bufs + handles; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		pages[i].addr = map->phys;
		pages[i].size = map->size;
	}
	fdlist = (uint64_t *)&pages[bufs + handles];
	for (i = 0; i < M_FDLIST; i++)
		fdlist[i] = 0;

	/* copy non-ion buffers */
	PERF(ctx->fl->profile, ctx->fl->perf.copy,
	rlen = copylen - metalen;
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];
		int mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
		uint64_t buf;
		ssize_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (map)
			continue;
		if (ctx->overps[oix]->offset == 0) {
			rlen -= ALIGN(args, BALIGN) - args;
			args = ALIGN(args, BALIGN);
		}
		VERIFY(err, rlen >= mlen);
		if (err)
			goto bail;
		rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
		pages[list[i].pgidx].addr = ctx->buf->phys -
					    ctx->overps[oix]->offset +
					    (copylen - rlen);
		pages[list[i].pgidx].addr =
			buf_page_start(pages[list[i].pgidx].addr);
		buf = rpra[i].buf.pv;
		pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
		if (i < inbufs) {
			K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
					lpra[i].buf.pv, len);
			if (err)
				goto bail;
		}
		args = args + mlen;
		rlen -= mlen;
	}
	PERF_END);

	PERF(ctx->fl->profile, ctx->fl->perf.flush,
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];

		if (ctx->fl->sctx->smmu.coherent)
			continue;
		if (map && map->uncached)
			continue;
		if (rpra[i].buf.len && ctx->overps[oix]->mstart)
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
			uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
	}
	PERF_END);
	for (i = bufs; i < bufs + handles; i++) {
		rpra[i].dma.fd = ctx->fds[i];
		rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
		rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
	}

	if (!ctx->fl->sctx->smmu.coherent) {
		PERF(ctx->fl->profile, ctx->fl->perf.flush,
		dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
		PERF_END);
	}
 bail:
	return err;
}

static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		    remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			fastrpc_mmap_free(ctx->maps[i]);
			ctx->maps[i] = 0;
		}
	}
	if (inbufs + outbufs + handles) {
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap);
		}
	}
 bail:
	return err;
}

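/*
 * Cache maintenance around the remote call: inv_args_pre() flushes the
 * unaligned edges of output buffers before the DSP writes to them, and
 * inv_args() invalidates cached output buffers afterwards so the CPU
 * reads the DSP's results rather than stale lines.
 */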
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
							rpra[i].buf.len);
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}

static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						 + rpra[i].buf.len));
	}

	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}

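/*
 * Queue the invoke message on the channel's glink transport; the low
 * bit of the context pointer carries the target protection domain
 * (fl->pd), which the response handler strips off again.
 */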
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, 0 != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = current->tgid;
	msg->tid = current->pid;
	if (kernel)
		msg->pid = 0;
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
			FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
 bail:
	return err;
}

static void fastrpc_init(struct fastrpc_apps *me)
{
	int i;

	INIT_HLIST_HEAD(&me->drivers);
	spin_lock_init(&me->hlock);
	mutex_init(&me->smd_mutex);
	me->channel = &gcinfo[0];
	for (i = 0; i < NUM_CHANNELS; i++) {
		init_completion(&me->channel[i].work);
		me->channel[i].sesscount = 0;
	}
}

static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);

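/*
 * Core invoke path: restore an interrupted context or allocate a new
 * one, marshal arguments, send the message over glink, wait for the
 * DSP's completion, then unmarshal results. A wait interrupted by a
 * signal parks the context on the interrupted list for a later retry.
 */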
static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
				   uint32_t kernel,
				   struct fastrpc_ioctl_invoke_attrs *inv)
{
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;
	int cid = fl->cid;
	int interrupted = 0;
	int err = 0;
	struct timespec invoket;

	if (fl->profile)
		getnstimeofday(&invoket);
	if (!kernel) {
		VERIFY(err, 0 == context_restore_interrupted(fl, inv,
								&ctx));
		if (err)
			goto bail;
		if (fl->sctx->smmu.faults)
			err = FASTRPC_ENOSUCH;
		if (err)
			goto bail;
		if (ctx)
			goto wait;
	}

	VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
	if (err)
		goto bail;

	if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
		PERF(fl->profile, fl->perf.getargs,
		VERIFY(err, 0 == get_args(kernel, ctx));
		PERF_END);
		if (err)
			goto bail;
	}

	PERF(fl->profile, fl->perf.invargs,
	if (!fl->sctx->smmu.coherent) {
		inv_args_pre(ctx);
		if (mode == FASTRPC_MODE_SERIAL)
			inv_args(ctx);
	}
	PERF_END);

	PERF(fl->profile, fl->perf.link,
	VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
	PERF_END);

	if (err)
		goto bail;
	PERF(fl->profile, fl->perf.invargs,
	if (mode == FASTRPC_MODE_PARALLEL && !fl->sctx->smmu.coherent)
		inv_args(ctx);
	PERF_END);
 wait:
	if (kernel)
		wait_for_completion(&ctx->work);
	else {
		interrupted = wait_for_completion_interruptible(&ctx->work);
		VERIFY(err, 0 == (err = interrupted));
		if (err)
			goto bail;
	}
	VERIFY(err, 0 == (err = ctx->retval));
	if (err)
		goto bail;

	PERF(fl->profile, fl->perf.putargs,
	VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
	PERF_END);
	if (err)
		goto bail;
 bail:
	if (ctx && interrupted == -ERESTARTSYS)
		context_save_interrupted(ctx);
	else if (ctx)
		context_free(ctx);
	if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
		err = ECONNRESET;

	if (fl->profile && !interrupted) {
		if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
			fl->perf.invoke += getnstimediff(&invoket);
		if (!(invoke->handle >= 0 &&
			invoke->handle <= FASTRPC_STATIC_HANDLE_MAX))
			fl->perf.count++;
	}
	return err;
}

static int fastrpc_init_process(struct fastrpc_file *fl,
				struct fastrpc_ioctl_init_attrs *uproc)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_attrs ioctl;
	struct fastrpc_ioctl_init *init = &uproc->init;
	struct smq_phy_page pages[1];
	struct fastrpc_mmap *file = 0, *mem = 0;

	if (init->flags == FASTRPC_INIT_ATTACH) {
		remote_arg_t ra[1];
		int tgid = current->tgid;

		ra[0].buf.pv = (void *)&tgid;
		ra[0].buf.len = sizeof(tgid);
		ioctl.inv.handle = 1;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = 0;
		ioctl.attrs = 0;
		fl->pd = 0;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else if (init->flags == FASTRPC_INIT_CREATE) {
		remote_arg_t ra[6];
		int fds[6];
		int mflags = 0;
		struct {
			int pgid;
			int namelen;
			int filelen;
			int pageslen;
			int attrs;
			int siglen;
		} inbuf;

		inbuf.pgid = current->tgid;
		inbuf.namelen = strlen(current->comm) + 1;
		inbuf.filelen = init->filelen;
		fl->pd = 1;
		if (init->filelen) {
			VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
				init->file, init->filelen, mflags, &file));
			if (err)
				goto bail;
		}
		inbuf.pageslen = 1;
		VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
				init->mem, init->memlen, mflags, &mem));
		if (err)
			goto bail;
		inbuf.pageslen = 1;
		ra[0].buf.pv = (void *)&inbuf;
		ra[0].buf.len = sizeof(inbuf);
		fds[0] = 0;

		ra[1].buf.pv = (void *)current->comm;
		ra[1].buf.len = inbuf.namelen;
		fds[1] = 0;

		ra[2].buf.pv = (void *)init->file;
		ra[2].buf.len = inbuf.filelen;
		fds[2] = init->filefd;

		pages[0].addr = mem->phys;
		pages[0].size = mem->size;
		ra[3].buf.pv = (void *)pages;
		ra[3].buf.len = 1 * sizeof(*pages);
		fds[3] = 0;

		inbuf.attrs = uproc->attrs;
		ra[4].buf.pv = (void *)&(inbuf.attrs);
		ra[4].buf.len = sizeof(inbuf.attrs);
		fds[4] = 0;

		inbuf.siglen = uproc->siglen;
		ra[5].buf.pv = (void *)&(inbuf.siglen);
		ra[5].buf.len = sizeof(inbuf.siglen);
		fds[5] = 0;

		ioctl.inv.handle = 1;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
		if (uproc->attrs)
			ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = fds;
		ioctl.attrs = 0;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else {
		err = -ENOTTY;
	}
bail:
	if (mem && err)
		fastrpc_mmap_free(mem);
	if (file)
		fastrpc_mmap_free(file);
	return err;
}

static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_attrs ioctl;
	remote_arg_t ra[1];
	int tgid = 0;

	VERIFY(err, fl->apps->channel[fl->cid].chan != 0);
	if (err)
		goto bail;
	tgid = fl->tgid;
	ra[0].buf.pv = (void *)&tgid;
	ra[0].buf.len = sizeof(tgid);
	ioctl.inv.handle = 1;
	ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
bail:
	return err;
}

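/*
 * Ask the remote process (static handle 1) to map a buffer into its
 * address space and return the remote address; the scalars differ when
 * the compat (32-bit userspace) path is in use.
 */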
static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
			       struct fastrpc_mmap *map)
{
	struct fastrpc_ioctl_invoke_attrs ioctl;
	struct smq_phy_page page;
	int num = 1;
	remote_arg_t ra[3];
	int err = 0;
	struct {
		int pid;
		uint32_t flags;
		uintptr_t vaddrin;
		int num;
	} inargs;
	struct {
		uintptr_t vaddrout;
	} routargs;

	inargs.pid = current->tgid;
	inargs.vaddrin = (uintptr_t)map->va;
	inargs.flags = flags;
	inargs.num = fl->apps->compat ? num * sizeof(page) : num;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);
	page.addr = map->phys;
	page.size = map->size;
	ra[1].buf.pv = (void *)&page;
	ra[1].buf.len = num * sizeof(page);

	ra[2].buf.pv = (void *)&routargs;
	ra[2].buf.len = sizeof(routargs);

	ioctl.inv.handle = 1;
	if (fl->apps->compat)
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
	else
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	map->raddr = (uintptr_t)routargs.vaddrout;

	return err;
}

static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
				 struct fastrpc_mmap *map)
{
	struct fastrpc_ioctl_invoke_attrs ioctl;
	remote_arg_t ra[1];
	int err = 0;
	struct {
		int pid;
		uintptr_t vaddrout;
		ssize_t size;
	} inargs;

	inargs.pid = current->tgid;
	inargs.size = map->size;
	inargs.vaddrout = map->raddr;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);

	ioctl.inv.handle = 1;
	if (fl->apps->compat)
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
	else
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	return err;
}

static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       ssize_t len, struct fastrpc_mmap **ppmap);

static void fastrpc_mmap_add(struct fastrpc_mmap *map);

static int fastrpc_internal_munmap(struct fastrpc_file *fl,
				   struct fastrpc_ioctl_munmap *ud)
{
	int err = 0;
	struct fastrpc_mmap *map = 0;

	VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
	if (err)
		goto bail;
	VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
	if (err)
		goto bail;
	fastrpc_mmap_free(map);
bail:
	if (err && map)
		fastrpc_mmap_add(map);
	return err;
}

static int fastrpc_internal_mmap(struct fastrpc_file *fl,
				 struct fastrpc_ioctl_mmap *ud)
{
	struct fastrpc_mmap *map = 0;
	int err = 0;

	if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin, ud->size,
			       ud->flags, 1, &map))
		return 0;

	VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
			(uintptr_t)ud->vaddrin, ud->size, ud->flags, &map));
	if (err)
		goto bail;
	VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
	if (err)
		goto bail;
	ud->vaddrout = map->raddr;
 bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}

static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = 0;
	mutex_unlock(&me->smd_mutex);
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
		MAJOR(me->dev_no), cid);
}

static void fastrpc_context_list_dtor(struct fastrpc_file *fl);

static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
			int secure, struct fastrpc_session_ctx **session)
{
	struct fastrpc_apps *me = &gfa;
	int idx = 0, err = 0;

	if (chan->sesscount) {
		for (idx = 0; idx < chan->sesscount; ++idx) {
			if (!chan->session[idx].used &&
				chan->session[idx].smmu.secure == secure) {
				chan->session[idx].used = 1;
				break;
			}
		}
		VERIFY(err, idx < chan->sesscount);
		if (err)
			goto bail;
		chan->session[idx].smmu.faults = 0;
	} else {
		VERIFY(err, me->dev != NULL);
		if (err)
			goto bail;
		chan->session[0].dev = me->dev;
	}

	*session = &chan->session[idx];
 bail:
	return err;
}

bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv, size_t size)
{
	if (glink_queue_rx_intent(h, NULL, size))
		return false;
	return true;
}

void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
		const void *pkt_priv, const void *ptr)
{
}

void fastrpc_glink_notify_rx(void *handle, const void *priv,
	const void *pkt_priv, const void *ptr, size_t size)
{
	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
	int len = size;

	while (len >= sizeof(*rsp) && rsp) {
		rsp->ctx = rsp->ctx & ~1;
		context_notify_user(uint64_to_ptr(rsp->ctx), rsp->retval);
		rsp++;
		len = len - sizeof(*rsp);
	}
	glink_rx_done(handle, ptr, true);
}

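/*
 * Track the glink port state for the channel; on a remote disconnect
 * with the link still up, close our end so a later open can reconnect.
 */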
1752void fastrpc_glink_notify_state(void *handle, const void *priv,
1753 unsigned int event)
1754{
1755 struct fastrpc_apps *me = &gfa;
1756 int cid = (int)(uintptr_t)priv;
1757 struct fastrpc_glink_info *link;
1758
1759 if (cid < 0 || cid >= NUM_CHANNELS)
1760 return;
1761 link = &me->channel[cid].link;
1762 switch (event) {
1763 case GLINK_CONNECTED:
1764 link->port_state = FASTRPC_LINK_CONNECTED;
1765 complete(&me->channel[cid].work);
1766 break;
1767 case GLINK_LOCAL_DISCONNECTED:
1768 link->port_state = FASTRPC_LINK_DISCONNECTED;
1769 break;
1770 case GLINK_REMOTE_DISCONNECTED:
1771 if (me->channel[cid].chan &&
1772 link->link_state == FASTRPC_LINK_STATE_UP) {
1773 fastrpc_glink_close(me->channel[cid].chan, cid);
1774 me->channel[cid].chan = 0;
1775 link->port_state = FASTRPC_LINK_DISCONNECTED;
1776 }
1777 break;
1778 default:
1779 break;
1780 }
1781}
1782
1783static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
1784 struct fastrpc_session_ctx **session)
1785{
1786 int err = 0;
1787 struct fastrpc_apps *me = &gfa;
1788
1789 mutex_lock(&me->smd_mutex);
1790 if (!*session)
1791 err = fastrpc_session_alloc_locked(chan, secure, session);
1792 mutex_unlock(&me->smd_mutex);
1793 return err;
1794}
1795
1796static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
1797 struct fastrpc_session_ctx *session)
1798{
1799 struct fastrpc_apps *me = &gfa;
1800
1801 mutex_lock(&me->smd_mutex);
1802 session->used = 0;
1803 mutex_unlock(&me->smd_mutex);
1804}
1805
1806static int fastrpc_file_free(struct fastrpc_file *fl)
1807{
1808 struct hlist_node *n;
1809 struct fastrpc_mmap *map = 0;
1810 int cid;
1811
1812 if (!fl)
1813 return 0;
1814 cid = fl->cid;
1815
1816 spin_lock(&fl->apps->hlock);
1817 hlist_del_init(&fl->hn);
1818 spin_unlock(&fl->apps->hlock);
1819
1820 (void)fastrpc_release_current_dsp_process(fl);
1821 fastrpc_context_list_dtor(fl);
1822 fastrpc_buf_list_free(fl);
1823 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
1824 fastrpc_mmap_free(map);
1825 }
1826 if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
1827 kref_put_mutex(&fl->apps->channel[cid].kref,
1828 fastrpc_channel_close, &fl->apps->smd_mutex);
1829 if (fl->sctx)
1830 fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
1831 if (fl->secsctx)
1832 fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
1833 kfree(fl);
1834 return 0;
1835}
1836
1837static int fastrpc_device_release(struct inode *inode, struct file *file)
1838{
1839 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
1840
1841 if (fl) {
1842 fastrpc_file_free(fl);
1843 file->private_data = 0;
1844 }
1845 return 0;
1846}
1847
1848static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
1849 void *priv)
1850{
1851 struct fastrpc_apps *me = &gfa;
1852 int cid = (int)((uintptr_t)priv);
1853 struct fastrpc_glink_info *link;
1854
1855 if (cid < 0 || cid >= NUM_CHANNELS)
1856 return;
1857
1858 link = &me->channel[cid].link;
1859 switch (cb_info->link_state) {
1860 case GLINK_LINK_STATE_UP:
1861 link->link_state = FASTRPC_LINK_STATE_UP;
1862 complete(&me->channel[cid].work);
1863 break;
1864 case GLINK_LINK_STATE_DOWN:
1865 link->link_state = FASTRPC_LINK_STATE_DOWN;
1866 break;
1867 default:
1868 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
1869 break;
1870 }
1871}
1872
1873static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
1874{
1875 int err = 0;
1876 struct fastrpc_glink_info *link;
1877
1878 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
1879 if (err)
1880 goto bail;
1881
1882 link = &me->channel[cid].link;
1883 if (link->link_notify_handle != NULL)
1884 goto bail;
1885
1886 link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
1887 link->link_notify_handle = glink_register_link_state_cb(
1888 &link->link_info,
1889 (void *)((uintptr_t)cid));
1890 VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
1891 if (err) {
1892 link->link_notify_handle = NULL;
1893 goto bail;
1894 }
1895 VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
1896 RPC_TIMEOUT));
1897bail:
1898 return err;
1899}
1900
1901static void fastrpc_glink_close(void *chan, int cid)
1902{
1903 int err = 0;
1904 struct fastrpc_glink_info *link;
1905
1906 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
1907 if (err)
1908 return;
1909 link = &gfa.channel[cid].link;
1910
1911 if (link->port_state == FASTRPC_LINK_CONNECTED ||
1912 link->port_state == FASTRPC_LINK_CONNECTING) {
1913 link->port_state = FASTRPC_LINK_DISCONNECTING;
1914 glink_close(chan);
1915 }
1916}
1917
static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &link->cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	if (link->port_state == FASTRPC_LINK_CONNECTED ||
		link->port_state == FASTRPC_LINK_CONNECTING)
		goto bail;

	link->port_state = FASTRPC_LINK_CONNECTING;
	cfg->priv = (void *)(uintptr_t)cid;
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err)
		goto bail;
	me->channel[cid].chan = handle;
bail:
	return err;
}

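/*
 * Open handler for /dev/adsprpc-*. The device minor number selects the
 * channel (adsp, mdsp, slpi, cdsp). The first opener of a channel
 * registers with G-Link, opens the port and takes the initial kref;
 * later openers only take an extra reference via kref_get_unless_zero().
 */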
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	int cid = MINOR(inode->i_rdev);
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl = 0;

	VERIFY(err, fl = kzalloc(sizeof(*fl), GFP_KERNEL));
	if (err)
		return err;

	filp->private_data = fl;

	mutex_lock(&me->smd_mutex);

	context_list_ctor(&fl->clst);
	spin_lock_init(&fl->hlock);
	INIT_HLIST_HEAD(&fl->maps);
	INIT_HLIST_HEAD(&fl->bufs);
	INIT_HLIST_NODE(&fl->hn);
	fl->tgid = current->tgid;
	fl->apps = me;
	fl->cid = cid;
	memset(&fl->perf, 0, sizeof(fl->perf));

	VERIFY(err, !fastrpc_session_alloc_locked(&me->channel[cid], 0,
							&fl->sctx));
	if (err)
		goto bail;
	fl->ssrcount = me->channel[cid].ssrcount;
	if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
	    (me->channel[cid].chan == 0)) {
		fastrpc_glink_register(cid, me);
		VERIFY(err, 0 == fastrpc_glink_open(cid));
		if (err)
			goto bail;

		VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
							RPC_TIMEOUT));
		if (err) {
			me->channel[cid].chan = 0;
			goto bail;
		}
		kref_init(&me->channel[cid].kref);
		pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
						MAJOR(me->dev_no), cid);
		if (me->channel[cid].ssrcount !=
				me->channel[cid].prevssrcount) {
			me->channel[cid].prevssrcount =
					me->channel[cid].ssrcount;
		}
	}
	spin_lock(&me->hlock);
	hlist_add_head(&fl->hn, &me->drivers);
	spin_unlock(&me->hlock);

bail:
	mutex_unlock(&me->smd_mutex);

	if (err)
		fastrpc_device_release(inode, filp);
	return err;
}

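/* Tell user space whether this file's session sits behind an SMMU. */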
static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
{
	int err = 0;

	VERIFY(err, fl && fl->sctx);
	if (err)
		goto bail;
	*info = (fl->sctx->smmu.enabled ? 1 : 0);
bail:
	return err;
}

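/*
 * Main ioctl dispatcher. The INVOKE/INVOKE_FD/INVOKE_ATTRS and
 * INIT/INIT_ATTRS cases fall through on purpose: each variant only
 * appends fields to the previous argument layout, so the smaller
 * structures are copied with a correspondingly smaller size.
 *
 * Hypothetical user-space sketch (uapi names assumed to come from the
 * shared fastrpc header; device name and error handling illustrative):
 *
 *	int fd = open("/dev/adsprpc-smd", O_RDWR);
 *	struct fastrpc_ioctl_invoke_fd inv_fd = { ... };
 *
 *	ioctl(fd, FASTRPC_IOCTL_SETMODE, FASTRPC_MODE_PARALLEL);
 *	ioctl(fd, FASTRPC_IOCTL_INVOKE_FD, &inv_fd);
 */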
static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
				 unsigned long ioctl_param)
{
	union {
		struct fastrpc_ioctl_invoke_attrs inv;
		struct fastrpc_ioctl_mmap mmap;
		struct fastrpc_ioctl_munmap munmap;
		struct fastrpc_ioctl_init_attrs init;
		struct fastrpc_ioctl_perf perf;
	} p;
	void *param = (char *)ioctl_param;
	struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
	int size = 0, err = 0;
	uint32_t info;

	p.inv.fds = 0;
	p.inv.attrs = 0;

	switch (ioctl_num) {
	case FASTRPC_IOCTL_INVOKE:
		size = sizeof(struct fastrpc_ioctl_invoke);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_FD:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_fd);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_ATTRS:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_attrs);
		VERIFY(err, 0 == copy_from_user(&p.inv, param, size));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
						0, &p.inv)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MMAP:
		VERIFY(err, 0 == copy_from_user(&p.mmap, param,
						sizeof(p.mmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == copy_to_user(param, &p.mmap, sizeof(p.mmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MUNMAP:
		VERIFY(err, 0 == copy_from_user(&p.munmap, param,
						sizeof(p.munmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
							&p.munmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_SETMODE:
		switch ((uint32_t)ioctl_param) {
		case FASTRPC_MODE_PARALLEL:
		case FASTRPC_MODE_SERIAL:
			fl->mode = (uint32_t)ioctl_param;
			break;
		case FASTRPC_MODE_PROFILE:
			fl->profile = (uint32_t)ioctl_param;
			break;
		default:
			err = -ENOTTY;
			break;
		}
		break;
	case FASTRPC_IOCTL_GETPERF:
		VERIFY(err, 0 == copy_from_user(&p.perf,
					param, sizeof(p.perf)));
		if (err)
			goto bail;
		p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
		if (p.perf.keys) {
			char *keys = PERF_KEYS;

			VERIFY(err, 0 == copy_to_user((char *)p.perf.keys,
						keys, strlen(keys)+1));
			if (err)
				goto bail;
		}
		if (p.perf.data) {
			VERIFY(err, 0 == copy_to_user((int64_t *)p.perf.data,
						&fl->perf, sizeof(fl->perf)));
		}
		VERIFY(err, 0 == copy_to_user(param, &p.perf, sizeof(p.perf)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_GETINFO:
		VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
		if (err)
			goto bail;
		VERIFY(err, 0 == copy_to_user(param, &info, sizeof(info)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_INIT:
		p.init.attrs = 0;
		p.init.siglen = 0;
		size = sizeof(struct fastrpc_ioctl_init);
		/* fall through */
	case FASTRPC_IOCTL_INIT_ATTRS:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_init_attrs);
		VERIFY(err, 0 == copy_from_user(&p.init, param, size));
		if (err)
			goto bail;
		VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
		if (err)
			goto bail;
		break;

	default:
		err = -ENOTTY;
		pr_info("bad ioctl: %u\n", ioctl_num);
		break;
	}
bail:
	return err;
}

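/*
 * Subsystem-restart notifier, one per channel. On SUBSYS_BEFORE_SHUTDOWN
 * the channel's ssr count is bumped and its G-Link port closed, after
 * which fastrpc_notify_drivers() lets open files on that channel react
 * to the restart.
 */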
static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
					unsigned long code,
					void *data)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
	cid = ctx - &me->channel[0];
	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		mutex_lock(&me->smd_mutex);
		ctx->ssrcount++;
		if (ctx->chan) {
			fastrpc_glink_close(ctx->chan, cid);
			ctx->chan = 0;
			pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
				gcinfo[cid].name, MAJOR(me->dev_no), cid);
		}
		mutex_unlock(&me->smd_mutex);
		fastrpc_notify_drivers(me, cid);
	}

	return NOTIFY_DONE;
}

static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};

static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};

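/*
 * Probe one SMMU context bank ("qcom,msm-fastrpc-compute-cb" node).
 * The "label" property picks the owning channel; the new session gets
 * an ARM IOMMU mapping (base 0x60000000 for secure banks, 0x80000000
 * otherwise) and the device is attached to it.
 */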
static int fastrpc_cb_probe(struct device *dev)
{
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	struct of_phandle_args iommuspec;
	const char *name;
	unsigned int start = 0x80000000;
	int err = 0, i;
	int secure_vmid = VMID_CP_PIXEL;

	VERIFY(err, 0 != (name = of_get_property(dev->of_node, "label", NULL)));
	if (err)
		goto bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		if (!strcmp(name, gcinfo[i].name))
			break;
	}
	VERIFY(err, i < NUM_CHANNELS);
	if (err)
		goto bail;
	chan = &gcinfo[i];
	VERIFY(err, chan->sesscount < NUM_SESSIONS);
	if (err)
		goto bail;

	VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
						"#iommu-cells", 0, &iommuspec));
	if (err)
		goto bail;
	sess = &chan->session[chan->sesscount];
	sess->smmu.cb = iommuspec.args[0] & 0xf;
	sess->used = 0;
	sess->smmu.coherent = of_property_read_bool(dev->of_node,
						"dma-coherent");
	sess->smmu.secure = of_property_read_bool(dev->of_node,
						"qcom,secure-context-bank");
	if (sess->smmu.secure)
		start = 0x60000000;
	VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
				arm_iommu_create_mapping(&platform_bus_type,
						start, 0x7fffffff)));
	if (err)
		goto bail;

	if (sess->smmu.secure)
		iommu_domain_set_attr(sess->smmu.mapping->domain,
				DOMAIN_ATTR_SECURE_VMID,
				&secure_vmid);

	VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
	if (err)
		goto bail;
	sess->dev = dev;
	sess->smmu.enabled = 1;
	chan->sesscount++;
bail:
	return err;
}

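/*
 * Top-level platform probe. Context banks are handed to
 * fastrpc_cb_probe(); a "qcom,msm-adsprpc-mem-region" node donates the
 * ADSP ion heap's CMA region to the DSP subsystems via
 * hyp_assign_phys(); any other match simply populates its child nodes.
 */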
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
					VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					};

			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
					srcVM, 1, destVM, destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					fastrpc_match_table,
					NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}

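/*
 * Undo fastrpc_device_init(): drop each channel reference that is still
 * held and detach and release every SMMU session mapping.
 */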
static void fastrpc_deinit(void)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *chan = gcinfo;
	int i, j;

	for (i = 0; i < NUM_CHANNELS; i++, chan++) {
		if (chan->chan) {
			kref_put_mutex(&chan->kref,
				fastrpc_channel_close, &me->smd_mutex);
			chan->chan = 0;
		}
		for (j = 0; j < NUM_SESSIONS; j++) {
			struct fastrpc_session_ctx *sess = &chan->session[j];

			if (sess->smmu.enabled) {
				arm_iommu_detach_device(sess->dev);
				sess->dev = 0;
			}
			if (sess->smmu.mapping) {
				arm_iommu_release_mapping(sess->smmu.mapping);
				sess->smmu.mapping = 0;
			}
		}
	}
}

static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};

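/*
 * Module init: register the platform driver, create one char device per
 * channel under the "fastrpc" class and hook each channel into
 * subsystem-restart notifications. Error paths unwind in reverse order
 * through the *_bail labels.
 */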
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				NUM_CHANNELS));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		me->channel[i].dev = device_create(me->class, NULL,
					MKDEV(MAJOR(me->dev_no), i),
					NULL, gcinfo[i].name);
		VERIFY(err, !IS_ERR(me->channel[i].dev));
		if (err)
			goto device_create_bail;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	return 0;
device_create_bail:
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (IS_ERR_OR_NULL(me->channel[i].dev))
			continue;
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						&me->channel[i].nb);
	}
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}

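/*
 * Module exit: free every open file, tear down channels and SMMU
 * sessions, then unwind the char devices, class, chrdev region and ion
 * client created in fastrpc_device_init().
 */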
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;
	int i;

	fastrpc_file_list_dtor(me);
	fastrpc_deinit();
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						&me->channel[i].nb);
	}
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
	ion_client_destroy(me->client);
}

late_initcall(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");