blob: 89bf7c6259a2c7aa66e4bddd4b396d19173d0279 [file] [log] [blame]
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001/*
Sathish Ambleyae5ee542017-01-16 22:24:23 -08002 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
29#include <soc/qcom/subsystem_notif.h>
30#include <soc/qcom/subsystem_restart.h>
31#include <linux/scatterlist.h>
32#include <linux/fs.h>
33#include <linux/uaccess.h>
34#include <linux/device.h>
35#include <linux/of.h>
36#include <linux/of_address.h>
37#include <linux/of_platform.h>
38#include <linux/dma-contiguous.h>
39#include <linux/cma.h>
40#include <linux/iommu.h>
41#include <linux/kref.h>
42#include <linux/sort.h>
43#include <linux/msm_dma_iommu_mapping.h>
44#include <asm/dma-iommu.h>
45#include "adsprpc_compat.h"
46#include "adsprpc_shared.h"
Sathish Ambley1ca68232017-01-19 10:32:55 -080047#include <linux/debugfs.h>
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053048#include <linux/pm_qos.h>
/* SCM/TZ subsystem ids used to (un)protect PIL memory for the DSP. */
#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
#define TZ_PIL_AUTH_QDSP6_PROC 1
#define FASTRPC_ENOSUCH 39
#define VMID_SSC_Q6     5
#define VMID_ADSP_Q6    6
#define DEBUGFS_SIZE 1024

#define RPC_TIMEOUT	(5 * HZ)
#define BALIGN		128
#define NUM_CHANNELS	4	/* adsp, mdsp, slpi, cdsp*/
#define NUM_SESSIONS	9	/*8 compute, 1 cpz*/
/* sizes of the fd / crc lists appended to the invoke metadata buffer */
#define M_FDLIST	(16)
#define M_CRCLIST	(64)
#define SESSION_ID_INDEX (30)

#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)

/* glink link and port connection states (see struct fastrpc_glink_info) */
#define FASTRPC_LINK_STATE_DOWN   (0x0)
#define FASTRPC_LINK_STATE_UP     (0x1)
#define FASTRPC_LINK_DISCONNECTED (0x0)
#define FASTRPC_LINK_CONNECTING   (0x1)
#define FASTRPC_LINK_CONNECTED    (0x3)
#define FASTRPC_LINK_DISCONNECTING (0x7)

/* key names for the per-call perf counters (struct fastrpc_perf order) */
#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
#define FASTRPC_STATIC_HANDLE_LISTENER (3)
#define FASTRPC_STATIC_HANDLE_MAX (20)
#define FASTRPC_LATENCY_CTRL_ENB  (1)

#define PERF_END (void)0

/*
 * Time the statement(s) ff when profiling is enabled (enb) and
 * accumulate the elapsed nanoseconds into cnt.  The invocation is
 * closed with PERF_END so ff can be a multi-statement block.
 */
#define PERF(enb, cnt, ff) \
	{\
		struct timespec startT = {0};\
		if (enb) {\
			getnstimeofday(&startT);\
		} \
		ff ;\
		if (enb) {\
			cnt += getnstimediff(&startT);\
		} \
	}
92
/* glink channel open/close helpers (defined later in this file) */
static int fastrpc_glink_open(int cid);
static void fastrpc_glink_close(void *chan, int cid);
/* debugfs handles for the driver's debug interface */
static struct dentry *debugfs_root;
static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -070097
98static inline uint64_t buf_page_start(uint64_t buf)
99{
100 uint64_t start = (uint64_t) buf & PAGE_MASK;
101 return start;
102}
103
104static inline uint64_t buf_page_offset(uint64_t buf)
105{
106 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
107 return offset;
108}
109
110static inline int buf_num_pages(uint64_t buf, ssize_t len)
111{
112 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
113 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
114 int nPages = end - start + 1;
115 return nPages;
116}
117
118static inline uint64_t buf_page_size(uint32_t size)
119{
120 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
121
122 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
123}
124
/* Convert a 64-bit integer address back into a kernel pointer. */
static inline void *uint64_to_ptr(uint64_t addr)
{
	return (void *)(uintptr_t)addr;
}
131
/* Widen a kernel pointer into a 64-bit integer address. */
static inline uint64_t ptr_to_uint64(void *ptr)
{
	return (uint64_t)(uintptr_t)ptr;
}
138
struct fastrpc_file;

/* One DMA-coherent buffer owned by a client file (cached on fl->bufs). */
struct fastrpc_buf {
	struct hlist_node hn;		/* link on fl->bufs cache list */
	struct fastrpc_file *fl;	/* owning client */
	void *virt;			/* kernel virtual address */
	uint64_t phys;			/* device address; SMMU cb folded into bits 32+ */
	ssize_t size;
};

struct fastrpc_ctx_lst;

/* Overlap bookkeeping for one invoke argument buffer (see
 * context_build_overlap()): [mstart, mend) is the part that must
 * actually be copied once overlaps with earlier buffers are removed.
 */
struct overlap {
	uintptr_t start;	/* user buffer start address */
	uintptr_t end;		/* user buffer end (exclusive) */
	int raix;		/* remote-arg index this record came from */
	uintptr_t mstart;	/* copy-region start (0/0 if fully contained) */
	uintptr_t mend;		/* copy-region end */
	uintptr_t offset;	/* bytes of this buffer covered by an earlier one */
};
159
/* Per-invocation state for one remote procedure call. */
struct smq_invoke_ctx {
	struct hlist_node hn;		/* on clst->pending or clst->interrupted */
	struct completion work;		/* completed when the DSP replies */
	int retval;			/* remote return value (-1 until reply) */
	int pid;
	int tgid;
	remote_arg_t *lpra;		/* local (caller-supplied) args */
	remote_arg64_t *rpra;		/* 64-bit remote args, inside ctx->buf */
	int *fds;			/* per-arg ion fds (0 / -1 = none) */
	unsigned int *attrs;		/* per-arg FASTRPC_ATTR_* flags */
	struct fastrpc_mmap **maps;	/* per-arg mappings */
	struct fastrpc_buf *buf;	/* metadata + argument copy buffer */
	ssize_t used;			/* bytes of buf consumed for copies */
	struct fastrpc_file *fl;
	uint32_t sc;			/* scalars word describing the call */
	struct overlap *overs;		/* per-buffer overlap records */
	struct overlap **overps;	/* overs sorted by start address */
	struct smq_msg msg;
	uint32_t *crc;			/* optional user crc-list pointer */
};

/* Pending/interrupted invoke contexts for one client. */
struct fastrpc_ctx_lst {
	struct hlist_head pending;
	struct hlist_head interrupted;
};

/* SMMU context-bank configuration for one session. */
struct fastrpc_smmu {
	struct dma_iommu_mapping *mapping;
	int cb;			/* context bank id, folded into device addresses */
	int enabled;
	int faults;
	int secure;
	int coherent;
};

/* One DSP session: a device plus its SMMU settings. */
struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};

/* G-Link transport bookkeeping for one channel. */
struct fastrpc_glink_info {
	int link_state;		/* FASTRPC_LINK_STATE_* */
	int port_state;		/* FASTRPC_LINK_* connection state */
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};
208
/* Per-DSP-channel state: glink transport, sessions and SSR tracking. */
struct fastrpc_channel_ctx {
	char *name;		/* glink channel name (e.g. "adsprpc-smd") */
	char *subsys;		/* subsystem name for restart notifications */
	void *chan;		/* open glink channel handle */
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;
	struct completion workport;
	struct notifier_block nb;	/* subsystem-restart notifier */
	struct kref kref;		/* channel open refcount */
	int sesscount;
	int ssrcount;			/* bumped per subsystem restart */
	void *handle;
	int prevssrcount;
	int vmid;			/* remote vmid for hyp_assign sharing (0 = none) */
	struct fastrpc_glink_info link;
};

/* Global driver state; single instance: gfa. */
struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;		/* global mappings, guarded by hlock */
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;	/* open client files, guarded by hlock */
	spinlock_t hlock;
	struct ion_client *client;
	struct device *dev;
	unsigned int latency;		/* pm_qos latency request value */
};

/* One ion/dma-buf mapping made visible to the DSP. */
struct fastrpc_mmap {
	struct hlist_node hn;		/* on fl->maps or gfa.maps */
	struct fastrpc_file *fl;	/* owning client */
	struct fastrpc_apps *apps;
	int fd;				/* ion fd the buffer was imported from */
	uint32_t flags;			/* mmap flags (mflags) */
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;			/* device address; SMMU cb in bits 32+ */
	ssize_t size;			/* mapped size */
	uintptr_t va;			/* user virtual address */
	ssize_t len;			/* caller-requested length */
	int refs;			/* guarded by fl->hlock */
	uintptr_t raddr;		/* remote-side address after mmap */
	int uncached;
	int secure;			/* buffer carries ION_FLAG_SECURE */
	uintptr_t attr;			/* FASTRPC_ATTR_* */
};
263
/* Per-call latency counters (ns), accumulated via the PERF() macro. */
struct fastrpc_perf {
	int64_t count;
	int64_t flush;
	int64_t map;
	int64_t copy;
	int64_t link;
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;
	int64_t invoke;
};

/* Per-open-fd client state. */
struct fastrpc_file {
	struct hlist_node hn;		/* on gfa.drivers */
	spinlock_t hlock;		/* guards maps/bufs/clst lists */
	struct hlist_head maps;		/* client's fastrpc_mmap list */
	struct hlist_head bufs;		/* cached fastrpc_buf list */
	struct fastrpc_ctx_lst clst;	/* pending/interrupted invokes */
	struct fastrpc_session_ctx *sctx;	/* normal session */
	struct fastrpc_session_ctx *secsctx;	/* session for secure buffers */
	uint32_t mode;
	uint32_t profile;		/* non-zero enables PERF() timing */
	int sessionid;
	int tgid;			/* client thread-group id */
	int cid;			/* channel id into apps->channel[] */
	int ssrcount;
	int pd;
	int file_close;
	struct fastrpc_apps *apps;
	struct fastrpc_perf perf;
	struct dentry *debugfs_file;
	struct pm_qos_request pm_qos_req;
	int qos_request;
};
298
static struct fastrpc_apps gfa;

/* Static channel table, indexed by channel id (adsp, mdsp, sdsp, cdsp). */
static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};
327
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800328static inline int64_t getnstimediff(struct timespec *start)
329{
330 int64_t ns;
331 struct timespec ts, b;
332
333 getnstimeofday(&ts);
334 b = timespec_sub(ts, *start);
335 ns = timespec_to_ns(&b);
336 return ns;
337}
338
/*
 * Release a DMA buffer.  With @cache set, the buffer is parked on the
 * owner's bufs list for reuse instead of being freed.  Otherwise the
 * pages are hyp-assigned back to HLOS (if the channel shares memory
 * with a remote vmid) and the coherent allocation is returned.
 */
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == 0 ? 0 : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		/* park on the per-file cache for later reuse */
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		/* strip the SMMU context bank folded into the upper bits */
		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			/* return shared pages to HLOS-only ownership */
			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->dev, buf->size, buf->virt,
					buf->phys);
	}
	kfree(buf);
}
370
371static void fastrpc_buf_list_free(struct fastrpc_file *fl)
372{
373 struct fastrpc_buf *buf, *free;
374
375 do {
376 struct hlist_node *n;
377
378 free = 0;
379 spin_lock(&fl->hlock);
380 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
381 hlist_del_init(&buf->hn);
382 free = buf;
383 break;
384 }
385 spin_unlock(&fl->hlock);
386 if (free)
387 fastrpc_buf_free(free, 0);
388 } while (free);
389}
390
391static void fastrpc_mmap_add(struct fastrpc_mmap *map)
392{
393 struct fastrpc_file *fl = map->fl;
394
395 spin_lock(&fl->hlock);
396 hlist_add_head(&map->hn, &fl->maps);
397 spin_unlock(&fl->hlock);
398}
399
400static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800401 ssize_t len, int mflags, int refs, struct fastrpc_mmap **ppmap)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700402{
403 struct fastrpc_mmap *match = 0, *map;
404 struct hlist_node *n;
405
406 spin_lock(&fl->hlock);
407 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
408 if (va >= map->va &&
409 va + len <= map->va + map->len &&
410 map->fd == fd) {
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800411 if (refs)
412 map->refs++;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700413 match = map;
414 break;
415 }
416 }
417 spin_unlock(&fl->hlock);
418 if (match) {
419 *ppmap = match;
420 return 0;
421 }
422 return -ENOTTY;
423}
424
425static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
426 ssize_t len, struct fastrpc_mmap **ppmap)
427{
428 struct fastrpc_mmap *match = 0, *map;
429 struct hlist_node *n;
430 struct fastrpc_apps *me = &gfa;
431
432 spin_lock(&me->hlock);
433 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
434 if (map->raddr == va &&
435 map->raddr + map->len == va + len &&
436 map->refs == 1) {
437 match = map;
438 hlist_del_init(&map->hn);
439 break;
440 }
441 }
442 spin_unlock(&me->hlock);
443 if (match) {
444 *ppmap = match;
445 return 0;
446 }
447 spin_lock(&fl->hlock);
448 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
449 if (map->raddr == va &&
450 map->raddr + map->len == va + len &&
451 map->refs == 1) {
452 match = map;
453 hlist_del_init(&map->hn);
454 break;
455 }
456 }
457 spin_unlock(&fl->hlock);
458 if (match) {
459 *ppmap = match;
460 return 0;
461 }
462 return -ENOTTY;
463}
464
/*
 * Drop one reference on @map and, when it was the last, tear the
 * mapping down completely: release the ion handle, unmap from the
 * session SMMU, return hyp-assigned pages to HLOS, and detach/put the
 * dma-buf.  Safe to call with a NULL or partially-constructed map
 * (each teardown step checks its own resource).
 */
static void fastrpc_mmap_free(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;
	int destVM[1] = {VMID_HLOS};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	if (!map)
		return;
	fl = map->fl;
	/* refcount is guarded by the owner's hlock */
	spin_lock(&fl->hlock);
	map->refs--;
	if (!map->refs)
		hlist_del_init(&map->hn);
	spin_unlock(&fl->hlock);
	if (map->refs > 0)
		return;
	/* secure buffers were mapped through the secure session */
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	if (!IS_ERR_OR_NULL(map->handle))
		ion_free(fl->apps->client, map->handle);
	if (sess && sess->smmu.enabled) {
		if (map->size || map->phys)
			msm_dma_unmap_sg(sess->dev,
				map->table->sgl,
				map->table->nents, DMA_BIDIRECTIONAL,
				map->buf);
	}
	/* return remote-shared pages to HLOS-only ownership */
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid && map->phys) {
		int srcVM[2] = {VMID_HLOS, vmid};

		hyp_assign_phys(map->phys, buf_page_size(map->size),
			srcVM, 2, destVM, destVMperm, 1);
	}

	if (!IS_ERR_OR_NULL(map->table))
		dma_buf_unmap_attachment(map->attach, map->table,
				DMA_BIDIRECTIONAL);
	if (!IS_ERR_OR_NULL(map->attach))
		dma_buf_detach(map->buf, map->attach);
	if (!IS_ERR_OR_NULL(map->buf))
		dma_buf_put(map->buf);
	kfree(map);
}
514
static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
			struct fastrpc_session_ctx **session);

/*
 * Import the ion buffer behind @fd and map it for DSP access.  An
 * existing mapping that already covers [va, va + len) for the same fd
 * is reused (its refcount bumped).  Secure (ION_FLAG_SECURE) buffers
 * go through a lazily-allocated secure session; channels with a vmid
 * get the pages hyp-assigned so the remote subsystem can access them.
 * On any failure the partially built map is torn down via
 * fastrpc_mmap_free().  Returns 0 with *ppmap set, or a negative error.
 */
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, ssize_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = 0;
	unsigned long attrs;
	unsigned long flags;
	int err = 0, vmid;

	/* reuse a matching mapping if one exists (takes a reference) */
	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	VERIFY(err, !IS_ERR_OR_NULL(map->handle =
			ion_import_dma_buf_fd(fl->apps->client, fd)));
	if (err)
		goto bail;
	VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
	if (err)
		goto bail;

	map->uncached = !ION_IS_CACHED(flags);
	if (map->attr & FASTRPC_ATTR_NOVA)
		map->uncached = 1;

	map->secure = flags & ION_FLAG_SECURE;
	if (map->secure) {
		/* secure buffers need the (lazily created) secure session */
		if (!fl->secsctx)
			err = fastrpc_session_alloc(chan, 1,
						&fl->secsctx);
		if (err)
			goto bail;
	}
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;
	VERIFY(err, !IS_ERR_OR_NULL(sess));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->attach =
			dma_buf_attach(map->buf, sess->dev)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
	if (err)
		goto bail;
	if (sess->smmu.enabled) {
		attrs = DMA_ATTR_EXEC_MAPPING;

		/* pick coherency overrides from the buffer attributes */
		if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
			(sess->smmu.coherent && map->uncached))
			attrs |= DMA_ATTR_FORCE_NON_COHERENT;
		else if (map->attr & FASTRPC_ATTR_COHERENT)
			attrs |= DMA_ATTR_FORCE_COHERENT;

		VERIFY(err, map->table->nents ==
			msm_dma_map_sg_attrs(sess->dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
		if (err)
			goto bail;
	} else {
		/* without an SMMU only physically contiguous buffers work */
		VERIFY(err, map->table->nents == 1);
		if (err)
			goto bail;
	}
	map->phys = sg_dma_address(map->table->sgl);
	if (sess->smmu.cb) {
		/* fold the context bank into the upper address bits */
		map->phys += ((uint64_t)sess->smmu.cb << 32);
		map->size = sg_dma_len(map->table->sgl);
	} else {
		map->size = buf_page_size(len);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		/* share the pages with the remote subsystem */
		VERIFY(err, !hyp_assign_phys(map->phys,
				buf_page_size(map->size),
				srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}
	map->va = va;
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}
634
/*
 * Get a DMA-coherent buffer of at least @size bytes for @fl.  The
 * smallest cached buffer that fits is reused when available; otherwise
 * a fresh allocation is made, retrying once after draining the cache.
 * Channels with a remote vmid get the pages hyp-assigned for sharing.
 * Returns 0 with *obuf set, or a negative error.
 */
static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
				struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = 0, *fr = 0;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = 0;
	VERIFY(err, buf = kzalloc(sizeof(*buf), GFP_KERNEL));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = 0;
	buf->phys = 0;
	buf->size = size;
	/*
	 * NOTE(review): &buf->phys (uint64_t *) is passed as the
	 * dma_addr_t out-pointer — assumes dma_addr_t is 64-bit on this
	 * platform; confirm against the target config.
	 */
	buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
				       (void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
					       (void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	/* fold the SMMU context bank into the upper address bits */
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
				srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}
700
701
/*
 * If the calling thread has a previously interrupted invoke on this
 * file, move it back to the pending list and hand it out through @po
 * so the call resumes instead of being re-issued.  Returns -1 when the
 * thread's interrupted context does not match the new request (wrong
 * scalars or wrong file); returns 0 otherwise, with *po left untouched
 * if the thread had no interrupted context at all.
 */
static int context_restore_interrupted(struct fastrpc_file *fl,
				struct fastrpc_ioctl_invoke_crc *inv,
				struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = 0, *ictx = 0;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				/* resume: move back onto the pending list */
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}
729
730#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
731static int overlap_ptr_cmp(const void *a, const void *b)
732{
733 struct overlap *pa = *((struct overlap **)a);
734 struct overlap *pb = *((struct overlap **)b);
735 /* sort with lowest starting buffer first */
736 int st = CMP(pa->start, pb->start);
737 /* sort with highest ending buffer first */
738 int ed = CMP(pb->end, pa->end);
739 return st == 0 ? ed : st;
740}
741
/*
 * For every in/out buffer of @ctx, compute the sub-range that is NOT
 * already covered by an earlier (lower-starting) buffer.  overps[] is
 * sorted by start address (ties: larger buffer first), so one sweep
 * with a running maximum end suffices.  [mstart, mend) is what must
 * actually be copied; a buffer wholly contained in a previous one gets
 * mstart = mend = 0.  Returns 0 on success or a VERIFY error when a
 * buffer's end wraps around the address space.
 */
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		/* reject non-empty ranges whose end wraps around */
		if (lpra[i].buf.len) {
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, 0);
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			/* overlaps an earlier buffer: copy only the tail */
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				/* fully contained: nothing to copy */
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			/* disjoint: copy the whole buffer */
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}
787
/*
 * Copy between user and kernel memory.  For user-space callers
 * (!kernel) the copy goes through copy_from_user/copy_to_user and a
 * failure sets err via VERIFY; for in-kernel callers a plain memmove
 * suffices (src and dst may overlap).
 */
#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst), (src),\
							(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((dst), (src),\
						      (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)
805
806
static void context_free(struct smq_invoke_ctx *ctx);

/*
 * Allocate and initialise an invoke context for one remote call.  The
 * context and all per-argument arrays (maps, lpra, fds, attrs, overs,
 * overps) live in a single kzalloc'd slab laid out back to back behind
 * the struct.  Caller-supplied arg/fd/attr arrays are copied in (or
 * memmove'd for kernel callers), overlap data is precomputed for
 * buffer args, and the context is queued on the file's pending list.
 * Returns 0 with *po set, or a negative error (partially built
 * contexts are released via context_free()).
 */
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			struct fastrpc_ioctl_invoke_crc *invokefd,
			struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	/*
	 * Total argument count (buffers + handles) from the scalars
	 * word.  NOTE(review): size is an int — assumes the bit-field
	 * widths in sc bound bufs small enough that this cannot
	 * overflow; confirm against REMOTE_SCALARS_LENGTH().
	 */
	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	/* carve the trailing slab into the per-argument arrays */
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}
	ctx->crc = (uint32_t *)invokefd->crc;
	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;	/* -1 until the DSP replies */
	ctx->pid = current->pid;
	ctx->tgid = fl->tgid;
	init_completion(&ctx->work);

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}
878
879static void context_save_interrupted(struct smq_invoke_ctx *ctx)
880{
881 struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
882
883 spin_lock(&ctx->fl->hlock);
884 hlist_del_init(&ctx->hn);
885 hlist_add_head(&ctx->hn, &clst->interrupted);
886 spin_unlock(&ctx->fl->hlock);
887 /* free the cache on power collapse */
888 fastrpc_buf_list_free(ctx->fl);
889}
890
/*
 * Unlink @ctx from whichever context list it is on, release the
 * mappings taken for its buffer arguments, return its copy buffer to
 * the file's cache, and free the context slab.
 *
 * NOTE(review): only the inbuf/outbuf mappings (indices [0, nbufs))
 * are released here; mappings that get_args() creates for handle
 * arguments sit at indices >= nbufs — confirm those are dropped on
 * the fdlist path after the call completes.
 */
static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i]);
	/* park the metadata buffer on the file's cache for reuse */
	fastrpc_buf_free(ctx->buf, 1);
	kfree(ctx);
}
904
/*
 * Publish the DSP's return value and wake the thread waiting on this
 * context.  retval must be stored before complete() so the waiter
 * reads the final value.
 */
static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}
910
911
912static void fastrpc_notify_users(struct fastrpc_file *me)
913{
914 struct smq_invoke_ctx *ictx;
915 struct hlist_node *n;
916
917 spin_lock(&me->hlock);
918 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
919 complete(&ictx->work);
920 }
921 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
922 complete(&ictx->work);
923 }
924 spin_unlock(&me->hlock);
925
926}
927
928static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
929{
930 struct fastrpc_file *fl;
931 struct hlist_node *n;
932
933 spin_lock(&me->hlock);
934 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
935 if (fl->cid == cid)
936 fastrpc_notify_users(fl);
937 }
938 spin_unlock(&me->hlock);
939
940}
941static void context_list_ctor(struct fastrpc_ctx_lst *me)
942{
943 INIT_HLIST_HEAD(&me->interrupted);
944 INIT_HLIST_HEAD(&me->pending);
945}
946
947static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
948{
949 struct fastrpc_ctx_lst *clst = &fl->clst;
950 struct smq_invoke_ctx *ictx = 0, *ctxfree;
951 struct hlist_node *n;
952
953 do {
954 ctxfree = 0;
955 spin_lock(&fl->hlock);
956 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
957 hlist_del_init(&ictx->hn);
958 ctxfree = ictx;
959 break;
960 }
961 spin_unlock(&fl->hlock);
962 if (ctxfree)
963 context_free(ctxfree);
964 } while (ctxfree);
965 do {
966 ctxfree = 0;
967 spin_lock(&fl->hlock);
968 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
969 hlist_del_init(&ictx->hn);
970 ctxfree = ictx;
971 break;
972 }
973 spin_unlock(&fl->hlock);
974 if (ctxfree)
975 context_free(ctxfree);
976 } while (ctxfree);
977}
978
979static int fastrpc_file_free(struct fastrpc_file *fl);
980static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
981{
982 struct fastrpc_file *fl, *free;
983 struct hlist_node *n;
984
985 do {
986 free = 0;
987 spin_lock(&me->hlock);
988 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
989 hlist_del_init(&fl->hn);
990 free = fl;
991 break;
992 }
993 spin_unlock(&me->hlock);
994 if (free)
995 fastrpc_file_free(free);
996 } while (free);
997}
998
999static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
1000{
1001 remote_arg64_t *rpra;
1002 remote_arg_t *lpra = ctx->lpra;
1003 struct smq_invoke_buf *list;
1004 struct smq_phy_page *pages, *ipage;
1005 uint32_t sc = ctx->sc;
1006 int inbufs = REMOTE_SCALARS_INBUFS(sc);
1007 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001008 int handles, bufs = inbufs + outbufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001009 uintptr_t args;
1010 ssize_t rlen = 0, copylen = 0, metalen = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001011 int i, oix;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001012 int err = 0;
1013 int mflags = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001014 uint64_t *fdlist;
Sathish Ambleybae51902017-07-03 15:00:49 -07001015 uint32_t *crclist;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001016
1017 /* calculate size of the metadata */
1018 rpra = 0;
1019 list = smq_invoke_buf_start(rpra, sc);
1020 pages = smq_phy_page_start(sc, list);
1021 ipage = pages;
1022
1023 for (i = 0; i < bufs; ++i) {
1024 uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
1025 ssize_t len = lpra[i].buf.len;
1026
1027 if (ctx->fds[i] && (ctx->fds[i] != -1))
1028 fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1029 ctx->attrs[i], buf, len,
1030 mflags, &ctx->maps[i]);
1031 ipage += 1;
1032 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001033 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1034 for (i = bufs; i < bufs + handles; i++) {
1035 VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1036 FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
1037 if (err)
1038 goto bail;
1039 ipage += 1;
1040 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001041 metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
1042 (sizeof(uint32_t) * M_CRCLIST);
1043
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001044 /* calculate len requreed for copying */
1045 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1046 int i = ctx->overps[oix]->raix;
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001047 uintptr_t mstart, mend;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001048 ssize_t len = lpra[i].buf.len;
1049
1050 if (!len)
1051 continue;
1052 if (ctx->maps[i])
1053 continue;
1054 if (ctx->overps[oix]->offset == 0)
1055 copylen = ALIGN(copylen, BALIGN);
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001056 mstart = ctx->overps[oix]->mstart;
1057 mend = ctx->overps[oix]->mend;
1058 VERIFY(err, (mend - mstart) <= LONG_MAX);
1059 if (err)
1060 goto bail;
1061 copylen += mend - mstart;
1062 VERIFY(err, copylen >= 0);
1063 if (err)
1064 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001065 }
1066 ctx->used = copylen;
1067
1068 /* allocate new buffer */
1069 if (copylen) {
1070 VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
1071 if (err)
1072 goto bail;
1073 }
Tharun Kumar Merugue3361f92017-06-22 10:45:43 +05301074 if (ctx->buf->virt && metalen <= copylen)
1075 memset(ctx->buf->virt, 0, metalen);
1076
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001077 /* copy metadata */
1078 rpra = ctx->buf->virt;
1079 ctx->rpra = rpra;
1080 list = smq_invoke_buf_start(rpra, sc);
1081 pages = smq_phy_page_start(sc, list);
1082 ipage = pages;
1083 args = (uintptr_t)ctx->buf->virt + metalen;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001084 for (i = 0; i < bufs + handles; ++i) {
1085 if (lpra[i].buf.len)
1086 list[i].num = 1;
1087 else
1088 list[i].num = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001089 list[i].pgidx = ipage - pages;
1090 ipage++;
1091 }
1092 /* map ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001093 PERF(ctx->fl->profile, ctx->fl->perf.map,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001094 for (i = 0; i < inbufs + outbufs; ++i) {
1095 struct fastrpc_mmap *map = ctx->maps[i];
1096 uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
1097 ssize_t len = lpra[i].buf.len;
1098
1099 rpra[i].buf.pv = 0;
1100 rpra[i].buf.len = len;
1101 if (!len)
1102 continue;
1103 if (map) {
1104 struct vm_area_struct *vma;
1105 uintptr_t offset;
1106 int num = buf_num_pages(buf, len);
1107 int idx = list[i].pgidx;
1108
1109 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001110 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001111 } else {
1112 down_read(&current->mm->mmap_sem);
1113 VERIFY(err, NULL != (vma = find_vma(current->mm,
1114 map->va)));
1115 if (err) {
1116 up_read(&current->mm->mmap_sem);
1117 goto bail;
1118 }
1119 offset = buf_page_start(buf) - vma->vm_start;
1120 up_read(&current->mm->mmap_sem);
1121 VERIFY(err, offset < (uintptr_t)map->size);
1122 if (err)
1123 goto bail;
1124 }
1125 pages[idx].addr = map->phys + offset;
1126 pages[idx].size = num << PAGE_SHIFT;
1127 }
1128 rpra[i].buf.pv = buf;
1129 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001130 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001131 for (i = bufs; i < bufs + handles; ++i) {
1132 struct fastrpc_mmap *map = ctx->maps[i];
1133
1134 pages[i].addr = map->phys;
1135 pages[i].size = map->size;
1136 }
1137 fdlist = (uint64_t *)&pages[bufs + handles];
1138 for (i = 0; i < M_FDLIST; i++)
1139 fdlist[i] = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001140 crclist = (uint32_t *)&fdlist[M_FDLIST];
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301141 memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001142
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001143 /* copy non ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001144 PERF(ctx->fl->profile, ctx->fl->perf.copy,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001145 rlen = copylen - metalen;
1146 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1147 int i = ctx->overps[oix]->raix;
1148 struct fastrpc_mmap *map = ctx->maps[i];
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001149 ssize_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001150 uint64_t buf;
1151 ssize_t len = lpra[i].buf.len;
1152
1153 if (!len)
1154 continue;
1155 if (map)
1156 continue;
1157 if (ctx->overps[oix]->offset == 0) {
1158 rlen -= ALIGN(args, BALIGN) - args;
1159 args = ALIGN(args, BALIGN);
1160 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001161 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001162 VERIFY(err, rlen >= mlen);
1163 if (err)
1164 goto bail;
1165 rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
1166 pages[list[i].pgidx].addr = ctx->buf->phys -
1167 ctx->overps[oix]->offset +
1168 (copylen - rlen);
1169 pages[list[i].pgidx].addr =
1170 buf_page_start(pages[list[i].pgidx].addr);
1171 buf = rpra[i].buf.pv;
1172 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1173 if (i < inbufs) {
1174 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1175 lpra[i].buf.pv, len);
1176 if (err)
1177 goto bail;
1178 }
1179 args = args + mlen;
1180 rlen -= mlen;
1181 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001182 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001183
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001184 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001185 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1186 int i = ctx->overps[oix]->raix;
1187 struct fastrpc_mmap *map = ctx->maps[i];
1188
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001189 if (map && map->uncached)
1190 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301191 if (ctx->fl->sctx->smmu.coherent &&
1192 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1193 continue;
1194 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1195 continue;
1196
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001197 if (rpra[i].buf.len && ctx->overps[oix]->mstart)
1198 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1199 uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
1200 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001201 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001202 for (i = bufs; i < bufs + handles; i++) {
1203 rpra[i].dma.fd = ctx->fds[i];
1204 rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1205 rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001206 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001207
1208 if (!ctx->fl->sctx->smmu.coherent) {
1209 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001210 dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001211 PERF_END);
1212 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001213 bail:
1214 return err;
1215}
1216
/*
 * Copy results back to the caller after a remote invocation completes.
 *
 * For each output buffer that was NOT ion-mapped, the data staged in the
 * shared copy buffer is copied back to the caller's buffer; ion-mapped
 * output buffers just drop their map reference.  The DSP may also return
 * a list of fds (fdlist) whose mappings it is done with — those maps are
 * freed here.  Finally, if the caller asked for CRCs (ctx->crc), the CRC
 * list written by the DSP is copied out.
 *
 * @kernel: nonzero when the destination buffers are kernel pointers
 *          (K_COPY_TO_USER then does a plain memcpy).
 * @ctx:    completed invoke context holding rpra/lpra/maps.
 * @upra:   caller's remote-arg array (unused here; outputs are reached
 *          through ctx->lpra).
 *
 * Returns 0 on success or a negative error from the user copies.
 */
static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
			remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	uint32_t *crclist = NULL;

	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	/* Metadata layout in ctx->buf: [buf list][page list][fdlist][crclist] */
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	crclist = (uint32_t *)(fdlist + M_FDLIST);

	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			/* Non-ion output buffer: copy staged data back. */
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			/* Ion-mapped buffer: nothing to copy, drop the map. */
			fastrpc_mmap_free(ctx->maps[i]);
			ctx->maps[i] = 0;
		}
	}
	if (inbufs + outbufs + handles) {
		/* fdlist is zero-terminated; free maps the DSP released. */
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap);
		}
	}
	if (ctx->crc && crclist && rpra)
		K_COPY_TO_USER(err, kernel, (void __user *)ctx->crc,
			crclist, M_CRCLIST*sizeof(uint32_t));

 bail:
	return err;
}
1268
/*
 * Flush CPU cache lines that straddle the edges of output buffers before
 * the invoke is sent, so that later cache invalidation (inv_args) cannot
 * destroy adjacent CPU-written data sharing a cache line with a buffer
 * boundary.  Only the partial first/last cache lines are flushed here;
 * uncached, io-coherent and metadata-resident buffers are skipped.
 */
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		/* Skip coherent SMMU sessions unless forced non-coherent. */
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* Buffer lives in the metadata page itself: handled by the
		 * whole-metadata flush elsewhere.
		 */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		/* Flush the partial cache line at the buffer start ... */
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		/* ... and the partial cache line at the buffer end. */
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
							rpra[i].buf.len);
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}
1304
/*
 * Invalidate CPU caches over the output buffers (and the metadata region)
 * after the DSP has written its results, so the CPU reads fresh data.
 * Uncached, io-coherent and metadata-resident buffers are skipped, same
 * policy as inv_args_pre().  Ion-backed buffers use the ion cache op so
 * the whole underlying allocation is handled; plain buffers use a direct
 * range invalidate.
 */
static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* Covered by the metadata invalidate below. */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						 + rpra[i].buf.len));
	}

	/* Invalidate the metadata/copy region written by the DSP. */
	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}
1344
/*
 * Build the smq_msg for this invoke context and transmit it to the DSP
 * over the channel's glink port.
 *
 * The low bit(s) of the context cookie carry fl->pd so the response path
 * can identify the protection domain; kernel-originated calls send pid 0.
 * Fails with -ECONNRESET if a subsystem restart happened since this file
 * opened the channel, and errors out if the glink port is not connected.
 */
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, 0 != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = fl->tgid;
	msg->tid = current->pid;
	/* Tag the tid so a second session from the same process is
	 * distinguishable on the DSP side.
	 */
	if (fl->sessionid)
		msg->tid |= (1 << SESSION_ID_INDEX);
	if (kernel)
		msg->pid = 0;
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	/* A subsystem restart invalidated this session's channel state. */
	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
			FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
 bail:
	return err;
}
1382
1383static void fastrpc_init(struct fastrpc_apps *me)
1384{
1385 int i;
1386
1387 INIT_HLIST_HEAD(&me->drivers);
1388 spin_lock_init(&me->hlock);
1389 mutex_init(&me->smd_mutex);
1390 me->channel = &gcinfo[0];
1391 for (i = 0; i < NUM_CHANNELS; i++) {
1392 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301393 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001394 me->channel[i].sesscount = 0;
1395 }
1396}
1397
1398static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1399
/*
 * Core remote-invocation path shared by the ioctl entry points and by
 * kernel-originated control calls.
 *
 * Flow: (optionally) resume an interrupted context, otherwise allocate a
 * new one; marshal arguments (get_args); perform pre-invoke cache
 * maintenance; send the message; wait for the DSP's completion; perform
 * post-invoke cache invalidation; and copy results back (put_args).
 *
 * @fl:     client file state.
 * @mode:   invoke mode from the caller (not referenced in this body —
 *          presumably consumed by context_alloc via ctx; verify).
 * @kernel: nonzero for in-kernel callers (kernel buffers, uninterruptible
 *          wait, no interrupted-context restore).
 * @inv:    invocation descriptor (handle, scalars, args, optional crc).
 *
 * Returns 0 on success, -ERESTARTSYS if interrupted (context is parked
 * for later restore), the DSP's retval, or ECONNRESET after an SSR.
 */
static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
				   uint32_t kernel,
				   struct fastrpc_ioctl_invoke_crc *inv)
{
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;
	int cid = fl->cid;
	int interrupted = 0;
	int err = 0;
	struct timespec invoket;

	if (fl->profile)
		getnstimeofday(&invoket);


	VERIFY(err, fl->sctx != NULL);
	if (err)
		goto bail;
	VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
	if (err)
		goto bail;

	if (!kernel) {
		/* A previous invoke of this ioctl may have been interrupted;
		 * pick up its saved context instead of starting over.
		 */
		VERIFY(err, 0 == context_restore_interrupted(fl, inv,
								&ctx));
		if (err)
			goto bail;
		if (fl->sctx->smmu.faults)
			err = FASTRPC_ENOSUCH;
		if (err)
			goto bail;
		if (ctx)
			goto wait;
	}

	VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
	if (err)
		goto bail;

	if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
		PERF(fl->profile, fl->perf.getargs,
		VERIFY(err, 0 == get_args(kernel, ctx));
		PERF_END);
		if (err)
			goto bail;
	}

	/* Protect buffer-boundary cache lines before the DSP writes. */
	if (!fl->sctx->smmu.coherent)
		inv_args_pre(ctx);
	PERF(fl->profile, fl->perf.link,
	VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
	PERF_END);

	if (err)
		goto bail;
 wait:
	if (kernel)
		wait_for_completion(&ctx->work);
	else {
		interrupted = wait_for_completion_interruptible(&ctx->work);
		VERIFY(err, 0 == (err = interrupted));
		if (err)
			goto bail;
	}

	PERF(fl->profile, fl->perf.invargs,
	if (!fl->sctx->smmu.coherent)
		inv_args(ctx);
	PERF_END);

	/* Propagate the remote function's return value. */
	VERIFY(err, 0 == (err = ctx->retval));
	if (err)
		goto bail;

	PERF(fl->profile, fl->perf.putargs,
	VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
	PERF_END);
	if (err)
		goto bail;
 bail:
	/* Interrupted contexts are parked so a retried ioctl can resume. */
	if (ctx && interrupted == -ERESTARTSYS)
		context_save_interrupted(ctx);
	else if (ctx)
		context_free(ctx);
	if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
		err = ECONNRESET;

	if (fl->profile && !interrupted) {
		if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
			fl->perf.invoke += getnstimediff(&invoket);
		if (!(invoke->handle >= 0 &&
			invoke->handle <= FASTRPC_STATIC_HANDLE_MAX))
			fl->perf.count++;
	}
	return err;
}
1496
Sathish Ambley36849af2017-02-02 09:35:55 -08001497static int fastrpc_channel_open(struct fastrpc_file *fl);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001498static int fastrpc_init_process(struct fastrpc_file *fl,
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001499 struct fastrpc_ioctl_init_attrs *uproc)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001500{
1501 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001502 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001503 struct fastrpc_ioctl_init *init = &uproc->init;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001504 struct smq_phy_page pages[1];
1505 struct fastrpc_mmap *file = 0, *mem = 0;
1506
Sathish Ambley36849af2017-02-02 09:35:55 -08001507 VERIFY(err, !fastrpc_channel_open(fl));
1508 if (err)
1509 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001510 if (init->flags == FASTRPC_INIT_ATTACH) {
1511 remote_arg_t ra[1];
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301512 int tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001513
1514 ra[0].buf.pv = (void *)&tgid;
1515 ra[0].buf.len = sizeof(tgid);
1516 ioctl.inv.handle = 1;
1517 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
1518 ioctl.inv.pra = ra;
1519 ioctl.fds = 0;
1520 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001521 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001522 fl->pd = 0;
1523 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1524 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1525 if (err)
1526 goto bail;
1527 } else if (init->flags == FASTRPC_INIT_CREATE) {
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001528 remote_arg_t ra[6];
1529 int fds[6];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001530 int mflags = 0;
1531 struct {
1532 int pgid;
1533 int namelen;
1534 int filelen;
1535 int pageslen;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001536 int attrs;
1537 int siglen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001538 } inbuf;
1539
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301540 inbuf.pgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001541 inbuf.namelen = strlen(current->comm) + 1;
1542 inbuf.filelen = init->filelen;
1543 fl->pd = 1;
1544 if (init->filelen) {
1545 VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
1546 init->file, init->filelen, mflags, &file));
1547 if (err)
1548 goto bail;
1549 }
1550 inbuf.pageslen = 1;
1551 VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
1552 init->mem, init->memlen, mflags, &mem));
1553 if (err)
1554 goto bail;
1555 inbuf.pageslen = 1;
1556 ra[0].buf.pv = (void *)&inbuf;
1557 ra[0].buf.len = sizeof(inbuf);
1558 fds[0] = 0;
1559
1560 ra[1].buf.pv = (void *)current->comm;
1561 ra[1].buf.len = inbuf.namelen;
1562 fds[1] = 0;
1563
1564 ra[2].buf.pv = (void *)init->file;
1565 ra[2].buf.len = inbuf.filelen;
1566 fds[2] = init->filefd;
1567
1568 pages[0].addr = mem->phys;
1569 pages[0].size = mem->size;
1570 ra[3].buf.pv = (void *)pages;
1571 ra[3].buf.len = 1 * sizeof(*pages);
1572 fds[3] = 0;
1573
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001574 inbuf.attrs = uproc->attrs;
1575 ra[4].buf.pv = (void *)&(inbuf.attrs);
1576 ra[4].buf.len = sizeof(inbuf.attrs);
1577 fds[4] = 0;
1578
1579 inbuf.siglen = uproc->siglen;
1580 ra[5].buf.pv = (void *)&(inbuf.siglen);
1581 ra[5].buf.len = sizeof(inbuf.siglen);
1582 fds[5] = 0;
1583
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001584 ioctl.inv.handle = 1;
1585 ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001586 if (uproc->attrs)
1587 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001588 ioctl.inv.pra = ra;
1589 ioctl.fds = fds;
1590 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001591 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001592 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1593 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1594 if (err)
1595 goto bail;
1596 } else {
1597 err = -ENOTTY;
1598 }
1599bail:
1600 if (mem && err)
1601 fastrpc_mmap_free(mem);
1602 if (file)
1603 fastrpc_mmap_free(file);
1604 return err;
1605}
1606
/*
 * Tell the DSP to tear down the remote process belonging to this client
 * (remote call on static handle 1 with the client's tgid as the single
 * in-arg).  No-op with an error if the channel id is invalid or the
 * channel is not open.
 */
static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_crc ioctl;
	remote_arg_t ra[1];
	int tgid = 0;

	VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
	if (err)
		goto bail;
	VERIFY(err, fl->apps->channel[fl->cid].chan != 0);
	if (err)
		goto bail;
	tgid = fl->tgid;
	ra[0].buf.pv = (void *)&tgid;
	ra[0].buf.len = sizeof(tgid);
	ioctl.inv.handle = 1;
	ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	ioctl.attrs = 0;
	ioctl.crc = NULL;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
bail:
	return err;
}
1634
1635static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
1636 struct fastrpc_mmap *map)
1637{
Sathish Ambleybae51902017-07-03 15:00:49 -07001638 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001639 struct smq_phy_page page;
1640 int num = 1;
1641 remote_arg_t ra[3];
1642 int err = 0;
1643 struct {
1644 int pid;
1645 uint32_t flags;
1646 uintptr_t vaddrin;
1647 int num;
1648 } inargs;
1649 struct {
1650 uintptr_t vaddrout;
1651 } routargs;
1652
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301653 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001654 inargs.vaddrin = (uintptr_t)map->va;
1655 inargs.flags = flags;
1656 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
1657 ra[0].buf.pv = (void *)&inargs;
1658 ra[0].buf.len = sizeof(inargs);
1659 page.addr = map->phys;
1660 page.size = map->size;
1661 ra[1].buf.pv = (void *)&page;
1662 ra[1].buf.len = num * sizeof(page);
1663
1664 ra[2].buf.pv = (void *)&routargs;
1665 ra[2].buf.len = sizeof(routargs);
1666
1667 ioctl.inv.handle = 1;
1668 if (fl->apps->compat)
1669 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
1670 else
1671 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
1672 ioctl.inv.pra = ra;
1673 ioctl.fds = 0;
1674 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001675 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001676 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1677 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1678 map->raddr = (uintptr_t)routargs.vaddrout;
1679
1680 return err;
1681}
1682
1683static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
1684 struct fastrpc_mmap *map)
1685{
Sathish Ambleybae51902017-07-03 15:00:49 -07001686 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001687 remote_arg_t ra[1];
1688 int err = 0;
1689 struct {
1690 int pid;
1691 uintptr_t vaddrout;
1692 ssize_t size;
1693 } inargs;
1694
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301695 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001696 inargs.size = map->size;
1697 inargs.vaddrout = map->raddr;
1698 ra[0].buf.pv = (void *)&inargs;
1699 ra[0].buf.len = sizeof(inargs);
1700
1701 ioctl.inv.handle = 1;
1702 if (fl->apps->compat)
1703 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
1704 else
1705 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
1706 ioctl.inv.pra = ra;
1707 ioctl.fds = 0;
1708 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001709 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001710 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1711 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1712 return err;
1713}
1714
1715static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
1716 ssize_t len, struct fastrpc_mmap **ppmap);
1717
1718static void fastrpc_mmap_add(struct fastrpc_mmap *map);
1719
1720static int fastrpc_internal_munmap(struct fastrpc_file *fl,
1721 struct fastrpc_ioctl_munmap *ud)
1722{
1723 int err = 0;
1724 struct fastrpc_mmap *map = 0;
1725
1726 VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
1727 if (err)
1728 goto bail;
1729 VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
1730 if (err)
1731 goto bail;
1732 fastrpc_mmap_free(map);
1733bail:
1734 if (err && map)
1735 fastrpc_mmap_add(map);
1736 return err;
1737}
1738
/*
 * Handle the mmap ioctl: map a buffer for the DSP and return the remote
 * address in ud->vaddrout.  If an identical mapping already exists the
 * call succeeds without creating a new one (note: ud->vaddrout is not
 * refreshed in that path — presumably the caller already has it; verify).
 */
static int fastrpc_internal_mmap(struct fastrpc_file *fl,
				 struct fastrpc_ioctl_mmap *ud)
{

	struct fastrpc_mmap *map = 0;
	int err = 0;

	/* Refcount-bumping lookup: reuse an existing identical mapping. */
	if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin, ud->size,
			 ud->flags, 1, &map))
		return 0;

	VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
			(uintptr_t)ud->vaddrin, ud->size, ud->flags, &map));
	if (err)
		goto bail;
	VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
	if (err)
		goto bail;
	ud->vaddrout = map->raddr;
 bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}
1763
/*
 * kref release callback for a channel: close the glink port and drop the
 * link-state notification registration.
 *
 * Called via kref_put_mutex() with me->smd_mutex HELD; this function is
 * responsible for releasing it (hence the bare mutex_unlock below).
 */
static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	/* Channel id is the index of this ctx within the global array. */
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = 0;
	glink_unregister_link_state_cb(ctx->link.link_notify_handle);
	ctx->link.link_notify_handle = NULL;
	mutex_unlock(&me->smd_mutex);
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
						MAJOR(me->dev_no), cid);
}
1780
1781static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
1782
/*
 * Pick a free SMMU session on the channel matching the requested security
 * mode; caller must hold me->smd_mutex.
 *
 * If the channel has no SMMU sessions configured (sesscount == 0),
 * session 0 is used with the platform device as a fallback.  On success
 * *session points at the claimed (or fallback) session.
 */
static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
			int secure, struct fastrpc_session_ctx **session)
{
	struct fastrpc_apps *me = &gfa;
	int idx = 0, err = 0;

	if (chan->sesscount) {
		/* Claim the first unused session with a matching mode. */
		for (idx = 0; idx < chan->sesscount; ++idx) {
			if (!chan->session[idx].used &&
				chan->session[idx].smmu.secure == secure) {
				chan->session[idx].used = 1;
				break;
			}
		}
		VERIFY(err, idx < chan->sesscount);
		if (err)
			goto bail;
		chan->session[idx].smmu.faults = 0;
	} else {
		/* No per-session SMMU contexts: fall back to the device. */
		VERIFY(err, me->dev != NULL);
		if (err)
			goto bail;
		chan->session[0].dev = me->dev;
	}

	*session = &chan->session[idx];
 bail:
	return err;
}
1812
1813bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv, size_t size)
1814{
1815 if (glink_queue_rx_intent(h, NULL, size))
1816 return false;
1817 return true;
1818}
1819
/* glink tx-done callback: intentionally a no-op for this driver. */
void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
		const void *pkt_priv, const void *ptr)
{
}
1824
/*
 * Validate a context cookie received from the DSP by searching every
 * driver's pending and interrupted context lists for a matching pointer.
 * Guards against acting on stale or forged response cookies.
 *
 * Returns 1 if the cookie names a live context, 0 otherwise.
 * Lock order: me->hlock outer, fl->hlock inner.
 */
static int fastrpc_search_ctx(uint64_t rctx)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl;
	struct hlist_node *n, *m;
	struct smq_invoke_ctx *ictx = NULL;
	struct smq_invoke_ctx *ctx;
	int bfound = 0;

	ctx = (struct smq_invoke_ctx *)(uint64_to_ptr(rctx));
	if (!ctx)
		return bfound;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		/* Only search the file the context claims to belong to. */
		if (ctx->fl != fl)
			continue;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, m, &fl->clst.pending, hn) {
			if (ptr_to_uint64(ictx) == rctx) {
				bfound = 1;
				break;
			}
		}
		hlist_for_each_entry_safe(ictx, m, &fl->clst.interrupted, hn) {
			if (ptr_to_uint64(ictx) == rctx) {
				bfound = 1;
				break;
			}
		}
		spin_unlock(&fl->hlock);
		if (bfound)
			break;
	}
	spin_unlock(&me->hlock);
	return bfound;
}
1862
/*
 * glink rx callback: a response arrived from the DSP.  The low bit of
 * the context cookie carries the protection domain and is masked off.
 * The cookie is validated against the live context lists before the
 * waiting caller is completed; invalid cookies are logged and dropped.
 * The rx buffer is always returned to glink.
 */
void fastrpc_glink_notify_rx(void *handle, const void *priv,
	const void *pkt_priv, const void *ptr, size_t size)
{
	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
	int bfound = 0;

	if (!rsp || (size < sizeof(*rsp)))
		goto bail;

	bfound = fastrpc_search_ctx((uint64_t)(rsp->ctx & ~1));
	if (!bfound) {
		pr_err("adsprpc: invalid context %pK\n", (void *)rsp->ctx);
		goto bail;
	}

	/* Strip the pd bit before dereferencing the cookie. */
	rsp->ctx = rsp->ctx & ~1;
	context_notify_user(uint64_to_ptr(rsp->ctx), rsp->retval);
bail:
	glink_rx_done(handle, ptr, true);
}
1883
1884void fastrpc_glink_notify_state(void *handle, const void *priv,
1885 unsigned int event)
1886{
1887 struct fastrpc_apps *me = &gfa;
1888 int cid = (int)(uintptr_t)priv;
1889 struct fastrpc_glink_info *link;
1890
1891 if (cid < 0 || cid >= NUM_CHANNELS)
1892 return;
1893 link = &me->channel[cid].link;
1894 switch (event) {
1895 case GLINK_CONNECTED:
1896 link->port_state = FASTRPC_LINK_CONNECTED;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301897 complete(&me->channel[cid].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001898 break;
1899 case GLINK_LOCAL_DISCONNECTED:
1900 link->port_state = FASTRPC_LINK_DISCONNECTED;
1901 break;
1902 case GLINK_REMOTE_DISCONNECTED:
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05301903 if (me->channel[cid].chan) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001904 fastrpc_glink_close(me->channel[cid].chan, cid);
1905 me->channel[cid].chan = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001906 }
1907 break;
1908 default:
1909 break;
1910 }
1911}
1912
1913static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
1914 struct fastrpc_session_ctx **session)
1915{
1916 int err = 0;
1917 struct fastrpc_apps *me = &gfa;
1918
1919 mutex_lock(&me->smd_mutex);
1920 if (!*session)
1921 err = fastrpc_session_alloc_locked(chan, secure, session);
1922 mutex_unlock(&me->smd_mutex);
1923 return err;
1924}
1925
/* Return a session to the channel's pool (marks it unused under smd_mutex). */
static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
				struct fastrpc_session_ctx *session)
{
	struct fastrpc_apps *me = &gfa;

	mutex_lock(&me->smd_mutex);
	session->used = 0;
	mutex_unlock(&me->smd_mutex);
}
1935
1936static int fastrpc_file_free(struct fastrpc_file *fl)
1937{
1938 struct hlist_node *n;
1939 struct fastrpc_mmap *map = 0;
1940 int cid;
1941
1942 if (!fl)
1943 return 0;
1944 cid = fl->cid;
1945
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05301946 (void)fastrpc_release_current_dsp_process(fl);
1947
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001948 spin_lock(&fl->apps->hlock);
1949 hlist_del_init(&fl->hn);
1950 spin_unlock(&fl->apps->hlock);
1951
Sathish Ambleyd7fbcbb2017-03-08 10:55:48 -08001952 if (!fl->sctx) {
1953 kfree(fl);
1954 return 0;
1955 }
tharun kumar9f899ea2017-07-03 17:07:03 +05301956 spin_lock(&fl->hlock);
1957 fl->file_close = 1;
1958 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001959 fastrpc_context_list_dtor(fl);
1960 fastrpc_buf_list_free(fl);
1961 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
1962 fastrpc_mmap_free(map);
1963 }
1964 if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
1965 kref_put_mutex(&fl->apps->channel[cid].kref,
1966 fastrpc_channel_close, &fl->apps->smd_mutex);
1967 if (fl->sctx)
1968 fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
1969 if (fl->secsctx)
1970 fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
1971 kfree(fl);
1972 return 0;
1973}
1974
1975static int fastrpc_device_release(struct inode *inode, struct file *file)
1976{
1977 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
1978
1979 if (fl) {
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05301980 if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
1981 pm_qos_remove_request(&fl->pm_qos_req);
Sathish Ambley1ca68232017-01-19 10:32:55 -08001982 if (fl->debugfs_file != NULL)
1983 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001984 fastrpc_file_free(fl);
1985 file->private_data = 0;
1986 }
1987 return 0;
1988}
1989
1990static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
1991 void *priv)
1992{
1993 struct fastrpc_apps *me = &gfa;
1994 int cid = (int)((uintptr_t)priv);
1995 struct fastrpc_glink_info *link;
1996
1997 if (cid < 0 || cid >= NUM_CHANNELS)
1998 return;
1999
2000 link = &me->channel[cid].link;
2001 switch (cb_info->link_state) {
2002 case GLINK_LINK_STATE_UP:
2003 link->link_state = FASTRPC_LINK_STATE_UP;
2004 complete(&me->channel[cid].work);
2005 break;
2006 case GLINK_LINK_STATE_DOWN:
2007 link->link_state = FASTRPC_LINK_STATE_DOWN;
2008 break;
2009 default:
2010 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
2011 break;
2012 }
2013}
2014
/*
 * fastrpc_glink_register() - register for glink link-state callbacks on @cid.
 *
 * Idempotent: if a notifier handle already exists for the channel the
 * function returns 0 without re-registering (and without waiting).
 * After a fresh registration it blocks up to RPC_TIMEOUT on
 * me->channel[cid].work, which fastrpc_link_state_handler() completes
 * once the link reports GLINK_LINK_STATE_UP.
 *
 * Returns 0 on success; non-zero (via VERIFY) on bad @cid, registration
 * failure, or wait timeout.
 */
static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;

	link = &me->channel[cid].link;
	/* Already registered for this channel: nothing to do. */
	if (link->link_notify_handle != NULL)
		goto bail;

	link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
	/* The channel id travels through the callback's priv pointer. */
	link->link_notify_handle = glink_register_link_state_cb(
				&link->link_info,
				(void *)((uintptr_t)cid));
	VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
	if (err) {
		link->link_notify_handle = NULL;
		goto bail;
	}
	/* Wait for fastrpc_link_state_handler() to signal LINK_STATE_UP. */
	VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
						RPC_TIMEOUT));
bail:
	return err;
}
2042
2043static void fastrpc_glink_close(void *chan, int cid)
2044{
2045 int err = 0;
2046 struct fastrpc_glink_info *link;
2047
2048 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2049 if (err)
2050 return;
2051 link = &gfa.channel[cid].link;
2052
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302053 if (link->port_state == FASTRPC_LINK_CONNECTED) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002054 link->port_state = FASTRPC_LINK_DISCONNECTING;
2055 glink_close(chan);
2056 }
2057}
2058
/*
 * fastrpc_glink_open() - open the glink transport for channel @cid.
 *
 * Preconditions (both checked): the link must have reached
 * FASTRPC_LINK_STATE_UP (set by fastrpc_link_state_handler()) and the
 * port must be FASTRPC_LINK_DISCONNECTED.  Called under me->smd_mutex
 * from fastrpc_channel_open().
 *
 * On success the glink_open() handle is stored in me->channel[cid].chan.
 * Returns 0 on success, non-zero otherwise.
 */
static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &me->channel[cid].link.cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
	if (err)
		goto bail;

	/*
	 * Mark CONNECTING before calling glink_open().  NOTE(review):
	 * the transition to CONNECTED presumably happens in
	 * fastrpc_glink_notify_state(), which is outside this view.
	 */
	link->port_state = FASTRPC_LINK_CONNECTING;
	cfg->priv = (void *)(uintptr_t)cid;
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err)
		goto bail;
	me->channel[cid].chan = handle;
bail:
	return err;
}
2097
/*
 * debugfs open: stash the node's private data for fastrpc_debugfs_read()
 * (the fastrpc_file for per-process nodes, NULL for the "global" node).
 */
static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}
2103
/*
 * fastrpc_debugfs_read() - render driver state for a debugfs read.
 *
 * Two flavours share this handler:
 *  - private_data == NULL ("global" node): per-channel session table;
 *  - per-process node: the fastrpc_file's bufs, maps and pending /
 *    interrupted invoke contexts.
 *
 * All output goes through scnprintf() into a DEBUGFS_SIZE scratch
 * buffer, so the buffer cannot overflow; overlong output is silently
 * truncated.  Returns bytes copied out, or 0 if the scratch allocation
 * fails (note: 0, not -ENOMEM).
 */
static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
					 size_t count, loff_t *position)
{
	struct fastrpc_file *fl = filp->private_data;
	struct hlist_node *n;
	struct fastrpc_buf *buf = 0;
	struct fastrpc_mmap *map = 0;
	struct smq_invoke_ctx *ictx = 0;
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	unsigned int len = 0;
	int i, j, ret = 0;
	char *fileinfo = NULL;

	fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
	if (!fileinfo)
		goto bail;
	if (fl == NULL) {
		/* "global" node: dump every channel's session table. */
		for (i = 0; i < NUM_CHANNELS; i++) {
			chan = &gcinfo[i];
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s\n\n",
					chan->name);
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s %d\n",
					"sesscount:", chan->sesscount);
			for (j = 0; j < chan->sesscount; j++) {
				sess = &chan->session[j];
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s%d\n\n", "SESSION", j);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "sid:",
						sess->smmu.cb);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "SECURE:",
						sess->smmu.secure);
			}
		}
	} else {
		/* Per-process node. */
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"PROCESS_ID:", fl->tgid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"CHANNEL_ID:", fl->cid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"SSRCOUNT:", fl->ssrcount);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s\n",
				"LIST OF BUFS:");
		/* fl->hlock held across every list walk below. */
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %pK %s %llx\n", "buf:",
					buf, "buf->virt:", buf->virt,
					"buf->phys:", buf->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF MAPS:");
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %lx %s %llx\n",
					"map:", map,
					"map->va:", map->va,
					"map->phys:", map->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF PENDING SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
			/*
			 * NOTE(review): ictx->rpra is dereferenced without a
			 * NULL check — confirm it is set for every queued ctx.
			 */
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %u %s %u %s %u\n",
					"smqcontext:", ictx,
					"sc:", ictx->sc,
					"tid:", ictx->pid,
					"handle", ictx->rpra->h);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF INTERRUPTED SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %u %s %u %s %u\n",
					"smqcontext:", ictx,
					"sc:", ictx->sc,
					"tid:", ictx->pid,
					"handle", ictx->rpra->h);
		}
		spin_unlock(&fl->hlock);
	}
	/* scnprintf() already clamps; this is belt-and-braces. */
	if (len > DEBUGFS_SIZE)
		len = DEBUGFS_SIZE;
	ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
	kfree(fileinfo);
bail:
	return ret;
}
2206
/* Shared by the per-process and "global" debugfs nodes. */
static const struct file_operations debugfs_fops = {
	.open = fastrpc_debugfs_open,
	.read = fastrpc_debugfs_read,
};
/*
 * fastrpc_channel_open() - take a reference on, or bring up, channel fl->cid.
 *
 * Runs entirely under me->smd_mutex.  Fast path: kref_get_unless_zero()
 * succeeds and chan is non-zero, i.e. the glink channel is already open.
 * Slow path: register the link-state callback, open the port, wait up
 * to RPC_TIMEOUT for the workport completion, reset the refcount with
 * kref_init(), then queue two initial rx intents (16 and 64 bytes —
 * failure there is only warned about, not fatal).
 *
 * Returns 0 on success, non-zero on failure.
 */
static int fastrpc_channel_open(struct fastrpc_file *fl)
{
	struct fastrpc_apps *me = &gfa;
	int cid, err = 0;

	mutex_lock(&me->smd_mutex);

	VERIFY(err, fl && fl->sctx);
	if (err)
		goto bail;
	cid = fl->cid;
	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
	if (err)
		goto bail;
	fl->ssrcount = me->channel[cid].ssrcount;
	if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
	    (me->channel[cid].chan == 0)) {
		VERIFY(err, 0 == fastrpc_glink_register(cid, me));
		if (err)
			goto bail;
		VERIFY(err, 0 == fastrpc_glink_open(cid));
		if (err)
			goto bail;

		/* Port-open handshake: completed by the glink callbacks. */
		VERIFY(err,
			wait_for_completion_timeout(&me->channel[cid].workport,
						RPC_TIMEOUT));
		if (err) {
			/* Port never came up: forget the half-open handle. */
			me->channel[cid].chan = 0;
			goto bail;
		}
		kref_init(&me->channel[cid].kref);
		pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
						MAJOR(me->dev_no), cid);
		err = glink_queue_rx_intent(me->channel[cid].chan, NULL, 16);
		err |= glink_queue_rx_intent(me->channel[cid].chan, NULL, 64);
		if (err)
			pr_warn("adsprpc: initial intent fail for %d err %d\n",
					cid, err);
		/* Record that this SSR generation has been (re)opened. */
		if (me->channel[cid].ssrcount !=
			me->channel[cid].prevssrcount) {
			me->channel[cid].prevssrcount =
					me->channel[cid].ssrcount;
		}
	}

bail:
	mutex_unlock(&me->smd_mutex);
	return err;
}
2261
Sathish Ambley36849af2017-02-02 09:35:55 -08002262static int fastrpc_device_open(struct inode *inode, struct file *filp)
2263{
2264 int err = 0;
Sathish Ambley567012b2017-03-06 11:55:04 -08002265 struct dentry *debugfs_file;
Sathish Ambley36849af2017-02-02 09:35:55 -08002266 struct fastrpc_file *fl = 0;
2267 struct fastrpc_apps *me = &gfa;
2268
2269 VERIFY(err, fl = kzalloc(sizeof(*fl), GFP_KERNEL));
2270 if (err)
2271 return err;
Sathish Ambley567012b2017-03-06 11:55:04 -08002272 debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
2273 fl, &debugfs_fops);
Sathish Ambley36849af2017-02-02 09:35:55 -08002274 context_list_ctor(&fl->clst);
2275 spin_lock_init(&fl->hlock);
2276 INIT_HLIST_HEAD(&fl->maps);
2277 INIT_HLIST_HEAD(&fl->bufs);
2278 INIT_HLIST_NODE(&fl->hn);
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302279 fl->sessionid = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08002280 fl->tgid = current->tgid;
2281 fl->apps = me;
2282 fl->mode = FASTRPC_MODE_SERIAL;
2283 fl->cid = -1;
Sathish Ambley567012b2017-03-06 11:55:04 -08002284 if (debugfs_file != NULL)
2285 fl->debugfs_file = debugfs_file;
2286 memset(&fl->perf, 0, sizeof(fl->perf));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302287 fl->qos_request = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08002288 filp->private_data = fl;
2289 spin_lock(&me->hlock);
2290 hlist_add_head(&fl->hn, &me->drivers);
2291 spin_unlock(&me->hlock);
2292 return 0;
2293}
2294
/*
 * fastrpc_get_info() - backend for FASTRPC_IOCTL_GETINFO.
 *
 * On the first call *info carries the requested channel id: it is bound
 * to fl->cid (initialised to -1 in fastrpc_device_open() and set only
 * here) and a non-secure session is allocated on that channel.  On
 * return *info reports whether the session's SMMU is enabled (1/0).
 */
static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
{
	int err = 0;
	uint32_t cid;

	VERIFY(err, fl != 0);
	if (err)
		goto bail;
	if (fl->cid == -1) {
		cid = *info;
		/* cid is unsigned, so this also rejects "negative" ids. */
		VERIFY(err, cid < NUM_CHANNELS);
		if (err)
			goto bail;
		fl->cid = cid;
		fl->ssrcount = fl->apps->channel[cid].ssrcount;
		VERIFY(err, !fastrpc_session_alloc_locked(
				&fl->apps->channel[cid], 0, &fl->sctx));
		if (err)
			goto bail;
	}
	VERIFY(err, fl->sctx != NULL);
	if (err)
		goto bail;
	*info = (fl->sctx->smmu.enabled ? 1 : 0);
bail:
	return err;
}
2322
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302323static int fastrpc_internal_control(struct fastrpc_file *fl,
2324 struct fastrpc_ioctl_control *cp)
2325{
2326 int err = 0;
2327 int latency;
2328
2329 VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
2330 if (err)
2331 goto bail;
2332 VERIFY(err, !IS_ERR_OR_NULL(cp));
2333 if (err)
2334 goto bail;
2335
2336 switch (cp->req) {
2337 case FASTRPC_CONTROL_LATENCY:
2338 latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
2339 fl->apps->latency : PM_QOS_DEFAULT_VALUE;
2340 VERIFY(err, latency != 0);
2341 if (err)
2342 goto bail;
2343 if (!fl->qos_request) {
2344 pm_qos_add_request(&fl->pm_qos_req,
2345 PM_QOS_CPU_DMA_LATENCY, latency);
2346 fl->qos_request = 1;
2347 } else
2348 pm_qos_update_request(&fl->pm_qos_req, latency);
2349 break;
2350 default:
2351 err = -ENOTTY;
2352 break;
2353 }
2354bail:
2355 return err;
2356}
2357
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002358static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
2359 unsigned long ioctl_param)
2360{
2361 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07002362 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002363 struct fastrpc_ioctl_mmap mmap;
2364 struct fastrpc_ioctl_munmap munmap;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002365 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002366 struct fastrpc_ioctl_perf perf;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302367 struct fastrpc_ioctl_control cp;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002368 } p;
2369 void *param = (char *)ioctl_param;
2370 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2371 int size = 0, err = 0;
2372 uint32_t info;
2373
2374 p.inv.fds = 0;
2375 p.inv.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07002376 p.inv.crc = NULL;
tharun kumar9f899ea2017-07-03 17:07:03 +05302377 spin_lock(&fl->hlock);
2378 if (fl->file_close == 1) {
2379 err = EBADF;
2380 pr_warn("ADSPRPC: fastrpc_device_release is happening, So not sending any new requests to DSP");
2381 spin_unlock(&fl->hlock);
2382 goto bail;
2383 }
2384 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002385
2386 switch (ioctl_num) {
2387 case FASTRPC_IOCTL_INVOKE:
2388 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07002389 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002390 case FASTRPC_IOCTL_INVOKE_FD:
2391 if (!size)
2392 size = sizeof(struct fastrpc_ioctl_invoke_fd);
2393 /* fall through */
2394 case FASTRPC_IOCTL_INVOKE_ATTRS:
2395 if (!size)
2396 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07002397 /* fall through */
2398 case FASTRPC_IOCTL_INVOKE_CRC:
2399 if (!size)
2400 size = sizeof(struct fastrpc_ioctl_invoke_crc);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002401 VERIFY(err, 0 == copy_from_user(&p.inv, param, size));
2402 if (err)
2403 goto bail;
2404 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
2405 0, &p.inv)));
2406 if (err)
2407 goto bail;
2408 break;
2409 case FASTRPC_IOCTL_MMAP:
2410 VERIFY(err, 0 == copy_from_user(&p.mmap, param,
2411 sizeof(p.mmap)));
2412 if (err)
2413 goto bail;
2414 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
2415 if (err)
2416 goto bail;
2417 VERIFY(err, 0 == copy_to_user(param, &p.mmap, sizeof(p.mmap)));
2418 if (err)
2419 goto bail;
2420 break;
2421 case FASTRPC_IOCTL_MUNMAP:
2422 VERIFY(err, 0 == copy_from_user(&p.munmap, param,
2423 sizeof(p.munmap)));
2424 if (err)
2425 goto bail;
2426 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
2427 &p.munmap)));
2428 if (err)
2429 goto bail;
2430 break;
2431 case FASTRPC_IOCTL_SETMODE:
2432 switch ((uint32_t)ioctl_param) {
2433 case FASTRPC_MODE_PARALLEL:
2434 case FASTRPC_MODE_SERIAL:
2435 fl->mode = (uint32_t)ioctl_param;
2436 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002437 case FASTRPC_MODE_PROFILE:
2438 fl->profile = (uint32_t)ioctl_param;
2439 break;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302440 case FASTRPC_MODE_SESSION:
2441 fl->sessionid = 1;
2442 fl->tgid |= (1 << SESSION_ID_INDEX);
2443 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002444 default:
2445 err = -ENOTTY;
2446 break;
2447 }
2448 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002449 case FASTRPC_IOCTL_GETPERF:
2450 VERIFY(err, 0 == copy_from_user(&p.perf,
2451 param, sizeof(p.perf)));
2452 if (err)
2453 goto bail;
2454 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
2455 if (p.perf.keys) {
2456 char *keys = PERF_KEYS;
2457
2458 VERIFY(err, 0 == copy_to_user((char *)p.perf.keys,
2459 keys, strlen(keys)+1));
2460 if (err)
2461 goto bail;
2462 }
2463 if (p.perf.data) {
2464 VERIFY(err, 0 == copy_to_user((int64_t *)p.perf.data,
2465 &fl->perf, sizeof(fl->perf)));
2466 }
2467 VERIFY(err, 0 == copy_to_user(param, &p.perf, sizeof(p.perf)));
2468 if (err)
2469 goto bail;
2470 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302471 case FASTRPC_IOCTL_CONTROL:
2472 VERIFY(err, 0 == copy_from_user(&p.cp, (void __user *)param,
2473 sizeof(p.cp)));
2474 if (err)
2475 goto bail;
2476 VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
2477 if (err)
2478 goto bail;
2479 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002480 case FASTRPC_IOCTL_GETINFO:
Sathish Ambley36849af2017-02-02 09:35:55 -08002481 VERIFY(err, 0 == copy_from_user(&info, param, sizeof(info)));
2482 if (err)
2483 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002484 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
2485 if (err)
2486 goto bail;
2487 VERIFY(err, 0 == copy_to_user(param, &info, sizeof(info)));
2488 if (err)
2489 goto bail;
2490 break;
2491 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002492 p.init.attrs = 0;
2493 p.init.siglen = 0;
2494 size = sizeof(struct fastrpc_ioctl_init);
2495 /* fall through */
2496 case FASTRPC_IOCTL_INIT_ATTRS:
2497 if (!size)
2498 size = sizeof(struct fastrpc_ioctl_init_attrs);
2499 VERIFY(err, 0 == copy_from_user(&p.init, param, size));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002500 if (err)
2501 goto bail;
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05302502 VERIFY(err, p.init.init.filelen >= 0 &&
2503 p.init.init.memlen >= 0);
2504 if (err)
2505 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002506 VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
2507 if (err)
2508 goto bail;
2509 break;
2510
2511 default:
2512 err = -ENOTTY;
2513 pr_info("bad ioctl: %d\n", ioctl_num);
2514 break;
2515 }
2516 bail:
2517 return err;
2518}
2519
2520static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
2521 unsigned long code,
2522 void *data)
2523{
2524 struct fastrpc_apps *me = &gfa;
2525 struct fastrpc_channel_ctx *ctx;
2526 int cid;
2527
2528 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
2529 cid = ctx - &me->channel[0];
2530 if (code == SUBSYS_BEFORE_SHUTDOWN) {
2531 mutex_lock(&me->smd_mutex);
2532 ctx->ssrcount++;
2533 if (ctx->chan) {
2534 fastrpc_glink_close(ctx->chan, cid);
2535 ctx->chan = 0;
2536 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
2537 gcinfo[cid].name, MAJOR(me->dev_no), cid);
2538 }
2539 mutex_unlock(&me->smd_mutex);
2540 fastrpc_notify_drivers(me, cid);
2541 }
2542
2543 return NOTIFY_DONE;
2544}
2545
/* Character-device entry points for the fastrpc device node. */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};
2552
/* Devicetree compatibles dispatched in fastrpc_probe(). */
static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};
2560
/*
 * fastrpc_cb_probe() - set up one SMMU context bank ("compute-cb" node).
 *
 * Matches the node's "label" property against gcinfo[] to find the
 * channel, then fills the channel's next free session slot: context
 * bank id from the "iommus" specifier, coherency/secure flags from DT,
 * and an ARM IOMMU mapping the device is attached to.  Secure banks get
 * a lower VA base (0x60000000 instead of 0x80000000) and are tagged
 * with VMID_CP_PIXEL.  Also (re)creates the "global" debugfs file on
 * every successful probe.
 */
static int fastrpc_cb_probe(struct device *dev)
{
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	struct of_phandle_args iommuspec;
	const char *name;
	unsigned int start = 0x80000000;
	int err = 0, i;
	int secure_vmid = VMID_CP_PIXEL;

	VERIFY(err, 0 != (name = of_get_property(dev->of_node, "label", NULL)));
	if (err)
		goto bail;
	/* Find the channel whose name matches the DT label. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		if (!strcmp(name, gcinfo[i].name))
			break;
	}
	VERIFY(err, i < NUM_CHANNELS);
	if (err)
		goto bail;
	chan = &gcinfo[i];
	VERIFY(err, chan->sesscount < NUM_SESSIONS);
	if (err)
		goto bail;

	VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
						"#iommu-cells", 0, &iommuspec));
	if (err)
		goto bail;
	sess = &chan->session[chan->sesscount];
	/* Context-bank number is taken from the specifier's low nibble. */
	sess->smmu.cb = iommuspec.args[0] & 0xf;
	sess->used = 0;
	sess->smmu.coherent = of_property_read_bool(dev->of_node,
						"dma-coherent");
	sess->smmu.secure = of_property_read_bool(dev->of_node,
						"qcom,secure-context-bank");
	if (sess->smmu.secure)
		start = 0x60000000;
	/* VA window of size 0x78000000 starting at @start. */
	VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
			arm_iommu_create_mapping(&platform_bus_type,
						start, 0x78000000)));
	if (err)
		goto bail;

	if (sess->smmu.secure)
		iommu_domain_set_attr(sess->smmu.mapping->domain,
				DOMAIN_ATTR_SECURE_VMID,
				&secure_vmid);

	VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
	if (err)
		goto bail;
	sess->dev = dev;
	sess->smmu.enabled = 1;
	chan->sesscount++;
	debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
							NULL, &debugfs_fops);
bail:
	return err;
}
2623
/*
 * fastrpc_probe() - platform probe, dispatched by compatible string:
 *  - "qcom,msm-fastrpc-compute-cb": SMMU context bank -> fastrpc_cb_probe();
 *  - "qcom,msm-adsprpc-mem-region": find the ADSP ION heap's CMA region
 *    and hyp-assign it from HLOS to HLOS + MSS/SSC/ADSP Q6 VMIDs (RWX);
 *  - otherwise: read optional "qcom,rpc-latency-us" (defaults to 0,
 *    which makes FASTRPC_CONTROL_LATENCY fail its non-zero check) and
 *    populate the child nodes.
 */
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			/* Locate the heap child whose reg == ADSP heap id. */
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
						VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					};

			/* Share the CMA region with the DSP VMIDs. */
			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
					srcVM, 1, destVM, destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	err = of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
				&me->latency);
	if (err)
		me->latency = 0;
	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					fastrpc_match_table,
					NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}
2691
2692static void fastrpc_deinit(void)
2693{
2694 struct fastrpc_apps *me = &gfa;
2695 struct fastrpc_channel_ctx *chan = gcinfo;
2696 int i, j;
2697
2698 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
2699 if (chan->chan) {
2700 kref_put_mutex(&chan->kref,
2701 fastrpc_channel_close, &me->smd_mutex);
2702 chan->chan = 0;
2703 }
2704 for (j = 0; j < NUM_SESSIONS; j++) {
2705 struct fastrpc_session_ctx *sess = &chan->session[j];
2706
2707 if (sess->smmu.enabled) {
2708 arm_iommu_detach_device(sess->dev);
2709 sess->dev = 0;
2710 }
2711 if (sess->smmu.mapping) {
2712 arm_iommu_release_mapping(sess->smmu.mapping);
2713 sess->smmu.mapping = 0;
2714 }
2715 }
2716 }
2717}
2718
/* Platform driver glue; bound to the fastrpc_match_table compatibles. */
static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};
2727
/*
 * fastrpc_device_init() - module init.
 *
 * Order: register platform driver -> reserve NUM_CHANNELS char-dev
 * minors -> add one cdev -> create class -> create ONE device node
 * (minor 0, named after gcinfo[0]) -> register an SSR notifier per
 * channel -> create ION client -> create the debugfs root.
 * Each failure label unwinds the steps completed before it.
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	struct device *dev = 0;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				1));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	/*
	 * NOTE(review): a single node at minor 0 serves all channels,
	 * even though NUM_CHANNELS minors were reserved above.
	 */
	dev = device_create(me->class, NULL,
				MKDEV(MAJOR(me->dev_no), 0),
				NULL, gcinfo[0].name);
	VERIFY(err, !IS_ERR_OR_NULL(dev));
	if (err)
		goto device_create_bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		me->channel[i].dev = dev;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	debugfs_root = debugfs_create_dir("adsprpc", NULL);
	return 0;
device_create_bail:
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (me->channel[i].handle)
			subsys_notif_unregister_notifier(me->channel[i].handle,
							&me->channel[i].nb);
	}
	if (!IS_ERR_OR_NULL(dev))
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}
2796
2797static void __exit fastrpc_device_exit(void)
2798{
2799 struct fastrpc_apps *me = &gfa;
2800 int i;
2801
2802 fastrpc_file_list_dtor(me);
2803 fastrpc_deinit();
2804 for (i = 0; i < NUM_CHANNELS; i++) {
2805 if (!gcinfo[i].name)
2806 continue;
2807 device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
2808 subsys_notif_unregister_notifier(me->channel[i].handle,
2809 &me->channel[i].nb);
2810 }
2811 class_destroy(me->class);
2812 cdev_del(&me->cdev);
2813 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
2814 ion_client_destroy(me->client);
Sathish Ambley1ca68232017-01-19 10:32:55 -08002815 debugfs_remove_recursive(debugfs_root);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002816}
2817
/*
 * NOTE(review): late_initcall presumably defers init until glink/ion/
 * subsystem-restart providers are up — confirm against boot ordering.
 */
late_initcall(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");