blob: 822e90ded9ec687663f65535788d91e8b7449791 [file] [log] [blame]
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001/*
Sathish Ambleyae5ee542017-01-16 22:24:23 -08002 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
29#include <soc/qcom/subsystem_notif.h>
30#include <soc/qcom/subsystem_restart.h>
31#include <linux/scatterlist.h>
32#include <linux/fs.h>
33#include <linux/uaccess.h>
34#include <linux/device.h>
35#include <linux/of.h>
36#include <linux/of_address.h>
37#include <linux/of_platform.h>
38#include <linux/dma-contiguous.h>
39#include <linux/cma.h>
40#include <linux/iommu.h>
41#include <linux/kref.h>
42#include <linux/sort.h>
43#include <linux/msm_dma_iommu_mapping.h>
44#include <asm/dma-iommu.h>
45#include "adsprpc_compat.h"
46#include "adsprpc_shared.h"
Sathish Ambley1ca68232017-01-19 10:32:55 -080047#include <linux/debugfs.h>
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053048#include <linux/pm_qos.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070049#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
50#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
51#define TZ_PIL_AUTH_QDSP6_PROC 1
52#define FASTRPC_ENOSUCH 39
53#define VMID_SSC_Q6 5
54#define VMID_ADSP_Q6 6
Sathish Ambley1ca68232017-01-19 10:32:55 -080055#define DEBUGFS_SIZE 1024
Sathish Ambley69e1ab02016-10-18 10:28:15 -070056
57#define RPC_TIMEOUT (5 * HZ)
58#define BALIGN 128
59#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
60#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
Sathish Ambleybae51902017-07-03 15:00:49 -070061#define M_FDLIST (16)
62#define M_CRCLIST (64)
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +053063#define SESSION_ID_INDEX (30)
c_mtharufdac6892017-10-12 13:09:01 +053064#define FASTRPC_CTX_MAGIC (0xbeeddeed)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070065
66#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
67
68#define FASTRPC_LINK_STATE_DOWN (0x0)
69#define FASTRPC_LINK_STATE_UP (0x1)
70#define FASTRPC_LINK_DISCONNECTED (0x0)
71#define FASTRPC_LINK_CONNECTING (0x1)
72#define FASTRPC_LINK_CONNECTED (0x3)
73#define FASTRPC_LINK_DISCONNECTING (0x7)
74
Sathish Ambleya21b5b52017-01-11 16:11:01 -080075#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
76#define FASTRPC_STATIC_HANDLE_LISTENER (3)
77#define FASTRPC_STATIC_HANDLE_MAX (20)
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053078#define FASTRPC_LATENCY_CTRL_ENB (1)
Sathish Ambleya21b5b52017-01-11 16:11:01 -080079
/*
 * PERF(enb, cnt, ff): execute statement list 'ff'; when 'enb' is true,
 * time it with getnstimeofday() and add the elapsed ns to counter 'cnt'.
 * Terminate the statement list with PERF_END.
 */
#define PERF_END (void)0

#define PERF(enb, cnt, ff) \
	{\
		struct timespec startT = {0};\
		if (enb) {\
			getnstimeofday(&startT);\
		} \
		ff ;\
		if (enb) {\
			cnt += getnstimediff(&startT);\
		} \
	}
93
Sathish Ambley69e1ab02016-10-18 10:28:15 -070094static int fastrpc_glink_open(int cid);
95static void fastrpc_glink_close(void *chan, int cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -080096static struct dentry *debugfs_root;
97static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -070098
99static inline uint64_t buf_page_start(uint64_t buf)
100{
101 uint64_t start = (uint64_t) buf & PAGE_MASK;
102 return start;
103}
104
105static inline uint64_t buf_page_offset(uint64_t buf)
106{
107 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
108 return offset;
109}
110
111static inline int buf_num_pages(uint64_t buf, ssize_t len)
112{
113 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
114 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
115 int nPages = end - start + 1;
116 return nPages;
117}
118
119static inline uint64_t buf_page_size(uint32_t size)
120{
121 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
122
123 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
124}
125
/* Convert a 64-bit address value (as carried over RPC) to a pointer. */
static inline void *uint64_to_ptr(uint64_t addr)
{
	return (void *)(uintptr_t)addr;
}
132
/* Convert a pointer into the 64-bit representation used on the wire. */
static inline uint64_t ptr_to_uint64(void *ptr)
{
	return (uint64_t)(uintptr_t)ptr;
}
139
140struct fastrpc_file;
141
/* A DMA-coherent buffer owned by one fastrpc file instance. */
struct fastrpc_buf {
	struct hlist_node hn;		/* link in fl->bufs free-cache list */
	struct fastrpc_file *fl;	/* owning file/process context */
	void *virt;			/* kernel virtual address */
	uint64_t phys;			/* device address; SMMU cb folded into bits 32+ */
	ssize_t size;
};
149
150struct fastrpc_ctx_lst;
151
/*
 * Describes one user buffer argument and how it overlaps other arguments;
 * filled by context_build_overlap() to decide how much of each buffer
 * actually needs copying.
 */
struct overlap {
	uintptr_t start;	/* user VA of buffer start */
	uintptr_t end;		/* user VA one past buffer end */
	int raix;		/* remote-arg index this entry describes */
	uintptr_t mstart;	/* start of the region to copy */
	uintptr_t mend;		/* end of the region to copy (0,0 = fully covered) */
	uintptr_t offset;	/* bytes already covered by an earlier buffer */
};
160
/* Per-invocation state for one remote call in flight. */
struct smq_invoke_ctx {
	struct hlist_node hn;		/* link in pending/interrupted list */
	struct completion work;		/* completed when the DSP responds */
	int retval;			/* remote return value */
	int pid;			/* invoking thread, used to resume interrupted calls */
	int tgid;
	remote_arg_t *lpra;		/* local (native-sized) argument descriptors */
	remote_arg64_t *rpra;		/* 64-bit argument array shared with the DSP */
	int *fds;			/* per-argument ion fds (0/-1 = none) */
	unsigned int *attrs;		/* per-argument FASTRPC_ATTR_* flags */
	struct fastrpc_mmap **maps;	/* per-argument mappings */
	struct fastrpc_buf *buf;	/* metadata + argument copy buffer */
	ssize_t used;			/* bytes of buf consumed by copied args */
	struct fastrpc_file *fl;
	uint32_t sc;			/* scalars encoding in/out buffer/handle counts */
	struct overlap *overs;		/* per-buffer overlap records */
	struct overlap **overps;	/* overs sorted by start address */
	struct smq_msg msg;
	uint32_t *crc;			/* user pointer for returned CRC list, may be 0 */
	unsigned int magic;		/* FASTRPC_CTX_MAGIC while the ctx is live */
};
182
/* Invocation contexts of one file: in-flight and signal-interrupted. */
struct fastrpc_ctx_lst {
	struct hlist_head pending;
	struct hlist_head interrupted;
};
187
/* Per-session SMMU configuration. */
struct fastrpc_smmu {
	struct dma_iommu_mapping *mapping;
	int cb;		/* context bank; folded into bits 32+ of device addresses */
	int enabled;	/* non-zero: map buffers through msm_dma_map_sg* */
	int faults;
	int secure;	/* session intended for secure (ION_FLAG_SECURE) buffers */
	int coherent;
};
196
/* One device session on a channel; 'used' marks it as claimed. */
struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};
202
/* G-Link transport state and configuration for one channel. */
struct fastrpc_glink_info {
	int link_state;		/* FASTRPC_LINK_STATE_{DOWN,UP} */
	int port_state;		/* FASTRPC_LINK_{DISCONNECTED,CONNECTING,...} */
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};
210
/* Per-DSP channel state (one of adsp/mdsp/slpi/cdsp, see gcinfo). */
struct fastrpc_channel_ctx {
	char *name;			/* glink channel name */
	char *subsys;			/* subsystem-restart (SSR) name */
	void *chan;			/* open glink channel handle */
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;
	struct completion workport;
	struct notifier_block nb;	/* SSR notifier */
	struct kref kref;		/* channel open refcount */
	int sesscount;
	int ssrcount;			/* bumped on each subsystem restart */
	void *handle;
	int prevssrcount;
	int vmid;			/* non-zero: buffers are hyp-assigned to this VM */
	struct fastrpc_glink_info link;
};
228
/* Global driver state; single instance 'gfa'. */
struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;		/* global (cross-file) mappings */
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;	/* all open fastrpc_file instances */
	spinlock_t hlock;		/* protects drivers and maps lists */
	struct ion_client *client;
	struct device *dev;
	unsigned int latency;		/* PM QoS latency value */
};
244
/* An ion/dma-buf backed mapping made visible to the DSP. */
struct fastrpc_mmap {
	struct hlist_node hn;		/* link in fl->maps or gfa.maps */
	struct fastrpc_file *fl;
	struct fastrpc_apps *apps;
	int fd;				/* user fd backing this mapping */
	uint32_t flags;			/* mmap flags supplied by the caller */
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;			/* device address; SMMU cb in bits 32+ */
	ssize_t size;			/* mapped size */
	uintptr_t va;			/* user virtual address */
	ssize_t len;			/* requested length */
	int refs;			/* protected by fl->hlock */
	uintptr_t raddr;		/* remote-side address after mmap */
	int uncached;
	int secure;			/* requires the secure session */
	uintptr_t attr;			/* FASTRPC_ATTR_* flags */
};
265
/* Per-file accumulated profiling counters in ns (order matches PERF_KEYS). */
struct fastrpc_perf {
	int64_t count;		/* number of profiled invocations */
	int64_t flush;
	int64_t map;
	int64_t copy;
	int64_t link;		/* glink transfer time */
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;	/* cache invalidate time */
	int64_t invoke;		/* total invoke time */
};
277
/* Per-open-fd driver state. */
struct fastrpc_file {
	struct hlist_node hn;		/* link in gfa.drivers */
	spinlock_t hlock;		/* protects maps, bufs and clst lists */
	struct hlist_head maps;
	struct hlist_head bufs;		/* cached free buffers */
	struct fastrpc_ctx_lst clst;	/* pending/interrupted invocations */
	struct fastrpc_session_ctx *sctx;	/* default session */
	struct fastrpc_session_ctx *secsctx;	/* secure session, allocated lazily */
	uint32_t mode;
	uint32_t profile;		/* non-zero: collect perf counters */
	int sessionid;
	int tgid;
	int cid;			/* channel id, index into apps->channel */
	int ssrcount;			/* channel ssrcount at open time */
	int pd;
	int file_close;			/* set while the fd is being torn down */
	struct fastrpc_apps *apps;
	struct fastrpc_perf perf;
	struct dentry *debugfs_file;
	struct pm_qos_request pm_qos_req;
	int qos_request;
};
300
301static struct fastrpc_apps gfa;
302
/*
 * Static channel table, indexed by channel id (fl->cid): adsp, mdsp,
 * slpi, cdsp.  Order must stay in sync with NUM_CHANNELS.
 */
static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};
329
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800330static inline int64_t getnstimediff(struct timespec *start)
331{
332 int64_t ns;
333 struct timespec ts, b;
334
335 getnstimeofday(&ts);
336 b = timespec_sub(ts, *start);
337 ns = timespec_to_ns(&b);
338 return ns;
339}
340
/*
 * Release a buffer.  With @cache set, park it on the owner's free list for
 * later reuse; otherwise hand the pages back to HLOS if the channel runs
 * under a separate VM, strip the SMMU context-bank bits from the device
 * address and free the DMA memory.
 */
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == 0 ? 0 : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		/* keep the buffer for future fastrpc_buf_alloc() calls */
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		/* phys carries the context bank in its upper 32 bits */
		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			/* reassign the pages back to HLOS before freeing */
			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->dev, buf->size, buf->virt,
					buf->phys);
	}
	kfree(buf);
}
372
373static void fastrpc_buf_list_free(struct fastrpc_file *fl)
374{
375 struct fastrpc_buf *buf, *free;
376
377 do {
378 struct hlist_node *n;
379
380 free = 0;
381 spin_lock(&fl->hlock);
382 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
383 hlist_del_init(&buf->hn);
384 free = buf;
385 break;
386 }
387 spin_unlock(&fl->hlock);
388 if (free)
389 fastrpc_buf_free(free, 0);
390 } while (free);
391}
392
393static void fastrpc_mmap_add(struct fastrpc_mmap *map)
394{
395 struct fastrpc_file *fl = map->fl;
396
397 spin_lock(&fl->hlock);
398 hlist_add_head(&map->hn, &fl->maps);
399 spin_unlock(&fl->hlock);
400}
401
402static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800403 ssize_t len, int mflags, int refs, struct fastrpc_mmap **ppmap)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700404{
405 struct fastrpc_mmap *match = 0, *map;
406 struct hlist_node *n;
407
408 spin_lock(&fl->hlock);
409 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
410 if (va >= map->va &&
411 va + len <= map->va + map->len &&
412 map->fd == fd) {
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800413 if (refs)
414 map->refs++;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700415 match = map;
416 break;
417 }
418 }
419 spin_unlock(&fl->hlock);
420 if (match) {
421 *ppmap = match;
422 return 0;
423 }
424 return -ENOTTY;
425}
426
427static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
428 ssize_t len, struct fastrpc_mmap **ppmap)
429{
430 struct fastrpc_mmap *match = 0, *map;
431 struct hlist_node *n;
432 struct fastrpc_apps *me = &gfa;
433
434 spin_lock(&me->hlock);
435 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
436 if (map->raddr == va &&
437 map->raddr + map->len == va + len &&
438 map->refs == 1) {
439 match = map;
440 hlist_del_init(&map->hn);
441 break;
442 }
443 }
444 spin_unlock(&me->hlock);
445 if (match) {
446 *ppmap = match;
447 return 0;
448 }
449 spin_lock(&fl->hlock);
450 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
451 if (map->raddr == va &&
452 map->raddr + map->len == va + len &&
453 map->refs == 1) {
454 match = map;
455 hlist_del_init(&map->hn);
456 break;
457 }
458 }
459 spin_unlock(&fl->hlock);
460 if (match) {
461 *ppmap = match;
462 return 0;
463 }
464 return -ENOTTY;
465}
466
/*
 * Drop one reference on @map; on the last reference undo everything
 * fastrpc_mmap_create() set up: ion handle, SMMU mapping, hypervisor
 * assignment and the dma-buf attachment, in that order.
 */
static void fastrpc_mmap_free(struct fastrpc_mmap *map)
{
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;
	int destVM[1] = {VMID_HLOS};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	if (!map)
		return;
	fl = map->fl;
	spin_lock(&fl->hlock);
	map->refs--;
	if (!map->refs)
		hlist_del_init(&map->hn);
	spin_unlock(&fl->hlock);
	if (map->refs > 0)
		return;	/* other users remain */
	/* secure buffers were mapped through the secure session */
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;

	if (!IS_ERR_OR_NULL(map->handle))
		ion_free(fl->apps->client, map->handle);
	if (sess && sess->smmu.enabled) {
		/* size/phys non-zero means the SMMU mapping was created */
		if (map->size || map->phys)
			msm_dma_unmap_sg(sess->dev,
				map->table->sgl,
				map->table->nents, DMA_BIDIRECTIONAL,
				map->buf);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid && map->phys) {
		int srcVM[2] = {VMID_HLOS, vmid};

		/* return ownership of the pages to HLOS */
		hyp_assign_phys(map->phys, buf_page_size(map->size),
			srcVM, 2, destVM, destVMperm, 1);
	}

	if (!IS_ERR_OR_NULL(map->table))
		dma_buf_unmap_attachment(map->attach, map->table,
				DMA_BIDIRECTIONAL);
	if (!IS_ERR_OR_NULL(map->attach))
		dma_buf_detach(map->buf, map->attach);
	if (!IS_ERR_OR_NULL(map->buf))
		dma_buf_put(map->buf);
	kfree(map);
}
516
517static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
518 struct fastrpc_session_ctx **session);
519
/*
 * Create (or reuse) a DSP-visible mapping for user fd @fd covering
 * [va, va+len): import the ion buffer, attach and map the dma-buf on the
 * chosen session device, map it through the SMMU when enabled, and
 * hyp-assign the pages when the channel runs under a separate VM.
 * Returns 0 with *@ppmap set, or a negative error (partial state is
 * released via fastrpc_mmap_free()).
 */
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, ssize_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = 0;
	unsigned long attrs;
	unsigned long flags;
	int err = 0, vmid;

	/* reuse an existing mapping (takes a reference) when one covers us */
	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	VERIFY(err, !IS_ERR_OR_NULL(map->handle =
			ion_import_dma_buf_fd(fl->apps->client, fd)));
	if (err)
		goto bail;
	VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
	if (err)
		goto bail;

	map->uncached = !ION_IS_CACHED(flags);
	if (map->attr & FASTRPC_ATTR_NOVA)
		map->uncached = 1;

	/* secure buffers must go through the (lazily created) secure session */
	map->secure = flags & ION_FLAG_SECURE;
	if (map->secure) {
		if (!fl->secsctx)
			err = fastrpc_session_alloc(chan, 1,
						&fl->secsctx);
		if (err)
			goto bail;
	}
	if (map->secure)
		sess = fl->secsctx;
	else
		sess = fl->sctx;
	VERIFY(err, !IS_ERR_OR_NULL(sess));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->attach =
			dma_buf_attach(map->buf, sess->dev)));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
	if (err)
		goto bail;
	if (sess->smmu.enabled) {
		attrs = DMA_ATTR_EXEC_MAPPING;

		/* pick coherency attributes from arg flags / session config */
		if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
			(sess->smmu.coherent && map->uncached))
			attrs |= DMA_ATTR_FORCE_NON_COHERENT;
		else if (map->attr & FASTRPC_ATTR_COHERENT)
			attrs |= DMA_ATTR_FORCE_COHERENT;

		VERIFY(err, map->table->nents ==
			msm_dma_map_sg_attrs(sess->dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
		if (err)
			goto bail;
	} else {
		/* without an SMMU the buffer must be physically contiguous */
		VERIFY(err, map->table->nents == 1);
		if (err)
			goto bail;
	}
	map->phys = sg_dma_address(map->table->sgl);
	if (sess->smmu.cb) {
		/* tag the context bank into the upper address bits */
		map->phys += ((uint64_t)sess->smmu.cb << 32);
		map->size = sg_dma_len(map->table->sgl);
	} else {
		map->size = buf_page_size(len);
	}
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		/* share the pages with the remote subsystem's VM */
		VERIFY(err, !hyp_assign_phys(map->phys,
				buf_page_size(map->size),
				srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}
	map->va = va;
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}
636
/*
 * Obtain a DMA buffer of at least @size bytes for @fl: reuse the smallest
 * adequate buffer from the per-file cache, otherwise allocate a fresh
 * dma-coherent buffer (retrying once after flushing the cache), fold the
 * SMMU context bank into the device address and hyp-assign the pages when
 * the channel runs under a separate VM.
 */
static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
			     struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = 0, *fr = 0;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = 0;
	VERIFY(err, buf = kzalloc(sizeof(*buf), GFP_KERNEL));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = 0;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
				       (void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
					       (void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	/* encode the SMMU context bank in the upper address bits */
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		/* share the pages with the remote subsystem's VM */
		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
				srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}
702
703
/*
 * Resume a call this thread previously had interrupted (e.g. by a signal):
 * find the saved context for the current pid, verify it matches the same
 * scalars and file, and move it back to the pending list.  Returns 0 (with
 * *po set when a context was resumed) or -1 when the retried call does not
 * match the saved one.
 */
static int context_restore_interrupted(struct fastrpc_file *fl,
				struct fastrpc_ioctl_invoke_crc *inv,
				struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = 0, *ictx = 0;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			/* retried call must be the interrupted one */
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}
731
732#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
733static int overlap_ptr_cmp(const void *a, const void *b)
734{
735 struct overlap *pa = *((struct overlap **)a);
736 struct overlap *pb = *((struct overlap **)b);
737 /* sort with lowest starting buffer first */
738 int st = CMP(pa->start, pb->start);
739 /* sort with highest ending buffer first */
740 int ed = CMP(pb->end, pa->end);
741 return st == 0 ? ed : st;
742}
743
/*
 * Fill ctx->overs/overps for the in/out buffer arguments and compute, for
 * each buffer, the sub-range [mstart, mend) not already covered by an
 * earlier (lower-address) buffer, so overlapping user buffers are copied
 * only once.  Returns 0 on success, or an error when a non-empty buffer's
 * end address wraps around.
 */
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			/* reject address-space wrap-around */
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	/* sort by start ascending, end descending (see overlap_ptr_cmp) */
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, 0);
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			/* buffer begins inside an earlier one */
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				/* fully contained: nothing extra to copy */
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			/* disjoint from everything seen so far: copy whole */
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}
789
/*
 * Copy between user and kernel buffers.  When 'kernel' is false the data
 * crosses the user boundary via copy_{from,to}_user() and 'err' is set on
 * fault; kernel-originated invocations copy with a plain memmove().
 */
#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst), (src),\
							(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((dst), (src),\
						      (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)
807
808
809static void context_free(struct smq_invoke_ctx *ctx);
810
/*
 * Allocate and initialise an invocation context for @invokefd: one
 * allocation holds the ctx plus its per-argument arrays (maps, lpra, fds,
 * attrs, overs, overps), laid out back to back.  Copies the argument
 * descriptors (and optional fd/attr arrays) from the caller, computes
 * buffer overlaps, and queues the context on the pending list.
 * @kernel selects kernel-internal vs user-space copies.
 */
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			struct fastrpc_ioctl_invoke_crc *invokefd,
			struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	/* trailing space for all per-argument arrays */
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	/* carve the per-argument arrays out of the tail allocation */
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}
	ctx->crc = (uint32_t *)invokefd->crc;
	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = fl->tgid;
	init_completion(&ctx->work);
	/* stamp the magic so responses can validate the context */
	ctx->magic = FASTRPC_CTX_MAGIC;

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}
881
882static void context_save_interrupted(struct smq_invoke_ctx *ctx)
883{
884 struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
885
886 spin_lock(&ctx->fl->hlock);
887 hlist_del_init(&ctx->hn);
888 hlist_add_head(&ctx->hn, &clst->interrupted);
889 spin_unlock(&ctx->fl->hlock);
890 /* free the cache on power collapse */
891 fastrpc_buf_list_free(ctx->fl);
892}
893
/*
 * Tear down an invocation context: unlink it, drop the per-buffer mmap
 * references, return the metadata buffer to the cache and poison the
 * magic before freeing.
 *
 * NOTE(review): only the inbuf+outbuf map entries are released here; the
 * handle-argument maps created in get_args() appear to be released on
 * another path — confirm, otherwise they would leak.
 */
static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i]);
	/* cache the metadata buffer for reuse */
	fastrpc_buf_free(ctx->buf, 1);
	ctx->magic = 0;
	kfree(ctx);
}
908
/* Record the DSP's return value and wake the waiting invoker. */
static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}
914
915
916static void fastrpc_notify_users(struct fastrpc_file *me)
917{
918 struct smq_invoke_ctx *ictx;
919 struct hlist_node *n;
920
921 spin_lock(&me->hlock);
922 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
923 complete(&ictx->work);
924 }
925 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
926 complete(&ictx->work);
927 }
928 spin_unlock(&me->hlock);
929
930}
931
932static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
933{
934 struct fastrpc_file *fl;
935 struct hlist_node *n;
936
937 spin_lock(&me->hlock);
938 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
939 if (fl->cid == cid)
940 fastrpc_notify_users(fl);
941 }
942 spin_unlock(&me->hlock);
943
944}
945static void context_list_ctor(struct fastrpc_ctx_lst *me)
946{
947 INIT_HLIST_HEAD(&me->interrupted);
948 INIT_HLIST_HEAD(&me->pending);
949}
950
951static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
952{
953 struct fastrpc_ctx_lst *clst = &fl->clst;
954 struct smq_invoke_ctx *ictx = 0, *ctxfree;
955 struct hlist_node *n;
956
957 do {
958 ctxfree = 0;
959 spin_lock(&fl->hlock);
960 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
961 hlist_del_init(&ictx->hn);
962 ctxfree = ictx;
963 break;
964 }
965 spin_unlock(&fl->hlock);
966 if (ctxfree)
967 context_free(ctxfree);
968 } while (ctxfree);
969 do {
970 ctxfree = 0;
971 spin_lock(&fl->hlock);
972 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
973 hlist_del_init(&ictx->hn);
974 ctxfree = ictx;
975 break;
976 }
977 spin_unlock(&fl->hlock);
978 if (ctxfree)
979 context_free(ctxfree);
980 } while (ctxfree);
981}
982
983static int fastrpc_file_free(struct fastrpc_file *fl);
984static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
985{
986 struct fastrpc_file *fl, *free;
987 struct hlist_node *n;
988
989 do {
990 free = 0;
991 spin_lock(&me->hlock);
992 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
993 hlist_del_init(&fl->hn);
994 free = fl;
995 break;
996 }
997 spin_unlock(&me->hlock);
998 if (free)
999 fastrpc_file_free(free);
1000 } while (free);
1001}
1002
1003static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
1004{
1005 remote_arg64_t *rpra;
1006 remote_arg_t *lpra = ctx->lpra;
1007 struct smq_invoke_buf *list;
1008 struct smq_phy_page *pages, *ipage;
1009 uint32_t sc = ctx->sc;
1010 int inbufs = REMOTE_SCALARS_INBUFS(sc);
1011 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001012 int handles, bufs = inbufs + outbufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001013 uintptr_t args;
1014 ssize_t rlen = 0, copylen = 0, metalen = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001015 int i, oix;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001016 int err = 0;
1017 int mflags = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001018 uint64_t *fdlist;
Sathish Ambleybae51902017-07-03 15:00:49 -07001019 uint32_t *crclist;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001020
1021 /* calculate size of the metadata */
1022 rpra = 0;
1023 list = smq_invoke_buf_start(rpra, sc);
1024 pages = smq_phy_page_start(sc, list);
1025 ipage = pages;
1026
1027 for (i = 0; i < bufs; ++i) {
1028 uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
1029 ssize_t len = lpra[i].buf.len;
1030
1031 if (ctx->fds[i] && (ctx->fds[i] != -1))
1032 fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1033 ctx->attrs[i], buf, len,
1034 mflags, &ctx->maps[i]);
1035 ipage += 1;
1036 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001037 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1038 for (i = bufs; i < bufs + handles; i++) {
1039 VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1040 FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
1041 if (err)
1042 goto bail;
1043 ipage += 1;
1044 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001045 metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
1046 (sizeof(uint32_t) * M_CRCLIST);
1047
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001048 /* calculate len requreed for copying */
1049 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1050 int i = ctx->overps[oix]->raix;
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001051 uintptr_t mstart, mend;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001052 ssize_t len = lpra[i].buf.len;
1053
1054 if (!len)
1055 continue;
1056 if (ctx->maps[i])
1057 continue;
1058 if (ctx->overps[oix]->offset == 0)
1059 copylen = ALIGN(copylen, BALIGN);
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001060 mstart = ctx->overps[oix]->mstart;
1061 mend = ctx->overps[oix]->mend;
1062 VERIFY(err, (mend - mstart) <= LONG_MAX);
1063 if (err)
1064 goto bail;
1065 copylen += mend - mstart;
1066 VERIFY(err, copylen >= 0);
1067 if (err)
1068 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001069 }
1070 ctx->used = copylen;
1071
1072 /* allocate new buffer */
1073 if (copylen) {
1074 VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
1075 if (err)
1076 goto bail;
1077 }
Tharun Kumar Merugue3361f92017-06-22 10:45:43 +05301078 if (ctx->buf->virt && metalen <= copylen)
1079 memset(ctx->buf->virt, 0, metalen);
1080
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001081 /* copy metadata */
1082 rpra = ctx->buf->virt;
1083 ctx->rpra = rpra;
1084 list = smq_invoke_buf_start(rpra, sc);
1085 pages = smq_phy_page_start(sc, list);
1086 ipage = pages;
1087 args = (uintptr_t)ctx->buf->virt + metalen;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001088 for (i = 0; i < bufs + handles; ++i) {
1089 if (lpra[i].buf.len)
1090 list[i].num = 1;
1091 else
1092 list[i].num = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001093 list[i].pgidx = ipage - pages;
1094 ipage++;
1095 }
1096 /* map ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001097 PERF(ctx->fl->profile, ctx->fl->perf.map,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001098 for (i = 0; i < inbufs + outbufs; ++i) {
1099 struct fastrpc_mmap *map = ctx->maps[i];
1100 uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
1101 ssize_t len = lpra[i].buf.len;
1102
1103 rpra[i].buf.pv = 0;
1104 rpra[i].buf.len = len;
1105 if (!len)
1106 continue;
1107 if (map) {
1108 struct vm_area_struct *vma;
1109 uintptr_t offset;
1110 int num = buf_num_pages(buf, len);
1111 int idx = list[i].pgidx;
1112
1113 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001114 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001115 } else {
1116 down_read(&current->mm->mmap_sem);
1117 VERIFY(err, NULL != (vma = find_vma(current->mm,
1118 map->va)));
1119 if (err) {
1120 up_read(&current->mm->mmap_sem);
1121 goto bail;
1122 }
1123 offset = buf_page_start(buf) - vma->vm_start;
1124 up_read(&current->mm->mmap_sem);
1125 VERIFY(err, offset < (uintptr_t)map->size);
1126 if (err)
1127 goto bail;
1128 }
1129 pages[idx].addr = map->phys + offset;
1130 pages[idx].size = num << PAGE_SHIFT;
1131 }
1132 rpra[i].buf.pv = buf;
1133 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001134 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001135 for (i = bufs; i < bufs + handles; ++i) {
1136 struct fastrpc_mmap *map = ctx->maps[i];
1137
1138 pages[i].addr = map->phys;
1139 pages[i].size = map->size;
1140 }
1141 fdlist = (uint64_t *)&pages[bufs + handles];
1142 for (i = 0; i < M_FDLIST; i++)
1143 fdlist[i] = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001144 crclist = (uint32_t *)&fdlist[M_FDLIST];
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301145 memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001146
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001147 /* copy non ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001148 PERF(ctx->fl->profile, ctx->fl->perf.copy,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001149 rlen = copylen - metalen;
1150 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1151 int i = ctx->overps[oix]->raix;
1152 struct fastrpc_mmap *map = ctx->maps[i];
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001153 ssize_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001154 uint64_t buf;
1155 ssize_t len = lpra[i].buf.len;
1156
1157 if (!len)
1158 continue;
1159 if (map)
1160 continue;
1161 if (ctx->overps[oix]->offset == 0) {
1162 rlen -= ALIGN(args, BALIGN) - args;
1163 args = ALIGN(args, BALIGN);
1164 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001165 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001166 VERIFY(err, rlen >= mlen);
1167 if (err)
1168 goto bail;
1169 rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
1170 pages[list[i].pgidx].addr = ctx->buf->phys -
1171 ctx->overps[oix]->offset +
1172 (copylen - rlen);
1173 pages[list[i].pgidx].addr =
1174 buf_page_start(pages[list[i].pgidx].addr);
1175 buf = rpra[i].buf.pv;
1176 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1177 if (i < inbufs) {
1178 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1179 lpra[i].buf.pv, len);
1180 if (err)
1181 goto bail;
1182 }
1183 args = args + mlen;
1184 rlen -= mlen;
1185 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001186 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001187
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001188 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001189 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1190 int i = ctx->overps[oix]->raix;
1191 struct fastrpc_mmap *map = ctx->maps[i];
1192
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001193 if (map && map->uncached)
1194 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301195 if (ctx->fl->sctx->smmu.coherent &&
1196 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1197 continue;
1198 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1199 continue;
1200
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001201 if (rpra[i].buf.len && ctx->overps[oix]->mstart)
1202 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1203 uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
1204 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001205 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001206 for (i = bufs; i < bufs + handles; i++) {
1207 rpra[i].dma.fd = ctx->fds[i];
1208 rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1209 rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001210 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001211
1212 if (!ctx->fl->sctx->smmu.coherent) {
1213 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001214 dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001215 PERF_END);
1216 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001217 bail:
1218 return err;
1219}
1220
/*
 * put_args() - propagate the results of a completed remote invocation
 * back to the caller.
 *
 * Walks the output buffers: buffers that were staged into the kernel
 * scratch buffer (no ion mapping) are copied back to the caller's memory,
 * while ion-mapped buffers were written in place by the DSP and only need
 * their per-call mapping reference dropped.  The fd list that trails the
 * page array in the metadata (layout produced by get_args()) names
 * mappings the DSP wants released; the CRC list following it is copied to
 * userspace when the caller asked for CRCs.
 *
 * @kernel: nonzero for in-kernel callers (K_COPY_TO_USER then uses
 *          memcpy() instead of copy_to_user()).
 * @ctx:    invocation context holding rpra/lpra/maps.
 * @upra:   caller's remote-arg array; unused here — destinations are
 *          taken from ctx->lpra.
 *
 * Returns 0 on success or a negative error from the user copies.
 */
static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		    remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	uint32_t *crclist = NULL;

	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	/* Recompute the metadata layout exactly as get_args() built it:
	 * invoke-buf list, page array, then the fd and crc lists.
	 */
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	crclist = (uint32_t *)(fdlist + M_FDLIST);

	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			/* Copied buffer: move the DSP-written bytes back. */
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			/* ion-mapped buffer: just drop this call's ref. */
			fastrpc_mmap_free(ctx->maps[i]);
			ctx->maps[i] = 0;
		}
	}
	if (inbufs + outbufs + handles) {
		/* fd list is zero-terminated; each entry is an fd whose
		 * host-side mapping the DSP asked to be freed.
		 */
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap);
		}
	}
	if (ctx->crc && crclist && rpra)
		K_COPY_TO_USER(err, kernel, (void __user *)ctx->crc,
			crclist, M_CRCLIST*sizeof(uint32_t));

 bail:
	return err;
}
1272
/*
 * inv_args_pre() - pre-invocation cache maintenance for output buffers.
 *
 * For every output buffer that will be invalidated after the call, flush
 * the (at most one) cache line straddling each unaligned buffer edge now,
 * so dirty data sharing a line with the buffer is not lost by the later
 * invalidate.  Buffers that are uncached, covered by an IO-coherent SMMU
 * session (unless marked FASTRPC_ATTR_NON_COHERENT), explicitly marked
 * FASTRPC_ATTR_COHERENT, or living on the same page as the metadata are
 * skipped.
 */
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* Buffer shares a page with the metadata: handled by the
		 * whole-metadata flush in get_args().
		 */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		/* Flush the partial cache line at the buffer start. */
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		/* Flush the partial cache line at the buffer end. */
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
			rpra[i].buf.len);
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}
1308
/*
 * inv_args() - post-invocation cache invalidation.
 *
 * After the DSP has written the output buffers, invalidate the CPU cache
 * over each one so the caller reads fresh data.  The same skip rules as
 * inv_args_pre() apply (uncached maps, coherent sessions/attrs, buffers
 * on the metadata page).  ion-backed buffers are invalidated through the
 * ion cache-op API; plain buffers with dmac_inv_range().  Finally the
 * metadata region itself is invalidated so rpra/fd/crc contents written
 * by the DSP are visible.
 */
static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						 + rpra[i].buf.len));
	}

	/* Make the DSP-written metadata (fd list, crc list) visible. */
	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}
1348
/*
 * fastrpc_invoke_send() - package an invocation into an smq_msg and
 * transmit it to the DSP over the channel's glink transport.
 *
 * The context pointer itself (tagged in its low bit with fl->pd) is sent
 * as the 64-bit message cookie; fastrpc_glink_notify_rx() masks the tag
 * off to recover the context.  Fails with -ECONNRESET if a subsystem
 * restart happened since this fd attached, and refuses to transmit unless
 * the glink port is in the CONNECTED state.
 *
 * @ctx:    invocation context (msg storage, scalars, staged buffer).
 * @kernel: nonzero for in-kernel callers; their pid field is sent as 0.
 * @handle: remote handle being invoked.
 *
 * Returns 0 on success or a negative error.
 */
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, 0 != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = fl->tgid;
	msg->tid = current->pid;
	/* Distinguish multiple sessions of one process on the DSP side. */
	if (fl->sessionid)
		msg->tid |= (1 << SESSION_ID_INDEX);
	if (kernel)
		msg->pid = 0;
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	/* Physical page of the staged argument buffer, if any. */
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
		FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
 bail:
	return err;
}
1386
1387static void fastrpc_init(struct fastrpc_apps *me)
1388{
1389 int i;
1390
1391 INIT_HLIST_HEAD(&me->drivers);
1392 spin_lock_init(&me->hlock);
1393 mutex_init(&me->smd_mutex);
1394 me->channel = &gcinfo[0];
1395 for (i = 0; i < NUM_CHANNELS; i++) {
1396 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301397 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001398 me->channel[i].sesscount = 0;
1399 }
1400}
1401
1402static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1403
1404static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
1405 uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07001406 struct fastrpc_ioctl_invoke_crc *inv)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001407{
1408 struct smq_invoke_ctx *ctx = 0;
1409 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
1410 int cid = fl->cid;
1411 int interrupted = 0;
1412 int err = 0;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001413 struct timespec invoket;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001414
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001415 if (fl->profile)
1416 getnstimeofday(&invoket);
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301417
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301418
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301419 VERIFY(err, fl->sctx != NULL);
1420 if (err)
1421 goto bail;
1422 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1423 if (err)
1424 goto bail;
1425
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001426 if (!kernel) {
1427 VERIFY(err, 0 == context_restore_interrupted(fl, inv,
1428 &ctx));
1429 if (err)
1430 goto bail;
1431 if (fl->sctx->smmu.faults)
1432 err = FASTRPC_ENOSUCH;
1433 if (err)
1434 goto bail;
1435 if (ctx)
1436 goto wait;
1437 }
1438
1439 VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
1440 if (err)
1441 goto bail;
1442
1443 if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001444 PERF(fl->profile, fl->perf.getargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001445 VERIFY(err, 0 == get_args(kernel, ctx));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001446 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001447 if (err)
1448 goto bail;
1449 }
1450
Sathish Ambleyc432b502017-06-05 12:03:42 -07001451 if (!fl->sctx->smmu.coherent)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001452 inv_args_pre(ctx);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001453 PERF(fl->profile, fl->perf.link,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001454 VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001455 PERF_END);
1456
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001457 if (err)
1458 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001459 wait:
1460 if (kernel)
1461 wait_for_completion(&ctx->work);
1462 else {
1463 interrupted = wait_for_completion_interruptible(&ctx->work);
1464 VERIFY(err, 0 == (err = interrupted));
1465 if (err)
1466 goto bail;
1467 }
Sathish Ambleyc432b502017-06-05 12:03:42 -07001468
1469 PERF(fl->profile, fl->perf.invargs,
1470 if (!fl->sctx->smmu.coherent)
1471 inv_args(ctx);
1472 PERF_END);
1473
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001474 VERIFY(err, 0 == (err = ctx->retval));
1475 if (err)
1476 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001477
1478 PERF(fl->profile, fl->perf.putargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001479 VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001480 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001481 if (err)
1482 goto bail;
1483 bail:
1484 if (ctx && interrupted == -ERESTARTSYS)
1485 context_save_interrupted(ctx);
1486 else if (ctx)
1487 context_free(ctx);
1488 if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
1489 err = ECONNRESET;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001490
1491 if (fl->profile && !interrupted) {
1492 if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
1493 fl->perf.invoke += getnstimediff(&invoket);
1494 if (!(invoke->handle >= 0 &&
1495 invoke->handle <= FASTRPC_STATIC_HANDLE_MAX))
1496 fl->perf.count++;
1497 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001498 return err;
1499}
1500
Sathish Ambley36849af2017-02-02 09:35:55 -08001501static int fastrpc_channel_open(struct fastrpc_file *fl);
/*
 * fastrpc_init_process() - create or attach the remote user process that
 * will service this fd's invocations.
 *
 * FASTRPC_INIT_ATTACH: attach to the existing remote process; sends only
 * the caller's tgid and leaves fl->pd == 0.
 *
 * FASTRPC_INIT_CREATE: spawn a dynamic user process on the DSP.  Maps the
 * ELF file (when a length is given) and the process memory region, then
 * sends name/file/pages plus, when uproc->attrs is set, the attrs and
 * siglen extension arguments (scalars 7,6 instead of 6,4).  Sets
 * fl->pd = 1 (user PD).
 *
 * Any other flag value yields -ENOTTY.  On error the memory mapping is
 * released; the file mapping is always released (the remote side does not
 * keep it past init).
 *
 * Returns 0 on success or a negative errno.
 */
static int fastrpc_init_process(struct fastrpc_file *fl,
				struct fastrpc_ioctl_init_attrs *uproc)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_crc ioctl;
	struct fastrpc_ioctl_init *init = &uproc->init;
	struct smq_phy_page pages[1];
	struct fastrpc_mmap *file = 0, *mem = 0;

	/* Make sure the glink channel to the DSP is up first. */
	VERIFY(err, !fastrpc_channel_open(fl));
	if (err)
		goto bail;
	if (init->flags == FASTRPC_INIT_ATTACH) {
		remote_arg_t ra[1];
		int tgid = fl->tgid;

		ra[0].buf.pv = (void *)&tgid;
		ra[0].buf.len = sizeof(tgid);
		ioctl.inv.handle = 1;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = 0;
		ioctl.attrs = 0;
		ioctl.crc = NULL;
		fl->pd = 0;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else if (init->flags == FASTRPC_INIT_CREATE) {
		remote_arg_t ra[6];
		int fds[6];
		int mflags = 0;
		/* In-band header; layout is part of the remote ABI. */
		struct {
			int pgid;
			int namelen;
			int filelen;
			int pageslen;
			int attrs;
			int siglen;
		} inbuf;

		inbuf.pgid = fl->tgid;
		inbuf.namelen = strlen(current->comm) + 1;
		inbuf.filelen = init->filelen;
		fl->pd = 1;
		if (init->filelen) {
			/* Map the process ELF so the DSP can read it. */
			VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
				init->file, init->filelen, mflags, &file));
			if (err)
				goto bail;
		}
		inbuf.pageslen = 1;
		/* Map the donated memory region for the new process. */
		VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
				init->mem, init->memlen, mflags, &mem));
		if (err)
			goto bail;
		inbuf.pageslen = 1;
		ra[0].buf.pv = (void *)&inbuf;
		ra[0].buf.len = sizeof(inbuf);
		fds[0] = 0;

		ra[1].buf.pv = (void *)current->comm;
		ra[1].buf.len = inbuf.namelen;
		fds[1] = 0;

		ra[2].buf.pv = (void *)init->file;
		ra[2].buf.len = inbuf.filelen;
		fds[2] = init->filefd;

		pages[0].addr = mem->phys;
		pages[0].size = mem->size;
		ra[3].buf.pv = (void *)pages;
		ra[3].buf.len = 1 * sizeof(*pages);
		fds[3] = 0;

		inbuf.attrs = uproc->attrs;
		ra[4].buf.pv = (void *)&(inbuf.attrs);
		ra[4].buf.len = sizeof(inbuf.attrs);
		fds[4] = 0;

		inbuf.siglen = uproc->siglen;
		ra[5].buf.pv = (void *)&(inbuf.siglen);
		ra[5].buf.len = sizeof(inbuf.siglen);
		fds[5] = 0;

		ioctl.inv.handle = 1;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
		/* Extended form carries attrs and siglen as extra inputs. */
		if (uproc->attrs)
			ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = fds;
		ioctl.attrs = 0;
		ioctl.crc = NULL;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else {
		err = -ENOTTY;
	}
bail:
	if (mem && err)
		fastrpc_mmap_free(mem);
	if (file)
		fastrpc_mmap_free(file);
	return err;
}
1610
1611static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
1612{
1613 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001614 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001615 remote_arg_t ra[1];
1616 int tgid = 0;
1617
Sathish Ambley36849af2017-02-02 09:35:55 -08001618 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1619 if (err)
1620 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001621 VERIFY(err, fl->apps->channel[fl->cid].chan != 0);
1622 if (err)
1623 goto bail;
1624 tgid = fl->tgid;
1625 ra[0].buf.pv = (void *)&tgid;
1626 ra[0].buf.len = sizeof(tgid);
1627 ioctl.inv.handle = 1;
1628 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
1629 ioctl.inv.pra = ra;
1630 ioctl.fds = 0;
1631 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001632 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001633 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1634 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1635bail:
1636 return err;
1637}
1638
1639static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
1640 struct fastrpc_mmap *map)
1641{
Sathish Ambleybae51902017-07-03 15:00:49 -07001642 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001643 struct smq_phy_page page;
1644 int num = 1;
1645 remote_arg_t ra[3];
1646 int err = 0;
1647 struct {
1648 int pid;
1649 uint32_t flags;
1650 uintptr_t vaddrin;
1651 int num;
1652 } inargs;
1653 struct {
1654 uintptr_t vaddrout;
1655 } routargs;
1656
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301657 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001658 inargs.vaddrin = (uintptr_t)map->va;
1659 inargs.flags = flags;
1660 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
1661 ra[0].buf.pv = (void *)&inargs;
1662 ra[0].buf.len = sizeof(inargs);
1663 page.addr = map->phys;
1664 page.size = map->size;
1665 ra[1].buf.pv = (void *)&page;
1666 ra[1].buf.len = num * sizeof(page);
1667
1668 ra[2].buf.pv = (void *)&routargs;
1669 ra[2].buf.len = sizeof(routargs);
1670
1671 ioctl.inv.handle = 1;
1672 if (fl->apps->compat)
1673 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
1674 else
1675 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
1676 ioctl.inv.pra = ra;
1677 ioctl.fds = 0;
1678 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001679 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001680 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1681 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1682 map->raddr = (uintptr_t)routargs.vaddrout;
1683
1684 return err;
1685}
1686
1687static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
1688 struct fastrpc_mmap *map)
1689{
Sathish Ambleybae51902017-07-03 15:00:49 -07001690 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001691 remote_arg_t ra[1];
1692 int err = 0;
1693 struct {
1694 int pid;
1695 uintptr_t vaddrout;
1696 ssize_t size;
1697 } inargs;
1698
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301699 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001700 inargs.size = map->size;
1701 inargs.vaddrout = map->raddr;
1702 ra[0].buf.pv = (void *)&inargs;
1703 ra[0].buf.len = sizeof(inargs);
1704
1705 ioctl.inv.handle = 1;
1706 if (fl->apps->compat)
1707 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
1708 else
1709 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
1710 ioctl.inv.pra = ra;
1711 ioctl.fds = 0;
1712 ioctl.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001713 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001714 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1715 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1716 return err;
1717}
1718
1719static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
1720 ssize_t len, struct fastrpc_mmap **ppmap);
1721
1722static void fastrpc_mmap_add(struct fastrpc_mmap *map);
1723
/*
 * fastrpc_internal_munmap() - undo a fastrpc_internal_mmap().
 *
 * Order matters: the mapping is first removed from the fd's list (so
 * lookups no longer find it), then unmapped on the DSP, and only then
 * freed locally.  If the remote unmap fails, the mapping is re-added so
 * state stays consistent and the caller may retry.
 *
 * Returns 0 on success or a negative errno.
 */
static int fastrpc_internal_munmap(struct fastrpc_file *fl,
				   struct fastrpc_ioctl_munmap *ud)
{
	int err = 0;
	struct fastrpc_mmap *map = 0;

	VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
	if (err)
		goto bail;
	VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
	if (err)
		goto bail;
	fastrpc_mmap_free(map);
bail:
	/* Remote unmap failed: put the mapping back on the list. */
	if (err && map)
		fastrpc_mmap_add(map);
	return err;
}
1742
1743static int fastrpc_internal_mmap(struct fastrpc_file *fl,
1744 struct fastrpc_ioctl_mmap *ud)
1745{
1746
1747 struct fastrpc_mmap *map = 0;
1748 int err = 0;
1749
1750 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin, ud->size,
Sathish Ambleyae5ee542017-01-16 22:24:23 -08001751 ud->flags, 1, &map))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001752 return 0;
1753
1754 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
1755 (uintptr_t)ud->vaddrin, ud->size, ud->flags, &map));
1756 if (err)
1757 goto bail;
1758 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
1759 if (err)
1760 goto bail;
1761 ud->vaddrout = map->raddr;
1762 bail:
1763 if (err && map)
1764 fastrpc_mmap_free(map);
1765 return err;
1766}
1767
/*
 * fastrpc_channel_close() - kref release callback for a channel context.
 *
 * Invoked by kref_put_mutex() (see fastrpc_file_free()) with
 * gfa.smd_mutex HELD; this function is responsible for releasing that
 * mutex before returning.  Closes the glink transport, clears the cached
 * handle, and unregisters the link-state callback.
 */
static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	/* Channel id is the context's index within the gcinfo[] array. */
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = 0;
	glink_unregister_link_state_cb(ctx->link.link_notify_handle);
	ctx->link.link_notify_handle = NULL;
	mutex_unlock(&me->smd_mutex);
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
		MAJOR(me->dev_no), cid);
}
1784
1785static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
1786
1787static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
1788 int secure, struct fastrpc_session_ctx **session)
1789{
1790 struct fastrpc_apps *me = &gfa;
1791 int idx = 0, err = 0;
1792
1793 if (chan->sesscount) {
1794 for (idx = 0; idx < chan->sesscount; ++idx) {
1795 if (!chan->session[idx].used &&
1796 chan->session[idx].smmu.secure == secure) {
1797 chan->session[idx].used = 1;
1798 break;
1799 }
1800 }
1801 VERIFY(err, idx < chan->sesscount);
1802 if (err)
1803 goto bail;
1804 chan->session[idx].smmu.faults = 0;
1805 } else {
1806 VERIFY(err, me->dev != NULL);
1807 if (err)
1808 goto bail;
1809 chan->session[0].dev = me->dev;
1810 }
1811
1812 *session = &chan->session[idx];
1813 bail:
1814 return err;
1815}
1816
1817bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv, size_t size)
1818{
1819 if (glink_queue_rx_intent(h, NULL, size))
1820 return false;
1821 return true;
1822}
1823
/*
 * glink tx-done callback.  Intentionally empty: the message storage lives
 * in the invoke context (ctx->msg), so no per-transmit bookkeeping is
 * needed here.
 */
void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
				const void *pkt_priv, const void *ptr)
{
}
1828
/*
 * fastrpc_glink_notify_rx() - glink rx callback: a response arrived from
 * the DSP.
 *
 * Validates the packet size, recovers the invoke context from the 64-bit
 * cookie (the low bit carries the pd flag set in fastrpc_invoke_send()
 * and is masked off), sanity-checks the context magic, and wakes the
 * waiter with the remote return value.  The rx buffer is always returned
 * to glink, valid or not.
 */
void fastrpc_glink_notify_rx(void *handle, const void *priv,
	const void *pkt_priv, const void *ptr, size_t size)
{
	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
	struct smq_invoke_ctx *ctx;
	int err = 0;

	VERIFY(err, (rsp && size >= sizeof(*rsp)));
	if (err)
		goto bail;

	/* Strip the pd tag bit to recover the context pointer. */
	ctx = (struct smq_invoke_ctx *)(uint64_to_ptr(rsp->ctx & ~1));
	VERIFY(err, (ctx && ctx->magic == FASTRPC_CTX_MAGIC));
	if (err)
		goto bail;

	context_notify_user(ctx, rsp->retval);
bail:
	if (err)
		pr_err("adsprpc: invalid response or context\n");
	glink_rx_done(handle, ptr, true);
}
1851
1852void fastrpc_glink_notify_state(void *handle, const void *priv,
1853 unsigned int event)
1854{
1855 struct fastrpc_apps *me = &gfa;
1856 int cid = (int)(uintptr_t)priv;
1857 struct fastrpc_glink_info *link;
1858
1859 if (cid < 0 || cid >= NUM_CHANNELS)
1860 return;
1861 link = &me->channel[cid].link;
1862 switch (event) {
1863 case GLINK_CONNECTED:
1864 link->port_state = FASTRPC_LINK_CONNECTED;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301865 complete(&me->channel[cid].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001866 break;
1867 case GLINK_LOCAL_DISCONNECTED:
1868 link->port_state = FASTRPC_LINK_DISCONNECTED;
1869 break;
1870 case GLINK_REMOTE_DISCONNECTED:
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05301871 if (me->channel[cid].chan) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001872 fastrpc_glink_close(me->channel[cid].chan, cid);
1873 me->channel[cid].chan = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001874 }
1875 break;
1876 default:
1877 break;
1878 }
1879}
1880
1881static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
1882 struct fastrpc_session_ctx **session)
1883{
1884 int err = 0;
1885 struct fastrpc_apps *me = &gfa;
1886
1887 mutex_lock(&me->smd_mutex);
1888 if (!*session)
1889 err = fastrpc_session_alloc_locked(chan, secure, session);
1890 mutex_unlock(&me->smd_mutex);
1891 return err;
1892}
1893
1894static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
1895 struct fastrpc_session_ctx *session)
1896{
1897 struct fastrpc_apps *me = &gfa;
1898
1899 mutex_lock(&me->smd_mutex);
1900 session->used = 0;
1901 mutex_unlock(&me->smd_mutex);
1902}
1903
/*
 * fastrpc_file_free() - tear down all state behind a fastrpc fd.
 *
 * Order matters: the remote process is released first (best-effort, the
 * result is ignored), the file is unhashed from the global driver list,
 * then — only if a session was ever allocated — pending contexts,
 * buffers and mappings are destroyed, the channel reference is dropped
 * (skipped when a subsystem restart bumped ssrcount, since the ref died
 * with the old channel), and the sessions are returned.
 */
static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n;
	struct fastrpc_mmap *map = 0;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	(void)fastrpc_release_current_dsp_process(fl);

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	/* No session: the channel was never opened, nothing else exists. */
	if (!fl->sctx) {
		kfree(fl);
		return 0;
	}
	/* Flag the file as closing for concurrent ioctl paths. */
	spin_lock(&fl->hlock);
	fl->file_close = 1;
	spin_unlock(&fl->hlock);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map);
	}
	/* Drop our channel ref unless an SSR already invalidated it. */
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
			fastrpc_channel_close, &fl->apps->smd_mutex);
	if (fl->sctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
	kfree(fl);
	return 0;
}
1942
1943static int fastrpc_device_release(struct inode *inode, struct file *file)
1944{
1945 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
1946
1947 if (fl) {
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05301948 if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
1949 pm_qos_remove_request(&fl->pm_qos_req);
Sathish Ambley1ca68232017-01-19 10:32:55 -08001950 if (fl->debugfs_file != NULL)
1951 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001952 fastrpc_file_free(fl);
1953 file->private_data = 0;
1954 }
1955 return 0;
1956}
1957
1958static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
1959 void *priv)
1960{
1961 struct fastrpc_apps *me = &gfa;
1962 int cid = (int)((uintptr_t)priv);
1963 struct fastrpc_glink_info *link;
1964
1965 if (cid < 0 || cid >= NUM_CHANNELS)
1966 return;
1967
1968 link = &me->channel[cid].link;
1969 switch (cb_info->link_state) {
1970 case GLINK_LINK_STATE_UP:
1971 link->link_state = FASTRPC_LINK_STATE_UP;
1972 complete(&me->channel[cid].work);
1973 break;
1974 case GLINK_LINK_STATE_DOWN:
1975 link->link_state = FASTRPC_LINK_STATE_DOWN;
1976 break;
1977 default:
1978 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
1979 break;
1980 }
1981}
1982
/*
 * Register for glink link-state callbacks on channel @cid and wait for
 * the link to come up.
 *
 * Idempotent: if a notifier handle already exists, returns success
 * without re-registering. Fails (err set via VERIFY) on a bad cid, on
 * registration failure, or if the link does not come up within
 * RPC_TIMEOUT (wait_for_completion_timeout returning 0).
 */
static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;

	link = &me->channel[cid].link;
	/* Already registered; keep the existing handle. */
	if (link->link_notify_handle != NULL)
		goto bail;

	link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
	link->link_notify_handle = glink_register_link_state_cb(
						&link->link_info,
						(void *)((uintptr_t)cid));
	VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
	if (err) {
		link->link_notify_handle = NULL;
		goto bail;
	}
	/* Completed by fastrpc_link_state_handler() on GLINK_LINK_STATE_UP. */
	VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
						RPC_TIMEOUT));
bail:
	return err;
}
2010
2011static void fastrpc_glink_close(void *chan, int cid)
2012{
2013 int err = 0;
2014 struct fastrpc_glink_info *link;
2015
2016 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2017 if (err)
2018 return;
2019 link = &gfa.channel[cid].link;
2020
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302021 if (link->port_state == FASTRPC_LINK_CONNECTED) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002022 link->port_state = FASTRPC_LINK_DISCONNECTING;
2023 glink_close(chan);
2024 }
2025}
2026
/*
 * Open the glink port for channel @cid.
 *
 * Preconditions (else fails): valid cid, link state UP, port state
 * DISCONNECTED. The port is moved to CONNECTING before glink_open(); on
 * open failure it is rolled back to DISCONNECTED unless a callback has
 * already moved it elsewhere. On success the handle is stored in
 * me->channel[cid].chan; the caller waits for the CONNECTED completion.
 */
static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &me->channel[cid].link.cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
	if (err)
		goto bail;

	link->port_state = FASTRPC_LINK_CONNECTING;
	cfg->priv = (void *)(uintptr_t)cid;
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err) {
		/* Roll back only if no state callback advanced us already. */
		if (link->port_state == FASTRPC_LINK_CONNECTING)
			link->port_state = FASTRPC_LINK_DISCONNECTED;
		goto bail;
	}
	me->channel[cid].chan = handle;
bail:
	return err;
}
2068
/*
 * debugfs open: forward the node's private data (the fastrpc_file for a
 * per-process node, NULL for the "global" node) to the read handler.
 */
static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}
2074
2075static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
2076 size_t count, loff_t *position)
2077{
2078 struct fastrpc_file *fl = filp->private_data;
2079 struct hlist_node *n;
2080 struct fastrpc_buf *buf = 0;
2081 struct fastrpc_mmap *map = 0;
2082 struct smq_invoke_ctx *ictx = 0;
2083 struct fastrpc_channel_ctx *chan;
2084 struct fastrpc_session_ctx *sess;
2085 unsigned int len = 0;
2086 int i, j, ret = 0;
2087 char *fileinfo = NULL;
2088
2089 fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
2090 if (!fileinfo)
2091 goto bail;
2092 if (fl == NULL) {
2093 for (i = 0; i < NUM_CHANNELS; i++) {
2094 chan = &gcinfo[i];
2095 len += scnprintf(fileinfo + len,
2096 DEBUGFS_SIZE - len, "%s\n\n",
2097 chan->name);
2098 len += scnprintf(fileinfo + len,
2099 DEBUGFS_SIZE - len, "%s %d\n",
2100 "sesscount:", chan->sesscount);
2101 for (j = 0; j < chan->sesscount; j++) {
2102 sess = &chan->session[j];
2103 len += scnprintf(fileinfo + len,
2104 DEBUGFS_SIZE - len,
2105 "%s%d\n\n", "SESSION", j);
2106 len += scnprintf(fileinfo + len,
2107 DEBUGFS_SIZE - len,
2108 "%s %d\n", "sid:",
2109 sess->smmu.cb);
2110 len += scnprintf(fileinfo + len,
2111 DEBUGFS_SIZE - len,
2112 "%s %d\n", "SECURE:",
2113 sess->smmu.secure);
2114 }
2115 }
2116 } else {
2117 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2118 "%s %d\n\n",
2119 "PROCESS_ID:", fl->tgid);
2120 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2121 "%s %d\n\n",
2122 "CHANNEL_ID:", fl->cid);
2123 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2124 "%s %d\n\n",
2125 "SSRCOUNT:", fl->ssrcount);
2126 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2127 "%s\n",
2128 "LIST OF BUFS:");
2129 spin_lock(&fl->hlock);
2130 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
2131 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Meruguce566452017-08-17 15:29:59 +05302132 "%s %pK %s %pK %s %llx\n", "buf:",
2133 buf, "buf->virt:", buf->virt,
2134 "buf->phys:", buf->phys);
Sathish Ambley1ca68232017-01-19 10:32:55 -08002135 }
2136 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2137 "\n%s\n",
2138 "LIST OF MAPS:");
2139 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
2140 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Meruguce566452017-08-17 15:29:59 +05302141 "%s %pK %s %lx %s %llx\n",
Sathish Ambley1ca68232017-01-19 10:32:55 -08002142 "map:", map,
2143 "map->va:", map->va,
2144 "map->phys:", map->phys);
2145 }
2146 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2147 "\n%s\n",
2148 "LIST OF PENDING SMQCONTEXTS:");
2149 hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
2150 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Meruguce566452017-08-17 15:29:59 +05302151 "%s %pK %s %u %s %u %s %u\n",
Sathish Ambley1ca68232017-01-19 10:32:55 -08002152 "smqcontext:", ictx,
2153 "sc:", ictx->sc,
2154 "tid:", ictx->pid,
2155 "handle", ictx->rpra->h);
2156 }
2157 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2158 "\n%s\n",
2159 "LIST OF INTERRUPTED SMQCONTEXTS:");
2160 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
2161 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Meruguce566452017-08-17 15:29:59 +05302162 "%s %pK %s %u %s %u %s %u\n",
Sathish Ambley1ca68232017-01-19 10:32:55 -08002163 "smqcontext:", ictx,
2164 "sc:", ictx->sc,
2165 "tid:", ictx->pid,
2166 "handle", ictx->rpra->h);
2167 }
2168 spin_unlock(&fl->hlock);
2169 }
2170 if (len > DEBUGFS_SIZE)
2171 len = DEBUGFS_SIZE;
2172 ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
2173 kfree(fileinfo);
2174bail:
2175 return ret;
2176}
2177
/* Operations shared by the per-process and "global" debugfs nodes. */
static const struct file_operations debugfs_fops = {
	.open = fastrpc_debugfs_open,
	.read = fastrpc_debugfs_read,
};
/*
 * Ensure the glink channel backing fl->cid is open and referenced.
 *
 * Serialized by me->smd_mutex. If the channel is already open with a
 * live kref, only fl->ssrcount is refreshed. Otherwise: register the
 * link notifier, open the port, wait for the CONNECTED completion,
 * (re)initialize the kref and pre-queue two rx intents.
 */
static int fastrpc_channel_open(struct fastrpc_file *fl)
{
	struct fastrpc_apps *me = &gfa;
	int cid, err = 0;

	mutex_lock(&me->smd_mutex);

	VERIFY(err, fl && fl->sctx);
	if (err)
		goto bail;
	cid = fl->cid;
	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
	if (err)
		goto bail;
	/* Record the SSR generation this file attaches under. */
	fl->ssrcount = me->channel[cid].ssrcount;
	if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
	    (me->channel[cid].chan == 0)) {
		VERIFY(err, 0 == fastrpc_glink_register(cid, me));
		if (err)
			goto bail;
		VERIFY(err, 0 == fastrpc_glink_open(cid));
		if (err)
			goto bail;

		/* Completed by the glink notify_state callback. */
		VERIFY(err,
			 wait_for_completion_timeout(&me->channel[cid].workport,
						RPC_TIMEOUT));
		if (err) {
			me->channel[cid].chan = 0;
			goto bail;
		}
		kref_init(&me->channel[cid].kref);
		pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
						MAJOR(me->dev_no), cid);
		/*
		 * Pre-queue small rx intents so early replies need no
		 * allocation. NOTE(review): a failure here is only warned
		 * about, yet err still propagates to the caller and fails
		 * the open — confirm that is intended.
		 */
		err = glink_queue_rx_intent(me->channel[cid].chan, NULL, 16);
		err |= glink_queue_rx_intent(me->channel[cid].chan, NULL, 64);
		if (err)
			pr_warn("adsprpc: initial intent fail for %d err %d\n",
					cid, err);
		if (me->channel[cid].ssrcount !=
				 me->channel[cid].prevssrcount) {
			me->channel[cid].prevssrcount =
						me->channel[cid].ssrcount;
		}
	}

bail:
	mutex_unlock(&me->smd_mutex);
	return err;
}
2232
/*
 * open() handler: allocate and initialize the per-client fastrpc_file.
 *
 * The channel id stays -1 until FASTRPC_IOCTL_GETINFO selects one. A
 * per-process debugfs node named after current->comm is created on a
 * best-effort basis. The file is linked into the global driver list so
 * SSR notifications can reach it. Returns 0, or negative on allocation
 * failure.
 */
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	int err = 0;
	struct dentry *debugfs_file;
	struct fastrpc_file *fl = 0;
	struct fastrpc_apps *me = &gfa;

	VERIFY(err, fl = kzalloc(sizeof(*fl), GFP_KERNEL));
	if (err)
		return err;
	debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
						fl, &debugfs_fops);
	context_list_ctor(&fl->clst);
	spin_lock_init(&fl->hlock);
	INIT_HLIST_HEAD(&fl->maps);
	INIT_HLIST_HEAD(&fl->bufs);
	INIT_HLIST_NODE(&fl->hn);
	fl->sessionid = 0;
	fl->tgid = current->tgid;
	fl->apps = me;
	fl->mode = FASTRPC_MODE_SERIAL;
	/* No channel bound yet; FASTRPC_IOCTL_GETINFO sets it. */
	fl->cid = -1;
	if (debugfs_file != NULL)
		fl->debugfs_file = debugfs_file;
	memset(&fl->perf, 0, sizeof(fl->perf));
	fl->qos_request = 0;
	filp->private_data = fl;
	spin_lock(&me->hlock);
	hlist_add_head(&fl->hn, &me->drivers);
	spin_unlock(&me->hlock);
	return 0;
}
2265
/*
 * FASTRPC_IOCTL_GETINFO backend.
 *
 * On the file's first call, *info selects the channel id; a session
 * context is then allocated on that channel and the current SSR
 * generation is recorded. *info is overwritten with 1 if the session's
 * SMMU is enabled, 0 otherwise.
 */
static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
{
	int err = 0;
	uint32_t cid;

	VERIFY(err, fl != 0);
	if (err)
		goto bail;
	if (fl->cid == -1) {
		cid = *info;
		VERIFY(err, cid < NUM_CHANNELS);
		if (err)
			goto bail;
		fl->cid = cid;
		fl->ssrcount = fl->apps->channel[cid].ssrcount;
		VERIFY(err, !fastrpc_session_alloc_locked(
				&fl->apps->channel[cid], 0, &fl->sctx));
		if (err)
			goto bail;
	}
	VERIFY(err, fl->sctx != NULL);
	if (err)
		goto bail;
	*info = (fl->sctx->smmu.enabled ? 1 : 0);
bail:
	return err;
}
2293
/*
 * FASTRPC_IOCTL_CONTROL backend.
 *
 * Only FASTRPC_CONTROL_LATENCY is supported: adds (first use) or
 * updates a PM QoS CPU/DMA latency request. Enable uses the DT-provided
 * fl->apps->latency; disable restores PM_QOS_DEFAULT_VALUE. Fails if no
 * latency was configured (latency == 0), and with -ENOTTY for unknown
 * requests.
 */
static int fastrpc_internal_control(struct fastrpc_file *fl,
					struct fastrpc_ioctl_control *cp)
{
	int err = 0;
	int latency;

	VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(cp));
	if (err)
		goto bail;

	switch (cp->req) {
	case FASTRPC_CONTROL_LATENCY:
		latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
			fl->apps->latency : PM_QOS_DEFAULT_VALUE;
		VERIFY(err, latency != 0);
		if (err)
			goto bail;
		if (!fl->qos_request) {
			pm_qos_add_request(&fl->pm_qos_req,
				PM_QOS_CPU_DMA_LATENCY, latency);
			fl->qos_request = 1;
		} else
			pm_qos_update_request(&fl->pm_qos_req, latency);
		break;
	default:
		err = -ENOTTY;
		break;
	}
bail:
	return err;
}
2328
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002329static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
2330 unsigned long ioctl_param)
2331{
2332 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07002333 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002334 struct fastrpc_ioctl_mmap mmap;
2335 struct fastrpc_ioctl_munmap munmap;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002336 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002337 struct fastrpc_ioctl_perf perf;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302338 struct fastrpc_ioctl_control cp;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002339 } p;
2340 void *param = (char *)ioctl_param;
2341 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2342 int size = 0, err = 0;
2343 uint32_t info;
2344
2345 p.inv.fds = 0;
2346 p.inv.attrs = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07002347 p.inv.crc = NULL;
tharun kumar9f899ea2017-07-03 17:07:03 +05302348 spin_lock(&fl->hlock);
2349 if (fl->file_close == 1) {
2350 err = EBADF;
2351 pr_warn("ADSPRPC: fastrpc_device_release is happening, So not sending any new requests to DSP");
2352 spin_unlock(&fl->hlock);
2353 goto bail;
2354 }
2355 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002356
2357 switch (ioctl_num) {
2358 case FASTRPC_IOCTL_INVOKE:
2359 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07002360 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002361 case FASTRPC_IOCTL_INVOKE_FD:
2362 if (!size)
2363 size = sizeof(struct fastrpc_ioctl_invoke_fd);
2364 /* fall through */
2365 case FASTRPC_IOCTL_INVOKE_ATTRS:
2366 if (!size)
2367 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07002368 /* fall through */
2369 case FASTRPC_IOCTL_INVOKE_CRC:
2370 if (!size)
2371 size = sizeof(struct fastrpc_ioctl_invoke_crc);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002372 VERIFY(err, 0 == copy_from_user(&p.inv, param, size));
2373 if (err)
2374 goto bail;
2375 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
2376 0, &p.inv)));
2377 if (err)
2378 goto bail;
2379 break;
2380 case FASTRPC_IOCTL_MMAP:
2381 VERIFY(err, 0 == copy_from_user(&p.mmap, param,
2382 sizeof(p.mmap)));
2383 if (err)
2384 goto bail;
2385 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
2386 if (err)
2387 goto bail;
2388 VERIFY(err, 0 == copy_to_user(param, &p.mmap, sizeof(p.mmap)));
2389 if (err)
2390 goto bail;
2391 break;
2392 case FASTRPC_IOCTL_MUNMAP:
2393 VERIFY(err, 0 == copy_from_user(&p.munmap, param,
2394 sizeof(p.munmap)));
2395 if (err)
2396 goto bail;
2397 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
2398 &p.munmap)));
2399 if (err)
2400 goto bail;
2401 break;
2402 case FASTRPC_IOCTL_SETMODE:
2403 switch ((uint32_t)ioctl_param) {
2404 case FASTRPC_MODE_PARALLEL:
2405 case FASTRPC_MODE_SERIAL:
2406 fl->mode = (uint32_t)ioctl_param;
2407 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002408 case FASTRPC_MODE_PROFILE:
2409 fl->profile = (uint32_t)ioctl_param;
2410 break;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302411 case FASTRPC_MODE_SESSION:
2412 fl->sessionid = 1;
2413 fl->tgid |= (1 << SESSION_ID_INDEX);
2414 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002415 default:
2416 err = -ENOTTY;
2417 break;
2418 }
2419 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002420 case FASTRPC_IOCTL_GETPERF:
2421 VERIFY(err, 0 == copy_from_user(&p.perf,
2422 param, sizeof(p.perf)));
2423 if (err)
2424 goto bail;
2425 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
2426 if (p.perf.keys) {
2427 char *keys = PERF_KEYS;
2428
2429 VERIFY(err, 0 == copy_to_user((char *)p.perf.keys,
2430 keys, strlen(keys)+1));
2431 if (err)
2432 goto bail;
2433 }
2434 if (p.perf.data) {
2435 VERIFY(err, 0 == copy_to_user((int64_t *)p.perf.data,
2436 &fl->perf, sizeof(fl->perf)));
2437 }
2438 VERIFY(err, 0 == copy_to_user(param, &p.perf, sizeof(p.perf)));
2439 if (err)
2440 goto bail;
2441 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302442 case FASTRPC_IOCTL_CONTROL:
2443 VERIFY(err, 0 == copy_from_user(&p.cp, (void __user *)param,
2444 sizeof(p.cp)));
2445 if (err)
2446 goto bail;
2447 VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
2448 if (err)
2449 goto bail;
2450 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002451 case FASTRPC_IOCTL_GETINFO:
Sathish Ambley36849af2017-02-02 09:35:55 -08002452 VERIFY(err, 0 == copy_from_user(&info, param, sizeof(info)));
2453 if (err)
2454 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002455 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
2456 if (err)
2457 goto bail;
2458 VERIFY(err, 0 == copy_to_user(param, &info, sizeof(info)));
2459 if (err)
2460 goto bail;
2461 break;
2462 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002463 p.init.attrs = 0;
2464 p.init.siglen = 0;
2465 size = sizeof(struct fastrpc_ioctl_init);
2466 /* fall through */
2467 case FASTRPC_IOCTL_INIT_ATTRS:
2468 if (!size)
2469 size = sizeof(struct fastrpc_ioctl_init_attrs);
2470 VERIFY(err, 0 == copy_from_user(&p.init, param, size));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002471 if (err)
2472 goto bail;
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05302473 VERIFY(err, p.init.init.filelen >= 0 &&
2474 p.init.init.memlen >= 0);
2475 if (err)
2476 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002477 VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
2478 if (err)
2479 goto bail;
2480 break;
2481
2482 default:
2483 err = -ENOTTY;
2484 pr_info("bad ioctl: %d\n", ioctl_num);
2485 break;
2486 }
2487 bail:
2488 return err;
2489}
2490
2491static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
2492 unsigned long code,
2493 void *data)
2494{
2495 struct fastrpc_apps *me = &gfa;
2496 struct fastrpc_channel_ctx *ctx;
2497 int cid;
2498
2499 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
2500 cid = ctx - &me->channel[0];
2501 if (code == SUBSYS_BEFORE_SHUTDOWN) {
2502 mutex_lock(&me->smd_mutex);
2503 ctx->ssrcount++;
2504 if (ctx->chan) {
2505 fastrpc_glink_close(ctx->chan, cid);
2506 ctx->chan = 0;
2507 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
2508 gcinfo[cid].name, MAJOR(me->dev_no), cid);
2509 }
2510 mutex_unlock(&me->smd_mutex);
2511 fastrpc_notify_drivers(me, cid);
2512 }
2513
2514 return NOTIFY_DONE;
2515}
2516
/* Character-device operations for the adsprpc device node. */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};
2523
/* Device-tree compatibles handled by fastrpc_probe(). */
static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};
2531
/*
 * Probe one SMMU context-bank node ("qcom,msm-fastrpc-compute-cb").
 *
 * Matches the node's "label" against a known channel, fills the next
 * free session slot from the "iommus" phandle, creates and attaches an
 * ARM IOMMU mapping (VA base 0x80000000, or 0x60000000 for secure
 * banks; size 0x78000000), tags secure domains with VMID_CP_PIXEL, and
 * (re)creates the "global" debugfs node.
 */
static int fastrpc_cb_probe(struct device *dev)
{
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	struct of_phandle_args iommuspec;
	const char *name;
	unsigned int start = 0x80000000;
	int err = 0, i;
	int secure_vmid = VMID_CP_PIXEL;

	VERIFY(err, 0 != (name = of_get_property(dev->of_node, "label", NULL)));
	if (err)
		goto bail;
	/* Find the channel whose name matches this context bank's label. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		if (!strcmp(name, gcinfo[i].name))
			break;
	}
	VERIFY(err, i < NUM_CHANNELS);
	if (err)
		goto bail;
	chan = &gcinfo[i];
	VERIFY(err, chan->sesscount < NUM_SESSIONS);
	if (err)
		goto bail;

	VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
						"#iommu-cells", 0, &iommuspec));
	if (err)
		goto bail;
	sess = &chan->session[chan->sesscount];
	/* Low nibble of the first iommu arg is the context-bank id. */
	sess->smmu.cb = iommuspec.args[0] & 0xf;
	sess->used = 0;
	sess->smmu.coherent = of_property_read_bool(dev->of_node,
						"dma-coherent");
	sess->smmu.secure = of_property_read_bool(dev->of_node,
						"qcom,secure-context-bank");
	if (sess->smmu.secure)
		start = 0x60000000;
	VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
				arm_iommu_create_mapping(&platform_bus_type,
						start, 0x78000000)));
	if (err)
		goto bail;

	if (sess->smmu.secure)
		iommu_domain_set_attr(sess->smmu.mapping->domain,
				DOMAIN_ATTR_SECURE_VMID,
				&secure_vmid);

	VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
	if (err)
		goto bail;
	sess->dev = dev;
	sess->smmu.enabled = 1;
	chan->sesscount++;
	debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
						NULL, &debugfs_fops);
bail:
	return err;
}
2594
/*
 * Platform probe, dispatched by compatible string:
 *  - compute-cb nodes delegate to fastrpc_cb_probe();
 *  - adsprpc-mem-region nodes locate the ADSP ION heap's CMA region and
 *    hyp-assign it from HLOS to HLOS+MSS+SSC+ADSP with RWX permissions;
 *  - otherwise (main node): read the optional "qcom,rpc-latency-us"
 *    QoS value and populate child devices from the match table.
 */
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			/* Find the ADSP heap child and grab its CMA extent. */
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
						VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					};

			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
					srcVM, 1, destVM, destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	/* Latency is optional; 0 disables FASTRPC_CONTROL_LATENCY. */
	err = of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
				&me->latency);
	if (err)
		me->latency = 0;
	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
						fastrpc_match_table,
						NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}
2662
/*
 * Driver-wide teardown: drop any live channel references and
 * detach/release every SMMU session mapping. Used on module exit and on
 * init-failure unwind (safe on partially-initialized state: every
 * action is guarded by the corresponding field being set).
 */
static void fastrpc_deinit(void)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *chan = gcinfo;
	int i, j;

	for (i = 0; i < NUM_CHANNELS; i++, chan++) {
		if (chan->chan) {
			kref_put_mutex(&chan->kref,
				fastrpc_channel_close, &me->smd_mutex);
			chan->chan = 0;
		}
		for (j = 0; j < NUM_SESSIONS; j++) {
			struct fastrpc_session_ctx *sess = &chan->session[j];

			if (sess->smmu.enabled) {
				arm_iommu_detach_device(sess->dev);
				sess->dev = 0;
			}
			if (sess->smmu.mapping) {
				arm_iommu_release_mapping(sess->smmu.mapping);
				sess->smmu.mapping = 0;
			}
		}
	}
}
2689
/* Platform driver bound to every fastrpc DT compatible above. */
static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};
2698
/*
 * Module init: register the platform driver, create the character
 * device and class, register SSR notifiers per channel, create the ION
 * client and the debugfs root. Failures unwind through the goto ladder
 * in reverse order of construction, ending in fastrpc_deinit().
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	struct device *dev = 0;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				1));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	/* One device node, named after the first channel. */
	dev = device_create(me->class, NULL,
				MKDEV(MAJOR(me->dev_no), 0),
				NULL, gcinfo[0].name);
	VERIFY(err, !IS_ERR_OR_NULL(dev));
	if (err)
		goto device_create_bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		me->channel[i].dev = dev;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	debugfs_root = debugfs_create_dir("adsprpc", NULL);
	return 0;
device_create_bail:
	/* Also reached on ION failure, hence the per-field guards. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (me->channel[i].handle)
			subsys_notif_unregister_notifier(me->channel[i].handle,
							&me->channel[i].nb);
	}
	if (!IS_ERR_OR_NULL(dev))
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}
2767
/*
 * Module exit: free every open client file, release channels/sessions,
 * then destroy device nodes, notifiers, class, cdev, the ION client and
 * the debugfs tree.
 */
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;
	int i;

	fastrpc_file_list_dtor(me);
	fastrpc_deinit();
	for (i = 0; i < NUM_CHANNELS; i++) {
		/* Skip channel slots that were never named/configured. */
		if (!gcinfo[i].name)
			continue;
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						&me->channel[i].nb);
	}
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
	ion_client_destroy(me->client);
	debugfs_remove_recursive(debugfs_root);
}
2788
/* Late init so subsystems this driver uses are registered first. */
late_initcall(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");