blob: 7de9b79c2c3514873c6b50bfdbedac953ffa7195 [file] [log] [blame]
/*
 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
29#include <soc/qcom/subsystem_notif.h>
30#include <soc/qcom/subsystem_restart.h>
31#include <linux/scatterlist.h>
32#include <linux/fs.h>
33#include <linux/uaccess.h>
34#include <linux/device.h>
35#include <linux/of.h>
36#include <linux/of_address.h>
37#include <linux/of_platform.h>
38#include <linux/dma-contiguous.h>
39#include <linux/cma.h>
40#include <linux/iommu.h>
41#include <linux/kref.h>
42#include <linux/sort.h>
43#include <linux/msm_dma_iommu_mapping.h>
44#include <asm/dma-iommu.h>
c_mtharue1a5ce12017-10-13 20:47:09 +053045#include <soc/qcom/scm.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070046#include "adsprpc_compat.h"
47#include "adsprpc_shared.h"
c_mtharue1a5ce12017-10-13 20:47:09 +053048#include <soc/qcom/ramdump.h>
Sathish Ambley1ca68232017-01-19 10:32:55 -080049#include <linux/debugfs.h>
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053050#include <linux/pm_qos.h>
/* TZ/SCM subsystem ids used when (un)protecting DSP memory */
#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
#define TZ_PIL_AUTH_QDSP6_PROC 1
/* mmap flags selecting the globally owned remote-heap path */
#define ADSP_MMAP_HEAP_ADDR 4
#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
#define FASTRPC_ENOSUCH 39
#define VMID_SSC_Q6 5
#define VMID_ADSP_Q6 6
#define DEBUGFS_SIZE 1024

#define RPC_TIMEOUT (5 * HZ)
#define BALIGN 128
#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
#define M_FDLIST (16)
#define M_CRCLIST (64)
#define SESSION_ID_INDEX (30)
/* cookie stored in smq_invoke_ctx.magic while the context is live */
#define FASTRPC_CTX_MAGIC (0xbeeddeed)

#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)

/* glink link and port state machine values */
#define FASTRPC_LINK_STATE_DOWN (0x0)
#define FASTRPC_LINK_STATE_UP (0x1)
#define FASTRPC_LINK_DISCONNECTED (0x0)
#define FASTRPC_LINK_CONNECTING (0x1)
#define FASTRPC_LINK_CONNECTED (0x3)
#define FASTRPC_LINK_DISCONNECTING (0x7)

/* order matches the int64_t fields of struct fastrpc_perf */
#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
#define FASTRPC_STATIC_HANDLE_LISTENER (3)
#define FASTRPC_STATIC_HANDLE_MAX (20)
#define FASTRPC_LATENCY_CTRL_ENB (1)

#define INIT_FILELEN_MAX (2*1024*1024)
#define INIT_MEMLEN_MAX (8*1024*1024)

#define PERF_END (void)0

/*
 * Time the statement(s) ff when enb is set, accumulating elapsed
 * nanoseconds into cnt via getnstimediff().  Deliberately a bare
 * brace block (not do/while) so ff may contain declarations.
 */
#define PERF(enb, cnt, ff) \
	{\
		struct timespec startT = {0};\
		if (enb) {\
			getnstimeofday(&startT);\
		} \
		ff ;\
		if (enb) {\
			cnt += getnstimediff(&startT);\
		} \
	}
100
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700101static int fastrpc_glink_open(int cid);
102static void fastrpc_glink_close(void *chan, int cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -0800103static struct dentry *debugfs_root;
104static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700105
106static inline uint64_t buf_page_start(uint64_t buf)
107{
108 uint64_t start = (uint64_t) buf & PAGE_MASK;
109 return start;
110}
111
112static inline uint64_t buf_page_offset(uint64_t buf)
113{
114 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
115 return offset;
116}
117
118static inline int buf_num_pages(uint64_t buf, ssize_t len)
119{
120 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
121 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
122 int nPages = end - start + 1;
123 return nPages;
124}
125
126static inline uint64_t buf_page_size(uint32_t size)
127{
128 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
129
130 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
131}
132
/* Reconstruct a kernel pointer from a 64-bit value carried over RPC. */
static inline void *uint64_to_ptr(uint64_t addr)
{
	return (void *)(uintptr_t)addr;
}
139
/* Widen a kernel pointer to the 64-bit representation used on the wire. */
static inline uint64_t ptr_to_uint64(void *ptr)
{
	return (uint64_t)(uintptr_t)ptr;
}
146
struct fastrpc_file;

/* A coherent DMA buffer owned by one fastrpc file instance. */
struct fastrpc_buf {
	struct hlist_node hn;		/* link on fl->bufs cache list */
	struct fastrpc_file *fl;	/* owning file */
	void *virt;			/* kernel virtual address */
	uint64_t phys;			/* DMA address; may carry the SMMU cb in bits 32+ */
	ssize_t size;
};

struct fastrpc_ctx_lst;

/* One argument buffer span, used to de-duplicate overlapping buffers. */
struct overlap {
	uintptr_t start;
	uintptr_t end;
	int raix;		/* index of the originating remote arg */
	uintptr_t mstart;	/* start of the portion not covered earlier */
	uintptr_t mend;		/* end of that portion (0 if fully covered) */
	uintptr_t offset;	/* bytes skipped at the front due to overlap */
};
167
/* State for one in-flight remote invocation. */
struct smq_invoke_ctx {
	struct hlist_node hn;		/* link on clst.pending/interrupted */
	struct completion work;		/* completed when a reply (or SSR) arrives */
	int retval;			/* remote return code, -1 until replied */
	int pid;			/* invoking thread (used to restore interrupted calls) */
	int tgid;
	remote_arg_t *lpra;		/* caller-side argument array */
	remote_arg64_t *rpra;
	int *fds;			/* per-buffer ion fds, optional */
	unsigned int *attrs;		/* per-buffer FASTRPC_ATTR_* flags, optional */
	struct fastrpc_mmap **maps;
	struct fastrpc_buf *buf;
	ssize_t used;
	struct fastrpc_file *fl;
	uint32_t sc;			/* scalars word (method + buffer counts) */
	struct overlap *overs;
	struct overlap **overps;	/* overs sorted by overlap_ptr_cmp */
	struct smq_msg msg;
	uint32_t *crc;
	unsigned int magic;		/* FASTRPC_CTX_MAGIC while live, 0 after free */
};

struct fastrpc_ctx_lst {
	struct hlist_head pending;	/* contexts awaiting a reply */
	struct hlist_head interrupted;	/* contexts whose waiter was signalled */
};
194
/* SMMU configuration for one session's context bank. */
struct fastrpc_smmu {
	struct device *dev;
	struct dma_iommu_mapping *mapping;
	int cb;			/* context bank id; packed into phys bits 32+ */
	int enabled;
	int faults;
	int secure;
	int coherent;
};

struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};

/* glink connection bookkeeping for one channel. */
struct fastrpc_glink_info {
	int link_state;		/* FASTRPC_LINK_STATE_* */
	int port_state;		/* FASTRPC_LINK_DISCONNECTED..DISCONNECTING */
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};
218
/* Per-DSP-channel state; NUM_CHANNELS entries live in gcinfo. */
struct fastrpc_channel_ctx {
	char *name;
	char *subsys;
	void *chan;
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;
	struct completion workport;
	struct notifier_block nb;
	struct kref kref;
	int sesscount;
	int ssrcount;		/* subsystem-restart generation counter */
	void *handle;
	int prevssrcount;
	int issubsystemup;
	int vmid;		/* non-zero: buffers must be hyp-assigned to this VM */
	int ramdumpenabled;
	void *remoteheap_ramdump_dev;
	struct fastrpc_glink_info link;
};

/* Driver-global state; the single instance is gfa. */
struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;		/* global (remote-heap) mappings */
	uint32_t staticpd_flags;
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;
	spinlock_t hlock;		/* protects maps and drivers lists */
	struct ion_client *client;
	struct device *dev;		/* adsprpc-mem device backing remote heap */
	unsigned int latency;
};
256
/* One memory region shared with the DSP: ion import or remote heap. */
struct fastrpc_mmap {
	struct hlist_node hn;
	struct fastrpc_file *fl;	/* NULL for globally owned remote-heap maps */
	struct fastrpc_apps *apps;
	int fd;
	uint32_t flags;			/* ADSP_MMAP_* */
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;			/* DMA address; may carry the SMMU cb in bits 32+ */
	ssize_t size;
	uintptr_t __user va;
	ssize_t len;
	int refs;
	uintptr_t raddr;		/* address of the mapping on the DSP side */
	int uncached;
	int secure;			/* backed by ION_FLAG_SECURE memory */
	uintptr_t attr;			/* FASTRPC_ATTR_* bits */
};

/* Cumulative per-file timing counters, nanoseconds (see PERF()). */
struct fastrpc_perf {
	int64_t count;
	int64_t flush;
	int64_t map;
	int64_t copy;
	int64_t link;
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;
	int64_t invoke;
};
289
/* Per-open-fd driver state. */
struct fastrpc_file {
	struct hlist_node hn;		/* link on gfa.drivers */
	spinlock_t hlock;		/* protects maps, bufs and clst lists */
	struct hlist_head maps;
	struct hlist_head bufs;		/* cache of released fastrpc_bufs */
	struct fastrpc_ctx_lst clst;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_session_ctx *secsctx;	/* lazily allocated for secure buffers */
	uint32_t mode;
	uint32_t profile;
	int sessionid;
	int tgid;
	int cid;		/* channel this file is bound to */
	int ssrcount;
	int pd;
	int file_close;
	struct fastrpc_apps *apps;
	struct fastrpc_perf perf;
	struct dentry *debugfs_file;
	struct pm_qos_request pm_qos_req;
	int qos_request;
};
312
static struct fastrpc_apps gfa;

/* Static channel table: glink edge/transport per DSP subsystem. */
static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};
341
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800342static inline int64_t getnstimediff(struct timespec *start)
343{
344 int64_t ns;
345 struct timespec ts, b;
346
347 getnstimeofday(&ts);
348 b = timespec_sub(ts, *start);
349 ns = timespec_to_ns(&b);
350 return ns;
351}
352
/*
 * Release a fastrpc buffer.  When @cache is set the buffer is parked on
 * the owner's free list for reuse; otherwise HLOS ownership is restored
 * (if the channel had hyp-assigned the pages to a remote VM) and the
 * coherent allocation is freed.
 */
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		/* park on the per-file cache for later reuse */
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		/* strip the SMMU context-bank tag packed into bits 32+ */
		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			/* give the pages back to HLOS before freeing them */
			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->smmu.dev, buf->size, buf->virt,
					buf->phys);
	}
	kfree(buf);
}
384
385static void fastrpc_buf_list_free(struct fastrpc_file *fl)
386{
387 struct fastrpc_buf *buf, *free;
388
389 do {
390 struct hlist_node *n;
391
c_mtharue1a5ce12017-10-13 20:47:09 +0530392 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700393 spin_lock(&fl->hlock);
394 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
395 hlist_del_init(&buf->hn);
396 free = buf;
397 break;
398 }
399 spin_unlock(&fl->hlock);
400 if (free)
401 fastrpc_buf_free(free, 0);
402 } while (free);
403}
404
405static void fastrpc_mmap_add(struct fastrpc_mmap *map)
406{
c_mtharue1a5ce12017-10-13 20:47:09 +0530407 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
408 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
409 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700410
c_mtharue1a5ce12017-10-13 20:47:09 +0530411 spin_lock(&me->hlock);
412 hlist_add_head(&map->hn, &me->maps);
413 spin_unlock(&me->hlock);
414 } else {
415 struct fastrpc_file *fl = map->fl;
416
417 spin_lock(&fl->hlock);
418 hlist_add_head(&map->hn, &fl->maps);
419 spin_unlock(&fl->hlock);
420 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700421}
422
c_mtharue1a5ce12017-10-13 20:47:09 +0530423static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
424 uintptr_t __user va, ssize_t len, int mflags, int refs,
425 struct fastrpc_mmap **ppmap)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700426{
c_mtharue1a5ce12017-10-13 20:47:09 +0530427 struct fastrpc_apps *me = &gfa;
428 struct fastrpc_mmap *match = NULL, *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700429 struct hlist_node *n;
c_mtharue1a5ce12017-10-13 20:47:09 +0530430 if (mflags == ADSP_MMAP_HEAP_ADDR ||
431 mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
432 spin_lock(&me->hlock);
433 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
434 if (va >= map->va &&
435 va + len <= map->va + map->len &&
436 map->fd == fd) {
437 if (refs)
438 map->refs++;
439 match = map;
440 break;
441 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700442 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530443 spin_unlock(&me->hlock);
444 } else {
445 spin_lock(&fl->hlock);
446 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
447 if (va >= map->va &&
448 va + len <= map->va + map->len &&
449 map->fd == fd) {
450 if (refs)
451 map->refs++;
452 match = map;
453 break;
454 }
455 }
456 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700457 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700458 if (match) {
459 *ppmap = match;
460 return 0;
461 }
462 return -ENOTTY;
463}
464
c_mtharue1a5ce12017-10-13 20:47:09 +0530465static int dma_alloc_memory(phys_addr_t *region_start, ssize_t size)
466{
467 struct fastrpc_apps *me = &gfa;
468 void *vaddr = NULL;
469
470 if (me->dev == NULL) {
471 pr_err("device adsprpc-mem is not initialized\n");
472 return -ENODEV;
473 }
474 vaddr = dma_alloc_coherent(me->dev, size, region_start, GFP_KERNEL);
475 if (!vaddr) {
476 pr_err("ADSPRPC: Failed to allocate %x remote heap memory\n",
477 (unsigned int)size);
478 return -ENOMEM;
479 }
480 return 0;
481}
482
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700483static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
484 ssize_t len, struct fastrpc_mmap **ppmap)
485{
c_mtharue1a5ce12017-10-13 20:47:09 +0530486 struct fastrpc_mmap *match = NULL, *map;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700487 struct hlist_node *n;
488 struct fastrpc_apps *me = &gfa;
489
490 spin_lock(&me->hlock);
491 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
492 if (map->raddr == va &&
493 map->raddr + map->len == va + len &&
494 map->refs == 1) {
495 match = map;
496 hlist_del_init(&map->hn);
497 break;
498 }
499 }
500 spin_unlock(&me->hlock);
501 if (match) {
502 *ppmap = match;
503 return 0;
504 }
505 spin_lock(&fl->hlock);
506 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
507 if (map->raddr == va &&
508 map->raddr + map->len == va + len &&
509 map->refs == 1) {
510 match = map;
511 hlist_del_init(&map->hn);
512 break;
513 }
514 }
515 spin_unlock(&fl->hlock);
516 if (match) {
517 *ppmap = match;
518 return 0;
519 }
520 return -ENOTTY;
521}
522
/*
 * Drop one reference on a mapping and, when it hits zero, tear it
 * down: remote-heap maps free their coherent allocation; imported
 * ion buffers are unmapped from the SMMU, hyp-assigned back to HLOS,
 * detached and released in reverse order of acquisition.
 */
static void fastrpc_mmap_free(struct fastrpc_mmap *map)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;

	if (!map)
		return;
	fl = map->fl;
	/* refcount is guarded by the lock owning the map's list */
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		spin_unlock(&me->hlock);
	} else {
		spin_lock(&fl->hlock);
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		spin_unlock(&fl->hlock);
	}
	if (map->refs > 0)
		return;
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {

		if (me->dev == NULL) {
			pr_err("failed to free remote heap allocation\n");
			return;
		}
		if (map->phys) {
			/*
			 * NOTE(review): &(map->va) is the address OF the va
			 * field, not the CPU virtual address of the region;
			 * dma_free_coherent() expects the latter — confirm.
			 */
			dma_free_coherent(me->dev, map->size,
				&(map->va), map->phys);
		}
	} else {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		if (!IS_ERR_OR_NULL(map->handle))
			ion_free(fl->apps->client, map->handle);
		if (sess && sess->smmu.enabled) {
			if (map->size || map->phys)
				msm_dma_unmap_sg(sess->smmu.dev,
					map->table->sgl,
					map->table->nents, DMA_BIDIRECTIONAL,
					map->buf);
		}
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid && map->phys) {
			int srcVM[2] = {VMID_HLOS, vmid};

			/* return the pages to HLOS before releasing them */
			hyp_assign_phys(map->phys, buf_page_size(map->size),
				srcVM, 2, destVM, destVMperm, 1);
		}

		/* release in reverse order: table, attachment, dma_buf */
		if (!IS_ERR_OR_NULL(map->table))
			dma_buf_unmap_attachment(map->attach, map->table,
				DMA_BIDIRECTIONAL);
		if (!IS_ERR_OR_NULL(map->attach))
			dma_buf_detach(map->buf, map->attach);
		if (!IS_ERR_OR_NULL(map->buf))
			dma_buf_put(map->buf);
	}
	kfree(map);
}
596
597static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
598 struct fastrpc_session_ctx **session);
599
/*
 * Create (or reuse) a mapping of [va, va + len) for @fd and prepare it
 * for DSP access.  Remote-heap flags allocate fresh coherent memory;
 * otherwise the fd is imported from ion, attached and mapped through
 * the session's SMMU, and hyp-assigned to the channel VM if required.
 * On success *ppmap holds a referenced mapping; on error every
 * partially acquired resource is released via fastrpc_mmap_free().
 */
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t __user va, ssize_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = NULL;
	unsigned long attrs;
	phys_addr_t region_start = 0;
	unsigned long flags;
	int err = 0, vmid;

	/* reuse an existing mapping (takes a reference) when possible */
	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		/* remote heap: globally owned, not tied to this file */
		map->apps = me;
		map->fl = NULL;
		VERIFY(err, !dma_alloc_memory(&region_start, len));
		if (err)
			goto bail;
		map->phys = (uintptr_t)region_start;
		map->size = len;
		map->va = (uintptr_t __user)map->phys;
	} else {
		VERIFY(err, !IS_ERR_OR_NULL(map->handle =
				ion_import_dma_buf_fd(fl->apps->client, fd)));
		if (err)
			goto bail;
		VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
		if (err)
			goto bail;

		map->uncached = !ION_IS_CACHED(flags);
		if (map->attr & FASTRPC_ATTR_NOVA)
			map->uncached = 1;

		map->secure = flags & ION_FLAG_SECURE;
		/* secure buffers need the lazily created secure session */
		if (map->secure) {
			if (!fl->secsctx)
				err = fastrpc_session_alloc(chan, 1,
							&fl->secsctx);
			if (err)
				goto bail;
		}
		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;
		VERIFY(err, !IS_ERR_OR_NULL(sess));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->attach =
				dma_buf_attach(map->buf, sess->smmu.dev)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
		if (err)
			goto bail;
		if (sess->smmu.enabled) {
			attrs = DMA_ATTR_EXEC_MAPPING;

			/* coherency attrs derived from buffer + session flags */
			if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
				(sess->smmu.coherent && map->uncached))
				attrs |= DMA_ATTR_FORCE_NON_COHERENT;
			else if (map->attr & FASTRPC_ATTR_COHERENT)
				attrs |= DMA_ATTR_FORCE_COHERENT;

			VERIFY(err, map->table->nents ==
				msm_dma_map_sg_attrs(sess->smmu.dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
			if (err)
				goto bail;
		} else {
			/* without an SMMU the buffer must be contiguous */
			VERIFY(err, map->table->nents == 1);
			if (err)
				goto bail;
		}
		map->phys = sg_dma_address(map->table->sgl);
		if (sess->smmu.cb) {
			/* tag the DMA address with the context bank */
			map->phys += ((uint64_t)sess->smmu.cb << 32);
			map->size = sg_dma_len(map->table->sgl);
		} else {
			map->size = buf_page_size(len);
		}
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[2] = {VMID_HLOS, vmid};
			int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

			/* share the pages with the channel's VM */
			VERIFY(err, !hyp_assign_phys(map->phys,
					buf_page_size(map->size),
					srcVM, 1, destVM, destVMperm, 2));
			if (err)
				goto bail;
		}
		map->va = va;
	}
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map);
	return err;
}
730
/*
 * Obtain a DMA buffer of at least @size bytes for @fl: reuse the
 * smallest adequate cached buffer if one exists, otherwise allocate
 * coherent memory (retrying once after draining the cache) and, when
 * the channel has a VM id, hyp-assign the pages to that VM.
 */
static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
			     struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = NULL, *fr = NULL;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = NULL;
	VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
				       (void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
					       (void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	/* tag the DMA address with the SMMU context bank */
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				     PERM_READ | PERM_WRITE | PERM_EXEC};

		/* share the new buffer with the channel's VM */
		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}
796
797
798static int context_restore_interrupted(struct fastrpc_file *fl,
Sathish Ambleybae51902017-07-03 15:00:49 -0700799 struct fastrpc_ioctl_invoke_crc *inv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700800 struct smq_invoke_ctx **po)
801{
802 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +0530803 struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700804 struct hlist_node *n;
805 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
806
807 spin_lock(&fl->hlock);
808 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
809 if (ictx->pid == current->pid) {
810 if (invoke->sc != ictx->sc || ictx->fl != fl)
811 err = -1;
812 else {
813 ctx = ictx;
814 hlist_del_init(&ctx->hn);
815 hlist_add_head(&ctx->hn, &fl->clst.pending);
816 }
817 break;
818 }
819 }
820 spin_unlock(&fl->hlock);
821 if (ctx)
822 *po = ctx;
823 return err;
824}
825
826#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
827static int overlap_ptr_cmp(const void *a, const void *b)
828{
829 struct overlap *pa = *((struct overlap **)a);
830 struct overlap *pb = *((struct overlap **)b);
831 /* sort with lowest starting buffer first */
832 int st = CMP(pa->start, pb->start);
833 /* sort with highest ending buffer first */
834 int ed = CMP(pb->end, pa->end);
835 return st == 0 ? ed : st;
836}
837
/*
 * For each in/out buffer argument compute the sub-range [mstart, mend)
 * that is not already covered by a previously seen (lower-starting)
 * buffer, so later stages need not duplicate overlapping bytes.
 * Buffers are sorted by start address (ties: higher end first); "max"
 * tracks the highest end seen so far.  Returns 0, or an error when a
 * buffer's end wraps past the address space.
 */
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			/* end <= start means the range wrapped around */
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			/* overlaps an earlier buffer: keep only the tail */
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				/* fully contained: nothing new to cover */
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			/* disjoint from everything seen: keep whole range */
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}
883
/*
 * Copy helpers usable from both user and kernel callers: when @kernel
 * is set the pointers are kernel addresses and a plain memmove is
 * used; otherwise copy_{from,to}_user validates the user pointer.
 * @err is set non-zero (via VERIFY) when the user copy fails.
 */
#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst),\
			(void const __user *)(src),\
							(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((void __user *)(dst), \
						(src), (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)
902
903
904static void context_free(struct smq_invoke_ctx *ctx);
905
/*
 * Allocate and populate an invocation context.  A single kzalloc
 * carries the ctx followed by its per-buffer arrays (maps, lpra, fds,
 * attrs, overs, overps), carved out of the tail in that order.  The
 * argument descriptors (and optional fds/attrs) are copied in from
 * @invokefd, overlaps are computed, and the context is queued on the
 * file's pending list.
 */
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
	struct fastrpc_ioctl_invoke_crc *invokefd,
	struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = NULL;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	/* carve the trailing arrays out of the single allocation */
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}
	ctx->crc = (uint32_t *)invokefd->crc;
	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = fl->tgid;
	init_completion(&ctx->work);
	/* validity cookie; cleared by context_free() */
	ctx->magic = FASTRPC_CTX_MAGIC;

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}
976
977static void context_save_interrupted(struct smq_invoke_ctx *ctx)
978{
979 struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
980
981 spin_lock(&ctx->fl->hlock);
982 hlist_del_init(&ctx->hn);
983 hlist_add_head(&ctx->hn, &clst->interrupted);
984 spin_unlock(&ctx->fl->hlock);
985 /* free the cache on power collapse */
986 fastrpc_buf_list_free(ctx->fl);
987}
988
/*
 * context_free() - release an invoke context and the resources it owns.
 *
 * Unlinks the context from whichever per-file list it sits on, drops the
 * references taken on the in/out buffer mappings, frees the argument copy
 * buffer and finally the context allocation itself.  ->magic is cleared
 * before kfree() so a stale pointer can be detected as invalid.
 */
static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	/*
	 * Only the inbuf/outbuf mappings are dropped here.
	 * NOTE(review): ctx->maps[] also holds entries for dma handles
	 * (populated in get_args() for i in [bufs, bufs+handles)); those
	 * are not freed here — confirm they are always released via the
	 * fdlist processing in put_args(), otherwise they leak on invoke
	 * paths that fail before put_args() runs.
	 */
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);	/* off the pending/interrupted list */
	spin_unlock(&ctx->fl->hlock);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i]);
	fastrpc_buf_free(ctx->buf, 1);
	ctx->magic = 0;
	kfree(ctx);
}
1003
/*
 * context_notify_user() - complete a pending invoke on the DSP's behalf.
 *
 * Publishes the remote return value and wakes the thread blocked on
 * ctx->work in fastrpc_internal_invoke().  The retval store must precede
 * complete() so the waiter observes a valid value after wakeup.
 */
static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}
1009
1010
1011static void fastrpc_notify_users(struct fastrpc_file *me)
1012{
1013 struct smq_invoke_ctx *ictx;
1014 struct hlist_node *n;
1015
1016 spin_lock(&me->hlock);
1017 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1018 complete(&ictx->work);
1019 }
1020 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1021 complete(&ictx->work);
1022 }
1023 spin_unlock(&me->hlock);
1024
1025}
1026
1027static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
1028{
1029 struct fastrpc_file *fl;
1030 struct hlist_node *n;
1031
1032 spin_lock(&me->hlock);
1033 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1034 if (fl->cid == cid)
1035 fastrpc_notify_users(fl);
1036 }
1037 spin_unlock(&me->hlock);
1038
1039}
1040static void context_list_ctor(struct fastrpc_ctx_lst *me)
1041{
1042 INIT_HLIST_HEAD(&me->interrupted);
1043 INIT_HLIST_HEAD(&me->pending);
1044}
1045
1046static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
1047{
1048 struct fastrpc_ctx_lst *clst = &fl->clst;
c_mtharue1a5ce12017-10-13 20:47:09 +05301049 struct smq_invoke_ctx *ictx = NULL, *ctxfree;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001050 struct hlist_node *n;
1051
1052 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301053 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001054 spin_lock(&fl->hlock);
1055 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
1056 hlist_del_init(&ictx->hn);
1057 ctxfree = ictx;
1058 break;
1059 }
1060 spin_unlock(&fl->hlock);
1061 if (ctxfree)
1062 context_free(ctxfree);
1063 } while (ctxfree);
1064 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301065 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001066 spin_lock(&fl->hlock);
1067 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
1068 hlist_del_init(&ictx->hn);
1069 ctxfree = ictx;
1070 break;
1071 }
1072 spin_unlock(&fl->hlock);
1073 if (ctxfree)
1074 context_free(ctxfree);
1075 } while (ctxfree);
1076}
1077
1078static int fastrpc_file_free(struct fastrpc_file *fl);
1079static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
1080{
1081 struct fastrpc_file *fl, *free;
1082 struct hlist_node *n;
1083
1084 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301085 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001086 spin_lock(&me->hlock);
1087 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1088 hlist_del_init(&fl->hn);
1089 free = fl;
1090 break;
1091 }
1092 spin_unlock(&me->hlock);
1093 if (free)
1094 fastrpc_file_free(free);
1095 } while (free);
1096}
1097
/*
 * get_args() - marshal invoke arguments into the shared message buffer.
 *
 * Builds the metadata block at the start of ctx->buf:
 *   remote_arg64_t rpra[] | smq_invoke_buf list[] | smq_phy_page pages[]
 *   | uint64_t fdlist[M_FDLIST] | uint32_t crclist[M_CRCLIST]
 * (offsets come from smq_invoke_buf_start()/smq_phy_page_start()),
 * followed by copies of the non-ion buffer contents.  ion-backed buffers
 * are mapped (fastrpc_mmap_create) and passed by physical page instead
 * of being copied.  Finishes with the cache flushes needed before the
 * DSP reads the buffer.
 *
 * @kernel: non-zero when the arguments live in kernel space (affects
 *          K_COPY_FROM_USER).
 * Returns 0 on success, negative error otherwise.
 */
static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
{
	remote_arg64_t *rpra;
	remote_arg_t *lpra = ctx->lpra;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages, *ipage;
	uint32_t sc = ctx->sc;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	int handles, bufs = inbufs + outbufs;
	uintptr_t args;
	ssize_t rlen = 0, copylen = 0, metalen = 0;
	int i, oix;
	int err = 0;
	int mflags = 0;
	uint64_t *fdlist;
	uint32_t *crclist;

	/* calculate size of the metadata */
	/*
	 * With rpra == NULL the smq_*_start() helpers and &ipage[0] yield
	 * offsets from 0, i.e. pure sizes rather than addresses.
	 */
	rpra = NULL;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;

	for (i = 0; i < bufs; ++i) {
		uintptr_t __user buf = (uintptr_t __user)lpra[i].buf.pv;
		ssize_t len = lpra[i].buf.len;

		/*
		 * NOTE(review): the return value of fastrpc_mmap_create()
		 * is ignored here; on failure ctx->maps[i] stays NULL and
		 * the buffer falls through to the copy path below —
		 * confirm this fallback is intentional.
		 */
		if (ctx->fds[i] && (ctx->fds[i] != -1))
			fastrpc_mmap_create(ctx->fl, ctx->fds[i],
					ctx->attrs[i], buf, len,
					mflags, &ctx->maps[i]);
		ipage += 1;
	}
	/* dma handles are always mapped; a failure here is fatal */
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	for (i = bufs; i < bufs + handles; i++) {
		VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
				FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
		if (err)
			goto bail;
		ipage += 1;
	}
	/* metadata = rpra + list + pages (== &ipage[0]) + fdlist + crclist */
	metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
				 (sizeof(uint32_t) * M_CRCLIST);

	/* calculate len requreed for copying */
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		uintptr_t mstart, mend;
		ssize_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (ctx->maps[i])	/* ion-mapped: passed, not copied */
			continue;
		/* start of a non-overlapping run: align the copy cursor */
		if (ctx->overps[oix]->offset == 0)
			copylen = ALIGN(copylen, BALIGN);
		mstart = ctx->overps[oix]->mstart;
		mend = ctx->overps[oix]->mend;
		VERIFY(err, (mend - mstart) <= LONG_MAX);
		if (err)
			goto bail;
		copylen += mend - mstart;
		VERIFY(err, copylen >= 0);	/* guard against overflow */
		if (err)
			goto bail;
	}
	ctx->used = copylen;

	/* allocate new buffer */
	if (copylen) {
		VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
		if (err)
			goto bail;
	}
	/*
	 * NOTE(review): ctx->buf is dereferenced without a NULL check.
	 * This appears safe only because metalen (fdlist + crclist) is
	 * always > 0, forcing copylen > 0 and hence an allocation above —
	 * confirm, or add an explicit ctx->buf check.
	 */
	if (ctx->buf->virt && metalen <= copylen)
		memset(ctx->buf->virt, 0, metalen);

	/* copy metadata */
	rpra = ctx->buf->virt;
	ctx->rpra = rpra;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;
	args = (uintptr_t)ctx->buf->virt + metalen;
	for (i = 0; i < bufs + handles; ++i) {
		if (lpra[i].buf.len)
			list[i].num = 1;
		else
			list[i].num = 0;
		list[i].pgidx = ipage - pages;
		ipage++;
	}

	/* map ion buffers */
	PERF(ctx->fl->profile, ctx->fl->perf.map,
	for (i = 0; rpra && i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];
		uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
		ssize_t len = lpra[i].buf.len;

		rpra[i].buf.pv = 0;
		rpra[i].buf.len = len;
		if (!len)
			continue;
		if (map) {
			struct vm_area_struct *vma;
			uintptr_t offset;
			int num = buf_num_pages(buf, len);
			int idx = list[i].pgidx;

			if (map->attr & FASTRPC_ATTR_NOVA) {
				/* no user VA backing this map */
				offset = 0;
			} else {
				/* offset of the buffer within its VMA */
				down_read(&current->mm->mmap_sem);
				VERIFY(err, NULL != (vma = find_vma(current->mm,
								map->va)));
				if (err) {
					up_read(&current->mm->mmap_sem);
					goto bail;
				}
				offset = buf_page_start(buf) - vma->vm_start;
				up_read(&current->mm->mmap_sem);
				VERIFY(err, offset < (uintptr_t)map->size);
				if (err)
					goto bail;
			}
			pages[idx].addr = map->phys + offset;
			pages[idx].size = num << PAGE_SHIFT;
		}
		rpra[i].buf.pv = buf;
	}
	PERF_END);
	/* physical pages for the dma handle maps */
	for (i = bufs; i < bufs + handles; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		pages[i].addr = map->phys;
		pages[i].size = map->size;
	}
	/* zero fdlist/crclist; the remote side fills them in on return */
	fdlist = (uint64_t *)&pages[bufs + handles];
	for (i = 0; i < M_FDLIST; i++)
		fdlist[i] = 0;
	crclist = (uint32_t *)&fdlist[M_FDLIST];
	memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);

	/* copy non ion buffers */
	PERF(ctx->fl->profile, ctx->fl->perf.copy,
	rlen = copylen - metalen;
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];
		ssize_t mlen;
		uint64_t buf;
		ssize_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (map)
			continue;
		if (ctx->overps[oix]->offset == 0) {
			/* mirror the alignment applied during sizing */
			rlen -= ALIGN(args, BALIGN) - args;
			args = ALIGN(args, BALIGN);
		}
		mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
		VERIFY(err, rlen >= mlen);
		if (err)
			goto bail;
		rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
		pages[list[i].pgidx].addr = ctx->buf->phys -
					    ctx->overps[oix]->offset +
					    (copylen - rlen);
		pages[list[i].pgidx].addr =
			buf_page_start(pages[list[i].pgidx].addr);
		buf = rpra[i].buf.pv;
		pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
		/* only input buffers carry data toward the DSP */
		if (i < inbufs) {
			K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
					lpra[i].buf.pv, len);
			if (err)
				goto bail;
		}
		args = args + mlen;
		rlen -= mlen;
	}
	PERF_END);

	/* flush cached, non-coherent buffers so the DSP sees fresh data */
	PERF(ctx->fl->profile, ctx->fl->perf.flush,
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		if (rpra[i].buf.len && ctx->overps[oix]->mstart)
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
			uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
	}
	PERF_END);
	/* describe dma handles for the remote side */
	for (i = bufs; rpra && i < bufs + handles; i++) {
		rpra[i].dma.fd = ctx->fds[i];
		rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
		rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
	}

	/* finally flush the metadata region itself */
	if (!ctx->fl->sctx->smmu.coherent) {
		PERF(ctx->fl->profile, ctx->fl->perf.flush,
		dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
		PERF_END);
	}
 bail:
	return err;
}
1316
/*
 * put_args() - unmarshal results after the DSP completes an invoke.
 *
 * Copies output buffers back to the caller (buffers that were ion-mapped
 * are not copied; their mapping reference is dropped instead), releases
 * any maps whose fds appear in the fdlist (presumably populated by the
 * remote side — the list is zeroed in get_args()), and copies the CRC
 * list out if the caller asked for it.
 *
 * @kernel: non-zero when @upra/crc destinations are kernel pointers.
 * Returns 0 on success, negative error from a failed user copy.
 */
static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		    remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	uint32_t *crclist = NULL;

	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	/* recompute the metadata layout laid down by get_args() */
	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	crclist = (uint32_t *)(fdlist + M_FDLIST);

	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			/* copied outbuf: move the DSP's data back out */
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			/* mapped outbuf: data is already in place */
			fastrpc_mmap_free(ctx->maps[i]);
			ctx->maps[i] = NULL;
		}
	}
	/* release maps the remote side flagged for teardown via fdlist */
	if (inbufs + outbufs + handles) {
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;	/* zero terminates the list */
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap);
		}
	}
	if (ctx->crc && crclist && rpra)
		K_COPY_TO_USER(err, kernel, ctx->crc,
			crclist, M_CRCLIST*sizeof(uint32_t));

 bail:
	return err;
}
1368
/*
 * inv_args_pre() - pre-invoke cache maintenance on output buffers.
 *
 * For each cached, non-coherent output buffer, flushes the (cache-line
 * unaligned) first and last lines of the buffer — presumably so dirty
 * CPU data sharing those lines is written back before the DSP writes
 * into them and before inv_args() invalidates the range on return.
 * Buffers sharing a page with the metadata block are skipped.
 */
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		/* skip buffers that need no maintenance */
		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* same page as the metadata: handled by the rpra flush */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		/* flush the partial cache line at the start ... */
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		/* ... and the partial cache line at the end */
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
							rpra[i].buf.len);
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}
1404
/*
 * inv_args() - post-invoke cache invalidation on output buffers.
 *
 * After the DSP completes, invalidates each cached, non-coherent output
 * buffer so the CPU re-reads what the DSP wrote instead of stale lines:
 * via msm_ion_do_cache_op() when the map carries an ion handle, plain
 * dmac_inv_range() otherwise.  Finally invalidates the metadata region
 * (rpra .. rpra+used) itself.
 */
static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		/* same skip conditions as the flush side (inv_args_pre) */
		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* covered by the final metadata invalidate below */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						 + rpra[i].buf.len));
	}

	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}
1444
/*
 * fastrpc_invoke_send() - transmit an invoke message to the DSP.
 *
 * Fills in ctx->msg (pids, handle, scalars, physical page of the
 * marshalled argument buffer) and sends it over the channel's glink
 * endpoint.  The context pointer is embedded in the header, with fl->pd
 * OR'ed into it — presumably echoed back by the DSP to identify the
 * reply; confirm against the response handler.
 *
 * @kernel: non-zero for in-kernel invokes, which are sent with pid 0.
 * Returns 0 on success, -ECONNRESET after a subsystem restart, or a
 * glink_tx() error.
 */
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, NULL != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = fl->tgid;
	msg->tid = current->pid;
	/* second session on the same tgid is distinguished via a tid bit */
	if (fl->sessionid)
		msg->tid |= (1 << SESSION_ID_INDEX);
	if (kernel)
		msg->pid = 0;
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	/* channel restarted since this file opened: refuse to send */
	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
		FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
 bail:
	return err;
}
1482
1483static void fastrpc_init(struct fastrpc_apps *me)
1484{
1485 int i;
1486
1487 INIT_HLIST_HEAD(&me->drivers);
1488 spin_lock_init(&me->hlock);
1489 mutex_init(&me->smd_mutex);
1490 me->channel = &gcinfo[0];
1491 for (i = 0; i < NUM_CHANNELS; i++) {
1492 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301493 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001494 me->channel[i].sesscount = 0;
1495 }
1496}
1497
1498static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1499
/*
 * fastrpc_internal_invoke() - core remote invocation path.
 *
 * Restores a previously interrupted context (user invokes only) or
 * allocates a fresh one, marshals arguments (get_args), performs
 * pre-send cache maintenance, transmits the message and waits for the
 * DSP's completion, then invalidates caches and unmarshals the results
 * (put_args).  On -ERESTARTSYS the context is parked for a later retry
 * instead of being freed.
 *
 * @mode:   unused in this function's visible body.
 * @kernel: non-zero for in-kernel callers; makes the wait
 *          non-interruptible and treats inv's pointers as kernel memory.
 *
 * Returns 0 on success.  NOTE(review): on SSR this returns positive
 * ECONNRESET, while fastrpc_invoke_send() uses -ECONNRESET — confirm
 * the asymmetric sign is what userspace expects before changing it.
 */
static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
				   uint32_t kernel,
				   struct fastrpc_ioctl_invoke_crc *inv)
{
	struct smq_invoke_ctx *ctx = NULL;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;
	int cid = fl->cid;
	int interrupted = 0;
	int err = 0;
	struct timespec invoket;

	if (fl->profile)
		getnstimeofday(&invoket);


	VERIFY(err, fl->sctx != NULL);
	if (err)
		goto bail;
	VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
	if (err)
		goto bail;

	/* user invokes may be resuming a signal-interrupted context */
	if (!kernel) {
		VERIFY(err, 0 == context_restore_interrupted(fl, inv,
								&ctx));
		if (err)
			goto bail;
		if (fl->sctx->smmu.faults)
			err = FASTRPC_ENOSUCH;
		if (err)
			goto bail;
		if (ctx)
			goto wait;	/* restored: skip straight to waiting */
	}

	VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
	if (err)
		goto bail;

	if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
		PERF(fl->profile, fl->perf.getargs,
		VERIFY(err, 0 == get_args(kernel, ctx));
		PERF_END);
		if (err)
			goto bail;
	}

	if (!fl->sctx->smmu.coherent)
		inv_args_pre(ctx);
	PERF(fl->profile, fl->perf.link,
	VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
	PERF_END);

	if (err)
		goto bail;
 wait:
	if (kernel)
		wait_for_completion(&ctx->work);
	else {
		interrupted = wait_for_completion_interruptible(&ctx->work);
		VERIFY(err, 0 == (err = interrupted));
		if (err)
			goto bail;	/* -ERESTARTSYS: ctx parked at bail */
	}

	PERF(fl->profile, fl->perf.invargs,
	if (!fl->sctx->smmu.coherent)
		inv_args(ctx);
	PERF_END);

	/* remote return value, stored by context_notify_user() */
	VERIFY(err, 0 == (err = ctx->retval));
	if (err)
		goto bail;

	PERF(fl->profile, fl->perf.putargs,
	VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
	PERF_END);
	if (err)
		goto bail;
 bail:
	if (ctx && interrupted == -ERESTARTSYS)
		context_save_interrupted(ctx);
	else if (ctx)
		context_free(ctx);
	if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
		err = ECONNRESET;

	if (fl->profile && !interrupted) {
		if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
			fl->perf.invoke += getnstimediff(&invoket);
		if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX)
			fl->perf.count++;
	}
	return err;
}
1595
Sathish Ambley36849af2017-02-02 09:35:55 -08001596static int fastrpc_channel_open(struct fastrpc_file *fl);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001597static int fastrpc_init_process(struct fastrpc_file *fl,
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001598 struct fastrpc_ioctl_init_attrs *uproc)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001599{
1600 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05301601 struct fastrpc_apps *me = &gfa;
Sathish Ambleybae51902017-07-03 15:00:49 -07001602 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001603 struct fastrpc_ioctl_init *init = &uproc->init;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001604 struct smq_phy_page pages[1];
c_mtharue1a5ce12017-10-13 20:47:09 +05301605 struct fastrpc_mmap *file = NULL, *mem = NULL;
1606 char *proc_name = NULL;
1607 int srcVM[1] = {VMID_HLOS};
1608 int destVM[1] = {VMID_ADSP_Q6};
1609 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
1610 int hlosVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001611
Sathish Ambley36849af2017-02-02 09:35:55 -08001612 VERIFY(err, !fastrpc_channel_open(fl));
1613 if (err)
1614 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001615 if (init->flags == FASTRPC_INIT_ATTACH) {
1616 remote_arg_t ra[1];
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301617 int tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001618
1619 ra[0].buf.pv = (void *)&tgid;
1620 ra[0].buf.len = sizeof(tgid);
1621 ioctl.inv.handle = 1;
1622 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
1623 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301624 ioctl.fds = NULL;
1625 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001626 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001627 fl->pd = 0;
1628 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1629 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1630 if (err)
1631 goto bail;
1632 } else if (init->flags == FASTRPC_INIT_CREATE) {
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001633 remote_arg_t ra[6];
1634 int fds[6];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001635 int mflags = 0;
1636 struct {
1637 int pgid;
1638 int namelen;
1639 int filelen;
1640 int pageslen;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001641 int attrs;
1642 int siglen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001643 } inbuf;
1644
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301645 inbuf.pgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001646 inbuf.namelen = strlen(current->comm) + 1;
1647 inbuf.filelen = init->filelen;
1648 fl->pd = 1;
1649 if (init->filelen) {
1650 VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
1651 init->file, init->filelen, mflags, &file));
1652 if (err)
1653 goto bail;
1654 }
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301655
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001656 inbuf.pageslen = 1;
1657 VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
1658 init->mem, init->memlen, mflags, &mem));
1659 if (err)
1660 goto bail;
1661 inbuf.pageslen = 1;
1662 ra[0].buf.pv = (void *)&inbuf;
1663 ra[0].buf.len = sizeof(inbuf);
1664 fds[0] = 0;
1665
1666 ra[1].buf.pv = (void *)current->comm;
1667 ra[1].buf.len = inbuf.namelen;
1668 fds[1] = 0;
1669
1670 ra[2].buf.pv = (void *)init->file;
1671 ra[2].buf.len = inbuf.filelen;
1672 fds[2] = init->filefd;
1673
1674 pages[0].addr = mem->phys;
1675 pages[0].size = mem->size;
1676 ra[3].buf.pv = (void *)pages;
1677 ra[3].buf.len = 1 * sizeof(*pages);
1678 fds[3] = 0;
1679
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001680 inbuf.attrs = uproc->attrs;
1681 ra[4].buf.pv = (void *)&(inbuf.attrs);
1682 ra[4].buf.len = sizeof(inbuf.attrs);
1683 fds[4] = 0;
1684
1685 inbuf.siglen = uproc->siglen;
1686 ra[5].buf.pv = (void *)&(inbuf.siglen);
1687 ra[5].buf.len = sizeof(inbuf.siglen);
1688 fds[5] = 0;
1689
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001690 ioctl.inv.handle = 1;
1691 ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001692 if (uproc->attrs)
1693 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001694 ioctl.inv.pra = ra;
1695 ioctl.fds = fds;
c_mtharue1a5ce12017-10-13 20:47:09 +05301696 ioctl.attrs = NULL;
1697 ioctl.crc = NULL;
1698 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1699 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1700 if (err)
1701 goto bail;
1702 } else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
1703 remote_arg_t ra[3];
1704 uint64_t phys = 0;
1705 ssize_t size = 0;
1706 int fds[3];
1707 struct {
1708 int pgid;
1709 int namelen;
1710 int pageslen;
1711 } inbuf;
1712
1713 if (!init->filelen)
1714 goto bail;
1715
1716 proc_name = kzalloc(init->filelen, GFP_KERNEL);
1717 VERIFY(err, !IS_ERR_OR_NULL(proc_name));
1718 if (err)
1719 goto bail;
1720 VERIFY(err, 0 == copy_from_user((void *)proc_name,
1721 (void __user *)init->file, init->filelen));
1722 if (err)
1723 goto bail;
1724
1725 inbuf.pgid = current->tgid;
1726 inbuf.namelen = strlen(proc_name)+1;
1727 inbuf.pageslen = 0;
1728 if (!me->staticpd_flags) {
1729 inbuf.pageslen = 1;
1730 VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
1731 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
1732 &mem));
1733 if (err)
1734 goto bail;
1735 phys = mem->phys;
1736 size = mem->size;
1737 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
1738 srcVM, 1, destVM, destVMperm, 1));
1739 if (err) {
1740 pr_err("ADSPRPC: hyp_assign_phys fail err %d",
1741 err);
1742 pr_err("map->phys %llx, map->size %d\n",
1743 phys, (int)size);
1744 goto bail;
1745 }
1746 me->staticpd_flags = 1;
1747 }
1748
1749 ra[0].buf.pv = (void *)&inbuf;
1750 ra[0].buf.len = sizeof(inbuf);
1751 fds[0] = 0;
1752
1753 ra[1].buf.pv = (void *)proc_name;
1754 ra[1].buf.len = inbuf.namelen;
1755 fds[1] = 0;
1756
1757 pages[0].addr = phys;
1758 pages[0].size = size;
1759
1760 ra[2].buf.pv = (void *)pages;
1761 ra[2].buf.len = sizeof(*pages);
1762 fds[2] = 0;
1763 ioctl.inv.handle = 1;
1764
1765 ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
1766 ioctl.inv.pra = ra;
1767 ioctl.fds = NULL;
1768 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001769 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001770 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1771 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1772 if (err)
1773 goto bail;
1774 } else {
1775 err = -ENOTTY;
1776 }
1777bail:
c_mtharue1a5ce12017-10-13 20:47:09 +05301778 if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
1779 me->staticpd_flags = 0;
1780 if (mem && err) {
1781 if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
1782 hyp_assign_phys(mem->phys, (uint64_t)mem->size,
1783 destVM, 1, srcVM, hlosVMperm, 1);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001784 fastrpc_mmap_free(mem);
c_mtharue1a5ce12017-10-13 20:47:09 +05301785 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001786 if (file)
1787 fastrpc_mmap_free(file);
1788 return err;
1789}
1790
1791static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
1792{
1793 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001794 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001795 remote_arg_t ra[1];
1796 int tgid = 0;
1797
Sathish Ambley36849af2017-02-02 09:35:55 -08001798 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1799 if (err)
1800 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05301801 VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001802 if (err)
1803 goto bail;
1804 tgid = fl->tgid;
1805 ra[0].buf.pv = (void *)&tgid;
1806 ra[0].buf.len = sizeof(tgid);
1807 ioctl.inv.handle = 1;
1808 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
1809 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301810 ioctl.fds = NULL;
1811 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001812 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001813 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1814 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1815bail:
1816 return err;
1817}
1818
1819static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
1820 struct fastrpc_mmap *map)
1821{
Sathish Ambleybae51902017-07-03 15:00:49 -07001822 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001823 struct smq_phy_page page;
1824 int num = 1;
1825 remote_arg_t ra[3];
1826 int err = 0;
1827 struct {
1828 int pid;
1829 uint32_t flags;
1830 uintptr_t vaddrin;
1831 int num;
1832 } inargs;
1833 struct {
1834 uintptr_t vaddrout;
1835 } routargs;
1836
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301837 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001838 inargs.vaddrin = (uintptr_t)map->va;
1839 inargs.flags = flags;
1840 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
1841 ra[0].buf.pv = (void *)&inargs;
1842 ra[0].buf.len = sizeof(inargs);
1843 page.addr = map->phys;
1844 page.size = map->size;
1845 ra[1].buf.pv = (void *)&page;
1846 ra[1].buf.len = num * sizeof(page);
1847
1848 ra[2].buf.pv = (void *)&routargs;
1849 ra[2].buf.len = sizeof(routargs);
1850
1851 ioctl.inv.handle = 1;
1852 if (fl->apps->compat)
1853 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
1854 else
1855 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
1856 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301857 ioctl.fds = NULL;
1858 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001859 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001860 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1861 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1862 map->raddr = (uintptr_t)routargs.vaddrout;
c_mtharue1a5ce12017-10-13 20:47:09 +05301863 if (err)
1864 goto bail;
1865 if (flags == ADSP_MMAP_HEAP_ADDR) {
1866 struct scm_desc desc = {0};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001867
c_mtharue1a5ce12017-10-13 20:47:09 +05301868 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
1869 desc.args[1] = map->phys;
1870 desc.args[2] = map->size;
1871 desc.arginfo = SCM_ARGS(3);
1872 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
1873 TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
1874 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
1875
1876 int srcVM[1] = {VMID_HLOS};
1877 int destVM[1] = {VMID_ADSP_Q6};
1878 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
1879
1880 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
1881 srcVM, 1, destVM, destVMperm, 1));
1882 if (err)
1883 goto bail;
1884 }
1885bail:
1886 return err;
1887}
1888
1889static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
1890 struct fastrpc_mmap *map)
1891{
1892 int err = 0;
1893 int srcVM[1] = {VMID_ADSP_Q6};
1894 int destVM[1] = {VMID_HLOS};
1895 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
1896
1897 if (map->flags == ADSP_MMAP_HEAP_ADDR) {
1898 struct fastrpc_ioctl_invoke_crc ioctl;
1899 struct scm_desc desc = {0};
1900 remote_arg_t ra[1];
1901 int err = 0;
1902 struct {
1903 uint8_t skey;
1904 } routargs;
1905
1906 ra[0].buf.pv = (void *)&routargs;
1907 ra[0].buf.len = sizeof(routargs);
1908
1909 ioctl.inv.handle = 1;
1910 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
1911 ioctl.inv.pra = ra;
1912 ioctl.fds = NULL;
1913 ioctl.attrs = NULL;
1914 ioctl.crc = NULL;
1915 if (fl == NULL)
1916 goto bail;
1917
1918 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1919 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1920 if (err)
1921 goto bail;
1922 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
1923 desc.args[1] = map->phys;
1924 desc.args[2] = map->size;
1925 desc.args[3] = routargs.skey;
1926 desc.arginfo = SCM_ARGS(4);
1927 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
1928 TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
1929 } else if (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
1930 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
1931 srcVM, 1, destVM, destVMperm, 1));
1932 if (err)
1933 goto bail;
1934 }
1935
1936bail:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001937 return err;
1938}
1939
1940static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
1941 struct fastrpc_mmap *map)
1942{
Sathish Ambleybae51902017-07-03 15:00:49 -07001943 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001944 remote_arg_t ra[1];
1945 int err = 0;
1946 struct {
1947 int pid;
1948 uintptr_t vaddrout;
1949 ssize_t size;
1950 } inargs;
1951
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301952 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001953 inargs.size = map->size;
1954 inargs.vaddrout = map->raddr;
1955 ra[0].buf.pv = (void *)&inargs;
1956 ra[0].buf.len = sizeof(inargs);
1957
1958 ioctl.inv.handle = 1;
1959 if (fl->apps->compat)
1960 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
1961 else
1962 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
1963 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301964 ioctl.fds = NULL;
1965 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001966 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001967 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1968 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
c_mtharue1a5ce12017-10-13 20:47:09 +05301969 if (err)
1970 goto bail;
1971 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
1972 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
1973 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, map));
1974 if (err)
1975 goto bail;
1976 }
1977bail:
1978 return err;
1979}
1980
/*
 * After a subsystem restart, drain the global map list: for each map,
 * undo the DSP-side protection, optionally capture the remote heap into
 * an ELF ramdump, and free the mapping.  Maps are detached from the
 * list one at a time under the global spinlock so the heavy work
 * (remote calls, ramdump, free) runs with the lock dropped.
 */
static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
{
	struct fastrpc_mmap *match = NULL, *map = NULL;
	struct hlist_node *n = NULL;
	int err = 0, ret = 0;
	struct fastrpc_apps *me = &gfa;
	struct ramdump_segment *ramdump_segments_rh = NULL;

	do {
		/* Detach the first map (if any) under the lock. */
		match = NULL;
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
		spin_unlock(&me->hlock);

		if (match) {
			VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match));
			if (err)
				goto bail;
			/* Dump the remote heap segment if enabled (channel 0). */
			if (me->channel[0].ramdumpenabled) {
				ramdump_segments_rh = kcalloc(1,
				sizeof(struct ramdump_segment), GFP_KERNEL);
				if (ramdump_segments_rh) {
					ramdump_segments_rh->address =
					match->phys;
					ramdump_segments_rh->size = match->size;
					ret = do_elf_ramdump(
					 me->channel[0].remoteheap_ramdump_dev,
					 ramdump_segments_rh, 1);
					if (ret < 0)
						pr_err("ADSPRPC: unable to dump heap");
					kfree(ramdump_segments_rh);
				}
			}
			fastrpc_mmap_free(match);
		}
	} while (match);
bail:
	/* On error, put the last detached map back so it is not leaked. */
	if (err && match)
		fastrpc_mmap_add(match);
	return err;
}
2026
/* Forward declarations (defined elsewhere in this file); used by the
 * munmap error paths below to re-add a map that failed to unmap remotely.
 */
static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			     ssize_t len, struct fastrpc_mmap **ppmap);

static void fastrpc_mmap_add(struct fastrpc_mmap *map);
2031
2032static int fastrpc_internal_munmap(struct fastrpc_file *fl,
2033 struct fastrpc_ioctl_munmap *ud)
2034{
2035 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302036 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002037
2038 VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
2039 if (err)
2040 goto bail;
2041 VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
2042 if (err)
2043 goto bail;
2044 fastrpc_mmap_free(map);
2045bail:
2046 if (err && map)
2047 fastrpc_mmap_add(map);
2048 return err;
2049}
2050
2051static int fastrpc_internal_mmap(struct fastrpc_file *fl,
2052 struct fastrpc_ioctl_mmap *ud)
2053{
2054
c_mtharue1a5ce12017-10-13 20:47:09 +05302055 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002056 int err = 0;
2057
c_mtharue1a5ce12017-10-13 20:47:09 +05302058 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t __user)ud->vaddrin,
2059 ud->size, ud->flags, 1, &map))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002060 return 0;
2061
2062 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
c_mtharue1a5ce12017-10-13 20:47:09 +05302063 (uintptr_t __user)ud->vaddrin, ud->size,
2064 ud->flags, &map));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002065 if (err)
2066 goto bail;
2067 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
2068 if (err)
2069 goto bail;
2070 ud->vaddrout = map->raddr;
2071 bail:
2072 if (err && map)
2073 fastrpc_mmap_free(map);
2074 return err;
2075}
2076
/*
 * kref release callback for a channel context.  Invoked through
 * kref_put_mutex() with gfa.smd_mutex already held by the caller, which
 * is why this function unlocks (but never locks) smd_mutex.  Closes the
 * glink port and unregisters the link-state callback.
 */
static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	/* Channel id is the index of this ctx within the global table. */
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = NULL;
	glink_unregister_link_state_cb(ctx->link.link_notify_handle);
	ctx->link.link_notify_handle = NULL;
	mutex_unlock(&me->smd_mutex);
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
		MAJOR(me->dev_no), cid);
}
2093
2094static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
2095
2096static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
2097 int secure, struct fastrpc_session_ctx **session)
2098{
2099 struct fastrpc_apps *me = &gfa;
2100 int idx = 0, err = 0;
2101
2102 if (chan->sesscount) {
2103 for (idx = 0; idx < chan->sesscount; ++idx) {
2104 if (!chan->session[idx].used &&
2105 chan->session[idx].smmu.secure == secure) {
2106 chan->session[idx].used = 1;
2107 break;
2108 }
2109 }
2110 VERIFY(err, idx < chan->sesscount);
2111 if (err)
2112 goto bail;
2113 chan->session[idx].smmu.faults = 0;
2114 } else {
2115 VERIFY(err, me->dev != NULL);
2116 if (err)
2117 goto bail;
2118 chan->session[0].dev = me->dev;
c_mtharue1a5ce12017-10-13 20:47:09 +05302119 chan->session[0].smmu.dev = me->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002120 }
2121
2122 *session = &chan->session[idx];
2123 bail:
2124 return err;
2125}
2126
c_mtharue1a5ce12017-10-13 20:47:09 +05302127static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
2128 size_t size)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002129{
2130 if (glink_queue_rx_intent(h, NULL, size))
2131 return false;
2132 return true;
2133}
2134
/* Transmit-done notification from glink; nothing to reclaim here. */
static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
					const void *pkt_priv, const void *ptr)
{
}
2139
/*
 * glink rx callback: a response packet from the DSP.  Validates the
 * packet size and the context cookie (the low bit of rsp->ctx is a flag
 * and is masked off; the magic field guards against stale or forged
 * cookies), then completes the waiting invocation.  The rx buffer is
 * always handed back to glink, even when validation fails.
 */
static void fastrpc_glink_notify_rx(void *handle, const void *priv,
	const void *pkt_priv, const void *ptr, size_t size)
{
	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
	struct smq_invoke_ctx *ctx;
	int err = 0;

	VERIFY(err, (rsp && size >= sizeof(*rsp)));
	if (err)
		goto bail;

	ctx = (struct smq_invoke_ctx *)(uint64_to_ptr(rsp->ctx & ~1));
	VERIFY(err, (ctx && ctx->magic == FASTRPC_CTX_MAGIC));
	if (err)
		goto bail;

	context_notify_user(ctx, rsp->retval);
bail:
	if (err)
		pr_err("adsprpc: invalid response or context\n");
	glink_rx_done(handle, ptr, true);
}
2162
c_mtharue1a5ce12017-10-13 20:47:09 +05302163static void fastrpc_glink_notify_state(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002164 unsigned int event)
2165{
2166 struct fastrpc_apps *me = &gfa;
2167 int cid = (int)(uintptr_t)priv;
2168 struct fastrpc_glink_info *link;
2169
2170 if (cid < 0 || cid >= NUM_CHANNELS)
2171 return;
2172 link = &me->channel[cid].link;
2173 switch (event) {
2174 case GLINK_CONNECTED:
2175 link->port_state = FASTRPC_LINK_CONNECTED;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302176 complete(&me->channel[cid].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002177 break;
2178 case GLINK_LOCAL_DISCONNECTED:
2179 link->port_state = FASTRPC_LINK_DISCONNECTED;
2180 break;
2181 case GLINK_REMOTE_DISCONNECTED:
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05302182 if (me->channel[cid].chan) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002183 fastrpc_glink_close(me->channel[cid].chan, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05302184 me->channel[cid].chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002185 }
2186 break;
2187 default:
2188 break;
2189 }
2190}
2191
2192static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
2193 struct fastrpc_session_ctx **session)
2194{
2195 int err = 0;
2196 struct fastrpc_apps *me = &gfa;
2197
2198 mutex_lock(&me->smd_mutex);
2199 if (!*session)
2200 err = fastrpc_session_alloc_locked(chan, secure, session);
2201 mutex_unlock(&me->smd_mutex);
2202 return err;
2203}
2204
/* Return an SMMU session to the free pool; serialized by smd_mutex. */
static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
				struct fastrpc_session_ctx *session)
{
	struct fastrpc_apps *me = &gfa;

	mutex_lock(&me->smd_mutex);
	session->used = 0;
	mutex_unlock(&me->smd_mutex);
}
2214
/*
 * Tear down all per-fd driver state: notify the DSP that the process is
 * exiting, unlink from the global driver list, destroy contexts, bufs
 * and maps, and drop the channel reference and SMMU sessions.
 */
static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n;
	struct fastrpc_mmap *map = NULL;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	/* Best effort: tell the DSP to release its side of the process. */
	(void)fastrpc_release_current_dsp_process(fl);

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	/*
	 * No session context means FASTRPC_IOCTL_GETINFO never ran, so no
	 * channel ref or maps exist either.  NOTE(review): cid may still
	 * be -1 in that case, which would make the channel[cid] accesses
	 * below invalid — this early return is what guards them.
	 */
	if (!fl->sctx) {
		kfree(fl);
		return 0;
	}
	/* Flag teardown so racing ioctls bail out early. */
	spin_lock(&fl->hlock);
	fl->file_close = 1;
	spin_unlock(&fl->hlock);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map);
	}
	/* Only drop the channel ref taken in this SSR generation. */
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
			fastrpc_channel_close, &fl->apps->smd_mutex);
	if (fl->sctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
	kfree(fl);
	return 0;
}
2253
2254static int fastrpc_device_release(struct inode *inode, struct file *file)
2255{
2256 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2257
2258 if (fl) {
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302259 if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
2260 pm_qos_remove_request(&fl->pm_qos_req);
Sathish Ambley1ca68232017-01-19 10:32:55 -08002261 if (fl->debugfs_file != NULL)
2262 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002263 fastrpc_file_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +05302264 file->private_data = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002265 }
2266 return 0;
2267}
2268
2269static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
2270 void *priv)
2271{
2272 struct fastrpc_apps *me = &gfa;
2273 int cid = (int)((uintptr_t)priv);
2274 struct fastrpc_glink_info *link;
2275
2276 if (cid < 0 || cid >= NUM_CHANNELS)
2277 return;
2278
2279 link = &me->channel[cid].link;
2280 switch (cb_info->link_state) {
2281 case GLINK_LINK_STATE_UP:
2282 link->link_state = FASTRPC_LINK_STATE_UP;
2283 complete(&me->channel[cid].work);
2284 break;
2285 case GLINK_LINK_STATE_DOWN:
2286 link->link_state = FASTRPC_LINK_STATE_DOWN;
2287 break;
2288 default:
2289 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
2290 break;
2291 }
2292}
2293
2294static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
2295{
2296 int err = 0;
2297 struct fastrpc_glink_info *link;
2298
2299 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2300 if (err)
2301 goto bail;
2302
2303 link = &me->channel[cid].link;
2304 if (link->link_notify_handle != NULL)
2305 goto bail;
2306
2307 link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
2308 link->link_notify_handle = glink_register_link_state_cb(
2309 &link->link_info,
2310 (void *)((uintptr_t)cid));
2311 VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
2312 if (err) {
2313 link->link_notify_handle = NULL;
2314 goto bail;
2315 }
2316 VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
2317 RPC_TIMEOUT));
2318bail:
2319 return err;
2320}
2321
2322static void fastrpc_glink_close(void *chan, int cid)
2323{
2324 int err = 0;
2325 struct fastrpc_glink_info *link;
2326
2327 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2328 if (err)
2329 return;
2330 link = &gfa.channel[cid].link;
2331
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302332 if (link->port_state == FASTRPC_LINK_CONNECTED) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002333 link->port_state = FASTRPC_LINK_DISCONNECTING;
2334 glink_close(chan);
2335 }
2336}
2337
/*
 * Open the glink port for channel @cid.  Requires the link layer to be
 * up and the port to be fully disconnected; moves the port to
 * CONNECTING and stores the handle once glink_open() succeeds.  The
 * CONNECTED transition arrives asynchronously through
 * fastrpc_glink_notify_state().
 */
static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &me->channel[cid].link.cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
	if (err)
		goto bail;

	link->port_state = FASTRPC_LINK_CONNECTING;
	cfg->priv = (void *)(uintptr_t)cid;
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err) {
		/* Roll back the optimistic state change on failure. */
		if (link->port_state == FASTRPC_LINK_CONNECTING)
			link->port_state = FASTRPC_LINK_DISCONNECTED;
		goto bail;
	}
	me->channel[cid].chan = handle;
bail:
	return err;
}
2379
/* debugfs ->open(): expose the fastrpc_file stored in i_private (NULL
 * for the global node) to subsequent read() calls.
 */
static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}
2385
/*
 * debugfs ->read(): render driver state into a DEBUGFS_SIZE text buffer
 * and copy it out.  Without a private fastrpc_file (the global node)
 * it lists all channels and their SMMU sessions; with one it lists that
 * process's bufs, maps and pending/interrupted invoke contexts.
 * scnprintf() caps every write at the remaining buffer space.
 */
static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
					 size_t count, loff_t *position)
{
	struct fastrpc_file *fl = filp->private_data;
	struct hlist_node *n;
	struct fastrpc_buf *buf = NULL;
	struct fastrpc_mmap *map = NULL;
	struct smq_invoke_ctx *ictx = NULL;
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	unsigned int len = 0;
	int i, j, ret = 0;
	char *fileinfo = NULL;

	fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
	if (!fileinfo)
		goto bail;
	if (fl == NULL) {
		/* Global node: dump every channel and its sessions. */
		for (i = 0; i < NUM_CHANNELS; i++) {
			chan = &gcinfo[i];
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s\n\n",
					chan->name);
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s %d\n",
					"sesscount:", chan->sesscount);
			for (j = 0; j < chan->sesscount; j++) {
				sess = &chan->session[j];
				len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len,
					"%s%d\n\n", "SESSION", j);
				len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len,
					"%s %d\n", "sid:",
					sess->smmu.cb);
				len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len,
					"%s %d\n", "SECURE:",
					sess->smmu.secure);
			}
		}
	} else {
		/* Per-process node: dump this fd's state. */
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"PROCESS_ID:", fl->tgid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"CHANNEL_ID:", fl->cid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"SSRCOUNT:", fl->ssrcount);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s\n",
				"LIST OF BUFS:");
		/* fl->hlock guards the bufs/maps/context lists below. */
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %pK %s %llx\n", "buf:",
					buf, "buf->virt:", buf->virt,
					"buf->phys:", buf->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF MAPS:");
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %lx %s %llx\n",
					"map:", map,
					"map->va:", map->va,
					"map->phys:", map->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF PENDING SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %u %s %u %s %u\n",
					"smqcontext:", ictx,
					"sc:", ictx->sc,
					"tid:", ictx->pid,
					"handle", ictx->rpra->h);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF INTERRUPTED SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %u %s %u %s %u\n",
					"smqcontext:", ictx,
					"sc:", ictx->sc,
					"tid:", ictx->pid,
					"handle", ictx->rpra->h);
		}
		spin_unlock(&fl->hlock);
	}
	if (len > DEBUGFS_SIZE)
		len = DEBUGFS_SIZE;
	ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
	kfree(fileinfo);
bail:
	return ret;
}
2488
/* File operations for the fastrpc debugfs status nodes. */
static const struct file_operations debugfs_fops = {
	.open = fastrpc_debugfs_open,
	.read = fastrpc_debugfs_read,
};
Sathish Ambley36849af2017-02-02 09:35:55 -08002493static int fastrpc_channel_open(struct fastrpc_file *fl)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002494{
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002495 struct fastrpc_apps *me = &gfa;
Sathish Ambley36849af2017-02-02 09:35:55 -08002496 int cid, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002497
2498 mutex_lock(&me->smd_mutex);
2499
Sathish Ambley36849af2017-02-02 09:35:55 -08002500 VERIFY(err, fl && fl->sctx);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002501 if (err)
2502 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08002503 cid = fl->cid;
c_mtharue1a5ce12017-10-13 20:47:09 +05302504 if (me->channel[cid].ssrcount !=
2505 me->channel[cid].prevssrcount) {
2506 if (!me->channel[cid].issubsystemup) {
2507 VERIFY(err, 0);
2508 if (err)
2509 goto bail;
2510 }
2511 }
Sathish Ambley36849af2017-02-02 09:35:55 -08002512 VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
2513 if (err)
2514 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002515 fl->ssrcount = me->channel[cid].ssrcount;
2516 if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
c_mtharue1a5ce12017-10-13 20:47:09 +05302517 (me->channel[cid].chan == NULL)) {
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05302518 VERIFY(err, 0 == fastrpc_glink_register(cid, me));
2519 if (err)
2520 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002521 VERIFY(err, 0 == fastrpc_glink_open(cid));
2522 if (err)
2523 goto bail;
2524
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302525 VERIFY(err,
2526 wait_for_completion_timeout(&me->channel[cid].workport,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002527 RPC_TIMEOUT));
2528 if (err) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302529 me->channel[cid].chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002530 goto bail;
2531 }
2532 kref_init(&me->channel[cid].kref);
2533 pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
2534 MAJOR(me->dev_no), cid);
Tharun Kumar Merugu88ba9252017-08-09 12:15:41 +05302535 err = glink_queue_rx_intent(me->channel[cid].chan, NULL, 16);
2536 err |= glink_queue_rx_intent(me->channel[cid].chan, NULL, 64);
Bruce Levy34c3c1c2017-07-31 17:08:58 -07002537 if (err)
Tharun Kumar Merugu88ba9252017-08-09 12:15:41 +05302538 pr_warn("adsprpc: initial intent fail for %d err %d\n",
2539 cid, err);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002540 if (me->channel[cid].ssrcount !=
2541 me->channel[cid].prevssrcount) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302542 if (fastrpc_mmap_remove_ssr(fl))
2543 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002544 me->channel[cid].prevssrcount =
2545 me->channel[cid].ssrcount;
2546 }
2547 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002548
2549bail:
2550 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002551 return err;
2552}
2553
/*
 * ->open() for the fastrpc device node: allocate and initialize the
 * per-fd state and register it on the global driver list.  The channel
 * id stays -1 until userspace selects one via FASTRPC_IOCTL_GETINFO.
 */
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	int err = 0;
	struct dentry *debugfs_file;
	struct fastrpc_file *fl = NULL;
	struct fastrpc_apps *me = &gfa;

	VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
	if (err)
		return err;
	/* Per-process debugfs node named after the opening task. */
	debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
						fl, &debugfs_fops);
	context_list_ctor(&fl->clst);
	spin_lock_init(&fl->hlock);
	INIT_HLIST_HEAD(&fl->maps);
	INIT_HLIST_HEAD(&fl->bufs);
	INIT_HLIST_NODE(&fl->hn);
	fl->sessionid = 0;
	fl->tgid = current->tgid;
	fl->apps = me;
	fl->mode = FASTRPC_MODE_SERIAL;
	fl->cid = -1;
	/* debugfs is best-effort; proceed even if the node was not made. */
	if (debugfs_file != NULL)
		fl->debugfs_file = debugfs_file;
	memset(&fl->perf, 0, sizeof(fl->perf));
	fl->qos_request = 0;
	filp->private_data = fl;
	spin_lock(&me->hlock);
	hlist_add_head(&fl->hn, &me->drivers);
	spin_unlock(&me->hlock);
	return 0;
}
2586
/*
 * FASTRPC_IOCTL_GETINFO backend.  On the first call *info carries the
 * requested channel id: bind this fd to that channel and allocate a
 * non-secure SMMU session.  On return *info reports whether the
 * session's SMMU is enabled (1) or not (0).
 */
static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
{
	int err = 0;
	uint32_t cid;

	VERIFY(err, fl != NULL);
	if (err)
		goto bail;
	if (fl->cid == -1) {
		cid = *info;
		/* cid is unsigned, so this also rejects negative inputs. */
		VERIFY(err, cid < NUM_CHANNELS);
		if (err)
			goto bail;
		fl->cid = cid;
		fl->ssrcount = fl->apps->channel[cid].ssrcount;
		/*
		 * NOTE(review): the "_locked" helper is invoked here
		 * without smd_mutex held — confirm how this is serialized.
		 */
		VERIFY(err, !fastrpc_session_alloc_locked(
				&fl->apps->channel[cid], 0, &fl->sctx));
		if (err)
			goto bail;
	}
	VERIFY(err, fl->sctx != NULL);
	if (err)
		goto bail;
	*info = (fl->sctx->smmu.enabled ? 1 : 0);
bail:
	return err;
}
2614
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302615static int fastrpc_internal_control(struct fastrpc_file *fl,
2616 struct fastrpc_ioctl_control *cp)
2617{
2618 int err = 0;
2619 int latency;
2620
2621 VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
2622 if (err)
2623 goto bail;
2624 VERIFY(err, !IS_ERR_OR_NULL(cp));
2625 if (err)
2626 goto bail;
2627
2628 switch (cp->req) {
2629 case FASTRPC_CONTROL_LATENCY:
2630 latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
2631 fl->apps->latency : PM_QOS_DEFAULT_VALUE;
2632 VERIFY(err, latency != 0);
2633 if (err)
2634 goto bail;
2635 if (!fl->qos_request) {
2636 pm_qos_add_request(&fl->pm_qos_req,
2637 PM_QOS_CPU_DMA_LATENCY, latency);
2638 fl->qos_request = 1;
2639 } else
2640 pm_qos_update_request(&fl->pm_qos_req, latency);
2641 break;
2642 default:
2643 err = -ENOTTY;
2644 break;
2645 }
2646bail:
2647 return err;
2648}
2649
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002650static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
2651 unsigned long ioctl_param)
2652{
2653 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07002654 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002655 struct fastrpc_ioctl_mmap mmap;
2656 struct fastrpc_ioctl_munmap munmap;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002657 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002658 struct fastrpc_ioctl_perf perf;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302659 struct fastrpc_ioctl_control cp;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002660 } p;
2661 void *param = (char *)ioctl_param;
2662 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2663 int size = 0, err = 0;
2664 uint32_t info;
2665
c_mtharue1a5ce12017-10-13 20:47:09 +05302666 p.inv.fds = NULL;
2667 p.inv.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002668 p.inv.crc = NULL;
tharun kumar9f899ea2017-07-03 17:07:03 +05302669 spin_lock(&fl->hlock);
2670 if (fl->file_close == 1) {
2671 err = EBADF;
2672 pr_warn("ADSPRPC: fastrpc_device_release is happening, So not sending any new requests to DSP");
2673 spin_unlock(&fl->hlock);
2674 goto bail;
2675 }
2676 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002677
2678 switch (ioctl_num) {
2679 case FASTRPC_IOCTL_INVOKE:
2680 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07002681 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002682 case FASTRPC_IOCTL_INVOKE_FD:
2683 if (!size)
2684 size = sizeof(struct fastrpc_ioctl_invoke_fd);
2685 /* fall through */
2686 case FASTRPC_IOCTL_INVOKE_ATTRS:
2687 if (!size)
2688 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07002689 /* fall through */
2690 case FASTRPC_IOCTL_INVOKE_CRC:
2691 if (!size)
2692 size = sizeof(struct fastrpc_ioctl_invoke_crc);
c_mtharue1a5ce12017-10-13 20:47:09 +05302693 K_COPY_FROM_USER(err, 0, &p.inv, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002694 if (err)
2695 goto bail;
2696 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
2697 0, &p.inv)));
2698 if (err)
2699 goto bail;
2700 break;
2701 case FASTRPC_IOCTL_MMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05302702 K_COPY_FROM_USER(err, 0, &p.mmap, param,
2703 sizeof(p.mmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05302704 if (err)
2705 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002706 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
2707 if (err)
2708 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302709 K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002710 if (err)
2711 goto bail;
2712 break;
2713 case FASTRPC_IOCTL_MUNMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05302714 K_COPY_FROM_USER(err, 0, &p.munmap, param,
2715 sizeof(p.munmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05302716 if (err)
2717 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002718 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
2719 &p.munmap)));
2720 if (err)
2721 goto bail;
2722 break;
2723 case FASTRPC_IOCTL_SETMODE:
2724 switch ((uint32_t)ioctl_param) {
2725 case FASTRPC_MODE_PARALLEL:
2726 case FASTRPC_MODE_SERIAL:
2727 fl->mode = (uint32_t)ioctl_param;
2728 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002729 case FASTRPC_MODE_PROFILE:
2730 fl->profile = (uint32_t)ioctl_param;
2731 break;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302732 case FASTRPC_MODE_SESSION:
2733 fl->sessionid = 1;
2734 fl->tgid |= (1 << SESSION_ID_INDEX);
2735 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002736 default:
2737 err = -ENOTTY;
2738 break;
2739 }
2740 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002741 case FASTRPC_IOCTL_GETPERF:
c_mtharue1a5ce12017-10-13 20:47:09 +05302742 K_COPY_FROM_USER(err, 0, &p.perf,
2743 param, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002744 if (err)
2745 goto bail;
2746 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
2747 if (p.perf.keys) {
2748 char *keys = PERF_KEYS;
2749
c_mtharue1a5ce12017-10-13 20:47:09 +05302750 K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
2751 keys, strlen(keys)+1);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002752 if (err)
2753 goto bail;
2754 }
2755 if (p.perf.data) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302756 K_COPY_TO_USER(err, 0, (void *)p.perf.data,
2757 &fl->perf, sizeof(fl->perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002758 }
c_mtharue1a5ce12017-10-13 20:47:09 +05302759 K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002760 if (err)
2761 goto bail;
2762 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302763 case FASTRPC_IOCTL_CONTROL:
c_mtharue1a5ce12017-10-13 20:47:09 +05302764 K_COPY_FROM_USER(err, 0, &p.cp, param,
2765 sizeof(p.cp));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302766 if (err)
2767 goto bail;
2768 VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
2769 if (err)
2770 goto bail;
2771 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002772 case FASTRPC_IOCTL_GETINFO:
c_mtharue1a5ce12017-10-13 20:47:09 +05302773 K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
Sathish Ambley36849af2017-02-02 09:35:55 -08002774 if (err)
2775 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002776 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
2777 if (err)
2778 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302779 K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002780 if (err)
2781 goto bail;
2782 break;
2783 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002784 p.init.attrs = 0;
2785 p.init.siglen = 0;
2786 size = sizeof(struct fastrpc_ioctl_init);
2787 /* fall through */
2788 case FASTRPC_IOCTL_INIT_ATTRS:
2789 if (!size)
2790 size = sizeof(struct fastrpc_ioctl_init_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +05302791 K_COPY_FROM_USER(err, 0, &p.init, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002792 if (err)
2793 goto bail;
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05302794 VERIFY(err, p.init.init.filelen >= 0 &&
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05302795 p.init.init.filelen < INIT_FILELEN_MAX);
2796 if (err)
2797 goto bail;
2798 VERIFY(err, p.init.init.memlen >= 0 &&
2799 p.init.init.memlen < INIT_MEMLEN_MAX);
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05302800 if (err)
2801 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002802 VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
2803 if (err)
2804 goto bail;
2805 break;
2806
2807 default:
2808 err = -ENOTTY;
2809 pr_info("bad ioctl: %d\n", ioctl_num);
2810 break;
2811 }
2812 bail:
2813 return err;
2814}
2815
/*
 * Subsystem-restart (SSR) notifier callback for one fastrpc channel.
 *
 * @nb:   embedded notifier_block; used to recover the owning channel ctx.
 * @code: SUBSYS_* event code from the subsystem-restart framework.
 * @data: struct notif_data supplied by the framework.
 *
 * On SUBSYS_BEFORE_SHUTDOWN the glink transport is closed under smd_mutex
 * and blocked clients are woken; on SUBSYS_AFTER_POWERUP the channel is
 * marked up again.  Always returns NOTIFY_DONE.
 */
static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
					unsigned long code,
					void *data)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	struct notif_data *notifdata = data;
	int cid;

	/* Recover the channel context and its index from the notifier. */
	ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
	cid = ctx - &me->channel[0];
	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		/* ssrcount/issubsystemup/chan are guarded by smd_mutex. */
		mutex_lock(&me->smd_mutex);
		ctx->ssrcount++;
		ctx->issubsystemup = 0;
		if (ctx->chan) {
			fastrpc_glink_close(ctx->chan, cid);
			ctx->chan = NULL;
			pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
				 gcinfo[cid].name, MAJOR(me->dev_no), cid);
		}
		mutex_unlock(&me->smd_mutex);
		/*
		 * NOTE(review): cid 0 is presumably the channel owning the
		 * static PD; clearing the flag lets it be re-created after
		 * restart -- confirm against staticpd users.
		 */
		if (cid == 0)
			me->staticpd_flags = 0;
		/* Wake clients waiting on RPC completions for this channel. */
		fastrpc_notify_drivers(me, cid);
	} else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
		/* Only collect a remote-heap dump when a dump device exists
		 * and the framework requested ramdumps for this restart.
		 */
		if (me->channel[0].remoteheap_ramdump_dev &&
				notifdata->enable_ramdump) {
			me->channel[0].ramdumpenabled = 1;
		}
	} else if (code == SUBSYS_AFTER_POWERUP) {
		ctx->issubsystemup = 1;
	}

	return NOTIFY_DONE;
}
2852
/* Character-device operations backing the /dev fastrpc node. */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};
2859
/*
 * Device-tree compatibles handled by this driver: the main fastrpc nodes,
 * the per-SMMU-context-bank nodes and the ADSP memory-region node
 * (dispatched in fastrpc_probe()).
 */
static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};
2867
2868static int fastrpc_cb_probe(struct device *dev)
2869{
2870 struct fastrpc_channel_ctx *chan;
2871 struct fastrpc_session_ctx *sess;
2872 struct of_phandle_args iommuspec;
2873 const char *name;
2874 unsigned int start = 0x80000000;
2875 int err = 0, i;
2876 int secure_vmid = VMID_CP_PIXEL;
2877
c_mtharue1a5ce12017-10-13 20:47:09 +05302878 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
2879 "label", NULL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002880 if (err)
2881 goto bail;
2882 for (i = 0; i < NUM_CHANNELS; i++) {
2883 if (!gcinfo[i].name)
2884 continue;
2885 if (!strcmp(name, gcinfo[i].name))
2886 break;
2887 }
2888 VERIFY(err, i < NUM_CHANNELS);
2889 if (err)
2890 goto bail;
2891 chan = &gcinfo[i];
2892 VERIFY(err, chan->sesscount < NUM_SESSIONS);
2893 if (err)
2894 goto bail;
2895
2896 VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
2897 "#iommu-cells", 0, &iommuspec));
2898 if (err)
2899 goto bail;
2900 sess = &chan->session[chan->sesscount];
2901 sess->smmu.cb = iommuspec.args[0] & 0xf;
2902 sess->used = 0;
2903 sess->smmu.coherent = of_property_read_bool(dev->of_node,
2904 "dma-coherent");
2905 sess->smmu.secure = of_property_read_bool(dev->of_node,
2906 "qcom,secure-context-bank");
2907 if (sess->smmu.secure)
2908 start = 0x60000000;
2909 VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
2910 arm_iommu_create_mapping(&platform_bus_type,
Tharun Kumar Meruguca183f92017-04-27 17:43:27 +05302911 start, 0x78000000)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002912 if (err)
2913 goto bail;
2914
2915 if (sess->smmu.secure)
2916 iommu_domain_set_attr(sess->smmu.mapping->domain,
2917 DOMAIN_ATTR_SECURE_VMID,
2918 &secure_vmid);
2919
2920 VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
2921 if (err)
2922 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302923 sess->smmu.dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002924 sess->smmu.enabled = 1;
2925 chan->sesscount++;
Sathish Ambley1ca68232017-01-19 10:32:55 -08002926 debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
2927 NULL, &debugfs_fops);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002928bail:
2929 return err;
2930}
2931
/*
 * Platform-driver probe.  Dispatches on the node's compatible string:
 *  - "qcom,msm-fastrpc-compute-cb": an SMMU context bank, handled by
 *    fastrpc_cb_probe();
 *  - "qcom,msm-adsprpc-mem-region": the ADSP memory region; its CMA
 *    base/size is located via the ION ADSP heap node and hyp-assigned so
 *    HLOS and the remote Q6 subsystems all get R/W/X access;
 *  - otherwise the main fastrpc node: read the optional PM-QoS latency
 *    property and populate child nodes (which triggers the cb probes).
 */
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		/* range.addr == 0 means "no CMA region found" below. */
		range.addr = 0;
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			/* Locate the ION child heap whose "reg" matches the
			 * ADSP heap id and take its CMA area's base/size.
			 */
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
						VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				};

			/* Share the region from HLOS to the remote VMs. */
			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
					srcVM, 1, destVM, destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	/* Optional property; absence just means no PM-QoS latency vote. */
	err = of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
				&me->latency);
	if (err)
		me->latency = 0;
	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					fastrpc_match_table,
					NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}
2999
3000static void fastrpc_deinit(void)
3001{
3002 struct fastrpc_apps *me = &gfa;
3003 struct fastrpc_channel_ctx *chan = gcinfo;
3004 int i, j;
3005
3006 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
3007 if (chan->chan) {
3008 kref_put_mutex(&chan->kref,
3009 fastrpc_channel_close, &me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05303010 chan->chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003011 }
3012 for (j = 0; j < NUM_SESSIONS; j++) {
3013 struct fastrpc_session_ctx *sess = &chan->session[j];
c_mtharue1a5ce12017-10-13 20:47:09 +05303014 if (sess->smmu.dev) {
3015 arm_iommu_detach_device(sess->smmu.dev);
3016 sess->smmu.dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003017 }
3018 if (sess->smmu.mapping) {
3019 arm_iommu_release_mapping(sess->smmu.mapping);
c_mtharue1a5ce12017-10-13 20:47:09 +05303020 sess->smmu.mapping = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003021 }
3022 }
3023 }
3024}
3025
/* Platform driver; matches every compatible in fastrpc_match_table. */
static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};
3034
/*
 * Module init: register the platform driver, allocate the char-dev region,
 * add the cdev, create the device class and node, register SSR notifiers
 * for every channel, create the ION client and the debugfs root.
 *
 * Error handling is a goto-unwind chain: each label undoes everything
 * acquired before it, and fastrpc_deinit() mops up channel/session state.
 * Returns 0 on success, non-zero err otherwise.
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	struct device *dev = NULL;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	/* Single device node (minor 0) serves all channels. */
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				1));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	dev = device_create(me->class, NULL,
				MKDEV(MAJOR(me->dev_no), 0),
				NULL, gcinfo[0].name);
	VERIFY(err, !IS_ERR_OR_NULL(dev));
	if (err)
		goto device_create_bail;
	/* All channels share the one device; hook each into SSR notify. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		me->channel[i].dev = dev;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].issubsystemup = 1;
		me->channel[i].ramdumpenabled = 0;
		me->channel[i].remoteheap_ramdump_dev = NULL;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	/* Return value deliberately unchecked: debugfs is best-effort. */
	debugfs_root = debugfs_create_dir("adsprpc", NULL);
	return 0;
device_create_bail:
	for (i = 0; i < NUM_CHANNELS; i++) {
		/* handle may be NULL/invalid if registration failed. */
		if (me->channel[i].handle)
			subsys_notif_unregister_notifier(me->channel[i].handle,
							&me->channel[i].nb);
	}
	if (!IS_ERR_OR_NULL(dev))
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}
3106
/*
 * Module exit: destroy all client files, tear down channels/sessions,
 * remove per-channel device nodes and SSR notifiers, then drop the class,
 * cdev, chrdev region, ION client and debugfs tree.
 *
 * NOTE(review): init registers an SSR notifier for every channel index,
 * but this loop skips unnamed channels (and does not check .handle like
 * the init error path does) -- confirm unnamed channels can never have a
 * valid notifier handle.
 */
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;
	int i;

	fastrpc_file_list_dtor(me);
	fastrpc_deinit();
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						&me->channel[i].nb);
	}
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
	ion_client_destroy(me->client);
	debugfs_remove_recursive(debugfs_root);
}
3127
3128late_initcall(fastrpc_device_init);
3129module_exit(fastrpc_device_exit);
3130
3131MODULE_LICENSE("GPL v2");