blob: ec4590e0b411d1f0c9c57e30a292f5c086c8f276 [file] [log] [blame]
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001/*
Sathish Ambleyae5ee542017-01-16 22:24:23 -08002 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
29#include <soc/qcom/subsystem_notif.h>
30#include <soc/qcom/subsystem_restart.h>
31#include <linux/scatterlist.h>
32#include <linux/fs.h>
33#include <linux/uaccess.h>
34#include <linux/device.h>
35#include <linux/of.h>
36#include <linux/of_address.h>
37#include <linux/of_platform.h>
38#include <linux/dma-contiguous.h>
39#include <linux/cma.h>
40#include <linux/iommu.h>
41#include <linux/kref.h>
42#include <linux/sort.h>
43#include <linux/msm_dma_iommu_mapping.h>
44#include <asm/dma-iommu.h>
c_mtharue1a5ce12017-10-13 20:47:09 +053045#include <soc/qcom/scm.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070046#include "adsprpc_compat.h"
47#include "adsprpc_shared.h"
c_mtharue1a5ce12017-10-13 20:47:09 +053048#include <soc/qcom/ramdump.h>
Sathish Ambley1ca68232017-01-19 10:32:55 -080049#include <linux/debugfs.h>
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053050#include <linux/pm_qos.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070051#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
52#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
53#define TZ_PIL_AUTH_QDSP6_PROC 1
c_mtharue1a5ce12017-10-13 20:47:09 +053054#define ADSP_MMAP_HEAP_ADDR 4
55#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
Sathish Ambley69e1ab02016-10-18 10:28:15 -070056#define FASTRPC_ENOSUCH 39
57#define VMID_SSC_Q6 5
58#define VMID_ADSP_Q6 6
Sathish Ambley1ca68232017-01-19 10:32:55 -080059#define DEBUGFS_SIZE 1024
Sathish Ambley69e1ab02016-10-18 10:28:15 -070060
61#define RPC_TIMEOUT (5 * HZ)
62#define BALIGN 128
63#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
64#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
Sathish Ambleybae51902017-07-03 15:00:49 -070065#define M_FDLIST (16)
66#define M_CRCLIST (64)
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +053067#define SESSION_ID_INDEX (30)
c_mtharufdac6892017-10-12 13:09:01 +053068#define FASTRPC_CTX_MAGIC (0xbeeddeed)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070069
70#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
71
72#define FASTRPC_LINK_STATE_DOWN (0x0)
73#define FASTRPC_LINK_STATE_UP (0x1)
74#define FASTRPC_LINK_DISCONNECTED (0x0)
75#define FASTRPC_LINK_CONNECTING (0x1)
76#define FASTRPC_LINK_CONNECTED (0x3)
77#define FASTRPC_LINK_DISCONNECTING (0x7)
78
Sathish Ambleya21b5b52017-01-11 16:11:01 -080079#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
80#define FASTRPC_STATIC_HANDLE_LISTENER (3)
81#define FASTRPC_STATIC_HANDLE_MAX (20)
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053082#define FASTRPC_LATENCY_CTRL_ENB (1)
Sathish Ambleya21b5b52017-01-11 16:11:01 -080083
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +053084#define INIT_FILELEN_MAX (2*1024*1024)
85#define INIT_MEMLEN_MAX (8*1024*1024)
86
/* Terminator written by callers at the end of a PERF(...) statement list;
 * expands to a no-op.
 */
#define PERF_END (void)0

/*
 * Execute the statement(s) @ff and, when profiling is enabled (@enb),
 * accumulate the elapsed wall-clock nanoseconds into @cnt.
 * Wrapped in do { } while (0) (instead of a bare { } block) so the macro
 * behaves as a single statement and composes safely with if/else.
 */
#define PERF(enb, cnt, ff) \
	do {\
		struct timespec startT = {0};\
		if (enb) {\
			getnstimeofday(&startT);\
		} \
		ff ;\
		if (enb) {\
			cnt += getnstimediff(&startT);\
		} \
	} while (0)
100
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700101static int fastrpc_glink_open(int cid);
102static void fastrpc_glink_close(void *chan, int cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -0800103static struct dentry *debugfs_root;
104static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700105
106static inline uint64_t buf_page_start(uint64_t buf)
107{
108 uint64_t start = (uint64_t) buf & PAGE_MASK;
109 return start;
110}
111
112static inline uint64_t buf_page_offset(uint64_t buf)
113{
114 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
115 return offset;
116}
117
118static inline int buf_num_pages(uint64_t buf, ssize_t len)
119{
120 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
121 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
122 int nPages = end - start + 1;
123 return nPages;
124}
125
126static inline uint64_t buf_page_size(uint32_t size)
127{
128 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
129
130 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
131}
132
/* Narrow a 64-bit wire-format address back into a kernel pointer. */
static inline void *uint64_to_ptr(uint64_t addr)
{
	return (void *)(uintptr_t)addr;
}
139
/* Widen a kernel pointer to the 64-bit representation used on the wire. */
static inline uint64_t ptr_to_uint64(void *ptr)
{
	return (uint64_t)(uintptr_t)ptr;
}
146
147struct fastrpc_file;
148
/* One coherent DMA buffer owned by a client; may be cached on fl->bufs. */
struct fastrpc_buf {
	struct hlist_node hn;		/* link in fl->bufs free cache */
	struct fastrpc_file *fl;	/* owning client */
	void *virt;			/* kernel vaddr from dma_alloc_coherent */
	uint64_t phys;			/* dma addr; SMMU cb id packed in bits 32+ */
	ssize_t size;			/* allocation size in bytes */
};
156
157struct fastrpc_ctx_lst;
158
/*
 * One invocation buffer's address range plus the sub-range it actually
 * contributes after overlap analysis (see context_build_overlap()).
 */
struct overlap {
	uintptr_t start;	/* buffer start va */
	uintptr_t end;		/* buffer end va (exclusive) */
	int raix;		/* index back into the remote-arg array */
	uintptr_t mstart;	/* start of non-shadowed portion (0 if none) */
	uintptr_t mend;		/* end of non-shadowed portion (0 if none) */
	uintptr_t offset;	/* mstart - start */
};
167
168struct smq_invoke_ctx {
169 struct hlist_node hn;
170 struct completion work;
171 int retval;
172 int pid;
173 int tgid;
174 remote_arg_t *lpra;
175 remote_arg64_t *rpra;
176 int *fds;
177 unsigned int *attrs;
178 struct fastrpc_mmap **maps;
179 struct fastrpc_buf *buf;
180 ssize_t used;
181 struct fastrpc_file *fl;
182 uint32_t sc;
183 struct overlap *overs;
184 struct overlap **overps;
185 struct smq_msg msg;
Sathish Ambleybae51902017-07-03 15:00:49 -0700186 uint32_t *crc;
c_mtharufdac6892017-10-12 13:09:01 +0530187 unsigned int magic;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700188};
189
/* Per-client lists of invocation contexts. */
struct fastrpc_ctx_lst {
	struct hlist_head pending;	/* submitted, awaiting completion */
	struct hlist_head interrupted;	/* parked after a signal, resumable */
};
194
/* SMMU state for one session / context bank. */
struct fastrpc_smmu {
	struct device *dev;		/* device used for DMA mapping */
	struct dma_iommu_mapping *mapping;
	int cb;				/* context bank id (packed into phys addrs) */
	int enabled;			/* non-zero: map through the SMMU */
	int faults;
	int secure;			/* serves secure (CPZ) buffers */
	int coherent;			/* IO-coherent context */
};
204
/* One session slot on a channel (NUM_SESSIONS per channel). */
struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;		/* slot currently claimed by a client */
};
210
/* G-Link transport bookkeeping for one channel. */
struct fastrpc_glink_info {
	int link_state;		/* FASTRPC_LINK_STATE_{DOWN,UP} */
	int port_state;		/* FASTRPC_LINK_{DISCONNECTED,...} */
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};
218
/* Per-subsystem channel state (one gcinfo[] entry per remote DSP). */
struct fastrpc_channel_ctx {
	char *name;			/* device node name, e.g. "adsprpc-smd" */
	char *subsys;			/* subsystem-restart name, e.g. "adsp" */
	void *chan;			/* glink channel handle */
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;
	struct completion workport;
	struct notifier_block nb;	/* subsystem-restart notifier */
	struct kref kref;		/* open-channel reference count */
	int sesscount;
	int ssrcount;			/* bumped on each subsystem restart */
	void *handle;
	int prevssrcount;
	int issubsystemup;
	int vmid;			/* hyp vmid to share buffers with; 0 = none */
	int ramdumpenabled;
	void *remoteheap_ramdump_dev;
	struct fastrpc_glink_info link;
};
239
/* Global driver state (singleton gfa). */
struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;	/* points at gcinfo[] */
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;		/* global remote-heap mappings */
	uint32_t staticpd_flags;
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;	/* all open fastrpc_file clients */
	spinlock_t hlock;		/* guards maps and drivers lists */
	struct ion_client *client;
	struct device *dev;		/* adsprpc-mem device for heap allocs */
	unsigned int latency;		/* pm_qos latency value */
};
256
/* A buffer mapped for remote access: ION import or remote-heap alloc. */
struct fastrpc_mmap {
	struct hlist_node hn;		/* on fl->maps or gfa.maps */
	struct fastrpc_file *fl;	/* owner; NULL for remote-heap maps */
	struct fastrpc_apps *apps;	/* set for remote-heap maps */
	int fd;				/* ion/dma-buf fd the map was made from */
	uint32_t flags;			/* ADSP_MMAP_* kind */
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;			/* dma addr; SMMU cb packed in bits 32+ */
	ssize_t size;			/* mapped size */
	uintptr_t __user va;		/* user va (aliases phys for heap maps) */
	ssize_t len;			/* requested length */
	int refs;			/* reference count */
	uintptr_t raddr;		/* address on the remote processor */
	int uncached;
	int secure;			/* requires the secure session */
	uintptr_t attr;			/* FASTRPC_ATTR_* */
};
277
/*
 * Accumulated invoke-path timings in nanoseconds; reported in the order
 * given by PERF_KEYS ("count:flush:map:copy:glink:getargs:putargs:
 * invalidate:invoke").
 */
struct fastrpc_perf {
	int64_t count;		/* number of profiled invocations */
	int64_t flush;
	int64_t map;
	int64_t copy;
	int64_t link;		/* "glink" transport time in PERF_KEYS */
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;	/* "invalidate" in PERF_KEYS */
	int64_t invoke;
};
289
/* Per-client state, one per open of the device node. */
struct fastrpc_file {
	struct hlist_node hn;		/* on gfa.drivers */
	spinlock_t hlock;		/* guards maps, bufs and clst */
	struct hlist_head maps;		/* client's fastrpc_mmap list */
	struct hlist_head bufs;		/* cached fastrpc_buf free list */
	struct fastrpc_ctx_lst clst;	/* pending/interrupted invocations */
	struct fastrpc_session_ctx *sctx;	/* default session */
	struct fastrpc_session_ctx *secsctx;	/* secure session, lazily set */
	uint32_t mode;
	uint32_t profile;		/* non-zero: collect perf timings */
	int sessionid;
	int tgid;
	int cid;			/* channel index into apps->channel */
	int ssrcount;			/* channel ssrcount snapshot at open */
	int pd;
	int file_close;
	struct fastrpc_apps *apps;
	struct fastrpc_perf perf;
	struct dentry *debugfs_file;
	struct pm_qos_request pm_qos_req;
	int qos_request;
};
312
313static struct fastrpc_apps gfa;
314
/*
 * Static channel table, one entry per remote subsystem, indexed by cid
 * (see NUM_CHANNELS: adsp, mdsp, slpi, cdsp).
 */
static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};
341
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800342static inline int64_t getnstimediff(struct timespec *start)
343{
344 int64_t ns;
345 struct timespec ts, b;
346
347 getnstimeofday(&ts);
348 b = timespec_sub(ts, *start);
349 ns = timespec_to_ns(&b);
350 return ns;
351}
352
/*
 * Release one DMA buffer.  If @cache is set the buffer is parked on the
 * owner's free list for reuse.  Otherwise, when the channel has a vmid,
 * the pages are first hyp-assigned back to HLOS, then returned to the
 * coherent DMA pool.
 */
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		/* keep for reuse: just stash on the per-client list */
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		/* strip the SMMU context-bank id packed into bits 32+ */
		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			/* reclaim the pages from the remote vmid first */
			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->smmu.dev, buf->size, buf->virt,
					buf->phys);
	}
	kfree(buf);
}
384
385static void fastrpc_buf_list_free(struct fastrpc_file *fl)
386{
387 struct fastrpc_buf *buf, *free;
388
389 do {
390 struct hlist_node *n;
391
c_mtharue1a5ce12017-10-13 20:47:09 +0530392 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700393 spin_lock(&fl->hlock);
394 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
395 hlist_del_init(&buf->hn);
396 free = buf;
397 break;
398 }
399 spin_unlock(&fl->hlock);
400 if (free)
401 fastrpc_buf_free(free, 0);
402 } while (free);
403}
404
405static void fastrpc_mmap_add(struct fastrpc_mmap *map)
406{
c_mtharue1a5ce12017-10-13 20:47:09 +0530407 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
408 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
409 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700410
c_mtharue1a5ce12017-10-13 20:47:09 +0530411 spin_lock(&me->hlock);
412 hlist_add_head(&map->hn, &me->maps);
413 spin_unlock(&me->hlock);
414 } else {
415 struct fastrpc_file *fl = map->fl;
416
417 spin_lock(&fl->hlock);
418 hlist_add_head(&map->hn, &fl->maps);
419 spin_unlock(&fl->hlock);
420 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700421}
422
c_mtharue1a5ce12017-10-13 20:47:09 +0530423static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
424 uintptr_t __user va, ssize_t len, int mflags, int refs,
425 struct fastrpc_mmap **ppmap)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700426{
c_mtharue1a5ce12017-10-13 20:47:09 +0530427 struct fastrpc_apps *me = &gfa;
428 struct fastrpc_mmap *match = NULL, *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700429 struct hlist_node *n;
c_mtharue1a5ce12017-10-13 20:47:09 +0530430 if (mflags == ADSP_MMAP_HEAP_ADDR ||
431 mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
432 spin_lock(&me->hlock);
433 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
434 if (va >= map->va &&
435 va + len <= map->va + map->len &&
436 map->fd == fd) {
437 if (refs)
438 map->refs++;
439 match = map;
440 break;
441 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700442 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530443 spin_unlock(&me->hlock);
444 } else {
445 spin_lock(&fl->hlock);
446 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
447 if (va >= map->va &&
448 va + len <= map->va + map->len &&
449 map->fd == fd) {
450 if (refs)
451 map->refs++;
452 match = map;
453 break;
454 }
455 }
456 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700457 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700458 if (match) {
459 *ppmap = match;
460 return 0;
461 }
462 return -ENOTTY;
463}
464
/*
 * Allocate @size bytes of coherent memory from the adsprpc-mem device
 * for the remote heap; the DMA/physical address is returned through
 * @region_start.  Returns 0, -ENODEV if the device is absent, or
 * -ENOMEM on allocation failure.
 *
 * NOTE(review): the kernel virtual address returned by
 * dma_alloc_coherent() is kept only in the local @vaddr and never
 * handed back to the caller, so no valid cpu_addr exists later for
 * dma_free_coherent() (see fastrpc_mmap_free()) — confirm against later
 * driver versions, which add a vaddr out-parameter here.
 */
static int dma_alloc_memory(phys_addr_t *region_start, ssize_t size)
{
	struct fastrpc_apps *me = &gfa;
	void *vaddr = NULL;

	if (me->dev == NULL) {
		pr_err("device adsprpc-mem is not initialized\n");
		return -ENODEV;
	}
	vaddr = dma_alloc_coherent(me->dev, size, region_start, GFP_KERNEL);
	if (!vaddr) {
		pr_err("ADSPRPC: Failed to allocate %x remote heap memory\n",
						(unsigned int)size);
		return -ENOMEM;
	}
	return 0;
}
482
/*
 * Locate and unlink (without freeing) the mapping whose remote-side
 * range is exactly [va, va+len).  Only a map holding a single reference
 * may be removed.  The driver-global remote-heap list is checked first,
 * then the caller's per-client list.  Returns 0 with *ppmap set on
 * success, -ENOTTY if no matching map was found.
 */
static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       ssize_t len, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = NULL, *map;
	struct hlist_node *n;
	struct fastrpc_apps *me = &gfa;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}
522
/*
 * Drop a reference on @map and tear it down once unreferenced (a
 * non-zero @flags forces teardown of non-heap maps even with remaining
 * refs, e.g. persistent FASTRPC_ATTR_KEEP_MAP maps which start with
 * refs == 2 — see fastrpc_mmap_create()).  Remote-heap maps go back to
 * the coherent pool; ION maps are unmapped from the SMMU, hyp-assigned
 * back to HLOS when needed, then detached and released.
 */
static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;

	if (!map)
		return;
	fl = map->fl;
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		spin_unlock(&me->hlock);
		if (map->refs > 0)
			return;
	} else {
		spin_lock(&fl->hlock);
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		spin_unlock(&fl->hlock);
		if (map->refs > 0 && !flags)
			return;
	}
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {

		if (me->dev == NULL) {
			pr_err("failed to free remote heap allocation\n");
			return;
		}
		if (map->phys) {
			/*
			 * NOTE(review): &map->va passes the ADDRESS OF THE
			 * STRUCT FIELD as cpu_addr, not the kernel vaddr
			 * from dma_alloc_coherent() (which dma_alloc_memory()
			 * discarded).  This looks wrong — confirm against
			 * later driver versions.
			 */
			dma_free_coherent(me->dev, map->size,
				&(map->va), map->phys);
		}
	} else {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		if (!IS_ERR_OR_NULL(map->handle))
			ion_free(fl->apps->client, map->handle);
		if (sess && sess->smmu.enabled) {
			if (map->size || map->phys)
				msm_dma_unmap_sg(sess->smmu.dev,
					map->table->sgl,
					map->table->nents, DMA_BIDIRECTIONAL,
					map->buf);
		}
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid && map->phys) {
			int srcVM[2] = {VMID_HLOS, vmid};

			/* give the pages back to HLOS */
			hyp_assign_phys(map->phys, buf_page_size(map->size),
				srcVM, 2, destVM, destVMperm, 1);
		}

		if (!IS_ERR_OR_NULL(map->table))
			dma_buf_unmap_attachment(map->attach, map->table,
					DMA_BIDIRECTIONAL);
		if (!IS_ERR_OR_NULL(map->attach))
			dma_buf_detach(map->buf, map->attach);
		if (!IS_ERR_OR_NULL(map->buf))
			dma_buf_put(map->buf);
	}
	kfree(map);
}
598
599static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
600 struct fastrpc_session_ctx **session);
601
/*
 * Create (or reuse) a mapping for the buffer identified by @fd/@va/@len.
 * Remote-heap requests (ADSP_MMAP_HEAP_ADDR / ADSP_MMAP_REMOTE_HEAP_ADDR)
 * are satisfied from the coherent DMA pool; everything else is an ION
 * buffer that is attached and mapped through the session's SMMU (or must
 * be physically contiguous when the SMMU is disabled).  On success the
 * map is published via fastrpc_mmap_add() and returned in *ppmap.
 */
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t __user va, ssize_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = NULL;
	unsigned long attrs;
	phys_addr_t region_start = 0;
	unsigned long flags;
	int err = 0, vmid;

	/* reuse an existing map covering the range, taking a reference */
	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		/* remote heap: owned by the driver, not by this client */
		map->apps = me;
		map->fl = NULL;
		VERIFY(err, !dma_alloc_memory(&region_start, len));
		if (err)
			goto bail;
		map->phys = (uintptr_t)region_start;
		map->size = len;
		/* no user va for the heap; alias it to the phys address */
		map->va = (uintptr_t __user)map->phys;
	} else {
		if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
			pr_info("adsprpc: buffer mapped with persist attr %x\n",
				(unsigned int)map->attr);
			/* extra ref keeps the map alive across frees */
			map->refs = 2;
		}
		VERIFY(err, !IS_ERR_OR_NULL(map->handle =
				ion_import_dma_buf_fd(fl->apps->client, fd)));
		if (err)
			goto bail;
		VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
		if (err)
			goto bail;

		map->uncached = !ION_IS_CACHED(flags);
		if (map->attr & FASTRPC_ATTR_NOVA)
			map->uncached = 1;

		map->secure = flags & ION_FLAG_SECURE;
		if (map->secure) {
			/* secure buffers need the secure SMMU session */
			if (!fl->secsctx)
				err = fastrpc_session_alloc(chan, 1,
							&fl->secsctx);
			if (err)
				goto bail;
		}
		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;
		VERIFY(err, !IS_ERR_OR_NULL(sess));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->attach =
				dma_buf_attach(map->buf, sess->smmu.dev)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
		if (err)
			goto bail;
		if (sess->smmu.enabled) {
			attrs = DMA_ATTR_EXEC_MAPPING;

			if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
				(sess->smmu.coherent && map->uncached))
				attrs |= DMA_ATTR_FORCE_NON_COHERENT;
			else if (map->attr & FASTRPC_ATTR_COHERENT)
				attrs |= DMA_ATTR_FORCE_COHERENT;

			VERIFY(err, map->table->nents ==
				msm_dma_map_sg_attrs(sess->smmu.dev,
					map->table->sgl, map->table->nents,
					DMA_BIDIRECTIONAL, map->buf, attrs));
			if (err)
				goto bail;
		} else {
			/* no SMMU: the buffer must be one contiguous chunk */
			VERIFY(err, map->table->nents == 1);
			if (err)
				goto bail;
		}
		map->phys = sg_dma_address(map->table->sgl);
		if (sess->smmu.cb) {
			/* pack the context-bank id into bits 32+ of phys */
			map->phys += ((uint64_t)sess->smmu.cb << 32);
			map->size = sg_dma_len(map->table->sgl);
		} else {
			map->size = buf_page_size(len);
		}
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[2] = {VMID_HLOS, vmid};
			int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

			/* share the pages with the remote subsystem */
			VERIFY(err, !hyp_assign_phys(map->phys,
					buf_page_size(map->size),
					srcVM, 1, destVM, destVMperm, 2));
			if (err)
				goto bail;
		}
		map->va = va;
	}
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map, 0);
	return err;
}
737
/*
 * Get a DMA buffer of at least @size bytes for @fl.  The per-client
 * cache is searched for the smallest buffer that fits; otherwise a new
 * coherent allocation is made (retrying once after flushing the cache),
 * tagged with the SMMU context bank, and hyp-assigned to the channel's
 * vmid when one is configured.  Returns 0 with *obuf set, or a negative
 * error; a partially set-up buffer is freed on failure.
 */
static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
			     struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = NULL, *fr = NULL;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = NULL;
	VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
				       (void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
					       (void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	/* encode the SMMU context bank in the upper phys bits */
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				     PERM_READ | PERM_WRITE | PERM_EXEC};

		/* share the pages with the remote subsystem */
		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
 bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}
803
804
805static int context_restore_interrupted(struct fastrpc_file *fl,
Sathish Ambleybae51902017-07-03 15:00:49 -0700806 struct fastrpc_ioctl_invoke_crc *inv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700807 struct smq_invoke_ctx **po)
808{
809 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +0530810 struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700811 struct hlist_node *n;
812 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
813
814 spin_lock(&fl->hlock);
815 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
816 if (ictx->pid == current->pid) {
817 if (invoke->sc != ictx->sc || ictx->fl != fl)
818 err = -1;
819 else {
820 ctx = ictx;
821 hlist_del_init(&ctx->hn);
822 hlist_add_head(&ctx->hn, &fl->clst.pending);
823 }
824 break;
825 }
826 }
827 spin_unlock(&fl->hlock);
828 if (ctx)
829 *po = ctx;
830 return err;
831}
832
833#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
834static int overlap_ptr_cmp(const void *a, const void *b)
835{
836 struct overlap *pa = *((struct overlap **)a);
837 struct overlap *pb = *((struct overlap **)b);
838 /* sort with lowest starting buffer first */
839 int st = CMP(pa->start, pb->start);
840 /* sort with highest ending buffer first */
841 int ed = CMP(pb->end, pa->end);
842 return st == 0 ? ed : st;
843}
844
/*
 * Compute, for every in/out buffer of the call, the sub-range that is
 * not already covered by an earlier (lower-start) buffer.  Buffers are
 * sorted by start address (ties: widest first); for each, mstart/mend
 * is the portion past the running high-water mark `max`, and offset is
 * how far into the buffer that portion begins.  A buffer fully shadowed
 * by an earlier one gets mstart = mend = 0.  Non-empty buffers whose
 * end wraps around (end <= start) are rejected via VERIFY.
 */
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			/* reject wrapped/overflowed address ranges */
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			/* overlaps memory already accounted for */
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				/* fully contained in an earlier buffer */
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			/* disjoint: whole buffer is new memory */
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}
890
/*
 * Copy @size bytes into kernel memory.  When @kernel is set the source
 * is already a kernel pointer and memmove() is used; otherwise the copy
 * goes through copy_from_user() and @err is set non-zero on fault.
 */
#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst),\
			(void const __user *)(src),\
							(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

/*
 * Mirror of K_COPY_FROM_USER for the outbound direction: plain
 * memmove() for kernel destinations, copy_to_user() (setting @err on
 * fault) for user-space ones.
 */
#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((void __user *)(dst), \
						(src), (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)
909
910
911static void context_free(struct smq_invoke_ctx *ctx);
912
/*
 * Allocate and fill an invocation context for @invokefd.  The context
 * struct and all per-buffer arrays (maps/lpra/fds/attrs/overs/overps)
 * are carved out of a single allocation.  Arguments are copied from
 * user space unless @kernel is set.  On success the context is queued
 * on the client's pending list and returned in *po.
 */
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			struct fastrpc_ioctl_invoke_crc *invokefd,
			struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = NULL;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	/* one trailing slab holds every per-buffer bookkeeping array */
	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	/* partition the trailing slab into the typed arrays */
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}
	ctx->crc = (uint32_t *)invokefd->crc;
	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = fl->tgid;
	init_completion(&ctx->work);
	/* magic lets the response path validate the context pointer */
	ctx->magic = FASTRPC_CTX_MAGIC;

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}
983
984static void context_save_interrupted(struct smq_invoke_ctx *ctx)
985{
986 struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
987
988 spin_lock(&ctx->fl->hlock);
989 hlist_del_init(&ctx->hn);
990 hlist_add_head(&ctx->hn, &clst->interrupted);
991 spin_unlock(&ctx->fl->hlock);
992 /* free the cache on power collapse */
993 fastrpc_buf_list_free(ctx->fl);
994}
995
/*
 * context_free() - tear down an invoke context.
 *
 * Unlinks the context from whichever list it is on (the fake hlist node set
 * up in context_alloc() makes hlist_del_init() safe even if it was never
 * queued), drops the mmap references taken for the in/out buffer scalars,
 * frees the scratch buffer, clears the magic and releases the allocation.
 *
 * NOTE(review): nbufs covers only in/out *buffers*; maps created for handle
 * scalars in get_args() (indices >= nbufs, up to REMOTE_SCALARS_LENGTH) are
 * not freed here — verify they are released via another path (e.g. the
 * fdlist handling in put_args()).
 */
static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i], 0);
	fastrpc_buf_free(ctx->buf, 1);
	/* clear magic so a use-after-free of this ctx is detectable */
	ctx->magic = 0;
	kfree(ctx);
}
1010
1011static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
1012{
1013 ctx->retval = retval;
1014 complete(&ctx->work);
1015}
1016
1017
1018static void fastrpc_notify_users(struct fastrpc_file *me)
1019{
1020 struct smq_invoke_ctx *ictx;
1021 struct hlist_node *n;
1022
1023 spin_lock(&me->hlock);
1024 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1025 complete(&ictx->work);
1026 }
1027 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1028 complete(&ictx->work);
1029 }
1030 spin_unlock(&me->hlock);
1031
1032}
1033
1034static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
1035{
1036 struct fastrpc_file *fl;
1037 struct hlist_node *n;
1038
1039 spin_lock(&me->hlock);
1040 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1041 if (fl->cid == cid)
1042 fastrpc_notify_users(fl);
1043 }
1044 spin_unlock(&me->hlock);
1045
1046}
1047static void context_list_ctor(struct fastrpc_ctx_lst *me)
1048{
1049 INIT_HLIST_HEAD(&me->interrupted);
1050 INIT_HLIST_HEAD(&me->pending);
1051}
1052
1053static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
1054{
1055 struct fastrpc_ctx_lst *clst = &fl->clst;
c_mtharue1a5ce12017-10-13 20:47:09 +05301056 struct smq_invoke_ctx *ictx = NULL, *ctxfree;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001057 struct hlist_node *n;
1058
1059 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301060 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001061 spin_lock(&fl->hlock);
1062 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
1063 hlist_del_init(&ictx->hn);
1064 ctxfree = ictx;
1065 break;
1066 }
1067 spin_unlock(&fl->hlock);
1068 if (ctxfree)
1069 context_free(ctxfree);
1070 } while (ctxfree);
1071 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301072 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001073 spin_lock(&fl->hlock);
1074 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
1075 hlist_del_init(&ictx->hn);
1076 ctxfree = ictx;
1077 break;
1078 }
1079 spin_unlock(&fl->hlock);
1080 if (ctxfree)
1081 context_free(ctxfree);
1082 } while (ctxfree);
1083}
1084
1085static int fastrpc_file_free(struct fastrpc_file *fl);
1086static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
1087{
1088 struct fastrpc_file *fl, *free;
1089 struct hlist_node *n;
1090
1091 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301092 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001093 spin_lock(&me->hlock);
1094 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1095 hlist_del_init(&fl->hn);
1096 free = fl;
1097 break;
1098 }
1099 spin_unlock(&me->hlock);
1100 if (free)
1101 fastrpc_file_free(free);
1102 } while (free);
1103}
1104
/*
 * get_args() - marshal the arguments of one remote call into the shared
 * message buffer the DSP will read.
 *
 * Stages, in order:
 *   1. size the metadata region (arg list, page table, fdlist, crclist)
 *      using a NULL rpra base so the helpers yield offsets;
 *   2. create mmaps for fd-backed buffer args and for handle args;
 *   3. compute the copy length for non-ion args from the precomputed
 *      overlap info (ctx->overps), aligning each run to BALIGN;
 *   4. allocate ctx->buf and fill in metadata, page entries for mapped
 *      (ion) buffers, and copies of unmapped (non-ion) input buffers;
 *   5. flush caches for buffers the DSP will read, per the coherency
 *      attributes of the session and each map.
 *
 * @kernel selects kernel- vs user-space source for K_COPY_FROM_USER.
 * Returns 0 on success or a negative error.
 */
static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
{
	remote_arg64_t *rpra;
	remote_arg_t *lpra = ctx->lpra;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages, *ipage;
	uint32_t sc = ctx->sc;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	int handles, bufs = inbufs + outbufs;
	uintptr_t args;
	ssize_t rlen = 0, copylen = 0, metalen = 0;
	int i, oix;
	int err = 0;
	int mflags = 0;
	uint64_t *fdlist;
	uint32_t *crclist;

	/* calculate size of the metadata */
	rpra = NULL;	/* NULL base: the *_start() helpers return offsets */
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;

	/* map fd-backed buffer args; failure here is deliberately ignored
	 * and the arg falls back to the copy path below
	 */
	for (i = 0; i < bufs; ++i) {
		uintptr_t __user buf = (uintptr_t __user)lpra[i].buf.pv;
		ssize_t len = lpra[i].buf.len;

		if (ctx->fds[i] && (ctx->fds[i] != -1))
			fastrpc_mmap_create(ctx->fl, ctx->fds[i],
					ctx->attrs[i], buf, len,
					mflags, &ctx->maps[i]);
		ipage += 1;
	}
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	/* handle args must map successfully or the whole call fails */
	for (i = bufs; i < bufs + handles; i++) {
		VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
				FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
		if (err)
			goto bail;
		ipage += 1;
	}
	/* &ipage[0] is the metadata size (offset from the NULL base) */
	metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
				 (sizeof(uint32_t) * M_CRCLIST);

	/* calculate len requreed for copying */
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		uintptr_t mstart, mend;
		ssize_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (ctx->maps[i])	/* ion-mapped: nothing to copy */
			continue;
		if (ctx->overps[oix]->offset == 0)
			copylen = ALIGN(copylen, BALIGN);
		mstart = ctx->overps[oix]->mstart;
		mend = ctx->overps[oix]->mend;
		VERIFY(err, (mend - mstart) <= LONG_MAX);
		if (err)
			goto bail;
		copylen += mend - mstart;
		VERIFY(err, copylen >= 0);	/* guard signed overflow */
		if (err)
			goto bail;
	}
	ctx->used = copylen;

	/* allocate new buffer */
	if (copylen) {
		VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
		if (err)
			goto bail;
	}
	/* NOTE(review): ctx->buf is dereferenced unconditionally here; this
	 * relies on copylen >= metalen > 0 so the allocation above always
	 * ran — confirm metalen can never be zero.
	 */
	if (ctx->buf->virt && metalen <= copylen)
		memset(ctx->buf->virt, 0, metalen);

	/* copy metadata */
	rpra = ctx->buf->virt;
	ctx->rpra = rpra;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;
	args = (uintptr_t)ctx->buf->virt + metalen;
	for (i = 0; i < bufs + handles; ++i) {
		if (lpra[i].buf.len)
			list[i].num = 1;
		else
			list[i].num = 0;
		list[i].pgidx = ipage - pages;
		ipage++;
	}

	/* map ion buffers */
	PERF(ctx->fl->profile, ctx->fl->perf.map,
	for (i = 0; rpra && i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];
		uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
		ssize_t len = lpra[i].buf.len;

		rpra[i].buf.pv = 0;
		rpra[i].buf.len = len;
		if (!len)
			continue;
		if (map) {
			struct vm_area_struct *vma;
			uintptr_t offset;
			int num = buf_num_pages(buf, len);
			int idx = list[i].pgidx;

			if (map->attr & FASTRPC_ATTR_NOVA) {
				/* no user VA: buffer starts at map base */
				offset = 0;
			} else {
				/* offset of the user pointer within its VMA,
				 * validated against the map size
				 */
				down_read(&current->mm->mmap_sem);
				VERIFY(err, NULL != (vma = find_vma(current->mm,
								map->va)));
				if (err) {
					up_read(&current->mm->mmap_sem);
					goto bail;
				}
				offset = buf_page_start(buf) - vma->vm_start;
				up_read(&current->mm->mmap_sem);
				VERIFY(err, offset < (uintptr_t)map->size);
				if (err)
					goto bail;
			}
			pages[idx].addr = map->phys + offset;
			pages[idx].size = num << PAGE_SHIFT;
		}
		rpra[i].buf.pv = buf;
	}
	PERF_END);
	/* page entries for handle args; index i equals list[i].pgidx here
	 * since each scalar consumed exactly one page slot above
	 */
	for (i = bufs; i < bufs + handles; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		pages[i].addr = map->phys;
		pages[i].size = map->size;
	}
	/* zero the fd and crc return areas that follow the page table */
	fdlist = (uint64_t *)&pages[bufs + handles];
	for (i = 0; i < M_FDLIST; i++)
		fdlist[i] = 0;
	crclist = (uint32_t *)&fdlist[M_FDLIST];
	memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);

	/* copy non ion buffers */
	PERF(ctx->fl->profile, ctx->fl->perf.copy,
	rlen = copylen - metalen;
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];
		ssize_t mlen;
		uint64_t buf;
		ssize_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (map)
			continue;
		if (ctx->overps[oix]->offset == 0) {
			/* mirror the alignment applied when sizing above */
			rlen -= ALIGN(args, BALIGN) - args;
			args = ALIGN(args, BALIGN);
		}
		mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
		VERIFY(err, rlen >= mlen);
		if (err)
			goto bail;
		rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
		pages[list[i].pgidx].addr = ctx->buf->phys -
					    ctx->overps[oix]->offset +
					    (copylen - rlen);
		pages[list[i].pgidx].addr =
			buf_page_start(pages[list[i].pgidx].addr);
		buf = rpra[i].buf.pv;
		pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
		/* only input buffers carry data toward the DSP */
		if (i < inbufs) {
			K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
					lpra[i].buf.pv, len);
			if (err)
				goto bail;
		}
		args = args + mlen;
		rlen -= mlen;
	}
	PERF_END);

	/* flush caches of mapped buffers unless the session/map is coherent */
	PERF(ctx->fl->profile, ctx->fl->perf.flush,
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		if (rpra[i].buf.len && ctx->overps[oix]->mstart)
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
			uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
	}
	PERF_END);
	/* fill dma descriptors for handle args */
	for (i = bufs; rpra && i < bufs + handles; i++) {
		rpra[i].dma.fd = ctx->fds[i];
		rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
		rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
	}

	/* finally flush the metadata + copy region itself */
	if (!ctx->fl->sctx->smmu.coherent) {
		PERF(ctx->fl->profile, ctx->fl->perf.flush,
		dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
		PERF_END);
	}
 bail:
	return err;
}
1323
/*
 * put_args() - unmarshal results after the remote call completes.
 *
 * Copies output buffers that were staged in the scratch buffer back to the
 * caller (K_COPY_TO_USER honors @kernel), drops the mmap references taken
 * for mapped output buffers, releases any maps the DSP asked to free via
 * the fdlist return area, and copies CRCs back if the caller requested them.
 *
 * NOTE(review): @upra is unused in this version; the destination pointers
 * come from ctx->lpra instead.
 */
static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		    remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	uint32_t *crclist = NULL;

	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	/* recover the fdlist/crclist locations laid out by get_args() */
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	crclist = (uint32_t *)(fdlist + M_FDLIST);

	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			/* copied (non-ion) output: move data back to caller */
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			/* mapped output: data already in place, drop the ref */
			fastrpc_mmap_free(ctx->maps[i], 0);
			ctx->maps[i] = NULL;
		}
	}
	/* free maps the DSP returned in the fdlist (zero terminates it) */
	if (inbufs + outbufs + handles) {
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap, 0);
		}
	}
	/* return per-buffer CRCs if the caller asked for them */
	if (ctx->crc && crclist && rpra)
		K_COPY_TO_USER(err, kernel, ctx->crc,
				crclist, M_CRCLIST*sizeof(uint32_t));

 bail:
	return err;
}
1375
/*
 * inv_args_pre() - pre-invoke cache maintenance for output buffers.
 *
 * For each non-coherent output buffer, flush the partial cache lines at its
 * unaligned start and end.  Those lines straddle data owned by the CPU and
 * data the DSP will write; flushing them before the call prevents a later
 * writeback from clobbering DSP-written bytes.  Buffers sharing a page with
 * the metadata (rpra) are skipped — the metadata flush covers them.
 */
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		/* coherent session: maintenance needed only if the map is
		 * explicitly marked non-coherent
		 */
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		/* flush the unaligned leading edge, if any */
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		/* and the unaligned trailing edge */
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
							rpra[i].buf.len);
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}
1411
/*
 * inv_args() - post-invoke cache invalidation for output buffers.
 *
 * After the DSP has written its results, invalidate the CPU caches over
 * each non-coherent output buffer so subsequent CPU reads see the DSP's
 * data.  Ion-backed maps with a handle go through msm_ion_do_cache_op();
 * everything else uses dmac_inv_range() directly.  The metadata region is
 * invalidated last.
 */
static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		/* same coherency filtering as the flush path in get_args() */
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* buffer shares the metadata page: covered by the final
		 * metadata invalidate below
		 */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						 + rpra[i].buf.len));
	}

	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}
1451
/*
 * fastrpc_invoke_send() - build the smq message for this context and
 * transmit it to the DSP over the channel's glink endpoint.
 *
 * @kernel non-zero marks a kernel-originated call (pid forced to 0).
 * Fails with -ECONNRESET if the channel went through an SSR since this
 * file opened it, and with an error if the glink port is not connected.
 */
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, NULL != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = fl->tgid;
	msg->tid = current->pid;
	/* encode the session id in the high bit of the tid field */
	if (fl->sessionid)
		msg->tid |= (1 << SESSION_ID_INDEX);
	if (kernel)
		msg->pid = 0;
	/* low bits of the ctx pointer carry the pd selector */
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	/* subsystem restarted since we attached: refuse to send */
	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
			FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
 bail:
	return err;
}
1489
1490static void fastrpc_init(struct fastrpc_apps *me)
1491{
1492 int i;
1493
1494 INIT_HLIST_HEAD(&me->drivers);
1495 spin_lock_init(&me->hlock);
1496 mutex_init(&me->smd_mutex);
1497 me->channel = &gcinfo[0];
1498 for (i = 0; i < NUM_CHANNELS; i++) {
1499 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301500 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001501 me->channel[i].sesscount = 0;
1502 }
1503}
1504
1505static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1506
/*
 * fastrpc_internal_invoke() - core remote-call path shared by the ioctl
 * and in-kernel callers.
 *
 * Restores an interrupted context if one matches (user calls only),
 * otherwise allocates a fresh one; marshals arguments (get_args), performs
 * pre-invoke cache maintenance, sends the message, waits for completion,
 * then invalidates caches and unmarshals results (put_args).  On
 * -ERESTARTSYS the context is parked on the interrupted list instead of
 * being freed, so a retried syscall can resume the same call.
 *
 * NOTE(review): @mode is not referenced in this body.
 * NOTE(review): the SSR check below assigns positive ECONNRESET while
 * fastrpc_invoke_send() uses -ECONNRESET — confirm the intended sign.
 */
static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
				   uint32_t kernel,
				   struct fastrpc_ioctl_invoke_crc *inv)
{
	struct smq_invoke_ctx *ctx = NULL;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;
	int cid = fl->cid;
	int interrupted = 0;
	int err = 0;
	struct timespec invoket = {0};

	if (fl->profile)
		getnstimeofday(&invoket);


	VERIFY(err, fl->sctx != NULL);
	if (err)
		goto bail;
	VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
	if (err)
		goto bail;

	if (!kernel) {
		/* a previous interruptible wait may have parked this call */
		VERIFY(err, 0 == context_restore_interrupted(fl, inv,
								&ctx));
		if (err)
			goto bail;
		if (fl->sctx->smmu.faults)
			err = FASTRPC_ENOSUCH;
		if (err)
			goto bail;
		if (ctx)
			goto wait;
	}

	VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
	if (err)
		goto bail;

	if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
		PERF(fl->profile, fl->perf.getargs,
		VERIFY(err, 0 == get_args(kernel, ctx));
		PERF_END);
		if (err)
			goto bail;
	}

	if (!fl->sctx->smmu.coherent)
		inv_args_pre(ctx);
	PERF(fl->profile, fl->perf.link,
	VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
	PERF_END);

	if (err)
		goto bail;
 wait:
	/* kernel callers wait uninterruptibly; user callers may be
	 * interrupted, in which case the context is saved at bail
	 */
	if (kernel)
		wait_for_completion(&ctx->work);
	else {
		interrupted = wait_for_completion_interruptible(&ctx->work);
		VERIFY(err, 0 == (err = interrupted));
		if (err)
			goto bail;
	}

	PERF(fl->profile, fl->perf.invargs,
	if (!fl->sctx->smmu.coherent)
		inv_args(ctx);
	PERF_END);

	/* propagate the DSP's return value */
	VERIFY(err, 0 == (err = ctx->retval));
	if (err)
		goto bail;

	PERF(fl->profile, fl->perf.putargs,
	VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
	PERF_END);
	if (err)
		goto bail;
 bail:
	if (ctx && interrupted == -ERESTARTSYS)
		context_save_interrupted(ctx);
	else if (ctx)
		context_free(ctx);
	if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
		err = ECONNRESET;

	/* profiling bookkeeping, skipped for interrupted calls */
	if (fl->profile && !interrupted) {
		if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
			fl->perf.invoke += getnstimediff(&invoket);
		if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX)
			fl->perf.count++;
	}
	return err;
}
1602
Sathish Ambley36849af2017-02-02 09:35:55 -08001603static int fastrpc_channel_open(struct fastrpc_file *fl);
/*
 * fastrpc_init_process() - create or attach the remote-side process for
 * this file, per uproc->init.flags:
 *
 *   FASTRPC_INIT_ATTACH        - attach to the DSP's guest OS (pd 0);
 *   FASTRPC_INIT_CREATE        - spawn a dynamic user PD (pd 1), passing
 *                                the ELF image and a pre-mapped memory
 *                                region to the remote loader;
 *   FASTRPC_INIT_CREATE_STATIC - attach to a named static PD, optionally
 *                                donating a remote-heap region to the DSP
 *                                via hyp_assign_phys() (done once, guarded
 *                                by me->staticpd_flags).
 *
 * On error the memory donation is reversed (hyp-assigned back to HLOS) and
 * any maps created here are freed.
 *
 * NOTE(review): in the STATIC path, "if (!init->filelen) goto bail" leaves
 * err == 0, so an empty name silently returns success — confirm intended.
 */
static int fastrpc_init_process(struct fastrpc_file *fl,
				struct fastrpc_ioctl_init_attrs *uproc)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_ioctl_invoke_crc ioctl;
	struct fastrpc_ioctl_init *init = &uproc->init;
	struct smq_phy_page pages[1];
	struct fastrpc_mmap *file = NULL, *mem = NULL;
	char *proc_name = NULL;
	int srcVM[1] = {VMID_HLOS};
	int destVM[1] = {VMID_ADSP_Q6};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
	int hlosVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	VERIFY(err, !fastrpc_channel_open(fl));
	if (err)
		goto bail;
	if (init->flags == FASTRPC_INIT_ATTACH) {
		remote_arg_t ra[1];
		int tgid = fl->tgid;

		/* single input: the HLOS process group id to attach */
		ra[0].buf.pv = (void *)&tgid;
		ra[0].buf.len = sizeof(tgid);
		ioctl.inv.handle = 1;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = NULL;
		ioctl.attrs = NULL;
		ioctl.crc = NULL;
		fl->pd = 0;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else if (init->flags == FASTRPC_INIT_CREATE) {
		remote_arg_t ra[6];
		int fds[6];
		int mflags = 0;
		struct {
			int pgid;
			int namelen;
			int filelen;
			int pageslen;
			int attrs;
			int siglen;
		} inbuf;

		inbuf.pgid = fl->tgid;
		inbuf.namelen = strlen(current->comm) + 1;
		inbuf.filelen = init->filelen;
		fl->pd = 1;
		/* map the ELF image supplied by the caller, if any */
		if (init->filelen) {
			VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
				init->file, init->filelen, mflags, &file));
			if (err)
				goto bail;
		}

		inbuf.pageslen = 1;
		/* map the donated process memory region */
		VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
				init->mem, init->memlen, mflags, &mem));
		if (err)
			goto bail;
		inbuf.pageslen = 1;
		ra[0].buf.pv = (void *)&inbuf;
		ra[0].buf.len = sizeof(inbuf);
		fds[0] = 0;

		ra[1].buf.pv = (void *)current->comm;
		ra[1].buf.len = inbuf.namelen;
		fds[1] = 0;

		ra[2].buf.pv = (void *)init->file;
		ra[2].buf.len = inbuf.filelen;
		fds[2] = init->filefd;

		pages[0].addr = mem->phys;
		pages[0].size = mem->size;
		ra[3].buf.pv = (void *)pages;
		ra[3].buf.len = 1 * sizeof(*pages);
		fds[3] = 0;

		inbuf.attrs = uproc->attrs;
		ra[4].buf.pv = (void *)&(inbuf.attrs);
		ra[4].buf.len = sizeof(inbuf.attrs);
		fds[4] = 0;

		inbuf.siglen = uproc->siglen;
		ra[5].buf.pv = (void *)&(inbuf.siglen);
		ra[5].buf.len = sizeof(inbuf.siglen);
		fds[5] = 0;

		ioctl.inv.handle = 1;
		/* extended scalars when attrs/siglen args are meaningful */
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
		if (uproc->attrs)
			ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = fds;
		ioctl.attrs = NULL;
		ioctl.crc = NULL;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
		remote_arg_t ra[3];
		uint64_t phys = 0;
		ssize_t size = 0;
		int fds[3];
		struct {
			int pgid;
			int namelen;
			int pageslen;
		} inbuf;

		if (!init->filelen)
			goto bail;

		/* copy in the static PD name from user space */
		proc_name = kzalloc(init->filelen, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(proc_name));
		if (err)
			goto bail;
		VERIFY(err, 0 == copy_from_user((void *)proc_name,
			(void __user *)init->file, init->filelen));
		if (err)
			goto bail;

		inbuf.pgid = current->tgid;
		inbuf.namelen = init->filelen;
		inbuf.pageslen = 0;
		/* donate the remote heap only once per boot */
		if (!me->staticpd_flags) {
			inbuf.pageslen = 1;
			VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
				 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
				 &mem));
			if (err)
				goto bail;
			phys = mem->phys;
			size = mem->size;
			/* hand ownership of the region from HLOS to the DSP */
			VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
					srcVM, 1, destVM, destVMperm, 1));
			if (err) {
				pr_err("ADSPRPC: hyp_assign_phys fail err %d",
							 err);
				pr_err("map->phys %llx, map->size %d\n",
						phys, (int)size);
				goto bail;
			}
			me->staticpd_flags = 1;
		}

		ra[0].buf.pv = (void *)&inbuf;
		ra[0].buf.len = sizeof(inbuf);
		fds[0] = 0;

		ra[1].buf.pv = (void *)proc_name;
		ra[1].buf.len = inbuf.namelen;
		fds[1] = 0;

		pages[0].addr = phys;
		pages[0].size = size;

		ra[2].buf.pv = (void *)pages;
		ra[2].buf.len = sizeof(*pages);
		fds[2] = 0;
		ioctl.inv.handle = 1;

		ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = NULL;
		ioctl.attrs = NULL;
		ioctl.crc = NULL;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else {
		err = -ENOTTY;
	}
bail:
	kfree(proc_name);
	if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
		me->staticpd_flags = 0;
	if (mem && err) {
		/* undo the donation before freeing a remote-heap map */
		if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
			hyp_assign_phys(mem->phys, (uint64_t)mem->size,
					destVM, 1, srcVM, hlosVMperm, 1);
		fastrpc_mmap_free(mem, 0);
	}
	if (file)
		fastrpc_mmap_free(file, 0);
	return err;
}
1798
1799static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
1800{
1801 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001802 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001803 remote_arg_t ra[1];
1804 int tgid = 0;
1805
Sathish Ambley36849af2017-02-02 09:35:55 -08001806 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1807 if (err)
1808 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05301809 VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001810 if (err)
1811 goto bail;
1812 tgid = fl->tgid;
1813 ra[0].buf.pv = (void *)&tgid;
1814 ra[0].buf.len = sizeof(tgid);
1815 ioctl.inv.handle = 1;
1816 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
1817 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301818 ioctl.fds = NULL;
1819 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001820 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001821 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1822 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1823bail:
1824 return err;
1825}
1826
1827static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
1828 struct fastrpc_mmap *map)
1829{
Sathish Ambleybae51902017-07-03 15:00:49 -07001830 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001831 struct smq_phy_page page;
1832 int num = 1;
1833 remote_arg_t ra[3];
1834 int err = 0;
1835 struct {
1836 int pid;
1837 uint32_t flags;
1838 uintptr_t vaddrin;
1839 int num;
1840 } inargs;
1841 struct {
1842 uintptr_t vaddrout;
1843 } routargs;
1844
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301845 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001846 inargs.vaddrin = (uintptr_t)map->va;
1847 inargs.flags = flags;
1848 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
1849 ra[0].buf.pv = (void *)&inargs;
1850 ra[0].buf.len = sizeof(inargs);
1851 page.addr = map->phys;
1852 page.size = map->size;
1853 ra[1].buf.pv = (void *)&page;
1854 ra[1].buf.len = num * sizeof(page);
1855
1856 ra[2].buf.pv = (void *)&routargs;
1857 ra[2].buf.len = sizeof(routargs);
1858
1859 ioctl.inv.handle = 1;
1860 if (fl->apps->compat)
1861 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
1862 else
1863 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
1864 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301865 ioctl.fds = NULL;
1866 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001867 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001868 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1869 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1870 map->raddr = (uintptr_t)routargs.vaddrout;
c_mtharue1a5ce12017-10-13 20:47:09 +05301871 if (err)
1872 goto bail;
1873 if (flags == ADSP_MMAP_HEAP_ADDR) {
1874 struct scm_desc desc = {0};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001875
c_mtharue1a5ce12017-10-13 20:47:09 +05301876 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
1877 desc.args[1] = map->phys;
1878 desc.args[2] = map->size;
1879 desc.arginfo = SCM_ARGS(3);
1880 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
1881 TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
1882 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
1883
1884 int srcVM[1] = {VMID_HLOS};
1885 int destVM[1] = {VMID_ADSP_Q6};
1886 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
1887
1888 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
1889 srcVM, 1, destVM, destVMperm, 1));
1890 if (err)
1891 goto bail;
1892 }
1893bail:
1894 return err;
1895}
1896
1897static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
1898 struct fastrpc_mmap *map)
1899{
1900 int err = 0;
1901 int srcVM[1] = {VMID_ADSP_Q6};
1902 int destVM[1] = {VMID_HLOS};
1903 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
1904
1905 if (map->flags == ADSP_MMAP_HEAP_ADDR) {
1906 struct fastrpc_ioctl_invoke_crc ioctl;
1907 struct scm_desc desc = {0};
1908 remote_arg_t ra[1];
1909 int err = 0;
1910 struct {
1911 uint8_t skey;
1912 } routargs;
1913
1914 ra[0].buf.pv = (void *)&routargs;
1915 ra[0].buf.len = sizeof(routargs);
1916
1917 ioctl.inv.handle = 1;
1918 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
1919 ioctl.inv.pra = ra;
1920 ioctl.fds = NULL;
1921 ioctl.attrs = NULL;
1922 ioctl.crc = NULL;
1923 if (fl == NULL)
1924 goto bail;
1925
1926 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1927 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1928 if (err)
1929 goto bail;
1930 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
1931 desc.args[1] = map->phys;
1932 desc.args[2] = map->size;
1933 desc.args[3] = routargs.skey;
1934 desc.arginfo = SCM_ARGS(4);
1935 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
1936 TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
1937 } else if (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
1938 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
1939 srcVM, 1, destVM, destVMperm, 1));
1940 if (err)
1941 goto bail;
1942 }
1943
1944bail:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001945 return err;
1946}
1947
1948static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
1949 struct fastrpc_mmap *map)
1950{
Sathish Ambleybae51902017-07-03 15:00:49 -07001951 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001952 remote_arg_t ra[1];
1953 int err = 0;
1954 struct {
1955 int pid;
1956 uintptr_t vaddrout;
1957 ssize_t size;
1958 } inargs;
1959
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301960 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001961 inargs.size = map->size;
1962 inargs.vaddrout = map->raddr;
1963 ra[0].buf.pv = (void *)&inargs;
1964 ra[0].buf.len = sizeof(inargs);
1965
1966 ioctl.inv.handle = 1;
1967 if (fl->apps->compat)
1968 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
1969 else
1970 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
1971 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301972 ioctl.fds = NULL;
1973 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001974 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001975 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1976 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
c_mtharue1a5ce12017-10-13 20:47:09 +05301977 if (err)
1978 goto bail;
1979 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
1980 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
1981 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, map));
1982 if (err)
1983 goto bail;
1984 }
1985bail:
1986 return err;
1987}
1988
1989static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
1990{
1991 struct fastrpc_mmap *match = NULL, *map = NULL;
1992 struct hlist_node *n = NULL;
1993 int err = 0, ret = 0;
1994 struct fastrpc_apps *me = &gfa;
1995 struct ramdump_segment *ramdump_segments_rh = NULL;
1996
1997 do {
1998 match = NULL;
1999 spin_lock(&me->hlock);
2000 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
2001 match = map;
2002 hlist_del_init(&map->hn);
2003 break;
2004 }
2005 spin_unlock(&me->hlock);
2006
2007 if (match) {
2008 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match));
2009 if (err)
2010 goto bail;
2011 if (me->channel[0].ramdumpenabled) {
2012 ramdump_segments_rh = kcalloc(1,
2013 sizeof(struct ramdump_segment), GFP_KERNEL);
2014 if (ramdump_segments_rh) {
2015 ramdump_segments_rh->address =
2016 match->phys;
2017 ramdump_segments_rh->size = match->size;
2018 ret = do_elf_ramdump(
2019 me->channel[0].remoteheap_ramdump_dev,
2020 ramdump_segments_rh, 1);
2021 if (ret < 0)
2022 pr_err("ADSPRPC: unable to dump heap");
2023 kfree(ramdump_segments_rh);
2024 }
2025 }
c_mtharu7bd6a422017-10-17 18:15:37 +05302026 fastrpc_mmap_free(match, 0);
c_mtharue1a5ce12017-10-13 20:47:09 +05302027 }
2028 } while (match);
2029bail:
2030 if (err && match)
2031 fastrpc_mmap_add(match);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002032 return err;
2033}
2034
2035static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
2036 ssize_t len, struct fastrpc_mmap **ppmap);
2037
2038static void fastrpc_mmap_add(struct fastrpc_mmap *map);
2039
2040static int fastrpc_internal_munmap(struct fastrpc_file *fl,
2041 struct fastrpc_ioctl_munmap *ud)
2042{
2043 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302044 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002045
2046 VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
2047 if (err)
2048 goto bail;
2049 VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
2050 if (err)
2051 goto bail;
c_mtharu7bd6a422017-10-17 18:15:37 +05302052 fastrpc_mmap_free(map, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002053bail:
2054 if (err && map)
2055 fastrpc_mmap_add(map);
2056 return err;
2057}
2058
c_mtharu7bd6a422017-10-17 18:15:37 +05302059static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
2060 struct fastrpc_ioctl_munmap_fd *ud) {
2061 int err = 0;
2062 struct fastrpc_mmap *map = NULL;
2063
2064 VERIFY(err, (fl && ud));
2065 if (err)
2066 goto bail;
2067
2068 if (!fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
2069 pr_err("mapping not found to unamp %x va %llx %x\n",
2070 ud->fd, (unsigned long long)ud->va,
2071 (unsigned int)ud->len);
2072 err = -1;
2073 goto bail;
2074 }
2075 if (map)
2076 fastrpc_mmap_free(map, 0);
2077bail:
2078 return err;
2079}
2080
2081
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002082static int fastrpc_internal_mmap(struct fastrpc_file *fl,
2083 struct fastrpc_ioctl_mmap *ud)
2084{
2085
c_mtharue1a5ce12017-10-13 20:47:09 +05302086 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002087 int err = 0;
2088
c_mtharue1a5ce12017-10-13 20:47:09 +05302089 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t __user)ud->vaddrin,
2090 ud->size, ud->flags, 1, &map))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002091 return 0;
2092
2093 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
c_mtharue1a5ce12017-10-13 20:47:09 +05302094 (uintptr_t __user)ud->vaddrin, ud->size,
2095 ud->flags, &map));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002096 if (err)
2097 goto bail;
2098 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
2099 if (err)
2100 goto bail;
2101 ud->vaddrout = map->raddr;
2102 bail:
2103 if (err && map)
c_mtharu7bd6a422017-10-17 18:15:37 +05302104 fastrpc_mmap_free(map, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002105 return err;
2106}
2107
/*
 * fastrpc_channel_close() - kref release callback for a channel context.
 *
 * Invoked via kref_put_mutex() with gfa.smd_mutex already held: it closes
 * the glink port, unregisters the link-state callback, and then releases
 * the mutex itself before returning.
 */
static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	/* The channel id is this context's index in the global table. */
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = NULL;
	glink_unregister_link_state_cb(ctx->link.link_notify_handle);
	ctx->link.link_notify_handle = NULL;
	/* Pairs with the mutex taken by kref_put_mutex() in the caller. */
	mutex_unlock(&me->smd_mutex);
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
		MAJOR(me->dev_no), cid);
}
2124
2125static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
2126
2127static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
2128 int secure, struct fastrpc_session_ctx **session)
2129{
2130 struct fastrpc_apps *me = &gfa;
2131 int idx = 0, err = 0;
2132
2133 if (chan->sesscount) {
2134 for (idx = 0; idx < chan->sesscount; ++idx) {
2135 if (!chan->session[idx].used &&
2136 chan->session[idx].smmu.secure == secure) {
2137 chan->session[idx].used = 1;
2138 break;
2139 }
2140 }
2141 VERIFY(err, idx < chan->sesscount);
2142 if (err)
2143 goto bail;
2144 chan->session[idx].smmu.faults = 0;
2145 } else {
2146 VERIFY(err, me->dev != NULL);
2147 if (err)
2148 goto bail;
2149 chan->session[0].dev = me->dev;
c_mtharue1a5ce12017-10-13 20:47:09 +05302150 chan->session[0].smmu.dev = me->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002151 }
2152
2153 *session = &chan->session[idx];
2154 bail:
2155 return err;
2156}
2157
c_mtharue1a5ce12017-10-13 20:47:09 +05302158static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
2159 size_t size)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002160{
2161 if (glink_queue_rx_intent(h, NULL, size))
2162 return false;
2163 return true;
2164}
2165
/*
 * Glink tx-done callback: no per-packet bookkeeping is required here, so
 * this is intentionally empty.
 */
static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
	const void *pkt_priv, const void *ptr)
{
}
2170
/*
 * fastrpc_glink_notify_rx() - glink receive callback for invoke responses.
 *
 * Validates the packet as an smq_invoke_rsp, recovers the originating
 * invoke context from rsp->ctx (the low bit is masked off — it appears to
 * carry out-of-band state; confirm against the sender), rejects stale or
 * corrupt pointers via the context magic, and wakes the waiting invoker
 * with the remote return value.  The rx buffer is always returned to
 * glink, even on error.
 */
static void fastrpc_glink_notify_rx(void *handle, const void *priv,
	const void *pkt_priv, const void *ptr, size_t size)
{
	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
	struct smq_invoke_ctx *ctx;
	int err = 0;

	VERIFY(err, (rsp && size >= sizeof(*rsp)));
	if (err)
		goto bail;

	ctx = (struct smq_invoke_ctx *)(uint64_to_ptr(rsp->ctx & ~1));
	/* Guard against a bogus context pointer from the remote side. */
	VERIFY(err, (ctx && ctx->magic == FASTRPC_CTX_MAGIC));
	if (err)
		goto bail;

	context_notify_user(ctx, rsp->retval);
bail:
	if (err)
		pr_err("adsprpc: invalid response or context\n");
	/* Always hand the rx buffer back to glink. */
	glink_rx_done(handle, ptr, true);
}
2193
/*
 * fastrpc_glink_notify_state() - glink port state-change callback.
 * @priv: channel id, stashed as a pointer when the port was opened.
 *
 * Tracks the port state for the channel and, on a remote disconnect,
 * closes the local side so a later fastrpc_channel_open() can reconnect.
 */
static void fastrpc_glink_notify_state(void *handle, const void *priv,
				unsigned int event)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)(uintptr_t)priv;
	struct fastrpc_glink_info *link;

	if (cid < 0 || cid >= NUM_CHANNELS)
		return;
	link = &me->channel[cid].link;
	switch (event) {
	case GLINK_CONNECTED:
		link->port_state = FASTRPC_LINK_CONNECTED;
		/* Unblock fastrpc_channel_open() waiting on workport. */
		complete(&me->channel[cid].workport);
		break;
	case GLINK_LOCAL_DISCONNECTED:
		link->port_state = FASTRPC_LINK_DISCONNECTED;
		break;
	case GLINK_REMOTE_DISCONNECTED:
		/* Remote went away: tear down our side of the port. */
		if (me->channel[cid].chan) {
			fastrpc_glink_close(me->channel[cid].chan, cid);
			me->channel[cid].chan = NULL;
		}
		break;
	default:
		break;
	}
}
2222
2223static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
2224 struct fastrpc_session_ctx **session)
2225{
2226 int err = 0;
2227 struct fastrpc_apps *me = &gfa;
2228
2229 mutex_lock(&me->smd_mutex);
2230 if (!*session)
2231 err = fastrpc_session_alloc_locked(chan, secure, session);
2232 mutex_unlock(&me->smd_mutex);
2233 return err;
2234}
2235
2236static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
2237 struct fastrpc_session_ctx *session)
2238{
2239 struct fastrpc_apps *me = &gfa;
2240
2241 mutex_lock(&me->smd_mutex);
2242 session->used = 0;
2243 mutex_unlock(&me->smd_mutex);
2244}
2245
/*
 * fastrpc_file_free() - tear down a per-fd fastrpc context.
 *
 * Tells the DSP to release the remote process (best effort), unlinks the
 * file from the global driver list, blocks further ioctls, frees its
 * invoke contexts, buffers and mappings, drops the channel reference
 * taken at open (unless an SSR bumped the count in between), and releases
 * any SMMU sessions before freeing the structure itself.
 *
 * Always returns 0.
 */
static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n;
	struct fastrpc_mmap *map = NULL;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	/* Best effort: errors releasing the remote process are ignored. */
	(void)fastrpc_release_current_dsp_process(fl);

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	if (!fl->sctx) {
		kfree(fl);
		return 0;
	}
	/* Make racing ioctls bail out (checked in fastrpc_device_ioctl). */
	spin_lock(&fl->hlock);
	fl->file_close = 1;
	spin_unlock(&fl->hlock);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map, 1);
	}
	/* Drop the channel ref only if no SSR invalidated it meanwhile. */
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
				fastrpc_channel_close, &fl->apps->smd_mutex);
	if (fl->sctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
	kfree(fl);
	return 0;
}
2284
2285static int fastrpc_device_release(struct inode *inode, struct file *file)
2286{
2287 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2288
2289 if (fl) {
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302290 if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
2291 pm_qos_remove_request(&fl->pm_qos_req);
Sathish Ambley1ca68232017-01-19 10:32:55 -08002292 if (fl->debugfs_file != NULL)
2293 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002294 fastrpc_file_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +05302295 file->private_data = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002296 }
2297 return 0;
2298}
2299
2300static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
2301 void *priv)
2302{
2303 struct fastrpc_apps *me = &gfa;
2304 int cid = (int)((uintptr_t)priv);
2305 struct fastrpc_glink_info *link;
2306
2307 if (cid < 0 || cid >= NUM_CHANNELS)
2308 return;
2309
2310 link = &me->channel[cid].link;
2311 switch (cb_info->link_state) {
2312 case GLINK_LINK_STATE_UP:
2313 link->link_state = FASTRPC_LINK_STATE_UP;
2314 complete(&me->channel[cid].work);
2315 break;
2316 case GLINK_LINK_STATE_DOWN:
2317 link->link_state = FASTRPC_LINK_STATE_DOWN;
2318 break;
2319 default:
2320 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
2321 break;
2322 }
2323}
2324
/*
 * fastrpc_glink_register() - register for link-state events on a channel.
 *
 * Idempotent: returns immediately if a notify handle already exists.
 * Otherwise registers fastrpc_link_state_handler() and waits (up to
 * RPC_TIMEOUT) for the link to come up, signalled via channel->work.
 *
 * Returns 0 on success or a negative error / timeout.
 */
static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;

	link = &me->channel[cid].link;
	if (link->link_notify_handle != NULL)
		goto bail;

	link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
	/* The channel id rides along as the callback's private pointer. */
	link->link_notify_handle = glink_register_link_state_cb(
				&link->link_info,
				(void *)((uintptr_t)cid));
	VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
	if (err) {
		link->link_notify_handle = NULL;
		goto bail;
	}
	/* Wait for GLINK_LINK_STATE_UP before the port can be opened. */
	VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
						RPC_TIMEOUT));
bail:
	return err;
}
2352
2353static void fastrpc_glink_close(void *chan, int cid)
2354{
2355 int err = 0;
2356 struct fastrpc_glink_info *link;
2357
2358 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2359 if (err)
2360 return;
2361 link = &gfa.channel[cid].link;
2362
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302363 if (link->port_state == FASTRPC_LINK_CONNECTED) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002364 link->port_state = FASTRPC_LINK_DISCONNECTING;
2365 glink_close(chan);
2366 }
2367}
2368
/*
 * fastrpc_glink_open() - open the glink port for a channel.
 *
 * Requires the link to be up and the port disconnected.  glink_open()
 * completes asynchronously: the caller must still wait for workport
 * (completed on GLINK_CONNECTED) before using channel->chan.
 *
 * Returns 0 on success or a negative error code.
 */
static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &me->channel[cid].link.cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
	if (err)
		goto bail;

	link->port_state = FASTRPC_LINK_CONNECTING;
	/* The channel id rides along as the callbacks' private pointer. */
	cfg->priv = (void *)(uintptr_t)cid;
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err) {
		/* Roll back unless a state callback already moved us on. */
		if (link->port_state == FASTRPC_LINK_CONNECTING)
			link->port_state = FASTRPC_LINK_DISCONNECTED;
		goto bail;
	}
	me->channel[cid].chan = handle;
bail:
	return err;
}
2410
Sathish Ambley1ca68232017-01-19 10:32:55 -08002411static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
2412{
2413 filp->private_data = inode->i_private;
2414 return 0;
2415}
2416
/*
 * fastrpc_debugfs_read() - render driver state for a debugfs node.
 *
 * With no private data (the global node) it lists every channel and its
 * SMMU sessions; for a per-process node it lists that process's buffers,
 * mappings and pending/interrupted invoke contexts.  Output is built in
 * a DEBUGFS_SIZE scratch buffer and copied out with
 * simple_read_from_buffer().
 *
 * Returns the number of bytes copied, or 0 on allocation failure.
 */
static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
					 size_t count, loff_t *position)
{
	struct fastrpc_file *fl = filp->private_data;
	struct hlist_node *n;
	struct fastrpc_buf *buf = NULL;
	struct fastrpc_mmap *map = NULL;
	struct smq_invoke_ctx *ictx = NULL;
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	unsigned int len = 0;
	int i, j, ret = 0;
	char *fileinfo = NULL;

	fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
	if (!fileinfo)
		goto bail;
	if (fl == NULL) {
		/* Global node: per-channel and per-session summary. */
		for (i = 0; i < NUM_CHANNELS; i++) {
			chan = &gcinfo[i];
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s\n\n",
					chan->name);
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s %d\n",
					"sesscount:", chan->sesscount);
			for (j = 0; j < chan->sesscount; j++) {
				sess = &chan->session[j];
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s%d\n\n", "SESSION", j);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "sid:",
						sess->smmu.cb);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "SECURE:",
						sess->smmu.secure);
			}
		}
	} else {
		/* Per-process node: identity, then the four object lists. */
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"PROCESS_ID:", fl->tgid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"CHANNEL_ID:", fl->cid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"SSRCOUNT:", fl->ssrcount);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s\n",
				"LIST OF BUFS:");
		/* fl->hlock guards the bufs/maps/context lists below. */
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %pK %s %llx\n", "buf:",
					buf, "buf->virt:", buf->virt,
					"buf->phys:", buf->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF MAPS:");
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
						"%s %pK %s %lx %s %llx\n",
						"map:", map,
						"map->va:", map->va,
						"map->phys:", map->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF PENDING SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
						"%s %pK %s %u %s %u %s %u\n",
						"smqcontext:", ictx,
						"sc:", ictx->sc,
						"tid:", ictx->pid,
						"handle", ictx->rpra->h);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF INTERRUPTED SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
						"%s %pK %s %u %s %u %s %u\n",
						"smqcontext:", ictx,
						"sc:", ictx->sc,
						"tid:", ictx->pid,
						"handle", ictx->rpra->h);
		}
		spin_unlock(&fl->hlock);
	}
	if (len > DEBUGFS_SIZE)
		len = DEBUGFS_SIZE;
	ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
	kfree(fileinfo);
bail:
	return ret;
}
2519
/* File operations for the global and per-process debugfs entries. */
static const struct file_operations debugfs_fops = {
	.open = fastrpc_debugfs_open,
	.read = fastrpc_debugfs_read,
};
Sathish Ambley36849af2017-02-02 09:35:55 -08002524static int fastrpc_channel_open(struct fastrpc_file *fl)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002525{
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002526 struct fastrpc_apps *me = &gfa;
Sathish Ambley36849af2017-02-02 09:35:55 -08002527 int cid, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002528
2529 mutex_lock(&me->smd_mutex);
2530
Sathish Ambley36849af2017-02-02 09:35:55 -08002531 VERIFY(err, fl && fl->sctx);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002532 if (err)
2533 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08002534 cid = fl->cid;
c_mtharue1a5ce12017-10-13 20:47:09 +05302535 if (me->channel[cid].ssrcount !=
2536 me->channel[cid].prevssrcount) {
2537 if (!me->channel[cid].issubsystemup) {
2538 VERIFY(err, 0);
2539 if (err)
2540 goto bail;
2541 }
2542 }
Sathish Ambley36849af2017-02-02 09:35:55 -08002543 VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
2544 if (err)
2545 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002546 fl->ssrcount = me->channel[cid].ssrcount;
2547 if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
c_mtharue1a5ce12017-10-13 20:47:09 +05302548 (me->channel[cid].chan == NULL)) {
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05302549 VERIFY(err, 0 == fastrpc_glink_register(cid, me));
2550 if (err)
2551 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002552 VERIFY(err, 0 == fastrpc_glink_open(cid));
2553 if (err)
2554 goto bail;
2555
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302556 VERIFY(err,
2557 wait_for_completion_timeout(&me->channel[cid].workport,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002558 RPC_TIMEOUT));
2559 if (err) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302560 me->channel[cid].chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002561 goto bail;
2562 }
2563 kref_init(&me->channel[cid].kref);
2564 pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
2565 MAJOR(me->dev_no), cid);
Tharun Kumar Merugu88ba9252017-08-09 12:15:41 +05302566 err = glink_queue_rx_intent(me->channel[cid].chan, NULL, 16);
2567 err |= glink_queue_rx_intent(me->channel[cid].chan, NULL, 64);
Bruce Levy34c3c1c2017-07-31 17:08:58 -07002568 if (err)
Tharun Kumar Merugu88ba9252017-08-09 12:15:41 +05302569 pr_warn("adsprpc: initial intent fail for %d err %d\n",
2570 cid, err);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002571 if (me->channel[cid].ssrcount !=
2572 me->channel[cid].prevssrcount) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302573 if (fastrpc_mmap_remove_ssr(fl))
2574 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002575 me->channel[cid].prevssrcount =
2576 me->channel[cid].ssrcount;
2577 }
2578 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002579
2580bail:
2581 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002582 return err;
2583}
2584
/*
 * fastrpc_device_open() - file-open hook: allocate and initialize the
 * per-fd context and register it with the driver.
 *
 * A per-process debugfs node (named after current->comm) is created for
 * introspection; its absence is not an error.  Returns 0 on success or
 * a negative error on allocation failure.
 */
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	int err = 0;
	struct dentry *debugfs_file;
	struct fastrpc_file *fl = NULL;
	struct fastrpc_apps *me = &gfa;

	VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
	if (err)
		return err;
	debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
						fl, &debugfs_fops);
	context_list_ctor(&fl->clst);
	spin_lock_init(&fl->hlock);
	INIT_HLIST_HEAD(&fl->maps);
	INIT_HLIST_HEAD(&fl->bufs);
	INIT_HLIST_NODE(&fl->hn);
	fl->sessionid = 0;
	fl->tgid = current->tgid;
	fl->apps = me;
	fl->mode = FASTRPC_MODE_SERIAL;
	/* Channel is selected later via FASTRPC_IOCTL_GETINFO. */
	fl->cid = -1;
	if (debugfs_file != NULL)
		fl->debugfs_file = debugfs_file;
	memset(&fl->perf, 0, sizeof(fl->perf));
	fl->qos_request = 0;
	filp->private_data = fl;
	spin_lock(&me->hlock);
	hlist_add_head(&fl->hn, &me->drivers);
	spin_unlock(&me->hlock);
	return 0;
}
2617
/*
 * fastrpc_get_info() - handle FASTRPC_IOCTL_GETINFO.
 * @info: in: requested channel id (used only on the first call per fd);
 *        out: 1 if the bound SMMU session has the SMMU enabled, else 0.
 *
 * On first use binds the fd to a channel and allocates a non-secure
 * session on it.  Returns 0 on success or a negative error code.
 */
static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
{
	int err = 0;
	uint32_t cid;

	VERIFY(err, fl != NULL);
	if (err)
		goto bail;
	if (fl->cid == -1) {
		cid = *info;
		VERIFY(err, cid < NUM_CHANNELS);
		if (err)
			goto bail;
		fl->cid = cid;
		fl->ssrcount = fl->apps->channel[cid].ssrcount;
		/*
		 * NOTE(review): this calls the _locked session allocator
		 * without holding smd_mutex — confirm this path cannot race
		 * with fastrpc_session_alloc()/fastrpc_session_free().
		 */
		VERIFY(err, !fastrpc_session_alloc_locked(
				&fl->apps->channel[cid], 0, &fl->sctx));
		if (err)
			goto bail;
	}
	VERIFY(err, fl->sctx != NULL);
	if (err)
		goto bail;
	*info = (fl->sctx->smmu.enabled ? 1 : 0);
bail:
	return err;
}
2645
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302646static int fastrpc_internal_control(struct fastrpc_file *fl,
2647 struct fastrpc_ioctl_control *cp)
2648{
2649 int err = 0;
2650 int latency;
2651
2652 VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
2653 if (err)
2654 goto bail;
2655 VERIFY(err, !IS_ERR_OR_NULL(cp));
2656 if (err)
2657 goto bail;
2658
2659 switch (cp->req) {
2660 case FASTRPC_CONTROL_LATENCY:
2661 latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
2662 fl->apps->latency : PM_QOS_DEFAULT_VALUE;
2663 VERIFY(err, latency != 0);
2664 if (err)
2665 goto bail;
2666 if (!fl->qos_request) {
2667 pm_qos_add_request(&fl->pm_qos_req,
2668 PM_QOS_CPU_DMA_LATENCY, latency);
2669 fl->qos_request = 1;
2670 } else
2671 pm_qos_update_request(&fl->pm_qos_req, latency);
2672 break;
2673 default:
2674 err = -ENOTTY;
2675 break;
2676 }
2677bail:
2678 return err;
2679}
2680
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002681static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
2682 unsigned long ioctl_param)
2683{
2684 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07002685 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002686 struct fastrpc_ioctl_mmap mmap;
2687 struct fastrpc_ioctl_munmap munmap;
c_mtharu7bd6a422017-10-17 18:15:37 +05302688 struct fastrpc_ioctl_munmap_fd munmap_fd;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002689 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002690 struct fastrpc_ioctl_perf perf;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302691 struct fastrpc_ioctl_control cp;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002692 } p;
2693 void *param = (char *)ioctl_param;
2694 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2695 int size = 0, err = 0;
2696 uint32_t info;
2697
c_mtharue1a5ce12017-10-13 20:47:09 +05302698 p.inv.fds = NULL;
2699 p.inv.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002700 p.inv.crc = NULL;
tharun kumar9f899ea2017-07-03 17:07:03 +05302701 spin_lock(&fl->hlock);
2702 if (fl->file_close == 1) {
2703 err = EBADF;
2704 pr_warn("ADSPRPC: fastrpc_device_release is happening, So not sending any new requests to DSP");
2705 spin_unlock(&fl->hlock);
2706 goto bail;
2707 }
2708 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002709
2710 switch (ioctl_num) {
2711 case FASTRPC_IOCTL_INVOKE:
2712 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07002713 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002714 case FASTRPC_IOCTL_INVOKE_FD:
2715 if (!size)
2716 size = sizeof(struct fastrpc_ioctl_invoke_fd);
2717 /* fall through */
2718 case FASTRPC_IOCTL_INVOKE_ATTRS:
2719 if (!size)
2720 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07002721 /* fall through */
2722 case FASTRPC_IOCTL_INVOKE_CRC:
2723 if (!size)
2724 size = sizeof(struct fastrpc_ioctl_invoke_crc);
c_mtharue1a5ce12017-10-13 20:47:09 +05302725 K_COPY_FROM_USER(err, 0, &p.inv, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002726 if (err)
2727 goto bail;
2728 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
2729 0, &p.inv)));
2730 if (err)
2731 goto bail;
2732 break;
2733 case FASTRPC_IOCTL_MMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05302734 K_COPY_FROM_USER(err, 0, &p.mmap, param,
2735 sizeof(p.mmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05302736 if (err)
2737 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002738 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
2739 if (err)
2740 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302741 K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002742 if (err)
2743 goto bail;
2744 break;
2745 case FASTRPC_IOCTL_MUNMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05302746 K_COPY_FROM_USER(err, 0, &p.munmap, param,
2747 sizeof(p.munmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05302748 if (err)
2749 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002750 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
2751 &p.munmap)));
2752 if (err)
2753 goto bail;
2754 break;
c_mtharu7bd6a422017-10-17 18:15:37 +05302755 case FASTRPC_IOCTL_MUNMAP_FD:
2756 K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
2757 sizeof(p.munmap_fd));
2758 if (err)
2759 goto bail;
2760 VERIFY(err, 0 == (err = fastrpc_internal_munmap_fd(fl,
2761 &p.munmap_fd)));
2762 if (err)
2763 goto bail;
2764 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002765 case FASTRPC_IOCTL_SETMODE:
2766 switch ((uint32_t)ioctl_param) {
2767 case FASTRPC_MODE_PARALLEL:
2768 case FASTRPC_MODE_SERIAL:
2769 fl->mode = (uint32_t)ioctl_param;
2770 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002771 case FASTRPC_MODE_PROFILE:
2772 fl->profile = (uint32_t)ioctl_param;
2773 break;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302774 case FASTRPC_MODE_SESSION:
2775 fl->sessionid = 1;
2776 fl->tgid |= (1 << SESSION_ID_INDEX);
2777 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002778 default:
2779 err = -ENOTTY;
2780 break;
2781 }
2782 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002783 case FASTRPC_IOCTL_GETPERF:
c_mtharue1a5ce12017-10-13 20:47:09 +05302784 K_COPY_FROM_USER(err, 0, &p.perf,
2785 param, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002786 if (err)
2787 goto bail;
2788 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
2789 if (p.perf.keys) {
2790 char *keys = PERF_KEYS;
2791
c_mtharue1a5ce12017-10-13 20:47:09 +05302792 K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
2793 keys, strlen(keys)+1);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002794 if (err)
2795 goto bail;
2796 }
2797 if (p.perf.data) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302798 K_COPY_TO_USER(err, 0, (void *)p.perf.data,
2799 &fl->perf, sizeof(fl->perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002800 }
c_mtharue1a5ce12017-10-13 20:47:09 +05302801 K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002802 if (err)
2803 goto bail;
2804 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302805 case FASTRPC_IOCTL_CONTROL:
c_mtharue1a5ce12017-10-13 20:47:09 +05302806 K_COPY_FROM_USER(err, 0, &p.cp, param,
2807 sizeof(p.cp));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302808 if (err)
2809 goto bail;
2810 VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
2811 if (err)
2812 goto bail;
2813 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002814 case FASTRPC_IOCTL_GETINFO:
c_mtharue1a5ce12017-10-13 20:47:09 +05302815 K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
Sathish Ambley36849af2017-02-02 09:35:55 -08002816 if (err)
2817 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002818 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
2819 if (err)
2820 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302821 K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002822 if (err)
2823 goto bail;
2824 break;
2825 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002826 p.init.attrs = 0;
2827 p.init.siglen = 0;
2828 size = sizeof(struct fastrpc_ioctl_init);
2829 /* fall through */
2830 case FASTRPC_IOCTL_INIT_ATTRS:
2831 if (!size)
2832 size = sizeof(struct fastrpc_ioctl_init_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +05302833 K_COPY_FROM_USER(err, 0, &p.init, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002834 if (err)
2835 goto bail;
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05302836 VERIFY(err, p.init.init.filelen >= 0 &&
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05302837 p.init.init.filelen < INIT_FILELEN_MAX);
2838 if (err)
2839 goto bail;
2840 VERIFY(err, p.init.init.memlen >= 0 &&
2841 p.init.init.memlen < INIT_MEMLEN_MAX);
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05302842 if (err)
2843 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002844 VERIFY(err, 0 == fastrpc_init_process(fl, &p.init));
2845 if (err)
2846 goto bail;
2847 break;
2848
2849 default:
2850 err = -ENOTTY;
2851 pr_info("bad ioctl: %d\n", ioctl_num);
2852 break;
2853 }
2854 bail:
2855 return err;
2856}
2857
2858static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
2859 unsigned long code,
2860 void *data)
2861{
2862 struct fastrpc_apps *me = &gfa;
2863 struct fastrpc_channel_ctx *ctx;
c_mtharue1a5ce12017-10-13 20:47:09 +05302864 struct notif_data *notifdata = data;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002865 int cid;
2866
2867 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
2868 cid = ctx - &me->channel[0];
2869 if (code == SUBSYS_BEFORE_SHUTDOWN) {
2870 mutex_lock(&me->smd_mutex);
2871 ctx->ssrcount++;
c_mtharue1a5ce12017-10-13 20:47:09 +05302872 ctx->issubsystemup = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002873 if (ctx->chan) {
2874 fastrpc_glink_close(ctx->chan, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05302875 ctx->chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002876 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
2877 gcinfo[cid].name, MAJOR(me->dev_no), cid);
2878 }
2879 mutex_unlock(&me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302880 if (cid == 0)
2881 me->staticpd_flags = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002882 fastrpc_notify_drivers(me, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05302883 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
2884 if (me->channel[0].remoteheap_ramdump_dev &&
2885 notifdata->enable_ramdump) {
2886 me->channel[0].ramdumpenabled = 1;
2887 }
2888 } else if (code == SUBSYS_AFTER_POWERUP) {
2889 ctx->issubsystemup = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002890 }
2891
2892 return NOTIFY_DONE;
2893}
2894
/* Character-device entry points for the fastrpc device node. */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};
2901
/*
 * Device-tree compatibles served by this driver: the main compute
 * node, SMMU context banks, and the ADSP memory region (all of which
 * are dispatched in fastrpc_probe()).
 */
static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};
2909
/*
 * Probe one SMMU context bank ("qcom,msm-fastrpc-compute-cb" node).
 *
 * Matches the node's "label" property against a known channel name,
 * fills in the channel's next free session from the node's iommu
 * specifier and properties, creates an IOMMU mapping and attaches
 * @dev to it.  On success the channel's session count is bumped.
 *
 * Returns 0 on success or a VERIFY/negative error code.
 */
static int fastrpc_cb_probe(struct device *dev)
{
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	struct of_phandle_args iommuspec;
	const char *name;
	unsigned int start = 0x80000000;	/* default IOVA window base */
	int err = 0, i;
	int secure_vmid = VMID_CP_PIXEL;

	/* "label" names the channel this context bank belongs to. */
	VERIFY(err, NULL != (name = of_get_property(dev->of_node,
					 "label", NULL)));
	if (err)
		goto bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		if (!strcmp(name, gcinfo[i].name))
			break;
	}
	VERIFY(err, i < NUM_CHANNELS);
	if (err)
		goto bail;
	chan = &gcinfo[i];
	/* Each channel supports at most NUM_SESSIONS context banks. */
	VERIFY(err, chan->sesscount < NUM_SESSIONS);
	if (err)
		goto bail;

	VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
						"#iommu-cells", 0, &iommuspec));
	if (err)
		goto bail;
	sess = &chan->session[chan->sesscount];
	/* Low nibble of the iommu specifier is the context-bank id. */
	sess->smmu.cb = iommuspec.args[0] & 0xf;
	sess->used = 0;
	sess->smmu.coherent = of_property_read_bool(dev->of_node,
						"dma-coherent");
	sess->smmu.secure = of_property_read_bool(dev->of_node,
						"qcom,secure-context-bank");
	/* Secure context banks get a lower IOVA window base. */
	if (sess->smmu.secure)
		start = 0x60000000;
	VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
			arm_iommu_create_mapping(&platform_bus_type,
						start, 0x78000000)));
	if (err)
		goto bail;

	/* Secure banks must carry the content-protection VMID. */
	if (sess->smmu.secure)
		iommu_domain_set_attr(sess->smmu.mapping->domain,
			DOMAIN_ATTR_SECURE_VMID,
			&secure_vmid);

	VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
	if (err)
		goto bail;
	sess->smmu.dev = dev;
	sess->smmu.enabled = 1;
	chan->sesscount++;
	/* Re-created per context bank; the last probe wins the handle. */
	debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
							NULL, &debugfs_fops);
bail:
	return err;
}
2973
/*
 * Platform-driver probe; dispatches on the compatible string:
 *  - "qcom,msm-fastrpc-compute-cb": SMMU context bank -> fastrpc_cb_probe()
 *  - "qcom,msm-adsprpc-mem-region": locate the ADSP ION heap's CMA area
 *    and hyp-assign it so HLOS and the three DSP subsystems share it
 *  - otherwise: the main fastrpc node; read the optional QoS latency
 *    and populate the child nodes above
 */
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;
		/* Walk the ION heaps for the ADSP heap's CMA base/size. */
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
						VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					PERM_READ | PERM_WRITE | PERM_EXEC,
					};

			/* Grant the region to HLOS plus the three DSPs. */
			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
					srcVM, 1, destVM, destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	/* Optional DT latency; 0 makes FASTRPC_CONTROL_LATENCY fail. */
	err = of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
				&me->latency);
	if (err)
		me->latency = 0;
	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					  fastrpc_match_table,
					  NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}
3041
3042static void fastrpc_deinit(void)
3043{
3044 struct fastrpc_apps *me = &gfa;
3045 struct fastrpc_channel_ctx *chan = gcinfo;
3046 int i, j;
3047
3048 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
3049 if (chan->chan) {
3050 kref_put_mutex(&chan->kref,
3051 fastrpc_channel_close, &me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05303052 chan->chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003053 }
3054 for (j = 0; j < NUM_SESSIONS; j++) {
3055 struct fastrpc_session_ctx *sess = &chan->session[j];
c_mtharue1a5ce12017-10-13 20:47:09 +05303056 if (sess->smmu.dev) {
3057 arm_iommu_detach_device(sess->smmu.dev);
3058 sess->smmu.dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003059 }
3060 if (sess->smmu.mapping) {
3061 arm_iommu_release_mapping(sess->smmu.mapping);
c_mtharue1a5ce12017-10-13 20:47:09 +05303062 sess->smmu.mapping = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003063 }
3064 }
3065 }
3066}
3067
/* Platform driver matched against fastrpc_match_table above. */
static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};
3076
/*
 * Module init: register the platform driver, allocate and publish the
 * character device and its class/device node, register SSR notifiers
 * for every channel, create the ION client and the debugfs root.
 *
 * Errors unwind in reverse order through the *_bail labels;
 * fastrpc_deinit() at the bottom is safe on partial initialization.
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	struct device *dev = NULL;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				1));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	/* A single device node (minor 0) is shared by all channels. */
	dev = device_create(me->class, NULL,
				MKDEV(MAJOR(me->dev_no), 0),
				NULL, gcinfo[0].name);
	VERIFY(err, !IS_ERR_OR_NULL(dev));
	if (err)
		goto device_create_bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		me->channel[i].dev = dev;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].issubsystemup = 1;
		me->channel[i].ramdumpenabled = 0;
		me->channel[i].remoteheap_ramdump_dev = NULL;
		/* Track restarts of this channel's DSP subsystem. */
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	debugfs_root = debugfs_create_dir("adsprpc", NULL);
	return 0;
device_create_bail:
	/* Unhook any notifiers registered by the loop above. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (me->channel[i].handle)
			subsys_notif_unregister_notifier(me->channel[i].handle,
							&me->channel[i].nb);
	}
	if (!IS_ERR_OR_NULL(dev))
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}
3148
/*
 * Module exit: destroy open client files, tear down channels and
 * sessions, remove device nodes and SSR notifiers, then release the
 * class, cdev, chrdev region, ION client and debugfs tree.
 */
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;
	int i;

	fastrpc_file_list_dtor(me);
	fastrpc_deinit();
	for (i = 0; i < NUM_CHANNELS; i++) {
		/*
		 * NOTE(review): unnamed channels also skip notifier
		 * unregistration here, though init registers a notifier for
		 * every channel — confirm unnamed channels cannot occur.
		 */
		if (!gcinfo[i].name)
			continue;
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						&me->channel[i].nb);
	}
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
	ion_client_destroy(me->client);
	debugfs_remove_recursive(debugfs_root);
}
3169
/* late_initcall: run after the subsystems this driver depends on. */
late_initcall(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");