/*
 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/msm_ion.h>
#include <soc/qcom/secure_buffer.h>
#include <soc/qcom/glink.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>
#include <linux/scatterlist.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/sort.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <asm/dma-iommu.h>
#include <soc/qcom/scm.h>
#include "adsprpc_compat.h"
#include "adsprpc_shared.h"
#include <soc/qcom/ramdump.h>
#include <linux/debugfs.h>
#include <linux/pm_qos.h>
#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
#define TZ_PIL_AUTH_QDSP6_PROC 1
#define ADSP_MMAP_HEAP_ADDR 4
#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
#define FASTRPC_ENOSUCH 39
#define VMID_SSC_Q6 5
#define VMID_ADSP_Q6 6
#define DEBUGFS_SIZE 1024

#define RPC_TIMEOUT	(5 * HZ)
#define BALIGN		128
#define NUM_CHANNELS	4 /* adsp, mdsp, slpi, cdsp */
#define NUM_SESSIONS	9 /* 8 compute, 1 cpz */
#define M_FDLIST	(16)
#define M_CRCLIST	(64)
#define SESSION_ID_INDEX (30)
#define FASTRPC_CTX_MAGIC (0xbeeddeed)

#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)

#define FASTRPC_LINK_STATE_DOWN (0x0)
#define FASTRPC_LINK_STATE_UP (0x1)
#define FASTRPC_LINK_DISCONNECTED (0x0)
#define FASTRPC_LINK_CONNECTING (0x1)
#define FASTRPC_LINK_CONNECTED (0x3)
#define FASTRPC_LINK_DISCONNECTING (0x7)
#define FASTRPC_LINK_REMOTE_DISCONNECTING (0x8)
#define FASTRPC_GLINK_INTENT_LEN (64)

#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
#define FASTRPC_STATIC_HANDLE_LISTENER (3)
#define FASTRPC_STATIC_HANDLE_MAX (20)
#define FASTRPC_LATENCY_CTRL_ENB (1)

#define INIT_FILELEN_MAX (2*1024*1024)
#define INIT_MEMLEN_MAX (8*1024*1024)

#define PERF_END (void)0

#define PERF(enb, cnt, ff) \
	{\
		struct timespec startT = {0};\
		if (enb) {\
			getnstimeofday(&startT);\
		} \
		ff ;\
		if (enb) {\
			cnt += getnstimediff(&startT);\
		} \
	}

static int fastrpc_glink_open(int cid);
static void fastrpc_glink_close(void *chan, int cid);
static struct dentry *debugfs_root;
static struct dentry *debugfs_global_file;

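/*
 * Helpers for page-aligned arithmetic on the 64-bit buffer addresses
 * exchanged with the remote processor.
 */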
static inline uint64_t buf_page_start(uint64_t buf)
{
	uint64_t start = (uint64_t) buf & PAGE_MASK;
	return start;
}

static inline uint64_t buf_page_offset(uint64_t buf)
{
	uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
	return offset;
}

static inline uint64_t buf_num_pages(uint64_t buf, size_t len)
{
	uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
	uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
	uint64_t nPages = end - start + 1;
	return nPages;
}

static inline uint64_t buf_page_size(uint32_t size)
{
	uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;

	return sz > PAGE_SIZE ? sz : PAGE_SIZE;
}

static inline void *uint64_to_ptr(uint64_t addr)
{
	void *ptr = (void *)((uintptr_t)addr);

	return ptr;
}

static inline uint64_t ptr_to_uint64(void *ptr)
{
	uint64_t addr = (uint64_t)((uintptr_t)ptr);

	return addr;
}

struct fastrpc_file;

struct fastrpc_buf {
	struct hlist_node hn;
	struct fastrpc_file *fl;
	void *virt;
	uint64_t phys;
	size_t size;
};

struct fastrpc_ctx_lst;

struct overlap {
	uintptr_t start;
	uintptr_t end;
	int raix;
	uintptr_t mstart;
	uintptr_t mend;
	uintptr_t offset;
};

struct smq_invoke_ctx {
	struct hlist_node hn;
	struct completion work;
	int retval;
	int pid;
	int tgid;
	remote_arg_t *lpra;
	remote_arg64_t *rpra;
	int *fds;
	unsigned int *attrs;
	struct fastrpc_mmap **maps;
	struct fastrpc_buf *buf;
	size_t used;
	struct fastrpc_file *fl;
	uint32_t sc;
	struct overlap *overs;
	struct overlap **overps;
	struct smq_msg msg;
	uint32_t *crc;
	unsigned int magic;
};

struct fastrpc_ctx_lst {
	struct hlist_head pending;
	struct hlist_head interrupted;
};

struct fastrpc_smmu {
	struct device *dev;
	struct dma_iommu_mapping *mapping;
	int cb;
	int enabled;
	int faults;
	int secure;
	int coherent;
};

struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};

struct fastrpc_glink_info {
	int link_state;
	int port_state;
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};

struct fastrpc_channel_ctx {
	char *name;
	char *subsys;
	void *chan;
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;
	struct completion workport;
	struct notifier_block nb;
	struct kref kref;
	int sesscount;
	int ssrcount;
	void *handle;
	int prevssrcount;
	int issubsystemup;
	int vmid;
	int rhvmid;
	int ramdumpenabled;
	void *remoteheap_ramdump_dev;
	struct fastrpc_glink_info link;
};

struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;
	uint32_t staticpd_flags;
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;
	spinlock_t hlock;
	struct ion_client *client;
	struct device *dev;
	unsigned int latency;
};

struct fastrpc_mmap {
	struct hlist_node hn;
	struct fastrpc_file *fl;
	struct fastrpc_apps *apps;
	int fd;
	uint32_t flags;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;
	size_t size;
	uintptr_t va;
	size_t len;
	int refs;
	uintptr_t raddr;
	int uncached;
	int secure;
	uintptr_t attr;
};

struct fastrpc_perf {
	int64_t count;
	int64_t flush;
	int64_t map;
	int64_t copy;
	int64_t link;
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;
	int64_t invoke;
};

struct fastrpc_file {
	struct hlist_node hn;
	spinlock_t hlock;
	struct hlist_head maps;
	struct hlist_head bufs;
	struct fastrpc_ctx_lst clst;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_session_ctx *secsctx;
	uint32_t mode;
	uint32_t profile;
	int sessionid;
	int tgid;
	int cid;
	int ssrcount;
	int pd;
	int file_close;
	struct fastrpc_apps *apps;
	struct fastrpc_perf perf;
	struct dentry *debugfs_file;
	struct pm_qos_request pm_qos_req;
	int qos_request;
};

static struct fastrpc_apps gfa;

static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};

static inline int64_t getnstimediff(struct timespec *start)
{
	int64_t ns;
	struct timespec ts, b;

	getnstimeofday(&ts);
	b = timespec_sub(ts, *start);
	ns = timespec_to_ns(&b);
	return ns;
}

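/*
 * Release a DMA buffer: either park it on the file's cache list, or
 * reassign it back to HLOS (when the channel has a VMID) and free it.
 */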
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->smmu.dev, buf->size, buf->virt,
					buf->phys);
	}
	kfree(buf);
}

static void fastrpc_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			hlist_del_init(&buf->hn);
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}

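/*
 * Track a mapping on the global list (remote heap mappings) or on the
 * owning file's list, and look it up again by fd and address range.
 */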
static void fastrpc_mmap_add(struct fastrpc_mmap *map)
{
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		struct fastrpc_apps *me = &gfa;

		spin_lock(&me->hlock);
		hlist_add_head(&map->hn, &me->maps);
		spin_unlock(&me->hlock);
	} else {
		struct fastrpc_file *fl = map->fl;

		spin_lock(&fl->hlock);
		hlist_add_head(&map->hn, &fl->maps);
		spin_unlock(&fl->hlock);
	}
}

static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
		uintptr_t va, size_t len, int mflags, int refs,
		struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_mmap *match = NULL, *map = NULL;
	struct hlist_node *n;

	if ((va + len) < va)
		return -EOVERFLOW;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs)
					map->refs++;
				match = map;
				break;
			}
		}
		spin_unlock(&me->hlock);
	} else {
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs)
					map->refs++;
				match = map;
				break;
			}
		}
		spin_unlock(&fl->hlock);
	}
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

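/* Allocate coherent memory from the adsprpc-mem device for the remote heap. */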
static int dma_alloc_memory(phys_addr_t *region_start, size_t size)
{
	struct fastrpc_apps *me = &gfa;
	void *vaddr = NULL;

	if (me->dev == NULL) {
		pr_err("device adsprpc-mem is not initialized\n");
		return -ENODEV;
	}
	vaddr = dma_alloc_coherent(me->dev, size, region_start, GFP_KERNEL);
	if (!vaddr) {
		pr_err("ADSPRPC: Failed to allocate %x remote heap memory\n",
						(unsigned int)size);
		return -ENOMEM;
	}
	return 0;
}

static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       size_t len, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = NULL, *map;
	struct hlist_node *n;
	struct fastrpc_apps *me = &gfa;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

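/*
 * Drop a reference on a mapping and, on the last reference (or when
 * forced via @flags), undo the SMMU mapping, hyp assignment and dma-buf
 * attachment before freeing it.
 */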
static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;

	if (!map)
		return;
	fl = map->fl;
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		spin_unlock(&me->hlock);
		if (map->refs > 0)
			return;
	} else {
		spin_lock(&fl->hlock);
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		spin_unlock(&fl->hlock);
		if (map->refs > 0 && !flags)
			return;
	}
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {

		if (me->dev == NULL) {
			pr_err("failed to free remote heap allocation\n");
			return;
		}
		if (map->phys) {
			dma_free_coherent(me->dev, map->size,
				&(map->va), map->phys);
		}
	} else {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		if (!IS_ERR_OR_NULL(map->handle))
			ion_free(fl->apps->client, map->handle);
		if (sess && sess->smmu.enabled) {
			if (map->size || map->phys)
				msm_dma_unmap_sg(sess->smmu.dev,
					map->table->sgl,
					map->table->nents, DMA_BIDIRECTIONAL,
					map->buf);
		}
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid && map->phys) {
			int srcVM[2] = {VMID_HLOS, vmid};

			hyp_assign_phys(map->phys, buf_page_size(map->size),
				srcVM, 2, destVM, destVMperm, 1);
		}

		if (!IS_ERR_OR_NULL(map->table))
			dma_buf_unmap_attachment(map->attach, map->table,
					DMA_BIDIRECTIONAL);
		if (!IS_ERR_OR_NULL(map->attach))
			dma_buf_detach(map->buf, map->attach);
		if (!IS_ERR_OR_NULL(map->buf))
			dma_buf_put(map->buf);
	}
	kfree(map);
}

static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
			struct fastrpc_session_ctx **session);

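/*
 * Import an ion/dma-buf fd (or carve out remote-heap memory), map it on
 * the session SMMU and record the mapping so later calls can reuse it.
 */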
608static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530609 unsigned int attr, uintptr_t va, size_t len, int mflags,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700610 struct fastrpc_mmap **ppmap)
611{
c_mtharue1a5ce12017-10-13 20:47:09 +0530612 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700613 struct fastrpc_session_ctx *sess;
614 struct fastrpc_apps *apps = fl->apps;
615 int cid = fl->cid;
616 struct fastrpc_channel_ctx *chan = &apps->channel[cid];
c_mtharue1a5ce12017-10-13 20:47:09 +0530617 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700618 unsigned long attrs;
c_mtharue1a5ce12017-10-13 20:47:09 +0530619 phys_addr_t region_start = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700620 unsigned long flags;
621 int err = 0, vmid;
622
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800623 if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700624 return 0;
625 map = kzalloc(sizeof(*map), GFP_KERNEL);
626 VERIFY(err, !IS_ERR_OR_NULL(map));
627 if (err)
628 goto bail;
629 INIT_HLIST_NODE(&map->hn);
630 map->flags = mflags;
631 map->refs = 1;
632 map->fl = fl;
633 map->fd = fd;
634 map->attr = attr;
c_mtharue1a5ce12017-10-13 20:47:09 +0530635 if (mflags == ADSP_MMAP_HEAP_ADDR ||
636 mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
637 map->apps = me;
638 map->fl = NULL;
639 VERIFY(err, !dma_alloc_memory(&region_start, len));
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700640 if (err)
641 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +0530642 map->phys = (uintptr_t)region_start;
643 map->size = len;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530644 map->va = (uintptr_t)map->phys;
c_mtharue1a5ce12017-10-13 20:47:09 +0530645 } else {
c_mtharu7bd6a422017-10-17 18:15:37 +0530646 if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
647 pr_info("adsprpc: buffer mapped with persist attr %x\n",
648 (unsigned int)map->attr);
649 map->refs = 2;
650 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530651 VERIFY(err, !IS_ERR_OR_NULL(map->handle =
652 ion_import_dma_buf_fd(fl->apps->client, fd)));
653 if (err)
654 goto bail;
655 VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
656 &flags));
657 if (err)
658 goto bail;
659
c_mtharue1a5ce12017-10-13 20:47:09 +0530660 map->secure = flags & ION_FLAG_SECURE;
661 if (map->secure) {
662 if (!fl->secsctx)
663 err = fastrpc_session_alloc(chan, 1,
664 &fl->secsctx);
665 if (err)
666 goto bail;
667 }
668 if (map->secure)
669 sess = fl->secsctx;
670 else
671 sess = fl->sctx;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +0530672
c_mtharue1a5ce12017-10-13 20:47:09 +0530673 VERIFY(err, !IS_ERR_OR_NULL(sess));
674 if (err)
675 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +0530676
677 map->uncached = !ION_IS_CACHED(flags);
678 if (map->attr & FASTRPC_ATTR_NOVA && !sess->smmu.coherent)
679 map->uncached = 1;
680
c_mtharue1a5ce12017-10-13 20:47:09 +0530681 VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
682 if (err)
683 goto bail;
684 VERIFY(err, !IS_ERR_OR_NULL(map->attach =
685 dma_buf_attach(map->buf, sess->smmu.dev)));
686 if (err)
687 goto bail;
688 VERIFY(err, !IS_ERR_OR_NULL(map->table =
689 dma_buf_map_attachment(map->attach,
690 DMA_BIDIRECTIONAL)));
691 if (err)
692 goto bail;
693 if (sess->smmu.enabled) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700694 attrs = DMA_ATTR_EXEC_MAPPING;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +0530695
696 if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
697 (sess->smmu.coherent && map->uncached))
698 attrs |= DMA_ATTR_FORCE_NON_COHERENT;
699 else if (map->attr & FASTRPC_ATTR_COHERENT)
700 attrs |= DMA_ATTR_FORCE_COHERENT;
701
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700702 VERIFY(err, map->table->nents ==
c_mtharue1a5ce12017-10-13 20:47:09 +0530703 msm_dma_map_sg_attrs(sess->smmu.dev,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700704 map->table->sgl, map->table->nents,
705 DMA_BIDIRECTIONAL, map->buf, attrs));
c_mtharue1a5ce12017-10-13 20:47:09 +0530706 if (err)
707 goto bail;
708 } else {
709 VERIFY(err, map->table->nents == 1);
710 if (err)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700711 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +0530712 }
713 map->phys = sg_dma_address(map->table->sgl);
714 if (sess->smmu.cb) {
715 map->phys += ((uint64_t)sess->smmu.cb << 32);
716 map->size = sg_dma_len(map->table->sgl);
717 } else {
718 map->size = buf_page_size(len);
719 }
720 vmid = fl->apps->channel[fl->cid].vmid;
721 if (vmid) {
722 int srcVM[1] = {VMID_HLOS};
723 int destVM[2] = {VMID_HLOS, vmid};
724 int destVMperm[2] = {PERM_READ | PERM_WRITE,
725 PERM_READ | PERM_WRITE | PERM_EXEC};
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700726
c_mtharue1a5ce12017-10-13 20:47:09 +0530727 VERIFY(err, !hyp_assign_phys(map->phys,
728 buf_page_size(map->size),
729 srcVM, 1, destVM, destVMperm, 2));
730 if (err)
731 goto bail;
732 }
733 map->va = va;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700734 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700735 map->len = len;
736
737 fastrpc_mmap_add(map);
738 *ppmap = map;
739
740bail:
741 if (err && map)
c_mtharu7bd6a422017-10-17 18:15:37 +0530742 fastrpc_mmap_free(map, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700743 return err;
744}
745
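/* Reuse the smallest cached buffer that fits, else allocate a new one. */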
static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
			     struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = NULL, *fr = NULL;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = NULL;
	VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
				       (void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
					       (void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				     PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}

static int context_restore_interrupted(struct fastrpc_file *fl,
				struct fastrpc_ioctl_invoke_crc *inv,
				struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int overlap_ptr_cmp(const void *a, const void *b)
{
	struct overlap *pa = *((struct overlap **)a);
	struct overlap *pb = *((struct overlap **)b);
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);
	return st == 0 ? ed : st;
}

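/*
 * Sort the argument buffers by address and compute non-overlapping
 * copy regions so overlapping user buffers are copied only once.
 */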
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}

#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst),\
			(void const __user *)(src),\
							(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((void __user *)(dst), \
						(src), (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

static void context_free(struct smq_invoke_ctx *ctx);

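/*
 * Allocate an invoke context and copy the argument descriptors, fd and
 * attribute lists from the caller (user or kernel).
 */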
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			 struct fastrpc_ioctl_invoke_crc *invokefd,
			 struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = NULL;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}
	ctx->crc = (uint32_t *)invokefd->crc;
	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = fl->tgid;
	init_completion(&ctx->work);
	ctx->magic = FASTRPC_CTX_MAGIC;

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}

static void context_save_interrupted(struct smq_invoke_ctx *ctx)
{
	struct fastrpc_ctx_lst *clst = &ctx->fl->clst;

	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	hlist_add_head(&ctx->hn, &clst->interrupted);
	spin_unlock(&ctx->fl->hlock);
	/* free the cache on power collapse */
	fastrpc_buf_list_free(ctx->fl);
}

static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i], 0);
	fastrpc_buf_free(ctx->buf, 1);
	ctx->magic = 0;
	kfree(ctx);
}

static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}

static void fastrpc_notify_users(struct fastrpc_file *me)
{
	struct smq_invoke_ctx *ictx;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
		complete(&ictx->work);
	}
	hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
		complete(&ictx->work);
	}
	spin_unlock(&me->hlock);
}

static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
{
	struct fastrpc_file *fl;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		if (fl->cid == cid)
			fastrpc_notify_users(fl);
	}
	spin_unlock(&me->hlock);
}

static void context_list_ctor(struct fastrpc_ctx_lst *me)
{
	INIT_HLIST_HEAD(&me->interrupted);
	INIT_HLIST_HEAD(&me->pending);
}

static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
{
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct smq_invoke_ctx *ictx = NULL, *ctxfree;
	struct hlist_node *n;

	do {
		ctxfree = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
	do {
		ctxfree = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
}

static int fastrpc_file_free(struct fastrpc_file *fl);
static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
{
	struct fastrpc_file *fl, *free;
	struct hlist_node *n;

	do {
		free = NULL;
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
			hlist_del_init(&fl->hn);
			free = fl;
			break;
		}
		spin_unlock(&me->hlock);
		if (free)
			fastrpc_file_free(free);
	} while (free);
}

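/*
 * Build the remote argument list for an invocation: map ion-backed
 * buffers, copy the remaining buffers into one contiguous message
 * buffer and flush caches before handing it to the DSP.
 */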
static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
{
	remote_arg64_t *rpra;
	remote_arg_t *lpra = ctx->lpra;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages, *ipage;
	uint32_t sc = ctx->sc;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	int handles, bufs = inbufs + outbufs;
	uintptr_t args;
	size_t rlen = 0, copylen = 0, metalen = 0;
	int i, oix;
	int err = 0;
	int mflags = 0;
	uint64_t *fdlist;
	uint32_t *crclist;

	/* calculate size of the metadata */
	rpra = NULL;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;

	for (i = 0; i < bufs; ++i) {
		uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
		size_t len = lpra[i].buf.len;

		if (ctx->fds[i] && (ctx->fds[i] != -1))
			fastrpc_mmap_create(ctx->fl, ctx->fds[i],
					ctx->attrs[i], buf, len,
					mflags, &ctx->maps[i]);
		ipage += 1;
	}
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	for (i = bufs; i < bufs + handles; i++) {
		VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
				FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
		if (err)
			goto bail;
		ipage += 1;
	}
	metalen = copylen = (size_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
			(sizeof(uint32_t) * M_CRCLIST);

	/* calculate len required for copying */
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		uintptr_t mstart, mend;
		size_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (ctx->maps[i])
			continue;
		if (ctx->overps[oix]->offset == 0)
			copylen = ALIGN(copylen, BALIGN);
		mstart = ctx->overps[oix]->mstart;
		mend = ctx->overps[oix]->mend;
		VERIFY(err, (mend - mstart) <= LONG_MAX);
		if (err)
			goto bail;
		copylen += mend - mstart;
		VERIFY(err, copylen >= 0);
		if (err)
			goto bail;
	}
	ctx->used = copylen;

	/* allocate new buffer */
	if (copylen) {
		VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
		if (err)
			goto bail;
	}
	if (ctx->buf->virt && metalen <= copylen)
		memset(ctx->buf->virt, 0, metalen);

	/* copy metadata */
	rpra = ctx->buf->virt;
	ctx->rpra = rpra;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;
	args = (uintptr_t)ctx->buf->virt + metalen;
	for (i = 0; i < bufs + handles; ++i) {
		if (lpra[i].buf.len)
			list[i].num = 1;
		else
			list[i].num = 0;
		list[i].pgidx = ipage - pages;
		ipage++;
	}

	/* map ion buffers */
	PERF(ctx->fl->profile, ctx->fl->perf.map,
	for (i = 0; rpra && i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];
		uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
		size_t len = lpra[i].buf.len;

		rpra[i].buf.pv = 0;
		rpra[i].buf.len = len;
		if (!len)
			continue;
		if (map) {
			struct vm_area_struct *vma;
			uintptr_t offset;
			uint64_t num = buf_num_pages(buf, len);
			int idx = list[i].pgidx;

			if (map->attr & FASTRPC_ATTR_NOVA) {
				offset = 0;
			} else {
				down_read(&current->mm->mmap_sem);
				VERIFY(err, NULL != (vma = find_vma(current->mm,
								map->va)));
				if (err) {
					up_read(&current->mm->mmap_sem);
					goto bail;
				}
				offset = buf_page_start(buf) - vma->vm_start;
				up_read(&current->mm->mmap_sem);
				VERIFY(err, offset < (uintptr_t)map->size);
				if (err)
					goto bail;
			}
			pages[idx].addr = map->phys + offset;
			pages[idx].size = num << PAGE_SHIFT;
		}
		rpra[i].buf.pv = buf;
	}
	PERF_END);
	for (i = bufs; i < bufs + handles; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		pages[i].addr = map->phys;
		pages[i].size = map->size;
	}
	fdlist = (uint64_t *)&pages[bufs + handles];
	for (i = 0; i < M_FDLIST; i++)
		fdlist[i] = 0;
	crclist = (uint32_t *)&fdlist[M_FDLIST];
	memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);

	/* copy non ion buffers */
	PERF(ctx->fl->profile, ctx->fl->perf.copy,
	rlen = copylen - metalen;
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];
		size_t mlen;
		uint64_t buf;
		size_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (map)
			continue;
		if (ctx->overps[oix]->offset == 0) {
			rlen -= ALIGN(args, BALIGN) - args;
			args = ALIGN(args, BALIGN);
		}
		mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
		VERIFY(err, rlen >= mlen);
		if (err)
			goto bail;
		rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
		pages[list[i].pgidx].addr = ctx->buf->phys -
					    ctx->overps[oix]->offset +
					    (copylen - rlen);
		pages[list[i].pgidx].addr =
			buf_page_start(pages[list[i].pgidx].addr);
		buf = rpra[i].buf.pv;
		pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
		if (i < inbufs) {
			K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
					lpra[i].buf.pv, len);
			if (err)
				goto bail;
		}
		args = args + mlen;
		rlen -= mlen;
	}
	PERF_END);

	PERF(ctx->fl->profile, ctx->fl->perf.flush,
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		if (rpra[i].buf.len && ctx->overps[oix]->mstart)
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
			uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
	}
	PERF_END);
	for (i = bufs; rpra && i < bufs + handles; i++) {
		rpra[i].dma.fd = ctx->fds[i];
		rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
		rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
	}

	if (!ctx->fl->sctx->smmu.coherent) {
		PERF(ctx->fl->profile, ctx->fl->perf.flush,
		dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
		PERF_END);
	}
bail:
	return err;
}

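/*
 * Copy output buffers (and optional CRCs) back to the caller and release
 * any maps the DSP asked to be freed via the returned fd list.
 */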
static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		    remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	uint32_t *crclist = NULL;

	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	crclist = (uint32_t *)(fdlist + M_FDLIST);

	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			fastrpc_mmap_free(ctx->maps[i], 0);
			ctx->maps[i] = NULL;
		}
	}
	if (inbufs + outbufs + handles) {
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap, 0);
		}
	}
	if (ctx->crc && crclist && rpra)
		K_COPY_TO_USER(err, kernel, ctx->crc,
				crclist, M_CRCLIST*sizeof(uint32_t));

bail:
	return err;
}

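/*
 * Flush the unaligned edges of output buffers before the remote call, and
 * invalidate them afterwards (see inv_args below), skipping coherent or
 * uncached mappings.
 */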
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
							rpra[i].buf.len);
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}

static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						 + rpra[i].buf.len));
	}

	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}

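/* Package the invoke descriptor and send it over the glink channel. */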
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, NULL != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = fl->tgid;
	msg->tid = current->pid;
	if (fl->sessionid)
		msg->tid |= (1 << SESSION_ID_INDEX);
	if (kernel)
		msg->pid = 0;
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
			FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
bail:
	return err;
}

static void fastrpc_init(struct fastrpc_apps *me)
{
	int i;

	INIT_HLIST_HEAD(&me->drivers);
	spin_lock_init(&me->hlock);
	mutex_init(&me->smd_mutex);
	me->channel = &gcinfo[0];
	for (i = 0; i < NUM_CHANNELS; i++) {
		init_completion(&me->channel[i].work);
		init_completion(&me->channel[i].workport);
		me->channel[i].sesscount = 0;
	}
}

static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);

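/*
 * Core invoke path: restore or allocate a context, marshal arguments,
 * send the message, wait for completion and unmarshal the results.
 */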
1515static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
1516 uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07001517 struct fastrpc_ioctl_invoke_crc *inv)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001518{
c_mtharue1a5ce12017-10-13 20:47:09 +05301519 struct smq_invoke_ctx *ctx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001520 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
1521 int cid = fl->cid;
1522 int interrupted = 0;
1523 int err = 0;
Maria Yu757199c2017-09-22 16:05:49 +08001524 struct timespec invoket = {0};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001525
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001526 if (fl->profile)
1527 getnstimeofday(&invoket);
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301528
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301529
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301530 VERIFY(err, fl->sctx != NULL);
1531 if (err)
1532 goto bail;
1533 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1534 if (err)
1535 goto bail;
1536
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001537 if (!kernel) {
1538 VERIFY(err, 0 == context_restore_interrupted(fl, inv,
1539 &ctx));
1540 if (err)
1541 goto bail;
1542 if (fl->sctx->smmu.faults)
1543 err = FASTRPC_ENOSUCH;
1544 if (err)
1545 goto bail;
1546 if (ctx)
1547 goto wait;
1548 }
1549
1550 VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
1551 if (err)
1552 goto bail;
1553
1554 if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001555 PERF(fl->profile, fl->perf.getargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001556 VERIFY(err, 0 == get_args(kernel, ctx));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001557 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001558 if (err)
1559 goto bail;
1560 }
1561
Sathish Ambleyc432b502017-06-05 12:03:42 -07001562 if (!fl->sctx->smmu.coherent)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001563 inv_args_pre(ctx);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001564 PERF(fl->profile, fl->perf.link,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001565 VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001566 PERF_END);
1567
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001568 if (err)
1569 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001570 wait:
1571 if (kernel)
1572 wait_for_completion(&ctx->work);
1573 else {
1574 interrupted = wait_for_completion_interruptible(&ctx->work);
1575 VERIFY(err, 0 == (err = interrupted));
1576 if (err)
1577 goto bail;
1578 }
Sathish Ambleyc432b502017-06-05 12:03:42 -07001579
1580 PERF(fl->profile, fl->perf.invargs,
1581 if (!fl->sctx->smmu.coherent)
1582 inv_args(ctx);
1583 PERF_END);
1584
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001585 VERIFY(err, 0 == (err = ctx->retval));
1586 if (err)
1587 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001588
1589 PERF(fl->profile, fl->perf.putargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001590 VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001591 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001592 if (err)
1593 goto bail;
1594 bail:
1595 if (ctx && interrupted == -ERESTARTSYS)
1596 context_save_interrupted(ctx);
1597 else if (ctx)
1598 context_free(ctx);
1599 if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
1600		err = -ECONNRESET;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001601
1602 if (fl->profile && !interrupted) {
1603 if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
1604 fl->perf.invoke += getnstimediff(&invoket);
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301605 if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX)
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001606 fl->perf.count++;
1607 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001608 return err;
1609}
1610
Sathish Ambley36849af2017-02-02 09:35:55 -08001611static int fastrpc_channel_open(struct fastrpc_file *fl);
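/*
 * Set up the remote process for this file descriptor: ATTACH joins the
 * guest OS process on the DSP, CREATE spawns a dynamic user PD from the
 * supplied ELF image, and CREATE_STATIC brings up the static PD backed
 * by the remote heap (hyp-assigned away from HLOS).
 */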
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001612static int fastrpc_init_process(struct fastrpc_file *fl,
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001613 struct fastrpc_ioctl_init_attrs *uproc)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001614{
1615 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05301616 struct fastrpc_apps *me = &gfa;
Sathish Ambleybae51902017-07-03 15:00:49 -07001617 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001618 struct fastrpc_ioctl_init *init = &uproc->init;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001619 struct smq_phy_page pages[1];
c_mtharue1a5ce12017-10-13 20:47:09 +05301620 struct fastrpc_mmap *file = NULL, *mem = NULL;
1621 char *proc_name = NULL;
1622 int srcVM[1] = {VMID_HLOS};
c_mtharu63ffc012017-11-16 15:26:56 +05301623 int destVM[1] = {me->channel[fl->cid].rhvmid};
c_mtharue1a5ce12017-10-13 20:47:09 +05301624 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
1625 int hlosVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001626
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05301627 VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
Sathish Ambley36849af2017-02-02 09:35:55 -08001628 if (err)
1629 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001630 if (init->flags == FASTRPC_INIT_ATTACH) {
1631 remote_arg_t ra[1];
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301632 int tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001633
1634 ra[0].buf.pv = (void *)&tgid;
1635 ra[0].buf.len = sizeof(tgid);
1636 ioctl.inv.handle = 1;
1637 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
1638 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301639 ioctl.fds = NULL;
1640 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001641 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001642 fl->pd = 0;
1643 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1644 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1645 if (err)
1646 goto bail;
1647 } else if (init->flags == FASTRPC_INIT_CREATE) {
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001648 remote_arg_t ra[6];
1649 int fds[6];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001650 int mflags = 0;
1651 struct {
1652 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301653 unsigned int namelen;
1654 unsigned int filelen;
1655 unsigned int pageslen;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001656 int attrs;
1657 int siglen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001658 } inbuf;
1659
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301660 inbuf.pgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001661 inbuf.namelen = strlen(current->comm) + 1;
1662 inbuf.filelen = init->filelen;
1663 fl->pd = 1;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301664
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05301665 VERIFY(err, access_ok(0, (void __user *)init->file,
1666 init->filelen));
1667 if (err)
1668 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001669 if (init->filelen) {
1670 VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
1671 init->file, init->filelen, mflags, &file));
1672 if (err)
1673 goto bail;
1674 }
1675 inbuf.pageslen = 1;
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05301676 VERIFY(err, access_ok(1, (void __user *)init->mem,
1677 init->memlen));
1678 if (err)
1679 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001680 VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
1681 init->mem, init->memlen, mflags, &mem));
1682 if (err)
1683 goto bail;
1684 inbuf.pageslen = 1;
1685 ra[0].buf.pv = (void *)&inbuf;
1686 ra[0].buf.len = sizeof(inbuf);
1687 fds[0] = 0;
1688
1689 ra[1].buf.pv = (void *)current->comm;
1690 ra[1].buf.len = inbuf.namelen;
1691 fds[1] = 0;
1692
1693 ra[2].buf.pv = (void *)init->file;
1694 ra[2].buf.len = inbuf.filelen;
1695 fds[2] = init->filefd;
1696
1697 pages[0].addr = mem->phys;
1698 pages[0].size = mem->size;
1699 ra[3].buf.pv = (void *)pages;
1700 ra[3].buf.len = 1 * sizeof(*pages);
1701 fds[3] = 0;
1702
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001703 inbuf.attrs = uproc->attrs;
1704 ra[4].buf.pv = (void *)&(inbuf.attrs);
1705 ra[4].buf.len = sizeof(inbuf.attrs);
1706 fds[4] = 0;
1707
1708 inbuf.siglen = uproc->siglen;
1709 ra[5].buf.pv = (void *)&(inbuf.siglen);
1710 ra[5].buf.len = sizeof(inbuf.siglen);
1711 fds[5] = 0;
1712
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001713 ioctl.inv.handle = 1;
1714 ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001715 if (uproc->attrs)
1716 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001717 ioctl.inv.pra = ra;
1718 ioctl.fds = fds;
c_mtharue1a5ce12017-10-13 20:47:09 +05301719 ioctl.attrs = NULL;
1720 ioctl.crc = NULL;
1721 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1722 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1723 if (err)
1724 goto bail;
1725 } else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
1726 remote_arg_t ra[3];
1727 uint64_t phys = 0;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301728 size_t size = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05301729 int fds[3];
1730 struct {
1731 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301732 unsigned int namelen;
1733 unsigned int pageslen;
c_mtharue1a5ce12017-10-13 20:47:09 +05301734 } inbuf;
1735
1736 if (!init->filelen)
1737 goto bail;
1738
1739 proc_name = kzalloc(init->filelen, GFP_KERNEL);
1740 VERIFY(err, !IS_ERR_OR_NULL(proc_name));
1741 if (err)
1742 goto bail;
1743 VERIFY(err, 0 == copy_from_user((void *)proc_name,
1744 (void __user *)init->file, init->filelen));
1745 if (err)
1746 goto bail;
1747
1748 inbuf.pgid = current->tgid;
c_mtharu81a0aa72017-11-07 16:13:21 +05301749 inbuf.namelen = init->filelen;
c_mtharue1a5ce12017-10-13 20:47:09 +05301750 inbuf.pageslen = 0;
1751 if (!me->staticpd_flags) {
1752 inbuf.pageslen = 1;
1753 VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
1754 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
1755 &mem));
1756 if (err)
1757 goto bail;
1758 phys = mem->phys;
1759 size = mem->size;
1760 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
1761 srcVM, 1, destVM, destVMperm, 1));
1762 if (err) {
1763				pr_err("ADSPRPC: hyp_assign_phys fail err %d\n",
1764 err);
1765 pr_err("map->phys %llx, map->size %d\n",
1766 phys, (int)size);
1767 goto bail;
1768 }
1769 me->staticpd_flags = 1;
1770 }
1771
1772 ra[0].buf.pv = (void *)&inbuf;
1773 ra[0].buf.len = sizeof(inbuf);
1774 fds[0] = 0;
1775
1776 ra[1].buf.pv = (void *)proc_name;
1777 ra[1].buf.len = inbuf.namelen;
1778 fds[1] = 0;
1779
1780 pages[0].addr = phys;
1781 pages[0].size = size;
1782
1783 ra[2].buf.pv = (void *)pages;
1784 ra[2].buf.len = sizeof(*pages);
1785 fds[2] = 0;
1786 ioctl.inv.handle = 1;
1787
1788 ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
1789 ioctl.inv.pra = ra;
1790 ioctl.fds = NULL;
1791 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001792 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001793 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1794 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1795 if (err)
1796 goto bail;
1797 } else {
1798 err = -ENOTTY;
1799 }
1800bail:
c_mtharud91205a2017-11-07 16:01:06 +05301801 kfree(proc_name);
c_mtharue1a5ce12017-10-13 20:47:09 +05301802 if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
1803 me->staticpd_flags = 0;
1804 if (mem && err) {
1805 if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
1806 hyp_assign_phys(mem->phys, (uint64_t)mem->size,
1807 destVM, 1, srcVM, hlosVMperm, 1);
c_mtharu7bd6a422017-10-17 18:15:37 +05301808 fastrpc_mmap_free(mem, 0);
c_mtharue1a5ce12017-10-13 20:47:09 +05301809 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001810 if (file)
c_mtharu7bd6a422017-10-17 18:15:37 +05301811 fastrpc_mmap_free(file, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001812 return err;
1813}
1814
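/* Ask the DSP to tear down the remote process associated with this tgid. */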
1815static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
1816{
1817 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001818 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001819 remote_arg_t ra[1];
1820 int tgid = 0;
1821
Sathish Ambley36849af2017-02-02 09:35:55 -08001822 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1823 if (err)
1824 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05301825 VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001826 if (err)
1827 goto bail;
1828 tgid = fl->tgid;
1829 ra[0].buf.pv = (void *)&tgid;
1830 ra[0].buf.len = sizeof(tgid);
1831 ioctl.inv.handle = 1;
1832 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
1833 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301834 ioctl.fds = NULL;
1835 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001836 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001837 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1838 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1839bail:
1840 return err;
1841}
1842
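/*
 * Publish a mapping to the DSP. Heap mappings are additionally protected
 * through an SCM call (ADSP_MMAP_HEAP_ADDR) or hyp-assigned to the remote
 * heap VM (ADSP_MMAP_REMOTE_HEAP_ADDR).
 */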
1843static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
1844 struct fastrpc_mmap *map)
1845{
Sathish Ambleybae51902017-07-03 15:00:49 -07001846 struct fastrpc_ioctl_invoke_crc ioctl;
c_mtharu63ffc012017-11-16 15:26:56 +05301847 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001848 struct smq_phy_page page;
1849 int num = 1;
1850 remote_arg_t ra[3];
1851 int err = 0;
1852 struct {
1853 int pid;
1854 uint32_t flags;
1855 uintptr_t vaddrin;
1856 int num;
1857 } inargs;
1858 struct {
1859 uintptr_t vaddrout;
1860 } routargs;
1861
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301862 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001863 inargs.vaddrin = (uintptr_t)map->va;
1864 inargs.flags = flags;
1865 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
1866 ra[0].buf.pv = (void *)&inargs;
1867 ra[0].buf.len = sizeof(inargs);
1868 page.addr = map->phys;
1869 page.size = map->size;
1870 ra[1].buf.pv = (void *)&page;
1871 ra[1].buf.len = num * sizeof(page);
1872
1873 ra[2].buf.pv = (void *)&routargs;
1874 ra[2].buf.len = sizeof(routargs);
1875
1876 ioctl.inv.handle = 1;
1877 if (fl->apps->compat)
1878 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
1879 else
1880 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
1881 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301882 ioctl.fds = NULL;
1883 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001884 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001885 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1886 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1887 map->raddr = (uintptr_t)routargs.vaddrout;
c_mtharue1a5ce12017-10-13 20:47:09 +05301888 if (err)
1889 goto bail;
1890 if (flags == ADSP_MMAP_HEAP_ADDR) {
1891 struct scm_desc desc = {0};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001892
c_mtharue1a5ce12017-10-13 20:47:09 +05301893 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
1894 desc.args[1] = map->phys;
1895 desc.args[2] = map->size;
1896 desc.arginfo = SCM_ARGS(3);
1897 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
1898 TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
1899 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
1900
1901 int srcVM[1] = {VMID_HLOS};
c_mtharu63ffc012017-11-16 15:26:56 +05301902 int destVM[1] = {me->channel[fl->cid].rhvmid};
c_mtharue1a5ce12017-10-13 20:47:09 +05301903 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
1904
1905 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
1906 srcVM, 1, destVM, destVMperm, 1));
1907 if (err)
1908 goto bail;
1909 }
1910bail:
1911 return err;
1912}
1913
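/*
 * Release remote-heap protection for a mapping: clear the TZ protection
 * for ADSP_MMAP_HEAP_ADDR, or hyp-assign ADSP_MMAP_REMOTE_HEAP_ADDR
 * memory back to HLOS.
 */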
1914static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
1915 struct fastrpc_mmap *map)
1916{
1917 int err = 0;
c_mtharu63ffc012017-11-16 15:26:56 +05301918 struct fastrpc_apps *me = &gfa;
1919 int srcVM[1] = {me->channel[fl->cid].rhvmid};
c_mtharue1a5ce12017-10-13 20:47:09 +05301920 int destVM[1] = {VMID_HLOS};
1921 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
1922
1923 if (map->flags == ADSP_MMAP_HEAP_ADDR) {
1924 struct fastrpc_ioctl_invoke_crc ioctl;
1925 struct scm_desc desc = {0};
1926 remote_arg_t ra[1];
1928 struct {
1929 uint8_t skey;
1930 } routargs;
1931
1932 ra[0].buf.pv = (void *)&routargs;
1933 ra[0].buf.len = sizeof(routargs);
1934
1935 ioctl.inv.handle = 1;
1936 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
1937 ioctl.inv.pra = ra;
1938 ioctl.fds = NULL;
1939 ioctl.attrs = NULL;
1940 ioctl.crc = NULL;
1941 if (fl == NULL)
1942 goto bail;
1943
1944 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1945 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1946 if (err)
1947 goto bail;
1948 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
1949 desc.args[1] = map->phys;
1950 desc.args[2] = map->size;
1951 desc.args[3] = routargs.skey;
1952 desc.arginfo = SCM_ARGS(4);
1953 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
1954 TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
1955 } else if (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
1956 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
1957 srcVM, 1, destVM, destVMperm, 1));
1958 if (err)
1959 goto bail;
1960 }
1961
1962bail:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001963 return err;
1964}
1965
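/*
 * Ask the DSP to unmap a region previously mapped with
 * fastrpc_mmap_on_dsp(), then drop any remote-heap protection.
 */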
1966static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
1967 struct fastrpc_mmap *map)
1968{
Sathish Ambleybae51902017-07-03 15:00:49 -07001969 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001970 remote_arg_t ra[1];
1971 int err = 0;
1972 struct {
1973 int pid;
1974 uintptr_t vaddrout;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301975 size_t size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001976 } inargs;
1977
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301978 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001979 inargs.size = map->size;
1980 inargs.vaddrout = map->raddr;
1981 ra[0].buf.pv = (void *)&inargs;
1982 ra[0].buf.len = sizeof(inargs);
1983
1984 ioctl.inv.handle = 1;
1985 if (fl->apps->compat)
1986 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
1987 else
1988 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
1989 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301990 ioctl.fds = NULL;
1991 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001992 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001993 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1994 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
c_mtharue1a5ce12017-10-13 20:47:09 +05301995 if (err)
1996 goto bail;
1997 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
1998 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
1999 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, map));
2000 if (err)
2001 goto bail;
2002 }
2003bail:
2004 return err;
2005}
2006
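/*
 * After a subsystem restart, drain the global map list: unprotect each
 * remote-heap mapping, capture a ramdump of it when enabled, and free it.
 */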
2007static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
2008{
2009 struct fastrpc_mmap *match = NULL, *map = NULL;
2010 struct hlist_node *n = NULL;
2011 int err = 0, ret = 0;
2012 struct fastrpc_apps *me = &gfa;
2013 struct ramdump_segment *ramdump_segments_rh = NULL;
2014
2015 do {
2016 match = NULL;
2017 spin_lock(&me->hlock);
2018 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
2019 match = map;
2020 hlist_del_init(&map->hn);
2021 break;
2022 }
2023 spin_unlock(&me->hlock);
2024
2025 if (match) {
2026 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match));
2027 if (err)
2028 goto bail;
2029 if (me->channel[0].ramdumpenabled) {
2030 ramdump_segments_rh = kcalloc(1,
2031 sizeof(struct ramdump_segment), GFP_KERNEL);
2032 if (ramdump_segments_rh) {
2033 ramdump_segments_rh->address =
2034 match->phys;
2035 ramdump_segments_rh->size = match->size;
2036 ret = do_elf_ramdump(
2037 me->channel[0].remoteheap_ramdump_dev,
2038 ramdump_segments_rh, 1);
2039 if (ret < 0)
2040						pr_err("ADSPRPC: unable to dump heap\n");
2041 kfree(ramdump_segments_rh);
2042 }
2043 }
c_mtharu7bd6a422017-10-17 18:15:37 +05302044 fastrpc_mmap_free(match, 0);
c_mtharue1a5ce12017-10-13 20:47:09 +05302045 }
2046 } while (match);
2047bail:
2048 if (err && match)
2049 fastrpc_mmap_add(match);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002050 return err;
2051}
2052
2053static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302054 size_t len, struct fastrpc_mmap **ppmap);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002055
2056static void fastrpc_mmap_add(struct fastrpc_mmap *map);
2057
2058static int fastrpc_internal_munmap(struct fastrpc_file *fl,
2059 struct fastrpc_ioctl_munmap *ud)
2060{
2061 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302062 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002063
2064 VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
2065 if (err)
2066 goto bail;
2067 VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
2068 if (err)
2069 goto bail;
c_mtharu7bd6a422017-10-17 18:15:37 +05302070 fastrpc_mmap_free(map, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002071bail:
2072 if (err && map)
2073 fastrpc_mmap_add(map);
2074 return err;
2075}
2076
c_mtharu7bd6a422017-10-17 18:15:37 +05302077static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
2078 struct fastrpc_ioctl_munmap_fd *ud) {
2079 int err = 0;
2080 struct fastrpc_mmap *map = NULL;
2081
2082 VERIFY(err, (fl && ud));
2083 if (err)
2084 goto bail;
2085
2086 if (!fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
2087		pr_err("mapping not found to unmap fd 0x%x va 0x%llx len 0x%x\n",
2088 ud->fd, (unsigned long long)ud->va,
2089 (unsigned int)ud->len);
2090 err = -1;
2091 goto bail;
2092 }
2093 if (map)
2094 fastrpc_mmap_free(map, 0);
2095bail:
2096 return err;
2097}
2098
2099
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002100static int fastrpc_internal_mmap(struct fastrpc_file *fl,
2101 struct fastrpc_ioctl_mmap *ud)
2102{
2103
c_mtharue1a5ce12017-10-13 20:47:09 +05302104 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002105 int err = 0;
2106
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302107 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
c_mtharue1a5ce12017-10-13 20:47:09 +05302108 ud->size, ud->flags, 1, &map))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002109 return 0;
2110
2111 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302112 (uintptr_t)ud->vaddrin, ud->size,
c_mtharue1a5ce12017-10-13 20:47:09 +05302113 ud->flags, &map));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002114 if (err)
2115 goto bail;
2116 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
2117 if (err)
2118 goto bail;
2119 ud->vaddrout = map->raddr;
2120 bail:
2121 if (err && map)
c_mtharu7bd6a422017-10-17 18:15:37 +05302122 fastrpc_mmap_free(map, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002123 return err;
2124}
2125
2126static void fastrpc_channel_close(struct kref *kref)
2127{
2128 struct fastrpc_apps *me = &gfa;
2129 struct fastrpc_channel_ctx *ctx;
2130 int cid;
2131
2132 ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
2133 cid = ctx - &gcinfo[0];
2134 fastrpc_glink_close(ctx->chan, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05302135 ctx->chan = NULL;
Tharun Kumar Merugu532767d2017-06-20 19:53:13 +05302136 glink_unregister_link_state_cb(ctx->link.link_notify_handle);
2137 ctx->link.link_notify_handle = NULL;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302138 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002139 pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
2140 MAJOR(me->dev_no), cid);
2141}
2142
2143static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
2144
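/*
 * Reserve a free SMMU session (context bank) on the channel that matches
 * the requested security mode; channels without context banks fall back
 * to the default device.
 */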
2145static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
2146 int secure, struct fastrpc_session_ctx **session)
2147{
2148 struct fastrpc_apps *me = &gfa;
2149 int idx = 0, err = 0;
2150
2151 if (chan->sesscount) {
2152 for (idx = 0; idx < chan->sesscount; ++idx) {
2153 if (!chan->session[idx].used &&
2154 chan->session[idx].smmu.secure == secure) {
2155 chan->session[idx].used = 1;
2156 break;
2157 }
2158 }
2159 VERIFY(err, idx < chan->sesscount);
2160 if (err)
2161 goto bail;
2162 chan->session[idx].smmu.faults = 0;
2163 } else {
2164 VERIFY(err, me->dev != NULL);
2165 if (err)
2166 goto bail;
2167 chan->session[0].dev = me->dev;
c_mtharue1a5ce12017-10-13 20:47:09 +05302168 chan->session[0].smmu.dev = me->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002169 }
2170
2171 *session = &chan->session[idx];
2172 bail:
2173 return err;
2174}
2175
c_mtharue1a5ce12017-10-13 20:47:09 +05302176static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
2177 size_t size)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002178{
2179 if (glink_queue_rx_intent(h, NULL, size))
2180 return false;
2181 return true;
2182}
2183
c_mtharue1a5ce12017-10-13 20:47:09 +05302184static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002185 const void *pkt_priv, const void *ptr)
2186{
2187}
2188
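/*
 * glink RX callback: validate the response size and the context magic
 * before waking up the invocation waiting on this context.
 */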
c_mtharue1a5ce12017-10-13 20:47:09 +05302189static void fastrpc_glink_notify_rx(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002190 const void *pkt_priv, const void *ptr, size_t size)
2191{
2192 struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
c_mtharufdac6892017-10-12 13:09:01 +05302193 struct smq_invoke_ctx *ctx;
2194 int err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002195
c_mtharufdac6892017-10-12 13:09:01 +05302196 VERIFY(err, (rsp && size >= sizeof(*rsp)));
2197 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302198 goto bail;
2199
c_mtharufdac6892017-10-12 13:09:01 +05302200 ctx = (struct smq_invoke_ctx *)(uint64_to_ptr(rsp->ctx & ~1));
2201 VERIFY(err, (ctx && ctx->magic == FASTRPC_CTX_MAGIC));
2202 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302203 goto bail;
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302204
c_mtharufdac6892017-10-12 13:09:01 +05302205 context_notify_user(ctx, rsp->retval);
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302206bail:
c_mtharufdac6892017-10-12 13:09:01 +05302207 if (err)
2208 pr_err("adsprpc: invalid response or context\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002209 glink_rx_done(handle, ptr, true);
2210}
2211
c_mtharue1a5ce12017-10-13 20:47:09 +05302212static void fastrpc_glink_notify_state(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002213 unsigned int event)
2214{
2215 struct fastrpc_apps *me = &gfa;
2216 int cid = (int)(uintptr_t)priv;
2217 struct fastrpc_glink_info *link;
2218
2219 if (cid < 0 || cid >= NUM_CHANNELS)
2220 return;
2221 link = &me->channel[cid].link;
2222 switch (event) {
2223 case GLINK_CONNECTED:
2224 link->port_state = FASTRPC_LINK_CONNECTED;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302225 complete(&me->channel[cid].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002226 break;
2227 case GLINK_LOCAL_DISCONNECTED:
2228 link->port_state = FASTRPC_LINK_DISCONNECTED;
2229 break;
2230 case GLINK_REMOTE_DISCONNECTED:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002231 break;
2232 default:
2233 break;
2234 }
2235}
2236
2237static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
2238 struct fastrpc_session_ctx **session)
2239{
2240 int err = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302241 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002242
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302243 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002244 if (!*session)
2245 err = fastrpc_session_alloc_locked(chan, secure, session);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302246 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002247 return err;
2248}
2249
2250static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
2251 struct fastrpc_session_ctx *session)
2252{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302253 struct fastrpc_apps *me = &gfa;
2254
2255 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002256 session->used = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302257 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002258}
2259
2260static int fastrpc_file_free(struct fastrpc_file *fl)
2261{
2262 struct hlist_node *n;
c_mtharue1a5ce12017-10-13 20:47:09 +05302263 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002264 int cid;
2265
2266 if (!fl)
2267 return 0;
2268 cid = fl->cid;
2269
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302270 (void)fastrpc_release_current_dsp_process(fl);
2271
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002272 spin_lock(&fl->apps->hlock);
2273 hlist_del_init(&fl->hn);
2274 spin_unlock(&fl->apps->hlock);
2275
Sathish Ambleyd7fbcbb2017-03-08 10:55:48 -08002276 if (!fl->sctx) {
2277 kfree(fl);
2278 return 0;
2279 }
tharun kumar9f899ea2017-07-03 17:07:03 +05302280 spin_lock(&fl->hlock);
2281 fl->file_close = 1;
2282 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002283 fastrpc_context_list_dtor(fl);
2284 fastrpc_buf_list_free(fl);
2285 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
c_mtharu7bd6a422017-10-17 18:15:37 +05302286 fastrpc_mmap_free(map, 1);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002287 }
2288 if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
2289 kref_put_mutex(&fl->apps->channel[cid].kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302290 fastrpc_channel_close, &fl->apps->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002291 if (fl->sctx)
2292 fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
2293 if (fl->secsctx)
2294 fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
2295 kfree(fl);
2296 return 0;
2297}
2298
2299static int fastrpc_device_release(struct inode *inode, struct file *file)
2300{
2301 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2302
2303 if (fl) {
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302304 if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
2305 pm_qos_remove_request(&fl->pm_qos_req);
Sathish Ambley1ca68232017-01-19 10:32:55 -08002306 if (fl->debugfs_file != NULL)
2307 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002308 fastrpc_file_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +05302309 file->private_data = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002310 }
2311 return 0;
2312}
2313
2314static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
2315 void *priv)
2316{
2317 struct fastrpc_apps *me = &gfa;
2318 int cid = (int)((uintptr_t)priv);
2319 struct fastrpc_glink_info *link;
2320
2321 if (cid < 0 || cid >= NUM_CHANNELS)
2322 return;
2323
2324 link = &me->channel[cid].link;
2325 switch (cb_info->link_state) {
2326 case GLINK_LINK_STATE_UP:
2327 link->link_state = FASTRPC_LINK_STATE_UP;
2328 complete(&me->channel[cid].work);
2329 break;
2330 case GLINK_LINK_STATE_DOWN:
2331 link->link_state = FASTRPC_LINK_STATE_DOWN;
2332 break;
2333 default:
2334 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
2335 break;
2336 }
2337}
2338
2339static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
2340{
2341 int err = 0;
2342 struct fastrpc_glink_info *link;
2343
2344 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2345 if (err)
2346 goto bail;
2347
2348 link = &me->channel[cid].link;
2349 if (link->link_notify_handle != NULL)
2350 goto bail;
2351
2352 link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
2353 link->link_notify_handle = glink_register_link_state_cb(
2354 &link->link_info,
2355 (void *)((uintptr_t)cid));
2356 VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
2357 if (err) {
2358 link->link_notify_handle = NULL;
2359 goto bail;
2360 }
2361 VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
2362 RPC_TIMEOUT));
2363bail:
2364 return err;
2365}
2366
2367static void fastrpc_glink_close(void *chan, int cid)
2368{
2369 int err = 0;
2370 struct fastrpc_glink_info *link;
2371
2372 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2373 if (err)
2374 return;
2375 link = &gfa.channel[cid].link;
2376
c_mtharu314a4202017-11-15 22:09:17 +05302377 if (link->port_state == FASTRPC_LINK_CONNECTED ||
2378 link->port_state == FASTRPC_LINK_REMOTE_DISCONNECTING) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002379 link->port_state = FASTRPC_LINK_DISCONNECTING;
2380 glink_close(chan);
2381 }
2382}
2383
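/*
 * Open the glink port for a channel once its link has come up; the port
 * state is tracked so the close and SSR paths stay consistent.
 */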
2384static int fastrpc_glink_open(int cid)
2385{
2386 int err = 0;
2387 void *handle = NULL;
2388 struct fastrpc_apps *me = &gfa;
2389 struct glink_open_config *cfg;
2390 struct fastrpc_glink_info *link;
2391
2392 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2393 if (err)
2394 goto bail;
2395 link = &me->channel[cid].link;
2396 cfg = &me->channel[cid].link.cfg;
2397 VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
2398 if (err)
2399 goto bail;
2400
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05302401 VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
2402 if (err)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002403 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002404
2405 link->port_state = FASTRPC_LINK_CONNECTING;
2406 cfg->priv = (void *)(uintptr_t)cid;
2407 cfg->edge = gcinfo[cid].link.link_info.edge;
2408 cfg->transport = gcinfo[cid].link.link_info.transport;
2409 cfg->name = FASTRPC_GLINK_GUID;
2410 cfg->notify_rx = fastrpc_glink_notify_rx;
2411 cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
2412 cfg->notify_state = fastrpc_glink_notify_state;
2413 cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
2414 handle = glink_open(cfg);
2415 VERIFY(err, !IS_ERR_OR_NULL(handle));
c_mtharu6e1d26b2017-10-09 16:05:24 +05302416 if (err) {
2417 if (link->port_state == FASTRPC_LINK_CONNECTING)
2418 link->port_state = FASTRPC_LINK_DISCONNECTED;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002419 goto bail;
c_mtharu6e1d26b2017-10-09 16:05:24 +05302420 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002421 me->channel[cid].chan = handle;
2422bail:
2423 return err;
2424}
2425
Sathish Ambley1ca68232017-01-19 10:32:55 -08002426static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
2427{
2428 filp->private_data = inode->i_private;
2429 return 0;
2430}
2431
2432static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
2433 size_t count, loff_t *position)
2434{
2435 struct fastrpc_file *fl = filp->private_data;
2436 struct hlist_node *n;
c_mtharue1a5ce12017-10-13 20:47:09 +05302437 struct fastrpc_buf *buf = NULL;
2438 struct fastrpc_mmap *map = NULL;
2439 struct smq_invoke_ctx *ictx = NULL;
Sathish Ambley1ca68232017-01-19 10:32:55 -08002440 struct fastrpc_channel_ctx *chan;
2441 struct fastrpc_session_ctx *sess;
2442 unsigned int len = 0;
2443 int i, j, ret = 0;
2444 char *fileinfo = NULL;
2445
2446 fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
2447 if (!fileinfo)
2448 goto bail;
2449 if (fl == NULL) {
2450 for (i = 0; i < NUM_CHANNELS; i++) {
2451 chan = &gcinfo[i];
2452 len += scnprintf(fileinfo + len,
2453 DEBUGFS_SIZE - len, "%s\n\n",
2454 chan->name);
2455 len += scnprintf(fileinfo + len,
2456 DEBUGFS_SIZE - len, "%s %d\n",
2457 "sesscount:", chan->sesscount);
2458 for (j = 0; j < chan->sesscount; j++) {
2459 sess = &chan->session[j];
2460 len += scnprintf(fileinfo + len,
2461 DEBUGFS_SIZE - len,
2462 "%s%d\n\n", "SESSION", j);
2463 len += scnprintf(fileinfo + len,
2464 DEBUGFS_SIZE - len,
2465 "%s %d\n", "sid:",
2466 sess->smmu.cb);
2467 len += scnprintf(fileinfo + len,
2468 DEBUGFS_SIZE - len,
2469 "%s %d\n", "SECURE:",
2470 sess->smmu.secure);
2471 }
2472 }
2473 } else {
2474 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2475 "%s %d\n\n",
2476 "PROCESS_ID:", fl->tgid);
2477 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2478 "%s %d\n\n",
2479 "CHANNEL_ID:", fl->cid);
2480 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2481 "%s %d\n\n",
2482 "SSRCOUNT:", fl->ssrcount);
2483 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2484 "%s\n",
2485 "LIST OF BUFS:");
2486 spin_lock(&fl->hlock);
2487 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
2488 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Meruguce566452017-08-17 15:29:59 +05302489 "%s %pK %s %pK %s %llx\n", "buf:",
2490 buf, "buf->virt:", buf->virt,
2491 "buf->phys:", buf->phys);
Sathish Ambley1ca68232017-01-19 10:32:55 -08002492 }
2493 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2494 "\n%s\n",
2495 "LIST OF MAPS:");
2496 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
2497 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Meruguce566452017-08-17 15:29:59 +05302498 "%s %pK %s %lx %s %llx\n",
Sathish Ambley1ca68232017-01-19 10:32:55 -08002499 "map:", map,
2500 "map->va:", map->va,
2501 "map->phys:", map->phys);
2502 }
2503 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2504 "\n%s\n",
2505 "LIST OF PENDING SMQCONTEXTS:");
2506 hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
2507 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Meruguce566452017-08-17 15:29:59 +05302508 "%s %pK %s %u %s %u %s %u\n",
Sathish Ambley1ca68232017-01-19 10:32:55 -08002509 "smqcontext:", ictx,
2510 "sc:", ictx->sc,
2511 "tid:", ictx->pid,
2512 "handle", ictx->rpra->h);
2513 }
2514 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2515 "\n%s\n",
2516 "LIST OF INTERRUPTED SMQCONTEXTS:");
2517 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
2518 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Meruguce566452017-08-17 15:29:59 +05302519 "%s %pK %s %u %s %u %s %u\n",
Sathish Ambley1ca68232017-01-19 10:32:55 -08002520 "smqcontext:", ictx,
2521 "sc:", ictx->sc,
2522 "tid:", ictx->pid,
2523 "handle", ictx->rpra->h);
2524 }
2525 spin_unlock(&fl->hlock);
2526 }
2527 if (len > DEBUGFS_SIZE)
2528 len = DEBUGFS_SIZE;
2529 ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
2530 kfree(fileinfo);
2531bail:
2532 return ret;
2533}
2534
2535static const struct file_operations debugfs_fops = {
2536 .open = fastrpc_debugfs_open,
2537 .read = fastrpc_debugfs_read,
2538};
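/*
 * Bring up the channel for this file on first use: register the link
 * state callback, open the glink port, wait for it to connect, queue the
 * initial RX intents and recover the remote heap after an SSR.
 */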
Sathish Ambley36849af2017-02-02 09:35:55 -08002539static int fastrpc_channel_open(struct fastrpc_file *fl)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002540{
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002541 struct fastrpc_apps *me = &gfa;
Sathish Ambley36849af2017-02-02 09:35:55 -08002542 int cid, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002543
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302544 mutex_lock(&me->smd_mutex);
2545
Sathish Ambley36849af2017-02-02 09:35:55 -08002546 VERIFY(err, fl && fl->sctx);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002547 if (err)
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302548 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08002549 cid = fl->cid;
c_mtharu314a4202017-11-15 22:09:17 +05302550 VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
2551 if (err)
2552 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302553 if (me->channel[cid].ssrcount !=
2554 me->channel[cid].prevssrcount) {
2555 if (!me->channel[cid].issubsystemup) {
2556 VERIFY(err, 0);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302557 if (err) {
2558 err = -ENOTCONN;
c_mtharue1a5ce12017-10-13 20:47:09 +05302559 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302560 }
c_mtharue1a5ce12017-10-13 20:47:09 +05302561 }
2562 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002563 fl->ssrcount = me->channel[cid].ssrcount;
2564 if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
c_mtharue1a5ce12017-10-13 20:47:09 +05302565 (me->channel[cid].chan == NULL)) {
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05302566 VERIFY(err, 0 == fastrpc_glink_register(cid, me));
2567 if (err)
2568 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002569 VERIFY(err, 0 == fastrpc_glink_open(cid));
2570 if (err)
2571 goto bail;
2572
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302573 VERIFY(err,
2574 wait_for_completion_timeout(&me->channel[cid].workport,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002575 RPC_TIMEOUT));
2576 if (err) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302577 me->channel[cid].chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002578 goto bail;
2579 }
2580 kref_init(&me->channel[cid].kref);
2581 pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
2582 MAJOR(me->dev_no), cid);
c_mtharu314a4202017-11-15 22:09:17 +05302583 err = glink_queue_rx_intent(me->channel[cid].chan, NULL,
2584 FASTRPC_GLINK_INTENT_LEN);
2585 err |= glink_queue_rx_intent(me->channel[cid].chan, NULL,
2586 FASTRPC_GLINK_INTENT_LEN);
Bruce Levy34c3c1c2017-07-31 17:08:58 -07002587 if (err)
Tharun Kumar Merugu88ba9252017-08-09 12:15:41 +05302588 pr_warn("adsprpc: initial intent fail for %d err %d\n",
2589 cid, err);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002590 if (me->channel[cid].ssrcount !=
2591 me->channel[cid].prevssrcount) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302592 if (fastrpc_mmap_remove_ssr(fl))
2593 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002594 me->channel[cid].prevssrcount =
2595 me->channel[cid].ssrcount;
2596 }
2597 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002598
2599bail:
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302600 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002601 return err;
2602}
2603
Sathish Ambley36849af2017-02-02 09:35:55 -08002604static int fastrpc_device_open(struct inode *inode, struct file *filp)
2605{
2606 int err = 0;
Sathish Ambley567012b2017-03-06 11:55:04 -08002607 struct dentry *debugfs_file;
c_mtharue1a5ce12017-10-13 20:47:09 +05302608 struct fastrpc_file *fl = NULL;
Sathish Ambley36849af2017-02-02 09:35:55 -08002609 struct fastrpc_apps *me = &gfa;
2610
c_mtharue1a5ce12017-10-13 20:47:09 +05302611 VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
Sathish Ambley36849af2017-02-02 09:35:55 -08002612 if (err)
2613 return err;
Sathish Ambley567012b2017-03-06 11:55:04 -08002614 debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
2615 fl, &debugfs_fops);
Sathish Ambley36849af2017-02-02 09:35:55 -08002616 context_list_ctor(&fl->clst);
2617 spin_lock_init(&fl->hlock);
2618 INIT_HLIST_HEAD(&fl->maps);
2619 INIT_HLIST_HEAD(&fl->bufs);
2620 INIT_HLIST_NODE(&fl->hn);
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302621 fl->sessionid = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08002622 fl->tgid = current->tgid;
2623 fl->apps = me;
2624 fl->mode = FASTRPC_MODE_SERIAL;
2625 fl->cid = -1;
Sathish Ambley567012b2017-03-06 11:55:04 -08002626 if (debugfs_file != NULL)
2627 fl->debugfs_file = debugfs_file;
2628 memset(&fl->perf, 0, sizeof(fl->perf));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302629 fl->qos_request = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08002630 filp->private_data = fl;
2631 spin_lock(&me->hlock);
2632 hlist_add_head(&fl->hn, &me->drivers);
2633 spin_unlock(&me->hlock);
2634 return 0;
2635}
2636
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002637static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
2638{
2639 int err = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08002640 uint32_t cid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002641
c_mtharue1a5ce12017-10-13 20:47:09 +05302642 VERIFY(err, fl != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002643 if (err)
2644 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08002645 if (fl->cid == -1) {
2646 cid = *info;
2647 VERIFY(err, cid < NUM_CHANNELS);
2648 if (err)
2649 goto bail;
2650 fl->cid = cid;
2651 fl->ssrcount = fl->apps->channel[cid].ssrcount;
2652 VERIFY(err, !fastrpc_session_alloc_locked(
2653 &fl->apps->channel[cid], 0, &fl->sctx));
2654 if (err)
2655 goto bail;
2656 }
Tharun Kumar Merugu80be7d62017-08-02 11:03:22 +05302657 VERIFY(err, fl->sctx != NULL);
2658 if (err)
2659 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002660 *info = (fl->sctx->smmu.enabled ? 1 : 0);
2661bail:
2662 return err;
2663}
2664
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302665static int fastrpc_internal_control(struct fastrpc_file *fl,
2666 struct fastrpc_ioctl_control *cp)
2667{
2668 int err = 0;
2669 int latency;
2670
2671 VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
2672 if (err)
2673 goto bail;
2674 VERIFY(err, !IS_ERR_OR_NULL(cp));
2675 if (err)
2676 goto bail;
2677
2678 switch (cp->req) {
2679 case FASTRPC_CONTROL_LATENCY:
2680 latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
2681 fl->apps->latency : PM_QOS_DEFAULT_VALUE;
2682 VERIFY(err, latency != 0);
2683 if (err)
2684 goto bail;
2685 if (!fl->qos_request) {
2686 pm_qos_add_request(&fl->pm_qos_req,
2687 PM_QOS_CPU_DMA_LATENCY, latency);
2688 fl->qos_request = 1;
2689 } else
2690 pm_qos_update_request(&fl->pm_qos_req, latency);
2691 break;
2692 default:
2693 err = -ENOTTY;
2694 break;
2695 }
2696bail:
2697 return err;
2698}
2699
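/*
 * Top-level ioctl dispatcher: copies arguments from user space, rejects
 * new requests once the file is being released, and routes to the
 * invoke/mmap/init/control helpers.
 */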
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002700static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
2701 unsigned long ioctl_param)
2702{
2703 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07002704 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002705 struct fastrpc_ioctl_mmap mmap;
2706 struct fastrpc_ioctl_munmap munmap;
c_mtharu7bd6a422017-10-17 18:15:37 +05302707 struct fastrpc_ioctl_munmap_fd munmap_fd;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002708 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002709 struct fastrpc_ioctl_perf perf;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302710 struct fastrpc_ioctl_control cp;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002711 } p;
2712 void *param = (char *)ioctl_param;
2713 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2714 int size = 0, err = 0;
2715 uint32_t info;
2716
c_mtharue1a5ce12017-10-13 20:47:09 +05302717 p.inv.fds = NULL;
2718 p.inv.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002719 p.inv.crc = NULL;
tharun kumar9f899ea2017-07-03 17:07:03 +05302720 spin_lock(&fl->hlock);
2721 if (fl->file_close == 1) {
2722		err = -EBADF;
2723		pr_warn("ADSPRPC: fastrpc_device_release is in progress, not sending any new requests to the DSP\n");
2724 spin_unlock(&fl->hlock);
2725 goto bail;
2726 }
2727 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002728
2729 switch (ioctl_num) {
2730 case FASTRPC_IOCTL_INVOKE:
2731 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07002732 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002733 case FASTRPC_IOCTL_INVOKE_FD:
2734 if (!size)
2735 size = sizeof(struct fastrpc_ioctl_invoke_fd);
2736 /* fall through */
2737 case FASTRPC_IOCTL_INVOKE_ATTRS:
2738 if (!size)
2739 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07002740 /* fall through */
2741 case FASTRPC_IOCTL_INVOKE_CRC:
2742 if (!size)
2743 size = sizeof(struct fastrpc_ioctl_invoke_crc);
c_mtharue1a5ce12017-10-13 20:47:09 +05302744 K_COPY_FROM_USER(err, 0, &p.inv, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002745 if (err)
2746 goto bail;
2747 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
2748 0, &p.inv)));
2749 if (err)
2750 goto bail;
2751 break;
2752 case FASTRPC_IOCTL_MMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05302753 K_COPY_FROM_USER(err, 0, &p.mmap, param,
2754 sizeof(p.mmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05302755 if (err)
2756 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002757 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
2758 if (err)
2759 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302760 K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002761 if (err)
2762 goto bail;
2763 break;
2764 case FASTRPC_IOCTL_MUNMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05302765 K_COPY_FROM_USER(err, 0, &p.munmap, param,
2766 sizeof(p.munmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05302767 if (err)
2768 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002769 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
2770 &p.munmap)));
2771 if (err)
2772 goto bail;
2773 break;
c_mtharu7bd6a422017-10-17 18:15:37 +05302774 case FASTRPC_IOCTL_MUNMAP_FD:
2775 K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
2776 sizeof(p.munmap_fd));
2777 if (err)
2778 goto bail;
2779 VERIFY(err, 0 == (err = fastrpc_internal_munmap_fd(fl,
2780 &p.munmap_fd)));
2781 if (err)
2782 goto bail;
2783 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002784 case FASTRPC_IOCTL_SETMODE:
2785 switch ((uint32_t)ioctl_param) {
2786 case FASTRPC_MODE_PARALLEL:
2787 case FASTRPC_MODE_SERIAL:
2788 fl->mode = (uint32_t)ioctl_param;
2789 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002790 case FASTRPC_MODE_PROFILE:
2791 fl->profile = (uint32_t)ioctl_param;
2792 break;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302793 case FASTRPC_MODE_SESSION:
2794 fl->sessionid = 1;
2795 fl->tgid |= (1 << SESSION_ID_INDEX);
2796 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002797 default:
2798 err = -ENOTTY;
2799 break;
2800 }
2801 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002802 case FASTRPC_IOCTL_GETPERF:
c_mtharue1a5ce12017-10-13 20:47:09 +05302803 K_COPY_FROM_USER(err, 0, &p.perf,
2804 param, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002805 if (err)
2806 goto bail;
2807 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
2808 if (p.perf.keys) {
2809 char *keys = PERF_KEYS;
2810
c_mtharue1a5ce12017-10-13 20:47:09 +05302811 K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
2812 keys, strlen(keys)+1);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002813 if (err)
2814 goto bail;
2815 }
2816 if (p.perf.data) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302817 K_COPY_TO_USER(err, 0, (void *)p.perf.data,
2818 &fl->perf, sizeof(fl->perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002819 }
c_mtharue1a5ce12017-10-13 20:47:09 +05302820 K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002821 if (err)
2822 goto bail;
2823 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302824 case FASTRPC_IOCTL_CONTROL:
c_mtharue1a5ce12017-10-13 20:47:09 +05302825 K_COPY_FROM_USER(err, 0, &p.cp, param,
2826 sizeof(p.cp));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302827 if (err)
2828 goto bail;
2829 VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
2830 if (err)
2831 goto bail;
2832 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002833 case FASTRPC_IOCTL_GETINFO:
c_mtharue1a5ce12017-10-13 20:47:09 +05302834 K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
Sathish Ambley36849af2017-02-02 09:35:55 -08002835 if (err)
2836 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002837 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
2838 if (err)
2839 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302840 K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002841 if (err)
2842 goto bail;
2843 break;
2844 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002845 p.init.attrs = 0;
2846 p.init.siglen = 0;
2847 size = sizeof(struct fastrpc_ioctl_init);
2848 /* fall through */
2849 case FASTRPC_IOCTL_INIT_ATTRS:
2850 if (!size)
2851 size = sizeof(struct fastrpc_ioctl_init_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +05302852 K_COPY_FROM_USER(err, 0, &p.init, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002853 if (err)
2854 goto bail;
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05302855 VERIFY(err, p.init.init.filelen >= 0 &&
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05302856 p.init.init.filelen < INIT_FILELEN_MAX);
2857 if (err)
2858 goto bail;
2859 VERIFY(err, p.init.init.memlen >= 0 &&
2860 p.init.init.memlen < INIT_MEMLEN_MAX);
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05302861 if (err)
2862 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302863 VERIFY(err, 0 == (err = fastrpc_init_process(fl, &p.init)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002864 if (err)
2865 goto bail;
2866 break;
2867
2868 default:
2869 err = -ENOTTY;
2870		pr_info("bad ioctl: %u\n", ioctl_num);
2871 break;
2872 }
2873 bail:
2874 return err;
2875}
2876
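/*
 * Subsystem-restart notifier: on shutdown bump the SSR count, close the
 * glink channel and notify waiting clients; ramdump and power-up events
 * update the per-channel state.
 */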
2877static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
2878 unsigned long code,
2879 void *data)
2880{
2881 struct fastrpc_apps *me = &gfa;
2882 struct fastrpc_channel_ctx *ctx;
c_mtharue1a5ce12017-10-13 20:47:09 +05302883 struct notif_data *notifdata = data;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002884 int cid;
2885
2886 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
2887 cid = ctx - &me->channel[0];
2888 if (code == SUBSYS_BEFORE_SHUTDOWN) {
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302889 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002890 ctx->ssrcount++;
c_mtharue1a5ce12017-10-13 20:47:09 +05302891 ctx->issubsystemup = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302892 if (ctx->chan) {
2893 fastrpc_glink_close(ctx->chan, cid);
2894 ctx->chan = NULL;
2895 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
2896 gcinfo[cid].name, MAJOR(me->dev_no), cid);
2897 }
2898 mutex_unlock(&me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302899 if (cid == 0)
2900 me->staticpd_flags = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002901 fastrpc_notify_drivers(me, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05302902 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
2903 if (me->channel[0].remoteheap_ramdump_dev &&
2904 notifdata->enable_ramdump) {
2905 me->channel[0].ramdumpenabled = 1;
2906 }
2907 } else if (code == SUBSYS_AFTER_POWERUP) {
2908 ctx->issubsystemup = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002909 }
2910
2911 return NOTIFY_DONE;
2912}
2913
2914static const struct file_operations fops = {
2915 .open = fastrpc_device_open,
2916 .release = fastrpc_device_release,
2917 .unlocked_ioctl = fastrpc_device_ioctl,
2918 .compat_ioctl = compat_fastrpc_device_ioctl,
2919};
2920
2921static const struct of_device_id fastrpc_match_table[] = {
2922 { .compatible = "qcom,msm-fastrpc-adsp", },
2923 { .compatible = "qcom,msm-fastrpc-compute", },
2924 { .compatible = "qcom,msm-fastrpc-compute-cb", },
2925 { .compatible = "qcom,msm-adsprpc-mem-region", },
2926 {}
2927};
2928
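/*
 * Probe one SMMU context bank: match it to its channel by label, parse
 * the iommus phandle, create and attach an IOMMU mapping (secure banks
 * use a different VA base and the CP_PIXEL VMID) and add the session.
 */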
2929static int fastrpc_cb_probe(struct device *dev)
2930{
2931 struct fastrpc_channel_ctx *chan;
2932 struct fastrpc_session_ctx *sess;
2933 struct of_phandle_args iommuspec;
2934 const char *name;
2935 unsigned int start = 0x80000000;
2936 int err = 0, i;
2937 int secure_vmid = VMID_CP_PIXEL;
2938
c_mtharue1a5ce12017-10-13 20:47:09 +05302939 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
2940 "label", NULL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002941 if (err)
2942 goto bail;
2943 for (i = 0; i < NUM_CHANNELS; i++) {
2944 if (!gcinfo[i].name)
2945 continue;
2946 if (!strcmp(name, gcinfo[i].name))
2947 break;
2948 }
2949 VERIFY(err, i < NUM_CHANNELS);
2950 if (err)
2951 goto bail;
2952 chan = &gcinfo[i];
2953 VERIFY(err, chan->sesscount < NUM_SESSIONS);
2954 if (err)
2955 goto bail;
2956
2957 VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
2958 "#iommu-cells", 0, &iommuspec));
2959 if (err)
2960 goto bail;
2961 sess = &chan->session[chan->sesscount];
2962 sess->smmu.cb = iommuspec.args[0] & 0xf;
2963 sess->used = 0;
2964 sess->smmu.coherent = of_property_read_bool(dev->of_node,
2965 "dma-coherent");
2966 sess->smmu.secure = of_property_read_bool(dev->of_node,
2967 "qcom,secure-context-bank");
2968 if (sess->smmu.secure)
2969 start = 0x60000000;
2970 VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
2971 arm_iommu_create_mapping(&platform_bus_type,
Tharun Kumar Meruguca183f92017-04-27 17:43:27 +05302972 start, 0x78000000)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002973 if (err)
2974 goto bail;
2975
2976 if (sess->smmu.secure)
2977 iommu_domain_set_attr(sess->smmu.mapping->domain,
2978 DOMAIN_ATTR_SECURE_VMID,
2979 &secure_vmid);
2980
2981 VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
2982 if (err)
2983 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302984 sess->smmu.dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002985 sess->smmu.enabled = 1;
2986 chan->sesscount++;
Sathish Ambley1ca68232017-01-19 10:32:55 -08002987 debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
2988 NULL, &debugfs_fops);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002989bail:
2990 return err;
2991}
2992
2993static int fastrpc_probe(struct platform_device *pdev)
2994{
2995 int err = 0;
2996 struct fastrpc_apps *me = &gfa;
2997 struct device *dev = &pdev->dev;
2998 struct smq_phy_page range;
2999 struct device_node *ion_node, *node;
3000 struct platform_device *ion_pdev;
3001 struct cma *cma;
3002 uint32_t val;
3003
c_mtharu63ffc012017-11-16 15:26:56 +05303004
3005 if (of_device_is_compatible(dev->of_node,
3006 "qcom,msm-fastrpc-compute")) {
3007 of_property_read_u32(dev->of_node, "qcom,adsp-remoteheap-vmid",
3008 &gcinfo[0].rhvmid);
3009
3010		pr_info("ADSPRPC: vmids adsp=%d\n", gcinfo[0].rhvmid);
3011
3012 of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
3013 &me->latency);
3014 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003015 if (of_device_is_compatible(dev->of_node,
3016 "qcom,msm-fastrpc-compute-cb"))
3017 return fastrpc_cb_probe(dev);
3018
3019 if (of_device_is_compatible(dev->of_node,
3020 "qcom,msm-adsprpc-mem-region")) {
3021 me->dev = dev;
3022 range.addr = 0;
3023 ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
3024 if (ion_node) {
3025 for_each_available_child_of_node(ion_node, node) {
3026 if (of_property_read_u32(node, "reg", &val))
3027 continue;
3028 if (val != ION_ADSP_HEAP_ID)
3029 continue;
3030 ion_pdev = of_find_device_by_node(node);
3031 if (!ion_pdev)
3032 break;
3033 cma = dev_get_cma_area(&ion_pdev->dev);
3034 if (cma) {
3035 range.addr = cma_get_base(cma);
3036 range.size = (size_t)cma_get_size(cma);
3037 }
3038 break;
3039 }
3040 }
3041 if (range.addr) {
3042 int srcVM[1] = {VMID_HLOS};
3043 int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
3044 VMID_ADSP_Q6};
Sathish Ambley84d11862017-05-15 14:36:05 -07003045 int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003046 PERM_READ | PERM_WRITE | PERM_EXEC,
3047 PERM_READ | PERM_WRITE | PERM_EXEC,
3048 PERM_READ | PERM_WRITE | PERM_EXEC,
3049 };
3050
3051 VERIFY(err, !hyp_assign_phys(range.addr, range.size,
3052 srcVM, 1, destVM, destVMperm, 4));
3053 if (err)
3054 goto bail;
3055 }
3056 return 0;
3057 }
3058
3059 VERIFY(err, !of_platform_populate(pdev->dev.of_node,
3060 fastrpc_match_table,
3061 NULL, &pdev->dev));
3062 if (err)
3063 goto bail;
3064bail:
3065 return err;
3066}
3067
static void fastrpc_deinit(void)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *chan = gcinfo;
	int i, j;

	for (i = 0; i < NUM_CHANNELS; i++, chan++) {
		if (chan->chan) {
			kref_put_mutex(&chan->kref,
				fastrpc_channel_close, &me->smd_mutex);
			chan->chan = NULL;
		}
		for (j = 0; j < NUM_SESSIONS; j++) {
			struct fastrpc_session_ctx *sess = &chan->session[j];

			if (sess->smmu.dev) {
				arm_iommu_detach_device(sess->smmu.dev);
				sess->smmu.dev = NULL;
			}
			if (sess->smmu.mapping) {
				arm_iommu_release_mapping(sess->smmu.mapping);
				sess->smmu.mapping = NULL;
			}
		}
	}
}

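/*
 * Single platform driver bound via fastrpc_match_table; the per-node
 * dispatch on the compatible string happens in fastrpc_probe() above.
 */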
static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};

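/*
 * Module init: register the platform driver, create the character
 * device and "fastrpc" class, hook every channel into the
 * subsystem-restart notifier chain, then create the ION client and
 * the "adsprpc" debugfs directory.  Each failure path unwinds the
 * steps completed so far through the *_bail labels.
 */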
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	struct device *dev = NULL;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				1));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	dev = device_create(me->class, NULL,
				MKDEV(MAJOR(me->dev_no), 0),
				NULL, gcinfo[0].name);
	VERIFY(err, !IS_ERR_OR_NULL(dev));
	if (err)
		goto device_create_bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		me->channel[i].dev = dev;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].issubsystemup = 1;
		me->channel[i].ramdumpenabled = 0;
		me->channel[i].remoteheap_ramdump_dev = NULL;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	debugfs_root = debugfs_create_dir("adsprpc", NULL);
	return 0;
device_create_bail:
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (me->channel[i].handle)
			subsys_notif_unregister_notifier(me->channel[i].handle,
							&me->channel[i].nb);
	}
	if (!IS_ERR_OR_NULL(dev))
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}

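/*
 * Module exit: tear down open files, channel/SMMU state, the device
 * node, class, character device region, ION client and debugfs
 * entries, roughly in the reverse order of fastrpc_device_init().
 */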
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;
	int i;

	fastrpc_file_list_dtor(me);
	fastrpc_deinit();
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						&me->channel[i].nb);
	}
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
	ion_client_destroy(me->client);
	debugfs_remove_recursive(debugfs_root);
}

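/*
 * Registered at late_initcall level, i.e. after core, subsystem and
 * device initcalls have run, so the frameworks used above are
 * expected to be available by the time this driver probes.
 */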
late_initcall(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");