blob: befe5b8dfda1c5f2c9c97664aae26b5c003e8c6c [file] [log] [blame]
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001/*
Tharun Kumar Merugubcd6fbf2018-01-04 17:49:34 +05302 * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
29#include <soc/qcom/subsystem_notif.h>
30#include <soc/qcom/subsystem_restart.h>
31#include <linux/scatterlist.h>
32#include <linux/fs.h>
33#include <linux/uaccess.h>
34#include <linux/device.h>
35#include <linux/of.h>
36#include <linux/of_address.h>
37#include <linux/of_platform.h>
38#include <linux/dma-contiguous.h>
39#include <linux/cma.h>
40#include <linux/iommu.h>
41#include <linux/kref.h>
42#include <linux/sort.h>
43#include <linux/msm_dma_iommu_mapping.h>
44#include <asm/dma-iommu.h>
c_mtharue1a5ce12017-10-13 20:47:09 +053045#include <soc/qcom/scm.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070046#include "adsprpc_compat.h"
47#include "adsprpc_shared.h"
c_mtharue1a5ce12017-10-13 20:47:09 +053048#include <soc/qcom/ramdump.h>
Sathish Ambley1ca68232017-01-19 10:32:55 -080049#include <linux/debugfs.h>
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053050#include <linux/pm_qos.h>
/* SCM/TrustZone service IDs used when (un)protecting subsystem memory. */
#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
#define TZ_PIL_AUTH_QDSP6_PROC 1
/* mmap flags selecting driver-owned remote-heap allocations. */
#define ADSP_MMAP_HEAP_ADDR 4
#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
#define FASTRPC_ENOSUCH 39
/* Hypervisor VM identifiers for the DSP subsystems. */
#define VMID_SSC_Q6 5
#define VMID_ADSP_Q6 6
#define DEBUGFS_SIZE 1024

#define RPC_TIMEOUT (5 * HZ)
#define BALIGN 128
#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
#define M_FDLIST (16)
#define M_CRCLIST (64)
#define SESSION_ID_INDEX (30)
/* Stored in smq_invoke_ctx.magic while a context is live (sanity check). */
#define FASTRPC_CTX_MAGIC (0xbeeddeed)

#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)

/* glink link and port state machine values (see fastrpc_glink_info). */
#define FASTRPC_LINK_STATE_DOWN (0x0)
#define FASTRPC_LINK_STATE_UP (0x1)
#define FASTRPC_LINK_DISCONNECTED (0x0)
#define FASTRPC_LINK_CONNECTING (0x1)
#define FASTRPC_LINK_CONNECTED (0x3)
#define FASTRPC_LINK_DISCONNECTING (0x7)
#define FASTRPC_LINK_REMOTE_DISCONNECTING (0x8)
#define FASTRPC_GLINK_INTENT_LEN (64)

/* Field names exported through debugfs for struct fastrpc_perf counters. */
#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
#define FASTRPC_STATIC_HANDLE_LISTENER (3)
#define FASTRPC_STATIC_HANDLE_MAX (20)
#define FASTRPC_LATENCY_CTRL_ENB (1)

#define INIT_FILELEN_MAX (2*1024*1024)
#define INIT_MEMLEN_MAX (8*1024*1024)

#define PERF_END (void)0

/*
 * Time the statement(s) ff and accumulate the elapsed nanoseconds into
 * cnt when profiling is enabled (enb).  Expands to a bare brace block,
 * so use sites must treat it as a full statement.
 */
#define PERF(enb, cnt, ff) \
	{\
		struct timespec startT = {0};\
		if (enb) {\
			getnstimeofday(&startT);\
		} \
		ff ;\
		if (enb) {\
			cnt += getnstimediff(&startT);\
		} \
	}
102
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700103static int fastrpc_glink_open(int cid);
104static void fastrpc_glink_close(void *chan, int cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -0800105static struct dentry *debugfs_root;
106static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700107
108static inline uint64_t buf_page_start(uint64_t buf)
109{
110 uint64_t start = (uint64_t) buf & PAGE_MASK;
111 return start;
112}
113
114static inline uint64_t buf_page_offset(uint64_t buf)
115{
116 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
117 return offset;
118}
119
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530120static inline uint64_t buf_num_pages(uint64_t buf, size_t len)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700121{
122 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
123 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530124 uint64_t nPages = end - start + 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700125 return nPages;
126}
127
128static inline uint64_t buf_page_size(uint32_t size)
129{
130 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
131
132 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
133}
134
/* Convert a 64-bit address value back into a kernel pointer. */
static inline void *uint64_to_ptr(uint64_t addr)
{
	return (void *)(uintptr_t)addr;
}
141
/* Widen a kernel pointer to a 64-bit value for the DSP message format. */
static inline uint64_t ptr_to_uint64(void *ptr)
{
	return (uint64_t)(uintptr_t)ptr;
}
148
struct fastrpc_file;

/*
 * One DMA-coherent scratch buffer owned by a client file.  Freed buffers
 * may be cached on fl->bufs for reuse (see fastrpc_buf_free/alloc).
 */
struct fastrpc_buf {
	struct hlist_node hn;		/* link on fl->bufs cache list */
	struct fastrpc_file *fl;	/* owning client file */
	void *virt;			/* kernel virtual address */
	uint64_t phys;			/* device address (may embed SMMU cb in bits 32+) */
	size_t size;
};

struct fastrpc_ctx_lst;

/*
 * Overlap bookkeeping for one invoke argument buffer; overlapping user
 * buffers are merged so each byte range is copied/flushed once
 * (computed in context_build_overlap).
 */
struct overlap {
	uintptr_t start;	/* user VA of buffer start */
	uintptr_t end;		/* user VA one past buffer end */
	int raix;		/* index of this arg in the remote arg list */
	uintptr_t mstart;	/* start of the non-overlapped (merged) region */
	uintptr_t mend;		/* end of merged region; 0 if fully covered */
	uintptr_t offset;	/* bytes of this buffer covered by earlier ones */
};

/* Per-invocation state for one remote call in flight. */
struct smq_invoke_ctx {
	struct hlist_node hn;		/* on clst.pending or clst.interrupted */
	struct completion work;		/* signalled when the DSP responds */
	int retval;			/* remote return value */
	int pid;
	int tgid;
	remote_arg_t *lpra;		/* local (user-facing) argument list */
	remote_arg64_t *rpra;		/* 64-bit args marshalled for the DSP */
	int *fds;			/* per-arg ion fds, one per buffer */
	unsigned int *attrs;		/* per-arg FASTRPC_ATTR_* flags */
	struct fastrpc_mmap **maps;	/* per-arg mappings */
	struct fastrpc_buf *buf;	/* marshalling buffer */
	size_t used;
	struct fastrpc_file *fl;
	uint32_t sc;			/* scalars word (method + buffer counts) */
	struct overlap *overs;
	struct overlap **overps;	/* overs sorted by overlap_ptr_cmp */
	struct smq_msg msg;
	uint32_t *crc;			/* optional user crc list to fill */
	unsigned int magic;		/* FASTRPC_CTX_MAGIC while live */
};

/* Pending and interrupted invocation contexts for one client file. */
struct fastrpc_ctx_lst {
	struct hlist_head pending;
	struct hlist_head interrupted;
};
196
/* SMMU context-bank configuration for one session. */
struct fastrpc_smmu {
	struct device *dev;		/* device used for DMA mapping */
	struct dma_iommu_mapping *mapping;
	int cb;				/* context bank id; ORed into phys bits 32+ */
	int enabled;
	int faults;
	int secure;			/* services secure (CPZ) buffers */
	int coherent;			/* IO-coherent context bank */
};

/* One compute session on a channel; 'used' marks it as allocated. */
struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};

/* glink transport state for one channel (see FASTRPC_LINK_* values). */
struct fastrpc_glink_info {
	int link_state;
	int port_state;
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};

/* Per-DSP channel state; one entry in gcinfo per remote subsystem. */
struct fastrpc_channel_ctx {
	char *name;			/* glink port name */
	char *subsys;			/* subsystem-restart name */
	void *chan;			/* open glink channel handle */
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;
	struct completion workport;
	struct notifier_block nb;	/* SSR notifier */
	struct kref kref;		/* open-file refcount on the channel */
	int sesscount;
	int ssrcount;			/* bumped on each subsystem restart */
	void *handle;
	int prevssrcount;
	int issubsystemup;
	int vmid;			/* hyp VM id for buffer assignment, 0 if none */
	int rhvmid;			/* VM id for the remote heap */
	int ramdumpenabled;
	void *remoteheap_ramdump_dev;
	struct fastrpc_glink_info link;
};
242
/* Driver-global state (singleton gfa). */
struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;	/* points at gcinfo */
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;		/* global (remote-heap) mappings */
	uint32_t staticpd_flags;
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;	/* all open fastrpc_file instances */
	spinlock_t hlock;		/* protects drivers and maps lists */
	struct ion_client *client;
	struct device *dev;		/* adsprpc-mem device for remote heap */
	unsigned int latency;		/* pm_qos latency request value */
};

/* One mapped buffer (ion import or driver-allocated remote heap). */
struct fastrpc_mmap {
	struct hlist_node hn;		/* on fl->maps or gfa.maps */
	struct fastrpc_file *fl;	/* NULL for global remote-heap maps */
	struct fastrpc_apps *apps;
	int fd;				/* ion fd backing the map */
	uint32_t flags;			/* ADSP_MMAP_* allocation kind */
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;			/* device address (may embed SMMU cb) */
	size_t size;			/* mapped size */
	uintptr_t va;			/* user VA (or kernel VA for remote heap) */
	size_t len;			/* requested length */
	int refs;
	uintptr_t raddr;		/* address on the remote (DSP) side */
	int uncached;
	int secure;
	uintptr_t attr;			/* FASTRPC_ATTR_* flags */
};

/* Per-file profiling counters, all in nanoseconds (see PERF_KEYS). */
struct fastrpc_perf {
	int64_t count;
	int64_t flush;
	int64_t map;
	int64_t copy;
	int64_t link;
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;
	int64_t invoke;
};

/* Per-open-fd client state. */
struct fastrpc_file {
	struct hlist_node hn;		/* on gfa.drivers */
	spinlock_t hlock;		/* protects maps, bufs, clst */
	struct hlist_head maps;
	struct hlist_head bufs;		/* cached scratch buffers */
	struct fastrpc_ctx_lst clst;
	struct fastrpc_session_ctx *sctx;	/* non-secure session */
	struct fastrpc_session_ctx *secsctx;	/* secure session, lazily set up */
	uint32_t mode;
	uint32_t profile;		/* non-zero enables PERF() timing */
	int sessionid;
	int tgid;
	int cid;			/* channel index into gcinfo */
	int ssrcount;			/* channel ssrcount at open time */
	int pd;
	int file_close;
	struct fastrpc_apps *apps;
	struct fastrpc_perf perf;
	struct dentry *debugfs_file;
	struct pm_qos_request pm_qos_req;
	int qos_request;
};
315
/* Singleton driver state. */
static struct fastrpc_apps gfa;

/*
 * Static channel table, indexed by channel id (cid): adsp, mdsp, slpi,
 * cdsp.  Each entry names the glink edge/transport and the
 * subsystem-restart domain for that DSP.
 */
static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};
344
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800345static inline int64_t getnstimediff(struct timespec *start)
346{
347 int64_t ns;
348 struct timespec ts, b;
349
350 getnstimeofday(&ts);
351 b = timespec_sub(ts, *start);
352 ns = timespec_to_ns(&b);
353 return ns;
354}
355
/*
 * Release a scratch buffer.  If @cache is set the buffer is parked on
 * fl->bufs for reuse instead of being freed.  Otherwise ownership of the
 * pages is handed back to HLOS (if the channel runs under a hyp VM) and
 * the coherent allocation is returned to the DMA pool.
 */
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		/* Strip the SMMU context-bank id that fastrpc_buf_alloc
		 * folded into the upper address bits before freeing.
		 */
		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			/* Reclaim the pages from the DSP VM back to HLOS. */
			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->smmu.dev, buf->size, buf->virt,
					buf->phys);
	}
	kfree(buf);
}
387
/*
 * Drain the per-file buffer cache.  Buffers are popped one at a time
 * under the spinlock and freed outside it, because fastrpc_buf_free(.,0)
 * performs DMA/hyp operations that must not run under a spinlock.
 */
static void fastrpc_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			/* take only the first entry, then drop the lock */
			hlist_del_init(&buf->hn);
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}
407
/*
 * Publish a mapping on the appropriate list: remote-heap mappings are
 * global (gfa.maps, gfa.hlock), everything else is per-file (fl->maps,
 * fl->hlock).
 */
static void fastrpc_mmap_add(struct fastrpc_mmap *map)
{
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		struct fastrpc_apps *me = &gfa;

		spin_lock(&me->hlock);
		hlist_add_head(&map->hn, &me->maps);
		spin_unlock(&me->hlock);
	} else {
		struct fastrpc_file *fl = map->fl;

		spin_lock(&fl->hlock);
		hlist_add_head(&map->hn, &fl->maps);
		spin_unlock(&fl->hlock);
	}
}
425
/*
 * Look up an existing mapping that fully covers [va, va+len) for fd.
 * Remote-heap flags search the global list; everything else searches the
 * per-file list.  When @refs is set the match's refcount is bumped under
 * the same lock.  Returns 0 with *ppmap set, -EOVERFLOW if va+len wraps,
 * or -ENOTTY when no mapping matches.
 */
static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
			uintptr_t va, size_t len, int mflags, int refs,
			struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_mmap *match = NULL, *map = NULL;
	struct hlist_node *n;

	if ((va + len) < va)
		return -EOVERFLOW;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
			/* requested range must lie inside the mapping */
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs)
					map->refs++;
				match = map;
				break;
			}
		}
		spin_unlock(&me->hlock);
	} else {
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs)
					map->refs++;
				match = map;
				break;
			}
		}
		spin_unlock(&fl->hlock);
	}
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}
470
/*
 * Allocate @size bytes of DMA-coherent memory from the adsprpc-mem
 * device for the remote heap.  Returns 0 and fills *region_phys /
 * *vaddr, -ENODEV if the device is absent, or -ENOMEM on failure.
 */
static int dma_alloc_memory(dma_addr_t *region_phys, void **vaddr, size_t size)
{
	struct fastrpc_apps *me = &gfa;

	if (me->dev == NULL) {
		pr_err("device adsprpc-mem is not initialized\n");
		return -ENODEV;
	}
	*vaddr = dma_alloc_coherent(me->dev, size, region_phys, GFP_KERNEL);
	if (!*vaddr) {
		pr_err("ADSPRPC: Failed to allocate %x remote heap memory\n",
				(unsigned int)size);
		return -ENOMEM;
	}
	return 0;
}
487
/*
 * Find and unlink the mapping whose remote range is exactly
 * [va, va+len) and whose refcount is 1 (i.e. removable).  The global
 * remote-heap list is searched first, then the per-file list.  The
 * caller receives ownership of the unlinked map in *ppmap; returns
 * -ENOTTY when nothing matches.
 */
static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       size_t len, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = NULL, *map;
	struct hlist_node *n;
	struct fastrpc_apps *me = &gfa;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}
527
/*
 * Drop one reference on a mapping and release it when the count hits
 * zero (or unconditionally for per-file maps when @flags is non-zero,
 * used to force-release persistent FASTRPC_ATTR_KEEP_MAP buffers).
 * Remote-heap maps free their coherent region; ion-backed maps undo the
 * ion/dma-buf/SMMU/hyp setup performed in fastrpc_mmap_create.
 */
static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;

	if (!map)
		return;
	fl = map->fl;
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		spin_unlock(&me->hlock);
		if (map->refs > 0)
			return;
	} else {
		spin_lock(&fl->hlock);
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		spin_unlock(&fl->hlock);
		/* non-zero flags forces release of persistent mappings */
		if (map->refs > 0 && !flags)
			return;
	}
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {

		if (me->dev == NULL) {
			/* NOTE(review): returning here skips the final
			 * kfree(map) — looks like a leak on this error
			 * path; confirm against upstream.
			 */
			pr_err("failed to free remote heap allocation\n");
			return;
		}
		if (map->phys) {
			dma_free_coherent(me->dev, map->size,
				(void *)map->va, (dma_addr_t)map->phys);
		}
	} else {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		if (!IS_ERR_OR_NULL(map->handle))
			ion_free(fl->apps->client, map->handle);
		if (sess && sess->smmu.enabled) {
			if (map->size || map->phys)
				msm_dma_unmap_sg(sess->smmu.dev,
					map->table->sgl,
					map->table->nents, DMA_BIDIRECTIONAL,
					map->buf);
		}
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid && map->phys) {
			int srcVM[2] = {VMID_HLOS, vmid};

			/* give the pages back to HLOS from the DSP VM */
			hyp_assign_phys(map->phys, buf_page_size(map->size),
				srcVM, 2, destVM, destVMperm, 1);
		}

		if (!IS_ERR_OR_NULL(map->table))
			dma_buf_unmap_attachment(map->attach, map->table,
					DMA_BIDIRECTIONAL);
		if (!IS_ERR_OR_NULL(map->attach))
			dma_buf_detach(map->buf, map->attach);
		if (!IS_ERR_OR_NULL(map->buf))
			dma_buf_put(map->buf);
	}
	kfree(map);
}
603
604static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
605 struct fastrpc_session_ctx **session);
606
/*
 * Create (or find and reuse) a mapping for [va, va+len) backed by ion
 * fd @fd.  Remote-heap flags allocate driver-owned coherent memory via
 * dma_alloc_memory; all other flags import the ion buffer, attach it to
 * the session's SMMU device, map it for DMA, and — when the channel has
 * a hyp VM — share the pages with that VM.  On success *ppmap holds a
 * referenced mapping; on error all partial state is undone through
 * fastrpc_mmap_free.
 */
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, size_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = NULL;
	unsigned long attrs;
	dma_addr_t region_phys = 0;
	void *region_vaddr = NULL;
	unsigned long flags;
	int err = 0, vmid;

	/* reuse an existing covering mapping (takes a reference) */
	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		/* driver-owned remote heap: global map, no owning file */
		map->apps = me;
		map->fl = NULL;
		VERIFY(err, !dma_alloc_memory(&region_phys, &region_vaddr,
				 len));
		if (err)
			goto bail;
		map->phys = (uintptr_t)region_phys;
		map->size = len;
		map->va = (uintptr_t)region_vaddr;
	} else {
		if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
			pr_info("adsprpc: buffer mapped with persist attr %x\n",
				(unsigned int)map->attr);
			/* extra ref keeps the map alive across put calls */
			map->refs = 2;
		}
		VERIFY(err, !IS_ERR_OR_NULL(map->handle =
				ion_import_dma_buf_fd(fl->apps->client, fd)));
		if (err)
			goto bail;
		VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
		if (err)
			goto bail;

		map->secure = flags & ION_FLAG_SECURE;
		if (map->secure) {
			/* lazily set up the secure session on first use */
			if (!fl->secsctx)
				err = fastrpc_session_alloc(chan, 1,
							&fl->secsctx);
			if (err)
				goto bail;
		}
		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		VERIFY(err, !IS_ERR_OR_NULL(sess));
		if (err)
			goto bail;

		map->uncached = !ION_IS_CACHED(flags);
		if (map->attr & FASTRPC_ATTR_NOVA && !sess->smmu.coherent)
			map->uncached = 1;

		VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->attach =
				dma_buf_attach(map->buf, sess->smmu.dev)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
		if (err)
			goto bail;
		if (sess->smmu.enabled) {
			attrs = DMA_ATTR_EXEC_MAPPING;

			/* choose coherency per buffer attrs vs. session */
			if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
				(sess->smmu.coherent && map->uncached))
				attrs |= DMA_ATTR_FORCE_NON_COHERENT;
			else if (map->attr & FASTRPC_ATTR_COHERENT)
				attrs |= DMA_ATTR_FORCE_COHERENT;

			VERIFY(err, map->table->nents ==
				msm_dma_map_sg_attrs(sess->smmu.dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
			if (err)
				goto bail;
		} else {
			/* without an SMMU the buffer must be contiguous */
			VERIFY(err, map->table->nents == 1);
			if (err)
				goto bail;
		}
		map->phys = sg_dma_address(map->table->sgl);
		if (sess->smmu.cb) {
			/* tag the address with the context-bank id */
			map->phys += ((uint64_t)sess->smmu.cb << 32);
			map->size = sg_dma_len(map->table->sgl);
		} else {
			map->size = buf_page_size(len);
		}
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[2] = {VMID_HLOS, vmid};
			int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

			/* share the pages with the DSP VM */
			VERIFY(err, !hyp_assign_phys(map->phys,
					buf_page_size(map->size),
					srcVM, 1, destVM, destVMperm, 2));
			if (err)
				goto bail;
		}
		map->va = va;
	}
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map, 0);
	return err;
}
746
/*
 * Get a DMA-coherent scratch buffer of at least @size bytes.  The
 * smallest cached buffer that fits is reused; otherwise a new one is
 * allocated (retrying once after draining the cache) and, when the
 * channel has a hyp VM, shared with it.  Returns 0 with *obuf set.
 */
static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
			     struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = NULL, *fr = NULL;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = NULL;
	VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
				       (void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
					       (void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	/* tag the device address with the SMMU context-bank id */
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		/* share the new buffer with the DSP VM */
		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
 bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}
812
813
/*
 * If the calling thread previously had an invocation interrupted (e.g.
 * by a signal), move that context back to the pending list and hand it
 * to the caller so the invoke can resume instead of starting over.
 * Returns 0 (with *po set, or unset if the thread had none) or -1 when
 * the found context does not match this invoke's scalars/file.
 */
static int context_restore_interrupted(struct fastrpc_file *fl,
				struct fastrpc_ioctl_invoke_crc *inv,
				struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		/* match on the interrupting thread's pid */
		if (ictx->pid == current->pid) {
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}
841
842#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
843static int overlap_ptr_cmp(const void *a, const void *b)
844{
845 struct overlap *pa = *((struct overlap **)a);
846 struct overlap *pb = *((struct overlap **)b);
847 /* sort with lowest starting buffer first */
848 int st = CMP(pa->start, pb->start);
849 /* sort with highest ending buffer first */
850 int ed = CMP(pb->end, pa->end);
851 return st == 0 ? ed : st;
852}
853
/*
 * Compute overlap information for every in/out buffer of the context.
 * After sorting buffers by start address (widest first on ties), each
 * buffer's [mstart, mend) is the sub-range not already covered by an
 * earlier buffer, so overlapping bytes are marshalled only once.
 * Returns 0, or an error if a non-empty buffer's end wraps around.
 */
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			/* reject address-range wraparound */
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
	/* max tracks the highest address covered so far */
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			/* partial overlap: only [max.end, end) is new */
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				/* fully contained in an earlier buffer */
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			/* disjoint: the whole buffer is new */
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}
899
/*
 * Copy helpers that work for both user-space and in-kernel callers:
 * when 'kernel' is false the data crosses the user boundary via
 * copy_from_user/copy_to_user (recording failure in 'err' through
 * VERIFY); when true both pointers are kernel memory and a plain
 * memmove suffices.
 */
#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst),\
			(void const __user *)(src),\
			(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((void __user *)(dst), \
			(src), (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)
918
919
920static void context_free(struct smq_invoke_ctx *ctx);
921
/*
 * context_alloc() - allocate and initialize an invoke context for one
 * remote call described by @invokefd.
 *
 * The context header and all of its variable-length arrays (maps, lpra,
 * fds, attrs, overs, overps) are carved out of a single kzalloc()
 * allocation; the pointer arithmetic below lays them out back-to-back
 * directly after the struct.  The remote-arg array (and optional fd/attr
 * arrays) are copied in from @invokefd, overlap metadata is built, and the
 * context is queued on the file's pending list.
 *
 * @fl:       per-device-fd state owning the new context
 * @kernel:   non-zero when the arg pointers are kernel pointers
 *            (K_COPY_FROM_USER then degenerates to memmove)
 * @invokefd: user-supplied invocation descriptor (scalars, args, fds,
 *            attrs, optional crc pointer)
 * @po:       out: the new context on success
 *
 * Returns 0 on success, negative on failure.  On failure any partially
 * built context is torn down via context_free().
 */
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			 struct fastrpc_ioctl_invoke_crc *invokefd,
			 struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = NULL;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	/* total in+out buffer count drives the size of every trailing array */
	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	/* one allocation for the header plus all per-buffer arrays */
	VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	/* fake-add so context_free()'s hlist_del_init() is safe pre-queue */
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	/* carve the trailing arrays out of the tail of the allocation */
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	/* fd and attribute arrays are optional; copy only if supplied */
	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}
	/* user pointer; dereferenced only via K_COPY_TO_USER in put_args() */
	ctx->crc = (uint32_t *)invokefd->crc;
	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;	/* sentinel until the DSP responds */
	ctx->pid = current->pid;
	ctx->tgid = fl->tgid;
	init_completion(&ctx->work);
	ctx->magic = FASTRPC_CTX_MAGIC;

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}
992
993static void context_save_interrupted(struct smq_invoke_ctx *ctx)
994{
995 struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
996
997 spin_lock(&ctx->fl->hlock);
998 hlist_del_init(&ctx->hn);
999 hlist_add_head(&ctx->hn, &clst->interrupted);
1000 spin_unlock(&ctx->fl->hlock);
1001 /* free the cache on power collapse */
1002 fastrpc_buf_list_free(ctx->fl);
1003}
1004
/*
 * context_free() - tear down one invoke context.
 *
 * Unlinks the context from whatever list it sits on (safe even for a
 * never-queued context thanks to hlist_add_fake() in context_alloc()),
 * releases every per-buffer mmap reference, frees the copy buffer, clears
 * the magic so stale pointers are detectable, and frees the allocation.
 */
static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i], 0);
	/* second arg 1: buffer may be returned to the file's cache */
	fastrpc_buf_free(ctx->buf, 1);
	ctx->magic = 0;	/* poison so use-after-free is detectable */
	kfree(ctx);
}
1019
/*
 * context_notify_user() - publish the DSP's return value and wake the
 * thread blocked in fastrpc_internal_invoke().
 *
 * retval must be stored before complete() so the waiter reads a valid
 * value once woken.
 */
static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}
1025
1026
1027static void fastrpc_notify_users(struct fastrpc_file *me)
1028{
1029 struct smq_invoke_ctx *ictx;
1030 struct hlist_node *n;
1031
1032 spin_lock(&me->hlock);
1033 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1034 complete(&ictx->work);
1035 }
1036 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1037 complete(&ictx->work);
1038 }
1039 spin_unlock(&me->hlock);
1040
1041}
1042
1043static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
1044{
1045 struct fastrpc_file *fl;
1046 struct hlist_node *n;
1047
1048 spin_lock(&me->hlock);
1049 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1050 if (fl->cid == cid)
1051 fastrpc_notify_users(fl);
1052 }
1053 spin_unlock(&me->hlock);
1054
1055}
1056static void context_list_ctor(struct fastrpc_ctx_lst *me)
1057{
1058 INIT_HLIST_HEAD(&me->interrupted);
1059 INIT_HLIST_HEAD(&me->pending);
1060}
1061
1062static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
1063{
1064 struct fastrpc_ctx_lst *clst = &fl->clst;
c_mtharue1a5ce12017-10-13 20:47:09 +05301065 struct smq_invoke_ctx *ictx = NULL, *ctxfree;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001066 struct hlist_node *n;
1067
1068 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301069 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001070 spin_lock(&fl->hlock);
1071 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
1072 hlist_del_init(&ictx->hn);
1073 ctxfree = ictx;
1074 break;
1075 }
1076 spin_unlock(&fl->hlock);
1077 if (ctxfree)
1078 context_free(ctxfree);
1079 } while (ctxfree);
1080 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301081 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001082 spin_lock(&fl->hlock);
1083 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
1084 hlist_del_init(&ictx->hn);
1085 ctxfree = ictx;
1086 break;
1087 }
1088 spin_unlock(&fl->hlock);
1089 if (ctxfree)
1090 context_free(ctxfree);
1091 } while (ctxfree);
1092}
1093
1094static int fastrpc_file_free(struct fastrpc_file *fl);
1095static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
1096{
1097 struct fastrpc_file *fl, *free;
1098 struct hlist_node *n;
1099
1100 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301101 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001102 spin_lock(&me->hlock);
1103 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1104 hlist_del_init(&fl->hn);
1105 free = fl;
1106 break;
1107 }
1108 spin_unlock(&me->hlock);
1109 if (free)
1110 fastrpc_file_free(free);
1111 } while (free);
1112}
1113
/*
 * get_args() - marshal the arguments of one invocation into the shared
 * message buffer handed to the DSP.
 *
 * Stages, in order:
 *   1. size the metadata region (invoke-buf list, phy pages, fd list, crc
 *      list) and create mmaps for every fd-backed buffer and handle;
 *   2. compute the copy length needed for non-ION buffers using the
 *      overlap records built by context_build_overlap();
 *   3. allocate ctx->buf and lay metadata out at its start;
 *   4. fill page entries for ION-mapped buffers;
 *   5. copy the non-ION input buffers into ctx->buf after the metadata;
 *   6. flush caches for non-coherent buffers and the metadata itself.
 *
 * @kernel: non-zero when argument pointers are kernel pointers.
 * Returns 0 on success, negative on failure (mappings are released by the
 * caller via context_free()).
 */
static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
{
	remote_arg64_t *rpra;
	remote_arg_t *lpra = ctx->lpra;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages, *ipage;
	uint32_t sc = ctx->sc;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	int handles, bufs = inbufs + outbufs;
	uintptr_t args;
	size_t rlen = 0, copylen = 0, metalen = 0;
	int i, oix;
	int err = 0;
	int mflags = 0;
	uint64_t *fdlist;
	uint32_t *crclist;

	/* calculate size of the metadata */
	/* with rpra == NULL the *_start() helpers yield offsets from 0 */
	rpra = NULL;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;

	/* map each fd-backed argument buffer; one page slot per buffer */
	for (i = 0; i < bufs; ++i) {
		uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
		size_t len = lpra[i].buf.len;

		/* NOTE(review): this mmap_create's result is deliberately
		 * unchecked here, unlike the handle loop below — confirm
		 * failure is benign (buffer then treated as non-ION).
		 */
		if (ctx->fds[i] && (ctx->fds[i] != -1))
			fastrpc_mmap_create(ctx->fl, ctx->fds[i],
					ctx->attrs[i], buf, len,
					mflags, &ctx->maps[i]);
		ipage += 1;
	}
	/* dma handles get zero-length NOVA mappings; failure is fatal */
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	for (i = bufs; i < bufs + handles; i++) {
		VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
				FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
		if (err)
			goto bail;
		ipage += 1;
	}
	/* &ipage[0] is an offset here (rpra == NULL base), so metalen is
	 * the metadata footprint: bufs/pages + fd list + crc list
	 */
	metalen = copylen = (size_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
				 (sizeof(uint32_t) * M_CRCLIST);

	/* calculate len required for copying */
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		uintptr_t mstart, mend;
		size_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (ctx->maps[i])	/* ION-backed: no copy needed */
			continue;
		/* non-overlapping buffers start cache-line aligned */
		if (ctx->overps[oix]->offset == 0)
			copylen = ALIGN(copylen, BALIGN);
		mstart = ctx->overps[oix]->mstart;
		mend = ctx->overps[oix]->mend;
		VERIFY(err, (mend - mstart) <= LONG_MAX);
		if (err)
			goto bail;
		copylen += mend - mstart;
		/* NOTE(review): copylen is size_t, so this check is a
		 * tautology; presumably meant to catch overflow — verify.
		 */
		VERIFY(err, copylen >= 0);
		if (err)
			goto bail;
	}
	ctx->used = copylen;

	/* allocate new buffer */
	if (copylen) {
		VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
		if (err)
			goto bail;
	}
	/* NOTE(review): relies on copylen >= metalen > 0 so ctx->buf was
	 * allocated above; otherwise this would deref NULL — confirm.
	 */
	if (ctx->buf->virt && metalen <= copylen)
		memset(ctx->buf->virt, 0, metalen);

	/* copy metadata */
	rpra = ctx->buf->virt;
	ctx->rpra = rpra;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;
	args = (uintptr_t)ctx->buf->virt + metalen;
	for (i = 0; i < bufs + handles; ++i) {
		if (lpra[i].buf.len)
			list[i].num = 1;
		else
			list[i].num = 0;
		list[i].pgidx = ipage - pages;
		ipage++;
	}

	/* map ion buffers */
	PERF(ctx->fl->profile, ctx->fl->perf.map,
	for (i = 0; rpra && i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];
		uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
		size_t len = lpra[i].buf.len;

		rpra[i].buf.pv = 0;
		rpra[i].buf.len = len;
		if (!len)
			continue;
		if (map) {
			struct vm_area_struct *vma;
			uintptr_t offset;
			uint64_t num = buf_num_pages(buf, len);
			int idx = list[i].pgidx;

			if (map->attr & FASTRPC_ATTR_NOVA) {
				/* no user VA backing: use phys directly */
				offset = 0;
			} else {
				/* translate user VA to offset into the map */
				down_read(&current->mm->mmap_sem);
				VERIFY(err, NULL != (vma = find_vma(current->mm,
								map->va)));
				if (err) {
					up_read(&current->mm->mmap_sem);
					goto bail;
				}
				offset = buf_page_start(buf) - vma->vm_start;
				up_read(&current->mm->mmap_sem);
				VERIFY(err, offset < (uintptr_t)map->size);
				if (err)
					goto bail;
			}
			pages[idx].addr = map->phys + offset;
			pages[idx].size = num << PAGE_SHIFT;
		}
		rpra[i].buf.pv = buf;
	}
	PERF_END);
	/* dma handles: whole mapping, no offset */
	for (i = bufs; i < bufs + handles; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		pages[i].addr = map->phys;
		pages[i].size = map->size;
	}
	/* zero the fd and crc return areas the DSP will fill in */
	fdlist = (uint64_t *)&pages[bufs + handles];
	for (i = 0; i < M_FDLIST; i++)
		fdlist[i] = 0;
	crclist = (uint32_t *)&fdlist[M_FDLIST];
	memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);

	/* copy non ion buffers */
	PERF(ctx->fl->profile, ctx->fl->perf.copy,
	rlen = copylen - metalen;
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];
		size_t mlen;
		uint64_t buf;
		size_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (map)
			continue;
		if (ctx->overps[oix]->offset == 0) {
			/* mirror the alignment applied during sizing */
			rlen -= ALIGN(args, BALIGN) - args;
			args = ALIGN(args, BALIGN);
		}
		mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
		VERIFY(err, rlen >= mlen);
		if (err)
			goto bail;
		/* overlapping buffers share storage; back up by offset */
		rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
		pages[list[i].pgidx].addr = ctx->buf->phys -
					    ctx->overps[oix]->offset +
					    (copylen - rlen);
		pages[list[i].pgidx].addr =
			buf_page_start(pages[list[i].pgidx].addr);
		buf = rpra[i].buf.pv;
		pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
		/* only input buffers carry data toward the DSP */
		if (i < inbufs) {
			K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
					lpra[i].buf.pv, len);
			if (err)
				goto bail;
		}
		args = args + mlen;
		rlen -= mlen;
	}
	PERF_END);

	/* flush non-coherent buffers so the DSP sees current data */
	PERF(ctx->fl->profile, ctx->fl->perf.flush,
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		if (rpra[i].buf.len && ctx->overps[oix]->mstart)
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
			uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
	}
	PERF_END);
	/* fill dma descriptors for handle arguments */
	for (i = bufs; rpra && i < bufs + handles; i++) {
		rpra[i].dma.fd = ctx->fds[i];
		rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
		rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
	}

	/* finally flush the metadata region itself */
	if (!ctx->fl->sctx->smmu.coherent) {
		PERF(ctx->fl->profile, ctx->fl->perf.flush,
		dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
		PERF_END);
	}
 bail:
	return err;
}
1332
/*
 * put_args() - unmarshal results after the DSP completes an invocation.
 *
 * Copies every non-ION output buffer back to the caller, releases the map
 * reference for ION-backed buffers, frees any maps the DSP asked to
 * release via the fd list it wrote into the message, and copies the crc
 * list out if the caller requested one.
 *
 * @kernel: non-zero when destination pointers are kernel pointers.
 * @upra:   NOTE(review): currently unused in this body; outputs are copied
 *          to ctx->lpra[i].buf.pv instead — confirm intent.
 * Returns 0 on success, negative on a failed user copy.
 */
static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		    remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	uint32_t *crclist = NULL;

	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	/* recover the metadata layout that get_args() built in ctx->buf */
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	crclist = (uint32_t *)(fdlist + M_FDLIST);

	/* output buffers: copy back if they were staged, else drop the map */
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			fastrpc_mmap_free(ctx->maps[i], 0);
			ctx->maps[i] = NULL;
		}
	}
	/* release maps the DSP flagged as done via the fd list */
	if (inbufs + outbufs + handles) {
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])	/* zero terminates the list */
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap, 0);
		}
	}
	/* return per-buffer CRCs if the caller supplied a destination */
	if (ctx->crc && crclist && rpra)
		K_COPY_TO_USER(err, kernel, ctx->crc,
			crclist, M_CRCLIST*sizeof(uint32_t));

 bail:
	return err;
}
1384
/*
 * inv_args_pre() - pre-invoke cache maintenance for output buffers.
 *
 * For each non-coherent, cached output buffer, flushes the partial cache
 * lines at its unaligned head and tail so that dirty lines cannot later be
 * evicted on top of data the DSP writes.  Buffers sharing a page with the
 * metadata are skipped (the metadata flush in get_args() covers them).
 */
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		/* coherent SMMU covers everything not explicitly marked
		 * non-coherent; explicitly-coherent maps need nothing either
		 */
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* same page as the metadata: already handled */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		/* flush the unaligned first cache line, if any */
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		/* and the unaligned last cache line, if any */
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
							rpra[i].buf.len);
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}
1420
/*
 * inv_args() - post-invoke cache invalidation for output buffers.
 *
 * After the DSP has written results, invalidates the CPU cache over every
 * non-coherent, cached output buffer (via the ION handle when one exists,
 * otherwise a raw range invalidate) and finally over the metadata region,
 * so the CPU reads the DSP's data rather than stale cache lines.
 */
static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		/* skip buffers the SMMU/attributes make coherent anyway */
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* shares a page with metadata: covered by the final
		 * dmac_inv_range() below
		 */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						 + rpra[i].buf.len));
	}

	/* the DSP also wrote fd/crc lists into the metadata region */
	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}
1460
/*
 * fastrpc_invoke_send() - build the smq message for @ctx and transmit it
 * to the DSP over the channel's glink transport.
 *
 * @kernel: non-zero for in-kernel invocations; these are sent with pid 0.
 * Fails with -ECONNRESET if the channel went through a subsystem restart
 * since this file attached, and with an error if the glink port is not in
 * the CONNECTED state.  Returns the glink_tx() result otherwise.
 */
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, NULL != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = fl->tgid;
	msg->tid = current->pid;
	/* second session on the same tgid is disambiguated via a tid bit */
	if (fl->sessionid)
		msg->tid |= (1 << SESSION_ID_INDEX);
	if (kernel)
		msg->pid = 0;	/* kernel-originated call marker */
	/* context pointer doubles as the reply token; low bit carries pd */
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
		FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
 bail:
	return err;
}
1498
1499static void fastrpc_init(struct fastrpc_apps *me)
1500{
1501 int i;
1502
1503 INIT_HLIST_HEAD(&me->drivers);
Tharun Kumar Merugubcd6fbf2018-01-04 17:49:34 +05301504 INIT_HLIST_HEAD(&me->maps);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001505 spin_lock_init(&me->hlock);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05301506 mutex_init(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001507 me->channel = &gcinfo[0];
1508 for (i = 0; i < NUM_CHANNELS; i++) {
1509 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301510 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001511 me->channel[i].sesscount = 0;
1512 }
1513}
1514
1515static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1516
/*
 * fastrpc_internal_invoke() - core remote-invocation path.
 *
 * Restores an interrupted context if one exists for this request,
 * otherwise allocates a fresh one; marshals arguments (get_args), performs
 * pre-invoke cache maintenance, sends the message, blocks for the DSP's
 * completion (interruptibly for user calls), then invalidates caches,
 * checks the remote return value and unmarshals outputs (put_args).
 * A context interrupted by a signal is parked for later restoration
 * instead of being freed.
 *
 * @mode:   NOTE(review): unused in this body — presumably consumed by
 *          callers/elsewhere; confirm.
 * @kernel: non-zero for in-kernel invocations (uninterruptible wait,
 *          kernel-pointer copies, no interrupted-context restore).
 * Returns 0 on success; a negative error, the DSP's retval, or
 * NOTE(review): positive ECONNRESET after SSR (inconsistent with the
 * negative -ECONNRESET used in fastrpc_invoke_send) — confirm intended.
 */
static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
				   uint32_t kernel,
				   struct fastrpc_ioctl_invoke_crc *inv)
{
	struct smq_invoke_ctx *ctx = NULL;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;
	int cid = fl->cid;
	int interrupted = 0;
	int err = 0;
	struct timespec invoket = {0};

	if (fl->profile)
		getnstimeofday(&invoket);

	VERIFY(err, fl->sctx != NULL);
	if (err)
		goto bail;
	VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
	if (err)
		goto bail;

	if (!kernel) {
		/* resume a previously signal-interrupted invocation */
		VERIFY(err, 0 == context_restore_interrupted(fl, inv,
								&ctx));
		if (err)
			goto bail;
		if (fl->sctx->smmu.faults)
			err = FASTRPC_ENOSUCH;
		if (err)
			goto bail;
		if (ctx)
			goto wait;	/* skip marshalling; already sent */
	}

	VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
	if (err)
		goto bail;

	if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
		PERF(fl->profile, fl->perf.getargs,
		VERIFY(err, 0 == get_args(kernel, ctx));
		PERF_END);
		if (err)
			goto bail;
	}

	if (!fl->sctx->smmu.coherent)
		inv_args_pre(ctx);
	PERF(fl->profile, fl->perf.link,
	VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
	PERF_END);

	if (err)
		goto bail;
 wait:
	/* kernel callers must not be interrupted mid-RPC */
	if (kernel)
		wait_for_completion(&ctx->work);
	else {
		interrupted = wait_for_completion_interruptible(&ctx->work);
		VERIFY(err, 0 == (err = interrupted));
		if (err)
			goto bail;
	}

	PERF(fl->profile, fl->perf.invargs,
	if (!fl->sctx->smmu.coherent)
		inv_args(ctx);
	PERF_END);

	/* propagate the DSP-side return code */
	VERIFY(err, 0 == (err = ctx->retval));
	if (err)
		goto bail;

	PERF(fl->profile, fl->perf.putargs,
	VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
	PERF_END);
	if (err)
		goto bail;
 bail:
	/* a signal parks the context for restore; anything else frees it */
	if (ctx && interrupted == -ERESTARTSYS)
		context_save_interrupted(ctx);
	else if (ctx)
		context_free(ctx);
	if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
		err = ECONNRESET;

	if (fl->profile && !interrupted) {
		if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
			fl->perf.invoke += getnstimediff(&invoket);
		if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX)
			fl->perf.count++;
	}
	return err;
}
1612
Sathish Ambley36849af2017-02-02 09:35:55 -08001613static int fastrpc_channel_open(struct fastrpc_file *fl);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001614static int fastrpc_init_process(struct fastrpc_file *fl,
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001615 struct fastrpc_ioctl_init_attrs *uproc)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001616{
1617 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05301618 struct fastrpc_apps *me = &gfa;
Sathish Ambleybae51902017-07-03 15:00:49 -07001619 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001620 struct fastrpc_ioctl_init *init = &uproc->init;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001621 struct smq_phy_page pages[1];
c_mtharue1a5ce12017-10-13 20:47:09 +05301622 struct fastrpc_mmap *file = NULL, *mem = NULL;
1623 char *proc_name = NULL;
1624 int srcVM[1] = {VMID_HLOS};
c_mtharu63ffc012017-11-16 15:26:56 +05301625 int destVM[1] = {me->channel[fl->cid].rhvmid};
c_mtharue1a5ce12017-10-13 20:47:09 +05301626 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
1627 int hlosVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001628
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05301629 VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
Sathish Ambley36849af2017-02-02 09:35:55 -08001630 if (err)
1631 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001632 if (init->flags == FASTRPC_INIT_ATTACH) {
1633 remote_arg_t ra[1];
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301634 int tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001635
1636 ra[0].buf.pv = (void *)&tgid;
1637 ra[0].buf.len = sizeof(tgid);
1638 ioctl.inv.handle = 1;
1639 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
1640 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301641 ioctl.fds = NULL;
1642 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001643 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001644 fl->pd = 0;
1645 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1646 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1647 if (err)
1648 goto bail;
1649 } else if (init->flags == FASTRPC_INIT_CREATE) {
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001650 remote_arg_t ra[6];
1651 int fds[6];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001652 int mflags = 0;
1653 struct {
1654 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301655 unsigned int namelen;
1656 unsigned int filelen;
1657 unsigned int pageslen;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001658 int attrs;
1659 int siglen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001660 } inbuf;
1661
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301662 inbuf.pgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001663 inbuf.namelen = strlen(current->comm) + 1;
1664 inbuf.filelen = init->filelen;
1665 fl->pd = 1;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301666
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05301667 VERIFY(err, access_ok(0, (void __user *)init->file,
1668 init->filelen));
1669 if (err)
1670 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001671 if (init->filelen) {
1672 VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
1673 init->file, init->filelen, mflags, &file));
1674 if (err)
1675 goto bail;
1676 }
1677 inbuf.pageslen = 1;
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05301678 VERIFY(err, access_ok(1, (void __user *)init->mem,
1679 init->memlen));
1680 if (err)
1681 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001682 VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
1683 init->mem, init->memlen, mflags, &mem));
1684 if (err)
1685 goto bail;
1686 inbuf.pageslen = 1;
1687 ra[0].buf.pv = (void *)&inbuf;
1688 ra[0].buf.len = sizeof(inbuf);
1689 fds[0] = 0;
1690
1691 ra[1].buf.pv = (void *)current->comm;
1692 ra[1].buf.len = inbuf.namelen;
1693 fds[1] = 0;
1694
1695 ra[2].buf.pv = (void *)init->file;
1696 ra[2].buf.len = inbuf.filelen;
1697 fds[2] = init->filefd;
1698
1699 pages[0].addr = mem->phys;
1700 pages[0].size = mem->size;
1701 ra[3].buf.pv = (void *)pages;
1702 ra[3].buf.len = 1 * sizeof(*pages);
1703 fds[3] = 0;
1704
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001705 inbuf.attrs = uproc->attrs;
1706 ra[4].buf.pv = (void *)&(inbuf.attrs);
1707 ra[4].buf.len = sizeof(inbuf.attrs);
1708 fds[4] = 0;
1709
1710 inbuf.siglen = uproc->siglen;
1711 ra[5].buf.pv = (void *)&(inbuf.siglen);
1712 ra[5].buf.len = sizeof(inbuf.siglen);
1713 fds[5] = 0;
1714
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001715 ioctl.inv.handle = 1;
1716 ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001717 if (uproc->attrs)
1718 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001719 ioctl.inv.pra = ra;
1720 ioctl.fds = fds;
c_mtharue1a5ce12017-10-13 20:47:09 +05301721 ioctl.attrs = NULL;
1722 ioctl.crc = NULL;
1723 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1724 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1725 if (err)
1726 goto bail;
1727 } else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
1728 remote_arg_t ra[3];
1729 uint64_t phys = 0;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301730 size_t size = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05301731 int fds[3];
1732 struct {
1733 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301734 unsigned int namelen;
1735 unsigned int pageslen;
c_mtharue1a5ce12017-10-13 20:47:09 +05301736 } inbuf;
1737
1738 if (!init->filelen)
1739 goto bail;
1740
1741 proc_name = kzalloc(init->filelen, GFP_KERNEL);
1742 VERIFY(err, !IS_ERR_OR_NULL(proc_name));
1743 if (err)
1744 goto bail;
1745 VERIFY(err, 0 == copy_from_user((void *)proc_name,
1746 (void __user *)init->file, init->filelen));
1747 if (err)
1748 goto bail;
1749
1750 inbuf.pgid = current->tgid;
c_mtharu81a0aa72017-11-07 16:13:21 +05301751 inbuf.namelen = init->filelen;
c_mtharue1a5ce12017-10-13 20:47:09 +05301752 inbuf.pageslen = 0;
1753 if (!me->staticpd_flags) {
1754 inbuf.pageslen = 1;
1755 VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
1756 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
1757 &mem));
1758 if (err)
1759 goto bail;
1760 phys = mem->phys;
1761 size = mem->size;
1762 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
1763 srcVM, 1, destVM, destVMperm, 1));
1764 if (err) {
1765 pr_err("ADSPRPC: hyp_assign_phys fail err %d",
1766 err);
1767 pr_err("map->phys %llx, map->size %d\n",
1768 phys, (int)size);
1769 goto bail;
1770 }
1771 me->staticpd_flags = 1;
1772 }
1773
1774 ra[0].buf.pv = (void *)&inbuf;
1775 ra[0].buf.len = sizeof(inbuf);
1776 fds[0] = 0;
1777
1778 ra[1].buf.pv = (void *)proc_name;
1779 ra[1].buf.len = inbuf.namelen;
1780 fds[1] = 0;
1781
1782 pages[0].addr = phys;
1783 pages[0].size = size;
1784
1785 ra[2].buf.pv = (void *)pages;
1786 ra[2].buf.len = sizeof(*pages);
1787 fds[2] = 0;
1788 ioctl.inv.handle = 1;
1789
1790 ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
1791 ioctl.inv.pra = ra;
1792 ioctl.fds = NULL;
1793 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001794 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001795 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1796 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1797 if (err)
1798 goto bail;
1799 } else {
1800 err = -ENOTTY;
1801 }
1802bail:
c_mtharud91205a2017-11-07 16:01:06 +05301803 kfree(proc_name);
c_mtharue1a5ce12017-10-13 20:47:09 +05301804 if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
1805 me->staticpd_flags = 0;
1806 if (mem && err) {
1807 if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
1808 hyp_assign_phys(mem->phys, (uint64_t)mem->size,
1809 destVM, 1, srcVM, hlosVMperm, 1);
c_mtharu7bd6a422017-10-17 18:15:37 +05301810 fastrpc_mmap_free(mem, 0);
c_mtharue1a5ce12017-10-13 20:47:09 +05301811 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001812 if (file)
c_mtharu7bd6a422017-10-17 18:15:37 +05301813 fastrpc_mmap_free(file, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001814 return err;
1815}
1816
1817static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
1818{
1819 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001820 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001821 remote_arg_t ra[1];
1822 int tgid = 0;
1823
Sathish Ambley36849af2017-02-02 09:35:55 -08001824 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1825 if (err)
1826 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05301827 VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001828 if (err)
1829 goto bail;
1830 tgid = fl->tgid;
1831 ra[0].buf.pv = (void *)&tgid;
1832 ra[0].buf.len = sizeof(tgid);
1833 ioctl.inv.handle = 1;
1834 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
1835 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301836 ioctl.fds = NULL;
1837 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001838 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001839 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1840 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1841bail:
1842 return err;
1843}
1844
1845static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
1846 struct fastrpc_mmap *map)
1847{
Sathish Ambleybae51902017-07-03 15:00:49 -07001848 struct fastrpc_ioctl_invoke_crc ioctl;
c_mtharu63ffc012017-11-16 15:26:56 +05301849 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001850 struct smq_phy_page page;
1851 int num = 1;
1852 remote_arg_t ra[3];
1853 int err = 0;
1854 struct {
1855 int pid;
1856 uint32_t flags;
1857 uintptr_t vaddrin;
1858 int num;
1859 } inargs;
1860 struct {
1861 uintptr_t vaddrout;
1862 } routargs;
1863
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301864 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001865 inargs.vaddrin = (uintptr_t)map->va;
1866 inargs.flags = flags;
1867 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
1868 ra[0].buf.pv = (void *)&inargs;
1869 ra[0].buf.len = sizeof(inargs);
1870 page.addr = map->phys;
1871 page.size = map->size;
1872 ra[1].buf.pv = (void *)&page;
1873 ra[1].buf.len = num * sizeof(page);
1874
1875 ra[2].buf.pv = (void *)&routargs;
1876 ra[2].buf.len = sizeof(routargs);
1877
1878 ioctl.inv.handle = 1;
1879 if (fl->apps->compat)
1880 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
1881 else
1882 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
1883 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301884 ioctl.fds = NULL;
1885 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001886 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001887 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1888 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1889 map->raddr = (uintptr_t)routargs.vaddrout;
c_mtharue1a5ce12017-10-13 20:47:09 +05301890 if (err)
1891 goto bail;
1892 if (flags == ADSP_MMAP_HEAP_ADDR) {
1893 struct scm_desc desc = {0};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001894
c_mtharue1a5ce12017-10-13 20:47:09 +05301895 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
1896 desc.args[1] = map->phys;
1897 desc.args[2] = map->size;
1898 desc.arginfo = SCM_ARGS(3);
1899 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
1900 TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
1901 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
1902
1903 int srcVM[1] = {VMID_HLOS};
c_mtharu63ffc012017-11-16 15:26:56 +05301904 int destVM[1] = {me->channel[fl->cid].rhvmid};
c_mtharue1a5ce12017-10-13 20:47:09 +05301905 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
1906
1907 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
1908 srcVM, 1, destVM, destVMperm, 1));
1909 if (err)
1910 goto bail;
1911 }
1912bail:
1913 return err;
1914}
1915
1916static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
1917 struct fastrpc_mmap *map)
1918{
1919 int err = 0;
c_mtharu63ffc012017-11-16 15:26:56 +05301920 struct fastrpc_apps *me = &gfa;
1921 int srcVM[1] = {me->channel[fl->cid].rhvmid};
c_mtharue1a5ce12017-10-13 20:47:09 +05301922 int destVM[1] = {VMID_HLOS};
1923 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
1924
1925 if (map->flags == ADSP_MMAP_HEAP_ADDR) {
1926 struct fastrpc_ioctl_invoke_crc ioctl;
1927 struct scm_desc desc = {0};
1928 remote_arg_t ra[1];
1929 int err = 0;
1930 struct {
1931 uint8_t skey;
1932 } routargs;
1933
1934 ra[0].buf.pv = (void *)&routargs;
1935 ra[0].buf.len = sizeof(routargs);
1936
1937 ioctl.inv.handle = 1;
1938 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
1939 ioctl.inv.pra = ra;
1940 ioctl.fds = NULL;
1941 ioctl.attrs = NULL;
1942 ioctl.crc = NULL;
1943 if (fl == NULL)
1944 goto bail;
1945
1946 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1947 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1948 if (err)
1949 goto bail;
1950 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
1951 desc.args[1] = map->phys;
1952 desc.args[2] = map->size;
1953 desc.args[3] = routargs.skey;
1954 desc.arginfo = SCM_ARGS(4);
1955 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
1956 TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
1957 } else if (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
1958 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
1959 srcVM, 1, destVM, destVMperm, 1));
1960 if (err)
1961 goto bail;
1962 }
1963
1964bail:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001965 return err;
1966}
1967
1968static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
1969 struct fastrpc_mmap *map)
1970{
Sathish Ambleybae51902017-07-03 15:00:49 -07001971 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001972 remote_arg_t ra[1];
1973 int err = 0;
1974 struct {
1975 int pid;
1976 uintptr_t vaddrout;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301977 size_t size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001978 } inargs;
1979
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301980 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001981 inargs.size = map->size;
1982 inargs.vaddrout = map->raddr;
1983 ra[0].buf.pv = (void *)&inargs;
1984 ra[0].buf.len = sizeof(inargs);
1985
1986 ioctl.inv.handle = 1;
1987 if (fl->apps->compat)
1988 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
1989 else
1990 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
1991 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301992 ioctl.fds = NULL;
1993 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001994 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001995 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1996 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
c_mtharue1a5ce12017-10-13 20:47:09 +05301997 if (err)
1998 goto bail;
1999 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
2000 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2001 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, map));
2002 if (err)
2003 goto bail;
2004 }
2005bail:
2006 return err;
2007}
2008
/*
 * Drain the global (remote heap) mmap list after a subsystem restart:
 * unmap each region from the remote side, optionally capture it into an
 * ELF ramdump, then free the local bookkeeping.  Returns 0 on success;
 * on failure the entry being processed is put back on the list.
 */
static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
{
	struct fastrpc_mmap *match = NULL, *map = NULL;
	struct hlist_node *n = NULL;
	int err = 0, ret = 0;
	struct fastrpc_apps *me = &gfa;
	struct ramdump_segment *ramdump_segments_rh = NULL;

	do {
		match = NULL;
		/*
		 * Detach one entry at a time so the spinlock is never
		 * held across the (sleeping) remote calls below.
		 */
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
		spin_unlock(&me->hlock);

		if (match) {
			VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match));
			if (err)
				goto bail;
			/* Channel 0 owns the remote-heap ramdump device. */
			if (me->channel[0].ramdumpenabled) {
				ramdump_segments_rh = kcalloc(1,
				sizeof(struct ramdump_segment), GFP_KERNEL);
				/* best effort: skip the dump on OOM */
				if (ramdump_segments_rh) {
					ramdump_segments_rh->address =
					match->phys;
					ramdump_segments_rh->size = match->size;
					ret = do_elf_ramdump(
					 me->channel[0].remoteheap_ramdump_dev,
					 ramdump_segments_rh, 1);
					if (ret < 0)
						pr_err("ADSPRPC: unable to dump heap");
					kfree(ramdump_segments_rh);
				}
			}
			fastrpc_mmap_free(match, 0);
		}
	} while (match);
bail:
	/* Re-add the detached entry on error so it is not leaked. */
	if (err && match)
		fastrpc_mmap_add(match);
	return err;
}
2054
2055static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302056 size_t len, struct fastrpc_mmap **ppmap);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002057
2058static void fastrpc_mmap_add(struct fastrpc_mmap *map);
2059
2060static int fastrpc_internal_munmap(struct fastrpc_file *fl,
2061 struct fastrpc_ioctl_munmap *ud)
2062{
2063 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302064 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002065
2066 VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
2067 if (err)
2068 goto bail;
2069 VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
2070 if (err)
2071 goto bail;
c_mtharu7bd6a422017-10-17 18:15:37 +05302072 fastrpc_mmap_free(map, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002073bail:
2074 if (err && map)
2075 fastrpc_mmap_add(map);
2076 return err;
2077}
2078
c_mtharu7bd6a422017-10-17 18:15:37 +05302079static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
2080 struct fastrpc_ioctl_munmap_fd *ud) {
2081 int err = 0;
2082 struct fastrpc_mmap *map = NULL;
2083
2084 VERIFY(err, (fl && ud));
2085 if (err)
2086 goto bail;
2087
2088 if (!fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
2089 pr_err("mapping not found to unamp %x va %llx %x\n",
2090 ud->fd, (unsigned long long)ud->va,
2091 (unsigned int)ud->len);
2092 err = -1;
2093 goto bail;
2094 }
2095 if (map)
2096 fastrpc_mmap_free(map, 0);
2097bail:
2098 return err;
2099}
2100
2101
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002102static int fastrpc_internal_mmap(struct fastrpc_file *fl,
2103 struct fastrpc_ioctl_mmap *ud)
2104{
2105
c_mtharue1a5ce12017-10-13 20:47:09 +05302106 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002107 int err = 0;
2108
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302109 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
c_mtharue1a5ce12017-10-13 20:47:09 +05302110 ud->size, ud->flags, 1, &map))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002111 return 0;
2112
2113 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302114 (uintptr_t)ud->vaddrin, ud->size,
c_mtharue1a5ce12017-10-13 20:47:09 +05302115 ud->flags, &map));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002116 if (err)
2117 goto bail;
2118 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
2119 if (err)
2120 goto bail;
2121 ud->vaddrout = map->raddr;
2122 bail:
2123 if (err && map)
c_mtharu7bd6a422017-10-17 18:15:37 +05302124 fastrpc_mmap_free(map, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002125 return err;
2126}
2127
/*
 * kref release callback for a channel context.  Invoked through
 * kref_put_mutex() with gfa.smd_mutex HELD — which is why this
 * function unlocks smd_mutex before returning.  Closes the glink port
 * and drops the link-state notifier registration.
 */
static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	/* channel id is the context's index in the global gcinfo table */
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = NULL;
	glink_unregister_link_state_cb(ctx->link.link_notify_handle);
	ctx->link.link_notify_handle = NULL;
	/* matches the lock taken by the kref_put_mutex() caller */
	mutex_unlock(&me->smd_mutex);
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
						MAJOR(me->dev_no), cid);
}
2144
2145static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
2146
/*
 * Allocate an SMMU session on a channel; caller must hold smd_mutex
 * (hence "_locked").  When the channel has session slots, find a free
 * one whose secure attribute matches; when sesscount is 0 (no per-
 * channel SMMU context banks), fall back to session 0 backed by the
 * global device.  Note the fallback path does not mark the session
 * used.  On success *session points at the chosen slot.
 */
static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
			int secure, struct fastrpc_session_ctx **session)
{
	struct fastrpc_apps *me = &gfa;
	int idx = 0, err = 0;

	if (chan->sesscount) {
		for (idx = 0; idx < chan->sesscount; ++idx) {
			if (!chan->session[idx].used &&
				chan->session[idx].smmu.secure == secure) {
				chan->session[idx].used = 1;
				break;
			}
		}
		/* loop fell through: no free matching session */
		VERIFY(err, idx < chan->sesscount);
		if (err)
			goto bail;
		chan->session[idx].smmu.faults = 0;
	} else {
		VERIFY(err, me->dev != NULL);
		if (err)
			goto bail;
		chan->session[0].dev = me->dev;
		chan->session[0].smmu.dev = me->dev;
	}

	*session = &chan->session[idx];
 bail:
	return err;
}
2177
c_mtharue1a5ce12017-10-13 20:47:09 +05302178static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
2179 size_t size)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002180{
2181 if (glink_queue_rx_intent(h, NULL, size))
2182 return false;
2183 return true;
2184}
2185
c_mtharue1a5ce12017-10-13 20:47:09 +05302186static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002187 const void *pkt_priv, const void *ptr)
2188{
2189}
2190
c_mtharue1a5ce12017-10-13 20:47:09 +05302191static void fastrpc_glink_notify_rx(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002192 const void *pkt_priv, const void *ptr, size_t size)
2193{
2194 struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
c_mtharufdac6892017-10-12 13:09:01 +05302195 struct smq_invoke_ctx *ctx;
2196 int err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002197
c_mtharufdac6892017-10-12 13:09:01 +05302198 VERIFY(err, (rsp && size >= sizeof(*rsp)));
2199 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302200 goto bail;
2201
c_mtharufdac6892017-10-12 13:09:01 +05302202 ctx = (struct smq_invoke_ctx *)(uint64_to_ptr(rsp->ctx & ~1));
2203 VERIFY(err, (ctx && ctx->magic == FASTRPC_CTX_MAGIC));
2204 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302205 goto bail;
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302206
c_mtharufdac6892017-10-12 13:09:01 +05302207 context_notify_user(ctx, rsp->retval);
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302208bail:
c_mtharufdac6892017-10-12 13:09:01 +05302209 if (err)
2210 pr_err("adsprpc: invalid response or context\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002211 glink_rx_done(handle, ptr, true);
2212}
2213
c_mtharue1a5ce12017-10-13 20:47:09 +05302214static void fastrpc_glink_notify_state(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002215 unsigned int event)
2216{
2217 struct fastrpc_apps *me = &gfa;
2218 int cid = (int)(uintptr_t)priv;
2219 struct fastrpc_glink_info *link;
2220
2221 if (cid < 0 || cid >= NUM_CHANNELS)
2222 return;
2223 link = &me->channel[cid].link;
2224 switch (event) {
2225 case GLINK_CONNECTED:
2226 link->port_state = FASTRPC_LINK_CONNECTED;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302227 complete(&me->channel[cid].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002228 break;
2229 case GLINK_LOCAL_DISCONNECTED:
2230 link->port_state = FASTRPC_LINK_DISCONNECTED;
2231 break;
2232 case GLINK_REMOTE_DISCONNECTED:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002233 break;
2234 default:
2235 break;
2236 }
2237}
2238
2239static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
2240 struct fastrpc_session_ctx **session)
2241{
2242 int err = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302243 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002244
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302245 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002246 if (!*session)
2247 err = fastrpc_session_alloc_locked(chan, secure, session);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302248 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002249 return err;
2250}
2251
/*
 * Return an SMMU session to the free pool.  smd_mutex serializes the
 * `used` flag against fastrpc_session_alloc_locked().
 */
static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
				struct fastrpc_session_ctx *session)
{
	struct fastrpc_apps *me = &gfa;

	mutex_lock(&me->smd_mutex);
	session->used = 0;
	mutex_unlock(&me->smd_mutex);
}
2261
/*
 * Tear down a client file: notify the DSP, unlink from the global list,
 * destroy pending contexts, buffers and maps, drop the channel
 * reference, and free the sessions.  Order matters: the remote release
 * happens before local state is destroyed, and the channel kref is
 * dropped only if no SSR occurred since this file attached.
 */
static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n;
	struct fastrpc_mmap *map = NULL;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	/* best effort: tell the DSP this process is gone */
	(void)fastrpc_release_current_dsp_process(fl);

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	/*
	 * No session context means the file never completed setup
	 * (cid may still be -1), so there is nothing else to release.
	 */
	if (!fl->sctx) {
		kfree(fl);
		return 0;
	}
	/* block further ioctls racing with teardown */
	spin_lock(&fl->hlock);
	fl->file_close = 1;
	spin_unlock(&fl->hlock);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map, 1);
	}
	/* only drop the channel ref if it was not recycled by an SSR */
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
				fastrpc_channel_close, &fl->apps->smd_mutex);
	if (fl->sctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
	kfree(fl);
	return 0;
}
2300
2301static int fastrpc_device_release(struct inode *inode, struct file *file)
2302{
2303 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2304
2305 if (fl) {
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302306 if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
2307 pm_qos_remove_request(&fl->pm_qos_req);
Sathish Ambley1ca68232017-01-19 10:32:55 -08002308 if (fl->debugfs_file != NULL)
2309 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002310 fastrpc_file_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +05302311 file->private_data = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002312 }
2313 return 0;
2314}
2315
2316static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
2317 void *priv)
2318{
2319 struct fastrpc_apps *me = &gfa;
2320 int cid = (int)((uintptr_t)priv);
2321 struct fastrpc_glink_info *link;
2322
2323 if (cid < 0 || cid >= NUM_CHANNELS)
2324 return;
2325
2326 link = &me->channel[cid].link;
2327 switch (cb_info->link_state) {
2328 case GLINK_LINK_STATE_UP:
2329 link->link_state = FASTRPC_LINK_STATE_UP;
2330 complete(&me->channel[cid].work);
2331 break;
2332 case GLINK_LINK_STATE_DOWN:
2333 link->link_state = FASTRPC_LINK_STATE_DOWN;
2334 break;
2335 default:
2336 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
2337 break;
2338 }
2339}
2340
/*
 * Register for glink link-state notifications on a channel (idempotent:
 * an existing registration is kept) and wait for the link to come up.
 * Returns 0 when the link is up within RPC_TIMEOUT.
 */
static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;

	link = &me->channel[cid].link;
	/* already registered: nothing to do, report success */
	if (link->link_notify_handle != NULL)
		goto bail;

	link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
	link->link_notify_handle = glink_register_link_state_cb(
						&link->link_info,
						(void *)((uintptr_t)cid));
	VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
	if (err) {
		link->link_notify_handle = NULL;
		goto bail;
	}
	/* wait for fastrpc_link_state_handler() to signal link-up */
	VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
						RPC_TIMEOUT));
bail:
	return err;
}
2368
2369static void fastrpc_glink_close(void *chan, int cid)
2370{
2371 int err = 0;
2372 struct fastrpc_glink_info *link;
2373
2374 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2375 if (err)
2376 return;
2377 link = &gfa.channel[cid].link;
2378
c_mtharu314a4202017-11-15 22:09:17 +05302379 if (link->port_state == FASTRPC_LINK_CONNECTED ||
2380 link->port_state == FASTRPC_LINK_REMOTE_DISCONNECTING) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002381 link->port_state = FASTRPC_LINK_DISCONNECTING;
2382 glink_close(chan);
2383 }
2384}
2385
/*
 * Open the glink port for a channel.  Preconditions: the link is up
 * (fastrpc_glink_register succeeded) and the port is disconnected.
 * Moves the port state to CONNECTING, fills the open config with this
 * driver's callbacks, and stores the handle on success; the CONNECTED
 * transition itself arrives later via fastrpc_glink_notify_state().
 */
static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &me->channel[cid].link.cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
	if (err)
		goto bail;

	link->port_state = FASTRPC_LINK_CONNECTING;
	/* priv round-trips the channel id back into the callbacks */
	cfg->priv = (void *)(uintptr_t)cid;
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err) {
		/* roll back CONNECTING unless a callback moved it already */
		if (link->port_state == FASTRPC_LINK_CONNECTING)
			link->port_state = FASTRPC_LINK_DISCONNECTED;
		goto bail;
	}
	me->channel[cid].chan = handle;
bail:
	return err;
}
2427
Sathish Ambley1ca68232017-01-19 10:32:55 -08002428static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
2429{
2430 filp->private_data = inode->i_private;
2431 return 0;
2432}
2433
/*
 * debugfs read handler.  With no private file pointer (global node),
 * dump per-channel session information; for a specific client, dump
 * its ids plus the buffer, map, and pending/interrupted context lists.
 * Output is rendered into a DEBUGFS_SIZE scratch buffer and copied out
 * with simple_read_from_buffer().
 */
static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
					 size_t count, loff_t *position)
{
	struct fastrpc_file *fl = filp->private_data;
	struct hlist_node *n;
	struct fastrpc_buf *buf = NULL;
	struct fastrpc_mmap *map = NULL;
	struct smq_invoke_ctx *ictx = NULL;
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	unsigned int len = 0;
	int i, j, ret = 0;
	char *fileinfo = NULL;

	fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
	if (!fileinfo)
		goto bail;
	if (fl == NULL) {
		/* global view: per-channel / per-session summary */
		for (i = 0; i < NUM_CHANNELS; i++) {
			chan = &gcinfo[i];
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s\n\n",
					chan->name);
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s %d\n",
					"sesscount:", chan->sesscount);
			for (j = 0; j < chan->sesscount; j++) {
				sess = &chan->session[j];
				len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len,
					"%s%d\n\n", "SESSION", j);
				len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len,
					"%s %d\n", "sid:",
					sess->smmu.cb);
				len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len,
					"%s %d\n", "SECURE:",
					sess->smmu.secure);
			}
		}
	} else {
		/* per-client view */
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"PROCESS_ID:", fl->tgid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"CHANNEL_ID:", fl->cid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"SSRCOUNT:", fl->ssrcount);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s\n",
				"LIST OF BUFS:");
		/* hlock guards the buf/map/context lists while we walk them */
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %pK %s %llx\n", "buf:",
					buf, "buf->virt:", buf->virt,
					"buf->phys:", buf->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF MAPS:");
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
						"%s %pK %s %lx %s %llx\n",
						"map:", map,
						"map->va:", map->va,
						"map->phys:", map->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF PENDING SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
						"%s %pK %s %u %s %u %s %u\n",
						"smqcontext:", ictx,
						"sc:", ictx->sc,
						"tid:", ictx->pid,
						"handle", ictx->rpra->h);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF INTERRUPTED SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
						"%s %pK %s %u %s %u %s %u\n",
						"smqcontext:", ictx,
						"sc:", ictx->sc,
						"tid:", ictx->pid,
						"handle", ictx->rpra->h);
		}
		spin_unlock(&fl->hlock);
	}
	/* clamp; scnprintf already bounded each write to the buffer */
	if (len > DEBUGFS_SIZE)
		len = DEBUGFS_SIZE;
	ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
	kfree(fileinfo);
bail:
	return ret;
}
2536
/* File operations for the per-process fastrpc debugfs node. */
static const struct file_operations debugfs_fops = {
	.open = fastrpc_debugfs_open,
	.read = fastrpc_debugfs_read,
};
Sathish Ambley36849af2017-02-02 09:35:55 -08002541static int fastrpc_channel_open(struct fastrpc_file *fl)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002542{
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002543 struct fastrpc_apps *me = &gfa;
Sathish Ambley36849af2017-02-02 09:35:55 -08002544 int cid, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002545
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302546 mutex_lock(&me->smd_mutex);
2547
Sathish Ambley36849af2017-02-02 09:35:55 -08002548 VERIFY(err, fl && fl->sctx);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002549 if (err)
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302550 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08002551 cid = fl->cid;
c_mtharu314a4202017-11-15 22:09:17 +05302552 VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
2553 if (err)
2554 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302555 if (me->channel[cid].ssrcount !=
2556 me->channel[cid].prevssrcount) {
2557 if (!me->channel[cid].issubsystemup) {
2558 VERIFY(err, 0);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302559 if (err) {
2560 err = -ENOTCONN;
c_mtharue1a5ce12017-10-13 20:47:09 +05302561 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302562 }
c_mtharue1a5ce12017-10-13 20:47:09 +05302563 }
2564 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002565 fl->ssrcount = me->channel[cid].ssrcount;
2566 if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
c_mtharue1a5ce12017-10-13 20:47:09 +05302567 (me->channel[cid].chan == NULL)) {
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05302568 VERIFY(err, 0 == fastrpc_glink_register(cid, me));
2569 if (err)
2570 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002571 VERIFY(err, 0 == fastrpc_glink_open(cid));
2572 if (err)
2573 goto bail;
2574
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302575 VERIFY(err,
2576 wait_for_completion_timeout(&me->channel[cid].workport,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002577 RPC_TIMEOUT));
2578 if (err) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302579 me->channel[cid].chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002580 goto bail;
2581 }
2582 kref_init(&me->channel[cid].kref);
2583 pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
2584 MAJOR(me->dev_no), cid);
c_mtharu314a4202017-11-15 22:09:17 +05302585 err = glink_queue_rx_intent(me->channel[cid].chan, NULL,
2586 FASTRPC_GLINK_INTENT_LEN);
2587 err |= glink_queue_rx_intent(me->channel[cid].chan, NULL,
2588 FASTRPC_GLINK_INTENT_LEN);
Bruce Levy34c3c1c2017-07-31 17:08:58 -07002589 if (err)
Tharun Kumar Merugu88ba9252017-08-09 12:15:41 +05302590 pr_warn("adsprpc: initial intent fail for %d err %d\n",
2591 cid, err);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002592 if (me->channel[cid].ssrcount !=
2593 me->channel[cid].prevssrcount) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302594 if (fastrpc_mmap_remove_ssr(fl))
2595 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002596 me->channel[cid].prevssrcount =
2597 me->channel[cid].ssrcount;
2598 }
2599 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002600
2601bail:
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302602 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002603 return err;
2604}
2605
Sathish Ambley36849af2017-02-02 09:35:55 -08002606static int fastrpc_device_open(struct inode *inode, struct file *filp)
2607{
2608 int err = 0;
Sathish Ambley567012b2017-03-06 11:55:04 -08002609 struct dentry *debugfs_file;
c_mtharue1a5ce12017-10-13 20:47:09 +05302610 struct fastrpc_file *fl = NULL;
Sathish Ambley36849af2017-02-02 09:35:55 -08002611 struct fastrpc_apps *me = &gfa;
2612
c_mtharue1a5ce12017-10-13 20:47:09 +05302613 VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
Sathish Ambley36849af2017-02-02 09:35:55 -08002614 if (err)
2615 return err;
Sathish Ambley567012b2017-03-06 11:55:04 -08002616 debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
2617 fl, &debugfs_fops);
Sathish Ambley36849af2017-02-02 09:35:55 -08002618 context_list_ctor(&fl->clst);
2619 spin_lock_init(&fl->hlock);
2620 INIT_HLIST_HEAD(&fl->maps);
2621 INIT_HLIST_HEAD(&fl->bufs);
2622 INIT_HLIST_NODE(&fl->hn);
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302623 fl->sessionid = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08002624 fl->tgid = current->tgid;
2625 fl->apps = me;
2626 fl->mode = FASTRPC_MODE_SERIAL;
2627 fl->cid = -1;
Sathish Ambley567012b2017-03-06 11:55:04 -08002628 if (debugfs_file != NULL)
2629 fl->debugfs_file = debugfs_file;
2630 memset(&fl->perf, 0, sizeof(fl->perf));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302631 fl->qos_request = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08002632 filp->private_data = fl;
2633 spin_lock(&me->hlock);
2634 hlist_add_head(&fl->hn, &me->drivers);
2635 spin_unlock(&me->hlock);
2636 return 0;
2637}
2638
/*
 * fastrpc_get_info() - bind this fd to a channel and report SMMU state.
 * @fl:   per-fd fastrpc file context; must be non-NULL.
 * @info: in: requested channel id; out: 1 if the allocated session has
 *        the SMMU enabled, 0 otherwise.
 *
 * The channel binding happens only on the first call (fl->cid == -1);
 * subsequent calls just report on the already-allocated session.
 * Returns 0 on success or a negative err from VERIFY().
 */
static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
{
	int err = 0;
	uint32_t cid;

	VERIFY(err, fl != NULL);
	if (err)
		goto bail;
	if (fl->cid == -1) {
		cid = *info;
		/* cid is unsigned, so this also rejects "negative" ids */
		VERIFY(err, cid < NUM_CHANNELS);
		if (err)
			goto bail;
		fl->cid = cid;
		fl->ssrcount = fl->apps->channel[cid].ssrcount;
		VERIFY(err, !fastrpc_session_alloc_locked(
				&fl->apps->channel[cid], 0, &fl->sctx));
		if (err)
			goto bail;
	}
	/* Guards the case where the fd was never successfully bound. */
	VERIFY(err, fl->sctx != NULL);
	if (err)
		goto bail;
	*info = (fl->sctx->smmu.enabled ? 1 : 0);
bail:
	return err;
}
2666
/*
 * fastrpc_internal_control() - handle FASTRPC_IOCTL_CONTROL requests.
 * Only FASTRPC_CONTROL_LATENCY is supported: it adds (first use) or
 * updates a PM QoS CPU/DMA latency request for this fd — the DT-derived
 * latency when enabled, PM_QOS_DEFAULT_VALUE when disabled.
 * Returns 0 on success, -ENOTTY for unknown requests, or a negative
 * err from the VERIFY() checks.
 */
static int fastrpc_internal_control(struct fastrpc_file *fl,
				struct fastrpc_ioctl_control *cp)
{
	int err = 0;
	int latency;

	VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
	if (err)
		goto bail;
	VERIFY(err, !IS_ERR_OR_NULL(cp));
	if (err)
		goto bail;

	switch (cp->req) {
	case FASTRPC_CONTROL_LATENCY:
		latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
			fl->apps->latency : PM_QOS_DEFAULT_VALUE;
		/* 0 means no qcom,rpc-latency-us was provided in DT */
		VERIFY(err, latency != 0);
		if (err)
			goto bail;
		if (!fl->qos_request) {
			pm_qos_add_request(&fl->pm_qos_req,
				PM_QOS_CPU_DMA_LATENCY, latency);
			fl->qos_request = 1;
		} else
			pm_qos_update_request(&fl->pm_qos_req, latency);
		break;
	default:
		err = -ENOTTY;
		break;
	}
bail:
	return err;
}
2701
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002702static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
2703 unsigned long ioctl_param)
2704{
2705 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07002706 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002707 struct fastrpc_ioctl_mmap mmap;
2708 struct fastrpc_ioctl_munmap munmap;
c_mtharu7bd6a422017-10-17 18:15:37 +05302709 struct fastrpc_ioctl_munmap_fd munmap_fd;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002710 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002711 struct fastrpc_ioctl_perf perf;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302712 struct fastrpc_ioctl_control cp;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002713 } p;
2714 void *param = (char *)ioctl_param;
2715 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2716 int size = 0, err = 0;
2717 uint32_t info;
2718
c_mtharue1a5ce12017-10-13 20:47:09 +05302719 p.inv.fds = NULL;
2720 p.inv.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002721 p.inv.crc = NULL;
tharun kumar9f899ea2017-07-03 17:07:03 +05302722 spin_lock(&fl->hlock);
2723 if (fl->file_close == 1) {
2724 err = EBADF;
2725 pr_warn("ADSPRPC: fastrpc_device_release is happening, So not sending any new requests to DSP");
2726 spin_unlock(&fl->hlock);
2727 goto bail;
2728 }
2729 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002730
2731 switch (ioctl_num) {
2732 case FASTRPC_IOCTL_INVOKE:
2733 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07002734 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002735 case FASTRPC_IOCTL_INVOKE_FD:
2736 if (!size)
2737 size = sizeof(struct fastrpc_ioctl_invoke_fd);
2738 /* fall through */
2739 case FASTRPC_IOCTL_INVOKE_ATTRS:
2740 if (!size)
2741 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07002742 /* fall through */
2743 case FASTRPC_IOCTL_INVOKE_CRC:
2744 if (!size)
2745 size = sizeof(struct fastrpc_ioctl_invoke_crc);
c_mtharue1a5ce12017-10-13 20:47:09 +05302746 K_COPY_FROM_USER(err, 0, &p.inv, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002747 if (err)
2748 goto bail;
2749 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
2750 0, &p.inv)));
2751 if (err)
2752 goto bail;
2753 break;
2754 case FASTRPC_IOCTL_MMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05302755 K_COPY_FROM_USER(err, 0, &p.mmap, param,
2756 sizeof(p.mmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05302757 if (err)
2758 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002759 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
2760 if (err)
2761 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302762 K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002763 if (err)
2764 goto bail;
2765 break;
2766 case FASTRPC_IOCTL_MUNMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05302767 K_COPY_FROM_USER(err, 0, &p.munmap, param,
2768 sizeof(p.munmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05302769 if (err)
2770 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002771 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
2772 &p.munmap)));
2773 if (err)
2774 goto bail;
2775 break;
c_mtharu7bd6a422017-10-17 18:15:37 +05302776 case FASTRPC_IOCTL_MUNMAP_FD:
2777 K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
2778 sizeof(p.munmap_fd));
2779 if (err)
2780 goto bail;
2781 VERIFY(err, 0 == (err = fastrpc_internal_munmap_fd(fl,
2782 &p.munmap_fd)));
2783 if (err)
2784 goto bail;
2785 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002786 case FASTRPC_IOCTL_SETMODE:
2787 switch ((uint32_t)ioctl_param) {
2788 case FASTRPC_MODE_PARALLEL:
2789 case FASTRPC_MODE_SERIAL:
2790 fl->mode = (uint32_t)ioctl_param;
2791 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002792 case FASTRPC_MODE_PROFILE:
2793 fl->profile = (uint32_t)ioctl_param;
2794 break;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302795 case FASTRPC_MODE_SESSION:
2796 fl->sessionid = 1;
2797 fl->tgid |= (1 << SESSION_ID_INDEX);
2798 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002799 default:
2800 err = -ENOTTY;
2801 break;
2802 }
2803 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002804 case FASTRPC_IOCTL_GETPERF:
c_mtharue1a5ce12017-10-13 20:47:09 +05302805 K_COPY_FROM_USER(err, 0, &p.perf,
2806 param, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002807 if (err)
2808 goto bail;
2809 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
2810 if (p.perf.keys) {
2811 char *keys = PERF_KEYS;
2812
c_mtharue1a5ce12017-10-13 20:47:09 +05302813 K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
2814 keys, strlen(keys)+1);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002815 if (err)
2816 goto bail;
2817 }
2818 if (p.perf.data) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302819 K_COPY_TO_USER(err, 0, (void *)p.perf.data,
2820 &fl->perf, sizeof(fl->perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002821 }
c_mtharue1a5ce12017-10-13 20:47:09 +05302822 K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002823 if (err)
2824 goto bail;
2825 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302826 case FASTRPC_IOCTL_CONTROL:
c_mtharue1a5ce12017-10-13 20:47:09 +05302827 K_COPY_FROM_USER(err, 0, &p.cp, param,
2828 sizeof(p.cp));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302829 if (err)
2830 goto bail;
2831 VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
2832 if (err)
2833 goto bail;
2834 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002835 case FASTRPC_IOCTL_GETINFO:
c_mtharue1a5ce12017-10-13 20:47:09 +05302836 K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
Sathish Ambley36849af2017-02-02 09:35:55 -08002837 if (err)
2838 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002839 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
2840 if (err)
2841 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302842 K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002843 if (err)
2844 goto bail;
2845 break;
2846 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002847 p.init.attrs = 0;
2848 p.init.siglen = 0;
2849 size = sizeof(struct fastrpc_ioctl_init);
2850 /* fall through */
2851 case FASTRPC_IOCTL_INIT_ATTRS:
2852 if (!size)
2853 size = sizeof(struct fastrpc_ioctl_init_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +05302854 K_COPY_FROM_USER(err, 0, &p.init, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002855 if (err)
2856 goto bail;
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05302857 VERIFY(err, p.init.init.filelen >= 0 &&
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05302858 p.init.init.filelen < INIT_FILELEN_MAX);
2859 if (err)
2860 goto bail;
2861 VERIFY(err, p.init.init.memlen >= 0 &&
2862 p.init.init.memlen < INIT_MEMLEN_MAX);
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05302863 if (err)
2864 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302865 VERIFY(err, 0 == (err = fastrpc_init_process(fl, &p.init)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002866 if (err)
2867 goto bail;
2868 break;
2869
2870 default:
2871 err = -ENOTTY;
2872 pr_info("bad ioctl: %d\n", ioctl_num);
2873 break;
2874 }
2875 bail:
2876 return err;
2877}
2878
/*
 * fastrpc_restart_notifier_cb() - subsystem-restart (SSR) notifier.
 * Registered per channel; recovers the channel context from the
 * embedded notifier_block.
 *
 * SUBSYS_BEFORE_SHUTDOWN: bump the SSR count, mark the subsystem down,
 * close the glink channel under smd_mutex, and wake any waiters via
 * fastrpc_notify_drivers().  Channel 0 additionally clears the static
 * PD flags.
 * SUBSYS_RAMDUMP_NOTIFICATION: arm remote-heap ramdump collection on
 * channel 0 if a ramdump device was registered.
 * SUBSYS_AFTER_POWERUP: mark the subsystem as up again.
 */
static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
					unsigned long code,
					void *data)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	struct notif_data *notifdata = data;
	int cid;

	ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
	/* Channel id = index of this ctx inside the channel array. */
	cid = ctx - &me->channel[0];
	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		mutex_lock(&me->smd_mutex);
		ctx->ssrcount++;
		ctx->issubsystemup = 0;
		if (ctx->chan) {
			fastrpc_glink_close(ctx->chan, cid);
			ctx->chan = NULL;
			pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
				 gcinfo[cid].name, MAJOR(me->dev_no), cid);
		}
		mutex_unlock(&me->smd_mutex);
		if (cid == 0)
			me->staticpd_flags = 0;
		fastrpc_notify_drivers(me, cid);
	} else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
		if (me->channel[0].remoteheap_ramdump_dev &&
				notifdata->enable_ramdump) {
			me->channel[0].ramdumpenabled = 1;
		}
	} else if (code == SUBSYS_AFTER_POWERUP) {
		ctx->issubsystemup = 1;
	}

	return NOTIFY_DONE;
}
2915
/*
 * Character-device entry points for the fastrpc device node; 32-bit
 * user space is translated through the compat ioctl layer.
 */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};
2922
/*
 * Device-tree compatibles served by this driver: the top-level ADSP
 * RPC node, the compute node, per-context-bank nodes, and the reserved
 * memory region node.  fastrpc_probe() dispatches on these.
 */
static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};
2930
/*
 * fastrpc_cb_probe() - probe one SMMU context bank sub-device.
 * Matches the DT "label" against a known channel, parses the iommus
 * phandle for the context-bank number, creates and attaches an ARM
 * IOMMU mapping (secure banks get a lower IOVA base and a secure
 * VMID), and records the new session in the channel's session table.
 * Returns 0 on success or a negative err via VERIFY().
 */
static int fastrpc_cb_probe(struct device *dev)
{
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	struct of_phandle_args iommuspec;
	const char *name;
	unsigned int start = 0x80000000;	/* default IOVA window base */
	int err = 0, i;
	int secure_vmid = VMID_CP_PIXEL;

	VERIFY(err, NULL != (name = of_get_property(dev->of_node,
					 "label", NULL)));
	if (err)
		goto bail;
	/* Find the channel whose name matches the DT label. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		if (!strcmp(name, gcinfo[i].name))
			break;
	}
	VERIFY(err, i < NUM_CHANNELS);
	if (err)
		goto bail;
	chan = &gcinfo[i];
	VERIFY(err, chan->sesscount < NUM_SESSIONS);
	if (err)
		goto bail;

	VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
						"#iommu-cells", 0, &iommuspec));
	if (err)
		goto bail;
	sess = &chan->session[chan->sesscount];
	/* Low nibble of the first iommus cell is the context bank id. */
	sess->smmu.cb = iommuspec.args[0] & 0xf;
	sess->used = 0;
	sess->smmu.coherent = of_property_read_bool(dev->of_node,
						"dma-coherent");
	sess->smmu.secure = of_property_read_bool(dev->of_node,
						"qcom,secure-context-bank");
	if (sess->smmu.secure)
		start = 0x60000000;
	/* NOTE(review): assignment inside VERIFY is intentional here. */
	VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
			arm_iommu_create_mapping(&platform_bus_type,
						start, 0x78000000)));
	if (err)
		goto bail;

	if (sess->smmu.secure)
		iommu_domain_set_attr(sess->smmu.mapping->domain,
				DOMAIN_ATTR_SECURE_VMID,
				&secure_vmid);

	VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
	if (err)
		goto bail;
	sess->smmu.dev = dev;
	sess->smmu.enabled = 1;
	chan->sesscount++;
	/* Re-created on every cb probe; last one wins — appears benign. */
	debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
							NULL, &debugfs_fops);
bail:
	return err;
}
2994
/*
 * fastrpc_probe() - platform probe, dispatching on the compatible:
 *  - "qcom,msm-fastrpc-compute": read remote-heap VMID and RPC latency
 *    tunables from DT, then fall through to populate children;
 *  - "qcom,msm-fastrpc-compute-cb": delegate to fastrpc_cb_probe();
 *  - "qcom,msm-adsprpc-mem-region": locate the ADSP ION heap's CMA
 *    region and hyp-assign it to HLOS + the remote Q6 subsystems;
 *  - otherwise: populate child devices from the match table.
 */
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;


	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute")) {
		of_property_read_u32(dev->of_node, "qcom,adsp-remoteheap-vmid",
			&gcinfo[0].rhvmid);

		pr_info("ADSPRPC : vmids adsp=%d\n", gcinfo[0].rhvmid);

		of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
			&me->latency);
	}
	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		/* NOTE(review): nodes from of_find_* are not of_node_put()
		 * here — looks like a refcount leak; confirm before fixing. */
		if (ion_node) {
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
						VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				};

			/* Share the CMA range with the remote subsystems. */
			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
					srcVM, 1, destVM, destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					  fastrpc_match_table,
					  NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}
3069
3070static void fastrpc_deinit(void)
3071{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303072 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003073 struct fastrpc_channel_ctx *chan = gcinfo;
3074 int i, j;
3075
3076 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
3077 if (chan->chan) {
3078 kref_put_mutex(&chan->kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303079 fastrpc_channel_close, &me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05303080 chan->chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003081 }
3082 for (j = 0; j < NUM_SESSIONS; j++) {
3083 struct fastrpc_session_ctx *sess = &chan->session[j];
c_mtharue1a5ce12017-10-13 20:47:09 +05303084 if (sess->smmu.dev) {
3085 arm_iommu_detach_device(sess->smmu.dev);
3086 sess->smmu.dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003087 }
3088 if (sess->smmu.mapping) {
3089 arm_iommu_release_mapping(sess->smmu.mapping);
c_mtharue1a5ce12017-10-13 20:47:09 +05303090 sess->smmu.mapping = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003091 }
3092 }
3093 }
3094}
3095
/*
 * Platform driver registration; fastrpc_probe() fans out per
 * compatible string.  No .remove — teardown happens in module exit.
 */
static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};
3104
/*
 * fastrpc_device_init() - module init.
 * Registers the platform driver, allocates the char-device region and
 * cdev, creates the device class/node, registers an SSR notifier per
 * channel, creates the ION client and the debugfs root.  Errors unwind
 * through the labelled ladder in reverse order of acquisition.
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	struct device *dev = NULL;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				1));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	/* A single device node (minor 0) is shared by all channels. */
	dev = device_create(me->class, NULL,
				MKDEV(MAJOR(me->dev_no), 0),
				NULL, gcinfo[0].name);
	VERIFY(err, !IS_ERR_OR_NULL(dev));
	if (err)
		goto device_create_bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		me->channel[i].dev = dev;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].issubsystemup = 1;
		me->channel[i].ramdumpenabled = 0;
		me->channel[i].remoteheap_ramdump_dev = NULL;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	debugfs_root = debugfs_create_dir("adsprpc", NULL);
	return 0;
	/* NOTE(review): none of the error paths unregister the platform
	 * driver registered above — verify whether that is intended. */
device_create_bail:
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (me->channel[i].handle)
			subsys_notif_unregister_notifier(me->channel[i].handle,
							&me->channel[i].nb);
	}
	if (!IS_ERR_OR_NULL(dev))
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}
3176
3177static void __exit fastrpc_device_exit(void)
3178{
3179 struct fastrpc_apps *me = &gfa;
3180 int i;
3181
3182 fastrpc_file_list_dtor(me);
3183 fastrpc_deinit();
3184 for (i = 0; i < NUM_CHANNELS; i++) {
3185 if (!gcinfo[i].name)
3186 continue;
3187 device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
3188 subsys_notif_unregister_notifier(me->channel[i].handle,
3189 &me->channel[i].nb);
3190 }
3191 class_destroy(me->class);
3192 cdev_del(&me->cdev);
3193 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
3194 ion_client_destroy(me->client);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003195 debugfs_remove_recursive(debugfs_root);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003196}
3197
/* late_initcall: the driver depends on glink, ION and the subsystem
 * restart framework being initialized first. */
late_initcall(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");