blob: 2a73e13fcc6dc10d2fbc39025a3c0d7d33e997b2 [file] [log] [blame]
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001/*
Sathish Ambleyae5ee542017-01-16 22:24:23 -08002 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
29#include <soc/qcom/subsystem_notif.h>
30#include <soc/qcom/subsystem_restart.h>
31#include <linux/scatterlist.h>
32#include <linux/fs.h>
33#include <linux/uaccess.h>
34#include <linux/device.h>
35#include <linux/of.h>
36#include <linux/of_address.h>
37#include <linux/of_platform.h>
38#include <linux/dma-contiguous.h>
39#include <linux/cma.h>
40#include <linux/iommu.h>
41#include <linux/kref.h>
42#include <linux/sort.h>
43#include <linux/msm_dma_iommu_mapping.h>
44#include <asm/dma-iommu.h>
c_mtharue1a5ce12017-10-13 20:47:09 +053045#include <soc/qcom/scm.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070046#include "adsprpc_compat.h"
47#include "adsprpc_shared.h"
c_mtharue1a5ce12017-10-13 20:47:09 +053048#include <soc/qcom/ramdump.h>
Sathish Ambley1ca68232017-01-19 10:32:55 -080049#include <linux/debugfs.h>
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053050#include <linux/pm_qos.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070051#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
52#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
53#define TZ_PIL_AUTH_QDSP6_PROC 1
c_mtharue1a5ce12017-10-13 20:47:09 +053054#define ADSP_MMAP_HEAP_ADDR 4
55#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
Sathish Ambley69e1ab02016-10-18 10:28:15 -070056#define FASTRPC_ENOSUCH 39
57#define VMID_SSC_Q6 5
58#define VMID_ADSP_Q6 6
Sathish Ambley1ca68232017-01-19 10:32:55 -080059#define DEBUGFS_SIZE 1024
Sathish Ambley69e1ab02016-10-18 10:28:15 -070060
61#define RPC_TIMEOUT (5 * HZ)
62#define BALIGN 128
63#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
64#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
Sathish Ambleybae51902017-07-03 15:00:49 -070065#define M_FDLIST (16)
66#define M_CRCLIST (64)
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +053067#define SESSION_ID_INDEX (30)
c_mtharufdac6892017-10-12 13:09:01 +053068#define FASTRPC_CTX_MAGIC (0xbeeddeed)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070069
70#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
71
72#define FASTRPC_LINK_STATE_DOWN (0x0)
73#define FASTRPC_LINK_STATE_UP (0x1)
74#define FASTRPC_LINK_DISCONNECTED (0x0)
75#define FASTRPC_LINK_CONNECTING (0x1)
76#define FASTRPC_LINK_CONNECTED (0x3)
77#define FASTRPC_LINK_DISCONNECTING (0x7)
c_mtharu314a4202017-11-15 22:09:17 +053078#define FASTRPC_LINK_REMOTE_DISCONNECTING (0x8)
79#define FASTRPC_GLINK_INTENT_LEN (64)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070080
Sathish Ambleya21b5b52017-01-11 16:11:01 -080081#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
82#define FASTRPC_STATIC_HANDLE_LISTENER (3)
83#define FASTRPC_STATIC_HANDLE_MAX (20)
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053084#define FASTRPC_LATENCY_CTRL_ENB (1)
Sathish Ambleya21b5b52017-01-11 16:11:01 -080085
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +053086#define INIT_FILELEN_MAX (2*1024*1024)
87#define INIT_MEMLEN_MAX (8*1024*1024)
88
Sathish Ambleya21b5b52017-01-11 16:11:01 -080089#define PERF_END (void)0
90
91#define PERF(enb, cnt, ff) \
92 {\
93 struct timespec startT = {0};\
94 if (enb) {\
95 getnstimeofday(&startT);\
96 } \
97 ff ;\
98 if (enb) {\
99 cnt += getnstimediff(&startT);\
100 } \
101 }
102
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700103static int fastrpc_glink_open(int cid);
104static void fastrpc_glink_close(void *chan, int cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -0800105static struct dentry *debugfs_root;
106static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700107
108static inline uint64_t buf_page_start(uint64_t buf)
109{
110 uint64_t start = (uint64_t) buf & PAGE_MASK;
111 return start;
112}
113
114static inline uint64_t buf_page_offset(uint64_t buf)
115{
116 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
117 return offset;
118}
119
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530120static inline uint64_t buf_num_pages(uint64_t buf, size_t len)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700121{
122 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
123 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530124 uint64_t nPages = end - start + 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700125 return nPages;
126}
127
128static inline uint64_t buf_page_size(uint32_t size)
129{
130 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
131
132 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
133}
134
/* Convert a 64-bit value into a kernel pointer via uintptr_t. */
static inline void *uint64_to_ptr(uint64_t addr)
{
	return (void *)(uintptr_t)addr;
}
141
/* Convert a kernel pointer into a 64-bit value via uintptr_t. */
static inline uint64_t ptr_to_uint64(void *ptr)
{
	return (uint64_t)(uintptr_t)ptr;
}
148
149struct fastrpc_file;
150
/* One DMA-coherent buffer owned by a client file (see fastrpc_buf_alloc). */
struct fastrpc_buf {
	struct hlist_node hn;		/* link in fl->bufs free-list cache */
	struct fastrpc_file *fl;	/* owning client */
	void *virt;			/* kernel VA from dma_alloc_coherent() */
	uint64_t phys;			/* bus address; SMMU cb id in bits 32+ */
	size_t size;			/* allocation size in bytes */
};
158
159struct fastrpc_ctx_lst;
160
/*
 * One invoke buffer's address range plus, after context_build_overlap(),
 * the sub-range it actually contributes once overlap with earlier
 * (lower-addressed) buffers has been folded out.
 */
struct overlap {
	uintptr_t start;	/* buffer start (user VA) */
	uintptr_t end;		/* buffer end (start + len) */
	int raix;		/* index of this argument in the arg list */
	uintptr_t mstart;	/* start of the non-overlapped portion (0 if fully covered) */
	uintptr_t mend;		/* end of the non-overlapped portion (0 if fully covered) */
	uintptr_t offset;	/* mstart - start */
};
169
/*
 * State for one in-flight remote invocation.  Allocated in context_alloc()
 * as one kzalloc of the struct plus trailing per-argument arrays (maps,
 * lpra, fds, attrs, overs, overps) and linked on the owning file's
 * pending/interrupted lists.
 */
struct smq_invoke_ctx {
	struct hlist_node hn;		/* link in fl->clst.pending or .interrupted */
	struct completion work;
	int retval;			/* remote return value; starts at -1 */
	int pid;
	int tgid;
	remote_arg_t *lpra;		/* local copy of the caller's args */
	remote_arg64_t *rpra;		/* 64-bit arg view for the remote side */
	int *fds;			/* per-arg dma-buf fds (optional) */
	unsigned int *attrs;		/* per-arg FASTRPC_ATTR_* flags (optional) */
	struct fastrpc_mmap **maps;	/* per-arg mappings */
	struct fastrpc_buf *buf;
	size_t used;
	struct fastrpc_file *fl;
	uint32_t sc;			/* scalars word (in/out buffer counts) */
	struct overlap *overs;		/* per-buffer overlap records */
	struct overlap **overps;	/* same records, sorted by start address */
	struct smq_msg msg;
	uint32_t *crc;			/* user CRC-list pointer, may be NULL */
	unsigned int magic;		/* presumably FASTRPC_CTX_MAGIC while live — set outside this view */
};
191
/* Per-file lists of invoke contexts. */
struct fastrpc_ctx_lst {
	struct hlist_head pending;	/* contexts currently being processed */
	struct hlist_head interrupted;	/* contexts left behind by interrupted calls */
};
196
/* SMMU (IOMMU) state for one session / context bank. */
struct fastrpc_smmu {
	struct device *dev;		/* device used for DMA mapping/allocation */
	struct dma_iommu_mapping *mapping;
	int cb;				/* context bank id; OR-ed into bits 32+ of phys addrs */
	int enabled;			/* nonzero when SMMU translation is used */
	int faults;
	int secure;			/* session for secure (ION_FLAG_SECURE) buffers */
	int coherent;			/* IO-coherent session */
};
206
/* One of a channel's NUM_SESSIONS session slots. */
struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;	/* SMMU state backing this session */
	int used;			/* slot currently claimed by a client */
};
212
/* G-Link transport state for one channel. */
struct fastrpc_glink_info {
	int link_state;			/* FASTRPC_LINK_STATE_UP / _DOWN */
	int port_state;			/* FASTRPC_LINK_CONNECTED / _CONNECTING / ... */
	struct glink_open_config cfg;
	struct glink_link_info link_info;	/* edge/transport, see gcinfo[] */
	void *link_notify_handle;
};
220
/* Per-DSP channel context (one gcinfo[] entry): transport, sessions, SSR. */
struct fastrpc_channel_ctx {
	char *name;			/* device node name, e.g. "adsprpc-smd" */
	char *subsys;			/* subsystem-restart name, e.g. "adsp" */
	void *chan;			/* glink channel handle */
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct completion work;
	struct completion workport;
	struct notifier_block nb;
	struct kref kref;		/* channel open refcount */
	int sesscount;
	int ssrcount;			/* subsystem-restart counter */
	void *handle;
	int prevssrcount;		/* ssrcount value seen at last setup */
	int issubsystemup;
	int vmid;			/* remote VM id for hyp_assign_phys(), 0 if none */
	int rhvmid;			/* presumably remote-heap VM id — confirm at users */
	int ramdumpenabled;
	void *remoteheap_ramdump_dev;
	struct fastrpc_glink_info link;	/* glink link/port state */
};
242
/* Global driver state; the single instance is the file-scope gfa. */
struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;	/* channel table, indexed by cid */
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;		/* global maps (ADSP_MMAP_*HEAP_ADDR) */
	uint32_t staticpd_flags;
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;	/* presumably open fastrpc_file instances — linkage not in this view */
	spinlock_t hlock;		/* guards maps and drivers lists */
	struct ion_client *client;	/* ion client used for buffer import */
	struct device *dev;		/* "adsprpc-mem" device for remote-heap DMA */
	unsigned int latency;		/* PM QoS latency value (FASTRPC_LATENCY_CTRL_ENB) */
};
259
/* One mapped buffer: an imported ion/dma-buf, or remote-heap DMA memory. */
struct fastrpc_mmap {
	struct hlist_node hn;		/* on fl->maps, or gfa.maps for heap maps */
	struct fastrpc_file *fl;	/* owner; NULL for global heap maps */
	struct fastrpc_apps *apps;	/* set (to &gfa) for heap maps */
	int fd;				/* user dma-buf fd the map was created from */
	uint32_t flags;			/* ADSP_MMAP_* mapping flags */
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;			/* dma address; SMMU cb id encoded in bits 32+ */
	size_t size;			/* mapped size (sg dma length or page-rounded len) */
	uintptr_t va;			/* user VA (equals phys for heap maps) */
	size_t len;			/* requested length */
	int refs;			/* lookup refcount; torn down when it reaches 0 */
	uintptr_t raddr;		/* remote-side address, matched by fastrpc_mmap_remove() */
	int uncached;
	int secure;			/* backed by ION_FLAG_SECURE memory */
	uintptr_t attr;			/* FASTRPC_ATTR_* buffer attributes */
};
280
/*
 * Per-file profiling accumulators, in nanoseconds (filled via the PERF()
 * macro and getnstimediff()); field order matches PERF_KEYS.
 */
struct fastrpc_perf {
	int64_t count;
	int64_t flush;
	int64_t map;
	int64_t copy;
	int64_t link;
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;
	int64_t invoke;
};
292
/* Per-open-fd client state. */
struct fastrpc_file {
	struct hlist_node hn;		/* presumably link in gfa.drivers — linkage not in this view */
	spinlock_t hlock;		/* guards maps, bufs and context lists */
	struct hlist_head maps;		/* this client's fastrpc_mmap list */
	struct hlist_head bufs;		/* cached fastrpc_buf free list */
	struct fastrpc_ctx_lst clst;	/* pending/interrupted invoke contexts */
	struct fastrpc_session_ctx *sctx;	/* default SMMU session */
	struct fastrpc_session_ctx *secsctx;	/* secure-buffer session, allocated lazily */
	uint32_t mode;
	uint32_t profile;		/* nonzero enables perf accounting */
	int sessionid;
	int tgid;
	int cid;			/* channel id, index into apps->channel[] */
	int ssrcount;			/* channel ssrcount snapshot for this client */
	int pd;
	int file_close;
	struct fastrpc_apps *apps;
	struct fastrpc_perf perf;	/* per-stage timing accumulators */
	struct dentry *debugfs_file;
	struct pm_qos_request pm_qos_req;
	int qos_request;		/* pm_qos_req has been registered */
};
315
/* Singleton driver state shared by all clients. */
static struct fastrpc_apps gfa;

/*
 * Static channel table, indexed by channel id (cid); order matches the
 * NUM_CHANNELS comment: adsp, mdsp, slpi, cdsp.
 */
static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};
344
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800345static inline int64_t getnstimediff(struct timespec *start)
346{
347 int64_t ns;
348 struct timespec ts, b;
349
350 getnstimeofday(&ts);
351 b = timespec_sub(ts, *start);
352 ns = timespec_to_ns(&b);
353 return ns;
354}
355
/*
 * Release a DMA buffer.  With @cache set, the buffer is parked on the
 * owner's free list for reuse by fastrpc_buf_alloc().  Otherwise any
 * hypervisor assignment to the channel VM is undone and the coherent
 * memory is returned before the descriptor itself is kfree'd.
 */
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		/* park on fl->bufs so a later allocation can reuse it */
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		/* strip the SMMU context-bank id encoded in bits 32+ */
		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			/* give the pages back to HLOS before freeing them */
			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->smmu.dev, buf->size, buf->virt,
					buf->phys);
	}
	kfree(buf);
}
387
/*
 * Drain and free every cached buffer on fl->bufs.  Each buffer is
 * unlinked under the lock but freed outside it, one per iteration,
 * until the list is empty.
 */
static void fastrpc_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			hlist_del_init(&buf->hn);
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}
407
/*
 * Publish a new map: remote-heap maps go on the global gfa.maps list,
 * everything else on the owning file's list, each under its own lock.
 */
static void fastrpc_mmap_add(struct fastrpc_mmap *map)
{
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		struct fastrpc_apps *me = &gfa;

		spin_lock(&me->hlock);
		hlist_add_head(&map->hn, &me->maps);
		spin_unlock(&me->hlock);
	} else {
		struct fastrpc_file *fl = map->fl;

		spin_lock(&fl->hlock);
		hlist_add_head(&map->hn, &fl->maps);
		spin_unlock(&fl->hlock);
	}
}
425
/*
 * Look up an existing map created from @fd that covers [va, va + len).
 * Heap-flagged lookups search the global list, others the per-file
 * list.  When @refs is set, the match's refcount is bumped under the
 * list lock.  Returns 0 with *ppmap set on success, -ENOTTY when no
 * match exists, -EOVERFLOW if va + len wraps.
 */
static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
	uintptr_t va, size_t len, int mflags, int refs,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_mmap *match = NULL, *map = NULL;
	struct hlist_node *n;

	if ((va + len) < va)
		return -EOVERFLOW;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs)
					map->refs++;
				match = map;
				break;
			}
		}
		spin_unlock(&me->hlock);
	} else {
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs)
					map->refs++;
				match = map;
				break;
			}
		}
		spin_unlock(&fl->hlock);
	}
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}
470
/*
 * Allocate @size bytes of DMA-coherent memory from the dedicated
 * "adsprpc-mem" device for remote-heap mappings; the bus address is
 * returned through @region_start.
 *
 * NOTE(review): the kernel virtual address from dma_alloc_coherent()
 * is discarded here, so callers only ever hold the bus address —
 * verify against how fastrpc_mmap_free() frees these regions.
 */
static int dma_alloc_memory(phys_addr_t *region_start, size_t size)
{
	struct fastrpc_apps *me = &gfa;
	void *vaddr = NULL;

	if (me->dev == NULL) {
		pr_err("device adsprpc-mem is not initialized\n");
		return -ENODEV;
	}
	vaddr = dma_alloc_coherent(me->dev, size, region_start, GFP_KERNEL);
	if (!vaddr) {
		pr_err("ADSPRPC: Failed to allocate %x remote heap memory\n",
						(unsigned int)size);
		return -ENOMEM;
	}
	return 0;
}
488
/*
 * Unlink the map whose remote address span is exactly [va, va + len)
 * and which holds its final reference (refs == 1).  The global list is
 * searched first, then the per-file list.  The match is removed from
 * its list but not freed; the caller takes ownership via *ppmap.
 * Returns -ENOTTY when no such map exists.
 */
static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			size_t len, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = NULL, *map;
	struct hlist_node *n;
	struct fastrpc_apps *me = &gfa;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}
528
/*
 * Drop a reference on @map and, once unreferenced (or when nonzero
 * @flags forces it for per-file maps), tear it down: remote-heap
 * regions are handed back to the DMA device, while imported buffers
 * are unmapped from the SMMU, un-assigned from the remote VM, and
 * released back to ion/dma-buf.
 */
static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;

	if (!map)
		return;
	fl = map->fl;
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		spin_unlock(&me->hlock);
		if (map->refs > 0)
			return;
	} else {
		spin_lock(&fl->hlock);
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		spin_unlock(&fl->hlock);
		/* nonzero @flags frees even with references outstanding */
		if (map->refs > 0 && !flags)
			return;
	}
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {

		if (me->dev == NULL) {
			pr_err("failed to free remote heap allocation\n");
			return;
		}
		if (map->phys) {
			/*
			 * NOTE(review): the third argument should be the
			 * region's kernel virtual address, but &(map->va)
			 * is the address of the field itself (and map->va
			 * holds the phys address for heap maps, see
			 * fastrpc_mmap_create) — verify.
			 */
			dma_free_coherent(me->dev, map->size,
				&(map->va), map->phys);
		}
	} else {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		if (!IS_ERR_OR_NULL(map->handle))
			ion_free(fl->apps->client, map->handle);
		if (sess && sess->smmu.enabled) {
			if (map->size || map->phys)
				msm_dma_unmap_sg(sess->smmu.dev,
					map->table->sgl,
					map->table->nents, DMA_BIDIRECTIONAL,
					map->buf);
		}
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid && map->phys) {
			int srcVM[2] = {VMID_HLOS, vmid};

			/* return the pages to HLOS-only ownership */
			hyp_assign_phys(map->phys, buf_page_size(map->size),
				srcVM, 2, destVM, destVMperm, 1);
		}

		if (!IS_ERR_OR_NULL(map->table))
			dma_buf_unmap_attachment(map->attach, map->table,
					DMA_BIDIRECTIONAL);
		if (!IS_ERR_OR_NULL(map->attach))
			dma_buf_detach(map->buf, map->attach);
		if (!IS_ERR_OR_NULL(map->buf))
			dma_buf_put(map->buf);
	}
	kfree(map);
}
604
605static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
606 struct fastrpc_session_ctx **session);
607
608static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530609 unsigned int attr, uintptr_t va, size_t len, int mflags,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700610 struct fastrpc_mmap **ppmap)
611{
c_mtharue1a5ce12017-10-13 20:47:09 +0530612 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700613 struct fastrpc_session_ctx *sess;
614 struct fastrpc_apps *apps = fl->apps;
615 int cid = fl->cid;
616 struct fastrpc_channel_ctx *chan = &apps->channel[cid];
c_mtharue1a5ce12017-10-13 20:47:09 +0530617 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700618 unsigned long attrs;
c_mtharue1a5ce12017-10-13 20:47:09 +0530619 phys_addr_t region_start = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700620 unsigned long flags;
621 int err = 0, vmid;
622
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800623 if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700624 return 0;
625 map = kzalloc(sizeof(*map), GFP_KERNEL);
626 VERIFY(err, !IS_ERR_OR_NULL(map));
627 if (err)
628 goto bail;
629 INIT_HLIST_NODE(&map->hn);
630 map->flags = mflags;
631 map->refs = 1;
632 map->fl = fl;
633 map->fd = fd;
634 map->attr = attr;
c_mtharue1a5ce12017-10-13 20:47:09 +0530635 if (mflags == ADSP_MMAP_HEAP_ADDR ||
636 mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
637 map->apps = me;
638 map->fl = NULL;
639 VERIFY(err, !dma_alloc_memory(&region_start, len));
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700640 if (err)
641 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +0530642 map->phys = (uintptr_t)region_start;
643 map->size = len;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530644 map->va = (uintptr_t)map->phys;
c_mtharue1a5ce12017-10-13 20:47:09 +0530645 } else {
c_mtharu7bd6a422017-10-17 18:15:37 +0530646 if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
647 pr_info("adsprpc: buffer mapped with persist attr %x\n",
648 (unsigned int)map->attr);
649 map->refs = 2;
650 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530651 VERIFY(err, !IS_ERR_OR_NULL(map->handle =
652 ion_import_dma_buf_fd(fl->apps->client, fd)));
653 if (err)
654 goto bail;
655 VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
656 &flags));
657 if (err)
658 goto bail;
659
c_mtharue1a5ce12017-10-13 20:47:09 +0530660 map->secure = flags & ION_FLAG_SECURE;
661 if (map->secure) {
662 if (!fl->secsctx)
663 err = fastrpc_session_alloc(chan, 1,
664 &fl->secsctx);
665 if (err)
666 goto bail;
667 }
668 if (map->secure)
669 sess = fl->secsctx;
670 else
671 sess = fl->sctx;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +0530672
c_mtharue1a5ce12017-10-13 20:47:09 +0530673 VERIFY(err, !IS_ERR_OR_NULL(sess));
674 if (err)
675 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +0530676
677 map->uncached = !ION_IS_CACHED(flags);
678 if (map->attr & FASTRPC_ATTR_NOVA && !sess->smmu.coherent)
679 map->uncached = 1;
680
c_mtharue1a5ce12017-10-13 20:47:09 +0530681 VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
682 if (err)
683 goto bail;
684 VERIFY(err, !IS_ERR_OR_NULL(map->attach =
685 dma_buf_attach(map->buf, sess->smmu.dev)));
686 if (err)
687 goto bail;
688 VERIFY(err, !IS_ERR_OR_NULL(map->table =
689 dma_buf_map_attachment(map->attach,
690 DMA_BIDIRECTIONAL)));
691 if (err)
692 goto bail;
693 if (sess->smmu.enabled) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700694 attrs = DMA_ATTR_EXEC_MAPPING;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +0530695
696 if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
697 (sess->smmu.coherent && map->uncached))
698 attrs |= DMA_ATTR_FORCE_NON_COHERENT;
699 else if (map->attr & FASTRPC_ATTR_COHERENT)
700 attrs |= DMA_ATTR_FORCE_COHERENT;
701
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700702 VERIFY(err, map->table->nents ==
c_mtharue1a5ce12017-10-13 20:47:09 +0530703 msm_dma_map_sg_attrs(sess->smmu.dev,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700704 map->table->sgl, map->table->nents,
705 DMA_BIDIRECTIONAL, map->buf, attrs));
c_mtharue1a5ce12017-10-13 20:47:09 +0530706 if (err)
707 goto bail;
708 } else {
709 VERIFY(err, map->table->nents == 1);
710 if (err)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700711 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +0530712 }
713 map->phys = sg_dma_address(map->table->sgl);
714 if (sess->smmu.cb) {
715 map->phys += ((uint64_t)sess->smmu.cb << 32);
716 map->size = sg_dma_len(map->table->sgl);
717 } else {
718 map->size = buf_page_size(len);
719 }
720 vmid = fl->apps->channel[fl->cid].vmid;
721 if (vmid) {
722 int srcVM[1] = {VMID_HLOS};
723 int destVM[2] = {VMID_HLOS, vmid};
724 int destVMperm[2] = {PERM_READ | PERM_WRITE,
725 PERM_READ | PERM_WRITE | PERM_EXEC};
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700726
c_mtharue1a5ce12017-10-13 20:47:09 +0530727 VERIFY(err, !hyp_assign_phys(map->phys,
728 buf_page_size(map->size),
729 srcVM, 1, destVM, destVMperm, 2));
730 if (err)
731 goto bail;
732 }
733 map->va = va;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700734 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700735 map->len = len;
736
737 fastrpc_mmap_add(map);
738 *ppmap = map;
739
740bail:
741 if (err && map)
c_mtharu7bd6a422017-10-17 18:15:37 +0530742 fastrpc_mmap_free(map, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700743 return err;
744}
745
/*
 * Get a DMA buffer of at least @size bytes for @fl: reuse the smallest
 * suitable cached buffer from fl->bufs if any, otherwise allocate
 * coherent memory (retrying once after flushing the cache), tag it
 * with the SMMU context bank, and hyp-assign it to the channel VM when
 * one is configured.  Returns the buffer in *obuf.
 */
static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
			struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = NULL, *fr = NULL;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = NULL;
	VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
						(void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
						(void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	/* encode the context-bank id in bits 32+ of the address */
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

		/* share the pages with the remote VM */
		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
 bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}
811
812
/*
 * If the calling thread has a previously interrupted invocation, move
 * it back onto the pending list and hand it out via *po so the retried
 * invoke resumes instead of starting over.  Returns -1 when a stored
 * context for this pid does not match the retry's scalars or file.
 */
static int context_restore_interrupted(struct fastrpc_file *fl,
				struct fastrpc_ioctl_invoke_crc *inv,
				struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			/* the retry must match the interrupted call exactly */
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}
840
841#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
842static int overlap_ptr_cmp(const void *a, const void *b)
843{
844 struct overlap *pa = *((struct overlap **)a);
845 struct overlap *pb = *((struct overlap **)b);
846 /* sort with lowest starting buffer first */
847 int st = CMP(pa->start, pb->start);
848 /* sort with highest ending buffer first */
849 int ed = CMP(pb->end, pa->end);
850 return st == 0 ? ed : st;
851}
852
/*
 * Compute, for every in/out buffer of the call, the sub-range not
 * already covered by a lower-addressed buffer.  Buffers are sorted by
 * start address (ties: widest first) and a running maximum end tracks
 * covered address space; mstart/mend record the portion each buffer
 * actually contributes (both 0 when it is fully contained in an
 * earlier one) and offset is how far mstart sits past the buffer
 * start.  Fails when a non-empty buffer's range wraps or inverts.
 */
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			/* reject wrapping/inverted address ranges */
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			/* overlaps covered space: keep only the tail */
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				/* fully contained in an earlier buffer */
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			/* disjoint: contributes its whole range */
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}
898
/*
 * Copy helpers shared by the user-space and in-kernel invoke paths:
 * for kernel callers the data is already in kernel space, so a plain
 * memmove replaces copy_from_user/copy_to_user.
 */
#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst),\
			(void const __user *)(src),\
			(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((void __user *)(dst), \
			(src), (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)
917
918
919static void context_free(struct smq_invoke_ctx *ctx);
920
921static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -0700922 struct fastrpc_ioctl_invoke_crc *invokefd,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700923 struct smq_invoke_ctx **po)
924{
925 int err = 0, bufs, size = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +0530926 struct smq_invoke_ctx *ctx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700927 struct fastrpc_ctx_lst *clst = &fl->clst;
928 struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;
929
930 bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
931 size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
932 sizeof(*ctx->fds) * (bufs) +
933 sizeof(*ctx->attrs) * (bufs) +
934 sizeof(*ctx->overs) * (bufs) +
935 sizeof(*ctx->overps) * (bufs);
936
c_mtharue1a5ce12017-10-13 20:47:09 +0530937 VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700938 if (err)
939 goto bail;
940
941 INIT_HLIST_NODE(&ctx->hn);
942 hlist_add_fake(&ctx->hn);
943 ctx->fl = fl;
944 ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
945 ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
946 ctx->fds = (int *)(&ctx->lpra[bufs]);
947 ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
948 ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
949 ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
950
c_mtharue1a5ce12017-10-13 20:47:09 +0530951 K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700952 bufs * sizeof(*ctx->lpra));
953 if (err)
954 goto bail;
955
956 if (invokefd->fds) {
957 K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
958 bufs * sizeof(*ctx->fds));
959 if (err)
960 goto bail;
961 }
962 if (invokefd->attrs) {
963 K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
964 bufs * sizeof(*ctx->attrs));
965 if (err)
966 goto bail;
967 }
Sathish Ambleybae51902017-07-03 15:00:49 -0700968 ctx->crc = (uint32_t *)invokefd->crc;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700969 ctx->sc = invoke->sc;
Sathish Ambley9466d672017-01-25 10:51:55 -0800970 if (bufs) {
971 VERIFY(err, 0 == context_build_overlap(ctx));
972 if (err)
973 goto bail;
974 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700975 ctx->retval = -1;
976 ctx->pid = current->pid;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +0530977 ctx->tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700978 init_completion(&ctx->work);
c_mtharufdac6892017-10-12 13:09:01 +0530979 ctx->magic = FASTRPC_CTX_MAGIC;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700980
981 spin_lock(&fl->hlock);
982 hlist_add_head(&ctx->hn, &clst->pending);
983 spin_unlock(&fl->hlock);
984
985 *po = ctx;
986bail:
987 if (ctx && err)
988 context_free(ctx);
989 return err;
990}
991
992static void context_save_interrupted(struct smq_invoke_ctx *ctx)
993{
994 struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
995
996 spin_lock(&ctx->fl->hlock);
997 hlist_del_init(&ctx->hn);
998 hlist_add_head(&ctx->hn, &clst->interrupted);
999 spin_unlock(&ctx->fl->hlock);
1000 /* free the cache on power collapse */
1001 fastrpc_buf_list_free(ctx->fl);
1002}
1003
1004static void context_free(struct smq_invoke_ctx *ctx)
1005{
1006 int i;
1007 int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
1008 REMOTE_SCALARS_OUTBUFS(ctx->sc);
1009 spin_lock(&ctx->fl->hlock);
1010 hlist_del_init(&ctx->hn);
1011 spin_unlock(&ctx->fl->hlock);
1012 for (i = 0; i < nbufs; ++i)
c_mtharu7bd6a422017-10-17 18:15:37 +05301013 fastrpc_mmap_free(ctx->maps[i], 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001014 fastrpc_buf_free(ctx->buf, 1);
c_mtharufdac6892017-10-12 13:09:01 +05301015 ctx->magic = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001016 kfree(ctx);
1017}
1018
1019static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
1020{
1021 ctx->retval = retval;
1022 complete(&ctx->work);
1023}
1024
1025
1026static void fastrpc_notify_users(struct fastrpc_file *me)
1027{
1028 struct smq_invoke_ctx *ictx;
1029 struct hlist_node *n;
1030
1031 spin_lock(&me->hlock);
1032 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1033 complete(&ictx->work);
1034 }
1035 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1036 complete(&ictx->work);
1037 }
1038 spin_unlock(&me->hlock);
1039
1040}
1041
1042static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
1043{
1044 struct fastrpc_file *fl;
1045 struct hlist_node *n;
1046
1047 spin_lock(&me->hlock);
1048 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1049 if (fl->cid == cid)
1050 fastrpc_notify_users(fl);
1051 }
1052 spin_unlock(&me->hlock);
1053
1054}
1055static void context_list_ctor(struct fastrpc_ctx_lst *me)
1056{
1057 INIT_HLIST_HEAD(&me->interrupted);
1058 INIT_HLIST_HEAD(&me->pending);
1059}
1060
1061static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
1062{
1063 struct fastrpc_ctx_lst *clst = &fl->clst;
c_mtharue1a5ce12017-10-13 20:47:09 +05301064 struct smq_invoke_ctx *ictx = NULL, *ctxfree;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001065 struct hlist_node *n;
1066
1067 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301068 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001069 spin_lock(&fl->hlock);
1070 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
1071 hlist_del_init(&ictx->hn);
1072 ctxfree = ictx;
1073 break;
1074 }
1075 spin_unlock(&fl->hlock);
1076 if (ctxfree)
1077 context_free(ctxfree);
1078 } while (ctxfree);
1079 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301080 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001081 spin_lock(&fl->hlock);
1082 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
1083 hlist_del_init(&ictx->hn);
1084 ctxfree = ictx;
1085 break;
1086 }
1087 spin_unlock(&fl->hlock);
1088 if (ctxfree)
1089 context_free(ctxfree);
1090 } while (ctxfree);
1091}
1092
1093static int fastrpc_file_free(struct fastrpc_file *fl);
1094static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
1095{
1096 struct fastrpc_file *fl, *free;
1097 struct hlist_node *n;
1098
1099 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301100 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001101 spin_lock(&me->hlock);
1102 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1103 hlist_del_init(&fl->hn);
1104 free = fl;
1105 break;
1106 }
1107 spin_unlock(&me->hlock);
1108 if (free)
1109 fastrpc_file_free(free);
1110 } while (free);
1111}
1112
1113static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
1114{
1115 remote_arg64_t *rpra;
1116 remote_arg_t *lpra = ctx->lpra;
1117 struct smq_invoke_buf *list;
1118 struct smq_phy_page *pages, *ipage;
1119 uint32_t sc = ctx->sc;
1120 int inbufs = REMOTE_SCALARS_INBUFS(sc);
1121 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001122 int handles, bufs = inbufs + outbufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001123 uintptr_t args;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301124 size_t rlen = 0, copylen = 0, metalen = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001125 int i, oix;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001126 int err = 0;
1127 int mflags = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001128 uint64_t *fdlist;
Sathish Ambleybae51902017-07-03 15:00:49 -07001129 uint32_t *crclist;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001130
1131 /* calculate size of the metadata */
c_mtharue1a5ce12017-10-13 20:47:09 +05301132 rpra = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001133 list = smq_invoke_buf_start(rpra, sc);
1134 pages = smq_phy_page_start(sc, list);
1135 ipage = pages;
1136
1137 for (i = 0; i < bufs; ++i) {
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301138 uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
1139 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001140
1141 if (ctx->fds[i] && (ctx->fds[i] != -1))
1142 fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1143 ctx->attrs[i], buf, len,
1144 mflags, &ctx->maps[i]);
1145 ipage += 1;
1146 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001147 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1148 for (i = bufs; i < bufs + handles; i++) {
1149 VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1150 FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
1151 if (err)
1152 goto bail;
1153 ipage += 1;
1154 }
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301155 metalen = copylen = (size_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
Sathish Ambleybae51902017-07-03 15:00:49 -07001156 (sizeof(uint32_t) * M_CRCLIST);
1157
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001158 /* calculate len requreed for copying */
1159 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1160 int i = ctx->overps[oix]->raix;
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001161 uintptr_t mstart, mend;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301162 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001163
1164 if (!len)
1165 continue;
1166 if (ctx->maps[i])
1167 continue;
1168 if (ctx->overps[oix]->offset == 0)
1169 copylen = ALIGN(copylen, BALIGN);
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001170 mstart = ctx->overps[oix]->mstart;
1171 mend = ctx->overps[oix]->mend;
1172 VERIFY(err, (mend - mstart) <= LONG_MAX);
1173 if (err)
1174 goto bail;
1175 copylen += mend - mstart;
1176 VERIFY(err, copylen >= 0);
1177 if (err)
1178 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001179 }
1180 ctx->used = copylen;
1181
1182 /* allocate new buffer */
1183 if (copylen) {
1184 VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
1185 if (err)
1186 goto bail;
1187 }
Tharun Kumar Merugue3361f92017-06-22 10:45:43 +05301188 if (ctx->buf->virt && metalen <= copylen)
1189 memset(ctx->buf->virt, 0, metalen);
1190
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001191 /* copy metadata */
1192 rpra = ctx->buf->virt;
1193 ctx->rpra = rpra;
1194 list = smq_invoke_buf_start(rpra, sc);
1195 pages = smq_phy_page_start(sc, list);
1196 ipage = pages;
1197 args = (uintptr_t)ctx->buf->virt + metalen;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001198 for (i = 0; i < bufs + handles; ++i) {
1199 if (lpra[i].buf.len)
1200 list[i].num = 1;
1201 else
1202 list[i].num = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001203 list[i].pgidx = ipage - pages;
1204 ipage++;
1205 }
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301206
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001207 /* map ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001208 PERF(ctx->fl->profile, ctx->fl->perf.map,
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301209 for (i = 0; rpra && i < inbufs + outbufs; ++i) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001210 struct fastrpc_mmap *map = ctx->maps[i];
1211 uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301212 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001213
1214 rpra[i].buf.pv = 0;
1215 rpra[i].buf.len = len;
1216 if (!len)
1217 continue;
1218 if (map) {
1219 struct vm_area_struct *vma;
1220 uintptr_t offset;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301221 uint64_t num = buf_num_pages(buf, len);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001222 int idx = list[i].pgidx;
1223
1224 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001225 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001226 } else {
1227 down_read(&current->mm->mmap_sem);
1228 VERIFY(err, NULL != (vma = find_vma(current->mm,
1229 map->va)));
1230 if (err) {
1231 up_read(&current->mm->mmap_sem);
1232 goto bail;
1233 }
1234 offset = buf_page_start(buf) - vma->vm_start;
1235 up_read(&current->mm->mmap_sem);
1236 VERIFY(err, offset < (uintptr_t)map->size);
1237 if (err)
1238 goto bail;
1239 }
1240 pages[idx].addr = map->phys + offset;
1241 pages[idx].size = num << PAGE_SHIFT;
1242 }
1243 rpra[i].buf.pv = buf;
1244 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001245 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001246 for (i = bufs; i < bufs + handles; ++i) {
1247 struct fastrpc_mmap *map = ctx->maps[i];
1248
1249 pages[i].addr = map->phys;
1250 pages[i].size = map->size;
1251 }
1252 fdlist = (uint64_t *)&pages[bufs + handles];
1253 for (i = 0; i < M_FDLIST; i++)
1254 fdlist[i] = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001255 crclist = (uint32_t *)&fdlist[M_FDLIST];
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301256 memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001257
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001258 /* copy non ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001259 PERF(ctx->fl->profile, ctx->fl->perf.copy,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001260 rlen = copylen - metalen;
1261 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1262 int i = ctx->overps[oix]->raix;
1263 struct fastrpc_mmap *map = ctx->maps[i];
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301264 size_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001265 uint64_t buf;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301266 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001267
1268 if (!len)
1269 continue;
1270 if (map)
1271 continue;
1272 if (ctx->overps[oix]->offset == 0) {
1273 rlen -= ALIGN(args, BALIGN) - args;
1274 args = ALIGN(args, BALIGN);
1275 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001276 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001277 VERIFY(err, rlen >= mlen);
1278 if (err)
1279 goto bail;
1280 rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
1281 pages[list[i].pgidx].addr = ctx->buf->phys -
1282 ctx->overps[oix]->offset +
1283 (copylen - rlen);
1284 pages[list[i].pgidx].addr =
1285 buf_page_start(pages[list[i].pgidx].addr);
1286 buf = rpra[i].buf.pv;
1287 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1288 if (i < inbufs) {
1289 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1290 lpra[i].buf.pv, len);
1291 if (err)
1292 goto bail;
1293 }
1294 args = args + mlen;
1295 rlen -= mlen;
1296 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001297 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001298
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001299 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001300 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1301 int i = ctx->overps[oix]->raix;
1302 struct fastrpc_mmap *map = ctx->maps[i];
1303
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001304 if (map && map->uncached)
1305 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301306 if (ctx->fl->sctx->smmu.coherent &&
1307 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1308 continue;
1309 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1310 continue;
1311
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001312 if (rpra[i].buf.len && ctx->overps[oix]->mstart)
1313 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1314 uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
1315 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001316 PERF_END);
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301317 for (i = bufs; rpra && i < bufs + handles; i++) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001318 rpra[i].dma.fd = ctx->fds[i];
1319 rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1320 rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001321 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001322
1323 if (!ctx->fl->sctx->smmu.coherent) {
1324 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001325 dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001326 PERF_END);
1327 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001328 bail:
1329 return err;
1330}
1331
1332static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
1333 remote_arg_t *upra)
1334{
1335 uint32_t sc = ctx->sc;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001336 struct smq_invoke_buf *list;
1337 struct smq_phy_page *pages;
1338 struct fastrpc_mmap *mmap;
1339 uint64_t *fdlist;
Sathish Ambleybae51902017-07-03 15:00:49 -07001340 uint32_t *crclist = NULL;
1341
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001342 remote_arg64_t *rpra = ctx->rpra;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001343 int i, inbufs, outbufs, handles;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001344 int err = 0;
1345
1346 inbufs = REMOTE_SCALARS_INBUFS(sc);
1347 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001348 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1349 list = smq_invoke_buf_start(ctx->rpra, sc);
1350 pages = smq_phy_page_start(sc, list);
1351 fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
Sathish Ambleybae51902017-07-03 15:00:49 -07001352 crclist = (uint32_t *)(fdlist + M_FDLIST);
1353
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001354 for (i = inbufs; i < inbufs + outbufs; ++i) {
1355 if (!ctx->maps[i]) {
1356 K_COPY_TO_USER(err, kernel,
1357 ctx->lpra[i].buf.pv,
1358 uint64_to_ptr(rpra[i].buf.pv),
1359 rpra[i].buf.len);
1360 if (err)
1361 goto bail;
1362 } else {
c_mtharu7bd6a422017-10-17 18:15:37 +05301363 fastrpc_mmap_free(ctx->maps[i], 0);
c_mtharue1a5ce12017-10-13 20:47:09 +05301364 ctx->maps[i] = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001365 }
1366 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001367 if (inbufs + outbufs + handles) {
1368 for (i = 0; i < M_FDLIST; i++) {
1369 if (!fdlist[i])
1370 break;
1371 if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
Sathish Ambleyae5ee542017-01-16 22:24:23 -08001372 0, 0, &mmap))
c_mtharu7bd6a422017-10-17 18:15:37 +05301373 fastrpc_mmap_free(mmap, 0);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001374 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001375 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001376 if (ctx->crc && crclist && rpra)
c_mtharue1a5ce12017-10-13 20:47:09 +05301377 K_COPY_TO_USER(err, kernel, ctx->crc,
Sathish Ambleybae51902017-07-03 15:00:49 -07001378 crclist, M_CRCLIST*sizeof(uint32_t));
1379
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001380 bail:
1381 return err;
1382}
1383
1384static void inv_args_pre(struct smq_invoke_ctx *ctx)
1385{
1386 int i, inbufs, outbufs;
1387 uint32_t sc = ctx->sc;
1388 remote_arg64_t *rpra = ctx->rpra;
1389 uintptr_t end;
1390
1391 inbufs = REMOTE_SCALARS_INBUFS(sc);
1392 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1393 for (i = inbufs; i < inbufs + outbufs; ++i) {
1394 struct fastrpc_mmap *map = ctx->maps[i];
1395
1396 if (map && map->uncached)
1397 continue;
1398 if (!rpra[i].buf.len)
1399 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301400 if (ctx->fl->sctx->smmu.coherent &&
1401 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1402 continue;
1403 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1404 continue;
1405
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001406 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1407 buf_page_start(rpra[i].buf.pv))
1408 continue;
1409 if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
1410 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1411 (char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
1412 end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
1413 rpra[i].buf.len);
1414 if (!IS_CACHE_ALIGNED(end))
1415 dmac_flush_range((char *)end,
1416 (char *)end + 1);
1417 }
1418}
1419
1420static void inv_args(struct smq_invoke_ctx *ctx)
1421{
1422 int i, inbufs, outbufs;
1423 uint32_t sc = ctx->sc;
1424 remote_arg64_t *rpra = ctx->rpra;
1425 int used = ctx->used;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001426
1427 inbufs = REMOTE_SCALARS_INBUFS(sc);
1428 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1429 for (i = inbufs; i < inbufs + outbufs; ++i) {
1430 struct fastrpc_mmap *map = ctx->maps[i];
1431
1432 if (map && map->uncached)
1433 continue;
1434 if (!rpra[i].buf.len)
1435 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301436 if (ctx->fl->sctx->smmu.coherent &&
1437 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1438 continue;
1439 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1440 continue;
1441
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001442 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1443 buf_page_start(rpra[i].buf.pv)) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001444 continue;
1445 }
1446 if (map && map->handle)
1447 msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
1448 (char *)uint64_to_ptr(rpra[i].buf.pv),
1449 rpra[i].buf.len, ION_IOC_INV_CACHES);
1450 else
1451 dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
1452 (char *)uint64_to_ptr(rpra[i].buf.pv
1453 + rpra[i].buf.len));
1454 }
1455
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001456 if (rpra)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001457 dmac_inv_range(rpra, (char *)rpra + used);
1458}
1459
1460static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
1461 uint32_t kernel, uint32_t handle)
1462{
1463 struct smq_msg *msg = &ctx->msg;
1464 struct fastrpc_file *fl = ctx->fl;
1465 struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
1466 int err = 0;
1467
c_mtharue1a5ce12017-10-13 20:47:09 +05301468 VERIFY(err, NULL != channel_ctx->chan);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001469 if (err)
1470 goto bail;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301471 msg->pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001472 msg->tid = current->pid;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301473 if (fl->sessionid)
1474 msg->tid |= (1 << SESSION_ID_INDEX);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001475 if (kernel)
1476 msg->pid = 0;
1477 msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
1478 msg->invoke.header.handle = handle;
1479 msg->invoke.header.sc = ctx->sc;
1480 msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
1481 msg->invoke.page.size = buf_page_size(ctx->used);
1482
1483 if (fl->ssrcount != channel_ctx->ssrcount) {
1484 err = -ECONNRESET;
1485 goto bail;
1486 }
1487 VERIFY(err, channel_ctx->link.port_state ==
1488 FASTRPC_LINK_CONNECTED);
1489 if (err)
1490 goto bail;
1491 err = glink_tx(channel_ctx->chan,
1492 (void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
1493 GLINK_TX_REQ_INTENT);
1494 bail:
1495 return err;
1496}
1497
1498static void fastrpc_init(struct fastrpc_apps *me)
1499{
1500 int i;
1501
1502 INIT_HLIST_HEAD(&me->drivers);
1503 spin_lock_init(&me->hlock);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05301504 mutex_init(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001505 me->channel = &gcinfo[0];
1506 for (i = 0; i < NUM_CHANNELS; i++) {
1507 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301508 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001509 me->channel[i].sesscount = 0;
1510 }
1511}
1512
1513static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1514
1515static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
1516 uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07001517 struct fastrpc_ioctl_invoke_crc *inv)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001518{
c_mtharue1a5ce12017-10-13 20:47:09 +05301519 struct smq_invoke_ctx *ctx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001520 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
1521 int cid = fl->cid;
1522 int interrupted = 0;
1523 int err = 0;
Maria Yu757199c2017-09-22 16:05:49 +08001524 struct timespec invoket = {0};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001525
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001526 if (fl->profile)
1527 getnstimeofday(&invoket);
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301528
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301529
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301530 VERIFY(err, fl->sctx != NULL);
1531 if (err)
1532 goto bail;
1533 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1534 if (err)
1535 goto bail;
1536
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001537 if (!kernel) {
1538 VERIFY(err, 0 == context_restore_interrupted(fl, inv,
1539 &ctx));
1540 if (err)
1541 goto bail;
1542 if (fl->sctx->smmu.faults)
1543 err = FASTRPC_ENOSUCH;
1544 if (err)
1545 goto bail;
1546 if (ctx)
1547 goto wait;
1548 }
1549
1550 VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
1551 if (err)
1552 goto bail;
1553
1554 if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001555 PERF(fl->profile, fl->perf.getargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001556 VERIFY(err, 0 == get_args(kernel, ctx));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001557 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001558 if (err)
1559 goto bail;
1560 }
1561
Sathish Ambleyc432b502017-06-05 12:03:42 -07001562 if (!fl->sctx->smmu.coherent)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001563 inv_args_pre(ctx);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001564 PERF(fl->profile, fl->perf.link,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001565 VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001566 PERF_END);
1567
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001568 if (err)
1569 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001570 wait:
1571 if (kernel)
1572 wait_for_completion(&ctx->work);
1573 else {
1574 interrupted = wait_for_completion_interruptible(&ctx->work);
1575 VERIFY(err, 0 == (err = interrupted));
1576 if (err)
1577 goto bail;
1578 }
Sathish Ambleyc432b502017-06-05 12:03:42 -07001579
1580 PERF(fl->profile, fl->perf.invargs,
1581 if (!fl->sctx->smmu.coherent)
1582 inv_args(ctx);
1583 PERF_END);
1584
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001585 VERIFY(err, 0 == (err = ctx->retval));
1586 if (err)
1587 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001588
1589 PERF(fl->profile, fl->perf.putargs,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001590 VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001591 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001592 if (err)
1593 goto bail;
1594 bail:
1595 if (ctx && interrupted == -ERESTARTSYS)
1596 context_save_interrupted(ctx);
1597 else if (ctx)
1598 context_free(ctx);
1599 if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
1600 err = ECONNRESET;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001601
1602 if (fl->profile && !interrupted) {
1603 if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
1604 fl->perf.invoke += getnstimediff(&invoket);
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301605 if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX)
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001606 fl->perf.count++;
1607 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001608 return err;
1609}
1610
Sathish Ambley36849af2017-02-02 09:35:55 -08001611static int fastrpc_channel_open(struct fastrpc_file *fl);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001612static int fastrpc_init_process(struct fastrpc_file *fl,
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001613 struct fastrpc_ioctl_init_attrs *uproc)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001614{
1615 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05301616 struct fastrpc_apps *me = &gfa;
Sathish Ambleybae51902017-07-03 15:00:49 -07001617 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001618 struct fastrpc_ioctl_init *init = &uproc->init;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001619 struct smq_phy_page pages[1];
c_mtharue1a5ce12017-10-13 20:47:09 +05301620 struct fastrpc_mmap *file = NULL, *mem = NULL;
1621 char *proc_name = NULL;
1622 int srcVM[1] = {VMID_HLOS};
c_mtharu63ffc012017-11-16 15:26:56 +05301623 int destVM[1] = {me->channel[fl->cid].rhvmid};
c_mtharue1a5ce12017-10-13 20:47:09 +05301624 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
1625 int hlosVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001626
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05301627 VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
Sathish Ambley36849af2017-02-02 09:35:55 -08001628 if (err)
1629 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001630 if (init->flags == FASTRPC_INIT_ATTACH) {
1631 remote_arg_t ra[1];
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301632 int tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001633
1634 ra[0].buf.pv = (void *)&tgid;
1635 ra[0].buf.len = sizeof(tgid);
1636 ioctl.inv.handle = 1;
1637 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
1638 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301639 ioctl.fds = NULL;
1640 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001641 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001642 fl->pd = 0;
1643 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1644 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1645 if (err)
1646 goto bail;
1647 } else if (init->flags == FASTRPC_INIT_CREATE) {
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001648 remote_arg_t ra[6];
1649 int fds[6];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001650 int mflags = 0;
1651 struct {
1652 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301653 unsigned int namelen;
1654 unsigned int filelen;
1655 unsigned int pageslen;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001656 int attrs;
1657 int siglen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001658 } inbuf;
1659
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301660 inbuf.pgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001661 inbuf.namelen = strlen(current->comm) + 1;
1662 inbuf.filelen = init->filelen;
1663 fl->pd = 1;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301664
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001665 if (init->filelen) {
1666 VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
1667 init->file, init->filelen, mflags, &file));
1668 if (err)
1669 goto bail;
1670 }
1671 inbuf.pageslen = 1;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301672
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001673 VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
1674 init->mem, init->memlen, mflags, &mem));
1675 if (err)
1676 goto bail;
1677 inbuf.pageslen = 1;
1678 ra[0].buf.pv = (void *)&inbuf;
1679 ra[0].buf.len = sizeof(inbuf);
1680 fds[0] = 0;
1681
1682 ra[1].buf.pv = (void *)current->comm;
1683 ra[1].buf.len = inbuf.namelen;
1684 fds[1] = 0;
1685
1686 ra[2].buf.pv = (void *)init->file;
1687 ra[2].buf.len = inbuf.filelen;
1688 fds[2] = init->filefd;
1689
1690 pages[0].addr = mem->phys;
1691 pages[0].size = mem->size;
1692 ra[3].buf.pv = (void *)pages;
1693 ra[3].buf.len = 1 * sizeof(*pages);
1694 fds[3] = 0;
1695
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001696 inbuf.attrs = uproc->attrs;
1697 ra[4].buf.pv = (void *)&(inbuf.attrs);
1698 ra[4].buf.len = sizeof(inbuf.attrs);
1699 fds[4] = 0;
1700
1701 inbuf.siglen = uproc->siglen;
1702 ra[5].buf.pv = (void *)&(inbuf.siglen);
1703 ra[5].buf.len = sizeof(inbuf.siglen);
1704 fds[5] = 0;
1705
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001706 ioctl.inv.handle = 1;
1707 ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001708 if (uproc->attrs)
1709 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001710 ioctl.inv.pra = ra;
1711 ioctl.fds = fds;
c_mtharue1a5ce12017-10-13 20:47:09 +05301712 ioctl.attrs = NULL;
1713 ioctl.crc = NULL;
1714 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1715 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1716 if (err)
1717 goto bail;
1718 } else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
1719 remote_arg_t ra[3];
1720 uint64_t phys = 0;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301721 size_t size = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05301722 int fds[3];
1723 struct {
1724 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301725 unsigned int namelen;
1726 unsigned int pageslen;
c_mtharue1a5ce12017-10-13 20:47:09 +05301727 } inbuf;
1728
1729 if (!init->filelen)
1730 goto bail;
1731
1732 proc_name = kzalloc(init->filelen, GFP_KERNEL);
1733 VERIFY(err, !IS_ERR_OR_NULL(proc_name));
1734 if (err)
1735 goto bail;
1736 VERIFY(err, 0 == copy_from_user((void *)proc_name,
1737 (void __user *)init->file, init->filelen));
1738 if (err)
1739 goto bail;
1740
1741 inbuf.pgid = current->tgid;
c_mtharu81a0aa72017-11-07 16:13:21 +05301742 inbuf.namelen = init->filelen;
c_mtharue1a5ce12017-10-13 20:47:09 +05301743 inbuf.pageslen = 0;
1744 if (!me->staticpd_flags) {
1745 inbuf.pageslen = 1;
1746 VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
1747 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
1748 &mem));
1749 if (err)
1750 goto bail;
1751 phys = mem->phys;
1752 size = mem->size;
1753 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
1754 srcVM, 1, destVM, destVMperm, 1));
1755 if (err) {
1756 pr_err("ADSPRPC: hyp_assign_phys fail err %d",
1757 err);
1758 pr_err("map->phys %llx, map->size %d\n",
1759 phys, (int)size);
1760 goto bail;
1761 }
1762 me->staticpd_flags = 1;
1763 }
1764
1765 ra[0].buf.pv = (void *)&inbuf;
1766 ra[0].buf.len = sizeof(inbuf);
1767 fds[0] = 0;
1768
1769 ra[1].buf.pv = (void *)proc_name;
1770 ra[1].buf.len = inbuf.namelen;
1771 fds[1] = 0;
1772
1773 pages[0].addr = phys;
1774 pages[0].size = size;
1775
1776 ra[2].buf.pv = (void *)pages;
1777 ra[2].buf.len = sizeof(*pages);
1778 fds[2] = 0;
1779 ioctl.inv.handle = 1;
1780
1781 ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
1782 ioctl.inv.pra = ra;
1783 ioctl.fds = NULL;
1784 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001785 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001786 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1787 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1788 if (err)
1789 goto bail;
1790 } else {
1791 err = -ENOTTY;
1792 }
1793bail:
c_mtharud91205a2017-11-07 16:01:06 +05301794 kfree(proc_name);
c_mtharue1a5ce12017-10-13 20:47:09 +05301795 if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
1796 me->staticpd_flags = 0;
1797 if (mem && err) {
1798 if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
1799 hyp_assign_phys(mem->phys, (uint64_t)mem->size,
1800 destVM, 1, srcVM, hlosVMperm, 1);
c_mtharu7bd6a422017-10-17 18:15:37 +05301801 fastrpc_mmap_free(mem, 0);
c_mtharue1a5ce12017-10-13 20:47:09 +05301802 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001803 if (file)
c_mtharu7bd6a422017-10-17 18:15:37 +05301804 fastrpc_mmap_free(file, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001805 return err;
1806}
1807
1808static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
1809{
1810 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001811 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001812 remote_arg_t ra[1];
1813 int tgid = 0;
1814
Sathish Ambley36849af2017-02-02 09:35:55 -08001815 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1816 if (err)
1817 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05301818 VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001819 if (err)
1820 goto bail;
1821 tgid = fl->tgid;
1822 ra[0].buf.pv = (void *)&tgid;
1823 ra[0].buf.len = sizeof(tgid);
1824 ioctl.inv.handle = 1;
1825 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
1826 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301827 ioctl.fds = NULL;
1828 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001829 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001830 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1831 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1832bail:
1833 return err;
1834}
1835
1836static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
1837 struct fastrpc_mmap *map)
1838{
Sathish Ambleybae51902017-07-03 15:00:49 -07001839 struct fastrpc_ioctl_invoke_crc ioctl;
c_mtharu63ffc012017-11-16 15:26:56 +05301840 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001841 struct smq_phy_page page;
1842 int num = 1;
1843 remote_arg_t ra[3];
1844 int err = 0;
1845 struct {
1846 int pid;
1847 uint32_t flags;
1848 uintptr_t vaddrin;
1849 int num;
1850 } inargs;
1851 struct {
1852 uintptr_t vaddrout;
1853 } routargs;
1854
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301855 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001856 inargs.vaddrin = (uintptr_t)map->va;
1857 inargs.flags = flags;
1858 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
1859 ra[0].buf.pv = (void *)&inargs;
1860 ra[0].buf.len = sizeof(inargs);
1861 page.addr = map->phys;
1862 page.size = map->size;
1863 ra[1].buf.pv = (void *)&page;
1864 ra[1].buf.len = num * sizeof(page);
1865
1866 ra[2].buf.pv = (void *)&routargs;
1867 ra[2].buf.len = sizeof(routargs);
1868
1869 ioctl.inv.handle = 1;
1870 if (fl->apps->compat)
1871 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
1872 else
1873 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
1874 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301875 ioctl.fds = NULL;
1876 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001877 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001878 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1879 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1880 map->raddr = (uintptr_t)routargs.vaddrout;
c_mtharue1a5ce12017-10-13 20:47:09 +05301881 if (err)
1882 goto bail;
1883 if (flags == ADSP_MMAP_HEAP_ADDR) {
1884 struct scm_desc desc = {0};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001885
c_mtharue1a5ce12017-10-13 20:47:09 +05301886 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
1887 desc.args[1] = map->phys;
1888 desc.args[2] = map->size;
1889 desc.arginfo = SCM_ARGS(3);
1890 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
1891 TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
1892 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
1893
1894 int srcVM[1] = {VMID_HLOS};
c_mtharu63ffc012017-11-16 15:26:56 +05301895 int destVM[1] = {me->channel[fl->cid].rhvmid};
c_mtharue1a5ce12017-10-13 20:47:09 +05301896 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
1897
1898 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
1899 srcVM, 1, destVM, destVMperm, 1));
1900 if (err)
1901 goto bail;
1902 }
1903bail:
1904 return err;
1905}
1906
1907static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
1908 struct fastrpc_mmap *map)
1909{
1910 int err = 0;
c_mtharu63ffc012017-11-16 15:26:56 +05301911 struct fastrpc_apps *me = &gfa;
1912 int srcVM[1] = {me->channel[fl->cid].rhvmid};
c_mtharue1a5ce12017-10-13 20:47:09 +05301913 int destVM[1] = {VMID_HLOS};
1914 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
1915
1916 if (map->flags == ADSP_MMAP_HEAP_ADDR) {
1917 struct fastrpc_ioctl_invoke_crc ioctl;
1918 struct scm_desc desc = {0};
1919 remote_arg_t ra[1];
1920 int err = 0;
1921 struct {
1922 uint8_t skey;
1923 } routargs;
1924
1925 ra[0].buf.pv = (void *)&routargs;
1926 ra[0].buf.len = sizeof(routargs);
1927
1928 ioctl.inv.handle = 1;
1929 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
1930 ioctl.inv.pra = ra;
1931 ioctl.fds = NULL;
1932 ioctl.attrs = NULL;
1933 ioctl.crc = NULL;
1934 if (fl == NULL)
1935 goto bail;
1936
1937 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1938 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1939 if (err)
1940 goto bail;
1941 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
1942 desc.args[1] = map->phys;
1943 desc.args[2] = map->size;
1944 desc.args[3] = routargs.skey;
1945 desc.arginfo = SCM_ARGS(4);
1946 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
1947 TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
1948 } else if (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
1949 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
1950 srcVM, 1, destVM, destVMperm, 1));
1951 if (err)
1952 goto bail;
1953 }
1954
1955bail:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001956 return err;
1957}
1958
/*
 * Remove a mapping on the DSP side.
 *
 * Sends the DSP virtual address and size of @map (method 3, or 5 in
 * compat mode, on handle 1).  Heap-style mappings additionally have
 * their protection/ownership undone via fastrpc_munmap_on_dsp_rh().
 *
 * Returns 0 on success or a negative error code.
 */
static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
				 struct fastrpc_mmap *map)
{
	struct fastrpc_ioctl_invoke_crc ioctl;
	remote_arg_t ra[1];
	int err = 0;
	/* In-parameters marshalled to the DSP. */
	struct {
		int pid;
		uintptr_t vaddrout;
		size_t size;
	} inargs;

	inargs.pid = fl->tgid;
	inargs.size = map->size;
	inargs.vaddrout = map->raddr;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);

	ioctl.inv.handle = 1;
	if (fl->apps->compat)
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
	else
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = NULL;
	ioctl.attrs = NULL;
	ioctl.crc = NULL;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	if (err)
		goto bail;
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, map));
		if (err)
			goto bail;
	}
bail:
	return err;
}
1999
2000static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
2001{
2002 struct fastrpc_mmap *match = NULL, *map = NULL;
2003 struct hlist_node *n = NULL;
2004 int err = 0, ret = 0;
2005 struct fastrpc_apps *me = &gfa;
2006 struct ramdump_segment *ramdump_segments_rh = NULL;
2007
2008 do {
2009 match = NULL;
2010 spin_lock(&me->hlock);
2011 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
2012 match = map;
2013 hlist_del_init(&map->hn);
2014 break;
2015 }
2016 spin_unlock(&me->hlock);
2017
2018 if (match) {
2019 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match));
2020 if (err)
2021 goto bail;
2022 if (me->channel[0].ramdumpenabled) {
2023 ramdump_segments_rh = kcalloc(1,
2024 sizeof(struct ramdump_segment), GFP_KERNEL);
2025 if (ramdump_segments_rh) {
2026 ramdump_segments_rh->address =
2027 match->phys;
2028 ramdump_segments_rh->size = match->size;
2029 ret = do_elf_ramdump(
2030 me->channel[0].remoteheap_ramdump_dev,
2031 ramdump_segments_rh, 1);
2032 if (ret < 0)
2033 pr_err("ADSPRPC: unable to dump heap");
2034 kfree(ramdump_segments_rh);
2035 }
2036 }
c_mtharu7bd6a422017-10-17 18:15:37 +05302037 fastrpc_mmap_free(match, 0);
c_mtharue1a5ce12017-10-13 20:47:09 +05302038 }
2039 } while (match);
2040bail:
2041 if (err && match)
2042 fastrpc_mmap_add(match);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002043 return err;
2044}
2045
2046static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302047 size_t len, struct fastrpc_mmap **ppmap);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002048
2049static void fastrpc_mmap_add(struct fastrpc_mmap *map);
2050
/*
 * Handle the FASTRPC_IOCTL_MUNMAP request: unlink the mapping matching
 * the user-supplied (vaddrout, size), remove it on the DSP, then free
 * it.  If the DSP-side removal fails the map is re-added so the process
 * still owns it and can retry.
 */
static int fastrpc_internal_munmap(struct fastrpc_file *fl,
				   struct fastrpc_ioctl_munmap *ud)
{
	int err = 0;
	struct fastrpc_mmap *map = NULL;

	VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
	if (err)
		goto bail;
	VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
	if (err)
		goto bail;
	fastrpc_mmap_free(map, 0);
bail:
	if (err && map)
		fastrpc_mmap_add(map);
	return err;
}
2069
c_mtharu7bd6a422017-10-17 18:15:37 +05302070static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
2071 struct fastrpc_ioctl_munmap_fd *ud) {
2072 int err = 0;
2073 struct fastrpc_mmap *map = NULL;
2074
2075 VERIFY(err, (fl && ud));
2076 if (err)
2077 goto bail;
2078
2079 if (!fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
2080 pr_err("mapping not found to unamp %x va %llx %x\n",
2081 ud->fd, (unsigned long long)ud->va,
2082 (unsigned int)ud->len);
2083 err = -1;
2084 goto bail;
2085 }
2086 if (map)
2087 fastrpc_mmap_free(map, 0);
2088bail:
2089 return err;
2090}
2091
2092
/*
 * Handle the FASTRPC_IOCTL_MMAP request: map the buffer described by
 * (fd, vaddrin, size, flags) onto the DSP and return the DSP-side
 * address in ud->vaddrout.  If an identical mapping already exists the
 * call succeeds without creating a duplicate.
 */
static int fastrpc_internal_mmap(struct fastrpc_file *fl,
				 struct fastrpc_ioctl_mmap *ud)
{

	struct fastrpc_mmap *map = NULL;
	int err = 0;

	/* A 0 return from fastrpc_mmap_find() means already mapped. */
	if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
			 ud->size, ud->flags, 1, &map))
		return 0;
	VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
			(uintptr_t)ud->vaddrin, ud->size,
			 ud->flags, &map));
	if (err)
		goto bail;
	VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
	if (err)
		goto bail;
	ud->vaddrout = map->raddr;
 bail:
	if (err && map)
		fastrpc_mmap_free(map, 0);
	return err;
}
2118
/*
 * kref release callback for a channel: closes the glink channel and
 * unregisters the link-state callback.
 *
 * Invoked via kref_put_mutex() (see fastrpc_file_free()) with
 * me->smd_mutex held, which is why this function must release the
 * mutex before returning.
 */
static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	/* The channel id is this ctx's index in the global table. */
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = NULL;
	glink_unregister_link_state_cb(ctx->link.link_notify_handle);
	ctx->link.link_notify_handle = NULL;
	mutex_unlock(&me->smd_mutex);
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
		MAJOR(me->dev_no), cid);
}
2135
2136static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
2137
/*
 * Pick a free SMMU session on @chan matching the requested @secure
 * mode.  Caller must hold the lock serializing session state (the
 * global smd_mutex; see fastrpc_session_alloc()).
 *
 * If the channel declares no sessions, falls back to the global device
 * on session 0 instead.  Returns 0 and sets *session on success.
 */
static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
			int secure, struct fastrpc_session_ctx **session)
{
	struct fastrpc_apps *me = &gfa;
	int idx = 0, err = 0;

	if (chan->sesscount) {
		/* Linear scan for an unused session of the right type. */
		for (idx = 0; idx < chan->sesscount; ++idx) {
			if (!chan->session[idx].used &&
				chan->session[idx].smmu.secure == secure) {
				chan->session[idx].used = 1;
				break;
			}
		}
		/* idx == sesscount means every matching session is busy. */
		VERIFY(err, idx < chan->sesscount);
		if (err)
			goto bail;
		chan->session[idx].smmu.faults = 0;
	} else {
		VERIFY(err, me->dev != NULL);
		if (err)
			goto bail;
		chan->session[0].dev = me->dev;
		chan->session[0].smmu.dev = me->dev;
	}

	*session = &chan->session[idx];
 bail:
	return err;
}
2168
c_mtharue1a5ce12017-10-13 20:47:09 +05302169static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
2170 size_t size)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002171{
2172 if (glink_queue_rx_intent(h, NULL, size))
2173 return false;
2174 return true;
2175}
2176
/* glink tx-done callback: intentionally a no-op. */
static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
	const void *pkt_priv, const void *ptr)
{
}
2181
/*
 * glink rx callback: dispatch a response from the DSP to the waiting
 * invoke context.
 *
 * Validates both the packet size and the context pointer (checked via
 * its magic) before completing the waiter; a malformed packet is
 * logged and dropped.  The rx buffer is always returned to glink.
 */
static void fastrpc_glink_notify_rx(void *handle, const void *priv,
	const void *pkt_priv, const void *ptr, size_t size)
{
	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
	struct smq_invoke_ctx *ctx;
	int err = 0;

	VERIFY(err, (rsp && size >= sizeof(*rsp)));
	if (err)
		goto bail;

	/*
	 * The low bit of rsp->ctx is masked off before use as a pointer.
	 * NOTE(review): it appears to be a tag bit set by the sender —
	 * confirm its semantics against the context-id encoding.
	 */
	ctx = (struct smq_invoke_ctx *)(uint64_to_ptr(rsp->ctx & ~1));
	VERIFY(err, (ctx && ctx->magic == FASTRPC_CTX_MAGIC));
	if (err)
		goto bail;

	context_notify_user(ctx, rsp->retval);
bail:
	if (err)
		pr_err("adsprpc: invalid response or context\n");
	glink_rx_done(handle, ptr, true);
}
2204
/*
 * glink channel-state callback.  Tracks the port state for the channel
 * identified by @priv (the channel id) and wakes any opener waiting
 * for the port to connect.
 */
static void fastrpc_glink_notify_state(void *handle, const void *priv,
				unsigned int event)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)(uintptr_t)priv;
	struct fastrpc_glink_info *link;

	if (cid < 0 || cid >= NUM_CHANNELS)
		return;
	link = &me->channel[cid].link;
	switch (event) {
	case GLINK_CONNECTED:
		link->port_state = FASTRPC_LINK_CONNECTED;
		/* Unblock fastrpc_channel_open() waiting on workport. */
		complete(&me->channel[cid].workport);
		break;
	case GLINK_LOCAL_DISCONNECTED:
		link->port_state = FASTRPC_LINK_DISCONNECTED;
		break;
	case GLINK_REMOTE_DISCONNECTED:
		break;
	default:
		break;
	}
}
2229
2230static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
2231 struct fastrpc_session_ctx **session)
2232{
2233 int err = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302234 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002235
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302236 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002237 if (!*session)
2238 err = fastrpc_session_alloc_locked(chan, secure, session);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302239 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002240 return err;
2241}
2242
2243static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
2244 struct fastrpc_session_ctx *session)
2245{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302246 struct fastrpc_apps *me = &gfa;
2247
2248 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002249 session->used = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302250 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002251}
2252
/*
 * Release all resources owned by a per-process fastrpc file: pending
 * and interrupted invoke contexts, buffers, mappings, SMMU sessions,
 * and (when appropriate) the channel reference itself.  Always
 * returns 0.
 */
static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n;
	struct fastrpc_mmap *map = NULL;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	/* Best effort: tell the DSP this process is going away. */
	(void)fastrpc_release_current_dsp_process(fl);

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	/* No session was ever set up; nothing further to tear down. */
	if (!fl->sctx) {
		kfree(fl);
		return 0;
	}
	/* Mark the file as closing so racing ioctls can bail out. */
	spin_lock(&fl->hlock);
	fl->file_close = 1;
	spin_unlock(&fl->hlock);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map, 1);
	}
	/*
	 * Only drop the channel ref if no SSR happened since this file
	 * took it; after an SSR the channel was already torn down.
	 */
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
				fastrpc_channel_close, &fl->apps->smd_mutex);
	if (fl->sctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
	kfree(fl);
	return 0;
}
2291
2292static int fastrpc_device_release(struct inode *inode, struct file *file)
2293{
2294 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2295
2296 if (fl) {
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302297 if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
2298 pm_qos_remove_request(&fl->pm_qos_req);
Sathish Ambley1ca68232017-01-19 10:32:55 -08002299 if (fl->debugfs_file != NULL)
2300 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002301 fastrpc_file_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +05302302 file->private_data = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002303 }
2304 return 0;
2305}
2306
/*
 * glink link-state callback for a transport edge.  Records whether the
 * link is up and wakes fastrpc_glink_register(), which waits for the
 * link before opening the channel.
 */
static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
				void *priv)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)((uintptr_t)priv);
	struct fastrpc_glink_info *link;

	if (cid < 0 || cid >= NUM_CHANNELS)
		return;

	link = &me->channel[cid].link;
	switch (cb_info->link_state) {
	case GLINK_LINK_STATE_UP:
		link->link_state = FASTRPC_LINK_STATE_UP;
		/* Unblock fastrpc_glink_register() waiting on work. */
		complete(&me->channel[cid].work);
		break;
	case GLINK_LINK_STATE_DOWN:
		link->link_state = FASTRPC_LINK_STATE_DOWN;
		break;
	default:
		pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
		break;
	}
}
2331
/*
 * Register for link-state notifications on channel @cid and wait for
 * the link to come up.  Idempotent: returns immediately if a notify
 * handle already exists.  Returns 0 on success; a registration failure
 * or a timeout waiting for the link leaves err set via VERIFY.
 */
static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;

	link = &me->channel[cid].link;
	if (link->link_notify_handle != NULL)
		goto bail;

	link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
	link->link_notify_handle = glink_register_link_state_cb(
				&link->link_info,
				(void *)((uintptr_t)cid));
	VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
	if (err) {
		link->link_notify_handle = NULL;
		goto bail;
	}
	/* fastrpc_link_state_handler() completes this on LINK_STATE_UP. */
	VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
			RPC_TIMEOUT));
bail:
	return err;
}
2359
2360static void fastrpc_glink_close(void *chan, int cid)
2361{
2362 int err = 0;
2363 struct fastrpc_glink_info *link;
2364
2365 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2366 if (err)
2367 return;
2368 link = &gfa.channel[cid].link;
2369
c_mtharu314a4202017-11-15 22:09:17 +05302370 if (link->port_state == FASTRPC_LINK_CONNECTED ||
2371 link->port_state == FASTRPC_LINK_REMOTE_DISCONNECTING) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002372 link->port_state = FASTRPC_LINK_DISCONNECTING;
2373 glink_close(chan);
2374 }
2375}
2376
/*
 * Open the glink channel for @cid.  Requires the link to be up and the
 * port to be fully disconnected; moves the port to CONNECTING and lets
 * fastrpc_glink_notify_state() advance it to CONNECTED.  On success
 * the glink handle is stored in me->channel[cid].chan.
 */
static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &me->channel[cid].link.cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
	if (err)
		goto bail;

	link->port_state = FASTRPC_LINK_CONNECTING;
	cfg->priv = (void *)(uintptr_t)cid;
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err) {
		/* Roll back the state transition if the open failed. */
		if (link->port_state == FASTRPC_LINK_CONNECTING)
			link->port_state = FASTRPC_LINK_DISCONNECTED;
		goto bail;
	}
	me->channel[cid].chan = handle;
bail:
	return err;
}
2418
/* debugfs open: stash the fastrpc_file stored in the inode for read(). */
static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}
2424
/*
 * debugfs read handler.
 *
 * With no private data (fl == NULL) it dumps the static channel and
 * SMMU-session table; otherwise it dumps one client's state: its bufs,
 * maps, and pending/interrupted invoke contexts.  Output is built in a
 * DEBUGFS_SIZE scratch buffer and copied out via
 * simple_read_from_buffer().
 */
static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
					 size_t count, loff_t *position)
{
	struct fastrpc_file *fl = filp->private_data;
	struct hlist_node *n;
	struct fastrpc_buf *buf = NULL;
	struct fastrpc_mmap *map = NULL;
	struct smq_invoke_ctx *ictx = NULL;
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	unsigned int len = 0;
	int i, j, ret = 0;
	char *fileinfo = NULL;

	fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
	if (!fileinfo)
		goto bail;
	if (fl == NULL) {
		/* Global view: per-channel session configuration. */
		for (i = 0; i < NUM_CHANNELS; i++) {
			chan = &gcinfo[i];
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s\n\n",
					chan->name);
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s %d\n",
					"sesscount:", chan->sesscount);
			for (j = 0; j < chan->sesscount; j++) {
				sess = &chan->session[j];
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s%d\n\n", "SESSION", j);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "sid:",
						sess->smmu.cb);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "SECURE:",
						sess->smmu.secure);
			}
		}
	} else {
		/* Per-client view; lists are walked under fl->hlock. */
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"PROCESS_ID:", fl->tgid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"CHANNEL_ID:", fl->cid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"SSRCOUNT:", fl->ssrcount);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s\n",
				"LIST OF BUFS:");
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %pK %s %llx\n", "buf:",
					buf, "buf->virt:", buf->virt,
					"buf->phys:", buf->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF MAPS:");
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
						"%s %pK %s %lx %s %llx\n",
						"map:", map,
						"map->va:", map->va,
						"map->phys:", map->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF PENDING SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
						"%s %pK %s %u %s %u %s %u\n",
						"smqcontext:", ictx,
						"sc:", ictx->sc,
						"tid:", ictx->pid,
						"handle", ictx->rpra->h);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF INTERRUPTED SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
						"%s %pK %s %u %s %u %s %u\n",
						"smqcontext:", ictx,
						"sc:", ictx->sc,
						"tid:", ictx->pid,
						"handle", ictx->rpra->h);
		}
		spin_unlock(&fl->hlock);
	}
	/* Clamp to the scratch buffer size before copying out. */
	if (len > DEBUGFS_SIZE)
		len = DEBUGFS_SIZE;
	ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
	kfree(fileinfo);
bail:
	return ret;
}
2527
/* File operations for the per-process fastrpc debugfs entry. */
static const struct file_operations debugfs_fops = {
	.open = fastrpc_debugfs_open,
	.read = fastrpc_debugfs_read,
};
/*
 * Ensure the glink channel for fl->cid is registered, opened, and
 * connected, taking a reference on it for this file.  Serialized by
 * me->smd_mutex.  After a subsystem restart, stale remote-heap
 * mappings are cleaned up before the channel is reused.
 *
 * Returns 0 on success or a negative error code.
 */
static int fastrpc_channel_open(struct fastrpc_file *fl)
{
	struct fastrpc_apps *me = &gfa;
	int cid, err = 0;

	mutex_lock(&me->smd_mutex);

	VERIFY(err, fl && fl->sctx);
	if (err)
		goto bail;
	cid = fl->cid;
	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
	if (err)
		goto bail;
	/* Refuse to open while the subsystem is still down after SSR. */
	if (me->channel[cid].ssrcount !=
			 me->channel[cid].prevssrcount) {
		if (!me->channel[cid].issubsystemup) {
			VERIFY(err, 0);
			if (err) {
				err = -ENOTCONN;
				goto bail;
			}
		}
	}
	fl->ssrcount = me->channel[cid].ssrcount;
	/* First user (or post-SSR): bring the channel up from scratch. */
	if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
	    (me->channel[cid].chan == NULL)) {
		VERIFY(err, 0 == fastrpc_glink_register(cid, me));
		if (err)
			goto bail;
		VERIFY(err, 0 == fastrpc_glink_open(cid));
		if (err)
			goto bail;

		/* Wait for GLINK_CONNECTED (signalled via workport). */
		VERIFY(err,
			 wait_for_completion_timeout(&me->channel[cid].workport,
						RPC_TIMEOUT));
		if (err) {
			me->channel[cid].chan = NULL;
			goto bail;
		}
		kref_init(&me->channel[cid].kref);
		pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
			MAJOR(me->dev_no), cid);
		/* Pre-queue two rx intents so early responses aren't lost. */
		err = glink_queue_rx_intent(me->channel[cid].chan, NULL,
			FASTRPC_GLINK_INTENT_LEN);
		err |= glink_queue_rx_intent(me->channel[cid].chan, NULL,
			FASTRPC_GLINK_INTENT_LEN);
		if (err)
			pr_warn("adsprpc: initial intent fail for %d err %d\n",
					cid, err);
		if (me->channel[cid].ssrcount !=
				 me->channel[cid].prevssrcount) {
			if (fastrpc_mmap_remove_ssr(fl))
				pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
			me->channel[cid].prevssrcount =
						me->channel[cid].ssrcount;
		}
	}

bail:
	mutex_unlock(&me->smd_mutex);
	return err;
}
2596
/*
 * open() handler for the fastrpc device node: allocate and initialize
 * the per-process fastrpc_file state, create its debugfs entry, and
 * add it to the global driver list.  The channel id stays -1 until
 * userspace selects one (see fastrpc_get_info()).
 */
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	int err = 0;
	struct dentry *debugfs_file;
	struct fastrpc_file *fl = NULL;
	struct fastrpc_apps *me = &gfa;

	VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
	if (err)
		return err;
	debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
						fl, &debugfs_fops);
	context_list_ctor(&fl->clst);
	spin_lock_init(&fl->hlock);
	INIT_HLIST_HEAD(&fl->maps);
	INIT_HLIST_HEAD(&fl->bufs);
	INIT_HLIST_NODE(&fl->hn);
	fl->sessionid = 0;
	fl->tgid = current->tgid;
	fl->apps = me;
	fl->mode = FASTRPC_MODE_SERIAL;
	fl->cid = -1;
	if (debugfs_file != NULL)
		fl->debugfs_file = debugfs_file;
	memset(&fl->perf, 0, sizeof(fl->perf));
	fl->qos_request = 0;
	filp->private_data = fl;
	spin_lock(&me->hlock);
	hlist_add_head(&fl->hn, &me->drivers);
	spin_unlock(&me->hlock);
	return 0;
}
2629
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002630static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
2631{
2632 int err = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08002633 uint32_t cid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002634
c_mtharue1a5ce12017-10-13 20:47:09 +05302635 VERIFY(err, fl != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002636 if (err)
2637 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08002638 if (fl->cid == -1) {
2639 cid = *info;
2640 VERIFY(err, cid < NUM_CHANNELS);
2641 if (err)
2642 goto bail;
2643 fl->cid = cid;
2644 fl->ssrcount = fl->apps->channel[cid].ssrcount;
2645 VERIFY(err, !fastrpc_session_alloc_locked(
2646 &fl->apps->channel[cid], 0, &fl->sctx));
2647 if (err)
2648 goto bail;
2649 }
Tharun Kumar Merugu80be7d62017-08-02 11:03:22 +05302650 VERIFY(err, fl->sctx != NULL);
2651 if (err)
2652 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002653 *info = (fl->sctx->smmu.enabled ? 1 : 0);
2654bail:
2655 return err;
2656}
2657
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302658static int fastrpc_internal_control(struct fastrpc_file *fl,
2659 struct fastrpc_ioctl_control *cp)
2660{
2661 int err = 0;
2662 int latency;
2663
2664 VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
2665 if (err)
2666 goto bail;
2667 VERIFY(err, !IS_ERR_OR_NULL(cp));
2668 if (err)
2669 goto bail;
2670
2671 switch (cp->req) {
2672 case FASTRPC_CONTROL_LATENCY:
2673 latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
2674 fl->apps->latency : PM_QOS_DEFAULT_VALUE;
2675 VERIFY(err, latency != 0);
2676 if (err)
2677 goto bail;
2678 if (!fl->qos_request) {
2679 pm_qos_add_request(&fl->pm_qos_req,
2680 PM_QOS_CPU_DMA_LATENCY, latency);
2681 fl->qos_request = 1;
2682 } else
2683 pm_qos_update_request(&fl->pm_qos_req, latency);
2684 break;
2685 default:
2686 err = -ENOTTY;
2687 break;
2688 }
2689bail:
2690 return err;
2691}
2692
/*
 * Main ioctl entry point.  Copies the per-command argument struct in
 * from user space, dispatches to the matching fastrpc_internal_* /
 * fastrpc_* helper, and copies any results back out.
 *
 * New requests are refused once fastrpc_device_release() has begun
 * (fl->file_close set under fl->hlock).
 */
static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
				 unsigned long ioctl_param)
{
	/* One union instance covers every command's argument block. */
	union {
		struct fastrpc_ioctl_invoke_crc inv;
		struct fastrpc_ioctl_mmap mmap;
		struct fastrpc_ioctl_munmap munmap;
		struct fastrpc_ioctl_munmap_fd munmap_fd;
		struct fastrpc_ioctl_init_attrs init;
		struct fastrpc_ioctl_perf perf;
		struct fastrpc_ioctl_control cp;
	} p;
	/* NOTE(review): user pointer lacks a __user annotation — confirm
	 * K_COPY_FROM_USER/K_COPY_TO_USER handle the cast internally.
	 */
	void *param = (char *)ioctl_param;
	struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
	int size = 0, err = 0;
	uint32_t info;

	/* Clear optional invoke arrays so the shared INVOKE handler sees
	 * absent fields for the older, shorter ioctl variants.
	 */
	p.inv.fds = NULL;
	p.inv.attrs = NULL;
	p.inv.crc = NULL;
	spin_lock(&fl->hlock);
	if (fl->file_close == 1) {
		/* NOTE(review): positive EBADF, unlike the negative error
		 * codes used elsewhere in this function — confirm intended.
		 */
		err = EBADF;
		pr_warn("ADSPRPC: fastrpc_device_release is happening, So not sending any new requests to DSP");
		spin_unlock(&fl->hlock);
		goto bail;
	}
	spin_unlock(&fl->hlock);

	switch (ioctl_num) {
	/* The four INVOKE variants share one handler: each case sets the
	 * size of the (progressively larger) struct it must copy in, then
	 * falls through to the common copy + invoke at INVOKE_CRC.
	 */
	case FASTRPC_IOCTL_INVOKE:
		size = sizeof(struct fastrpc_ioctl_invoke);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_FD:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_fd);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_ATTRS:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_attrs);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_CRC:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_crc);
		K_COPY_FROM_USER(err, 0, &p.inv, param, size);
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
						0, &p.inv)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MMAP:
		K_COPY_FROM_USER(err, 0, &p.mmap, param,
						sizeof(p.mmap));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
		if (err)
			goto bail;
		/* Copy back so the caller sees the assigned vaddrout. */
		K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MUNMAP:
		K_COPY_FROM_USER(err, 0, &p.munmap, param,
						sizeof(p.munmap));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
							&p.munmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MUNMAP_FD:
		K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
			sizeof(p.munmap_fd));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_munmap_fd(fl,
			&p.munmap_fd)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_SETMODE:
		/* Mode is passed directly in ioctl_param, no copy needed. */
		switch ((uint32_t)ioctl_param) {
		case FASTRPC_MODE_PARALLEL:
		case FASTRPC_MODE_SERIAL:
			fl->mode = (uint32_t)ioctl_param;
			break;
		case FASTRPC_MODE_PROFILE:
			fl->profile = (uint32_t)ioctl_param;
			break;
		case FASTRPC_MODE_SESSION:
			/* Second session: tag the tgid so the DSP can tell
			 * the sessions of one process apart.
			 */
			fl->sessionid = 1;
			fl->tgid |= (1 << SESSION_ID_INDEX);
			break;
		default:
			err = -ENOTTY;
			break;
		}
		break;
	case FASTRPC_IOCTL_GETPERF:
		K_COPY_FROM_USER(err, 0, &p.perf,
					param, sizeof(p.perf));
		if (err)
			goto bail;
		p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
		if (p.perf.keys) {
			char *keys = PERF_KEYS;

			K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
						 keys, strlen(keys)+1);
			if (err)
				goto bail;
		}
		if (p.perf.data) {
			K_COPY_TO_USER(err, 0, (void *)p.perf.data,
						 &fl->perf, sizeof(fl->perf));
		}
		K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_CONTROL:
		K_COPY_FROM_USER(err, 0, &p.cp, param,
				sizeof(p.cp));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_GETINFO:
		K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
		if (err)
			goto bail;
		K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_INIT:
		/* Plain INIT has no attrs/siglen; default them and fall
		 * through to the common INIT_ATTRS handling.
		 */
		p.init.attrs = 0;
		p.init.siglen = 0;
		size = sizeof(struct fastrpc_ioctl_init);
		/* fall through */
	case FASTRPC_IOCTL_INIT_ATTRS:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_init_attrs);
		K_COPY_FROM_USER(err, 0, &p.init, param, size);
		if (err)
			goto bail;
		/* Bound user-supplied lengths before touching them. */
		VERIFY(err, p.init.init.filelen >= 0 &&
			p.init.init.filelen < INIT_FILELEN_MAX);
		if (err)
			goto bail;
		VERIFY(err, p.init.init.memlen >= 0 &&
			p.init.init.memlen < INIT_MEMLEN_MAX);
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_init_process(fl, &p.init)));
		if (err)
			goto bail;
		break;

	default:
		err = -ENOTTY;
		pr_info("bad ioctl: %d\n", ioctl_num);
		break;
	}
 bail:
	return err;
}
2869
/*
 * Subsystem-restart (SSR) notifier; one notifier_block per channel,
 * embedded in the channel context.
 *  - SUBSYS_BEFORE_SHUTDOWN: bump the SSR count, mark the subsystem
 *    down, close the glink channel and notify waiting clients.
 *  - SUBSYS_RAMDUMP_NOTIFICATION: arm remote-heap ramdump collection
 *    for channel 0 when a ramdump device is registered.
 *  - SUBSYS_AFTER_POWERUP: mark the subsystem up again.
 */
static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
					unsigned long code,
					void *data)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	struct notif_data *notifdata = data;
	int cid;

	/* Recover the channel id from the embedded notifier block. */
	ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
	cid = ctx - &me->channel[0];
	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		/* smd_mutex serializes against channel open/close paths. */
		mutex_lock(&me->smd_mutex);
		ctx->ssrcount++;
		ctx->issubsystemup = 0;
		if (ctx->chan) {
			fastrpc_glink_close(ctx->chan, cid);
			ctx->chan = NULL;
			pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
				 gcinfo[cid].name, MAJOR(me->dev_no), cid);
		}
		mutex_unlock(&me->smd_mutex);
		/* Channel 0 hosts the static PD; force re-init after SSR. */
		if (cid == 0)
			me->staticpd_flags = 0;
		fastrpc_notify_drivers(me, cid);
	} else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
		if (me->channel[0].remoteheap_ramdump_dev &&
				notifdata->enable_ramdump) {
			me->channel[0].ramdumpenabled = 1;
		}
	} else if (code == SUBSYS_AFTER_POWERUP) {
		ctx->issubsystemup = 1;
	}

	return NOTIFY_DONE;
}
2906
/*
 * File operations for the fastrpc character device; compat_ioctl routes
 * 32-bit user space through the compat translation layer.
 */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};
2913
/* Device-tree compatibles handled by this driver; fastrpc_probe()
 * dispatches on which one matched.
 */
static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};
2921
/*
 * Probe one SMMU context-bank child node: match its "label" property
 * against a known channel, parse the "iommus" phandle, create an ARM
 * IOMMU mapping for the channel's next free session slot and attach
 * the device to it.
 */
static int fastrpc_cb_probe(struct device *dev)
{
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	struct of_phandle_args iommuspec;
	const char *name;
	/* Base IOVA of the mapping window; lowered for secure banks. */
	unsigned int start = 0x80000000;
	int err = 0, i;
	int secure_vmid = VMID_CP_PIXEL;

	VERIFY(err, NULL != (name = of_get_property(dev->of_node,
					 "label", NULL)));
	if (err)
		goto bail;
	/* Find the channel whose name matches the DT label. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		if (!strcmp(name, gcinfo[i].name))
			break;
	}
	VERIFY(err, i < NUM_CHANNELS);
	if (err)
		goto bail;
	chan = &gcinfo[i];
	VERIFY(err, chan->sesscount < NUM_SESSIONS);
	if (err)
		goto bail;

	VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
						"#iommu-cells", 0, &iommuspec));
	if (err)
		goto bail;
	sess = &chan->session[chan->sesscount];
	/* Context bank number comes from the first iommus cell. */
	sess->smmu.cb = iommuspec.args[0] & 0xf;
	sess->used = 0;
	sess->smmu.coherent = of_property_read_bool(dev->of_node,
						"dma-coherent");
	sess->smmu.secure = of_property_read_bool(dev->of_node,
						"qcom,secure-context-bank");
	if (sess->smmu.secure)
		start = 0x60000000;
	/* IOVA window of size 0x78000000 starting at `start`. */
	VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
			arm_iommu_create_mapping(&platform_bus_type,
						start, 0x78000000)));
	if (err)
		goto bail;

	if (sess->smmu.secure)
		iommu_domain_set_attr(sess->smmu.mapping->domain,
				DOMAIN_ATTR_SECURE_VMID,
				&secure_vmid);

	VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
	if (err)
		goto bail;
	sess->smmu.dev = dev;
	sess->smmu.enabled = 1;
	chan->sesscount++;
	/* NOTE(review): re-created on every context-bank probe; the last
	 * probe wins — confirm a single global file is intended.
	 */
	debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
						NULL, &debugfs_fops);
bail:
	return err;
}
2985
/*
 * Platform probe; dispatches on the matched DT compatible:
 *  - "qcom,msm-fastrpc-compute": read channel-wide properties
 *    (remote-heap VMID, RPC latency), then populate child nodes.
 *  - "qcom,msm-fastrpc-compute-cb": an SMMU context bank, handled by
 *    fastrpc_cb_probe().
 *  - "qcom,msm-adsprpc-mem-region": locate the ADSP ION heap's CMA
 *    region and hyp-assign it to the remote subsystems.
 */
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;


	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute")) {
		of_property_read_u32(dev->of_node, "qcom,adsp-remoteheap-vmid",
			&gcinfo[0].rhvmid);

		pr_info("ADSPRPC : vmids adsp=%d\n", gcinfo[0].rhvmid);

		of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
			&me->latency);
	}
	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;
		/* Walk the ION heap children to find the ADSP heap and its
		 * backing CMA area.
		 */
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
						VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				};

			/* Share the region with HLOS plus the modem, SLPI
			 * and ADSP Q6 VMs, all read/write/execute.
			 */
			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
					srcVM, 1, destVM, destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					  fastrpc_match_table,
					  NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}
3060
3061static void fastrpc_deinit(void)
3062{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303063 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003064 struct fastrpc_channel_ctx *chan = gcinfo;
3065 int i, j;
3066
3067 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
3068 if (chan->chan) {
3069 kref_put_mutex(&chan->kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303070 fastrpc_channel_close, &me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05303071 chan->chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003072 }
3073 for (j = 0; j < NUM_SESSIONS; j++) {
3074 struct fastrpc_session_ctx *sess = &chan->session[j];
c_mtharue1a5ce12017-10-13 20:47:09 +05303075 if (sess->smmu.dev) {
3076 arm_iommu_detach_device(sess->smmu.dev);
3077 sess->smmu.dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003078 }
3079 if (sess->smmu.mapping) {
3080 arm_iommu_release_mapping(sess->smmu.mapping);
c_mtharue1a5ce12017-10-13 20:47:09 +05303081 sess->smmu.mapping = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003082 }
3083 }
3084 }
3085}
3086
/* Platform driver; probe dispatches per-compatible (see fastrpc_probe). */
static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};
3095
/*
 * Module init: register the platform driver, a char-device region, one
 * device node (minor 0, shared by all channels), per-channel SSR
 * notifiers, an ION client and the debugfs root.  Failures unwind in
 * reverse order through the goto ladder below.
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	struct device *dev = NULL;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				1));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	/* Single device node at minor 0; every channel context shares it. */
	dev = device_create(me->class, NULL,
				MKDEV(MAJOR(me->dev_no), 0),
				NULL, gcinfo[0].name);
	VERIFY(err, !IS_ERR_OR_NULL(dev));
	if (err)
		goto device_create_bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		me->channel[i].dev = dev;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].issubsystemup = 1;
		me->channel[i].ramdumpenabled = 0;
		me->channel[i].remoteheap_ramdump_dev = NULL;
		/* Watch this channel's subsystem for restart (SSR) events. */
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	debugfs_root = debugfs_create_dir("adsprpc", NULL);
	return 0;
device_create_bail:
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (me->channel[i].handle)
			subsys_notif_unregister_notifier(me->channel[i].handle,
							&me->channel[i].nb);
	}
	if (!IS_ERR_OR_NULL(dev))
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}
3167
3168static void __exit fastrpc_device_exit(void)
3169{
3170 struct fastrpc_apps *me = &gfa;
3171 int i;
3172
3173 fastrpc_file_list_dtor(me);
3174 fastrpc_deinit();
3175 for (i = 0; i < NUM_CHANNELS; i++) {
3176 if (!gcinfo[i].name)
3177 continue;
3178 device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
3179 subsys_notif_unregister_notifier(me->channel[i].handle,
3180 &me->channel[i].nb);
3181 }
3182 class_destroy(me->class);
3183 cdev_del(&me->cdev);
3184 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
3185 ion_client_destroy(me->client);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003186 debugfs_remove_recursive(debugfs_root);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003187}
3188
/* Registered via late_initcall rather than module_init — presumably so
 * provider subsystems (glink, ion, subsys_notif) are up first; confirm.
 */
late_initcall(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");