blob: af8ad7ce09b33267d48fbc371d06edaa283e1cad [file] [log] [blame]
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001/*
Tharun Kumar Merugubcd6fbf2018-01-04 17:49:34 +05302 * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
29#include <soc/qcom/subsystem_notif.h>
30#include <soc/qcom/subsystem_restart.h>
31#include <linux/scatterlist.h>
32#include <linux/fs.h>
33#include <linux/uaccess.h>
34#include <linux/device.h>
35#include <linux/of.h>
36#include <linux/of_address.h>
37#include <linux/of_platform.h>
38#include <linux/dma-contiguous.h>
39#include <linux/cma.h>
40#include <linux/iommu.h>
41#include <linux/kref.h>
42#include <linux/sort.h>
43#include <linux/msm_dma_iommu_mapping.h>
44#include <asm/dma-iommu.h>
c_mtharue1a5ce12017-10-13 20:47:09 +053045#include <soc/qcom/scm.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070046#include "adsprpc_compat.h"
47#include "adsprpc_shared.h"
c_mtharue1a5ce12017-10-13 20:47:09 +053048#include <soc/qcom/ramdump.h>
Sathish Ambley1ca68232017-01-19 10:32:55 -080049#include <linux/debugfs.h>
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053050#include <linux/pm_qos.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070051#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
52#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
53#define TZ_PIL_AUTH_QDSP6_PROC 1
c_mtharue1a5ce12017-10-13 20:47:09 +053054#define ADSP_MMAP_HEAP_ADDR 4
55#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
Sathish Ambley69e1ab02016-10-18 10:28:15 -070056#define FASTRPC_ENOSUCH 39
57#define VMID_SSC_Q6 5
58#define VMID_ADSP_Q6 6
Sathish Ambley1ca68232017-01-19 10:32:55 -080059#define DEBUGFS_SIZE 1024
Sathish Ambley69e1ab02016-10-18 10:28:15 -070060
61#define RPC_TIMEOUT (5 * HZ)
62#define BALIGN 128
63#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
64#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
Sathish Ambleybae51902017-07-03 15:00:49 -070065#define M_FDLIST (16)
66#define M_CRCLIST (64)
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +053067#define SESSION_ID_INDEX (30)
c_mtharufdac6892017-10-12 13:09:01 +053068#define FASTRPC_CTX_MAGIC (0xbeeddeed)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070069
70#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
71
72#define FASTRPC_LINK_STATE_DOWN (0x0)
73#define FASTRPC_LINK_STATE_UP (0x1)
74#define FASTRPC_LINK_DISCONNECTED (0x0)
75#define FASTRPC_LINK_CONNECTING (0x1)
76#define FASTRPC_LINK_CONNECTED (0x3)
77#define FASTRPC_LINK_DISCONNECTING (0x7)
c_mtharu314a4202017-11-15 22:09:17 +053078#define FASTRPC_LINK_REMOTE_DISCONNECTING (0x8)
79#define FASTRPC_GLINK_INTENT_LEN (64)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070080
Sathish Ambleya21b5b52017-01-11 16:11:01 -080081#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
82#define FASTRPC_STATIC_HANDLE_LISTENER (3)
83#define FASTRPC_STATIC_HANDLE_MAX (20)
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053084#define FASTRPC_LATENCY_CTRL_ENB (1)
Sathish Ambleya21b5b52017-01-11 16:11:01 -080085
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +053086#define INIT_FILELEN_MAX (2*1024*1024)
87#define INIT_MEMLEN_MAX (8*1024*1024)
88
Sathish Ambleya21b5b52017-01-11 16:11:01 -080089#define PERF_END (void)0
90
91#define PERF(enb, cnt, ff) \
92 {\
93 struct timespec startT = {0};\
94 if (enb) {\
95 getnstimeofday(&startT);\
96 } \
97 ff ;\
98 if (enb) {\
99 cnt += getnstimediff(&startT);\
100 } \
101 }
102
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700103static int fastrpc_glink_open(int cid);
104static void fastrpc_glink_close(void *chan, int cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -0800105static struct dentry *debugfs_root;
106static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700107
108static inline uint64_t buf_page_start(uint64_t buf)
109{
110 uint64_t start = (uint64_t) buf & PAGE_MASK;
111 return start;
112}
113
114static inline uint64_t buf_page_offset(uint64_t buf)
115{
116 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
117 return offset;
118}
119
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530120static inline uint64_t buf_num_pages(uint64_t buf, size_t len)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700121{
122 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
123 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530124 uint64_t nPages = end - start + 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700125 return nPages;
126}
127
128static inline uint64_t buf_page_size(uint32_t size)
129{
130 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
131
132 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
133}
134
135static inline void *uint64_to_ptr(uint64_t addr)
136{
137 void *ptr = (void *)((uintptr_t)addr);
138
139 return ptr;
140}
141
142static inline uint64_t ptr_to_uint64(void *ptr)
143{
144 uint64_t addr = (uint64_t)((uintptr_t)ptr);
145
146 return addr;
147}
148
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +0530149struct secure_vm {
150 int *vmid;
151 int *vmperm;
152 int vmcount;
153};
154
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700155struct fastrpc_file;
156
157struct fastrpc_buf {
158 struct hlist_node hn;
159 struct fastrpc_file *fl;
160 void *virt;
161 uint64_t phys;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530162 size_t size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700163};
164
165struct fastrpc_ctx_lst;
166
167struct overlap {
168 uintptr_t start;
169 uintptr_t end;
170 int raix;
171 uintptr_t mstart;
172 uintptr_t mend;
173 uintptr_t offset;
174};
175
176struct smq_invoke_ctx {
177 struct hlist_node hn;
178 struct completion work;
179 int retval;
180 int pid;
181 int tgid;
182 remote_arg_t *lpra;
183 remote_arg64_t *rpra;
184 int *fds;
185 unsigned int *attrs;
186 struct fastrpc_mmap **maps;
187 struct fastrpc_buf *buf;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530188 size_t used;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700189 struct fastrpc_file *fl;
190 uint32_t sc;
191 struct overlap *overs;
192 struct overlap **overps;
193 struct smq_msg msg;
Sathish Ambleybae51902017-07-03 15:00:49 -0700194 uint32_t *crc;
c_mtharufdac6892017-10-12 13:09:01 +0530195 unsigned int magic;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700196};
197
198struct fastrpc_ctx_lst {
199 struct hlist_head pending;
200 struct hlist_head interrupted;
201};
202
203struct fastrpc_smmu {
c_mtharue1a5ce12017-10-13 20:47:09 +0530204 struct device *dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700205 struct dma_iommu_mapping *mapping;
206 int cb;
207 int enabled;
208 int faults;
209 int secure;
210 int coherent;
211};
212
213struct fastrpc_session_ctx {
214 struct device *dev;
215 struct fastrpc_smmu smmu;
216 int used;
217};
218
219struct fastrpc_glink_info {
220 int link_state;
221 int port_state;
222 struct glink_open_config cfg;
223 struct glink_link_info link_info;
224 void *link_notify_handle;
225};
226
227struct fastrpc_channel_ctx {
228 char *name;
229 char *subsys;
230 void *chan;
231 struct device *dev;
232 struct fastrpc_session_ctx session[NUM_SESSIONS];
233 struct completion work;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +0530234 struct completion workport;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700235 struct notifier_block nb;
236 struct kref kref;
237 int sesscount;
238 int ssrcount;
239 void *handle;
240 int prevssrcount;
c_mtharue1a5ce12017-10-13 20:47:09 +0530241 int issubsystemup;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700242 int vmid;
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +0530243 struct secure_vm rhvm;
c_mtharue1a5ce12017-10-13 20:47:09 +0530244 int ramdumpenabled;
245 void *remoteheap_ramdump_dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700246 struct fastrpc_glink_info link;
247};
248
249struct fastrpc_apps {
250 struct fastrpc_channel_ctx *channel;
251 struct cdev cdev;
252 struct class *class;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +0530253 struct mutex smd_mutex;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700254 struct smq_phy_page range;
255 struct hlist_head maps;
c_mtharue1a5ce12017-10-13 20:47:09 +0530256 uint32_t staticpd_flags;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700257 dev_t dev_no;
258 int compat;
259 struct hlist_head drivers;
260 spinlock_t hlock;
261 struct ion_client *client;
262 struct device *dev;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +0530263 unsigned int latency;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700264};
265
266struct fastrpc_mmap {
267 struct hlist_node hn;
268 struct fastrpc_file *fl;
269 struct fastrpc_apps *apps;
270 int fd;
271 uint32_t flags;
272 struct dma_buf *buf;
273 struct sg_table *table;
274 struct dma_buf_attachment *attach;
275 struct ion_handle *handle;
276 uint64_t phys;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530277 size_t size;
278 uintptr_t va;
279 size_t len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700280 int refs;
281 uintptr_t raddr;
282 int uncached;
283 int secure;
284 uintptr_t attr;
285};
286
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800287struct fastrpc_perf {
288 int64_t count;
289 int64_t flush;
290 int64_t map;
291 int64_t copy;
292 int64_t link;
293 int64_t getargs;
294 int64_t putargs;
295 int64_t invargs;
296 int64_t invoke;
297};
298
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700299struct fastrpc_file {
300 struct hlist_node hn;
301 spinlock_t hlock;
302 struct hlist_head maps;
303 struct hlist_head bufs;
304 struct fastrpc_ctx_lst clst;
305 struct fastrpc_session_ctx *sctx;
306 struct fastrpc_session_ctx *secsctx;
307 uint32_t mode;
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800308 uint32_t profile;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +0530309 int sessionid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700310 int tgid;
311 int cid;
312 int ssrcount;
313 int pd;
tharun kumar9f899ea2017-07-03 17:07:03 +0530314 int file_close;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700315 struct fastrpc_apps *apps;
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800316 struct fastrpc_perf perf;
Sathish Ambley1ca68232017-01-19 10:32:55 -0800317 struct dentry *debugfs_file;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +0530318 struct pm_qos_request pm_qos_req;
319 int qos_request;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700320};
321
322static struct fastrpc_apps gfa;
323
324static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
325 {
326 .name = "adsprpc-smd",
327 .subsys = "adsp",
328 .link.link_info.edge = "lpass",
329 .link.link_info.transport = "smem",
330 },
331 {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700332 .name = "mdsprpc-smd",
333 .subsys = "modem",
334 .link.link_info.edge = "mpss",
335 .link.link_info.transport = "smem",
336 },
337 {
Sathish Ambley36849af2017-02-02 09:35:55 -0800338 .name = "sdsprpc-smd",
339 .subsys = "slpi",
340 .link.link_info.edge = "dsps",
341 .link.link_info.transport = "smem",
Sathish Ambley36849af2017-02-02 09:35:55 -0800342 },
343 {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700344 .name = "cdsprpc-smd",
345 .subsys = "cdsp",
346 .link.link_info.edge = "cdsp",
347 .link.link_info.transport = "smem",
348 },
349};
350
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +0530351static int hlosvm[1] = {VMID_HLOS};
352static int hlosvmperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
353
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800354static inline int64_t getnstimediff(struct timespec *start)
355{
356 int64_t ns;
357 struct timespec ts, b;
358
359 getnstimeofday(&ts);
360 b = timespec_sub(ts, *start);
361 ns = timespec_to_ns(&b);
362 return ns;
363}
364
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700365static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
366{
c_mtharue1a5ce12017-10-13 20:47:09 +0530367 struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700368 int vmid;
369
370 if (!fl)
371 return;
372 if (cache) {
373 spin_lock(&fl->hlock);
374 hlist_add_head(&buf->hn, &fl->bufs);
375 spin_unlock(&fl->hlock);
376 return;
377 }
378 if (!IS_ERR_OR_NULL(buf->virt)) {
379 int destVM[1] = {VMID_HLOS};
380 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
381
382 if (fl->sctx->smmu.cb)
383 buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
384 vmid = fl->apps->channel[fl->cid].vmid;
385 if (vmid) {
386 int srcVM[2] = {VMID_HLOS, vmid};
387
388 hyp_assign_phys(buf->phys, buf_page_size(buf->size),
389 srcVM, 2, destVM, destVMperm, 1);
390 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530391 dma_free_coherent(fl->sctx->smmu.dev, buf->size, buf->virt,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700392 buf->phys);
393 }
394 kfree(buf);
395}
396
397static void fastrpc_buf_list_free(struct fastrpc_file *fl)
398{
399 struct fastrpc_buf *buf, *free;
400
401 do {
402 struct hlist_node *n;
403
c_mtharue1a5ce12017-10-13 20:47:09 +0530404 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700405 spin_lock(&fl->hlock);
406 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
407 hlist_del_init(&buf->hn);
408 free = buf;
409 break;
410 }
411 spin_unlock(&fl->hlock);
412 if (free)
413 fastrpc_buf_free(free, 0);
414 } while (free);
415}
416
417static void fastrpc_mmap_add(struct fastrpc_mmap *map)
418{
c_mtharue1a5ce12017-10-13 20:47:09 +0530419 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
420 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
421 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700422
c_mtharue1a5ce12017-10-13 20:47:09 +0530423 spin_lock(&me->hlock);
424 hlist_add_head(&map->hn, &me->maps);
425 spin_unlock(&me->hlock);
426 } else {
427 struct fastrpc_file *fl = map->fl;
428
429 spin_lock(&fl->hlock);
430 hlist_add_head(&map->hn, &fl->maps);
431 spin_unlock(&fl->hlock);
432 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700433}
434
c_mtharue1a5ce12017-10-13 20:47:09 +0530435static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530436 uintptr_t va, size_t len, int mflags, int refs,
c_mtharue1a5ce12017-10-13 20:47:09 +0530437 struct fastrpc_mmap **ppmap)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700438{
c_mtharue1a5ce12017-10-13 20:47:09 +0530439 struct fastrpc_apps *me = &gfa;
440 struct fastrpc_mmap *match = NULL, *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700441 struct hlist_node *n;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530442
443 if ((va + len) < va)
444 return -EOVERFLOW;
c_mtharue1a5ce12017-10-13 20:47:09 +0530445 if (mflags == ADSP_MMAP_HEAP_ADDR ||
446 mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
447 spin_lock(&me->hlock);
448 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
449 if (va >= map->va &&
450 va + len <= map->va + map->len &&
451 map->fd == fd) {
452 if (refs)
453 map->refs++;
454 match = map;
455 break;
456 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700457 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530458 spin_unlock(&me->hlock);
459 } else {
460 spin_lock(&fl->hlock);
461 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
462 if (va >= map->va &&
463 va + len <= map->va + map->len &&
464 map->fd == fd) {
465 if (refs)
466 map->refs++;
467 match = map;
468 break;
469 }
470 }
471 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700472 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700473 if (match) {
474 *ppmap = match;
475 return 0;
476 }
477 return -ENOTTY;
478}
479
c_mtharuf931ff92017-11-30 19:35:30 +0530480static int dma_alloc_memory(dma_addr_t *region_phys, void **vaddr, size_t size)
c_mtharue1a5ce12017-10-13 20:47:09 +0530481{
482 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +0530483
484 if (me->dev == NULL) {
485 pr_err("device adsprpc-mem is not initialized\n");
486 return -ENODEV;
487 }
c_mtharuf931ff92017-11-30 19:35:30 +0530488 *vaddr = dma_alloc_coherent(me->dev, size, region_phys, GFP_KERNEL);
489 if (!*vaddr) {
c_mtharue1a5ce12017-10-13 20:47:09 +0530490 pr_err("ADSPRPC: Failed to allocate %x remote heap memory\n",
491 (unsigned int)size);
492 return -ENOMEM;
493 }
494 return 0;
495}
496
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700497static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530498 size_t len, struct fastrpc_mmap **ppmap)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700499{
c_mtharue1a5ce12017-10-13 20:47:09 +0530500 struct fastrpc_mmap *match = NULL, *map;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700501 struct hlist_node *n;
502 struct fastrpc_apps *me = &gfa;
503
504 spin_lock(&me->hlock);
505 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
506 if (map->raddr == va &&
507 map->raddr + map->len == va + len &&
508 map->refs == 1) {
509 match = map;
510 hlist_del_init(&map->hn);
511 break;
512 }
513 }
514 spin_unlock(&me->hlock);
515 if (match) {
516 *ppmap = match;
517 return 0;
518 }
519 spin_lock(&fl->hlock);
520 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
521 if (map->raddr == va &&
522 map->raddr + map->len == va + len &&
523 map->refs == 1) {
524 match = map;
525 hlist_del_init(&map->hn);
526 break;
527 }
528 }
529 spin_unlock(&fl->hlock);
530 if (match) {
531 *ppmap = match;
532 return 0;
533 }
534 return -ENOTTY;
535}
536
c_mtharu7bd6a422017-10-17 18:15:37 +0530537static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700538{
c_mtharue1a5ce12017-10-13 20:47:09 +0530539 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700540 struct fastrpc_file *fl;
541 int vmid;
542 struct fastrpc_session_ctx *sess;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700543
544 if (!map)
545 return;
546 fl = map->fl;
c_mtharue1a5ce12017-10-13 20:47:09 +0530547 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
548 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
549 spin_lock(&me->hlock);
550 map->refs--;
551 if (!map->refs)
552 hlist_del_init(&map->hn);
553 spin_unlock(&me->hlock);
c_mtharu7bd6a422017-10-17 18:15:37 +0530554 if (map->refs > 0)
555 return;
c_mtharue1a5ce12017-10-13 20:47:09 +0530556 } else {
557 spin_lock(&fl->hlock);
558 map->refs--;
559 if (!map->refs)
560 hlist_del_init(&map->hn);
561 spin_unlock(&fl->hlock);
c_mtharu7bd6a422017-10-17 18:15:37 +0530562 if (map->refs > 0 && !flags)
563 return;
c_mtharue1a5ce12017-10-13 20:47:09 +0530564 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530565 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
566 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700567
c_mtharue1a5ce12017-10-13 20:47:09 +0530568 if (me->dev == NULL) {
569 pr_err("failed to free remote heap allocation\n");
570 return;
571 }
572 if (map->phys) {
573 dma_free_coherent(me->dev, map->size,
c_mtharuf931ff92017-11-30 19:35:30 +0530574 (void *)map->va, (dma_addr_t)map->phys);
c_mtharue1a5ce12017-10-13 20:47:09 +0530575 }
576 } else {
577 int destVM[1] = {VMID_HLOS};
578 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
579
580 if (map->secure)
581 sess = fl->secsctx;
582 else
583 sess = fl->sctx;
584
585 if (!IS_ERR_OR_NULL(map->handle))
586 ion_free(fl->apps->client, map->handle);
587 if (sess && sess->smmu.enabled) {
588 if (map->size || map->phys)
589 msm_dma_unmap_sg(sess->smmu.dev,
590 map->table->sgl,
591 map->table->nents, DMA_BIDIRECTIONAL,
592 map->buf);
593 }
594 vmid = fl->apps->channel[fl->cid].vmid;
595 if (vmid && map->phys) {
596 int srcVM[2] = {VMID_HLOS, vmid};
597
598 hyp_assign_phys(map->phys, buf_page_size(map->size),
599 srcVM, 2, destVM, destVMperm, 1);
600 }
601
602 if (!IS_ERR_OR_NULL(map->table))
603 dma_buf_unmap_attachment(map->attach, map->table,
604 DMA_BIDIRECTIONAL);
605 if (!IS_ERR_OR_NULL(map->attach))
606 dma_buf_detach(map->buf, map->attach);
607 if (!IS_ERR_OR_NULL(map->buf))
608 dma_buf_put(map->buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700609 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700610 kfree(map);
611}
612
613static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
614 struct fastrpc_session_ctx **session);
615
616static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530617 unsigned int attr, uintptr_t va, size_t len, int mflags,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700618 struct fastrpc_mmap **ppmap)
619{
c_mtharue1a5ce12017-10-13 20:47:09 +0530620 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700621 struct fastrpc_session_ctx *sess;
622 struct fastrpc_apps *apps = fl->apps;
623 int cid = fl->cid;
624 struct fastrpc_channel_ctx *chan = &apps->channel[cid];
c_mtharue1a5ce12017-10-13 20:47:09 +0530625 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700626 unsigned long attrs;
c_mtharuf931ff92017-11-30 19:35:30 +0530627 dma_addr_t region_phys = 0;
628 void *region_vaddr = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700629 unsigned long flags;
630 int err = 0, vmid;
631
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800632 if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700633 return 0;
634 map = kzalloc(sizeof(*map), GFP_KERNEL);
635 VERIFY(err, !IS_ERR_OR_NULL(map));
636 if (err)
637 goto bail;
638 INIT_HLIST_NODE(&map->hn);
639 map->flags = mflags;
640 map->refs = 1;
641 map->fl = fl;
642 map->fd = fd;
643 map->attr = attr;
c_mtharue1a5ce12017-10-13 20:47:09 +0530644 if (mflags == ADSP_MMAP_HEAP_ADDR ||
645 mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
646 map->apps = me;
647 map->fl = NULL;
c_mtharuf931ff92017-11-30 19:35:30 +0530648 VERIFY(err, !dma_alloc_memory(&region_phys, &region_vaddr,
649 len));
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700650 if (err)
651 goto bail;
c_mtharuf931ff92017-11-30 19:35:30 +0530652 map->phys = (uintptr_t)region_phys;
c_mtharue1a5ce12017-10-13 20:47:09 +0530653 map->size = len;
c_mtharuf931ff92017-11-30 19:35:30 +0530654 map->va = (uintptr_t)region_vaddr;
c_mtharue1a5ce12017-10-13 20:47:09 +0530655 } else {
c_mtharu7bd6a422017-10-17 18:15:37 +0530656 if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
657 pr_info("adsprpc: buffer mapped with persist attr %x\n",
658 (unsigned int)map->attr);
659 map->refs = 2;
660 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530661 VERIFY(err, !IS_ERR_OR_NULL(map->handle =
662 ion_import_dma_buf_fd(fl->apps->client, fd)));
663 if (err)
664 goto bail;
665 VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
666 &flags));
667 if (err)
668 goto bail;
669
c_mtharue1a5ce12017-10-13 20:47:09 +0530670 map->secure = flags & ION_FLAG_SECURE;
671 if (map->secure) {
672 if (!fl->secsctx)
673 err = fastrpc_session_alloc(chan, 1,
674 &fl->secsctx);
675 if (err)
676 goto bail;
677 }
678 if (map->secure)
679 sess = fl->secsctx;
680 else
681 sess = fl->sctx;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +0530682
c_mtharue1a5ce12017-10-13 20:47:09 +0530683 VERIFY(err, !IS_ERR_OR_NULL(sess));
684 if (err)
685 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +0530686
687 map->uncached = !ION_IS_CACHED(flags);
688 if (map->attr & FASTRPC_ATTR_NOVA && !sess->smmu.coherent)
689 map->uncached = 1;
690
c_mtharue1a5ce12017-10-13 20:47:09 +0530691 VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
692 if (err)
693 goto bail;
694 VERIFY(err, !IS_ERR_OR_NULL(map->attach =
695 dma_buf_attach(map->buf, sess->smmu.dev)));
696 if (err)
697 goto bail;
698 VERIFY(err, !IS_ERR_OR_NULL(map->table =
699 dma_buf_map_attachment(map->attach,
700 DMA_BIDIRECTIONAL)));
701 if (err)
702 goto bail;
703 if (sess->smmu.enabled) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700704 attrs = DMA_ATTR_EXEC_MAPPING;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +0530705
706 if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
707 (sess->smmu.coherent && map->uncached))
708 attrs |= DMA_ATTR_FORCE_NON_COHERENT;
709 else if (map->attr & FASTRPC_ATTR_COHERENT)
710 attrs |= DMA_ATTR_FORCE_COHERENT;
711
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700712 VERIFY(err, map->table->nents ==
c_mtharue1a5ce12017-10-13 20:47:09 +0530713 msm_dma_map_sg_attrs(sess->smmu.dev,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700714 map->table->sgl, map->table->nents,
715 DMA_BIDIRECTIONAL, map->buf, attrs));
c_mtharue1a5ce12017-10-13 20:47:09 +0530716 if (err)
717 goto bail;
718 } else {
719 VERIFY(err, map->table->nents == 1);
720 if (err)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700721 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +0530722 }
723 map->phys = sg_dma_address(map->table->sgl);
724 if (sess->smmu.cb) {
725 map->phys += ((uint64_t)sess->smmu.cb << 32);
726 map->size = sg_dma_len(map->table->sgl);
727 } else {
728 map->size = buf_page_size(len);
729 }
730 vmid = fl->apps->channel[fl->cid].vmid;
731 if (vmid) {
732 int srcVM[1] = {VMID_HLOS};
733 int destVM[2] = {VMID_HLOS, vmid};
734 int destVMperm[2] = {PERM_READ | PERM_WRITE,
735 PERM_READ | PERM_WRITE | PERM_EXEC};
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700736
c_mtharue1a5ce12017-10-13 20:47:09 +0530737 VERIFY(err, !hyp_assign_phys(map->phys,
738 buf_page_size(map->size),
739 srcVM, 1, destVM, destVMperm, 2));
740 if (err)
741 goto bail;
742 }
743 map->va = va;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700744 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700745 map->len = len;
746
747 fastrpc_mmap_add(map);
748 *ppmap = map;
749
750bail:
751 if (err && map)
c_mtharu7bd6a422017-10-17 18:15:37 +0530752 fastrpc_mmap_free(map, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700753 return err;
754}
755
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530756static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700757 struct fastrpc_buf **obuf)
758{
759 int err = 0, vmid;
c_mtharue1a5ce12017-10-13 20:47:09 +0530760 struct fastrpc_buf *buf = NULL, *fr = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700761 struct hlist_node *n;
762
763 VERIFY(err, size > 0);
764 if (err)
765 goto bail;
766
767 /* find the smallest buffer that fits in the cache */
768 spin_lock(&fl->hlock);
769 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
770 if (buf->size >= size && (!fr || fr->size > buf->size))
771 fr = buf;
772 }
773 if (fr)
774 hlist_del_init(&fr->hn);
775 spin_unlock(&fl->hlock);
776 if (fr) {
777 *obuf = fr;
778 return 0;
779 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530780 buf = NULL;
781 VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700782 if (err)
783 goto bail;
784 INIT_HLIST_NODE(&buf->hn);
785 buf->fl = fl;
c_mtharue1a5ce12017-10-13 20:47:09 +0530786 buf->virt = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700787 buf->phys = 0;
788 buf->size = size;
c_mtharue1a5ce12017-10-13 20:47:09 +0530789 buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700790 (void *)&buf->phys, GFP_KERNEL);
791 if (IS_ERR_OR_NULL(buf->virt)) {
792 /* free cache and retry */
793 fastrpc_buf_list_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +0530794 buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700795 (void *)&buf->phys, GFP_KERNEL);
796 VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
797 }
798 if (err)
799 goto bail;
800 if (fl->sctx->smmu.cb)
801 buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
802 vmid = fl->apps->channel[fl->cid].vmid;
803 if (vmid) {
804 int srcVM[1] = {VMID_HLOS};
805 int destVM[2] = {VMID_HLOS, vmid};
806 int destVMperm[2] = {PERM_READ | PERM_WRITE,
807 PERM_READ | PERM_WRITE | PERM_EXEC};
808
809 VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
810 srcVM, 1, destVM, destVMperm, 2));
811 if (err)
812 goto bail;
813 }
814
815 *obuf = buf;
816 bail:
817 if (err && buf)
818 fastrpc_buf_free(buf, 0);
819 return err;
820}
821
822
823static int context_restore_interrupted(struct fastrpc_file *fl,
Sathish Ambleybae51902017-07-03 15:00:49 -0700824 struct fastrpc_ioctl_invoke_crc *inv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700825 struct smq_invoke_ctx **po)
826{
827 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +0530828 struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700829 struct hlist_node *n;
830 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
831
832 spin_lock(&fl->hlock);
833 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
834 if (ictx->pid == current->pid) {
835 if (invoke->sc != ictx->sc || ictx->fl != fl)
836 err = -1;
837 else {
838 ctx = ictx;
839 hlist_del_init(&ctx->hn);
840 hlist_add_head(&ctx->hn, &fl->clst.pending);
841 }
842 break;
843 }
844 }
845 spin_unlock(&fl->hlock);
846 if (ctx)
847 *po = ctx;
848 return err;
849}
850
851#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
852static int overlap_ptr_cmp(const void *a, const void *b)
853{
854 struct overlap *pa = *((struct overlap **)a);
855 struct overlap *pb = *((struct overlap **)b);
856 /* sort with lowest starting buffer first */
857 int st = CMP(pa->start, pb->start);
858 /* sort with highest ending buffer first */
859 int ed = CMP(pb->end, pa->end);
860 return st == 0 ? ed : st;
861}
862
Sathish Ambley9466d672017-01-25 10:51:55 -0800863static int context_build_overlap(struct smq_invoke_ctx *ctx)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700864{
Sathish Ambley9466d672017-01-25 10:51:55 -0800865 int i, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700866 remote_arg_t *lpra = ctx->lpra;
867 int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
868 int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
869 int nbufs = inbufs + outbufs;
870 struct overlap max;
871
872 for (i = 0; i < nbufs; ++i) {
873 ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
874 ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
Sathish Ambley9466d672017-01-25 10:51:55 -0800875 if (lpra[i].buf.len) {
876 VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
877 if (err)
878 goto bail;
879 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700880 ctx->overs[i].raix = i;
881 ctx->overps[i] = &ctx->overs[i];
882 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530883 sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700884 max.start = 0;
885 max.end = 0;
886 for (i = 0; i < nbufs; ++i) {
887 if (ctx->overps[i]->start < max.end) {
888 ctx->overps[i]->mstart = max.end;
889 ctx->overps[i]->mend = ctx->overps[i]->end;
890 ctx->overps[i]->offset = max.end -
891 ctx->overps[i]->start;
892 if (ctx->overps[i]->end > max.end) {
893 max.end = ctx->overps[i]->end;
894 } else {
895 ctx->overps[i]->mend = 0;
896 ctx->overps[i]->mstart = 0;
897 }
898 } else {
899 ctx->overps[i]->mend = ctx->overps[i]->end;
900 ctx->overps[i]->mstart = ctx->overps[i]->start;
901 ctx->overps[i]->offset = 0;
902 max = *ctx->overps[i];
903 }
904 }
Sathish Ambley9466d672017-01-25 10:51:55 -0800905bail:
906 return err;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700907}
908
/*
 * K_COPY_FROM_USER() — bring "size" bytes from "src" into kernel buffer
 * "dst".  In-kernel callers (kernel != 0) get a plain memmove; user-space
 * callers go through copy_from_user, with any fault reported via
 * VERIFY into "err".
 */
#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (kernel)\
			memmove((dst), (src), (size));\
		else\
			VERIFY(err, 0 == copy_from_user((dst),\
			(void const __user *)(src),\
			(size)));\
	} while (0)
918
/*
 * K_COPY_TO_USER() — push "size" bytes from kernel buffer "src" to
 * "dst".  Mirror of K_COPY_FROM_USER: memmove for in-kernel
 * destinations, copy_to_user (fault reported via VERIFY into "err")
 * for user-space destinations.
 */
#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (kernel)\
			memmove((dst), (src), (size));\
		else\
			VERIFY(err, 0 == copy_to_user((void __user *)(dst), \
			(src), (size)));\
	} while (0)
927
928
929static void context_free(struct smq_invoke_ctx *ctx);
930
/*
 * Allocate and initialise a smq_invoke_ctx for one remote invocation.
 *
 * A single kzalloc carries the ctx struct followed by its per-argument
 * arrays — maps, lpra, fds, attrs, overs, overps — which are carved out
 * of the allocation tail in that order.  The remote args, fd table and
 * attribute table are copied in from user space (or memmove'd for
 * in-kernel callers) via K_COPY_FROM_USER.  On success the context is
 * queued on the file's pending list and returned through *po; on any
 * failure the partially built context is freed before returning.
 *
 * Returns 0 on success, a VERIFY/copy error otherwise.
 */
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			 struct fastrpc_ioctl_invoke_crc *invokefd,
			 struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = NULL;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	/* bufs counts all buffer + handle arguments of the call */
	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	/* carve the per-argument arrays out of the allocation tail */
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	/* fd and attribute tables are optional */
	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}
	ctx->crc = (uint32_t *)invokefd->crc;
	ctx->sc = invoke->sc;
	if (bufs) {
		/* pre-compute buffer overlap/merge info for get_args() */
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = fl->tgid;
	init_completion(&ctx->work);
	/* magic lets the response path validate a ctx pointer */
	ctx->magic = FASTRPC_CTX_MAGIC;

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}
1001
1002static void context_save_interrupted(struct smq_invoke_ctx *ctx)
1003{
1004 struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
1005
1006 spin_lock(&ctx->fl->hlock);
1007 hlist_del_init(&ctx->hn);
1008 hlist_add_head(&ctx->hn, &clst->interrupted);
1009 spin_unlock(&ctx->fl->hlock);
1010 /* free the cache on power collapse */
1011 fastrpc_buf_list_free(ctx->fl);
1012}
1013
1014static void context_free(struct smq_invoke_ctx *ctx)
1015{
1016 int i;
1017 int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
1018 REMOTE_SCALARS_OUTBUFS(ctx->sc);
1019 spin_lock(&ctx->fl->hlock);
1020 hlist_del_init(&ctx->hn);
1021 spin_unlock(&ctx->fl->hlock);
1022 for (i = 0; i < nbufs; ++i)
c_mtharu7bd6a422017-10-17 18:15:37 +05301023 fastrpc_mmap_free(ctx->maps[i], 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001024 fastrpc_buf_free(ctx->buf, 1);
c_mtharufdac6892017-10-12 13:09:01 +05301025 ctx->magic = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001026 kfree(ctx);
1027}
1028
1029static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
1030{
1031 ctx->retval = retval;
1032 complete(&ctx->work);
1033}
1034
1035
1036static void fastrpc_notify_users(struct fastrpc_file *me)
1037{
1038 struct smq_invoke_ctx *ictx;
1039 struct hlist_node *n;
1040
1041 spin_lock(&me->hlock);
1042 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1043 complete(&ictx->work);
1044 }
1045 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1046 complete(&ictx->work);
1047 }
1048 spin_unlock(&me->hlock);
1049
1050}
1051
1052static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
1053{
1054 struct fastrpc_file *fl;
1055 struct hlist_node *n;
1056
1057 spin_lock(&me->hlock);
1058 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1059 if (fl->cid == cid)
1060 fastrpc_notify_users(fl);
1061 }
1062 spin_unlock(&me->hlock);
1063
1064}
1065static void context_list_ctor(struct fastrpc_ctx_lst *me)
1066{
1067 INIT_HLIST_HEAD(&me->interrupted);
1068 INIT_HLIST_HEAD(&me->pending);
1069}
1070
1071static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
1072{
1073 struct fastrpc_ctx_lst *clst = &fl->clst;
c_mtharue1a5ce12017-10-13 20:47:09 +05301074 struct smq_invoke_ctx *ictx = NULL, *ctxfree;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001075 struct hlist_node *n;
1076
1077 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301078 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001079 spin_lock(&fl->hlock);
1080 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
1081 hlist_del_init(&ictx->hn);
1082 ctxfree = ictx;
1083 break;
1084 }
1085 spin_unlock(&fl->hlock);
1086 if (ctxfree)
1087 context_free(ctxfree);
1088 } while (ctxfree);
1089 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301090 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001091 spin_lock(&fl->hlock);
1092 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
1093 hlist_del_init(&ictx->hn);
1094 ctxfree = ictx;
1095 break;
1096 }
1097 spin_unlock(&fl->hlock);
1098 if (ctxfree)
1099 context_free(ctxfree);
1100 } while (ctxfree);
1101}
1102
1103static int fastrpc_file_free(struct fastrpc_file *fl);
1104static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
1105{
1106 struct fastrpc_file *fl, *free;
1107 struct hlist_node *n;
1108
1109 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301110 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001111 spin_lock(&me->hlock);
1112 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1113 hlist_del_init(&fl->hn);
1114 free = fl;
1115 break;
1116 }
1117 spin_unlock(&me->hlock);
1118 if (free)
1119 fastrpc_file_free(free);
1120 } while (free);
1121}
1122
1123static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
1124{
1125 remote_arg64_t *rpra;
1126 remote_arg_t *lpra = ctx->lpra;
1127 struct smq_invoke_buf *list;
1128 struct smq_phy_page *pages, *ipage;
1129 uint32_t sc = ctx->sc;
1130 int inbufs = REMOTE_SCALARS_INBUFS(sc);
1131 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001132 int handles, bufs = inbufs + outbufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001133 uintptr_t args;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301134 size_t rlen = 0, copylen = 0, metalen = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001135 int i, oix;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001136 int err = 0;
1137 int mflags = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001138 uint64_t *fdlist;
Sathish Ambleybae51902017-07-03 15:00:49 -07001139 uint32_t *crclist;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001140
1141 /* calculate size of the metadata */
c_mtharue1a5ce12017-10-13 20:47:09 +05301142 rpra = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001143 list = smq_invoke_buf_start(rpra, sc);
1144 pages = smq_phy_page_start(sc, list);
1145 ipage = pages;
1146
1147 for (i = 0; i < bufs; ++i) {
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301148 uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
1149 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001150
1151 if (ctx->fds[i] && (ctx->fds[i] != -1))
1152 fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1153 ctx->attrs[i], buf, len,
1154 mflags, &ctx->maps[i]);
1155 ipage += 1;
1156 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001157 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1158 for (i = bufs; i < bufs + handles; i++) {
1159 VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
1160 FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
1161 if (err)
1162 goto bail;
1163 ipage += 1;
1164 }
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301165 metalen = copylen = (size_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
Sathish Ambleybae51902017-07-03 15:00:49 -07001166 (sizeof(uint32_t) * M_CRCLIST);
1167
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001168 /* calculate len requreed for copying */
1169 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1170 int i = ctx->overps[oix]->raix;
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001171 uintptr_t mstart, mend;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301172 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001173
1174 if (!len)
1175 continue;
1176 if (ctx->maps[i])
1177 continue;
1178 if (ctx->overps[oix]->offset == 0)
1179 copylen = ALIGN(copylen, BALIGN);
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001180 mstart = ctx->overps[oix]->mstart;
1181 mend = ctx->overps[oix]->mend;
1182 VERIFY(err, (mend - mstart) <= LONG_MAX);
1183 if (err)
1184 goto bail;
1185 copylen += mend - mstart;
1186 VERIFY(err, copylen >= 0);
1187 if (err)
1188 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001189 }
1190 ctx->used = copylen;
1191
1192 /* allocate new buffer */
1193 if (copylen) {
1194 VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
1195 if (err)
1196 goto bail;
1197 }
Tharun Kumar Merugue3361f92017-06-22 10:45:43 +05301198 if (ctx->buf->virt && metalen <= copylen)
1199 memset(ctx->buf->virt, 0, metalen);
1200
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001201 /* copy metadata */
1202 rpra = ctx->buf->virt;
1203 ctx->rpra = rpra;
1204 list = smq_invoke_buf_start(rpra, sc);
1205 pages = smq_phy_page_start(sc, list);
1206 ipage = pages;
1207 args = (uintptr_t)ctx->buf->virt + metalen;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001208 for (i = 0; i < bufs + handles; ++i) {
1209 if (lpra[i].buf.len)
1210 list[i].num = 1;
1211 else
1212 list[i].num = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001213 list[i].pgidx = ipage - pages;
1214 ipage++;
1215 }
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301216
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001217 /* map ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001218 PERF(ctx->fl->profile, ctx->fl->perf.map,
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301219 for (i = 0; rpra && i < inbufs + outbufs; ++i) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001220 struct fastrpc_mmap *map = ctx->maps[i];
1221 uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301222 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001223
1224 rpra[i].buf.pv = 0;
1225 rpra[i].buf.len = len;
1226 if (!len)
1227 continue;
1228 if (map) {
1229 struct vm_area_struct *vma;
1230 uintptr_t offset;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301231 uint64_t num = buf_num_pages(buf, len);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001232 int idx = list[i].pgidx;
1233
1234 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001235 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001236 } else {
1237 down_read(&current->mm->mmap_sem);
1238 VERIFY(err, NULL != (vma = find_vma(current->mm,
1239 map->va)));
1240 if (err) {
1241 up_read(&current->mm->mmap_sem);
1242 goto bail;
1243 }
1244 offset = buf_page_start(buf) - vma->vm_start;
1245 up_read(&current->mm->mmap_sem);
1246 VERIFY(err, offset < (uintptr_t)map->size);
1247 if (err)
1248 goto bail;
1249 }
1250 pages[idx].addr = map->phys + offset;
1251 pages[idx].size = num << PAGE_SHIFT;
1252 }
1253 rpra[i].buf.pv = buf;
1254 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001255 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001256 for (i = bufs; i < bufs + handles; ++i) {
1257 struct fastrpc_mmap *map = ctx->maps[i];
1258
1259 pages[i].addr = map->phys;
1260 pages[i].size = map->size;
1261 }
1262 fdlist = (uint64_t *)&pages[bufs + handles];
1263 for (i = 0; i < M_FDLIST; i++)
1264 fdlist[i] = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001265 crclist = (uint32_t *)&fdlist[M_FDLIST];
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301266 memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001267
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001268 /* copy non ion buffers */
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001269 PERF(ctx->fl->profile, ctx->fl->perf.copy,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001270 rlen = copylen - metalen;
1271 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1272 int i = ctx->overps[oix]->raix;
1273 struct fastrpc_mmap *map = ctx->maps[i];
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301274 size_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001275 uint64_t buf;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301276 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001277
1278 if (!len)
1279 continue;
1280 if (map)
1281 continue;
1282 if (ctx->overps[oix]->offset == 0) {
1283 rlen -= ALIGN(args, BALIGN) - args;
1284 args = ALIGN(args, BALIGN);
1285 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001286 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001287 VERIFY(err, rlen >= mlen);
1288 if (err)
1289 goto bail;
1290 rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
1291 pages[list[i].pgidx].addr = ctx->buf->phys -
1292 ctx->overps[oix]->offset +
1293 (copylen - rlen);
1294 pages[list[i].pgidx].addr =
1295 buf_page_start(pages[list[i].pgidx].addr);
1296 buf = rpra[i].buf.pv;
1297 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1298 if (i < inbufs) {
1299 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1300 lpra[i].buf.pv, len);
1301 if (err)
1302 goto bail;
1303 }
1304 args = args + mlen;
1305 rlen -= mlen;
1306 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001307 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001308
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001309 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001310 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1311 int i = ctx->overps[oix]->raix;
1312 struct fastrpc_mmap *map = ctx->maps[i];
1313
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001314 if (map && map->uncached)
1315 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301316 if (ctx->fl->sctx->smmu.coherent &&
1317 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1318 continue;
1319 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1320 continue;
1321
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001322 if (rpra[i].buf.len && ctx->overps[oix]->mstart)
1323 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1324 uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
1325 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001326 PERF_END);
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301327 for (i = bufs; rpra && i < bufs + handles; i++) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001328 rpra[i].dma.fd = ctx->fds[i];
1329 rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1330 rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001331 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001332
1333 if (!ctx->fl->sctx->smmu.coherent) {
1334 PERF(ctx->fl->profile, ctx->fl->perf.flush,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001335 dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001336 PERF_END);
1337 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001338 bail:
1339 return err;
1340}
1341
/*
 * Copy results of a completed invocation back to the caller.
 *
 * Out-buffers that were copied (no ion mapping) are written back to the
 * caller's remote args via K_COPY_TO_USER; mapped out-buffers only need
 * their mmap reference dropped.  The DSP may also return a list of fds
 * it is done with (fdlist in the metadata region) — any matching local
 * mapping is freed — and a CRC table, copied out when the caller
 * supplied a crc pointer.  Returns 0 on success.
 */
static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		    remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	uint32_t *crclist = NULL;

	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	/* locate the fd and crc lists inside the metadata region */
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	crclist = (uint32_t *)(fdlist + M_FDLIST);

	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			/* copied out-buffer: write results back to caller */
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			/* mapped out-buffer: just drop our reference */
			fastrpc_mmap_free(ctx->maps[i], 0);
			ctx->maps[i] = NULL;
		}
	}
	if (inbufs + outbufs + handles) {
		/* free mappings the DSP reported it is done with */
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap, 0);
		}
	}
	if (ctx->crc && crclist && rpra)
		K_COPY_TO_USER(err, kernel, ctx->crc,
			crclist, M_CRCLIST*sizeof(uint32_t));

 bail:
	return err;
}
1393
/*
 * Pre-invoke cache maintenance on out-buffers.
 *
 * Flushes only the cache-unaligned head and tail of each cached
 * out-buffer so dirty lines straddling the buffer edges are not written
 * back over DSP-produced data while the call is in flight.  Buffers
 * that are uncached, empty, io-coherent (unless forced non-coherent),
 * or that share the metadata page (flushed wholesale by the caller) are
 * skipped.
 */
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		/* io-coherent context: skip unless forced non-coherent */
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* metadata page is flushed as a whole elsewhere */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		/* flush the unaligned head of the buffer... */
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		/* ...and its unaligned tail */
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
							rpra[i].buf.len);
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}
1429
/*
 * Post-invoke cache invalidation: invalidate each cached out-buffer
 * (and finally the metadata region) so the CPU observes data written by
 * the DSP.  Uses msm_ion_do_cache_op when an ion handle is held for the
 * buffer, otherwise a raw dmac_inv_range on the kernel mapping.  The
 * same skip conditions as inv_args_pre() apply.
 */
static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* metadata page is invalidated as a whole below */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						 + rpra[i].buf.len));
	}

	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}
1469
/*
 * Package a context into an smq_msg and transmit it over glink.
 *
 * msg->pid carries the sender's tgid (forced to 0 for kernel-initiated
 * calls) and msg->tid the calling thread, with the session id folded in
 * via SESSION_ID_INDEX when a second session is active.  The send is
 * refused when the channel is closed, when an SSR has bumped the
 * channel ssrcount since this file opened (-ECONNRESET), or when the
 * glink port is not CONNECTED.  Returns 0 or the glink_tx error.
 */
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, NULL != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = fl->tgid;
	msg->tid = current->pid;
	if (fl->sessionid)
		/* tag tid so the DSP can distinguish the second session */
		msg->tid |= (1 << SESSION_ID_INDEX);
	if (kernel)
		msg->pid = 0;	/* pid 0 marks kernel-originated messages */
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	/* do not transmit across a subsystem restart */
	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
		FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
 bail:
	return err;
}
1507
1508static void fastrpc_init(struct fastrpc_apps *me)
1509{
1510 int i;
1511
1512 INIT_HLIST_HEAD(&me->drivers);
Tharun Kumar Merugubcd6fbf2018-01-04 17:49:34 +05301513 INIT_HLIST_HEAD(&me->maps);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001514 spin_lock_init(&me->hlock);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05301515 mutex_init(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001516 me->channel = &gcinfo[0];
1517 for (i = 0; i < NUM_CHANNELS; i++) {
1518 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301519 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001520 me->channel[i].sesscount = 0;
1521 }
1522}
1523
1524static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1525
/*
 * Core invoke path shared by the ioctl handler and in-kernel callers.
 *
 * For user calls, first tries to resume a context that an earlier
 * signal interrupted; otherwise allocates a fresh context, marshals
 * arguments (get_args), does pre-invoke cache maintenance, transmits
 * the message and waits for completion — interruptibly for user calls,
 * so a signal parks the context on the interrupted list for a later
 * retry instead of freeing it.  On completion it invalidates caches
 * (inv_args), copies results back (put_args) and frees the context.
 * Per-stage timing is accumulated via the PERF macros when profiling
 * is enabled.  "kernel" selects kernel- vs user-space argument copying.
 *
 * Returns 0 on success; ECONNRESET if an SSR occurred during the call.
 */
static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
				   uint32_t kernel,
				   struct fastrpc_ioctl_invoke_crc *inv)
{
	struct smq_invoke_ctx *ctx = NULL;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;
	int cid = fl->cid;
	int interrupted = 0;
	int err = 0;
	struct timespec invoket = {0};

	if (fl->profile)
		getnstimeofday(&invoket);


	VERIFY(err, fl->sctx != NULL);
	if (err)
		goto bail;
	VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
	if (err)
		goto bail;

	if (!kernel) {
		/* resume a context interrupted by a signal, if any */
		VERIFY(err, 0 == context_restore_interrupted(fl, inv,
								&ctx));
		if (err)
			goto bail;
		if (fl->sctx->smmu.faults)
			err = FASTRPC_ENOSUCH;
		if (err)
			goto bail;
		if (ctx)
			goto wait;
	}

	VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
	if (err)
		goto bail;

	if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
		PERF(fl->profile, fl->perf.getargs,
		VERIFY(err, 0 == get_args(kernel, ctx));
		PERF_END);
		if (err)
			goto bail;
	}

	if (!fl->sctx->smmu.coherent)
		inv_args_pre(ctx);
	PERF(fl->profile, fl->perf.link,
	VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
	PERF_END);

	if (err)
		goto bail;
 wait:
	if (kernel)
		wait_for_completion(&ctx->work);
	else {
		interrupted = wait_for_completion_interruptible(&ctx->work);
		VERIFY(err, 0 == (err = interrupted));
		if (err)
			goto bail;
	}

	PERF(fl->profile, fl->perf.invargs,
	if (!fl->sctx->smmu.coherent)
		inv_args(ctx);
	PERF_END);

	VERIFY(err, 0 == (err = ctx->retval));
	if (err)
		goto bail;

	PERF(fl->profile, fl->perf.putargs,
	VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
	PERF_END);
	if (err)
		goto bail;
 bail:
	if (ctx && interrupted == -ERESTARTSYS)
		context_save_interrupted(ctx);
	else if (ctx)
		context_free(ctx);
	if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
		/* NOTE(review): positive ECONNRESET here, unlike the
		 * -ECONNRESET in fastrpc_invoke_send() — confirm callers
		 * expect this sign.
		 */
		err = ECONNRESET;

	if (fl->profile && !interrupted) {
		if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
			fl->perf.invoke += getnstimediff(&invoket);
		if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX)
			fl->perf.count++;
	}
	return err;
}
1621
Sathish Ambley36849af2017-02-02 09:35:55 -08001622static int fastrpc_channel_open(struct fastrpc_file *fl);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001623static int fastrpc_init_process(struct fastrpc_file *fl,
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001624 struct fastrpc_ioctl_init_attrs *uproc)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001625{
1626 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05301627 struct fastrpc_apps *me = &gfa;
Sathish Ambleybae51902017-07-03 15:00:49 -07001628 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001629 struct fastrpc_ioctl_init *init = &uproc->init;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001630 struct smq_phy_page pages[1];
c_mtharue1a5ce12017-10-13 20:47:09 +05301631 struct fastrpc_mmap *file = NULL, *mem = NULL;
1632 char *proc_name = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001633
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05301634 VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
Sathish Ambley36849af2017-02-02 09:35:55 -08001635 if (err)
1636 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001637 if (init->flags == FASTRPC_INIT_ATTACH) {
1638 remote_arg_t ra[1];
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301639 int tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001640
1641 ra[0].buf.pv = (void *)&tgid;
1642 ra[0].buf.len = sizeof(tgid);
1643 ioctl.inv.handle = 1;
1644 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
1645 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301646 ioctl.fds = NULL;
1647 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001648 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001649 fl->pd = 0;
1650 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1651 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1652 if (err)
1653 goto bail;
1654 } else if (init->flags == FASTRPC_INIT_CREATE) {
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001655 remote_arg_t ra[6];
1656 int fds[6];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001657 int mflags = 0;
1658 struct {
1659 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301660 unsigned int namelen;
1661 unsigned int filelen;
1662 unsigned int pageslen;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001663 int attrs;
1664 int siglen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001665 } inbuf;
1666
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301667 inbuf.pgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001668 inbuf.namelen = strlen(current->comm) + 1;
1669 inbuf.filelen = init->filelen;
1670 fl->pd = 1;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301671
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05301672 VERIFY(err, access_ok(0, (void __user *)init->file,
1673 init->filelen));
1674 if (err)
1675 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001676 if (init->filelen) {
1677 VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
1678 init->file, init->filelen, mflags, &file));
1679 if (err)
1680 goto bail;
1681 }
1682 inbuf.pageslen = 1;
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05301683 VERIFY(err, access_ok(1, (void __user *)init->mem,
1684 init->memlen));
1685 if (err)
1686 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001687 VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
1688 init->mem, init->memlen, mflags, &mem));
1689 if (err)
1690 goto bail;
1691 inbuf.pageslen = 1;
1692 ra[0].buf.pv = (void *)&inbuf;
1693 ra[0].buf.len = sizeof(inbuf);
1694 fds[0] = 0;
1695
1696 ra[1].buf.pv = (void *)current->comm;
1697 ra[1].buf.len = inbuf.namelen;
1698 fds[1] = 0;
1699
1700 ra[2].buf.pv = (void *)init->file;
1701 ra[2].buf.len = inbuf.filelen;
1702 fds[2] = init->filefd;
1703
1704 pages[0].addr = mem->phys;
1705 pages[0].size = mem->size;
1706 ra[3].buf.pv = (void *)pages;
1707 ra[3].buf.len = 1 * sizeof(*pages);
1708 fds[3] = 0;
1709
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001710 inbuf.attrs = uproc->attrs;
1711 ra[4].buf.pv = (void *)&(inbuf.attrs);
1712 ra[4].buf.len = sizeof(inbuf.attrs);
1713 fds[4] = 0;
1714
1715 inbuf.siglen = uproc->siglen;
1716 ra[5].buf.pv = (void *)&(inbuf.siglen);
1717 ra[5].buf.len = sizeof(inbuf.siglen);
1718 fds[5] = 0;
1719
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001720 ioctl.inv.handle = 1;
1721 ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001722 if (uproc->attrs)
1723 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001724 ioctl.inv.pra = ra;
1725 ioctl.fds = fds;
c_mtharue1a5ce12017-10-13 20:47:09 +05301726 ioctl.attrs = NULL;
1727 ioctl.crc = NULL;
1728 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1729 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1730 if (err)
1731 goto bail;
1732 } else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
1733 remote_arg_t ra[3];
1734 uint64_t phys = 0;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301735 size_t size = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05301736 int fds[3];
1737 struct {
1738 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301739 unsigned int namelen;
1740 unsigned int pageslen;
c_mtharue1a5ce12017-10-13 20:47:09 +05301741 } inbuf;
1742
1743 if (!init->filelen)
1744 goto bail;
1745
1746 proc_name = kzalloc(init->filelen, GFP_KERNEL);
1747 VERIFY(err, !IS_ERR_OR_NULL(proc_name));
1748 if (err)
1749 goto bail;
1750 VERIFY(err, 0 == copy_from_user((void *)proc_name,
1751 (void __user *)init->file, init->filelen));
1752 if (err)
1753 goto bail;
1754
1755 inbuf.pgid = current->tgid;
c_mtharu81a0aa72017-11-07 16:13:21 +05301756 inbuf.namelen = init->filelen;
c_mtharue1a5ce12017-10-13 20:47:09 +05301757 inbuf.pageslen = 0;
1758 if (!me->staticpd_flags) {
1759 inbuf.pageslen = 1;
1760 VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
1761 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
1762 &mem));
1763 if (err)
1764 goto bail;
1765 phys = mem->phys;
1766 size = mem->size;
1767 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05301768 hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
1769 me->channel[fl->cid].rhvm.vmperm,
1770 me->channel[fl->cid].rhvm.vmcount));
c_mtharue1a5ce12017-10-13 20:47:09 +05301771 if (err) {
1772 pr_err("ADSPRPC: hyp_assign_phys fail err %d",
1773 err);
1774 pr_err("map->phys %llx, map->size %d\n",
1775 phys, (int)size);
1776 goto bail;
1777 }
1778 me->staticpd_flags = 1;
1779 }
1780
1781 ra[0].buf.pv = (void *)&inbuf;
1782 ra[0].buf.len = sizeof(inbuf);
1783 fds[0] = 0;
1784
1785 ra[1].buf.pv = (void *)proc_name;
1786 ra[1].buf.len = inbuf.namelen;
1787 fds[1] = 0;
1788
1789 pages[0].addr = phys;
1790 pages[0].size = size;
1791
1792 ra[2].buf.pv = (void *)pages;
1793 ra[2].buf.len = sizeof(*pages);
1794 fds[2] = 0;
1795 ioctl.inv.handle = 1;
1796
1797 ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
1798 ioctl.inv.pra = ra;
1799 ioctl.fds = NULL;
1800 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001801 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001802 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
1803 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1804 if (err)
1805 goto bail;
1806 } else {
1807 err = -ENOTTY;
1808 }
1809bail:
c_mtharud91205a2017-11-07 16:01:06 +05301810 kfree(proc_name);
c_mtharue1a5ce12017-10-13 20:47:09 +05301811 if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
1812 me->staticpd_flags = 0;
1813 if (mem && err) {
1814 if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
1815 hyp_assign_phys(mem->phys, (uint64_t)mem->size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05301816 me->channel[fl->cid].rhvm.vmid,
1817 me->channel[fl->cid].rhvm.vmcount,
1818 hlosvm, hlosvmperm, 1);
c_mtharu7bd6a422017-10-17 18:15:37 +05301819 fastrpc_mmap_free(mem, 0);
c_mtharue1a5ce12017-10-13 20:47:09 +05301820 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001821 if (file)
c_mtharu7bd6a422017-10-17 18:15:37 +05301822 fastrpc_mmap_free(file, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001823 return err;
1824}
1825
1826static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
1827{
1828 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001829 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001830 remote_arg_t ra[1];
1831 int tgid = 0;
1832
Sathish Ambley36849af2017-02-02 09:35:55 -08001833 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1834 if (err)
1835 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05301836 VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001837 if (err)
1838 goto bail;
1839 tgid = fl->tgid;
1840 ra[0].buf.pv = (void *)&tgid;
1841 ra[0].buf.len = sizeof(tgid);
1842 ioctl.inv.handle = 1;
1843 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
1844 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301845 ioctl.fds = NULL;
1846 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001847 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001848 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1849 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1850bail:
1851 return err;
1852}
1853
1854static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
1855 struct fastrpc_mmap *map)
1856{
Sathish Ambleybae51902017-07-03 15:00:49 -07001857 struct fastrpc_ioctl_invoke_crc ioctl;
c_mtharu63ffc012017-11-16 15:26:56 +05301858 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001859 struct smq_phy_page page;
1860 int num = 1;
1861 remote_arg_t ra[3];
1862 int err = 0;
1863 struct {
1864 int pid;
1865 uint32_t flags;
1866 uintptr_t vaddrin;
1867 int num;
1868 } inargs;
1869 struct {
1870 uintptr_t vaddrout;
1871 } routargs;
1872
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301873 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001874 inargs.vaddrin = (uintptr_t)map->va;
1875 inargs.flags = flags;
1876 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
1877 ra[0].buf.pv = (void *)&inargs;
1878 ra[0].buf.len = sizeof(inargs);
1879 page.addr = map->phys;
1880 page.size = map->size;
1881 ra[1].buf.pv = (void *)&page;
1882 ra[1].buf.len = num * sizeof(page);
1883
1884 ra[2].buf.pv = (void *)&routargs;
1885 ra[2].buf.len = sizeof(routargs);
1886
1887 ioctl.inv.handle = 1;
1888 if (fl->apps->compat)
1889 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
1890 else
1891 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
1892 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301893 ioctl.fds = NULL;
1894 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001895 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001896 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1897 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1898 map->raddr = (uintptr_t)routargs.vaddrout;
c_mtharue1a5ce12017-10-13 20:47:09 +05301899 if (err)
1900 goto bail;
1901 if (flags == ADSP_MMAP_HEAP_ADDR) {
1902 struct scm_desc desc = {0};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001903
c_mtharue1a5ce12017-10-13 20:47:09 +05301904 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
1905 desc.args[1] = map->phys;
1906 desc.args[2] = map->size;
1907 desc.arginfo = SCM_ARGS(3);
1908 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
1909 TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
1910 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
c_mtharue1a5ce12017-10-13 20:47:09 +05301911 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05301912 hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
1913 me->channel[fl->cid].rhvm.vmperm,
1914 me->channel[fl->cid].rhvm.vmcount));
c_mtharue1a5ce12017-10-13 20:47:09 +05301915 if (err)
1916 goto bail;
1917 }
1918bail:
1919 return err;
1920}
1921
1922static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
1923 struct fastrpc_mmap *map)
1924{
1925 int err = 0;
c_mtharu63ffc012017-11-16 15:26:56 +05301926 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +05301927 int destVM[1] = {VMID_HLOS};
1928 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
1929
1930 if (map->flags == ADSP_MMAP_HEAP_ADDR) {
1931 struct fastrpc_ioctl_invoke_crc ioctl;
1932 struct scm_desc desc = {0};
1933 remote_arg_t ra[1];
1934 int err = 0;
1935 struct {
1936 uint8_t skey;
1937 } routargs;
1938
1939 ra[0].buf.pv = (void *)&routargs;
1940 ra[0].buf.len = sizeof(routargs);
1941
1942 ioctl.inv.handle = 1;
1943 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
1944 ioctl.inv.pra = ra;
1945 ioctl.fds = NULL;
1946 ioctl.attrs = NULL;
1947 ioctl.crc = NULL;
1948 if (fl == NULL)
1949 goto bail;
1950
1951 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
1952 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
1953 if (err)
1954 goto bail;
1955 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
1956 desc.args[1] = map->phys;
1957 desc.args[2] = map->size;
1958 desc.args[3] = routargs.skey;
1959 desc.arginfo = SCM_ARGS(4);
1960 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
1961 TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
1962 } else if (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
1963 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05301964 me->channel[fl->cid].rhvm.vmid,
1965 me->channel[fl->cid].rhvm.vmcount,
1966 destVM, destVMperm, 1));
c_mtharue1a5ce12017-10-13 20:47:09 +05301967 if (err)
1968 goto bail;
1969 }
1970
1971bail:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001972 return err;
1973}
1974
1975static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
1976 struct fastrpc_mmap *map)
1977{
Sathish Ambleybae51902017-07-03 15:00:49 -07001978 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001979 remote_arg_t ra[1];
1980 int err = 0;
1981 struct {
1982 int pid;
1983 uintptr_t vaddrout;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301984 size_t size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001985 } inargs;
1986
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301987 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001988 inargs.size = map->size;
1989 inargs.vaddrout = map->raddr;
1990 ra[0].buf.pv = (void *)&inargs;
1991 ra[0].buf.len = sizeof(inargs);
1992
1993 ioctl.inv.handle = 1;
1994 if (fl->apps->compat)
1995 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
1996 else
1997 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
1998 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05301999 ioctl.fds = NULL;
2000 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002001 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002002 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2003 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
c_mtharue1a5ce12017-10-13 20:47:09 +05302004 if (err)
2005 goto bail;
2006 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
2007 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2008 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, map));
2009 if (err)
2010 goto bail;
2011 }
2012bail:
2013 return err;
2014}
2015
2016static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
2017{
2018 struct fastrpc_mmap *match = NULL, *map = NULL;
2019 struct hlist_node *n = NULL;
2020 int err = 0, ret = 0;
2021 struct fastrpc_apps *me = &gfa;
2022 struct ramdump_segment *ramdump_segments_rh = NULL;
2023
2024 do {
2025 match = NULL;
2026 spin_lock(&me->hlock);
2027 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
2028 match = map;
2029 hlist_del_init(&map->hn);
2030 break;
2031 }
2032 spin_unlock(&me->hlock);
2033
2034 if (match) {
2035 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match));
2036 if (err)
2037 goto bail;
2038 if (me->channel[0].ramdumpenabled) {
2039 ramdump_segments_rh = kcalloc(1,
2040 sizeof(struct ramdump_segment), GFP_KERNEL);
2041 if (ramdump_segments_rh) {
2042 ramdump_segments_rh->address =
2043 match->phys;
2044 ramdump_segments_rh->size = match->size;
2045 ret = do_elf_ramdump(
2046 me->channel[0].remoteheap_ramdump_dev,
2047 ramdump_segments_rh, 1);
2048 if (ret < 0)
2049 pr_err("ADSPRPC: unable to dump heap");
2050 kfree(ramdump_segments_rh);
2051 }
2052 }
c_mtharu7bd6a422017-10-17 18:15:37 +05302053 fastrpc_mmap_free(match, 0);
c_mtharue1a5ce12017-10-13 20:47:09 +05302054 }
2055 } while (match);
2056bail:
2057 if (err && match)
2058 fastrpc_mmap_add(match);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002059 return err;
2060}
2061
2062static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302063 size_t len, struct fastrpc_mmap **ppmap);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002064
2065static void fastrpc_mmap_add(struct fastrpc_mmap *map);
2066
2067static int fastrpc_internal_munmap(struct fastrpc_file *fl,
2068 struct fastrpc_ioctl_munmap *ud)
2069{
2070 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302071 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002072
2073 VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
2074 if (err)
2075 goto bail;
2076 VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
2077 if (err)
2078 goto bail;
c_mtharu7bd6a422017-10-17 18:15:37 +05302079 fastrpc_mmap_free(map, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002080bail:
2081 if (err && map)
2082 fastrpc_mmap_add(map);
2083 return err;
2084}
2085
c_mtharu7bd6a422017-10-17 18:15:37 +05302086static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
2087 struct fastrpc_ioctl_munmap_fd *ud) {
2088 int err = 0;
2089 struct fastrpc_mmap *map = NULL;
2090
2091 VERIFY(err, (fl && ud));
2092 if (err)
2093 goto bail;
2094
2095 if (!fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
2096 pr_err("mapping not found to unamp %x va %llx %x\n",
2097 ud->fd, (unsigned long long)ud->va,
2098 (unsigned int)ud->len);
2099 err = -1;
2100 goto bail;
2101 }
2102 if (map)
2103 fastrpc_mmap_free(map, 0);
2104bail:
2105 return err;
2106}
2107
2108
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002109static int fastrpc_internal_mmap(struct fastrpc_file *fl,
2110 struct fastrpc_ioctl_mmap *ud)
2111{
2112
c_mtharue1a5ce12017-10-13 20:47:09 +05302113 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002114 int err = 0;
2115
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302116 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
c_mtharue1a5ce12017-10-13 20:47:09 +05302117 ud->size, ud->flags, 1, &map))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002118 return 0;
2119
2120 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302121 (uintptr_t)ud->vaddrin, ud->size,
c_mtharue1a5ce12017-10-13 20:47:09 +05302122 ud->flags, &map));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002123 if (err)
2124 goto bail;
2125 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
2126 if (err)
2127 goto bail;
2128 ud->vaddrout = map->raddr;
2129 bail:
2130 if (err && map)
c_mtharu7bd6a422017-10-17 18:15:37 +05302131 fastrpc_mmap_free(map, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002132 return err;
2133}
2134
/*
 * kref release callback for a channel context.
 *
 * Invoked via kref_put_mutex() (see fastrpc_file_free()) with
 * me->smd_mutex already HELD; the mutex_unlock() below releases that
 * lock on the caller's behalf.  Must not be called directly.
 */
static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	/* channel id is the index of this ctx within the global gcinfo table */
	cid = ctx - &gcinfo[0];
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = NULL;
	glink_unregister_link_state_cb(ctx->link.link_notify_handle);
	ctx->link.link_notify_handle = NULL;
	mutex_unlock(&me->smd_mutex);	/* acquired by kref_put_mutex() caller */
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
		MAJOR(me->dev_no), cid);
}
2151
2152static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
2153
2154static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
2155 int secure, struct fastrpc_session_ctx **session)
2156{
2157 struct fastrpc_apps *me = &gfa;
2158 int idx = 0, err = 0;
2159
2160 if (chan->sesscount) {
2161 for (idx = 0; idx < chan->sesscount; ++idx) {
2162 if (!chan->session[idx].used &&
2163 chan->session[idx].smmu.secure == secure) {
2164 chan->session[idx].used = 1;
2165 break;
2166 }
2167 }
2168 VERIFY(err, idx < chan->sesscount);
2169 if (err)
2170 goto bail;
2171 chan->session[idx].smmu.faults = 0;
2172 } else {
2173 VERIFY(err, me->dev != NULL);
2174 if (err)
2175 goto bail;
2176 chan->session[0].dev = me->dev;
c_mtharue1a5ce12017-10-13 20:47:09 +05302177 chan->session[0].smmu.dev = me->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002178 }
2179
2180 *session = &chan->session[idx];
2181 bail:
2182 return err;
2183}
2184
c_mtharue1a5ce12017-10-13 20:47:09 +05302185static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
2186 size_t size)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002187{
2188 if (glink_queue_rx_intent(h, NULL, size))
2189 return false;
2190 return true;
2191}
2192
/*
 * glink tx-done callback, registered in fastrpc_glink_open().
 * No per-send bookkeeping is needed, so this is an intentional no-op.
 */
static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
		const void *pkt_priv, const void *ptr)
{
}
2197
/*
 * glink rx callback: validate an incoming invoke response and wake the
 * waiting caller.  The packet must be at least an smq_invoke_rsp and
 * carry a context cookie that round-trips to a live smq_invoke_ctx
 * (checked via its FASTRPC_CTX_MAGIC).  The rx buffer is always
 * returned to glink, valid or not.
 */
static void fastrpc_glink_notify_rx(void *handle, const void *priv,
		const void *pkt_priv, const void *ptr, size_t size)
{
	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
	struct smq_invoke_ctx *ctx;
	int err = 0;

	VERIFY(err, (rsp && size >= sizeof(*rsp)));
	if (err)
		goto bail;

	/* mask off the low bit of the cookie before converting back to a
	 * pointer; NOTE(review): the bit appears to carry in-band state
	 * set by the sender — confirm against the smq protocol definition
	 */
	ctx = (struct smq_invoke_ctx *)(uint64_to_ptr(rsp->ctx & ~1));
	VERIFY(err, (ctx && ctx->magic == FASTRPC_CTX_MAGIC));
	if (err)
		goto bail;

	context_notify_user(ctx, rsp->retval);
bail:
	if (err)
		pr_err("adsprpc: invalid response or context\n");
	/* hand the rx buffer back to glink unconditionally */
	glink_rx_done(handle, ptr, true);
}
2220
c_mtharue1a5ce12017-10-13 20:47:09 +05302221static void fastrpc_glink_notify_state(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002222 unsigned int event)
2223{
2224 struct fastrpc_apps *me = &gfa;
2225 int cid = (int)(uintptr_t)priv;
2226 struct fastrpc_glink_info *link;
2227
2228 if (cid < 0 || cid >= NUM_CHANNELS)
2229 return;
2230 link = &me->channel[cid].link;
2231 switch (event) {
2232 case GLINK_CONNECTED:
2233 link->port_state = FASTRPC_LINK_CONNECTED;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302234 complete(&me->channel[cid].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002235 break;
2236 case GLINK_LOCAL_DISCONNECTED:
2237 link->port_state = FASTRPC_LINK_DISCONNECTED;
2238 break;
2239 case GLINK_REMOTE_DISCONNECTED:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002240 break;
2241 default:
2242 break;
2243 }
2244}
2245
2246static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
2247 struct fastrpc_session_ctx **session)
2248{
2249 int err = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302250 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002251
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302252 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002253 if (!*session)
2254 err = fastrpc_session_alloc_locked(chan, secure, session);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302255 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002256 return err;
2257}
2258
2259static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
2260 struct fastrpc_session_ctx *session)
2261{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302262 struct fastrpc_apps *me = &gfa;
2263
2264 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002265 session->used = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302266 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002267}
2268
/*
 * Final teardown of a fastrpc file context.
 *
 * Order matters here: the DSP-side process is released first (best
 * effort), the file is unlinked from the global list, pending invoke
 * contexts and buffers are destroyed, all maps are freed, and only
 * then is the channel reference dropped — kref_put_mutex() runs
 * fastrpc_channel_close() with smd_mutex held if this was the last
 * user.  Sessions are released last, then the struct itself.
 */
static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n;
	struct fastrpc_mmap *map = NULL;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	/* best effort: ignore failures tearing down the remote process */
	(void)fastrpc_release_current_dsp_process(fl);

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	/* no session was ever bound: nothing further to release */
	if (!fl->sctx) {
		kfree(fl);
		return 0;
	}
	/* block new invokes racing with this teardown */
	spin_lock(&fl->hlock);
	fl->file_close = 1;
	spin_unlock(&fl->hlock);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map, 1);
	}
	/* drop the channel ref only if no SSR happened since we took it */
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
			fastrpc_channel_close, &fl->apps->smd_mutex);
	if (fl->sctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
	kfree(fl);
	return 0;
}
2307
2308static int fastrpc_device_release(struct inode *inode, struct file *file)
2309{
2310 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2311
2312 if (fl) {
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302313 if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
2314 pm_qos_remove_request(&fl->pm_qos_req);
Sathish Ambley1ca68232017-01-19 10:32:55 -08002315 if (fl->debugfs_file != NULL)
2316 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002317 fastrpc_file_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +05302318 file->private_data = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002319 }
2320 return 0;
2321}
2322
2323static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
2324 void *priv)
2325{
2326 struct fastrpc_apps *me = &gfa;
2327 int cid = (int)((uintptr_t)priv);
2328 struct fastrpc_glink_info *link;
2329
2330 if (cid < 0 || cid >= NUM_CHANNELS)
2331 return;
2332
2333 link = &me->channel[cid].link;
2334 switch (cb_info->link_state) {
2335 case GLINK_LINK_STATE_UP:
2336 link->link_state = FASTRPC_LINK_STATE_UP;
2337 complete(&me->channel[cid].work);
2338 break;
2339 case GLINK_LINK_STATE_DOWN:
2340 link->link_state = FASTRPC_LINK_STATE_DOWN;
2341 break;
2342 default:
2343 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
2344 break;
2345 }
2346}
2347
/*
 * Register for glink link-state notifications on a channel and wait
 * (up to RPC_TIMEOUT) for the link to come up; the completion is
 * signalled by fastrpc_link_state_handler().  Idempotent: returns 0
 * immediately if a notify handle is already registered.
 */
static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;

	link = &me->channel[cid].link;
	/* already registered: nothing to do */
	if (link->link_notify_handle != NULL)
		goto bail;

	link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
	link->link_notify_handle = glink_register_link_state_cb(
				&link->link_info,
				(void *)((uintptr_t)cid));
	VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
	if (err) {
		/* don't leave an ERR_PTR where NULL means "unregistered" */
		link->link_notify_handle = NULL;
		goto bail;
	}
	/* err is set if the link did not come up within RPC_TIMEOUT */
	VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
						RPC_TIMEOUT));
bail:
	return err;
}
2375
2376static void fastrpc_glink_close(void *chan, int cid)
2377{
2378 int err = 0;
2379 struct fastrpc_glink_info *link;
2380
2381 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2382 if (err)
2383 return;
2384 link = &gfa.channel[cid].link;
2385
c_mtharu314a4202017-11-15 22:09:17 +05302386 if (link->port_state == FASTRPC_LINK_CONNECTED ||
2387 link->port_state == FASTRPC_LINK_REMOTE_DISCONNECTING) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002388 link->port_state = FASTRPC_LINK_DISCONNECTING;
2389 glink_close(chan);
2390 }
2391}
2392
2393static int fastrpc_glink_open(int cid)
2394{
2395 int err = 0;
2396 void *handle = NULL;
2397 struct fastrpc_apps *me = &gfa;
2398 struct glink_open_config *cfg;
2399 struct fastrpc_glink_info *link;
2400
2401 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2402 if (err)
2403 goto bail;
2404 link = &me->channel[cid].link;
2405 cfg = &me->channel[cid].link.cfg;
2406 VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
2407 if (err)
2408 goto bail;
2409
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05302410 VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
2411 if (err)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002412 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002413
2414 link->port_state = FASTRPC_LINK_CONNECTING;
2415 cfg->priv = (void *)(uintptr_t)cid;
2416 cfg->edge = gcinfo[cid].link.link_info.edge;
2417 cfg->transport = gcinfo[cid].link.link_info.transport;
2418 cfg->name = FASTRPC_GLINK_GUID;
2419 cfg->notify_rx = fastrpc_glink_notify_rx;
2420 cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
2421 cfg->notify_state = fastrpc_glink_notify_state;
2422 cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
2423 handle = glink_open(cfg);
2424 VERIFY(err, !IS_ERR_OR_NULL(handle));
c_mtharu6e1d26b2017-10-09 16:05:24 +05302425 if (err) {
2426 if (link->port_state == FASTRPC_LINK_CONNECTING)
2427 link->port_state = FASTRPC_LINK_DISCONNECTED;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002428 goto bail;
c_mtharu6e1d26b2017-10-09 16:05:24 +05302429 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002430 me->channel[cid].chan = handle;
2431bail:
2432 return err;
2433}
2434
/*
 * debugfs open: stash the fastrpc_file pointer (stored in i_private at
 * file creation, see fastrpc_device_open) for the read handler.
 */
static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}
2440
/*
 * debugfs read handler.
 *
 * Two modes, selected by the node's private data:
 *  - fl == NULL (global node): dump every channel's name, session
 *    count, and per-session sid/secure attributes.
 *  - fl != NULL (per-process node): dump the process/channel ids,
 *    SSR count, and the file's buffer, map, pending-context and
 *    interrupted-context lists (lists walked under fl->hlock).
 *
 * Output is built into a DEBUGFS_SIZE scratch buffer with scnprintf
 * (which never overruns; the final clamp of len is belt-and-braces)
 * and copied out via simple_read_from_buffer().
 */
static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
					size_t count, loff_t *position)
{
	struct fastrpc_file *fl = filp->private_data;
	struct hlist_node *n;
	struct fastrpc_buf *buf = NULL;
	struct fastrpc_mmap *map = NULL;
	struct smq_invoke_ctx *ictx = NULL;
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	unsigned int len = 0;
	int i, j, ret = 0;
	char *fileinfo = NULL;

	fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
	if (!fileinfo)
		goto bail;
	if (fl == NULL) {
		/* global view: every channel and its sessions */
		for (i = 0; i < NUM_CHANNELS; i++) {
			chan = &gcinfo[i];
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s\n\n",
					chan->name);
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s %d\n",
					"sesscount:", chan->sesscount);
			for (j = 0; j < chan->sesscount; j++) {
				sess = &chan->session[j];
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s%d\n\n", "SESSION", j);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "sid:",
						sess->smmu.cb);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "SECURE:",
						sess->smmu.secure);
			}
		}
	} else {
		/* per-process view */
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"PROCESS_ID:", fl->tgid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"CHANNEL_ID:", fl->cid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"SSRCOUNT:", fl->ssrcount);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s\n",
				"LIST OF BUFS:");
		/* fl->hlock guards all four lists dumped below */
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %pK %s %llx\n", "buf:",
					buf, "buf->virt:", buf->virt,
					"buf->phys:", buf->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"\n%s\n",
				"LIST OF MAPS:");
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %lx %s %llx\n",
					"map:", map,
					"map->va:", map->va,
					"map->phys:", map->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"\n%s\n",
				"LIST OF PENDING SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %u %s %u %s %u\n",
					"smqcontext:", ictx,
					"sc:", ictx->sc,
					"tid:", ictx->pid,
					"handle", ictx->rpra->h);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"\n%s\n",
				"LIST OF INTERRUPTED SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %u %s %u %s %u\n",
					"smqcontext:", ictx,
					"sc:", ictx->sc,
					"tid:", ictx->pid,
					"handle", ictx->rpra->h);
		}
		spin_unlock(&fl->hlock);
	}
	if (len > DEBUGFS_SIZE)
		len = DEBUGFS_SIZE;
	ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
	kfree(fileinfo);
bail:
	return ret;
}
2543
/*
 * File operations for the debugfs nodes created in
 * fastrpc_device_open(); read-only state dump per process or global.
 */
static const struct file_operations debugfs_fops = {
	.open = fastrpc_debugfs_open,
	.read = fastrpc_debugfs_read,
};
/*
 * Acquire (and if necessary establish) the glink channel for fl->cid.
 *
 * Runs entirely under me->smd_mutex.  If the channel is already open,
 * just takes a kref.  Otherwise: registers for link-state events,
 * opens the glink port, waits (RPC_TIMEOUT) for the CONNECTED state
 * callback, primes two rx intents (best effort), and — when the
 * subsystem restarted since the channel was last up — unmaps any
 * leftover remote-heap mappings before recording the new SSR count.
 *
 * Returns 0 on success; -ENOTCONN if the subsystem is down after SSR.
 */
static int fastrpc_channel_open(struct fastrpc_file *fl)
{
	struct fastrpc_apps *me = &gfa;
	int cid, err = 0;

	mutex_lock(&me->smd_mutex);

	VERIFY(err, fl && fl->sctx);
	if (err)
		goto bail;
	cid = fl->cid;
	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
	if (err)
		goto bail;
	/* an SSR happened and the subsystem is not back yet: refuse */
	if (me->channel[cid].ssrcount !=
			 me->channel[cid].prevssrcount) {
		if (!me->channel[cid].issubsystemup) {
			VERIFY(err, 0);
			if (err) {
				err = -ENOTCONN;
				goto bail;
			}
		}
	}
	/* remember which SSR generation this file belongs to */
	fl->ssrcount = me->channel[cid].ssrcount;
	if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
	    (me->channel[cid].chan == NULL)) {
		VERIFY(err, 0 == fastrpc_glink_register(cid, me));
		if (err)
			goto bail;
		VERIFY(err, 0 == fastrpc_glink_open(cid));
		if (err)
			goto bail;

		/* wait for the CONNECTED state callback on workport */
		VERIFY(err,
			 wait_for_completion_timeout(&me->channel[cid].workport,
						RPC_TIMEOUT));
		if (err) {
			me->channel[cid].chan = NULL;
			goto bail;
		}
		kref_init(&me->channel[cid].kref);
		pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
			MAJOR(me->dev_no), cid);
		/* pre-queue rx intents so early responses are not stalled */
		err = glink_queue_rx_intent(me->channel[cid].chan, NULL,
			FASTRPC_GLINK_INTENT_LEN);
		err |= glink_queue_rx_intent(me->channel[cid].chan, NULL,
			FASTRPC_GLINK_INTENT_LEN);
		if (err)
			pr_warn("adsprpc: initial intent fail for %d err %d\n",
				cid, err);
		/* first open after an SSR: reclaim stale remote-heap maps */
		if (me->channel[cid].ssrcount !=
				 me->channel[cid].prevssrcount) {
			if (fastrpc_mmap_remove_ssr(fl))
				pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
			me->channel[cid].prevssrcount =
						me->channel[cid].ssrcount;
		}
	}

bail:
	mutex_unlock(&me->smd_mutex);
	return err;
}
2612
Sathish Ambley36849af2017-02-02 09:35:55 -08002613static int fastrpc_device_open(struct inode *inode, struct file *filp)
2614{
2615 int err = 0;
Sathish Ambley567012b2017-03-06 11:55:04 -08002616 struct dentry *debugfs_file;
c_mtharue1a5ce12017-10-13 20:47:09 +05302617 struct fastrpc_file *fl = NULL;
Sathish Ambley36849af2017-02-02 09:35:55 -08002618 struct fastrpc_apps *me = &gfa;
2619
c_mtharue1a5ce12017-10-13 20:47:09 +05302620 VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
Sathish Ambley36849af2017-02-02 09:35:55 -08002621 if (err)
2622 return err;
Sathish Ambley567012b2017-03-06 11:55:04 -08002623 debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
2624 fl, &debugfs_fops);
Sathish Ambley36849af2017-02-02 09:35:55 -08002625 context_list_ctor(&fl->clst);
2626 spin_lock_init(&fl->hlock);
2627 INIT_HLIST_HEAD(&fl->maps);
2628 INIT_HLIST_HEAD(&fl->bufs);
2629 INIT_HLIST_NODE(&fl->hn);
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302630 fl->sessionid = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08002631 fl->tgid = current->tgid;
2632 fl->apps = me;
2633 fl->mode = FASTRPC_MODE_SERIAL;
2634 fl->cid = -1;
Sathish Ambley567012b2017-03-06 11:55:04 -08002635 if (debugfs_file != NULL)
2636 fl->debugfs_file = debugfs_file;
2637 memset(&fl->perf, 0, sizeof(fl->perf));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302638 fl->qos_request = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08002639 filp->private_data = fl;
2640 spin_lock(&me->hlock);
2641 hlist_add_head(&fl->hn, &me->drivers);
2642 spin_unlock(&me->hlock);
2643 return 0;
2644}
2645
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002646static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
2647{
2648 int err = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08002649 uint32_t cid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002650
c_mtharue1a5ce12017-10-13 20:47:09 +05302651 VERIFY(err, fl != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002652 if (err)
2653 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08002654 if (fl->cid == -1) {
2655 cid = *info;
2656 VERIFY(err, cid < NUM_CHANNELS);
2657 if (err)
2658 goto bail;
2659 fl->cid = cid;
2660 fl->ssrcount = fl->apps->channel[cid].ssrcount;
2661 VERIFY(err, !fastrpc_session_alloc_locked(
2662 &fl->apps->channel[cid], 0, &fl->sctx));
2663 if (err)
2664 goto bail;
2665 }
Tharun Kumar Merugu80be7d62017-08-02 11:03:22 +05302666 VERIFY(err, fl->sctx != NULL);
2667 if (err)
2668 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002669 *info = (fl->sctx->smmu.enabled ? 1 : 0);
2670bail:
2671 return err;
2672}
2673
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302674static int fastrpc_internal_control(struct fastrpc_file *fl,
2675 struct fastrpc_ioctl_control *cp)
2676{
2677 int err = 0;
2678 int latency;
2679
2680 VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
2681 if (err)
2682 goto bail;
2683 VERIFY(err, !IS_ERR_OR_NULL(cp));
2684 if (err)
2685 goto bail;
2686
2687 switch (cp->req) {
2688 case FASTRPC_CONTROL_LATENCY:
2689 latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
2690 fl->apps->latency : PM_QOS_DEFAULT_VALUE;
2691 VERIFY(err, latency != 0);
2692 if (err)
2693 goto bail;
2694 if (!fl->qos_request) {
2695 pm_qos_add_request(&fl->pm_qos_req,
2696 PM_QOS_CPU_DMA_LATENCY, latency);
2697 fl->qos_request = 1;
2698 } else
2699 pm_qos_update_request(&fl->pm_qos_req, latency);
2700 break;
2701 default:
2702 err = -ENOTTY;
2703 break;
2704 }
2705bail:
2706 return err;
2707}
2708
/*
 * fastrpc_device_ioctl() - top-level ioctl dispatcher for the fastrpc fd.
 *
 * Copies the per-command argument struct from userspace into a stack
 * union, calls the matching fastrpc_internal_* helper, and copies results
 * back. The INVOKE* and INIT* cases use deliberate switch fallthrough:
 * each case sets @size for its (smaller) struct variant and falls into
 * the widest variant's copy/dispatch code, with the unread tail fields
 * (fds/attrs/crc) pre-cleared above.
 *
 * Returns 0 on success or an error code from the helper/copy.
 */
static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
				 unsigned long ioctl_param)
{
	union {
		struct fastrpc_ioctl_invoke_crc inv;
		struct fastrpc_ioctl_mmap mmap;
		struct fastrpc_ioctl_munmap munmap;
		struct fastrpc_ioctl_munmap_fd munmap_fd;
		struct fastrpc_ioctl_init_attrs init;
		struct fastrpc_ioctl_perf perf;
		struct fastrpc_ioctl_control cp;
	} p;
	void *param = (char *)ioctl_param;
	struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
	int size = 0, err = 0;
	uint32_t info;

	/*
	 * Clear the optional tail of the widest invoke variant so the
	 * fallthrough cases below can safely copy only their own size.
	 */
	p.inv.fds = NULL;
	p.inv.attrs = NULL;
	p.inv.crc = NULL;
	/* Refuse new work once release has started on this fd. */
	spin_lock(&fl->hlock);
	if (fl->file_close == 1) {
		/*
		 * NOTE(review): err is set to positive EBADF here, so the
		 * ioctl returns a positive value rather than -EBADF —
		 * looks unintentional; confirm against userspace callers.
		 */
		err = EBADF;
		pr_warn("ADSPRPC: fastrpc_device_release is happening, So not sending any new requests to DSP");
		spin_unlock(&fl->hlock);
		goto bail;
	}
	spin_unlock(&fl->hlock);

	switch (ioctl_num) {
	case FASTRPC_IOCTL_INVOKE:
		size = sizeof(struct fastrpc_ioctl_invoke);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_FD:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_fd);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_ATTRS:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_attrs);
		/* fall through */
	case FASTRPC_IOCTL_INVOKE_CRC:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_crc);
		K_COPY_FROM_USER(err, 0, &p.inv, param, size);
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
						0, &p.inv)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MMAP:
		K_COPY_FROM_USER(err, 0, &p.mmap, param,
						sizeof(p.mmap));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
		if (err)
			goto bail;
		/* Copy back: mmap fills in the assigned vaddrout. */
		K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MUNMAP:
		K_COPY_FROM_USER(err, 0, &p.munmap, param,
						sizeof(p.munmap));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
							&p.munmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MUNMAP_FD:
		K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
			sizeof(p.munmap_fd));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_munmap_fd(fl,
			&p.munmap_fd)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_SETMODE:
		/* Mode value is carried directly in ioctl_param, no copy. */
		switch ((uint32_t)ioctl_param) {
		case FASTRPC_MODE_PARALLEL:
		case FASTRPC_MODE_SERIAL:
			fl->mode = (uint32_t)ioctl_param;
			break;
		case FASTRPC_MODE_PROFILE:
			fl->profile = (uint32_t)ioctl_param;
			break;
		case FASTRPC_MODE_SESSION:
			/* Second concurrent session: tag the tgid bit. */
			fl->sessionid = 1;
			fl->tgid |= (1 << SESSION_ID_INDEX);
			break;
		default:
			err = -ENOTTY;
			break;
		}
		break;
	case FASTRPC_IOCTL_GETPERF:
		K_COPY_FROM_USER(err, 0, &p.perf,
					param, sizeof(p.perf));
		if (err)
			goto bail;
		p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
		/* Optional user buffers: keys names and raw counter data. */
		if (p.perf.keys) {
			char *keys = PERF_KEYS;

			K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
						 keys, strlen(keys)+1);
			if (err)
				goto bail;
		}
		if (p.perf.data) {
			K_COPY_TO_USER(err, 0, (void *)p.perf.data,
						 &fl->perf, sizeof(fl->perf));
		}
		K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_CONTROL:
		K_COPY_FROM_USER(err, 0, &p.cp, param,
				sizeof(p.cp));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_GETINFO:
		K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
		if (err)
			goto bail;
		K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_INIT:
		/* Plain INIT has no attrs/siglen; clear before fallthrough. */
		p.init.attrs = 0;
		p.init.siglen = 0;
		size = sizeof(struct fastrpc_ioctl_init);
		/* fall through */
	case FASTRPC_IOCTL_INIT_ATTRS:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_init_attrs);
		K_COPY_FROM_USER(err, 0, &p.init, param, size);
		if (err)
			goto bail;
		/* Bound user-supplied lengths before creating the process. */
		VERIFY(err, p.init.init.filelen >= 0 &&
			p.init.init.filelen < INIT_FILELEN_MAX);
		if (err)
			goto bail;
		VERIFY(err, p.init.init.memlen >= 0 &&
			p.init.init.memlen < INIT_MEMLEN_MAX);
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_init_process(fl, &p.init)));
		if (err)
			goto bail;
		break;

	default:
		err = -ENOTTY;
		pr_info("bad ioctl: %d\n", ioctl_num);
		break;
	}
 bail:
	return err;
}
2885
/*
 * fastrpc_restart_notifier_cb() - subsystem-restart (SSR) notifier.
 *
 * Registered per channel; derives the channel id from the embedded
 * notifier_block. On SUBSYS_BEFORE_SHUTDOWN it bumps the channel's SSR
 * count, marks the subsystem down, closes the glink channel under
 * smd_mutex, and wakes waiting drivers. On SUBSYS_RAMDUMP_NOTIFICATION
 * it arms remote-heap ramdump collection for channel 0 if a ramdump
 * device exists. On SUBSYS_AFTER_POWERUP it marks the subsystem up again.
 */
static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
					unsigned long code,
					void *data)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	struct notif_data *notifdata = data;
	int cid;

	/* Recover which channel this notifier belongs to. */
	ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
	cid = ctx - &me->channel[0];
	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		mutex_lock(&me->smd_mutex);
		ctx->ssrcount++;
		ctx->issubsystemup = 0;
		if (ctx->chan) {
			fastrpc_glink_close(ctx->chan, cid);
			ctx->chan = NULL;
			pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
				 gcinfo[cid].name, MAJOR(me->dev_no), cid);
		}
		mutex_unlock(&me->smd_mutex);
		/* Channel 0 (ADSP) hosts the static PD; force re-init. */
		if (cid == 0)
			me->staticpd_flags = 0;
		fastrpc_notify_drivers(me, cid);
	} else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
		if (me->channel[0].remoteheap_ramdump_dev &&
				notifdata->enable_ramdump) {
			me->channel[0].ramdumpenabled = 1;
		}
	} else if (code == SUBSYS_AFTER_POWERUP) {
		ctx->issubsystemup = 1;
	}

	return NOTIFY_DONE;
}
2922
/* Character-device entry points for the fastrpc device node. */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};
2929
/*
 * Device-tree compatibles dispatched in fastrpc_probe(): compute nodes,
 * per-context-bank SMMU nodes, and the ADSP remote-heap memory region.
 */
static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};
2937
/*
 * fastrpc_cb_probe() - probe one SMMU context-bank DT node.
 *
 * Matches the node's "label" property against the global channel table,
 * then fills the channel's next free session slot: context-bank number
 * from the "iommus" phandle, coherency and secure flags from DT, an ARM
 * IOMMU mapping (secure banks start at 0x60000000 instead of 0x80000000),
 * optional secure-VMID domain attribute, and finally attaches the device.
 *
 * Returns 0 on success or a negative error on any validation failure.
 */
static int fastrpc_cb_probe(struct device *dev)
{
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	struct of_phandle_args iommuspec;
	const char *name;
	unsigned int start = 0x80000000;
	int err = 0, i;
	int secure_vmid = VMID_CP_PIXEL;

	VERIFY(err, NULL != (name = of_get_property(dev->of_node,
					 "label", NULL)));
	if (err)
		goto bail;
	/* Find the channel whose name matches this context bank's label. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		if (!strcmp(name, gcinfo[i].name))
			break;
	}
	VERIFY(err, i < NUM_CHANNELS);
	if (err)
		goto bail;
	chan = &gcinfo[i];
	VERIFY(err, chan->sesscount < NUM_SESSIONS);
	if (err)
		goto bail;

	VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
						"#iommu-cells", 0, &iommuspec));
	if (err)
		goto bail;
	/* Claim the next free session slot on this channel. */
	sess = &chan->session[chan->sesscount];
	sess->smmu.cb = iommuspec.args[0] & 0xf;
	sess->used = 0;
	sess->smmu.coherent = of_property_read_bool(dev->of_node,
						"dma-coherent");
	sess->smmu.secure = of_property_read_bool(dev->of_node,
						"qcom,secure-context-bank");
	/* Secure banks use a lower IOVA base for the same 0x78000000 span. */
	if (sess->smmu.secure)
		start = 0x60000000;
	VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
			arm_iommu_create_mapping(&platform_bus_type,
						start, 0x78000000)));
	if (err)
		goto bail;

	if (sess->smmu.secure)
		iommu_domain_set_attr(sess->smmu.mapping->domain,
				DOMAIN_ATTR_SECURE_VMID,
				&secure_vmid);

	VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
	if (err)
		goto bail;
	/* Only mark the session usable once attach has succeeded. */
	sess->smmu.dev = dev;
	sess->smmu.enabled = 1;
	chan->sesscount++;
	debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
							NULL, &debugfs_fops);
bail:
	return err;
}
3001
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05303002static void init_secure_vmid_list(struct device *dev, char *prop_name,
3003 struct secure_vm *destvm)
3004{
3005 int err = 0;
3006 u32 len = 0, i = 0;
3007 u32 *rhvmlist = NULL;
3008 u32 *rhvmpermlist = NULL;
3009
3010 if (!of_find_property(dev->of_node, prop_name, &len))
3011 goto bail;
3012 if (len == 0)
3013 goto bail;
3014 len /= sizeof(u32);
3015 VERIFY(err, NULL != (rhvmlist = kcalloc(len, sizeof(u32), GFP_KERNEL)));
3016 if (err)
3017 goto bail;
3018 VERIFY(err, NULL != (rhvmpermlist = kcalloc(len, sizeof(u32),
3019 GFP_KERNEL)));
3020 if (err)
3021 goto bail;
3022 for (i = 0; i < len; i++) {
3023 err = of_property_read_u32_index(dev->of_node, prop_name, i,
3024 &rhvmlist[i]);
3025 rhvmpermlist[i] = PERM_READ | PERM_WRITE | PERM_EXEC;
3026 pr_info("ADSPRPC: Secure VMID = %d", rhvmlist[i]);
3027 if (err) {
3028 pr_err("ADSPRPC: Failed to read VMID\n");
3029 goto bail;
3030 }
3031 }
3032 destvm->vmid = rhvmlist;
3033 destvm->vmperm = rhvmpermlist;
3034 destvm->vmcount = len;
3035bail:
3036 if (err) {
3037 kfree(rhvmlist);
3038 kfree(rhvmpermlist);
3039 }
3040}
3041
/*
 * fastrpc_probe() - platform probe, dispatching on compatible string.
 *
 * Three node flavors are handled:
 *  - "qcom,msm-fastrpc-compute": read the remote-heap VMID list and the
 *    optional PM-QoS latency value, then fall through to populate child
 *    nodes (context banks) below.
 *  - "qcom,msm-fastrpc-compute-cb": delegate to fastrpc_cb_probe().
 *  - "qcom,msm-adsprpc-mem-region": locate the ADSP ION heap's CMA area
 *    and hyp-assign it so HLOS, modem, SLPI and ADSP can all access it.
 *
 * Returns 0 on success or a negative error.
 */
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;


	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute")) {
		/* Remote-heap VMIDs live on channel 0 (ADSP). */
		init_secure_vmid_list(dev, "qcom,adsp-remoteheap-vmid",
							&gcinfo[0].rhvm);


		of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
			&me->latency);
	}
	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		range.addr = 0;
		/* Walk the ION heaps looking for the ADSP heap's CMA area. */
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		if (ion_node) {
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
						VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				};

			/* Share the region with the remote subsystems. */
			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
					srcVM, 1, destVM, destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}

	/* Generic compute node: create child platform devices from DT. */
	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					  fastrpc_match_table,
					  NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}
3115
/*
 * fastrpc_deinit() - tear down all channels and sessions.
 *
 * For each channel: drop the glink channel kref (closing it via
 * fastrpc_channel_close under smd_mutex if this was the last reference),
 * detach and release every session's IOMMU mapping, and free the
 * remote-heap VMID arrays built by init_secure_vmid_list(). Called from
 * module exit and from the init-failure unwind path.
 */
static void fastrpc_deinit(void)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *chan = gcinfo;
	int i, j;

	for (i = 0; i < NUM_CHANNELS; i++, chan++) {
		if (chan->chan) {
			kref_put_mutex(&chan->kref,
				fastrpc_channel_close, &me->smd_mutex);
			chan->chan = NULL;
		}
		for (j = 0; j < NUM_SESSIONS; j++) {
			struct fastrpc_session_ctx *sess = &chan->session[j];
			/* Detach before releasing the mapping. */
			if (sess->smmu.dev) {
				arm_iommu_detach_device(sess->smmu.dev);
				sess->smmu.dev = NULL;
			}
			if (sess->smmu.mapping) {
				arm_iommu_release_mapping(sess->smmu.mapping);
				sess->smmu.mapping = NULL;
			}
		}
		kfree(chan->rhvm.vmid);
		kfree(chan->rhvm.vmperm);
	}
}
3143
/* Platform driver; probe dispatches on fastrpc_match_table compatibles. */
static struct platform_driver fastrpc_driver = {
	.probe = fastrpc_probe,
	.driver = {
		.name = "fastrpc",
		.owner = THIS_MODULE,
		.of_match_table = fastrpc_match_table,
	},
};
3152
/*
 * fastrpc_device_init() - module init.
 *
 * Registers the platform driver, allocates a chrdev region spanning
 * NUM_CHANNELS minors but adds a single cdev and creates a single device
 * node (minor 0, named after channel 0), then wires every channel to that
 * device, registers its SSR notifier, creates the ION client and the
 * debugfs root. Failures unwind through the labeled ladder in reverse
 * order of setup.
 *
 * Returns 0 on success or the first error encountered.
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	struct device *dev = NULL;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				1));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	/* One device node, shared by all channels. */
	dev = device_create(me->class, NULL,
				MKDEV(MAJOR(me->dev_no), 0),
				NULL, gcinfo[0].name);
	VERIFY(err, !IS_ERR_OR_NULL(dev));
	if (err)
		goto device_create_bail;
	for (i = 0; i < NUM_CHANNELS; i++) {
		me->channel[i].dev = dev;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].issubsystemup = 1;
		me->channel[i].ramdumpenabled = 0;
		me->channel[i].remoteheap_ramdump_dev = NULL;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		/*
		 * NOTE(review): the unwind path only checks handle for
		 * non-NULL; if subsys_notif_register_notifier can return
		 * ERR_PTR, that check would pass an error pointer to
		 * unregister — confirm against the subsys_notif API.
		 */
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	debugfs_root = debugfs_create_dir("adsprpc", NULL);
	return 0;
device_create_bail:
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (me->channel[i].handle)
			subsys_notif_unregister_notifier(me->channel[i].handle,
							&me->channel[i].nb);
	}
	if (!IS_ERR_OR_NULL(dev))
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}
3224
/*
 * fastrpc_device_exit() - module exit: release files, channels, device
 * nodes, SSR notifiers, class/cdev/chrdev region, ION client and debugfs.
 */
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;
	int i;

	fastrpc_file_list_dtor(me);
	fastrpc_deinit();
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		/*
		 * NOTE(review): init created only one device (minor 0), but
		 * this destroys a device per channel minor — destroys of
		 * minors >= 1 should find nothing; verify intent.
		 */
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						&me->channel[i].nb);
	}
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
	ion_client_destroy(me->client);
	debugfs_remove_recursive(debugfs_root);
}
3245
/* Register init/exit; late_initcall so subsystem services are up first. */
late_initcall(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");