blob: 3a6dfd8ff1ab8193520448eb393fe80aae495f9f [file] [log] [blame]
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001/*
Tharun Kumar Merugubcd6fbf2018-01-04 17:49:34 +05302 * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
29#include <soc/qcom/subsystem_notif.h>
30#include <soc/qcom/subsystem_restart.h>
Tharun Kumar Merugudf860662018-01-17 19:59:50 +053031#include <soc/qcom/service-notifier.h>
32#include <soc/qcom/service-locator.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070033#include <linux/scatterlist.h>
34#include <linux/fs.h>
35#include <linux/uaccess.h>
36#include <linux/device.h>
37#include <linux/of.h>
38#include <linux/of_address.h>
39#include <linux/of_platform.h>
40#include <linux/dma-contiguous.h>
41#include <linux/cma.h>
42#include <linux/iommu.h>
43#include <linux/kref.h>
44#include <linux/sort.h>
45#include <linux/msm_dma_iommu_mapping.h>
46#include <asm/dma-iommu.h>
c_mtharue1a5ce12017-10-13 20:47:09 +053047#include <soc/qcom/scm.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070048#include "adsprpc_compat.h"
49#include "adsprpc_shared.h"
c_mtharue1a5ce12017-10-13 20:47:09 +053050#include <soc/qcom/ramdump.h>
Sathish Ambley1ca68232017-01-19 10:32:55 -080051#include <linux/debugfs.h>
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053052#include <linux/pm_qos.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070053#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
54#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
55#define TZ_PIL_AUTH_QDSP6_PROC 1
c_mtharue1a5ce12017-10-13 20:47:09 +053056#define ADSP_MMAP_HEAP_ADDR 4
57#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
Sathish Ambley69e1ab02016-10-18 10:28:15 -070058#define FASTRPC_ENOSUCH 39
59#define VMID_SSC_Q6 5
60#define VMID_ADSP_Q6 6
Sathish Ambley1ca68232017-01-19 10:32:55 -080061#define DEBUGFS_SIZE 1024
Sathish Ambley69e1ab02016-10-18 10:28:15 -070062
Tharun Kumar Merugudf860662018-01-17 19:59:50 +053063#define AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME "audio_pdr_adsprpc"
64#define AUDIO_PDR_ADSP_SERVICE_NAME "avs/audio"
65
Sathish Ambley69e1ab02016-10-18 10:28:15 -070066#define RPC_TIMEOUT (5 * HZ)
67#define BALIGN 128
68#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
69#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
Sathish Ambleybae51902017-07-03 15:00:49 -070070#define M_FDLIST (16)
71#define M_CRCLIST (64)
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +053072#define SESSION_ID_INDEX (30)
c_mtharufdac6892017-10-12 13:09:01 +053073#define FASTRPC_CTX_MAGIC (0xbeeddeed)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070074
75#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
76
77#define FASTRPC_LINK_STATE_DOWN (0x0)
78#define FASTRPC_LINK_STATE_UP (0x1)
79#define FASTRPC_LINK_DISCONNECTED (0x0)
80#define FASTRPC_LINK_CONNECTING (0x1)
81#define FASTRPC_LINK_CONNECTED (0x3)
82#define FASTRPC_LINK_DISCONNECTING (0x7)
c_mtharu314a4202017-11-15 22:09:17 +053083#define FASTRPC_LINK_REMOTE_DISCONNECTING (0x8)
84#define FASTRPC_GLINK_INTENT_LEN (64)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070085
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +053086#define PERF_KEYS \
87 "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke:tid:ptr"
Sathish Ambleya21b5b52017-01-11 16:11:01 -080088#define FASTRPC_STATIC_HANDLE_LISTENER (3)
89#define FASTRPC_STATIC_HANDLE_MAX (20)
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053090#define FASTRPC_LATENCY_CTRL_ENB (1)
Sathish Ambleya21b5b52017-01-11 16:11:01 -080091
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +053092#define INIT_FILELEN_MAX (2*1024*1024)
93#define INIT_MEMLEN_MAX (8*1024*1024)
94
Sathish Ambleya21b5b52017-01-11 16:11:01 -080095#define PERF_END (void)0
96
97#define PERF(enb, cnt, ff) \
98 {\
99 struct timespec startT = {0};\
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530100 int64_t *counter = cnt;\
101 if (enb && counter) {\
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800102 getnstimeofday(&startT);\
103 } \
104 ff ;\
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530105 if (enb && counter) {\
106 *counter += getnstimediff(&startT);\
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800107 } \
108 }
109
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530110#define GET_COUNTER(perf_ptr, offset) \
111 (perf_ptr != NULL ?\
112 (((offset >= 0) && (offset < PERF_KEY_MAX)) ?\
113 (int64_t *)(perf_ptr + offset)\
114 : (int64_t *)NULL) : (int64_t *)NULL)
115
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700116static int fastrpc_glink_open(int cid);
117static void fastrpc_glink_close(void *chan, int cid);
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530118static int fastrpc_audio_pdr_notifier_cb(struct notifier_block *nb,
119 unsigned long code,
120 void *data);
Sathish Ambley1ca68232017-01-19 10:32:55 -0800121static struct dentry *debugfs_root;
122static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700123
124static inline uint64_t buf_page_start(uint64_t buf)
125{
126 uint64_t start = (uint64_t) buf & PAGE_MASK;
127 return start;
128}
129
130static inline uint64_t buf_page_offset(uint64_t buf)
131{
132 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
133 return offset;
134}
135
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530136static inline uint64_t buf_num_pages(uint64_t buf, size_t len)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700137{
138 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
139 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530140 uint64_t nPages = end - start + 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700141 return nPages;
142}
143
144static inline uint64_t buf_page_size(uint32_t size)
145{
146 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
147
148 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
149}
150
/* Convert a 64-bit address value (as carried in RPC messages) to a pointer. */
static inline void *uint64_to_ptr(uint64_t addr)
{
	return (void *)(uintptr_t)addr;
}
157
/* Convert a kernel pointer to the 64-bit representation used on the wire. */
static inline uint64_t ptr_to_uint64(void *ptr)
{
	return (uint64_t)(uintptr_t)ptr;
}
164
/*
 * Secure VM id/permission arrays (vmcount entries each) describing which
 * VMs own the remote heap after hyp_assign_phys().
 */
struct secure_vm {
	int *vmid;
	int *vmperm;
	int vmcount;
};

struct fastrpc_file;

/* DMA-coherent scratch buffer owned by one client file descriptor. */
struct fastrpc_buf {
	struct hlist_node hn;	/* linkage on fl->bufs free-cache list */
	struct fastrpc_file *fl;
	void *virt;		/* kernel virtual address */
	uint64_t phys;		/* device address; bits 32+ may carry smmu.cb */
	size_t size;
};

struct fastrpc_ctx_lst;

/* Overlap bookkeeping for one invoke argument buffer (see context_build_overlap). */
struct overlap {
	uintptr_t start;
	uintptr_t end;
	int raix;		/* remote-arg index this entry describes */
	uintptr_t mstart;	/* merged copy window after dedup */
	uintptr_t mend;
	uintptr_t offset;
};

/* State of one in-flight remote invocation, kept on fl->clst lists. */
struct smq_invoke_ctx {
	struct hlist_node hn;		/* on clst.pending or clst.interrupted */
	struct completion work;		/* completed when the DSP responds */
	int retval;
	int pid;
	int tgid;
	remote_arg_t *lpra;		/* caller-supplied argument array */
	remote_arg64_t *rpra;		/* 64-bit remote view of the arguments */
	int *fds;
	unsigned int *attrs;
	struct fastrpc_mmap **maps;
	struct fastrpc_buf *buf;
	size_t used;
	struct fastrpc_file *fl;
	uint32_t sc;			/* packed scalars: method + buffer counts */
	struct overlap *overs;
	struct overlap **overps;	/* sorted pointers into overs */
	struct smq_msg msg;
	uint32_t *crc;
	unsigned int magic;		/* FASTRPC_CTX_MAGIC while ctx is valid */
};

/* Per-file lists of outstanding invocation contexts. */
struct fastrpc_ctx_lst {
	struct hlist_head pending;
	struct hlist_head interrupted;
};

/* SMMU context-bank configuration for one session. */
struct fastrpc_smmu {
	struct device *dev;
	struct dma_iommu_mapping *mapping;
	int cb;			/* context-bank id, encoded into phys addresses */
	int enabled;
	int faults;
	int secure;
	int coherent;		/* IO-coherent context bank */
};

/* One DSP session: its device plus SMMU state and an in-use flag. */
struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};

/* Static process-domain (e.g. audio PD) restart and service-locator tracking. */
struct fastrpc_static_pd {
	char *spdname;
	struct notifier_block pdrnb;		/* PD-restart notifier */
	struct notifier_block get_service_nb;	/* service-locator notifier */
	void *pdrhandle;
	int pdrcount;				/* restarts observed */
	int prevpdrcount;			/* restarts already handled */
	int ispdup;				/* nonzero while the PD is up */
};

/* glink transport state for one channel. */
struct fastrpc_glink_info {
	int link_state;		/* FASTRPC_LINK_STATE_* */
	int port_state;		/* FASTRPC_LINK_(DIS)CONNECT* */
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};
252
/* Per-DSP channel (adsp/mdsp/slpi/cdsp) state; one gcinfo[] entry each. */
struct fastrpc_channel_ctx {
	char *name;
	char *subsys;			/* subsystem-restart name */
	void *chan;			/* glink channel handle */
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct fastrpc_static_pd spd[NUM_SESSIONS];
	struct completion work;		/* channel teardown complete */
	struct completion workport;	/* glink port connected */
	struct notifier_block nb;	/* SSR notifier */
	struct kref kref;		/* channel open refcount */
	int sesscount;
	int ssrcount;			/* restarts observed */
	void *handle;			/* subsys notifier handle */
	int prevssrcount;		/* restarts already handled */
	int issubsystemup;
	int vmid;			/* nonzero: buffers hyp-assigned to this VM */
	struct secure_vm rhvm;		/* remote-heap ownership */
	int ramdumpenabled;
	void *remoteheap_ramdump_dev;
	struct fastrpc_glink_info link;
};

/* Driver-global state (singleton gfa). */
struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;		/* serializes channel open/close */
	struct smq_phy_page range;
	struct hlist_head maps;		/* global (remote-heap) mappings */
	uint32_t staticpd_flags;
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;	/* all open fastrpc_file instances */
	spinlock_t hlock;		/* protects drivers and maps lists */
	struct ion_client *client;
	struct device *dev;		/* adsprpc-mem DMA device */
	unsigned int latency;		/* PM QoS latency request value */
};

/* One mapped buffer (ION import or remote-heap allocation). */
struct fastrpc_mmap {
	struct hlist_node hn;		/* on fl->maps or gfa.maps */
	struct fastrpc_file *fl;	/* NULL for remote-heap maps */
	struct fastrpc_apps *apps;
	int fd;
	uint32_t flags;			/* ADSP_MMAP_* */
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;			/* bits 32+ may carry smmu.cb */
	size_t size;
	uintptr_t va;			/* user VA; kernel VA for remote heap */
	size_t len;
	int refs;
	uintptr_t raddr;		/* address on the remote side */
	int uncached;
	int secure;
	uintptr_t attr;			/* FASTRPC_ATTR_* */
};

/*
 * Index of each profiling counter.  These values index struct fastrpc_perf
 * as an array of int64_t (see getperfcounter/GET_COUNTER), so they must
 * stay in sync with that struct's field order.
 */
enum fastrpc_perfkeys {
	PERF_COUNT = 0,
	PERF_FLUSH = 1,
	PERF_MAP = 2,
	PERF_COPY = 3,
	PERF_LINK = 4,
	PERF_GETARGS = 5,
	PERF_PUTARGS = 6,
	PERF_INVARGS = 7,
	PERF_INVOKE = 8,
	PERF_KEY_MAX = 9,
};

/*
 * Per-thread profiling counters.  Field order mirrors enum fastrpc_perfkeys
 * because getperfcounter() indexes this struct as int64_t[] -- do not
 * reorder or insert fields before 'tid'.
 */
struct fastrpc_perf {
	int64_t count;
	int64_t flush;
	int64_t map;
	int64_t copy;
	int64_t link;
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;
	int64_t invoke;
	int64_t tid;
	struct hlist_node hn;		/* on fl->perf, keyed by tid */
};
340
/* Per-open-fd client state. */
struct fastrpc_file {
	struct hlist_node hn;		/* on gfa.drivers */
	spinlock_t hlock;		/* protects bufs and clst lists */
	struct hlist_head maps;		/* this client's mappings */
	struct hlist_head bufs;		/* cached free buffers */
	struct fastrpc_ctx_lst clst;	/* pending/interrupted invocations */
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_session_ctx *secsctx;	/* lazily allocated for secure bufs */
	uint32_t mode;
	uint32_t profile;		/* nonzero: perf counters enabled */
	int sessionid;
	int tgid;
	int cid;			/* channel index into apps->channel[] */
	int ssrcount;			/* channel ssrcount at open time */
	int pd;
	char *spdname;			/* static PD name, if attached to one */
	int file_close;			/* set while the fd is being torn down */
	struct fastrpc_apps *apps;
	struct hlist_head perf;		/* per-thread fastrpc_perf records */
	struct dentry *debugfs_file;
	struct mutex perf_mutex;	/* protects the perf list */
	struct pm_qos_request pm_qos_req;
	int qos_request;
	struct mutex map_mutex;		/* serializes mmap/munmap ioctls */
	struct mutex fl_map_mutex;	/* protects the maps list */
};
367
/* Driver-global singleton state. */
static struct fastrpc_apps gfa;

/*
 * Static description of the four DSP channels.  Index order must match
 * the channel ids used in fl->cid (adsp, mdsp, slpi, cdsp).  Only the
 * ADSP channel registers an audio static-PD restart notifier.
 */
static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
		.spd = {
			{
				.spdname =
					AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
				.pdrnb.notifier_call =
						fastrpc_audio_pdr_notifier_cb,
			}
		},
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};

/* HLOS-only ownership, used when returning memory from a remote VM. */
static int hlosvm[1] = {VMID_HLOS};
static int hlosvmperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
407
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800408static inline int64_t getnstimediff(struct timespec *start)
409{
410 int64_t ns;
411 struct timespec ts, b;
412
413 getnstimeofday(&ts);
414 b = timespec_sub(ts, *start);
415 ns = timespec_to_ns(&b);
416 return ns;
417}
418
/*
 * Return a pointer to the calling thread's perf counter slot @key for @fl,
 * allocating the thread's fastrpc_perf record on first use.  Returns NULL
 * on error.  @key must be a fastrpc_perfkeys value: the result indexes
 * struct fastrpc_perf as an int64_t array (see GET_COUNTER and the layout
 * note on struct fastrpc_perf).
 */
static inline int64_t *getperfcounter(struct fastrpc_file *fl, int key)
{
	int err = 0;
	int64_t *val = NULL;
	struct fastrpc_perf *perf = NULL, *fperf = NULL;
	struct hlist_node *n = NULL;

	VERIFY(err, !IS_ERR_OR_NULL(fl));
	if (err)
		goto bail;

	mutex_lock(&fl->perf_mutex);
	/* One record per thread, keyed by tid. */
	hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
		if (perf->tid == current->pid) {
			fperf = perf;
			break;
		}
	}

	if (IS_ERR_OR_NULL(fperf)) {
		fperf = kzalloc(sizeof(*fperf), GFP_KERNEL);

		VERIFY(err, !IS_ERR_OR_NULL(fperf));
		if (err) {
			mutex_unlock(&fl->perf_mutex);
			kfree(fperf);
			goto bail;
		}

		fperf->tid = current->pid;
		hlist_add_head(&fperf->hn, &fl->perf);
	}

	/* Counter fields are laid out as consecutive int64_t slots. */
	val = ((int64_t *)fperf) + key;
	mutex_unlock(&fl->perf_mutex);
bail:
	return val;
}
457
458
/*
 * Release @buf.  With @cache set, park it on the owner's free list for
 * later reuse.  Otherwise return ownership of the pages to HLOS (when the
 * channel assigned them to a remote VM) and free the coherent memory.
 */
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		/* Strip the SMMU context-bank id encoded in the high bits. */
		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			/* Reclaim the pages from the remote VM before freeing. */
			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->smmu.dev, buf->size, buf->virt,
					buf->phys);
	}
	kfree(buf);
}
490
491static void fastrpc_buf_list_free(struct fastrpc_file *fl)
492{
493 struct fastrpc_buf *buf, *free;
494
495 do {
496 struct hlist_node *n;
497
c_mtharue1a5ce12017-10-13 20:47:09 +0530498 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700499 spin_lock(&fl->hlock);
500 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
501 hlist_del_init(&buf->hn);
502 free = buf;
503 break;
504 }
505 spin_unlock(&fl->hlock);
506 if (free)
507 fastrpc_buf_free(free, 0);
508 } while (free);
509}
510
511static void fastrpc_mmap_add(struct fastrpc_mmap *map)
512{
c_mtharue1a5ce12017-10-13 20:47:09 +0530513 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
514 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
515 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700516
c_mtharue1a5ce12017-10-13 20:47:09 +0530517 spin_lock(&me->hlock);
518 hlist_add_head(&map->hn, &me->maps);
519 spin_unlock(&me->hlock);
520 } else {
521 struct fastrpc_file *fl = map->fl;
522
c_mtharue1a5ce12017-10-13 20:47:09 +0530523 hlist_add_head(&map->hn, &fl->maps);
c_mtharue1a5ce12017-10-13 20:47:09 +0530524 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700525}
526
/*
 * Look up an existing mapping that covers [va, va+len) with a matching @fd.
 * Remote-heap flags scan the global list under gfa.hlock; all other flags
 * scan the per-file list.
 * NOTE(review): fl->maps is walked here without an explicit lock --
 * presumably callers hold fl->fl_map_mutex; verify at the call sites.
 * On a hit, optionally bumps the refcount (@refs) and returns 0 with
 * *ppmap set; returns -ENOTTY when not found, -EOVERFLOW if va+len wraps.
 */
static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
		uintptr_t va, size_t len, int mflags, int refs,
		struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_mmap *match = NULL, *map = NULL;
	struct hlist_node *n;

	if ((va + len) < va)
		return -EOVERFLOW;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs)
					map->refs++;
				match = map;
				break;
			}
		}
		spin_unlock(&me->hlock);
	} else {
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs)
					map->refs++;
				match = map;
				break;
			}
		}
	}
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}
569
c_mtharuf931ff92017-11-30 19:35:30 +0530570static int dma_alloc_memory(dma_addr_t *region_phys, void **vaddr, size_t size)
c_mtharue1a5ce12017-10-13 20:47:09 +0530571{
572 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +0530573
574 if (me->dev == NULL) {
575 pr_err("device adsprpc-mem is not initialized\n");
576 return -ENODEV;
577 }
c_mtharuf931ff92017-11-30 19:35:30 +0530578 *vaddr = dma_alloc_coherent(me->dev, size, region_phys, GFP_KERNEL);
579 if (!*vaddr) {
c_mtharue1a5ce12017-10-13 20:47:09 +0530580 pr_err("ADSPRPC: Failed to allocate %x remote heap memory\n",
581 (unsigned int)size);
582 return -ENOMEM;
583 }
584 return 0;
585}
586
/*
 * Find and unlink the mapping whose remote address range is exactly
 * [va, va+len) and whose refcount is 1 (i.e. safe to tear down).  The
 * global remote-heap list is searched first under gfa.hlock, then the
 * per-file list.
 * NOTE(review): the fl->maps walk is unlocked here -- presumably guarded
 * by fl->fl_map_mutex in the callers; verify.
 * Returns 0 with *ppmap set, or -ENOTTY if no such mapping exists.
 */
static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			size_t len, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = NULL, *map;
	struct hlist_node *n;
	struct fastrpc_apps *me = &gfa;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}
624
/*
 * Drop one reference on @map and tear it down when the count reaches zero
 * (or unconditionally for per-file maps when @flags is nonzero, used to
 * force-release FASTRPC_ATTR_KEEP_MAP buffers).  Remote-heap maps free
 * their coherent allocation; ION maps undo the import/attach/map sequence
 * and, when the channel uses a remote VM, reclaim the pages for HLOS.
 */
static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;

	if (!map)
		return;
	fl = map->fl;
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		/* Global maps: refcount manipulated under gfa.hlock. */
		spin_lock(&me->hlock);
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		spin_unlock(&me->hlock);
		if (map->refs > 0)
			return;
	} else {
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		/* @flags forces teardown even with outstanding refs. */
		if (map->refs > 0 && !flags)
			return;
	}
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {

		if (me->dev == NULL) {
			pr_err("failed to free remote heap allocation\n");
			return;
		}
		if (map->phys) {
			/* For remote heap, va holds the kernel virtual addr. */
			dma_free_coherent(me->dev, map->size,
				(void *)map->va, (dma_addr_t)map->phys);
		}
	} else {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		if (!IS_ERR_OR_NULL(map->handle))
			ion_free(fl->apps->client, map->handle);
		if (sess && sess->smmu.enabled) {
			if (map->size || map->phys)
				msm_dma_unmap_sg(sess->smmu.dev,
					map->table->sgl,
					map->table->nents, DMA_BIDIRECTIONAL,
					map->buf);
		}
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid && map->phys) {
			int srcVM[2] = {VMID_HLOS, vmid};

			/* Return the pages to HLOS-only ownership. */
			hyp_assign_phys(map->phys, buf_page_size(map->size),
				srcVM, 2, destVM, destVMperm, 1);
		}

		/* Undo dma_buf_map_attachment/attach/get in reverse order. */
		if (!IS_ERR_OR_NULL(map->table))
			dma_buf_unmap_attachment(map->attach, map->table,
					DMA_BIDIRECTIONAL);
		if (!IS_ERR_OR_NULL(map->attach))
			dma_buf_detach(map->buf, map->attach);
		if (!IS_ERR_OR_NULL(map->buf))
			dma_buf_put(map->buf);
	}
	kfree(map);
}
698
699static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
700 struct fastrpc_session_ctx **session);
701
/*
 * Create (or reuse) a mapping for @fd covering [va, va+len).
 *
 * If an existing mapping covers the range, a reference is taken and it is
 * returned via *ppmap.  Otherwise: remote-heap flags allocate coherent
 * memory from the adsprpc-mem device; all other flags import the ION
 * buffer, attach and map it through the session's SMMU, and (on channels
 * with a remote VM) hyp-assign the pages to that VM.  On success the map
 * is published via fastrpc_mmap_add() and returned in *ppmap; on failure
 * the partially built map is released and a negative/err value returned.
 */
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, size_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = NULL;
	unsigned long attrs;
	dma_addr_t region_phys = 0;
	void *region_vaddr = NULL;
	unsigned long flags;
	int err = 0, vmid;

	/* Reuse an existing mapping when one already covers the range. */
	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		/* Remote heap: global map backed by a coherent allocation. */
		map->apps = me;
		map->fl = NULL;
		VERIFY(err, !dma_alloc_memory(&region_phys, &region_vaddr,
				len));
		if (err)
			goto bail;
		map->phys = (uintptr_t)region_phys;
		map->size = len;
		map->va = (uintptr_t)region_vaddr;
	} else {
		/* Extra ref keeps a KEEP_MAP buffer alive across puts. */
		if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
			pr_info("adsprpc: buffer mapped with persist attr %x\n",
				(unsigned int)map->attr);
			map->refs = 2;
		}
		VERIFY(err, !IS_ERR_OR_NULL(map->handle =
				ion_import_dma_buf_fd(fl->apps->client, fd)));
		if (err)
			goto bail;
		VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
		if (err)
			goto bail;

		map->secure = flags & ION_FLAG_SECURE;
		/* Secure buffers need the (lazily created) secure session. */
		if (map->secure) {
			if (!fl->secsctx)
				err = fastrpc_session_alloc(chan, 1,
							&fl->secsctx);
			if (err)
				goto bail;
		}
		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		VERIFY(err, !IS_ERR_OR_NULL(sess));
		if (err)
			goto bail;

		map->uncached = !ION_IS_CACHED(flags);
		if (map->attr & FASTRPC_ATTR_NOVA && !sess->smmu.coherent)
			map->uncached = 1;

		VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->attach =
				dma_buf_attach(map->buf, sess->smmu.dev)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
		if (err)
			goto bail;
		if (sess->smmu.enabled) {
			attrs = DMA_ATTR_EXEC_MAPPING;

			/* Coherency override attributes from the caller. */
			if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
				(sess->smmu.coherent && map->uncached))
				attrs |= DMA_ATTR_FORCE_NON_COHERENT;
			else if (map->attr & FASTRPC_ATTR_COHERENT)
				attrs |= DMA_ATTR_FORCE_COHERENT;

			VERIFY(err, map->table->nents ==
				msm_dma_map_sg_attrs(sess->smmu.dev,
					map->table->sgl, map->table->nents,
					DMA_BIDIRECTIONAL, map->buf, attrs));
			if (err)
				goto bail;
		} else {
			/* Without an SMMU the buffer must be contiguous. */
			VERIFY(err, map->table->nents == 1);
			if (err)
				goto bail;
		}
		map->phys = sg_dma_address(map->table->sgl);
		if (sess->smmu.cb) {
			/* Encode the context bank in the high address bits. */
			map->phys += ((uint64_t)sess->smmu.cb << 32);
			map->size = sg_dma_len(map->table->sgl);
		} else {
			map->size = buf_page_size(len);
		}
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[2] = {VMID_HLOS, vmid};
			int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

			/* Share the pages with the channel's remote VM. */
			VERIFY(err, !hyp_assign_phys(map->phys,
					buf_page_size(map->size),
					srcVM, 1, destVM, destVMperm, 2));
			if (err)
				goto bail;
		}
		map->va = va;
	}
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map, 0);
	return err;
}
841
/*
 * Get a DMA-coherent buffer of at least @size bytes for @fl, preferring
 * the smallest suitable buffer from the per-file cache.  On a cache miss
 * a fresh buffer is allocated (retrying once after draining the cache)
 * and, when the channel has a remote VM, the pages are hyp-assigned to
 * it.  Returns 0 with *obuf set, or a negative err.
 */
static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
			struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = NULL, *fr = NULL;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = NULL;
	VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
				(void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
					(void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	/* Encode the SMMU context bank in the high address bits. */
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				PERM_READ | PERM_WRITE | PERM_EXEC};

		/* Share the pages with the channel's remote VM. */
		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
 bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}
907
908
909static int context_restore_interrupted(struct fastrpc_file *fl,
Sathish Ambleybae51902017-07-03 15:00:49 -0700910 struct fastrpc_ioctl_invoke_crc *inv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700911 struct smq_invoke_ctx **po)
912{
913 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +0530914 struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700915 struct hlist_node *n;
916 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
917
918 spin_lock(&fl->hlock);
919 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
920 if (ictx->pid == current->pid) {
921 if (invoke->sc != ictx->sc || ictx->fl != fl)
922 err = -1;
923 else {
924 ctx = ictx;
925 hlist_del_init(&ctx->hn);
926 hlist_add_head(&ctx->hn, &fl->clst.pending);
927 }
928 break;
929 }
930 }
931 spin_unlock(&fl->hlock);
932 if (ctx)
933 *po = ctx;
934 return err;
935}
936
937#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
938static int overlap_ptr_cmp(const void *a, const void *b)
939{
940 struct overlap *pa = *((struct overlap **)a);
941 struct overlap *pb = *((struct overlap **)b);
942 /* sort with lowest starting buffer first */
943 int st = CMP(pa->start, pb->start);
944 /* sort with highest ending buffer first */
945 int ed = CMP(pb->end, pa->end);
946 return st == 0 ? ed : st;
947}
948
/*
 * Work out, for every in/out buffer of the call, which byte range
 * actually has to be copied.  Buffers that overlap in the caller's
 * address space are coalesced so overlapping bytes are copied once:
 * each overs[i] records the buffer ([start,end)), the region to copy
 * ([mstart,mend)) and the offset of the buffer inside the copy.
 *
 * Returns 0 on success, or an error (via VERIFY) when a non-zero
 * buffer length wraps the address space.
 */
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			/* reject user-supplied lengths that wrap around */
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		/* remember the original argument index before sorting */
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
	/* sweep in address order; "max" tracks the furthest end seen */
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			/* overlaps an earlier buffer: copy only the tail */
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				/* fully contained: nothing new to copy */
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			/* disjoint from everything so far: copy it whole */
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}
994
/*
 * Copy @size bytes from @src into @dst.  When @kernel is false, @src
 * is a userspace pointer and copy_from_user() is used, recording any
 * failure in @err via VERIFY; otherwise both pointers are kernel
 * addresses and memmove() is used.
 */
#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst),\
			(void const __user *)(src),\
			(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)
1004
/*
 * Copy @size bytes from @src out to @dst.  When @kernel is false,
 * @dst is a userspace pointer and copy_to_user() is used, recording
 * any failure in @err via VERIFY; otherwise memmove() is used.
 */
#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((void __user *)(dst),\
			(src), (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)
1013
1014
1015static void context_free(struct smq_invoke_ctx *ctx);
1016
/*
 * Allocate and initialise an invocation context for one remote call.
 *
 * The context is allocated as a single kzalloc of the struct plus
 * trailing per-argument arrays (maps, lpra, fds, attrs, overs,
 * overps), carved out in that exact order below -- keep the pointer
 * setup in sync with the "size" computation.  The remote-arg array,
 * fd list and attribute list are copied in from the caller (user or
 * kernel per @kernel), overlaps are computed, and the context is put
 * on the file's pending list.  On error the partially built context
 * is torn down via context_free().
 */
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			struct fastrpc_ioctl_invoke_crc *invokefd,
			struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = NULL;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	/* total number of argument slots (buffers + handles) */
	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	/* mark as "not on a list yet" so context_free can hlist_del safely */
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	/* carve the trailing arrays out of the single allocation */
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
	ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
	ctx->overps = (struct overlap **)(&ctx->overs[bufs]);

	K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	/* fd and attribute arrays are optional */
	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}
	ctx->crc = (uint32_t *)invokefd->crc;
	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	/* -1 until context_notify_user() delivers the remote retval */
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = fl->tgid;
	init_completion(&ctx->work);
	/* magic lets other paths validate a context pointer */
	ctx->magic = FASTRPC_CTX_MAGIC;

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}
1087
1088static void context_save_interrupted(struct smq_invoke_ctx *ctx)
1089{
1090 struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
1091
1092 spin_lock(&ctx->fl->hlock);
1093 hlist_del_init(&ctx->hn);
1094 hlist_add_head(&ctx->hn, &clst->interrupted);
1095 spin_unlock(&ctx->fl->hlock);
1096 /* free the cache on power collapse */
1097 fastrpc_buf_list_free(ctx->fl);
1098}
1099
/*
 * Tear down an invocation context: unlink it from whichever context
 * list it is on, release the per-argument mmaps (under fl_map_mutex)
 * and the marshalling buffer, then free the context itself.  ->magic
 * is cleared before kfree so stale pointers fail validation.
 */
static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	/* only buffer slots carry mmaps to free here, not handle slots */
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	mutex_lock(&ctx->fl->fl_map_mutex);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i], 0);

	mutex_unlock(&ctx->fl->fl_map_mutex);
	fastrpc_buf_free(ctx->buf, 1);
	ctx->magic = 0;
	kfree(ctx);
}
1117
/*
 * Record the remote return value and wake the thread blocked on this
 * context's completion.
 */
static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}
1123
1124
1125static void fastrpc_notify_users(struct fastrpc_file *me)
1126{
1127 struct smq_invoke_ctx *ictx;
1128 struct hlist_node *n;
1129
1130 spin_lock(&me->hlock);
1131 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1132 complete(&ictx->work);
1133 }
1134 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1135 complete(&ictx->work);
1136 }
1137 spin_unlock(&me->hlock);
1138
1139}
1140
1141static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
1142{
1143 struct fastrpc_file *fl;
1144 struct hlist_node *n;
1145
1146 spin_lock(&me->hlock);
1147 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1148 if (fl->cid == cid)
1149 fastrpc_notify_users(fl);
1150 }
1151 spin_unlock(&me->hlock);
1152
1153}
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05301154
1155static void fastrpc_notify_pdr_drivers(struct fastrpc_apps *me, char *spdname)
1156{
1157 struct fastrpc_file *fl;
1158 struct hlist_node *n;
1159
1160 spin_lock(&me->hlock);
1161 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1162 if (fl->spdname && !strcmp(spdname, fl->spdname))
1163 fastrpc_notify_users(fl);
1164 }
1165 spin_unlock(&me->hlock);
1166
1167}
1168
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001169static void context_list_ctor(struct fastrpc_ctx_lst *me)
1170{
1171 INIT_HLIST_HEAD(&me->interrupted);
1172 INIT_HLIST_HEAD(&me->pending);
1173}
1174
1175static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
1176{
1177 struct fastrpc_ctx_lst *clst = &fl->clst;
c_mtharue1a5ce12017-10-13 20:47:09 +05301178 struct smq_invoke_ctx *ictx = NULL, *ctxfree;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001179 struct hlist_node *n;
1180
1181 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301182 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001183 spin_lock(&fl->hlock);
1184 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
1185 hlist_del_init(&ictx->hn);
1186 ctxfree = ictx;
1187 break;
1188 }
1189 spin_unlock(&fl->hlock);
1190 if (ctxfree)
1191 context_free(ctxfree);
1192 } while (ctxfree);
1193 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301194 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001195 spin_lock(&fl->hlock);
1196 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
1197 hlist_del_init(&ictx->hn);
1198 ctxfree = ictx;
1199 break;
1200 }
1201 spin_unlock(&fl->hlock);
1202 if (ctxfree)
1203 context_free(ctxfree);
1204 } while (ctxfree);
1205}
1206
1207static int fastrpc_file_free(struct fastrpc_file *fl);
1208static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
1209{
1210 struct fastrpc_file *fl, *free;
1211 struct hlist_node *n;
1212
1213 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301214 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001215 spin_lock(&me->hlock);
1216 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1217 hlist_del_init(&fl->hn);
1218 free = fl;
1219 break;
1220 }
1221 spin_unlock(&me->hlock);
1222 if (free)
1223 fastrpc_file_free(free);
1224 } while (free);
1225}
1226
/*
 * Marshal the arguments of one remote invocation into the shared
 * buffer (ctx->buf) handed to the remote processor.
 *
 * The metadata region laid out at the start of ctx->buf consists of
 * the invoke-buf list, the physical-page table, fdlist[M_FDLIST] and
 * crclist[M_CRCLIST]; copied (non-ion) argument data follows, BALIGN
 * aligned.  This layout is implied by the smq_invoke_buf_start()/
 * smq_phy_page_start() pointer arithmetic below -- confirm against
 * their definitions before changing anything here.
 *
 * fd-backed arguments are mapped and passed by physical address;
 * everything else is copied into ctx->buf using the overlap plan
 * from context_build_overlap().  Returns 0 or an error via VERIFY.
 */
static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
{
	remote_arg64_t *rpra;
	remote_arg_t *lpra = ctx->lpra;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages, *ipage;
	uint32_t sc = ctx->sc;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	int handles, bufs = inbufs + outbufs;
	uintptr_t args;
	size_t rlen = 0, copylen = 0, metalen = 0;
	int i, oix;
	int err = 0;
	int mflags = 0;
	uint64_t *fdlist;
	uint32_t *crclist;
	int64_t *perf_counter = getperfcounter(ctx->fl, PERF_COUNT);

	/* calculate size of the metadata */
	/* with rpra == NULL the *_start() helpers yield offsets, so
	 * &ipage[0] below is the metadata size, not a real pointer
	 */
	rpra = NULL;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;

	/* map every argument that carries a valid fd */
	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
	for (i = 0; i < bufs; ++i) {
		uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
		size_t len = lpra[i].buf.len;

		mutex_lock(&ctx->fl->fl_map_mutex);
		if (ctx->fds[i] && (ctx->fds[i] != -1))
			fastrpc_mmap_create(ctx->fl, ctx->fds[i],
					ctx->attrs[i], buf, len,
					mflags, &ctx->maps[i]);
		mutex_unlock(&ctx->fl->fl_map_mutex);
		ipage += 1;
	}
	PERF_END);
	/* handle arguments are always mapped; failure here is fatal */
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	mutex_lock(&ctx->fl->fl_map_mutex);
	for (i = bufs; i < bufs + handles; i++) {
		VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
				FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i]));
		if (err) {
			mutex_unlock(&ctx->fl->fl_map_mutex);
			goto bail;
		}
		ipage += 1;
	}
	mutex_unlock(&ctx->fl->fl_map_mutex);
	/* metadata = buf list + page table + fd list + crc list */
	metalen = copylen = (size_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
				 (sizeof(uint32_t) * M_CRCLIST);

	/* calculate len required for copying */
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		uintptr_t mstart, mend;
		size_t len = lpra[i].buf.len;

		if (!len)
			continue;
		/* fd-backed buffers are passed by address, not copied */
		if (ctx->maps[i])
			continue;
		/* each non-overlapping run starts cache-line aligned */
		if (ctx->overps[oix]->offset == 0)
			copylen = ALIGN(copylen, BALIGN);
		mstart = ctx->overps[oix]->mstart;
		mend = ctx->overps[oix]->mend;
		VERIFY(err, (mend - mstart) <= LONG_MAX);
		if (err)
			goto bail;
		copylen += mend - mstart;
		VERIFY(err, copylen >= 0);
		if (err)
			goto bail;
	}
	ctx->used = copylen;

	/* allocate new buffer */
	/* NOTE(review): metalen > 0 so copylen should always be
	 * non-zero here and ctx->buf set below -- confirm
	 */
	if (copylen) {
		VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
		if (err)
			goto bail;
	}
	if (ctx->buf->virt && metalen <= copylen)
		memset(ctx->buf->virt, 0, metalen);

	/* copy metadata */
	rpra = ctx->buf->virt;
	ctx->rpra = rpra;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;
	/* copied argument data begins right after the metadata */
	args = (uintptr_t)ctx->buf->virt + metalen;
	for (i = 0; i < bufs + handles; ++i) {
		if (lpra[i].buf.len)
			list[i].num = 1;
		else
			list[i].num = 0;
		list[i].pgidx = ipage - pages;
		ipage++;
	}

	/* map ion buffers */
	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
	for (i = 0; rpra && i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];
		uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
		size_t len = lpra[i].buf.len;

		rpra[i].buf.pv = 0;
		rpra[i].buf.len = len;
		if (!len)
			continue;
		if (map) {
			struct vm_area_struct *vma;
			uintptr_t offset;
			uint64_t num = buf_num_pages(buf, len);
			int idx = list[i].pgidx;

			if (map->attr & FASTRPC_ATTR_NOVA) {
				/* no user VA: buffer starts at phys base */
				offset = 0;
			} else {
				/* locate the buffer inside its mapping and
				 * bounds-check the resulting offset
				 */
				down_read(&current->mm->mmap_sem);
				VERIFY(err, NULL != (vma = find_vma(current->mm,
								map->va)));
				if (err) {
					up_read(&current->mm->mmap_sem);
					goto bail;
				}
				offset = buf_page_start(buf) - vma->vm_start;
				up_read(&current->mm->mmap_sem);
				VERIFY(err, offset < (uintptr_t)map->size);
				if (err)
					goto bail;
			}
			pages[idx].addr = map->phys + offset;
			pages[idx].size = num << PAGE_SHIFT;
		}
		rpra[i].buf.pv = buf;
	}
	PERF_END);
	/* handle slots publish the whole mapping */
	for (i = bufs; i < bufs + handles; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		pages[i].addr = map->phys;
		pages[i].size = map->size;
	}
	/* fdlist/crclist are written back by the remote side; clear them */
	fdlist = (uint64_t *)&pages[bufs + handles];
	for (i = 0; i < M_FDLIST; i++)
		fdlist[i] = 0;
	crclist = (uint32_t *)&fdlist[M_FDLIST];
	memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);

	/* copy non ion buffers */
	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
	rlen = copylen - metalen;
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];
		size_t mlen;
		uint64_t buf;
		size_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (map)
			continue;
		if (ctx->overps[oix]->offset == 0) {
			/* mirror the alignment used when sizing copylen */
			rlen -= ALIGN(args, BALIGN) - args;
			args = ALIGN(args, BALIGN);
		}
		mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
		VERIFY(err, rlen >= mlen);
		if (err)
			goto bail;
		/* overlapping buffers share bytes: point back into the
		 * region copied for the earlier, overlapping buffer
		 */
		rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
		pages[list[i].pgidx].addr = ctx->buf->phys -
					    ctx->overps[oix]->offset +
					    (copylen - rlen);
		pages[list[i].pgidx].addr =
			buf_page_start(pages[list[i].pgidx].addr);
		buf = rpra[i].buf.pv;
		pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
		if (i < inbufs) {
			K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
					lpra[i].buf.pv, len);
			if (err)
				goto bail;
		}
		args = args + mlen;
		rlen -= mlen;
	}
	PERF_END);

	/* flush copied/overlapping regions unless coherency makes it moot */
	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH),
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		if (rpra[i].buf.len && ctx->overps[oix]->mstart)
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
			uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
	}
	PERF_END);
	/* publish dma descriptors for the handle slots */
	for (i = bufs; rpra && i < bufs + handles; i++) {
		rpra[i].dma.fd = ctx->fds[i];
		rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
		rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
	}

	/* finally flush the metadata itself for non-coherent sessions */
	if (!ctx->fl->sctx->smmu.coherent) {
		PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH),
		dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
		PERF_END);
	}
 bail:
	return err;
}
1454
/*
 * Unmarshal results after a remote call: copy by-copy output buffers
 * back to the caller, release the per-argument mmaps, free any maps
 * the remote side returned in fdlist, and copy the CRC list out to
 * the user if it was requested at invoke time.
 */
static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		    remote_arg_t *upra)
{
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	uint32_t *crclist = NULL;

	remote_arg64_t *rpra = ctx->rpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	/* recover fdlist/crclist positions from the metadata layout
	 * established in get_args()
	 */
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	crclist = (uint32_t *)(fdlist + M_FDLIST);

	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			/* copied output: move results back to the caller */
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			/* mapped output: results landed in place; drop map */
			mutex_lock(&ctx->fl->fl_map_mutex);
			fastrpc_mmap_free(ctx->maps[i], 0);
			mutex_unlock(&ctx->fl->fl_map_mutex);
			ctx->maps[i] = NULL;
		}
	}
	mutex_lock(&ctx->fl->fl_map_mutex);
	if (inbufs + outbufs + handles) {
		/* fdlist holds fds the remote side is done with; a zero
		 * entry terminates the list
		 */
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap, 0);
		}
	}
	mutex_unlock(&ctx->fl->fl_map_mutex);
	if (ctx->crc && crclist && rpra)
		K_COPY_TO_USER(err, kernel, ctx->crc,
			crclist, M_CRCLIST*sizeof(uint32_t));

 bail:
	return err;
}
1510
/*
 * Pre-invoke cache maintenance for output buffers: flush the cache
 * lines that straddle the unaligned start and end of each cached,
 * non-coherent output buffer -- presumably so dirty lines shared
 * with adjacent data are not written back over the remote side's
 * output later (NOTE(review): confirm against the cache model for
 * this target).  Buffers living inside the metadata page are skipped.
 */
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		/* uncached or coherent buffers need no maintenance */
		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* buffer shares the metadata page: handled elsewhere */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		/* flush the partially-owned line at the buffer start */
		if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
			dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
				(char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
							rpra[i].buf.len);
		/* and the partially-owned line at the buffer end */
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}
1546
/*
 * Post-invoke cache maintenance: invalidate the CPU caches covering
 * every cached, non-coherent output buffer (and the metadata region)
 * so the CPU observes the remote side's writes rather than stale
 * lines.  ion-backed buffers go through msm_ion_do_cache_op; plain
 * copied buffers through dmac_inv_range.
 */
static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	int used = ctx->used;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		/* uncached or coherent buffers need no invalidation */
		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		/* buffers in the metadata page are covered by the final
		 * whole-metadata invalidate below
		 */
		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						 + rpra[i].buf.len));
	}

	if (rpra)
		dmac_inv_range(rpra, (char *)rpra + used);
}
1586
/*
 * Build the smq_msg for this context and transmit it to the remote
 * processor over glink.  Bails out early if the channel is gone, a
 * subsystem restart happened since the file was opened (ssrcount
 * mismatch -> -ECONNRESET), or the link is not connected.
 */
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0;

	VERIFY(err, NULL != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = fl->tgid;
	msg->tid = current->pid;
	/* a second session on the same process is distinguished by a
	 * bit in the tid
	 */
	if (fl->sessionid)
		msg->tid |= (1 << SESSION_ID_INDEX);
	/* pid 0 marks kernel-originated invocations on the remote side */
	if (kernel)
		msg->pid = 0;
	/* ctx pointer doubles as the completion token; low bits carry
	 * the protection-domain selector
	 */
	msg->invoke.header.ctx = ptr_to_uint64(ctx) | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);

	if (fl->ssrcount != channel_ctx->ssrcount) {
		err = -ECONNRESET;
		goto bail;
	}
	VERIFY(err, channel_ctx->link.port_state ==
			FASTRPC_LINK_CONNECTED);
	if (err)
		goto bail;
	err = glink_tx(channel_ctx->chan,
		(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
		GLINK_TX_REQ_INTENT);
 bail:
	return err;
}
1624
1625static void fastrpc_init(struct fastrpc_apps *me)
1626{
1627 int i;
1628
1629 INIT_HLIST_HEAD(&me->drivers);
Tharun Kumar Merugubcd6fbf2018-01-04 17:49:34 +05301630 INIT_HLIST_HEAD(&me->maps);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001631 spin_lock_init(&me->hlock);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05301632 mutex_init(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001633 me->channel = &gcinfo[0];
1634 for (i = 0; i < NUM_CHANNELS; i++) {
1635 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301636 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001637 me->channel[i].sesscount = 0;
1638 }
1639}
1640
1641static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1642
1643static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
1644 uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07001645 struct fastrpc_ioctl_invoke_crc *inv)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001646{
c_mtharue1a5ce12017-10-13 20:47:09 +05301647 struct smq_invoke_ctx *ctx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001648 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
1649 int cid = fl->cid;
1650 int interrupted = 0;
1651 int err = 0;
Maria Yu757199c2017-09-22 16:05:49 +08001652 struct timespec invoket = {0};
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301653 int64_t *perf_counter = getperfcounter(fl, PERF_COUNT);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001654
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001655 if (fl->profile)
1656 getnstimeofday(&invoket);
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301657
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301658
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301659 VERIFY(err, fl->sctx != NULL);
1660 if (err)
1661 goto bail;
1662 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1663 if (err)
1664 goto bail;
1665
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001666 if (!kernel) {
1667 VERIFY(err, 0 == context_restore_interrupted(fl, inv,
1668 &ctx));
1669 if (err)
1670 goto bail;
1671 if (fl->sctx->smmu.faults)
1672 err = FASTRPC_ENOSUCH;
1673 if (err)
1674 goto bail;
1675 if (ctx)
1676 goto wait;
1677 }
1678
1679 VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
1680 if (err)
1681 goto bail;
1682
1683 if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301684 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_GETARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001685 VERIFY(err, 0 == get_args(kernel, ctx));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001686 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001687 if (err)
1688 goto bail;
1689 }
1690
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301691 if (!fl->sctx->smmu.coherent) {
1692 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001693 inv_args_pre(ctx);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301694 PERF_END);
1695 }
1696
1697 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_LINK),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001698 VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001699 PERF_END);
1700
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001701 if (err)
1702 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001703 wait:
1704 if (kernel)
1705 wait_for_completion(&ctx->work);
1706 else {
1707 interrupted = wait_for_completion_interruptible(&ctx->work);
1708 VERIFY(err, 0 == (err = interrupted));
1709 if (err)
1710 goto bail;
1711 }
Sathish Ambleyc432b502017-06-05 12:03:42 -07001712
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301713 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
Sathish Ambleyc432b502017-06-05 12:03:42 -07001714 if (!fl->sctx->smmu.coherent)
1715 inv_args(ctx);
1716 PERF_END);
1717
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001718 VERIFY(err, 0 == (err = ctx->retval));
1719 if (err)
1720 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001721
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301722 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_PUTARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001723 VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001724 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001725 if (err)
1726 goto bail;
1727 bail:
1728 if (ctx && interrupted == -ERESTARTSYS)
1729 context_save_interrupted(ctx);
1730 else if (ctx)
1731 context_free(ctx);
1732 if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
1733 err = ECONNRESET;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001734
1735 if (fl->profile && !interrupted) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301736 if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER) {
1737 int64_t *count = GET_COUNTER(perf_counter, PERF_INVOKE);
1738
1739 if (count)
1740 *count += getnstimediff(&invoket);
1741 }
1742 if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX) {
1743 int64_t *count = GET_COUNTER(perf_counter, PERF_COUNT);
1744
1745 if (count)
1746 *count = *count+1;
1747 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001748 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001749 return err;
1750}
1751
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05301752static int fastrpc_get_adsp_session(char *name, int *session)
1753{
1754 struct fastrpc_apps *me = &gfa;
1755 int err = 0, i;
1756
1757 for (i = 0; i < NUM_SESSIONS; i++) {
1758 if (!me->channel[0].spd[i].spdname)
1759 continue;
1760 if (!strcmp(name, me->channel[0].spd[i].spdname))
1761 break;
1762 }
1763 VERIFY(err, i < NUM_SESSIONS);
1764 if (err)
1765 goto bail;
1766 *session = i;
1767bail:
1768 return err;
1769}
1770
1771static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl);
Sathish Ambley36849af2017-02-02 09:35:55 -08001772static int fastrpc_channel_open(struct fastrpc_file *fl);
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05301773static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl);
/*
 * Create or attach the remote process context for @fl on the DSP.
 *
 * Three modes, selected by init->flags:
 *   FASTRPC_INIT_ATTACH        - attach the caller to the DSP guest-OS
 *                                process (fl->pd = 0).
 *   FASTRPC_INIT_CREATE        - spawn a new dynamic user PD, mapping
 *                                the shell file and a donated memory
 *                                region into the DSP.
 *   FASTRPC_INIT_CREATE_STATIC - attach to a pre-created static PD
 *                                (e.g. "audiopd"), donating the remote
 *                                heap once per boot (me->staticpd_flags).
 * Returns 0 on success; on failure the bail path unwinds any mappings
 * and hypervisor assignments made here.
 */
static int fastrpc_init_process(struct fastrpc_file *fl,
				struct fastrpc_ioctl_init_attrs *uproc)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_ioctl_invoke_crc ioctl;
	struct fastrpc_ioctl_init *init = &uproc->init;
	struct smq_phy_page pages[1];
	struct fastrpc_mmap *file = NULL, *mem = NULL;
	char *proc_name = NULL;

	VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
	if (err)
		goto bail;
	if (init->flags == FASTRPC_INIT_ATTACH) {
		remote_arg_t ra[1];
		int tgid = fl->tgid;

		/* Attach: only the caller's group id travels to the DSP. */
		ra[0].buf.pv = (void *)&tgid;
		ra[0].buf.len = sizeof(tgid);
		ioctl.inv.handle = 1;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = NULL;
		ioctl.attrs = NULL;
		ioctl.crc = NULL;
		fl->pd = 0;	/* mark as attached to the guest-OS PD */
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else if (init->flags == FASTRPC_INIT_CREATE) {
		remote_arg_t ra[6];
		int fds[6];
		int mflags = 0;
		struct {
			int pgid;
			unsigned int namelen;
			unsigned int filelen;
			unsigned int pageslen;
			int attrs;
			int siglen;
		} inbuf;

		inbuf.pgid = fl->tgid;
		inbuf.namelen = strlen(current->comm) + 1;
		inbuf.filelen = init->filelen;
		fl->pd = 1;	/* dynamic user PD */

		/* Validate the user-supplied shell file range up front. */
		VERIFY(err, access_ok(0, (void __user *)init->file,
			init->filelen));
		if (err)
			goto bail;
		if (init->filelen) {
			/* Map the shell file so the DSP can read it. */
			mutex_lock(&fl->fl_map_mutex);
			VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
				init->file, init->filelen, mflags, &file));
			mutex_unlock(&fl->fl_map_mutex);
			if (err)
				goto bail;
		}
		inbuf.pageslen = 1;
		VERIFY(err, access_ok(1, (void __user *)init->mem,
			init->memlen));
		if (err)
			goto bail;
		/* Donated region backing the new PD (heap/stack memory). */
		mutex_lock(&fl->fl_map_mutex);
		VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
			init->mem, init->memlen, mflags, &mem));
		mutex_unlock(&fl->fl_map_mutex);
		if (err)
			goto bail;
		inbuf.pageslen = 1;
		ra[0].buf.pv = (void *)&inbuf;
		ra[0].buf.len = sizeof(inbuf);
		fds[0] = 0;

		ra[1].buf.pv = (void *)current->comm;
		ra[1].buf.len = inbuf.namelen;
		fds[1] = 0;

		ra[2].buf.pv = (void *)init->file;
		ra[2].buf.len = inbuf.filelen;
		fds[2] = init->filefd;

		pages[0].addr = mem->phys;
		pages[0].size = mem->size;
		ra[3].buf.pv = (void *)pages;
		ra[3].buf.len = 1 * sizeof(*pages);
		fds[3] = 0;

		inbuf.attrs = uproc->attrs;
		ra[4].buf.pv = (void *)&(inbuf.attrs);
		ra[4].buf.len = sizeof(inbuf.attrs);
		fds[4] = 0;

		inbuf.siglen = uproc->siglen;
		ra[5].buf.pv = (void *)&(inbuf.siglen);
		ra[5].buf.len = sizeof(inbuf.siglen);
		fds[5] = 0;

		ioctl.inv.handle = 1;
		/* Method 6 (plain create) or 7 (create with attrs/siglen). */
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
		if (uproc->attrs)
			ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = fds;
		ioctl.attrs = NULL;
		ioctl.crc = NULL;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
		remote_arg_t ra[3];
		uint64_t phys = 0;
		size_t size = 0;
		int fds[3];
		struct {
			int pgid;
			unsigned int namelen;
			unsigned int pageslen;
		} inbuf;

		/* filelen doubles as the PD-name length for static PDs. */
		if (!init->filelen)
			goto bail;

		proc_name = kzalloc(init->filelen, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(proc_name));
		if (err)
			goto bail;
		VERIFY(err, 0 == copy_from_user((void *)proc_name,
			(void __user *)init->file, init->filelen));
		if (err)
			goto bail;

		inbuf.pgid = current->tgid;
		inbuf.namelen = init->filelen;
		inbuf.pageslen = 0;

		/*
		 * audiopd is PDR-managed: drop any mappings left over from a
		 * previous protection-domain restart before re-attaching.
		 * NOTE(review): err from fastrpc_mmap_remove_pdr() is not
		 * checked here before continuing — confirm this is intended.
		 */
		if (!strcmp(proc_name, "audiopd")) {
			fl->spdname = AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME;
			VERIFY(err, !fastrpc_mmap_remove_pdr(fl));
		}

		if (!me->staticpd_flags) {
			/* First client donates the remote heap to the DSP. */
			inbuf.pageslen = 1;
			mutex_lock(&fl->fl_map_mutex);
			VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
				 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
				 &mem));
			mutex_unlock(&fl->fl_map_mutex);
			if (err)
				goto bail;
			phys = mem->phys;
			size = mem->size;
			/* Hand the pages from HLOS to the remote VM ids. */
			VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
					hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
					me->channel[fl->cid].rhvm.vmperm,
					me->channel[fl->cid].rhvm.vmcount));
			if (err) {
				pr_err("ADSPRPC: hyp_assign_phys fail err %d",
							 err);
				pr_err("map->phys %llx, map->size %d\n",
							phys, (int)size);
				goto bail;
			}
			me->staticpd_flags = 1;
		}

		ra[0].buf.pv = (void *)&inbuf;
		ra[0].buf.len = sizeof(inbuf);
		fds[0] = 0;

		ra[1].buf.pv = (void *)proc_name;
		ra[1].buf.len = inbuf.namelen;
		fds[1] = 0;

		pages[0].addr = phys;
		pages[0].size = size;

		ra[2].buf.pv = (void *)pages;
		ra[2].buf.len = sizeof(*pages);
		fds[2] = 0;
		ioctl.inv.handle = 1;

		ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = NULL;
		ioctl.attrs = NULL;
		ioctl.crc = NULL;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else {
		err = -ENOTTY;	/* unknown init flag */
	}
bail:
	kfree(proc_name);
	/* Allow a later client to retry the one-shot heap donation. */
	if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
		me->staticpd_flags = 0;
	if (mem && err) {
		/* Return the donated heap to HLOS before freeing it. */
		if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
			hyp_assign_phys(mem->phys, (uint64_t)mem->size,
					me->channel[fl->cid].rhvm.vmid,
					me->channel[fl->cid].rhvm.vmcount,
					hlosvm, hlosvmperm, 1);
		mutex_lock(&fl->fl_map_mutex);
		fastrpc_mmap_free(mem, 0);
		mutex_unlock(&fl->fl_map_mutex);
	}
	if (file) {
		mutex_lock(&fl->fl_map_mutex);
		fastrpc_mmap_free(file, 0);
		mutex_unlock(&fl->fl_map_mutex);
	}
	return err;
}
1993
1994static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
1995{
1996 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07001997 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001998 remote_arg_t ra[1];
1999 int tgid = 0;
2000
Sathish Ambley36849af2017-02-02 09:35:55 -08002001 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
2002 if (err)
2003 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302004 VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002005 if (err)
2006 goto bail;
2007 tgid = fl->tgid;
2008 ra[0].buf.pv = (void *)&tgid;
2009 ra[0].buf.len = sizeof(tgid);
2010 ioctl.inv.handle = 1;
2011 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
2012 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302013 ioctl.fds = NULL;
2014 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002015 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002016 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2017 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2018bail:
2019 return err;
2020}
2021
/*
 * Register @map with the remote process on the DSP and record the
 * remote-side address in map->raddr. For heap mappings, additionally
 * protect the memory via TZ (ADSP_MMAP_HEAP_ADDR) or reassign it to the
 * remote VM ids via the hypervisor (ADSP_MMAP_REMOTE_HEAP_ADDR).
 */
static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
			       struct fastrpc_mmap *map)
{
	struct fastrpc_ioctl_invoke_crc ioctl;
	struct fastrpc_apps *me = &gfa;
	struct smq_phy_page page;
	int num = 1;
	remote_arg_t ra[3];
	int err = 0;
	struct {
		int pid;
		uint32_t flags;
		uintptr_t vaddrin;
		int num;
	} inargs;
	struct {
		uintptr_t vaddrout;
	} routargs;

	inargs.pid = fl->tgid;
	inargs.vaddrin = (uintptr_t)map->va;
	inargs.flags = flags;
	/* Compat (32-bit) clients pass a byte count instead of a count. */
	inargs.num = fl->apps->compat ? num * sizeof(page) : num;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);
	page.addr = map->phys;
	page.size = map->size;
	ra[1].buf.pv = (void *)&page;
	ra[1].buf.len = num * sizeof(page);

	ra[2].buf.pv = (void *)&routargs;
	ra[2].buf.len = sizeof(routargs);

	ioctl.inv.handle = 1;
	/* Compat clients use a different remote method index. */
	if (fl->apps->compat)
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
	else
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
	ioctl.inv.pra = ra;
	ioctl.fds = NULL;
	ioctl.attrs = NULL;
	ioctl.crc = NULL;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	/* raddr is stored unconditionally, then err is checked. */
	map->raddr = (uintptr_t)routargs.vaddrout;
	if (err)
		goto bail;
	if (flags == ADSP_MMAP_HEAP_ADDR) {
		struct scm_desc desc = {0};

		/* TZ call to protect the heap region for the DSP subsystem. */
		desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
		desc.args[1] = map->phys;
		desc.args[2] = map->size;
		desc.arginfo = SCM_ARGS(3);
		err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
			TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
	} else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		/* Move the pages from HLOS ownership to the remote VM ids. */
		VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
				hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
				me->channel[fl->cid].rhvm.vmperm,
				me->channel[fl->cid].rhvm.vmcount));
		if (err)
			goto bail;
	}
bail:
	return err;
}
2089
2090static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
2091 struct fastrpc_mmap *map)
2092{
2093 int err = 0;
c_mtharu63ffc012017-11-16 15:26:56 +05302094 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +05302095 int destVM[1] = {VMID_HLOS};
2096 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
2097
2098 if (map->flags == ADSP_MMAP_HEAP_ADDR) {
2099 struct fastrpc_ioctl_invoke_crc ioctl;
2100 struct scm_desc desc = {0};
2101 remote_arg_t ra[1];
2102 int err = 0;
2103 struct {
2104 uint8_t skey;
2105 } routargs;
2106
2107 ra[0].buf.pv = (void *)&routargs;
2108 ra[0].buf.len = sizeof(routargs);
2109
2110 ioctl.inv.handle = 1;
2111 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
2112 ioctl.inv.pra = ra;
2113 ioctl.fds = NULL;
2114 ioctl.attrs = NULL;
2115 ioctl.crc = NULL;
2116 if (fl == NULL)
2117 goto bail;
2118
2119 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2120 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2121 if (err)
2122 goto bail;
2123 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
2124 desc.args[1] = map->phys;
2125 desc.args[2] = map->size;
2126 desc.args[3] = routargs.skey;
2127 desc.arginfo = SCM_ARGS(4);
2128 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
2129 TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
2130 } else if (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2131 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302132 me->channel[fl->cid].rhvm.vmid,
2133 me->channel[fl->cid].rhvm.vmcount,
2134 destVM, destVMperm, 1));
c_mtharue1a5ce12017-10-13 20:47:09 +05302135 if (err)
2136 goto bail;
2137 }
2138
2139bail:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002140 return err;
2141}
2142
2143static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
2144 struct fastrpc_mmap *map)
2145{
Sathish Ambleybae51902017-07-03 15:00:49 -07002146 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002147 remote_arg_t ra[1];
2148 int err = 0;
2149 struct {
2150 int pid;
2151 uintptr_t vaddrout;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302152 size_t size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002153 } inargs;
2154
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302155 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002156 inargs.size = map->size;
2157 inargs.vaddrout = map->raddr;
2158 ra[0].buf.pv = (void *)&inargs;
2159 ra[0].buf.len = sizeof(inargs);
2160
2161 ioctl.inv.handle = 1;
2162 if (fl->apps->compat)
2163 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
2164 else
2165 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
2166 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302167 ioctl.fds = NULL;
2168 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002169 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002170 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2171 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
c_mtharue1a5ce12017-10-13 20:47:09 +05302172 if (err)
2173 goto bail;
2174 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
2175 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2176 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, map));
2177 if (err)
2178 goto bail;
2179 }
2180bail:
2181 return err;
2182}
2183
2184static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
2185{
2186 struct fastrpc_mmap *match = NULL, *map = NULL;
2187 struct hlist_node *n = NULL;
2188 int err = 0, ret = 0;
2189 struct fastrpc_apps *me = &gfa;
2190 struct ramdump_segment *ramdump_segments_rh = NULL;
2191
2192 do {
2193 match = NULL;
2194 spin_lock(&me->hlock);
2195 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
2196 match = map;
2197 hlist_del_init(&map->hn);
2198 break;
2199 }
2200 spin_unlock(&me->hlock);
2201
2202 if (match) {
2203 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match));
2204 if (err)
2205 goto bail;
2206 if (me->channel[0].ramdumpenabled) {
2207 ramdump_segments_rh = kcalloc(1,
2208 sizeof(struct ramdump_segment), GFP_KERNEL);
2209 if (ramdump_segments_rh) {
2210 ramdump_segments_rh->address =
2211 match->phys;
2212 ramdump_segments_rh->size = match->size;
2213 ret = do_elf_ramdump(
2214 me->channel[0].remoteheap_ramdump_dev,
2215 ramdump_segments_rh, 1);
2216 if (ret < 0)
2217 pr_err("ADSPRPC: unable to dump heap");
2218 kfree(ramdump_segments_rh);
2219 }
2220 }
c_mtharu7bd6a422017-10-17 18:15:37 +05302221 fastrpc_mmap_free(match, 0);
c_mtharue1a5ce12017-10-13 20:47:09 +05302222 }
2223 } while (match);
2224bail:
2225 if (err && match)
2226 fastrpc_mmap_add(match);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002227 return err;
2228}
2229
/*
 * Handle protection-domain-restart (PDR) bookkeeping for the audio
 * static PD before a client (re)attaches: if the PD has restarted since
 * we last looked (pdrcount advanced past prevpdrcount), drop the stale
 * remote-heap mappings; then fail with -ENOTCONN if the PD is not
 * currently up (ispdup == 0).
 */
static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl)
{
	struct fastrpc_apps *me = &gfa;
	int session = 0, err = 0;

	VERIFY(err, !fastrpc_get_adsp_session(
			AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
	if (err)
		goto bail;
	/* A restart happened since the last attach: purge old mappings. */
	if (me->channel[fl->cid].spd[session].pdrcount !=
		me->channel[fl->cid].spd[session].prevpdrcount) {
		if (fastrpc_mmap_remove_ssr(fl))
			pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
		me->channel[fl->cid].spd[session].prevpdrcount =
			me->channel[fl->cid].spd[session].pdrcount;
	}
	if (!me->channel[fl->cid].spd[session].ispdup) {
		/* VERIFY(err, 0) always fails: used here for its logging. */
		VERIFY(err, 0);
		if (err) {
			err = -ENOTCONN;
			goto bail;
		}
	}
bail:
	return err;
}
2256
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002257static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302258 size_t len, struct fastrpc_mmap **ppmap);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002259
2260static void fastrpc_mmap_add(struct fastrpc_mmap *map);
2261
/*
 * MUNMAP ioctl backend: detach the mapping matching (vaddrout, size)
 * from fl's list, unmap it on the DSP, then free it.
 *
 * Lock order: fl->map_mutex (serializes whole mmap/munmap operations)
 * is held across the call; fl->fl_map_mutex (protects fl->maps) is
 * taken only around list manipulation so it is not held during the
 * remote invoke. If the DSP-side unmap fails, the map is re-added so
 * the caller can retry.
 */
static int fastrpc_internal_munmap(struct fastrpc_file *fl,
				   struct fastrpc_ioctl_munmap *ud)
{
	int err = 0;
	struct fastrpc_mmap *map = NULL;

	mutex_lock(&fl->map_mutex);
	mutex_lock(&fl->fl_map_mutex);
	VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
	mutex_unlock(&fl->fl_map_mutex);
	if (err)
		goto bail;
	VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
	if (err)
		goto bail;
	mutex_lock(&fl->fl_map_mutex);
	fastrpc_mmap_free(map, 0);
	mutex_unlock(&fl->fl_map_mutex);
bail:
	/* Put the map back on failure so state stays consistent. */
	if (err && map) {
		mutex_lock(&fl->fl_map_mutex);
		fastrpc_mmap_add(map);
		mutex_unlock(&fl->fl_map_mutex);
	}
	mutex_unlock(&fl->map_mutex);
	return err;
}
2289
c_mtharu7bd6a422017-10-17 18:15:37 +05302290static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
2291 struct fastrpc_ioctl_munmap_fd *ud) {
2292 int err = 0;
2293 struct fastrpc_mmap *map = NULL;
2294
2295 VERIFY(err, (fl && ud));
2296 if (err)
2297 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302298 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302299 if (!fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
2300 pr_err("mapping not found to unamp %x va %llx %x\n",
2301 ud->fd, (unsigned long long)ud->va,
2302 (unsigned int)ud->len);
2303 err = -1;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302304 mutex_unlock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302305 goto bail;
2306 }
2307 if (map)
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302308 fastrpc_mmap_free(map, 0);
2309 mutex_unlock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302310bail:
2311 return err;
2312}
2313
2314
/*
 * MMAP ioctl backend: map (fd, vaddrin, size) into the remote process.
 * If an identical mapping already exists, return success immediately.
 * Otherwise create the local mapping, register it on the DSP, and
 * report the remote address back in ud->vaddrout.
 *
 * Lock order mirrors fastrpc_internal_munmap(): fl->map_mutex across
 * the whole operation, fl->fl_map_mutex only around list access.
 *
 * NOTE(review): the early-return reuse path does not write
 * ud->vaddrout from the existing map — confirm callers tolerate this.
 */
static int fastrpc_internal_mmap(struct fastrpc_file *fl,
				 struct fastrpc_ioctl_mmap *ud)
{

	struct fastrpc_mmap *map = NULL;
	int err = 0;

	mutex_lock(&fl->map_mutex);
	mutex_lock(&fl->fl_map_mutex);
	/* fastrpc_mmap_find() returns 0 when a matching map exists. */
	if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
			 ud->size, ud->flags, 1, &map)) {
		mutex_unlock(&fl->fl_map_mutex);
		mutex_unlock(&fl->map_mutex);
		return 0;
	}
	VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
			(uintptr_t)ud->vaddrin, ud->size,
			 ud->flags, &map));
	mutex_unlock(&fl->fl_map_mutex);
	if (err)
		goto bail;
	VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
	if (err)
		goto bail;
	ud->vaddrout = map->raddr;
 bail:
	/* Drop the half-constructed map on failure. */
	if (err && map) {
		mutex_lock(&fl->fl_map_mutex);
		fastrpc_mmap_free(map, 0);
		mutex_unlock(&fl->fl_map_mutex);
	}
	mutex_unlock(&fl->map_mutex);
	return err;
}
2349
/*
 * kref release callback for a channel: closes the glink transport and
 * unregisters the link-state callback once the last file reference to
 * the channel is dropped.
 *
 * Locking: invoked via kref_put_mutex(..., &me->smd_mutex), so the
 * caller acquired smd_mutex and THIS function releases it — do not call
 * directly without that mutex held.
 */
static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	int cid;

	ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
	cid = ctx - &gcinfo[0];	/* channel id = index into gcinfo[] */
	fastrpc_glink_close(ctx->chan, cid);
	ctx->chan = NULL;
	glink_unregister_link_state_cb(ctx->link.link_notify_handle);
	ctx->link.link_notify_handle = NULL;
	mutex_unlock(&me->smd_mutex);	/* taken by kref_put_mutex() caller */
	pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
		MAJOR(me->dev_no), cid);
}
2366
2367static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
2368
2369static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
2370 int secure, struct fastrpc_session_ctx **session)
2371{
2372 struct fastrpc_apps *me = &gfa;
2373 int idx = 0, err = 0;
2374
2375 if (chan->sesscount) {
2376 for (idx = 0; idx < chan->sesscount; ++idx) {
2377 if (!chan->session[idx].used &&
2378 chan->session[idx].smmu.secure == secure) {
2379 chan->session[idx].used = 1;
2380 break;
2381 }
2382 }
2383 VERIFY(err, idx < chan->sesscount);
2384 if (err)
2385 goto bail;
2386 chan->session[idx].smmu.faults = 0;
2387 } else {
2388 VERIFY(err, me->dev != NULL);
2389 if (err)
2390 goto bail;
2391 chan->session[0].dev = me->dev;
c_mtharue1a5ce12017-10-13 20:47:09 +05302392 chan->session[0].smmu.dev = me->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002393 }
2394
2395 *session = &chan->session[idx];
2396 bail:
2397 return err;
2398}
2399
c_mtharue1a5ce12017-10-13 20:47:09 +05302400static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
2401 size_t size)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002402{
2403 if (glink_queue_rx_intent(h, NULL, size))
2404 return false;
2405 return true;
2406}
2407
/* Glink tx-done callback: intentionally a no-op for this driver. */
static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
					const void *pkt_priv, const void *ptr)
{
}
2412
/*
 * Glink rx callback: validate the incoming invoke response and complete
 * the matching in-flight context.
 *
 * The packet carries a context pointer from the DSP, so it is treated
 * as untrusted: the size is checked against the response struct and the
 * recovered context must carry FASTRPC_CTX_MAGIC before it is used.
 * The rx buffer is always returned to glink, even on a bad packet.
 */
static void fastrpc_glink_notify_rx(void *handle, const void *priv,
	const void *pkt_priv, const void *ptr, size_t size)
{
	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
	struct smq_invoke_ctx *ctx;
	int err = 0;

	VERIFY(err, (rsp && size >= sizeof(*rsp)));
	if (err)
		goto bail;

	/* Low bit of rsp->ctx is a flag; mask it off to get the pointer. */
	ctx = (struct smq_invoke_ctx *)(uint64_to_ptr(rsp->ctx & ~1));
	VERIFY(err, (ctx && ctx->magic == FASTRPC_CTX_MAGIC));
	if (err)
		goto bail;

	context_notify_user(ctx, rsp->retval);
bail:
	if (err)
		pr_err("adsprpc: invalid response or context\n");
	glink_rx_done(handle, ptr, true);	/* always release the buffer */
}
2435
c_mtharue1a5ce12017-10-13 20:47:09 +05302436static void fastrpc_glink_notify_state(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002437 unsigned int event)
2438{
2439 struct fastrpc_apps *me = &gfa;
2440 int cid = (int)(uintptr_t)priv;
2441 struct fastrpc_glink_info *link;
2442
2443 if (cid < 0 || cid >= NUM_CHANNELS)
2444 return;
2445 link = &me->channel[cid].link;
2446 switch (event) {
2447 case GLINK_CONNECTED:
2448 link->port_state = FASTRPC_LINK_CONNECTED;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302449 complete(&me->channel[cid].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002450 break;
2451 case GLINK_LOCAL_DISCONNECTED:
2452 link->port_state = FASTRPC_LINK_DISCONNECTED;
2453 break;
2454 case GLINK_REMOTE_DISCONNECTED:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002455 break;
2456 default:
2457 break;
2458 }
2459}
2460
2461static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
2462 struct fastrpc_session_ctx **session)
2463{
2464 int err = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302465 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002466
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302467 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002468 if (!*session)
2469 err = fastrpc_session_alloc_locked(chan, secure, session);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302470 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002471 return err;
2472}
2473
2474static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
2475 struct fastrpc_session_ctx *session)
2476{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302477 struct fastrpc_apps *me = &gfa;
2478
2479 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002480 session->used = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302481 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002482}
2483
/*
 * Tear down a per-fd fastrpc file object: notify the DSP, unlink from
 * the global list, free contexts/buffers/mappings/perf records, drop
 * the channel reference, and release the sessions.
 *
 * Teardown order matters: the DSP release happens first (while the
 * channel is still usable), file_close is set under fl->hlock so other
 * paths stop using fl, and the channel kref is dropped only if no SSR
 * happened since this fd attached (ssrcount match).
 */
static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n = NULL;
	struct fastrpc_mmap *map = NULL;
	struct fastrpc_perf *perf = NULL, *fperf = NULL;
	int cid;

	if (!fl)
		return 0;
	cid = fl->cid;

	/* Best effort: result intentionally ignored during teardown. */
	(void)fastrpc_release_current_dsp_process(fl);

	spin_lock(&fl->apps->hlock);
	hlist_del_init(&fl->hn);
	spin_unlock(&fl->apps->hlock);

	/* Never attached to a session: nothing else to clean up. */
	if (!fl->sctx) {
		kfree(fl);
		return 0;
	}
	spin_lock(&fl->hlock);
	fl->file_close = 1;	/* signals other paths the fd is closing */
	spin_unlock(&fl->hlock);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	mutex_lock(&fl->fl_map_mutex);
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		fastrpc_mmap_free(map, 1);
	}
	mutex_unlock(&fl->fl_map_mutex);
	/* Only drop the channel ref if no SSR happened since attach. */
	if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
		kref_put_mutex(&fl->apps->channel[cid].kref,
			fastrpc_channel_close, &fl->apps->smd_mutex);
	if (fl->sctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);

	/* Drain the perf-record list one entry at a time under the lock. */
	mutex_lock(&fl->perf_mutex);
	do {
		struct hlist_node *pn = NULL;

		fperf = NULL;
		hlist_for_each_entry_safe(perf, pn, &fl->perf, hn) {
			hlist_del_init(&perf->hn);
			fperf = perf;
			break;
		}
		kfree(fperf);
	} while (fperf);
	mutex_unlock(&fl->perf_mutex);
	mutex_destroy(&fl->perf_mutex);
	mutex_destroy(&fl->fl_map_mutex);
	kfree(fl);
	return 0;
}
2541
/*
 * file_operations .release handler: undo per-open state (PM QoS vote,
 * debugfs node, map mutex) and then free the fastrpc_file itself.
 * fastrpc_file_free() kfrees fl, so it must run last and private_data
 * is cleared to avoid a dangling pointer.
 */
static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;

	if (fl) {
		if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
			pm_qos_remove_request(&fl->pm_qos_req);
		if (fl->debugfs_file != NULL)
			debugfs_remove(fl->debugfs_file);
		mutex_destroy(&fl->map_mutex);
		fastrpc_file_free(fl);
		file->private_data = NULL;
	}
	return 0;
}
2557
2558static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
2559 void *priv)
2560{
2561 struct fastrpc_apps *me = &gfa;
2562 int cid = (int)((uintptr_t)priv);
2563 struct fastrpc_glink_info *link;
2564
2565 if (cid < 0 || cid >= NUM_CHANNELS)
2566 return;
2567
2568 link = &me->channel[cid].link;
2569 switch (cb_info->link_state) {
2570 case GLINK_LINK_STATE_UP:
2571 link->link_state = FASTRPC_LINK_STATE_UP;
2572 complete(&me->channel[cid].work);
2573 break;
2574 case GLINK_LINK_STATE_DOWN:
2575 link->link_state = FASTRPC_LINK_STATE_DOWN;
2576 break;
2577 default:
2578 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
2579 break;
2580 }
2581}
2582
/*
 * Register for glink link-state notifications on channel @cid and wait
 * (with timeout) for the link to come up.  Idempotent: if a notify
 * handle already exists the function returns success without waiting.
 * The channel id is smuggled to the callback through the priv cookie.
 * Returns 0 on success, negative via the VERIFY() error convention on
 * bad cid, registration failure, or timeout.
 */
static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;

	link = &me->channel[cid].link;
	/* Already registered: nothing to do. */
	if (link->link_notify_handle != NULL)
		goto bail;

	link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
	link->link_notify_handle = glink_register_link_state_cb(
				&link->link_info,
				(void *)((uintptr_t)cid));
	VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
	if (err) {
		/* Don't cache an ERR_PTR as a valid handle. */
		link->link_notify_handle = NULL;
		goto bail;
	}
	/* Completed by fastrpc_link_state_handler() on GLINK_LINK_STATE_UP. */
	VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
						RPC_TIMEOUT));
bail:
	return err;
}
2610
/*
 * Close the glink port for channel @cid if it is in a state where a
 * close is meaningful (connected, or mid remote-disconnect).  Moves the
 * port state to DISCONNECTING before calling glink_close(); the final
 * DISCONNECTED transition happens in the glink state-notify callback.
 */
static void fastrpc_glink_close(void *chan, int cid)
{
	int err = 0;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		return;
	link = &gfa.channel[cid].link;

	if (link->port_state == FASTRPC_LINK_CONNECTED ||
		link->port_state == FASTRPC_LINK_REMOTE_DISCONNECTING) {
		link->port_state = FASTRPC_LINK_DISCONNECTING;
		glink_close(chan);
	}
}
2627
/*
 * Open the glink port for channel @cid.  Preconditions: the link must be
 * up (see fastrpc_glink_register()) and the port fully disconnected.
 * On success the opened handle is stored in me->channel[cid].chan and
 * the port state is CONNECTING; the CONNECTED transition is driven by
 * the fastrpc_glink_notify_state callback wired up below.  On a failed
 * glink_open() the CONNECTING state is rolled back.
 */
static int fastrpc_glink_open(int cid)
{
	int err = 0;
	void *handle = NULL;
	struct fastrpc_apps *me = &gfa;
	struct glink_open_config *cfg;
	struct fastrpc_glink_info *link;

	VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
	if (err)
		goto bail;
	link = &me->channel[cid].link;
	cfg = &me->channel[cid].link.cfg;
	VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
	if (err)
		goto bail;

	/* Refuse to open over a port that is not fully torn down. */
	VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
	if (err)
		goto bail;

	link->port_state = FASTRPC_LINK_CONNECTING;
	cfg->priv = (void *)(uintptr_t)cid;
	cfg->edge = gcinfo[cid].link.link_info.edge;
	cfg->transport = gcinfo[cid].link.link_info.transport;
	cfg->name = FASTRPC_GLINK_GUID;
	cfg->notify_rx = fastrpc_glink_notify_rx;
	cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
	cfg->notify_state = fastrpc_glink_notify_state;
	cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
	handle = glink_open(cfg);
	VERIFY(err, !IS_ERR_OR_NULL(handle));
	if (err) {
		/* Roll back the state we set, unless a callback moved it. */
		if (link->port_state == FASTRPC_LINK_CONNECTING)
			link->port_state = FASTRPC_LINK_DISCONNECTED;
		goto bail;
	}
	me->channel[cid].chan = handle;
bail:
	return err;
}
2669
/*
 * debugfs .open handler: stash the node's private pointer (a
 * fastrpc_file *, or NULL for the global node) for the read handler.
 */
static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}
2675
/*
 * debugfs .read handler: format driver state into a temporary buffer
 * and copy it to userspace.  When opened without a fastrpc_file (the
 * "global" node) it dumps every channel's SMMU sessions; when opened on
 * a per-process node it dumps that file's bufs, maps, and the pending
 * and interrupted invoke-context lists.  Returns bytes copied, or 0 if
 * the scratch allocation fails.
 */
static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
					 size_t count, loff_t *position)
{
	struct fastrpc_file *fl = filp->private_data;
	struct hlist_node *n;
	struct fastrpc_buf *buf = NULL;
	struct fastrpc_mmap *map = NULL;
	struct smq_invoke_ctx *ictx = NULL;
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	unsigned int len = 0;
	int i, j, ret = 0;
	char *fileinfo = NULL;

	fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
	if (!fileinfo)
		goto bail;
	if (fl == NULL) {
		/* Global node: walk all channels and their sessions. */
		for (i = 0; i < NUM_CHANNELS; i++) {
			chan = &gcinfo[i];
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s\n\n",
					chan->name);
			len += scnprintf(fileinfo + len,
					DEBUGFS_SIZE - len, "%s %d\n",
					"sesscount:", chan->sesscount);
			for (j = 0; j < chan->sesscount; j++) {
				sess = &chan->session[j];
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s%d\n\n", "SESSION", j);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "sid:",
						sess->smmu.cb);
				len += scnprintf(fileinfo + len,
						DEBUGFS_SIZE - len,
						"%s %d\n", "SECURE:",
						sess->smmu.secure);
			}
		}
	} else {
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"PROCESS_ID:", fl->tgid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"CHANNEL_ID:", fl->cid);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"SSRCOUNT:", fl->ssrcount);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s\n",
				"LIST OF BUFS:");
		/*
		 * fl->hlock is held for all four list walks below.
		 * NOTE(review): fl->maps is elsewhere guarded by
		 * fl_map_mutex, not hlock — confirm this read-only walk is
		 * safe against concurrent map teardown.
		 */
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %pK %s %llx\n", "buf:",
					buf, "buf->virt:", buf->virt,
					"buf->phys:", buf->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF MAPS:");
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %lx %s %llx\n",
					"map:", map,
					"map->va:", map->va,
					"map->phys:", map->phys);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF PENDING SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %u %s %u %s %u\n",
					"smqcontext:", ictx,
					"sc:", ictx->sc,
					"tid:", ictx->pid,
					"handle", ictx->rpra->h);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"\n%s\n",
					"LIST OF INTERRUPTED SMQCONTEXTS:");
		hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
			len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
					"%s %pK %s %u %s %u %s %u\n",
					"smqcontext:", ictx,
					"sc:", ictx->sc,
					"tid:", ictx->pid,
					"handle", ictx->rpra->h);
		}
		spin_unlock(&fl->hlock);
	}
	/* Clamp in case the last scnprintf pushed len to the cap. */
	if (len > DEBUGFS_SIZE)
		len = DEBUGFS_SIZE;
	ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
	kfree(fileinfo);
bail:
	return ret;
}
2778
/* File operations for the fastrpc debugfs nodes (global and per-file). */
static const struct file_operations debugfs_fops = {
	.open = fastrpc_debugfs_open,
	.read = fastrpc_debugfs_read,
};
/*
 * Ensure the glink channel for fl->cid is open, bringing it up if this
 * is the first user (or the first user after an SSR).  Entirely
 * serialized by the global smd_mutex.  Takes a kref on the channel when
 * it is already up; otherwise registers the link, opens the port, waits
 * for the port-open completion, re-inits the kref, and queues the
 * initial rx intents.  Returns 0 on success, negative error otherwise.
 */
static int fastrpc_channel_open(struct fastrpc_file *fl)
{
	struct fastrpc_apps *me = &gfa;
	int cid, err = 0;

	mutex_lock(&me->smd_mutex);

	VERIFY(err, fl && fl->sctx);
	if (err)
		goto bail;
	cid = fl->cid;
	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
	if (err)
		goto bail;
	/* Mid-SSR: subsystem went down and has not come back yet. */
	if (me->channel[cid].ssrcount !=
			 me->channel[cid].prevssrcount) {
		if (!me->channel[cid].issubsystemup) {
			VERIFY(err, 0);
			if (err) {
				err = -ENOTCONN;
				goto bail;
			}
		}
	}
	/* Remember which SSR generation this file belongs to. */
	fl->ssrcount = me->channel[cid].ssrcount;
	if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
	    (me->channel[cid].chan == NULL)) {
		VERIFY(err, 0 == fastrpc_glink_register(cid, me));
		if (err)
			goto bail;
		VERIFY(err, 0 == fastrpc_glink_open(cid));
		if (err)
			goto bail;

		/* Completed by the glink state callback on port-open. */
		VERIFY(err,
			 wait_for_completion_timeout(&me->channel[cid].workport,
						RPC_TIMEOUT));
		if (err) {
			me->channel[cid].chan = NULL;
			goto bail;
		}
		kref_init(&me->channel[cid].kref);
		pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
						MAJOR(me->dev_no), cid);
		/* Pre-queue two rx intents; failure is non-fatal. */
		err = glink_queue_rx_intent(me->channel[cid].chan, NULL,
						FASTRPC_GLINK_INTENT_LEN);
		err |= glink_queue_rx_intent(me->channel[cid].chan, NULL,
						FASTRPC_GLINK_INTENT_LEN);
		if (err)
			pr_warn("adsprpc: initial intent fail for %d err %d\n",
					 cid, err);
		/* First open after an SSR on cid 0: reclaim the remote heap. */
		if (cid == 0 && me->channel[cid].ssrcount !=
				 me->channel[cid].prevssrcount) {
			if (fastrpc_mmap_remove_ssr(fl))
				pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
			me->channel[cid].prevssrcount =
						me->channel[cid].ssrcount;
		}
	}

bail:
	mutex_unlock(&me->smd_mutex);
	return err;
}
2847
Sathish Ambley36849af2017-02-02 09:35:55 -08002848static int fastrpc_device_open(struct inode *inode, struct file *filp)
2849{
2850 int err = 0;
Sathish Ambley567012b2017-03-06 11:55:04 -08002851 struct dentry *debugfs_file;
c_mtharue1a5ce12017-10-13 20:47:09 +05302852 struct fastrpc_file *fl = NULL;
Sathish Ambley36849af2017-02-02 09:35:55 -08002853 struct fastrpc_apps *me = &gfa;
2854
c_mtharue1a5ce12017-10-13 20:47:09 +05302855 VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
Sathish Ambley36849af2017-02-02 09:35:55 -08002856 if (err)
2857 return err;
Sathish Ambley567012b2017-03-06 11:55:04 -08002858 debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
2859 fl, &debugfs_fops);
Sathish Ambley36849af2017-02-02 09:35:55 -08002860 context_list_ctor(&fl->clst);
2861 spin_lock_init(&fl->hlock);
2862 INIT_HLIST_HEAD(&fl->maps);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302863 INIT_HLIST_HEAD(&fl->perf);
Sathish Ambley36849af2017-02-02 09:35:55 -08002864 INIT_HLIST_HEAD(&fl->bufs);
2865 INIT_HLIST_NODE(&fl->hn);
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302866 fl->sessionid = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08002867 fl->tgid = current->tgid;
2868 fl->apps = me;
2869 fl->mode = FASTRPC_MODE_SERIAL;
2870 fl->cid = -1;
Sathish Ambley567012b2017-03-06 11:55:04 -08002871 if (debugfs_file != NULL)
2872 fl->debugfs_file = debugfs_file;
2873 memset(&fl->perf, 0, sizeof(fl->perf));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302874 fl->qos_request = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08002875 filp->private_data = fl;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302876 mutex_init(&fl->map_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302877 mutex_init(&fl->fl_map_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08002878 spin_lock(&me->hlock);
2879 hlist_add_head(&fl->hn, &me->drivers);
2880 spin_unlock(&me->hlock);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302881 mutex_init(&fl->perf_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08002882 return 0;
2883}
2884
/*
 * FASTRPC_IOCTL_GETINFO backend.  On the first call *info carries the
 * requested channel id: bind the file to that channel and allocate its
 * SMMU session.  On every call *info is rewritten to 1/0 indicating
 * whether the session's SMMU is enabled.  Returns 0 on success,
 * negative (VERIFY convention) on bad cid or allocation failure.
 */
static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
{
	int err = 0;
	uint32_t cid;

	VERIFY(err, fl != NULL);
	if (err)
		goto bail;
	/* First call on this fd: bind it to the requested channel. */
	if (fl->cid == -1) {
		cid = *info;
		/* cid is unsigned, so only the upper bound needs checking. */
		VERIFY(err, cid < NUM_CHANNELS);
		if (err)
			goto bail;
		fl->cid = cid;
		fl->ssrcount = fl->apps->channel[cid].ssrcount;
		VERIFY(err, !fastrpc_session_alloc_locked(
				&fl->apps->channel[cid], 0, &fl->sctx));
		if (err)
			goto bail;
	}
	VERIFY(err, fl->sctx != NULL);
	if (err)
		goto bail;
	*info = (fl->sctx->smmu.enabled ? 1 : 0);
bail:
	return err;
}
2912
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302913static int fastrpc_internal_control(struct fastrpc_file *fl,
2914 struct fastrpc_ioctl_control *cp)
2915{
2916 int err = 0;
2917 int latency;
2918
2919 VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
2920 if (err)
2921 goto bail;
2922 VERIFY(err, !IS_ERR_OR_NULL(cp));
2923 if (err)
2924 goto bail;
2925
2926 switch (cp->req) {
2927 case FASTRPC_CONTROL_LATENCY:
2928 latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
2929 fl->apps->latency : PM_QOS_DEFAULT_VALUE;
2930 VERIFY(err, latency != 0);
2931 if (err)
2932 goto bail;
2933 if (!fl->qos_request) {
2934 pm_qos_add_request(&fl->pm_qos_req,
2935 PM_QOS_CPU_DMA_LATENCY, latency);
2936 fl->qos_request = 1;
2937 } else
2938 pm_qos_update_request(&fl->pm_qos_req, latency);
2939 break;
2940 default:
2941 err = -ENOTTY;
2942 break;
2943 }
2944bail:
2945 return err;
2946}
2947
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002948static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
2949 unsigned long ioctl_param)
2950{
2951 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07002952 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002953 struct fastrpc_ioctl_mmap mmap;
2954 struct fastrpc_ioctl_munmap munmap;
c_mtharu7bd6a422017-10-17 18:15:37 +05302955 struct fastrpc_ioctl_munmap_fd munmap_fd;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002956 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002957 struct fastrpc_ioctl_perf perf;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302958 struct fastrpc_ioctl_control cp;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002959 } p;
2960 void *param = (char *)ioctl_param;
2961 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2962 int size = 0, err = 0;
2963 uint32_t info;
2964
c_mtharue1a5ce12017-10-13 20:47:09 +05302965 p.inv.fds = NULL;
2966 p.inv.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002967 p.inv.crc = NULL;
tharun kumar9f899ea2017-07-03 17:07:03 +05302968 spin_lock(&fl->hlock);
2969 if (fl->file_close == 1) {
2970 err = EBADF;
2971 pr_warn("ADSPRPC: fastrpc_device_release is happening, So not sending any new requests to DSP");
2972 spin_unlock(&fl->hlock);
2973 goto bail;
2974 }
2975 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002976
2977 switch (ioctl_num) {
2978 case FASTRPC_IOCTL_INVOKE:
2979 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07002980 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002981 case FASTRPC_IOCTL_INVOKE_FD:
2982 if (!size)
2983 size = sizeof(struct fastrpc_ioctl_invoke_fd);
2984 /* fall through */
2985 case FASTRPC_IOCTL_INVOKE_ATTRS:
2986 if (!size)
2987 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07002988 /* fall through */
2989 case FASTRPC_IOCTL_INVOKE_CRC:
2990 if (!size)
2991 size = sizeof(struct fastrpc_ioctl_invoke_crc);
c_mtharue1a5ce12017-10-13 20:47:09 +05302992 K_COPY_FROM_USER(err, 0, &p.inv, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002993 if (err)
2994 goto bail;
2995 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
2996 0, &p.inv)));
2997 if (err)
2998 goto bail;
2999 break;
3000 case FASTRPC_IOCTL_MMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303001 K_COPY_FROM_USER(err, 0, &p.mmap, param,
3002 sizeof(p.mmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303003 if (err)
3004 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003005 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
3006 if (err)
3007 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303008 K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003009 if (err)
3010 goto bail;
3011 break;
3012 case FASTRPC_IOCTL_MUNMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303013 K_COPY_FROM_USER(err, 0, &p.munmap, param,
3014 sizeof(p.munmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303015 if (err)
3016 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003017 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
3018 &p.munmap)));
3019 if (err)
3020 goto bail;
3021 break;
c_mtharu7bd6a422017-10-17 18:15:37 +05303022 case FASTRPC_IOCTL_MUNMAP_FD:
3023 K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
3024 sizeof(p.munmap_fd));
3025 if (err)
3026 goto bail;
3027 VERIFY(err, 0 == (err = fastrpc_internal_munmap_fd(fl,
3028 &p.munmap_fd)));
3029 if (err)
3030 goto bail;
3031 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003032 case FASTRPC_IOCTL_SETMODE:
3033 switch ((uint32_t)ioctl_param) {
3034 case FASTRPC_MODE_PARALLEL:
3035 case FASTRPC_MODE_SERIAL:
3036 fl->mode = (uint32_t)ioctl_param;
3037 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003038 case FASTRPC_MODE_PROFILE:
3039 fl->profile = (uint32_t)ioctl_param;
3040 break;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05303041 case FASTRPC_MODE_SESSION:
3042 fl->sessionid = 1;
3043 fl->tgid |= (1 << SESSION_ID_INDEX);
3044 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003045 default:
3046 err = -ENOTTY;
3047 break;
3048 }
3049 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003050 case FASTRPC_IOCTL_GETPERF:
c_mtharue1a5ce12017-10-13 20:47:09 +05303051 K_COPY_FROM_USER(err, 0, &p.perf,
3052 param, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003053 if (err)
3054 goto bail;
3055 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
3056 if (p.perf.keys) {
3057 char *keys = PERF_KEYS;
3058
c_mtharue1a5ce12017-10-13 20:47:09 +05303059 K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
3060 keys, strlen(keys)+1);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003061 if (err)
3062 goto bail;
3063 }
3064 if (p.perf.data) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303065 struct fastrpc_perf *perf = NULL, *fperf = NULL;
3066 struct hlist_node *n = NULL;
3067
3068 mutex_lock(&fl->perf_mutex);
3069 hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
3070 if (perf->tid == current->pid) {
3071 fperf = perf;
3072 break;
3073 }
3074 }
3075
3076 mutex_unlock(&fl->perf_mutex);
3077
3078 if (fperf) {
3079 K_COPY_TO_USER(err, 0, (void *)p.perf.data,
3080 fperf, sizeof(*fperf));
3081 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003082 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303083 K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003084 if (err)
3085 goto bail;
3086 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303087 case FASTRPC_IOCTL_CONTROL:
c_mtharue1a5ce12017-10-13 20:47:09 +05303088 K_COPY_FROM_USER(err, 0, &p.cp, param,
3089 sizeof(p.cp));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303090 if (err)
3091 goto bail;
3092 VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
3093 if (err)
3094 goto bail;
3095 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003096 case FASTRPC_IOCTL_GETINFO:
c_mtharue1a5ce12017-10-13 20:47:09 +05303097 K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
Sathish Ambley36849af2017-02-02 09:35:55 -08003098 if (err)
3099 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003100 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
3101 if (err)
3102 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303103 K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003104 if (err)
3105 goto bail;
3106 break;
3107 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08003108 p.init.attrs = 0;
3109 p.init.siglen = 0;
3110 size = sizeof(struct fastrpc_ioctl_init);
3111 /* fall through */
3112 case FASTRPC_IOCTL_INIT_ATTRS:
3113 if (!size)
3114 size = sizeof(struct fastrpc_ioctl_init_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +05303115 K_COPY_FROM_USER(err, 0, &p.init, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003116 if (err)
3117 goto bail;
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05303118 VERIFY(err, p.init.init.filelen >= 0 &&
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303119 p.init.init.filelen < INIT_FILELEN_MAX);
3120 if (err)
3121 goto bail;
3122 VERIFY(err, p.init.init.memlen >= 0 &&
3123 p.init.init.memlen < INIT_MEMLEN_MAX);
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05303124 if (err)
3125 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303126 VERIFY(err, 0 == (err = fastrpc_init_process(fl, &p.init)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003127 if (err)
3128 goto bail;
3129 break;
3130
3131 default:
3132 err = -ENOTTY;
3133 pr_info("bad ioctl: %d\n", ioctl_num);
3134 break;
3135 }
3136 bail:
3137 return err;
3138}
3139
/*
 * Subsystem-restart notifier.  The owning channel is recovered from the
 * embedded notifier_block.  BEFORE_SHUTDOWN: bump the SSR generation,
 * mark the subsystem down, close the glink channel, and wake waiting
 * clients.  RAMDUMP_NOTIFICATION: arm the remote-heap ramdump.
 * AFTER_POWERUP: mark the subsystem usable again.
 */
static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
					unsigned long code,
					void *data)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_channel_ctx *ctx;
	struct notif_data *notifdata = data;
	int cid;

	ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
	/* Channel id == index of ctx within the channel array. */
	cid = ctx - &me->channel[0];
	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		mutex_lock(&me->smd_mutex);
		ctx->ssrcount++;
		ctx->issubsystemup = 0;
		if (ctx->chan) {
			fastrpc_glink_close(ctx->chan, cid);
			ctx->chan = NULL;
			pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
				 gcinfo[cid].name, MAJOR(me->dev_no), cid);
		}
		mutex_unlock(&me->smd_mutex);
		/* cid 0: force static PD re-init on next use. */
		if (cid == 0)
			me->staticpd_flags = 0;
		fastrpc_notify_drivers(me, cid);
	} else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
		if (me->channel[0].remoteheap_ramdump_dev &&
				notifdata->enable_ramdump) {
			me->channel[0].ramdumpenabled = 1;
		}
	} else if (code == SUBSYS_AFTER_POWERUP) {
		ctx->issubsystemup = 1;
	}

	return NOTIFY_DONE;
}
3176
/*
 * Audio protection-domain restart (PDR) notifier.  The owning static PD
 * is recovered from the embedded notifier_block.  SERVICE DOWN: bump the
 * PDR generation, mark the PD down, clear the static-PD init flag for
 * the audio service, and notify affected clients.  RAMDUMP: arm the
 * remote-heap ramdump.  SERVICE UP: mark the PD usable again.
 */
static int fastrpc_audio_pdr_notifier_cb(struct notifier_block *pdrnb,
					unsigned long code,
					void *data)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_static_pd *spd;
	struct notif_data *notifdata = data;

	spd = container_of(pdrnb, struct fastrpc_static_pd, pdrnb);
	if (code == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01) {
		mutex_lock(&me->smd_mutex);
		spd->pdrcount++;
		spd->ispdup = 0;
		pr_info("ADSPRPC: Audio PDR notifier %d %s\n",
			 MAJOR(me->dev_no), spd->spdname);
		mutex_unlock(&me->smd_mutex);
		if (!strcmp(spd->spdname,
				AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME))
			me->staticpd_flags = 0;
		fastrpc_notify_pdr_drivers(me, spd->spdname);
	} else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
		if (me->channel[0].remoteheap_ramdump_dev &&
				notifdata->enable_ramdump) {
			me->channel[0].ramdumpenabled = 1;
		}
	} else if (code == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
		spd->ispdup = 1;
	}

	return NOTIFY_DONE;
}
3208
/*
 * Service-locator callback: once the locator resolves the audio PD's
 * domain, register the PDR state notifier for it.  Exactly one domain is
 * expected; anything else is logged and ignored.  LOCATOR_DOWN is only
 * logged.  Always returns NOTIFY_DONE.
 */
static int fastrpc_get_service_location_notify(struct notifier_block *nb,
				 unsigned long opcode, void *data)
{
	struct fastrpc_static_pd *spd;
	struct pd_qmi_client_data *pdr = data;
	int curr_state = 0;

	spd = container_of(nb, struct fastrpc_static_pd, get_service_nb);
	if (opcode == LOCATOR_DOWN) {
		pr_err("ADSPRPC: Audio PD restart notifier locator down\n");
		return NOTIFY_DONE;
	}

	if (pdr->total_domains == 1) {
		spd->pdrhandle = service_notif_register_notifier(
				pdr->domain_list[0].name,
				pdr->domain_list[0].instance_id,
				&spd->pdrnb, &curr_state);
		if (IS_ERR(spd->pdrhandle))
			pr_err("ADSPRPC: Unable to register notifier\n");
	} else
		pr_err("ADSPRPC: Service returned invalid domains\n");

	return NOTIFY_DONE;
}
3234
/* Character-device file operations for the /dev/adsprpc-* nodes. */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = compat_fastrpc_device_ioctl,
};
3241
/* Device-tree compatibles handled by fastrpc_probe(). */
static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,msm-fastrpc-adsp", },
	{ .compatible = "qcom,msm-fastrpc-compute", },
	{ .compatible = "qcom,msm-fastrpc-compute-cb", },
	{ .compatible = "qcom,msm-adsprpc-mem-region", },
	{}
};
3249
/*
 * Probe one SMMU context-bank sub-device: match its DT "label" to a
 * channel, fill in the next free session slot from the DT properties,
 * create and attach an IOMMU mapping (secure banks get a lower VA base
 * and a secure VMID), and register the "global" debugfs node.  Returns
 * 0 on success, negative (VERIFY convention) on any failure.
 */
static int fastrpc_cb_probe(struct device *dev)
{
	struct fastrpc_channel_ctx *chan;
	struct fastrpc_session_ctx *sess;
	struct of_phandle_args iommuspec;
	const char *name;
	unsigned int start = 0x80000000;
	int err = 0, i;
	int secure_vmid = VMID_CP_PIXEL;

	VERIFY(err, NULL != (name = of_get_property(dev->of_node,
					 "label", NULL)));
	if (err)
		goto bail;
	/* Find the channel whose name matches the DT label. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!gcinfo[i].name)
			continue;
		if (!strcmp(name, gcinfo[i].name))
			break;
	}
	VERIFY(err, i < NUM_CHANNELS);
	if (err)
		goto bail;
	chan = &gcinfo[i];
	VERIFY(err, chan->sesscount < NUM_SESSIONS);
	if (err)
		goto bail;

	VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
						"#iommu-cells", 0, &iommuspec));
	if (err)
		goto bail;
	sess = &chan->session[chan->sesscount];
	/* Context-bank id is the low nibble of the first iommus cell. */
	sess->smmu.cb = iommuspec.args[0] & 0xf;
	sess->used = 0;
	sess->smmu.coherent = of_property_read_bool(dev->of_node,
						"dma-coherent");
	sess->smmu.secure = of_property_read_bool(dev->of_node,
						"qcom,secure-context-bank");
	/* Secure banks use a lower IOVA base. */
	if (sess->smmu.secure)
		start = 0x60000000;
	VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
			arm_iommu_create_mapping(&platform_bus_type,
						start, 0x78000000)));
	if (err)
		goto bail;

	if (sess->smmu.secure)
		iommu_domain_set_attr(sess->smmu.mapping->domain,
				DOMAIN_ATTR_SECURE_VMID,
				&secure_vmid);

	VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
	if (err)
		goto bail;
	sess->smmu.dev = dev;
	sess->smmu.enabled = 1;
	/* Publish the slot only after it is fully initialized. */
	chan->sesscount++;
	debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
					NULL, &debugfs_fops);
bail:
	return err;
}
3313
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05303314static void init_secure_vmid_list(struct device *dev, char *prop_name,
3315 struct secure_vm *destvm)
3316{
3317 int err = 0;
3318 u32 len = 0, i = 0;
3319 u32 *rhvmlist = NULL;
3320 u32 *rhvmpermlist = NULL;
3321
3322 if (!of_find_property(dev->of_node, prop_name, &len))
3323 goto bail;
3324 if (len == 0)
3325 goto bail;
3326 len /= sizeof(u32);
3327 VERIFY(err, NULL != (rhvmlist = kcalloc(len, sizeof(u32), GFP_KERNEL)));
3328 if (err)
3329 goto bail;
3330 VERIFY(err, NULL != (rhvmpermlist = kcalloc(len, sizeof(u32),
3331 GFP_KERNEL)));
3332 if (err)
3333 goto bail;
3334 for (i = 0; i < len; i++) {
3335 err = of_property_read_u32_index(dev->of_node, prop_name, i,
3336 &rhvmlist[i]);
3337 rhvmpermlist[i] = PERM_READ | PERM_WRITE | PERM_EXEC;
3338 pr_info("ADSPRPC: Secure VMID = %d", rhvmlist[i]);
3339 if (err) {
3340 pr_err("ADSPRPC: Failed to read VMID\n");
3341 goto bail;
3342 }
3343 }
3344 destvm->vmid = rhvmlist;
3345 destvm->vmperm = rhvmpermlist;
3346 destvm->vmcount = len;
3347bail:
3348 if (err) {
3349 kfree(rhvmlist);
3350 kfree(rhvmpermlist);
3351 }
3352}
3353
/*
 * fastrpc_probe() - platform probe shared by several DT compatibles.
 *
 *  - "qcom,msm-fastrpc-compute": parse the remote-heap secure VMID list
 *    into gcinfo[0].rhvm and read the optional "qcom,rpc-latency-us"
 *    hint, then fall through to of_platform_populate() at the bottom.
 *  - "qcom,msm-fastrpc-compute-cb": delegate entirely to
 *    fastrpc_cb_probe() to set up one SMMU context-bank session.
 *  - "qcom,msm-adsprpc-mem-region": locate the ADSP ION heap's CMA region
 *    and, unless "restrict-access" is present, hyp-assign it to the
 *    remote VMs; returns without populating children.
 *  - "qcom,fastrpc-adsp-audio-pdr": register for audio PD
 *    service-location notifications (failure is logged, non-fatal).
 *
 * Returns 0 on success or a negative errno.
 */
static int fastrpc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device *dev = &pdev->dev;
	struct smq_phy_page range;
	struct device_node *ion_node, *node;
	struct platform_device *ion_pdev;
	struct cma *cma;
	uint32_t val;
	int ret = 0;


	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute")) {
		init_secure_vmid_list(dev, "qcom,adsp-remoteheap-vmid",
							&gcinfo[0].rhvm);


		/* Optional tuning knob; me->latency stays 0 if absent. */
		of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
			&me->latency);
	}
	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-fastrpc-compute-cb"))
		return fastrpc_cb_probe(dev);

	if (of_device_is_compatible(dev->of_node,
					"qcom,msm-adsprpc-mem-region")) {
		me->dev = dev;
		/* range.addr == 0 means "no CMA region found" below. */
		range.addr = 0;
		ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
		/*
		 * NOTE(review): ion_node/ion_pdev references are never
		 * dropped (no of_node_put/put_device) — confirm intended.
		 */
		if (ion_node) {
			for_each_available_child_of_node(ion_node, node) {
				if (of_property_read_u32(node, "reg", &val))
					continue;
				if (val != ION_ADSP_HEAP_ID)
					continue;
				ion_pdev = of_find_device_by_node(node);
				if (!ion_pdev)
					break;
				cma = dev_get_cma_area(&ion_pdev->dev);
				if (cma) {
					range.addr = cma_get_base(cma);
					range.size = (size_t)cma_get_size(cma);
				}
				break;
			}
		}
		if (range.addr && !of_property_read_bool(dev->of_node,
							 "restrict-access")) {
			/*
			 * Share the region with the modem, sensors and ADSP
			 * VMs (HLOS keeps access) with full R/W/X rights.
			 */
			int srcVM[1] = {VMID_HLOS};
			int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
						VMID_ADSP_Q6};
			int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				PERM_READ | PERM_WRITE | PERM_EXEC,
				};

			VERIFY(err, !hyp_assign_phys(range.addr, range.size,
					srcVM, 1, destVM, destVMperm, 4));
			if (err)
				goto bail;
		}
		return 0;
	}
	if (of_property_read_bool(dev->of_node,
					"qcom,fastrpc-adsp-audio-pdr")) {
		int session;

		VERIFY(err, !fastrpc_get_adsp_session(
			AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
		if (err)
			goto spdbail;
		me->channel[0].spd[session].get_service_nb.notifier_call =
					fastrpc_get_service_location_notify;
		ret = get_service_location(
				AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
				AUDIO_PDR_ADSP_SERVICE_NAME,
				&me->channel[0].spd[session].get_service_nb);
		if (ret)
			pr_err("ADSPRPC: Get service location failed: %d\n",
								ret);
	}
spdbail:
	/* PDR setup is best-effort: clear err before populating children. */
	err = 0;
	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
					  fastrpc_match_table,
					  NULL, &pdev->dev));
	if (err)
		goto bail;
bail:
	return err;
}
3448
3449static void fastrpc_deinit(void)
3450{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303451 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003452 struct fastrpc_channel_ctx *chan = gcinfo;
3453 int i, j;
3454
3455 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
3456 if (chan->chan) {
3457 kref_put_mutex(&chan->kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303458 fastrpc_channel_close, &me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05303459 chan->chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003460 }
3461 for (j = 0; j < NUM_SESSIONS; j++) {
3462 struct fastrpc_session_ctx *sess = &chan->session[j];
c_mtharue1a5ce12017-10-13 20:47:09 +05303463 if (sess->smmu.dev) {
3464 arm_iommu_detach_device(sess->smmu.dev);
3465 sess->smmu.dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003466 }
3467 if (sess->smmu.mapping) {
3468 arm_iommu_release_mapping(sess->smmu.mapping);
c_mtharue1a5ce12017-10-13 20:47:09 +05303469 sess->smmu.mapping = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003470 }
3471 }
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05303472 kfree(chan->rhvm.vmid);
3473 kfree(chan->rhvm.vmperm);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003474 }
3475}
3476
3477static struct platform_driver fastrpc_driver = {
3478 .probe = fastrpc_probe,
3479 .driver = {
3480 .name = "fastrpc",
3481 .owner = THIS_MODULE,
3482 .of_match_table = fastrpc_match_table,
3483 },
3484};
3485
/*
 * fastrpc_device_init() - module initialization.
 *
 * Registers the platform driver, allocates the char-device region, adds
 * the cdev, creates the class and a single device node (minor 0), hooks a
 * subsystem-restart notifier onto every channel, creates the driver's ION
 * client and finally the debugfs root.  Errors unwind in reverse order
 * through the labelled goto chain; fastrpc_deinit() runs on every failure
 * path and tolerates partially initialized state.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	struct device *dev = NULL;
	int err = 0, i;

	memset(me, 0, sizeof(*me));

	fastrpc_init(me);
	me->dev = NULL;
	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
	if (err)
		goto register_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
					DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	/* Only one minor is added even though NUM_CHANNELS were reserved. */
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
				1));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
	dev = device_create(me->class, NULL,
				MKDEV(MAJOR(me->dev_no), 0),
				NULL, gcinfo[0].name);
	VERIFY(err, !IS_ERR_OR_NULL(dev));
	if (err)
		goto device_create_bail;
	/* Every channel shares the single device node created above. */
	for (i = 0; i < NUM_CHANNELS; i++) {
		me->channel[i].dev = dev;
		me->channel[i].ssrcount = 0;
		me->channel[i].prevssrcount = 0;
		me->channel[i].issubsystemup = 1;
		me->channel[i].ramdumpenabled = 0;
		me->channel[i].remoteheap_ramdump_dev = NULL;
		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
		/*
		 * NOTE(review): the return value is stored unchecked; on
		 * failure it may be an ERR_PTR, which the cleanup below
		 * treats as a valid handle — confirm against the notifier
		 * API's error convention.
		 */
		me->channel[i].handle = subsys_notif_register_notifier(
							gcinfo[i].subsys,
							&me->channel[i].nb);
	}

	me->client = msm_ion_client_create(DEVICE_NAME);
	VERIFY(err, !IS_ERR_OR_NULL(me->client));
	if (err)
		goto device_create_bail;
	debugfs_root = debugfs_create_dir("adsprpc", NULL);
	return 0;
device_create_bail:
	/*
	 * Reached either before any notifier was registered (handles are
	 * NULL from the memset) or after all of them, so unregistering
	 * every non-NULL handle is safe here.
	 */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (me->channel[i].handle)
			subsys_notif_unregister_notifier(me->channel[i].handle,
							&me->channel[i].nb);
	}
	if (!IS_ERR_OR_NULL(dev))
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
alloc_chrdev_bail:
register_bail:
	fastrpc_deinit();
	return err;
}
3557
/*
 * fastrpc_device_exit() - module teardown, roughly the reverse of
 * fastrpc_device_init(): destroy all open file contexts and channel/SMMU
 * state, remove device nodes and SSR notifiers per channel, then the
 * class, cdev, chrdev region, ION client and debugfs tree.
 */
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;
	int i;

	fastrpc_file_list_dtor(me);
	fastrpc_deinit();
	for (i = 0; i < NUM_CHANNELS; i++) {
		/* Skip channel slots with no backing name/device. */
		if (!gcinfo[i].name)
			continue;
		/*
		 * NOTE(review): init created only minor 0, but this destroys
		 * minor i for each channel — verify the intended minor
		 * numbering before changing either side.
		 */
		device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
		subsys_notif_unregister_notifier(me->channel[i].handle,
						&me->channel[i].nb);
	}
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
	ion_client_destroy(me->client);
	debugfs_remove_recursive(debugfs_root);
}
3578
/* late_initcall: defer registration until earlier initcall levels (and the
 * built-in subsystems this driver calls into) have run.
 */
late_initcall(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");