blob: 7776b08475e1bc90a8b5d2b43dac6563340495c9 [file] [log] [blame]
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001/*
Tharun Kumar Merugubcd6fbf2018-01-04 17:49:34 +05302 * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +053029#include <soc/qcom/smd.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070030#include <soc/qcom/subsystem_notif.h>
31#include <soc/qcom/subsystem_restart.h>
Tharun Kumar Merugudf860662018-01-17 19:59:50 +053032#include <soc/qcom/service-notifier.h>
33#include <soc/qcom/service-locator.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070034#include <linux/scatterlist.h>
35#include <linux/fs.h>
36#include <linux/uaccess.h>
37#include <linux/device.h>
38#include <linux/of.h>
39#include <linux/of_address.h>
40#include <linux/of_platform.h>
41#include <linux/dma-contiguous.h>
42#include <linux/cma.h>
43#include <linux/iommu.h>
44#include <linux/kref.h>
45#include <linux/sort.h>
46#include <linux/msm_dma_iommu_mapping.h>
47#include <asm/dma-iommu.h>
c_mtharue1a5ce12017-10-13 20:47:09 +053048#include <soc/qcom/scm.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070049#include "adsprpc_compat.h"
50#include "adsprpc_shared.h"
c_mtharue1a5ce12017-10-13 20:47:09 +053051#include <soc/qcom/ramdump.h>
Sathish Ambley1ca68232017-01-19 10:32:55 -080052#include <linux/debugfs.h>
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053053#include <linux/pm_qos.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070054#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
55#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
56#define TZ_PIL_AUTH_QDSP6_PROC 1
c_mtharue1a5ce12017-10-13 20:47:09 +053057#define ADSP_MMAP_HEAP_ADDR 4
58#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
Tharun Kumar Merugue073de72018-07-30 23:57:47 +053059#define ADSP_MMAP_ADD_PAGES 0x1000
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +053060#define FASTRPC_DMAHANDLE_NOMAP (16)
61
Sathish Ambley69e1ab02016-10-18 10:28:15 -070062#define FASTRPC_ENOSUCH 39
63#define VMID_SSC_Q6 5
64#define VMID_ADSP_Q6 6
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +053065#define DEBUGFS_SIZE 3072
66#define UL_SIZE 25
67#define PID_SIZE 10
Sathish Ambley69e1ab02016-10-18 10:28:15 -070068
Tharun Kumar Merugudf860662018-01-17 19:59:50 +053069#define AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME "audio_pdr_adsprpc"
70#define AUDIO_PDR_ADSP_SERVICE_NAME "avs/audio"
71
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +053072#define SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME "sensors_pdr_adsprpc"
73#define SENSORS_PDR_ADSP_SERVICE_NAME "tms/servreg"
74
Sathish Ambley69e1ab02016-10-18 10:28:15 -070075#define RPC_TIMEOUT (5 * HZ)
76#define BALIGN 128
77#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
78#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
Sathish Ambleybae51902017-07-03 15:00:49 -070079#define M_FDLIST (16)
80#define M_CRCLIST (64)
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +053081#define SESSION_ID_INDEX (30)
c_mtharufdac6892017-10-12 13:09:01 +053082#define FASTRPC_CTX_MAGIC (0xbeeddeed)
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +053083#define FASTRPC_CTX_MAX (256)
84#define FASTRPC_CTXID_MASK (0xFF0)
Tharun Kumar Merugud996b262018-07-18 22:28:53 +053085#define NUM_DEVICES 2 /* adsprpc-smd, adsprpc-smd-secure */
86#define MINOR_NUM_DEV 0
87#define MINOR_NUM_SECURE_DEV 1
88#define NON_SECURE_CHANNEL 0
89#define SECURE_CHANNEL 1
90
91#define ADSP_DOMAIN_ID (0)
92#define MDSP_DOMAIN_ID (1)
93#define SDSP_DOMAIN_ID (2)
94#define CDSP_DOMAIN_ID (3)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070095
96#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
97
98#define FASTRPC_LINK_STATE_DOWN (0x0)
99#define FASTRPC_LINK_STATE_UP (0x1)
100#define FASTRPC_LINK_DISCONNECTED (0x0)
101#define FASTRPC_LINK_CONNECTING (0x1)
102#define FASTRPC_LINK_CONNECTED (0x3)
103#define FASTRPC_LINK_DISCONNECTING (0x7)
c_mtharu314a4202017-11-15 22:09:17 +0530104#define FASTRPC_LINK_REMOTE_DISCONNECTING (0x8)
105#define FASTRPC_GLINK_INTENT_LEN (64)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700106
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530107#define PERF_KEYS \
108 "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke:tid:ptr"
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800109#define FASTRPC_STATIC_HANDLE_LISTENER (3)
110#define FASTRPC_STATIC_HANDLE_MAX (20)
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +0530111#define FASTRPC_LATENCY_CTRL_ENB (1)
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800112
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +0530113#define INIT_FILELEN_MAX (2*1024*1024)
114#define INIT_MEMLEN_MAX (8*1024*1024)
115
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800116#define PERF_END (void)0
117
118#define PERF(enb, cnt, ff) \
119 {\
120 struct timespec startT = {0};\
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530121 int64_t *counter = cnt;\
122 if (enb && counter) {\
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800123 getnstimeofday(&startT);\
124 } \
125 ff ;\
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530126 if (enb && counter) {\
127 *counter += getnstimediff(&startT);\
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800128 } \
129 }
130
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530131#define GET_COUNTER(perf_ptr, offset) \
132 (perf_ptr != NULL ?\
133 (((offset >= 0) && (offset < PERF_KEY_MAX)) ?\
134 (int64_t *)(perf_ptr + offset)\
135 : (int64_t *)NULL) : (int64_t *)NULL)
136
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700137static int fastrpc_glink_open(int cid);
138static void fastrpc_glink_close(void *chan, int cid);
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +0530139static int fastrpc_pdr_notifier_cb(struct notifier_block *nb,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530140 unsigned long code,
141 void *data);
Sathish Ambley1ca68232017-01-19 10:32:55 -0800142static struct dentry *debugfs_root;
143static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700144
145static inline uint64_t buf_page_start(uint64_t buf)
146{
147 uint64_t start = (uint64_t) buf & PAGE_MASK;
148 return start;
149}
150
151static inline uint64_t buf_page_offset(uint64_t buf)
152{
153 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
154 return offset;
155}
156
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530157static inline uint64_t buf_num_pages(uint64_t buf, size_t len)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700158{
159 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
160 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530161 uint64_t nPages = end - start + 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700162 return nPages;
163}
164
165static inline uint64_t buf_page_size(uint32_t size)
166{
167 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
168
169 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
170}
171
172static inline void *uint64_to_ptr(uint64_t addr)
173{
174 void *ptr = (void *)((uintptr_t)addr);
175
176 return ptr;
177}
178
179static inline uint64_t ptr_to_uint64(void *ptr)
180{
181 uint64_t addr = (uint64_t)((uintptr_t)ptr);
182
183 return addr;
184}
185
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +0530186struct secure_vm {
187 int *vmid;
188 int *vmperm;
189 int vmcount;
190};
191
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700192struct fastrpc_file;
193
194struct fastrpc_buf {
195 struct hlist_node hn;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530196 struct hlist_node hn_rem;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700197 struct fastrpc_file *fl;
198 void *virt;
199 uint64_t phys;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530200 size_t size;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530201 unsigned long dma_attr;
202 uintptr_t raddr;
203 uint32_t flags;
204 int remote;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700205};
206
207struct fastrpc_ctx_lst;
208
209struct overlap {
210 uintptr_t start;
211 uintptr_t end;
212 int raix;
213 uintptr_t mstart;
214 uintptr_t mend;
215 uintptr_t offset;
216};
217
218struct smq_invoke_ctx {
219 struct hlist_node hn;
220 struct completion work;
221 int retval;
222 int pid;
223 int tgid;
224 remote_arg_t *lpra;
225 remote_arg64_t *rpra;
226 int *fds;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700227 struct fastrpc_mmap **maps;
228 struct fastrpc_buf *buf;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530229 size_t used;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700230 struct fastrpc_file *fl;
231 uint32_t sc;
232 struct overlap *overs;
233 struct overlap **overps;
234 struct smq_msg msg;
c_mtharufdac6892017-10-12 13:09:01 +0530235 unsigned int magic;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530236 unsigned int *attrs;
237 uint32_t *crc;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +0530238 uint64_t ctxid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700239};
240
241struct fastrpc_ctx_lst {
242 struct hlist_head pending;
243 struct hlist_head interrupted;
244};
245
246struct fastrpc_smmu {
c_mtharue1a5ce12017-10-13 20:47:09 +0530247 struct device *dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700248 struct dma_iommu_mapping *mapping;
249 int cb;
250 int enabled;
251 int faults;
252 int secure;
253 int coherent;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +0530254 int sharedcb;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700255};
256
257struct fastrpc_session_ctx {
258 struct device *dev;
259 struct fastrpc_smmu smmu;
260 int used;
261};
262
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530263struct fastrpc_static_pd {
264 char *spdname;
265 struct notifier_block pdrnb;
266 struct notifier_block get_service_nb;
267 void *pdrhandle;
268 int pdrcount;
269 int prevpdrcount;
270 int ispdup;
271};
272
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700273struct fastrpc_glink_info {
274 int link_state;
275 int port_state;
276 struct glink_open_config cfg;
277 struct glink_link_info link_info;
278 void *link_notify_handle;
279};
280
281struct fastrpc_channel_ctx {
282 char *name;
283 char *subsys;
284 void *chan;
285 struct device *dev;
286 struct fastrpc_session_ctx session[NUM_SESSIONS];
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530287 struct fastrpc_static_pd spd[NUM_SESSIONS];
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700288 struct completion work;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +0530289 struct completion workport;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700290 struct notifier_block nb;
291 struct kref kref;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530292 int channel;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700293 int sesscount;
294 int ssrcount;
295 void *handle;
296 int prevssrcount;
c_mtharue1a5ce12017-10-13 20:47:09 +0530297 int issubsystemup;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700298 int vmid;
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +0530299 struct secure_vm rhvm;
c_mtharue1a5ce12017-10-13 20:47:09 +0530300 int ramdumpenabled;
301 void *remoteheap_ramdump_dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700302 struct fastrpc_glink_info link;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +0530303 /* Indicates, if channel is restricted to secure node only */
304 int secure;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700305};
306
307struct fastrpc_apps {
308 struct fastrpc_channel_ctx *channel;
309 struct cdev cdev;
310 struct class *class;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +0530311 struct mutex smd_mutex;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700312 struct smq_phy_page range;
313 struct hlist_head maps;
c_mtharue1a5ce12017-10-13 20:47:09 +0530314 uint32_t staticpd_flags;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700315 dev_t dev_no;
316 int compat;
317 struct hlist_head drivers;
318 spinlock_t hlock;
319 struct ion_client *client;
320 struct device *dev;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +0530321 unsigned int latency;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530322 bool glink;
323 bool legacy;
zhaochenfc798572018-08-17 15:32:37 +0800324 bool secure_flag;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +0530325 spinlock_t ctxlock;
326 struct smq_invoke_ctx *ctxtable[FASTRPC_CTX_MAX];
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700327};
328
329struct fastrpc_mmap {
330 struct hlist_node hn;
331 struct fastrpc_file *fl;
332 struct fastrpc_apps *apps;
333 int fd;
334 uint32_t flags;
335 struct dma_buf *buf;
336 struct sg_table *table;
337 struct dma_buf_attachment *attach;
338 struct ion_handle *handle;
339 uint64_t phys;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530340 size_t size;
341 uintptr_t va;
342 size_t len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700343 int refs;
344 uintptr_t raddr;
345 int uncached;
346 int secure;
347 uintptr_t attr;
348};
349
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530350enum fastrpc_perfkeys {
351 PERF_COUNT = 0,
352 PERF_FLUSH = 1,
353 PERF_MAP = 2,
354 PERF_COPY = 3,
355 PERF_LINK = 4,
356 PERF_GETARGS = 5,
357 PERF_PUTARGS = 6,
358 PERF_INVARGS = 7,
359 PERF_INVOKE = 8,
360 PERF_KEY_MAX = 9,
361};
362
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800363struct fastrpc_perf {
364 int64_t count;
365 int64_t flush;
366 int64_t map;
367 int64_t copy;
368 int64_t link;
369 int64_t getargs;
370 int64_t putargs;
371 int64_t invargs;
372 int64_t invoke;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530373 int64_t tid;
374 struct hlist_node hn;
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800375};
376
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700377struct fastrpc_file {
378 struct hlist_node hn;
379 spinlock_t hlock;
380 struct hlist_head maps;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530381 struct hlist_head cached_bufs;
382 struct hlist_head remote_bufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700383 struct fastrpc_ctx_lst clst;
384 struct fastrpc_session_ctx *sctx;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530385 struct fastrpc_buf *init_mem;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700386 struct fastrpc_session_ctx *secsctx;
387 uint32_t mode;
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800388 uint32_t profile;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +0530389 int sessionid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700390 int tgid;
391 int cid;
392 int ssrcount;
393 int pd;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530394 char *spdname;
tharun kumar9f899ea2017-07-03 17:07:03 +0530395 int file_close;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +0530396 int sharedcb;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700397 struct fastrpc_apps *apps;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530398 struct hlist_head perf;
Sathish Ambley1ca68232017-01-19 10:32:55 -0800399 struct dentry *debugfs_file;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530400 struct mutex perf_mutex;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +0530401 struct pm_qos_request pm_qos_req;
402 int qos_request;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +0530403 struct mutex map_mutex;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +0530404 struct mutex fl_map_mutex;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +0530405 int refcount;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +0530406 /* Identifies the device (MINOR_NUM_DEV / MINOR_NUM_SECURE_DEV) */
407 int dev_minor;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +0530408 char *debug_buf;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700409};
410
411static struct fastrpc_apps gfa;
412
413static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
414 {
415 .name = "adsprpc-smd",
416 .subsys = "adsp",
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530417 .channel = SMD_APPS_QDSP,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700418 .link.link_info.edge = "lpass",
419 .link.link_info.transport = "smem",
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530420 .spd = {
421 {
422 .spdname =
423 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
424 .pdrnb.notifier_call =
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +0530425 fastrpc_pdr_notifier_cb,
426 },
427 {
428 .spdname =
429 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
430 .pdrnb.notifier_call =
431 fastrpc_pdr_notifier_cb,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530432 }
433 },
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700434 },
435 {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700436 .name = "mdsprpc-smd",
437 .subsys = "modem",
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530438 .channel = SMD_APPS_MODEM,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700439 .link.link_info.edge = "mpss",
440 .link.link_info.transport = "smem",
441 },
442 {
Sathish Ambley36849af2017-02-02 09:35:55 -0800443 .name = "sdsprpc-smd",
444 .subsys = "slpi",
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530445 .channel = SMD_APPS_DSPS,
Sathish Ambley36849af2017-02-02 09:35:55 -0800446 .link.link_info.edge = "dsps",
447 .link.link_info.transport = "smem",
Sathish Ambley36849af2017-02-02 09:35:55 -0800448 },
449 {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700450 .name = "cdsprpc-smd",
451 .subsys = "cdsp",
452 .link.link_info.edge = "cdsp",
453 .link.link_info.transport = "smem",
454 },
455};
456
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +0530457static int hlosvm[1] = {VMID_HLOS};
458static int hlosvmperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
459
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800460static inline int64_t getnstimediff(struct timespec *start)
461{
462 int64_t ns;
463 struct timespec ts, b;
464
465 getnstimeofday(&ts);
466 b = timespec_sub(ts, *start);
467 ns = timespec_to_ns(&b);
468 return ns;
469}
470
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530471static inline int64_t *getperfcounter(struct fastrpc_file *fl, int key)
472{
473 int err = 0;
474 int64_t *val = NULL;
475 struct fastrpc_perf *perf = NULL, *fperf = NULL;
476 struct hlist_node *n = NULL;
477
478 VERIFY(err, !IS_ERR_OR_NULL(fl));
479 if (err)
480 goto bail;
481
482 mutex_lock(&fl->perf_mutex);
483 hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
484 if (perf->tid == current->pid) {
485 fperf = perf;
486 break;
487 }
488 }
489
490 if (IS_ERR_OR_NULL(fperf)) {
491 fperf = kzalloc(sizeof(*fperf), GFP_KERNEL);
492
493 VERIFY(err, !IS_ERR_OR_NULL(fperf));
494 if (err) {
495 mutex_unlock(&fl->perf_mutex);
496 kfree(fperf);
497 goto bail;
498 }
499
500 fperf->tid = current->pid;
501 hlist_add_head(&fperf->hn, &fl->perf);
502 }
503
504 val = ((int64_t *)fperf) + key;
505 mutex_unlock(&fl->perf_mutex);
506bail:
507 return val;
508}
509
510
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700511static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
512{
c_mtharue1a5ce12017-10-13 20:47:09 +0530513 struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700514 int vmid;
515
516 if (!fl)
517 return;
518 if (cache) {
519 spin_lock(&fl->hlock);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530520 hlist_add_head(&buf->hn, &fl->cached_bufs);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700521 spin_unlock(&fl->hlock);
522 return;
523 }
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530524 if (buf->remote) {
525 spin_lock(&fl->hlock);
526 hlist_del_init(&buf->hn_rem);
527 spin_unlock(&fl->hlock);
528 buf->remote = 0;
529 buf->raddr = 0;
530 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700531 if (!IS_ERR_OR_NULL(buf->virt)) {
532 int destVM[1] = {VMID_HLOS};
533 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
534
535 if (fl->sctx->smmu.cb)
536 buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
537 vmid = fl->apps->channel[fl->cid].vmid;
538 if (vmid) {
539 int srcVM[2] = {VMID_HLOS, vmid};
540
541 hyp_assign_phys(buf->phys, buf_page_size(buf->size),
542 srcVM, 2, destVM, destVMperm, 1);
543 }
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530544 dma_free_attrs(fl->sctx->smmu.dev, buf->size, buf->virt,
545 buf->phys, buf->dma_attr);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700546 }
547 kfree(buf);
548}
549
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530550static void fastrpc_cached_buf_list_free(struct fastrpc_file *fl)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700551{
552 struct fastrpc_buf *buf, *free;
553
554 do {
555 struct hlist_node *n;
556
c_mtharue1a5ce12017-10-13 20:47:09 +0530557 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700558 spin_lock(&fl->hlock);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530559 hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700560 hlist_del_init(&buf->hn);
561 free = buf;
562 break;
563 }
564 spin_unlock(&fl->hlock);
565 if (free)
566 fastrpc_buf_free(free, 0);
567 } while (free);
568}
569
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530570static void fastrpc_remote_buf_list_free(struct fastrpc_file *fl)
571{
572 struct fastrpc_buf *buf, *free;
573
574 do {
575 struct hlist_node *n;
576
577 free = NULL;
578 spin_lock(&fl->hlock);
579 hlist_for_each_entry_safe(buf, n, &fl->remote_bufs, hn_rem) {
580 free = buf;
581 break;
582 }
583 spin_unlock(&fl->hlock);
584 if (free)
585 fastrpc_buf_free(free, 0);
586 } while (free);
587}
588
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700589static void fastrpc_mmap_add(struct fastrpc_mmap *map)
590{
c_mtharue1a5ce12017-10-13 20:47:09 +0530591 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
592 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
593 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700594
c_mtharue1a5ce12017-10-13 20:47:09 +0530595 spin_lock(&me->hlock);
596 hlist_add_head(&map->hn, &me->maps);
597 spin_unlock(&me->hlock);
598 } else {
599 struct fastrpc_file *fl = map->fl;
600
c_mtharue1a5ce12017-10-13 20:47:09 +0530601 hlist_add_head(&map->hn, &fl->maps);
c_mtharue1a5ce12017-10-13 20:47:09 +0530602 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700603}
604
c_mtharue1a5ce12017-10-13 20:47:09 +0530605static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530606 uintptr_t va, size_t len, int mflags, int refs,
c_mtharue1a5ce12017-10-13 20:47:09 +0530607 struct fastrpc_mmap **ppmap)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700608{
c_mtharue1a5ce12017-10-13 20:47:09 +0530609 struct fastrpc_apps *me = &gfa;
610 struct fastrpc_mmap *match = NULL, *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700611 struct hlist_node *n;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530612
613 if ((va + len) < va)
614 return -EOVERFLOW;
c_mtharue1a5ce12017-10-13 20:47:09 +0530615 if (mflags == ADSP_MMAP_HEAP_ADDR ||
616 mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
617 spin_lock(&me->hlock);
618 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
619 if (va >= map->va &&
620 va + len <= map->va + map->len &&
621 map->fd == fd) {
622 if (refs)
623 map->refs++;
624 match = map;
625 break;
626 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700627 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530628 spin_unlock(&me->hlock);
629 } else {
c_mtharue1a5ce12017-10-13 20:47:09 +0530630 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
631 if (va >= map->va &&
632 va + len <= map->va + map->len &&
633 map->fd == fd) {
634 if (refs)
635 map->refs++;
636 match = map;
637 break;
638 }
639 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700640 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700641 if (match) {
642 *ppmap = match;
643 return 0;
644 }
645 return -ENOTTY;
646}
647
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530648static int dma_alloc_memory(dma_addr_t *region_phys, size_t size,
649 unsigned long dma_attrs)
c_mtharue1a5ce12017-10-13 20:47:09 +0530650{
651 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugu48d5ff32018-04-16 19:24:16 +0530652 void *vaddr = NULL;
c_mtharue1a5ce12017-10-13 20:47:09 +0530653
654 if (me->dev == NULL) {
655 pr_err("device adsprpc-mem is not initialized\n");
656 return -ENODEV;
657 }
Tharun Kumar Merugu48d5ff32018-04-16 19:24:16 +0530658 vaddr = dma_alloc_attrs(me->dev, size, region_phys, GFP_KERNEL,
659 dma_attrs);
660 if (!vaddr) {
c_mtharue1a5ce12017-10-13 20:47:09 +0530661 pr_err("ADSPRPC: Failed to allocate %x remote heap memory\n",
662 (unsigned int)size);
663 return -ENOMEM;
664 }
665 return 0;
666}
667
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700668static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530669 size_t len, struct fastrpc_mmap **ppmap)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700670{
c_mtharue1a5ce12017-10-13 20:47:09 +0530671 struct fastrpc_mmap *match = NULL, *map;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700672 struct hlist_node *n;
673 struct fastrpc_apps *me = &gfa;
674
675 spin_lock(&me->hlock);
676 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
677 if (map->raddr == va &&
678 map->raddr + map->len == va + len &&
679 map->refs == 1) {
680 match = map;
681 hlist_del_init(&map->hn);
682 break;
683 }
684 }
685 spin_unlock(&me->hlock);
686 if (match) {
687 *ppmap = match;
688 return 0;
689 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700690 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
691 if (map->raddr == va &&
692 map->raddr + map->len == va + len &&
693 map->refs == 1) {
694 match = map;
695 hlist_del_init(&map->hn);
696 break;
697 }
698 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700699 if (match) {
700 *ppmap = match;
701 return 0;
702 }
703 return -ENOTTY;
704}
705
c_mtharu7bd6a422017-10-17 18:15:37 +0530706static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700707{
c_mtharue1a5ce12017-10-13 20:47:09 +0530708 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700709 struct fastrpc_file *fl;
710 int vmid;
711 struct fastrpc_session_ctx *sess;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700712
713 if (!map)
714 return;
715 fl = map->fl;
c_mtharue1a5ce12017-10-13 20:47:09 +0530716 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
717 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
718 spin_lock(&me->hlock);
719 map->refs--;
720 if (!map->refs)
721 hlist_del_init(&map->hn);
722 spin_unlock(&me->hlock);
c_mtharu7bd6a422017-10-17 18:15:37 +0530723 if (map->refs > 0)
724 return;
c_mtharue1a5ce12017-10-13 20:47:09 +0530725 } else {
c_mtharue1a5ce12017-10-13 20:47:09 +0530726 map->refs--;
727 if (!map->refs)
728 hlist_del_init(&map->hn);
c_mtharu7bd6a422017-10-17 18:15:37 +0530729 if (map->refs > 0 && !flags)
730 return;
c_mtharue1a5ce12017-10-13 20:47:09 +0530731 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530732 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
733 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
Tharun Kumar Merugu48d5ff32018-04-16 19:24:16 +0530734 unsigned long dma_attrs = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700735
c_mtharue1a5ce12017-10-13 20:47:09 +0530736 if (me->dev == NULL) {
737 pr_err("failed to free remote heap allocation\n");
738 return;
739 }
740 if (map->phys) {
Tharun Kumar Merugu48d5ff32018-04-16 19:24:16 +0530741 dma_attrs |=
742 DMA_ATTR_SKIP_ZEROING | DMA_ATTR_NO_KERNEL_MAPPING;
743 dma_free_attrs(me->dev, map->size, (void *)map->va,
744 (dma_addr_t)map->phys, dma_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +0530745 }
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +0530746 } else if (map->flags == FASTRPC_DMAHANDLE_NOMAP) {
747 if (!IS_ERR_OR_NULL(map->handle))
748 ion_free(fl->apps->client, map->handle);
c_mtharue1a5ce12017-10-13 20:47:09 +0530749 } else {
750 int destVM[1] = {VMID_HLOS};
751 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
752
753 if (map->secure)
754 sess = fl->secsctx;
755 else
756 sess = fl->sctx;
757
758 if (!IS_ERR_OR_NULL(map->handle))
759 ion_free(fl->apps->client, map->handle);
760 if (sess && sess->smmu.enabled) {
761 if (map->size || map->phys)
762 msm_dma_unmap_sg(sess->smmu.dev,
763 map->table->sgl,
764 map->table->nents, DMA_BIDIRECTIONAL,
765 map->buf);
766 }
767 vmid = fl->apps->channel[fl->cid].vmid;
768 if (vmid && map->phys) {
769 int srcVM[2] = {VMID_HLOS, vmid};
770
771 hyp_assign_phys(map->phys, buf_page_size(map->size),
772 srcVM, 2, destVM, destVMperm, 1);
773 }
774
775 if (!IS_ERR_OR_NULL(map->table))
776 dma_buf_unmap_attachment(map->attach, map->table,
777 DMA_BIDIRECTIONAL);
778 if (!IS_ERR_OR_NULL(map->attach))
779 dma_buf_detach(map->buf, map->attach);
780 if (!IS_ERR_OR_NULL(map->buf))
781 dma_buf_put(map->buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700782 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700783 kfree(map);
784}
785
786static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
787 struct fastrpc_session_ctx **session);
788
789static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530790 unsigned int attr, uintptr_t va, size_t len, int mflags,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700791 struct fastrpc_mmap **ppmap)
792{
c_mtharue1a5ce12017-10-13 20:47:09 +0530793 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700794 struct fastrpc_session_ctx *sess;
795 struct fastrpc_apps *apps = fl->apps;
796 int cid = fl->cid;
797 struct fastrpc_channel_ctx *chan = &apps->channel[cid];
c_mtharue1a5ce12017-10-13 20:47:09 +0530798 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700799 unsigned long attrs;
c_mtharuf931ff92017-11-30 19:35:30 +0530800 dma_addr_t region_phys = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700801 unsigned long flags;
802 int err = 0, vmid;
803
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800804 if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700805 return 0;
806 map = kzalloc(sizeof(*map), GFP_KERNEL);
807 VERIFY(err, !IS_ERR_OR_NULL(map));
808 if (err)
809 goto bail;
810 INIT_HLIST_NODE(&map->hn);
811 map->flags = mflags;
812 map->refs = 1;
813 map->fl = fl;
814 map->fd = fd;
815 map->attr = attr;
c_mtharue1a5ce12017-10-13 20:47:09 +0530816 if (mflags == ADSP_MMAP_HEAP_ADDR ||
817 mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530818 unsigned long dma_attrs = DMA_ATTR_SKIP_ZEROING |
819 DMA_ATTR_NO_KERNEL_MAPPING;
820
c_mtharue1a5ce12017-10-13 20:47:09 +0530821 map->apps = me;
822 map->fl = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530823 VERIFY(err, !dma_alloc_memory(&region_phys, len, dma_attrs));
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700824 if (err)
825 goto bail;
c_mtharuf931ff92017-11-30 19:35:30 +0530826 map->phys = (uintptr_t)region_phys;
c_mtharue1a5ce12017-10-13 20:47:09 +0530827 map->size = len;
Tharun Kumar Merugu48d5ff32018-04-16 19:24:16 +0530828 map->va = (uintptr_t)map->phys;
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +0530829 } else if (mflags == FASTRPC_DMAHANDLE_NOMAP) {
830 ion_phys_addr_t iphys;
831
832 VERIFY(err, !IS_ERR_OR_NULL(map->handle =
833 ion_import_dma_buf_fd(fl->apps->client, fd)));
834 if (err)
835 goto bail;
836
837 map->uncached = 1;
838 map->buf = NULL;
839 map->attach = NULL;
840 map->table = NULL;
841 map->va = 0;
842 map->phys = 0;
843
844 err = ion_phys(fl->apps->client, map->handle,
845 &iphys, &map->size);
846 if (err)
847 goto bail;
848 map->phys = (uint64_t)iphys;
c_mtharue1a5ce12017-10-13 20:47:09 +0530849 } else {
c_mtharu7bd6a422017-10-17 18:15:37 +0530850 if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
851 pr_info("adsprpc: buffer mapped with persist attr %x\n",
852 (unsigned int)map->attr);
853 map->refs = 2;
854 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530855 VERIFY(err, !IS_ERR_OR_NULL(map->handle =
856 ion_import_dma_buf_fd(fl->apps->client, fd)));
857 if (err)
858 goto bail;
859 VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
860 &flags));
861 if (err)
862 goto bail;
863
c_mtharue1a5ce12017-10-13 20:47:09 +0530864 map->secure = flags & ION_FLAG_SECURE;
865 if (map->secure) {
866 if (!fl->secsctx)
867 err = fastrpc_session_alloc(chan, 1,
868 &fl->secsctx);
869 if (err)
870 goto bail;
871 }
872 if (map->secure)
873 sess = fl->secsctx;
874 else
875 sess = fl->sctx;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +0530876
c_mtharue1a5ce12017-10-13 20:47:09 +0530877 VERIFY(err, !IS_ERR_OR_NULL(sess));
878 if (err)
879 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +0530880
881 map->uncached = !ION_IS_CACHED(flags);
882 if (map->attr & FASTRPC_ATTR_NOVA && !sess->smmu.coherent)
883 map->uncached = 1;
884
c_mtharue1a5ce12017-10-13 20:47:09 +0530885 VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
886 if (err)
887 goto bail;
888 VERIFY(err, !IS_ERR_OR_NULL(map->attach =
889 dma_buf_attach(map->buf, sess->smmu.dev)));
890 if (err)
891 goto bail;
892 VERIFY(err, !IS_ERR_OR_NULL(map->table =
893 dma_buf_map_attachment(map->attach,
894 DMA_BIDIRECTIONAL)));
895 if (err)
896 goto bail;
897 if (sess->smmu.enabled) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700898 attrs = DMA_ATTR_EXEC_MAPPING;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +0530899
900 if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
901 (sess->smmu.coherent && map->uncached))
902 attrs |= DMA_ATTR_FORCE_NON_COHERENT;
903 else if (map->attr & FASTRPC_ATTR_COHERENT)
904 attrs |= DMA_ATTR_FORCE_COHERENT;
905
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700906 VERIFY(err, map->table->nents ==
c_mtharue1a5ce12017-10-13 20:47:09 +0530907 msm_dma_map_sg_attrs(sess->smmu.dev,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700908 map->table->sgl, map->table->nents,
909 DMA_BIDIRECTIONAL, map->buf, attrs));
c_mtharue1a5ce12017-10-13 20:47:09 +0530910 if (err)
911 goto bail;
912 } else {
913 VERIFY(err, map->table->nents == 1);
914 if (err)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700915 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +0530916 }
917 map->phys = sg_dma_address(map->table->sgl);
Tharun Kumar Merugu93f319a2018-02-01 17:35:42 +0530918
c_mtharue1a5ce12017-10-13 20:47:09 +0530919 if (sess->smmu.cb) {
920 map->phys += ((uint64_t)sess->smmu.cb << 32);
921 map->size = sg_dma_len(map->table->sgl);
922 } else {
923 map->size = buf_page_size(len);
924 }
Tharun Kumar Merugu93f319a2018-02-01 17:35:42 +0530925
c_mtharue1a5ce12017-10-13 20:47:09 +0530926 vmid = fl->apps->channel[fl->cid].vmid;
Tharun Kumar Merugu93f319a2018-02-01 17:35:42 +0530927 if (!sess->smmu.enabled && !vmid) {
928 VERIFY(err, map->phys >= me->range.addr &&
929 map->phys + map->size <=
930 me->range.addr + me->range.size);
931 if (err) {
932 pr_err("adsprpc: mmap fail out of range\n");
933 goto bail;
934 }
935 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530936 if (vmid) {
937 int srcVM[1] = {VMID_HLOS};
938 int destVM[2] = {VMID_HLOS, vmid};
939 int destVMperm[2] = {PERM_READ | PERM_WRITE,
940 PERM_READ | PERM_WRITE | PERM_EXEC};
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700941
c_mtharue1a5ce12017-10-13 20:47:09 +0530942 VERIFY(err, !hyp_assign_phys(map->phys,
943 buf_page_size(map->size),
944 srcVM, 1, destVM, destVMperm, 2));
945 if (err)
946 goto bail;
947 }
948 map->va = va;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700949 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700950 map->len = len;
951
952 fastrpc_mmap_add(map);
953 *ppmap = map;
954
955bail:
956 if (err && map)
c_mtharu7bd6a422017-10-17 18:15:37 +0530957 fastrpc_mmap_free(map, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700958 return err;
959}
960
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530961static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530962 unsigned long dma_attr, uint32_t rflags,
963 int remote, struct fastrpc_buf **obuf)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700964{
965 int err = 0, vmid;
c_mtharue1a5ce12017-10-13 20:47:09 +0530966 struct fastrpc_buf *buf = NULL, *fr = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700967 struct hlist_node *n;
968
969 VERIFY(err, size > 0);
970 if (err)
971 goto bail;
972
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530973 if (!remote) {
974 /* find the smallest buffer that fits in the cache */
975 spin_lock(&fl->hlock);
976 hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
977 if (buf->size >= size && (!fr || fr->size > buf->size))
978 fr = buf;
979 }
980 if (fr)
981 hlist_del_init(&fr->hn);
982 spin_unlock(&fl->hlock);
983 if (fr) {
984 *obuf = fr;
985 return 0;
986 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700987 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530988 buf = NULL;
989 VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700990 if (err)
991 goto bail;
992 INIT_HLIST_NODE(&buf->hn);
993 buf->fl = fl;
c_mtharue1a5ce12017-10-13 20:47:09 +0530994 buf->virt = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700995 buf->phys = 0;
996 buf->size = size;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530997 buf->dma_attr = dma_attr;
998 buf->flags = rflags;
999 buf->raddr = 0;
1000 buf->remote = 0;
1001 buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
1002 (dma_addr_t *)&buf->phys,
1003 GFP_KERNEL, buf->dma_attr);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001004 if (IS_ERR_OR_NULL(buf->virt)) {
1005 /* free cache and retry */
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05301006 fastrpc_cached_buf_list_free(fl);
1007 buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
1008 (dma_addr_t *)&buf->phys,
1009 GFP_KERNEL, buf->dma_attr);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001010 VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
1011 }
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05301012 if (err) {
1013 err = -ENOMEM;
1014 pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx\n",
1015 current->comm, __func__, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001016 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05301017 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001018 if (fl->sctx->smmu.cb)
1019 buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
1020 vmid = fl->apps->channel[fl->cid].vmid;
1021 if (vmid) {
1022 int srcVM[1] = {VMID_HLOS};
1023 int destVM[2] = {VMID_HLOS, vmid};
1024 int destVMperm[2] = {PERM_READ | PERM_WRITE,
1025 PERM_READ | PERM_WRITE | PERM_EXEC};
1026
1027 VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
1028 srcVM, 1, destVM, destVMperm, 2));
1029 if (err)
1030 goto bail;
1031 }
1032
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05301033 if (remote) {
1034 INIT_HLIST_NODE(&buf->hn_rem);
1035 spin_lock(&fl->hlock);
1036 hlist_add_head(&buf->hn_rem, &fl->remote_bufs);
1037 spin_unlock(&fl->hlock);
1038 buf->remote = remote;
1039 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001040 *obuf = buf;
1041 bail:
1042 if (err && buf)
1043 fastrpc_buf_free(buf, 0);
1044 return err;
1045}
1046
1047
1048static int context_restore_interrupted(struct fastrpc_file *fl,
Sathish Ambleybae51902017-07-03 15:00:49 -07001049 struct fastrpc_ioctl_invoke_crc *inv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001050 struct smq_invoke_ctx **po)
1051{
1052 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05301053 struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001054 struct hlist_node *n;
1055 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
1056
1057 spin_lock(&fl->hlock);
1058 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
1059 if (ictx->pid == current->pid) {
1060 if (invoke->sc != ictx->sc || ictx->fl != fl)
1061 err = -1;
1062 else {
1063 ctx = ictx;
1064 hlist_del_init(&ctx->hn);
1065 hlist_add_head(&ctx->hn, &fl->clst.pending);
1066 }
1067 break;
1068 }
1069 }
1070 spin_unlock(&fl->hlock);
1071 if (ctx)
1072 *po = ctx;
1073 return err;
1074}
1075
1076#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
1077static int overlap_ptr_cmp(const void *a, const void *b)
1078{
1079 struct overlap *pa = *((struct overlap **)a);
1080 struct overlap *pb = *((struct overlap **)b);
1081 /* sort with lowest starting buffer first */
1082 int st = CMP(pa->start, pb->start);
1083 /* sort with highest ending buffer first */
1084 int ed = CMP(pb->end, pa->end);
1085 return st == 0 ? ed : st;
1086}
1087
Sathish Ambley9466d672017-01-25 10:51:55 -08001088static int context_build_overlap(struct smq_invoke_ctx *ctx)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001089{
Sathish Ambley9466d672017-01-25 10:51:55 -08001090 int i, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001091 remote_arg_t *lpra = ctx->lpra;
1092 int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
1093 int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
1094 int nbufs = inbufs + outbufs;
1095 struct overlap max;
1096
1097 for (i = 0; i < nbufs; ++i) {
1098 ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
1099 ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
Sathish Ambley9466d672017-01-25 10:51:55 -08001100 if (lpra[i].buf.len) {
1101 VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
1102 if (err)
1103 goto bail;
1104 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001105 ctx->overs[i].raix = i;
1106 ctx->overps[i] = &ctx->overs[i];
1107 }
c_mtharue1a5ce12017-10-13 20:47:09 +05301108 sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001109 max.start = 0;
1110 max.end = 0;
1111 for (i = 0; i < nbufs; ++i) {
1112 if (ctx->overps[i]->start < max.end) {
1113 ctx->overps[i]->mstart = max.end;
1114 ctx->overps[i]->mend = ctx->overps[i]->end;
1115 ctx->overps[i]->offset = max.end -
1116 ctx->overps[i]->start;
1117 if (ctx->overps[i]->end > max.end) {
1118 max.end = ctx->overps[i]->end;
1119 } else {
1120 ctx->overps[i]->mend = 0;
1121 ctx->overps[i]->mstart = 0;
1122 }
1123 } else {
1124 ctx->overps[i]->mend = ctx->overps[i]->end;
1125 ctx->overps[i]->mstart = ctx->overps[i]->start;
1126 ctx->overps[i]->offset = 0;
1127 max = *ctx->overps[i];
1128 }
1129 }
Sathish Ambley9466d672017-01-25 10:51:55 -08001130bail:
1131 return err;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001132}
1133
1134#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
1135 do {\
1136 if (!(kernel))\
c_mtharue1a5ce12017-10-13 20:47:09 +05301137 VERIFY(err, 0 == copy_from_user((dst),\
1138 (void const __user *)(src),\
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001139 (size)));\
1140 else\
1141 memmove((dst), (src), (size));\
1142 } while (0)
1143
1144#define K_COPY_TO_USER(err, kernel, dst, src, size) \
1145 do {\
1146 if (!(kernel))\
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301147 VERIFY(err, 0 == copy_to_user((void __user *)(dst),\
c_mtharue1a5ce12017-10-13 20:47:09 +05301148 (src), (size)));\
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001149 else\
1150 memmove((dst), (src), (size));\
1151 } while (0)
1152
1153
1154static void context_free(struct smq_invoke_ctx *ctx);
1155
1156static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07001157 struct fastrpc_ioctl_invoke_crc *invokefd,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001158 struct smq_invoke_ctx **po)
1159{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301160 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301161 int err = 0, bufs, ii, size = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05301162 struct smq_invoke_ctx *ctx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001163 struct fastrpc_ctx_lst *clst = &fl->clst;
1164 struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;
1165
1166 bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
1167 size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
1168 sizeof(*ctx->fds) * (bufs) +
1169 sizeof(*ctx->attrs) * (bufs) +
1170 sizeof(*ctx->overs) * (bufs) +
1171 sizeof(*ctx->overps) * (bufs);
1172
c_mtharue1a5ce12017-10-13 20:47:09 +05301173 VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001174 if (err)
1175 goto bail;
1176
1177 INIT_HLIST_NODE(&ctx->hn);
1178 hlist_add_fake(&ctx->hn);
1179 ctx->fl = fl;
1180 ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
1181 ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
1182 ctx->fds = (int *)(&ctx->lpra[bufs]);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301183 if (me->legacy) {
1184 ctx->overs = (struct overlap *)(&ctx->fds[bufs]);
1185 ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
1186 } else {
1187 ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
1188 ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
1189 ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
1190 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001191
c_mtharue1a5ce12017-10-13 20:47:09 +05301192 K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001193 bufs * sizeof(*ctx->lpra));
1194 if (err)
1195 goto bail;
1196
1197 if (invokefd->fds) {
1198 K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
1199 bufs * sizeof(*ctx->fds));
1200 if (err)
1201 goto bail;
1202 }
1203 if (invokefd->attrs) {
1204 K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
1205 bufs * sizeof(*ctx->attrs));
1206 if (err)
1207 goto bail;
1208 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001209 ctx->crc = (uint32_t *)invokefd->crc;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001210 ctx->sc = invoke->sc;
Sathish Ambley9466d672017-01-25 10:51:55 -08001211 if (bufs) {
1212 VERIFY(err, 0 == context_build_overlap(ctx));
1213 if (err)
1214 goto bail;
1215 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001216 ctx->retval = -1;
1217 ctx->pid = current->pid;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301218 ctx->tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001219 init_completion(&ctx->work);
c_mtharufdac6892017-10-12 13:09:01 +05301220 ctx->magic = FASTRPC_CTX_MAGIC;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001221
1222 spin_lock(&fl->hlock);
1223 hlist_add_head(&ctx->hn, &clst->pending);
1224 spin_unlock(&fl->hlock);
1225
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301226 spin_lock(&me->ctxlock);
1227 for (ii = 0; ii < FASTRPC_CTX_MAX; ii++) {
1228 if (!me->ctxtable[ii]) {
1229 me->ctxtable[ii] = ctx;
1230 ctx->ctxid = (ptr_to_uint64(ctx) & ~0xFFF)|(ii << 4);
1231 break;
1232 }
1233 }
1234 spin_unlock(&me->ctxlock);
1235 VERIFY(err, ii < FASTRPC_CTX_MAX);
1236 if (err) {
1237 pr_err("adsprpc: out of context memory\n");
1238 goto bail;
1239 }
1240
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001241 *po = ctx;
1242bail:
1243 if (ctx && err)
1244 context_free(ctx);
1245 return err;
1246}
1247
1248static void context_save_interrupted(struct smq_invoke_ctx *ctx)
1249{
1250 struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
1251
1252 spin_lock(&ctx->fl->hlock);
1253 hlist_del_init(&ctx->hn);
1254 hlist_add_head(&ctx->hn, &clst->interrupted);
1255 spin_unlock(&ctx->fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001256}
1257
1258static void context_free(struct smq_invoke_ctx *ctx)
1259{
1260 int i;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301261 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001262 int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
1263 REMOTE_SCALARS_OUTBUFS(ctx->sc);
1264 spin_lock(&ctx->fl->hlock);
1265 hlist_del_init(&ctx->hn);
1266 spin_unlock(&ctx->fl->hlock);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301267 mutex_lock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001268 for (i = 0; i < nbufs; ++i)
c_mtharu7bd6a422017-10-17 18:15:37 +05301269 fastrpc_mmap_free(ctx->maps[i], 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301270
1271 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001272 fastrpc_buf_free(ctx->buf, 1);
c_mtharufdac6892017-10-12 13:09:01 +05301273 ctx->magic = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301274 ctx->ctxid = 0;
1275
1276 spin_lock(&me->ctxlock);
1277 for (i = 0; i < FASTRPC_CTX_MAX; i++) {
1278 if (me->ctxtable[i] == ctx) {
1279 me->ctxtable[i] = NULL;
1280 break;
1281 }
1282 }
1283 spin_unlock(&me->ctxlock);
1284
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001285 kfree(ctx);
1286}
1287
1288static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
1289{
1290 ctx->retval = retval;
1291 complete(&ctx->work);
1292}
1293
1294
1295static void fastrpc_notify_users(struct fastrpc_file *me)
1296{
1297 struct smq_invoke_ctx *ictx;
1298 struct hlist_node *n;
1299
1300 spin_lock(&me->hlock);
1301 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1302 complete(&ictx->work);
1303 }
1304 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1305 complete(&ictx->work);
1306 }
1307 spin_unlock(&me->hlock);
1308
1309}
1310
Tharun Kumar Merugu77dd5872018-04-02 12:48:17 +05301311
1312static void fastrpc_notify_users_staticpd_pdr(struct fastrpc_file *me)
1313{
1314 struct smq_invoke_ctx *ictx;
1315 struct hlist_node *n;
1316
1317 spin_lock(&me->hlock);
1318 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1319 if (ictx->msg.pid)
1320 complete(&ictx->work);
1321 }
1322 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1323 if (ictx->msg.pid)
1324 complete(&ictx->work);
1325 }
1326 spin_unlock(&me->hlock);
1327}
1328
1329
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001330static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
1331{
1332 struct fastrpc_file *fl;
1333 struct hlist_node *n;
1334
1335 spin_lock(&me->hlock);
1336 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1337 if (fl->cid == cid)
1338 fastrpc_notify_users(fl);
1339 }
1340 spin_unlock(&me->hlock);
1341
1342}
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05301343
1344static void fastrpc_notify_pdr_drivers(struct fastrpc_apps *me, char *spdname)
1345{
1346 struct fastrpc_file *fl;
1347 struct hlist_node *n;
1348
1349 spin_lock(&me->hlock);
1350 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1351 if (fl->spdname && !strcmp(spdname, fl->spdname))
Tharun Kumar Merugu77dd5872018-04-02 12:48:17 +05301352 fastrpc_notify_users_staticpd_pdr(fl);
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05301353 }
1354 spin_unlock(&me->hlock);
1355
1356}
1357
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001358static void context_list_ctor(struct fastrpc_ctx_lst *me)
1359{
1360 INIT_HLIST_HEAD(&me->interrupted);
1361 INIT_HLIST_HEAD(&me->pending);
1362}
1363
1364static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
1365{
1366 struct fastrpc_ctx_lst *clst = &fl->clst;
c_mtharue1a5ce12017-10-13 20:47:09 +05301367 struct smq_invoke_ctx *ictx = NULL, *ctxfree;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001368 struct hlist_node *n;
1369
1370 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301371 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001372 spin_lock(&fl->hlock);
1373 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
1374 hlist_del_init(&ictx->hn);
1375 ctxfree = ictx;
1376 break;
1377 }
1378 spin_unlock(&fl->hlock);
1379 if (ctxfree)
1380 context_free(ctxfree);
1381 } while (ctxfree);
1382 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301383 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001384 spin_lock(&fl->hlock);
1385 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
1386 hlist_del_init(&ictx->hn);
1387 ctxfree = ictx;
1388 break;
1389 }
1390 spin_unlock(&fl->hlock);
1391 if (ctxfree)
1392 context_free(ctxfree);
1393 } while (ctxfree);
1394}
1395
1396static int fastrpc_file_free(struct fastrpc_file *fl);
1397static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
1398{
1399 struct fastrpc_file *fl, *free;
1400 struct hlist_node *n;
1401
1402 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301403 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001404 spin_lock(&me->hlock);
1405 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1406 hlist_del_init(&fl->hn);
1407 free = fl;
1408 break;
1409 }
1410 spin_unlock(&me->hlock);
1411 if (free)
1412 fastrpc_file_free(free);
1413 } while (free);
1414}
1415
1416static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
1417{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301418 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001419 remote_arg64_t *rpra;
1420 remote_arg_t *lpra = ctx->lpra;
1421 struct smq_invoke_buf *list;
1422 struct smq_phy_page *pages, *ipage;
1423 uint32_t sc = ctx->sc;
1424 int inbufs = REMOTE_SCALARS_INBUFS(sc);
1425 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001426 int handles, bufs = inbufs + outbufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001427 uintptr_t args;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301428 size_t rlen = 0, copylen = 0, metalen = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001429 int i, oix;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001430 int err = 0;
1431 int mflags = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001432 uint64_t *fdlist;
Sathish Ambleybae51902017-07-03 15:00:49 -07001433 uint32_t *crclist;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301434 int64_t *perf_counter = getperfcounter(ctx->fl, PERF_COUNT);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001435
1436 /* calculate size of the metadata */
c_mtharue1a5ce12017-10-13 20:47:09 +05301437 rpra = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001438 list = smq_invoke_buf_start(rpra, sc);
1439 pages = smq_phy_page_start(sc, list);
1440 ipage = pages;
1441
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301442 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001443 for (i = 0; i < bufs; ++i) {
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301444 uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
1445 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001446
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301447 mutex_lock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301448 if (ctx->fds[i] && (ctx->fds[i] != -1)) {
1449 unsigned int attrs = 0;
1450
1451 if (ctx->attrs)
1452 attrs = ctx->attrs[i];
1453
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001454 fastrpc_mmap_create(ctx->fl, ctx->fds[i],
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301455 attrs, buf, len,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001456 mflags, &ctx->maps[i]);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301457 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301458 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001459 ipage += 1;
1460 }
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301461 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001462 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301463 mutex_lock(&ctx->fl->fl_map_mutex);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001464 for (i = bufs; i < bufs + handles; i++) {
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +05301465 int dmaflags = 0;
1466
1467 if (ctx->attrs && (ctx->attrs[i] & FASTRPC_ATTR_NOMAP))
1468 dmaflags = FASTRPC_DMAHANDLE_NOMAP;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001469 VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +05301470 FASTRPC_ATTR_NOVA, 0, 0, dmaflags, &ctx->maps[i]));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301471 if (err) {
1472 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001473 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301474 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001475 ipage += 1;
1476 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301477 mutex_unlock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301478 if (!me->legacy) {
1479 metalen = copylen = (size_t)&ipage[0] +
1480 (sizeof(uint64_t) * M_FDLIST) +
1481 (sizeof(uint32_t) * M_CRCLIST);
1482 } else {
1483 metalen = copylen = (size_t)&ipage[0];
1484 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001485
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001486 /* calculate len requreed for copying */
1487 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1488 int i = ctx->overps[oix]->raix;
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001489 uintptr_t mstart, mend;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301490 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001491
1492 if (!len)
1493 continue;
1494 if (ctx->maps[i])
1495 continue;
1496 if (ctx->overps[oix]->offset == 0)
1497 copylen = ALIGN(copylen, BALIGN);
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001498 mstart = ctx->overps[oix]->mstart;
1499 mend = ctx->overps[oix]->mend;
1500 VERIFY(err, (mend - mstart) <= LONG_MAX);
1501 if (err)
1502 goto bail;
1503 copylen += mend - mstart;
1504 VERIFY(err, copylen >= 0);
1505 if (err)
1506 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001507 }
1508 ctx->used = copylen;
1509
1510 /* allocate new buffer */
1511 if (copylen) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05301512 err = fastrpc_buf_alloc(ctx->fl, copylen, 0, 0, 0, &ctx->buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001513 if (err)
1514 goto bail;
1515 }
Tharun Kumar Merugue3361f92017-06-22 10:45:43 +05301516 if (ctx->buf->virt && metalen <= copylen)
1517 memset(ctx->buf->virt, 0, metalen);
1518
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001519 /* copy metadata */
1520 rpra = ctx->buf->virt;
1521 ctx->rpra = rpra;
1522 list = smq_invoke_buf_start(rpra, sc);
1523 pages = smq_phy_page_start(sc, list);
1524 ipage = pages;
1525 args = (uintptr_t)ctx->buf->virt + metalen;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001526 for (i = 0; i < bufs + handles; ++i) {
1527 if (lpra[i].buf.len)
1528 list[i].num = 1;
1529 else
1530 list[i].num = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001531 list[i].pgidx = ipage - pages;
1532 ipage++;
1533 }
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301534
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001535 /* map ion buffers */
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301536 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301537 for (i = 0; rpra && i < inbufs + outbufs; ++i) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001538 struct fastrpc_mmap *map = ctx->maps[i];
1539 uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301540 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001541
1542 rpra[i].buf.pv = 0;
1543 rpra[i].buf.len = len;
1544 if (!len)
1545 continue;
1546 if (map) {
1547 struct vm_area_struct *vma;
1548 uintptr_t offset;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301549 uint64_t num = buf_num_pages(buf, len);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001550 int idx = list[i].pgidx;
1551
1552 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001553 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001554 } else {
1555 down_read(&current->mm->mmap_sem);
1556 VERIFY(err, NULL != (vma = find_vma(current->mm,
1557 map->va)));
1558 if (err) {
1559 up_read(&current->mm->mmap_sem);
1560 goto bail;
1561 }
1562 offset = buf_page_start(buf) - vma->vm_start;
1563 up_read(&current->mm->mmap_sem);
1564 VERIFY(err, offset < (uintptr_t)map->size);
1565 if (err)
1566 goto bail;
1567 }
1568 pages[idx].addr = map->phys + offset;
1569 pages[idx].size = num << PAGE_SHIFT;
1570 }
1571 rpra[i].buf.pv = buf;
1572 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001573 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001574 for (i = bufs; i < bufs + handles; ++i) {
1575 struct fastrpc_mmap *map = ctx->maps[i];
1576
1577 pages[i].addr = map->phys;
1578 pages[i].size = map->size;
1579 }
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301580 if (!me->legacy) {
1581 fdlist = (uint64_t *)&pages[bufs + handles];
1582 for (i = 0; i < M_FDLIST; i++)
1583 fdlist[i] = 0;
1584 crclist = (uint32_t *)&fdlist[M_FDLIST];
1585 memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
1586 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001587
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001588	/* copy non-ion buffers */
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301589 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001590 rlen = copylen - metalen;
Tharun Kumar Meruguc230bd72018-01-29 18:02:42 +05301591 for (oix = 0; rpra && oix < inbufs + outbufs; ++oix) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001592 int i = ctx->overps[oix]->raix;
1593 struct fastrpc_mmap *map = ctx->maps[i];
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301594 size_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001595 uint64_t buf;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301596 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001597
1598 if (!len)
1599 continue;
1600 if (map)
1601 continue;
1602 if (ctx->overps[oix]->offset == 0) {
1603 rlen -= ALIGN(args, BALIGN) - args;
1604 args = ALIGN(args, BALIGN);
1605 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001606 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001607 VERIFY(err, rlen >= mlen);
1608 if (err)
1609 goto bail;
1610 rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
1611 pages[list[i].pgidx].addr = ctx->buf->phys -
1612 ctx->overps[oix]->offset +
1613 (copylen - rlen);
1614 pages[list[i].pgidx].addr =
1615 buf_page_start(pages[list[i].pgidx].addr);
1616 buf = rpra[i].buf.pv;
1617 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1618 if (i < inbufs) {
1619 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1620 lpra[i].buf.pv, len);
1621 if (err)
1622 goto bail;
1623 }
1624 args = args + mlen;
1625 rlen -= mlen;
1626 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001627 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001628
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301629 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001630 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1631 int i = ctx->overps[oix]->raix;
1632 struct fastrpc_mmap *map = ctx->maps[i];
1633
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001634 if (map && map->uncached)
1635 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301636 if (ctx->fl->sctx->smmu.coherent &&
1637 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1638 continue;
1639 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1640 continue;
1641
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301642 if (rpra && rpra[i].buf.len && ctx->overps[oix]->mstart) {
1643 if (map && map->handle)
1644 msm_ion_do_cache_op(ctx->fl->apps->client,
1645 map->handle,
1646 uint64_to_ptr(rpra[i].buf.pv),
1647 rpra[i].buf.len,
1648 ION_IOC_CLEAN_INV_CACHES);
1649 else
1650 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1651 uint64_to_ptr(rpra[i].buf.pv
1652 + rpra[i].buf.len));
1653 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001654 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001655 PERF_END);
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301656 for (i = bufs; rpra && i < bufs + handles; i++) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001657 rpra[i].dma.fd = ctx->fds[i];
1658 rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1659 rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001660 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001661
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001662 bail:
1663 return err;
1664}
1665
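/*
 * put_args() runs after the remote call returns: non-mapped output
 * buffers are copied back to the caller, per-invoke mmaps are released,
 * any file descriptors the DSP listed in fdlist are looked up and freed,
 * and the CRC list is copied out when the caller asked for it.
 */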
1666static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
1667 remote_arg_t *upra)
1668{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301669 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001670 uint32_t sc = ctx->sc;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001671 struct smq_invoke_buf *list;
1672 struct smq_phy_page *pages;
1673 struct fastrpc_mmap *mmap;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301674 uint64_t *fdlist = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001675 uint32_t *crclist = NULL;
1676
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001677 remote_arg64_t *rpra = ctx->rpra;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001678 int i, inbufs, outbufs, handles;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001679 int err = 0;
1680
1681 inbufs = REMOTE_SCALARS_INBUFS(sc);
1682 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001683 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1684 list = smq_invoke_buf_start(ctx->rpra, sc);
1685 pages = smq_phy_page_start(sc, list);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301686 if (!me->legacy) {
1687 fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
1688 crclist = (uint32_t *)(fdlist + M_FDLIST);
1689 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001690
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001691 for (i = inbufs; i < inbufs + outbufs; ++i) {
1692 if (!ctx->maps[i]) {
1693 K_COPY_TO_USER(err, kernel,
1694 ctx->lpra[i].buf.pv,
1695 uint64_to_ptr(rpra[i].buf.pv),
1696 rpra[i].buf.len);
1697 if (err)
1698 goto bail;
1699 } else {
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301700 mutex_lock(&ctx->fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05301701 fastrpc_mmap_free(ctx->maps[i], 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301702 mutex_unlock(&ctx->fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05301703 ctx->maps[i] = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001704 }
1705 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301706 mutex_lock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301707 if (fdlist && (inbufs + outbufs + handles)) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001708 for (i = 0; i < M_FDLIST; i++) {
1709 if (!fdlist[i])
1710 break;
1711 if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
Sathish Ambleyae5ee542017-01-16 22:24:23 -08001712 0, 0, &mmap))
c_mtharu7bd6a422017-10-17 18:15:37 +05301713 fastrpc_mmap_free(mmap, 0);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001714 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001715 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301716 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambleybae51902017-07-03 15:00:49 -07001717 if (ctx->crc && crclist && rpra)
c_mtharue1a5ce12017-10-13 20:47:09 +05301718 K_COPY_TO_USER(err, kernel, ctx->crc,
Sathish Ambleybae51902017-07-03 15:00:49 -07001719 crclist, M_CRCLIST*sizeof(uint32_t));
1720
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001721 bail:
1722 return err;
1723}
1724
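/*
 * inv_args_pre() does cache maintenance before the remote call: for each
 * cached, non-coherent output buffer it cleans and invalidates the cache
 * lines straddling the buffer's unaligned start and end, so a later
 * write-back of those lines cannot clobber data the DSP writes there.
 */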
1725static void inv_args_pre(struct smq_invoke_ctx *ctx)
1726{
1727 int i, inbufs, outbufs;
1728 uint32_t sc = ctx->sc;
1729 remote_arg64_t *rpra = ctx->rpra;
1730 uintptr_t end;
1731
1732 inbufs = REMOTE_SCALARS_INBUFS(sc);
1733 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1734 for (i = inbufs; i < inbufs + outbufs; ++i) {
1735 struct fastrpc_mmap *map = ctx->maps[i];
1736
1737 if (map && map->uncached)
1738 continue;
1739 if (!rpra[i].buf.len)
1740 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301741 if (ctx->fl->sctx->smmu.coherent &&
1742 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1743 continue;
1744 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1745 continue;
1746
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001747 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1748 buf_page_start(rpra[i].buf.pv))
1749 continue;
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301750 if (!IS_CACHE_ALIGNED((uintptr_t)
1751 uint64_to_ptr(rpra[i].buf.pv))) {
1752 if (map && map->handle)
1753 msm_ion_do_cache_op(ctx->fl->apps->client,
1754 map->handle,
1755 uint64_to_ptr(rpra[i].buf.pv),
1756 sizeof(uintptr_t),
1757 ION_IOC_CLEAN_INV_CACHES);
1758 else
1759 dmac_flush_range(
1760 uint64_to_ptr(rpra[i].buf.pv), (char *)
1761 uint64_to_ptr(rpra[i].buf.pv + 1));
1762 }
1763
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001764 end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
1765 rpra[i].buf.len);
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301766 if (!IS_CACHE_ALIGNED(end)) {
1767 if (map && map->handle)
1768 msm_ion_do_cache_op(ctx->fl->apps->client,
1769 map->handle,
1770 uint64_to_ptr(end),
1771 sizeof(uintptr_t),
1772 ION_IOC_CLEAN_INV_CACHES);
1773 else
1774 dmac_flush_range((char *)end,
1775 (char *)end + 1);
1776 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001777 }
1778}
1779
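/*
 * inv_args() invalidates cached, non-coherent output buffers after the
 * remote call completes so the CPU observes the data written by the DSP
 * rather than stale cache contents.
 */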
1780static void inv_args(struct smq_invoke_ctx *ctx)
1781{
1782 int i, inbufs, outbufs;
1783 uint32_t sc = ctx->sc;
1784 remote_arg64_t *rpra = ctx->rpra;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001785
1786 inbufs = REMOTE_SCALARS_INBUFS(sc);
1787 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1788 for (i = inbufs; i < inbufs + outbufs; ++i) {
1789 struct fastrpc_mmap *map = ctx->maps[i];
1790
1791 if (map && map->uncached)
1792 continue;
1793 if (!rpra[i].buf.len)
1794 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301795 if (ctx->fl->sctx->smmu.coherent &&
1796 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1797 continue;
1798 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1799 continue;
1800
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001801 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1802 buf_page_start(rpra[i].buf.pv)) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001803 continue;
1804 }
1805 if (map && map->handle)
1806 msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
1807 (char *)uint64_to_ptr(rpra[i].buf.pv),
1808 rpra[i].buf.len, ION_IOC_INV_CACHES);
1809 else
1810 dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
1811 (char *)uint64_to_ptr(rpra[i].buf.pv
1812 + rpra[i].buf.len));
1813 }
1814
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001815}
1816
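/*
 * fastrpc_invoke_send() packages the invoke into an smq_msg (caller
 * pid/tid, context id tagged with the PD, remote handle, scalars, and the
 * physical page of the argument buffer) and transmits it over GLINK or,
 * on legacy targets, over SMD.
 */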
1817static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
1818 uint32_t kernel, uint32_t handle)
1819{
1820 struct smq_msg *msg = &ctx->msg;
1821 struct fastrpc_file *fl = ctx->fl;
1822 struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301823 int err = 0, len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001824
c_mtharue1a5ce12017-10-13 20:47:09 +05301825 VERIFY(err, NULL != channel_ctx->chan);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001826 if (err)
1827 goto bail;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301828 msg->pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001829 msg->tid = current->pid;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301830 if (fl->sessionid)
1831 msg->tid |= (1 << SESSION_ID_INDEX);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001832 if (kernel)
1833 msg->pid = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301834 msg->invoke.header.ctx = ctx->ctxid | fl->pd;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001835 msg->invoke.header.handle = handle;
1836 msg->invoke.header.sc = ctx->sc;
1837 msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
1838 msg->invoke.page.size = buf_page_size(ctx->used);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301839 if (fl->apps->glink) {
1840 if (fl->ssrcount != channel_ctx->ssrcount) {
1841 err = -ECONNRESET;
1842 goto bail;
1843 }
1844 VERIFY(err, channel_ctx->link.port_state ==
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001845 FASTRPC_LINK_CONNECTED);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301846 if (err)
1847 goto bail;
1848 err = glink_tx(channel_ctx->chan,
1849 (void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
1850 GLINK_TX_REQ_INTENT);
1851 } else {
1852 spin_lock(&fl->apps->hlock);
1853 len = smd_write((smd_channel_t *)
1854 channel_ctx->chan,
1855 msg, sizeof(*msg));
1856 spin_unlock(&fl->apps->hlock);
1857 VERIFY(err, len == sizeof(*msg));
1858 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001859 bail:
1860 return err;
1861}
1862
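/*
 * SMD receive path: drain response packets from the channel, recover the
 * context-table index from the response context id, sanity-check the
 * index, context id and magic, and wake up the waiting invoke context.
 */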
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301863static void fastrpc_smd_read_handler(int cid)
1864{
1865 struct fastrpc_apps *me = &gfa;
1866 struct smq_invoke_rsp rsp = {0};
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301867 int ret = 0, err = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301868 uint32_t index;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301869
1870 do {
1871 ret = smd_read_from_cb(me->channel[cid].chan, &rsp,
1872 sizeof(rsp));
1873 if (ret != sizeof(rsp))
1874 break;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301875
1876 index = (uint32_t)((rsp.ctx & FASTRPC_CTXID_MASK) >> 4);
1877 VERIFY(err, index < FASTRPC_CTX_MAX);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301878 if (err)
1879 goto bail;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301880
1881 VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
1882 if (err)
1883 goto bail;
1884
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05301885 VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp.ctx & ~3)) &&
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301886 me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
1887 if (err)
1888 goto bail;
1889
1890 context_notify_user(me->ctxtable[index], rsp.retval);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301891 } while (ret == sizeof(rsp));
1892bail:
1893 if (err)
1894 pr_err("adsprpc: invalid response or context\n");
1895
1896}
1897
1898static void smd_event_handler(void *priv, unsigned int event)
1899{
1900 struct fastrpc_apps *me = &gfa;
1901 int cid = (int)(uintptr_t)priv;
1902
1903 switch (event) {
1904 case SMD_EVENT_OPEN:
1905 complete(&me->channel[cid].workport);
1906 break;
1907 case SMD_EVENT_CLOSE:
1908 fastrpc_notify_drivers(me, cid);
1909 break;
1910 case SMD_EVENT_DATA:
1911 fastrpc_smd_read_handler(cid);
1912 break;
1913 }
1914}
1915
1916
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001917static void fastrpc_init(struct fastrpc_apps *me)
1918{
1919 int i;
1920
1921 INIT_HLIST_HEAD(&me->drivers);
Tharun Kumar Merugubcd6fbf2018-01-04 17:49:34 +05301922 INIT_HLIST_HEAD(&me->maps);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001923 spin_lock_init(&me->hlock);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301924 spin_lock_init(&me->ctxlock);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05301925 mutex_init(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001926 me->channel = &gcinfo[0];
1927 for (i = 0; i < NUM_CHANNELS; i++) {
1928 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301929 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001930 me->channel[i].sesscount = 0;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05301931 /* All channels are secure by default except CDSP */
1932 me->channel[i].secure = SECURE_CHANNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001933 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05301934	/* Set CDSP channel to non-secure */
1935 me->channel[CDSP_DOMAIN_ID].secure = NON_SECURE_CHANNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001936}
1937
1938static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1939
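/*
 * Core invoke path: restore an interrupted context or allocate a new one,
 * marshal the arguments (get_args), do pre-call cache maintenance, send
 * the message to the DSP, wait for the response, invalidate and unmarshal
 * the results (put_args), and update the optional perf counters.
 */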
1940static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
1941 uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07001942 struct fastrpc_ioctl_invoke_crc *inv)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001943{
c_mtharue1a5ce12017-10-13 20:47:09 +05301944 struct smq_invoke_ctx *ctx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001945 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
1946 int cid = fl->cid;
1947 int interrupted = 0;
1948 int err = 0;
Maria Yu757199c2017-09-22 16:05:49 +08001949 struct timespec invoket = {0};
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301950 int64_t *perf_counter = getperfcounter(fl, PERF_COUNT);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001951
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001952 if (fl->profile)
1953 getnstimeofday(&invoket);
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301954
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301955
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301956 VERIFY(err, fl->sctx != NULL);
1957 if (err)
1958 goto bail;
1959 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1960 if (err)
1961 goto bail;
1962
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001963 if (!kernel) {
1964 VERIFY(err, 0 == context_restore_interrupted(fl, inv,
1965 &ctx));
1966 if (err)
1967 goto bail;
1968 if (fl->sctx->smmu.faults)
1969 err = FASTRPC_ENOSUCH;
1970 if (err)
1971 goto bail;
1972 if (ctx)
1973 goto wait;
1974 }
1975
1976 VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
1977 if (err)
1978 goto bail;
1979
1980 if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301981 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_GETARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001982 VERIFY(err, 0 == get_args(kernel, ctx));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001983 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001984 if (err)
1985 goto bail;
1986 }
1987
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301988 if (!fl->sctx->smmu.coherent) {
1989 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001990 inv_args_pre(ctx);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301991 PERF_END);
1992 }
1993
1994 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_LINK),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001995 VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001996 PERF_END);
1997
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001998 if (err)
1999 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002000 wait:
2001 if (kernel)
2002 wait_for_completion(&ctx->work);
2003 else {
2004 interrupted = wait_for_completion_interruptible(&ctx->work);
2005 VERIFY(err, 0 == (err = interrupted));
2006 if (err)
2007 goto bail;
2008 }
Sathish Ambleyc432b502017-06-05 12:03:42 -07002009
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302010 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
Sathish Ambleyc432b502017-06-05 12:03:42 -07002011 if (!fl->sctx->smmu.coherent)
2012 inv_args(ctx);
2013 PERF_END);
2014
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002015 VERIFY(err, 0 == (err = ctx->retval));
2016 if (err)
2017 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002018
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302019 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_PUTARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002020 VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002021 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002022 if (err)
2023 goto bail;
2024 bail:
2025 if (ctx && interrupted == -ERESTARTSYS)
2026 context_save_interrupted(ctx);
2027 else if (ctx)
2028 context_free(ctx);
2029 if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
2030 err = ECONNRESET;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002031
2032 if (fl->profile && !interrupted) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302033 if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER) {
2034 int64_t *count = GET_COUNTER(perf_counter, PERF_INVOKE);
2035
2036 if (count)
2037 *count += getnstimediff(&invoket);
2038 }
2039 if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX) {
2040 int64_t *count = GET_COUNTER(perf_counter, PERF_COUNT);
2041
2042 if (count)
2043 *count = *count+1;
2044 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002045 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002046 return err;
2047}
2048
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302049static int fastrpc_get_adsp_session(char *name, int *session)
2050{
2051 struct fastrpc_apps *me = &gfa;
2052 int err = 0, i;
2053
2054 for (i = 0; i < NUM_SESSIONS; i++) {
2055 if (!me->channel[0].spd[i].spdname)
2056 continue;
2057 if (!strcmp(name, me->channel[0].spd[i].spdname))
2058 break;
2059 }
2060 VERIFY(err, i < NUM_SESSIONS);
2061 if (err)
2062 goto bail;
2063 *session = i;
2064bail:
2065 return err;
2066}
2067
2068static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl);
Sathish Ambley36849af2017-02-02 09:35:55 -08002069static int fastrpc_channel_open(struct fastrpc_file *fl);
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302070static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl);
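/*
 * fastrpc_init_process() creates or attaches to a remote process.
 * FASTRPC_INIT_ATTACH / FASTRPC_INIT_ATTACH_SENSORS attach to an existing
 * protection domain (pd 0 and 2 respectively); FASTRPC_INIT_CREATE spawns
 * a dynamic user PD, mapping the ELF file and donating kernel-allocated
 * memory to it; FASTRPC_INIT_CREATE_STATIC brings up a static PD such as
 * "audiopd", assigning the remote heap to the remote VM on first use.
 */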
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002071static int fastrpc_init_process(struct fastrpc_file *fl,
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002072 struct fastrpc_ioctl_init_attrs *uproc)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002073{
2074 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302075 struct fastrpc_apps *me = &gfa;
Sathish Ambleybae51902017-07-03 15:00:49 -07002076 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002077 struct fastrpc_ioctl_init *init = &uproc->init;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002078 struct smq_phy_page pages[1];
c_mtharue1a5ce12017-10-13 20:47:09 +05302079 struct fastrpc_mmap *file = NULL, *mem = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302080 struct fastrpc_buf *imem = NULL;
2081 unsigned long imem_dma_attr = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302082 char *proc_name = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002083
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302084 VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
Sathish Ambley36849af2017-02-02 09:35:55 -08002085 if (err)
2086 goto bail;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302087 if (init->flags == FASTRPC_INIT_ATTACH ||
2088 init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002089 remote_arg_t ra[1];
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302090 int tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002091
2092 ra[0].buf.pv = (void *)&tgid;
2093 ra[0].buf.len = sizeof(tgid);
2094 ioctl.inv.handle = 1;
2095 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
2096 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302097 ioctl.fds = NULL;
2098 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002099 ioctl.crc = NULL;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302100 if (init->flags == FASTRPC_INIT_ATTACH)
2101 fl->pd = 0;
2102 else if (init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
2103 fl->spdname = SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME;
2104 fl->pd = 2;
2105 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002106 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2107 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2108 if (err)
2109 goto bail;
2110 } else if (init->flags == FASTRPC_INIT_CREATE) {
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002111 remote_arg_t ra[6];
2112 int fds[6];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002113 int mflags = 0;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302114 int memlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002115 struct {
2116 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302117 unsigned int namelen;
2118 unsigned int filelen;
2119 unsigned int pageslen;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002120 int attrs;
2121 int siglen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002122 } inbuf;
2123
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302124 inbuf.pgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002125 inbuf.namelen = strlen(current->comm) + 1;
2126 inbuf.filelen = init->filelen;
2127 fl->pd = 1;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302128
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05302129 VERIFY(err, access_ok(0, (void __user *)init->file,
2130 init->filelen));
2131 if (err)
2132 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002133 if (init->filelen) {
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302134 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002135 VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
2136 init->file, init->filelen, mflags, &file));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302137 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002138 if (err)
2139 goto bail;
2140 }
2141 inbuf.pageslen = 1;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302142
2143 VERIFY(err, !init->mem);
2144 if (err) {
2145 err = -EINVAL;
2146 pr_err("adsprpc: %s: %s: ERROR: donated memory allocated in userspace\n",
2147 current->comm, __func__);
2148 goto bail;
2149 }
2150 memlen = ALIGN(max(1024*1024*3, (int)init->filelen * 4),
2151 1024*1024);
2152 imem_dma_attr = DMA_ATTR_EXEC_MAPPING |
2153 DMA_ATTR_NO_KERNEL_MAPPING |
2154 DMA_ATTR_FORCE_NON_COHERENT;
2155 err = fastrpc_buf_alloc(fl, memlen, imem_dma_attr, 0, 0, &imem);
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05302156 if (err)
2157 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302158 fl->init_mem = imem;
2159
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002160 inbuf.pageslen = 1;
2161 ra[0].buf.pv = (void *)&inbuf;
2162 ra[0].buf.len = sizeof(inbuf);
2163 fds[0] = 0;
2164
2165 ra[1].buf.pv = (void *)current->comm;
2166 ra[1].buf.len = inbuf.namelen;
2167 fds[1] = 0;
2168
2169 ra[2].buf.pv = (void *)init->file;
2170 ra[2].buf.len = inbuf.filelen;
2171 fds[2] = init->filefd;
2172
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302173 pages[0].addr = imem->phys;
2174 pages[0].size = imem->size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002175 ra[3].buf.pv = (void *)pages;
2176 ra[3].buf.len = 1 * sizeof(*pages);
2177 fds[3] = 0;
2178
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002179 inbuf.attrs = uproc->attrs;
2180 ra[4].buf.pv = (void *)&(inbuf.attrs);
2181 ra[4].buf.len = sizeof(inbuf.attrs);
2182 fds[4] = 0;
2183
2184 inbuf.siglen = uproc->siglen;
2185 ra[5].buf.pv = (void *)&(inbuf.siglen);
2186 ra[5].buf.len = sizeof(inbuf.siglen);
2187 fds[5] = 0;
2188
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002189 ioctl.inv.handle = 1;
2190 ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002191 if (uproc->attrs)
2192 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002193 ioctl.inv.pra = ra;
2194 ioctl.fds = fds;
c_mtharue1a5ce12017-10-13 20:47:09 +05302195 ioctl.attrs = NULL;
2196 ioctl.crc = NULL;
2197 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2198 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2199 if (err)
2200 goto bail;
2201 } else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
2202 remote_arg_t ra[3];
2203 uint64_t phys = 0;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302204 size_t size = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302205 int fds[3];
2206 struct {
2207 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302208 unsigned int namelen;
2209 unsigned int pageslen;
c_mtharue1a5ce12017-10-13 20:47:09 +05302210 } inbuf;
2211
2212 if (!init->filelen)
2213 goto bail;
2214
2215 proc_name = kzalloc(init->filelen, GFP_KERNEL);
2216 VERIFY(err, !IS_ERR_OR_NULL(proc_name));
2217 if (err)
2218 goto bail;
2219 VERIFY(err, 0 == copy_from_user((void *)proc_name,
2220 (void __user *)init->file, init->filelen));
2221 if (err)
2222 goto bail;
2223
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302224 fl->pd = 1;
c_mtharue1a5ce12017-10-13 20:47:09 +05302225 inbuf.pgid = current->tgid;
c_mtharu81a0aa72017-11-07 16:13:21 +05302226 inbuf.namelen = init->filelen;
c_mtharue1a5ce12017-10-13 20:47:09 +05302227 inbuf.pageslen = 0;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302228
2229 if (!strcmp(proc_name, "audiopd")) {
2230 fl->spdname = AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME;
2231 VERIFY(err, !fastrpc_mmap_remove_pdr(fl));
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05302232 if (err)
2233 goto bail;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302234 }
2235
c_mtharue1a5ce12017-10-13 20:47:09 +05302236 if (!me->staticpd_flags) {
2237 inbuf.pageslen = 1;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302238 mutex_lock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302239 VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
2240 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
2241 &mem));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302242 mutex_unlock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302243 if (err)
2244 goto bail;
2245 phys = mem->phys;
2246 size = mem->size;
2247 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302248 hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
2249 me->channel[fl->cid].rhvm.vmperm,
2250 me->channel[fl->cid].rhvm.vmcount));
c_mtharue1a5ce12017-10-13 20:47:09 +05302251 if (err) {
2252 pr_err("ADSPRPC: hyp_assign_phys fail err %d",
2253 err);
2254 pr_err("map->phys %llx, map->size %d\n",
2255 phys, (int)size);
2256 goto bail;
2257 }
2258 me->staticpd_flags = 1;
2259 }
2260
2261 ra[0].buf.pv = (void *)&inbuf;
2262 ra[0].buf.len = sizeof(inbuf);
2263 fds[0] = 0;
2264
2265 ra[1].buf.pv = (void *)proc_name;
2266 ra[1].buf.len = inbuf.namelen;
2267 fds[1] = 0;
2268
2269 pages[0].addr = phys;
2270 pages[0].size = size;
2271
2272 ra[2].buf.pv = (void *)pages;
2273 ra[2].buf.len = sizeof(*pages);
2274 fds[2] = 0;
2275 ioctl.inv.handle = 1;
2276
2277 ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
2278 ioctl.inv.pra = ra;
2279 ioctl.fds = NULL;
2280 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002281 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002282 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2283 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2284 if (err)
2285 goto bail;
2286 } else {
2287 err = -ENOTTY;
2288 }
2289bail:
c_mtharud91205a2017-11-07 16:01:06 +05302290 kfree(proc_name);
c_mtharue1a5ce12017-10-13 20:47:09 +05302291 if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
2292 me->staticpd_flags = 0;
2293 if (mem && err) {
2294 if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
2295 hyp_assign_phys(mem->phys, (uint64_t)mem->size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302296 me->channel[fl->cid].rhvm.vmid,
2297 me->channel[fl->cid].rhvm.vmcount,
2298 hlosvm, hlosvmperm, 1);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302299 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302300 fastrpc_mmap_free(mem, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302301 mutex_unlock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302302 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302303 if (file) {
2304 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302305 fastrpc_mmap_free(file, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302306 mutex_unlock(&fl->fl_map_mutex);
2307 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002308 return err;
2309}
2310
2311static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
2312{
2313 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07002314 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002315 remote_arg_t ra[1];
2316 int tgid = 0;
2317
Sathish Ambley36849af2017-02-02 09:35:55 -08002318 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
2319 if (err)
2320 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302321 VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002322 if (err)
2323 goto bail;
2324 tgid = fl->tgid;
2325 ra[0].buf.pv = (void *)&tgid;
2326 ra[0].buf.len = sizeof(tgid);
2327 ioctl.inv.handle = 1;
2328 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
2329 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302330 ioctl.fds = NULL;
2331 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002332 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002333 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2334 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2335bail:
2336 return err;
2337}
2338
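/*
 * fastrpc_mmap_on_dsp() asks the remote process (handle 1) to map the
 * given physical range and returns the remote-side address in *raddr.
 * ADSP_MMAP_HEAP_ADDR regions are additionally protected through an SCM
 * call, and ADSP_MMAP_REMOTE_HEAP_ADDR regions are hyp-assigned from HLOS
 * to the remote heap VM.
 */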
2339static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302340 uintptr_t va, uint64_t phys,
2341 size_t size, uintptr_t *raddr)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002342{
Sathish Ambleybae51902017-07-03 15:00:49 -07002343 struct fastrpc_ioctl_invoke_crc ioctl;
c_mtharu63ffc012017-11-16 15:26:56 +05302344 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002345 struct smq_phy_page page;
2346 int num = 1;
2347 remote_arg_t ra[3];
2348 int err = 0;
2349 struct {
2350 int pid;
2351 uint32_t flags;
2352 uintptr_t vaddrin;
2353 int num;
2354 } inargs;
2355 struct {
2356 uintptr_t vaddrout;
2357 } routargs;
2358
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302359 inargs.pid = fl->tgid;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302360 inargs.vaddrin = (uintptr_t)va;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002361 inargs.flags = flags;
2362 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
2363 ra[0].buf.pv = (void *)&inargs;
2364 ra[0].buf.len = sizeof(inargs);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302365 page.addr = phys;
2366 page.size = size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002367 ra[1].buf.pv = (void *)&page;
2368 ra[1].buf.len = num * sizeof(page);
2369
2370 ra[2].buf.pv = (void *)&routargs;
2371 ra[2].buf.len = sizeof(routargs);
2372
2373 ioctl.inv.handle = 1;
2374 if (fl->apps->compat)
2375 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
2376 else
2377 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
2378 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302379 ioctl.fds = NULL;
2380 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002381 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002382 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2383 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302384 *raddr = (uintptr_t)routargs.vaddrout;
c_mtharue1a5ce12017-10-13 20:47:09 +05302385 if (err)
2386 goto bail;
2387 if (flags == ADSP_MMAP_HEAP_ADDR) {
2388 struct scm_desc desc = {0};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002389
c_mtharue1a5ce12017-10-13 20:47:09 +05302390 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302391 desc.args[1] = phys;
2392 desc.args[2] = size;
c_mtharue1a5ce12017-10-13 20:47:09 +05302393 desc.arginfo = SCM_ARGS(3);
2394 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
2395 TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
2396 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302397 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302398 hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
2399 me->channel[fl->cid].rhvm.vmperm,
2400 me->channel[fl->cid].rhvm.vmcount));
c_mtharue1a5ce12017-10-13 20:47:09 +05302401 if (err)
2402 goto bail;
2403 }
2404bail:
2405 return err;
2406}
2407
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302408static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, uint64_t phys,
2409 size_t size, uint32_t flags)
c_mtharue1a5ce12017-10-13 20:47:09 +05302410{
2411 int err = 0;
c_mtharu63ffc012017-11-16 15:26:56 +05302412 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +05302413 int destVM[1] = {VMID_HLOS};
2414 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
2415
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302416 if (flags == ADSP_MMAP_HEAP_ADDR) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302417 struct fastrpc_ioctl_invoke_crc ioctl;
2418 struct scm_desc desc = {0};
2419 remote_arg_t ra[1];
2420 int err = 0;
2421 struct {
2422 uint8_t skey;
2423 } routargs;
2424
2425 ra[0].buf.pv = (void *)&routargs;
2426 ra[0].buf.len = sizeof(routargs);
2427
2428 ioctl.inv.handle = 1;
2429 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
2430 ioctl.inv.pra = ra;
2431 ioctl.fds = NULL;
2432 ioctl.attrs = NULL;
2433 ioctl.crc = NULL;
2434 if (fl == NULL)
2435 goto bail;
2436
2437 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2438 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2439 if (err)
2440 goto bail;
2441 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302442 desc.args[1] = phys;
2443 desc.args[2] = size;
c_mtharue1a5ce12017-10-13 20:47:09 +05302444 desc.args[3] = routargs.skey;
2445 desc.arginfo = SCM_ARGS(4);
2446 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
2447 TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302448 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2449 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302450 me->channel[fl->cid].rhvm.vmid,
2451 me->channel[fl->cid].rhvm.vmcount,
2452 destVM, destVMperm, 1));
c_mtharue1a5ce12017-10-13 20:47:09 +05302453 if (err)
2454 goto bail;
2455 }
2456
2457bail:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002458 return err;
2459}
2460
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302461static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl, uintptr_t raddr,
2462 uint64_t phys, size_t size, uint32_t flags)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002463{
Sathish Ambleybae51902017-07-03 15:00:49 -07002464 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002465 remote_arg_t ra[1];
2466 int err = 0;
2467 struct {
2468 int pid;
2469 uintptr_t vaddrout;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302470 size_t size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002471 } inargs;
2472
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302473 inargs.pid = fl->tgid;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302474 inargs.size = size;
2475 inargs.vaddrout = raddr;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002476 ra[0].buf.pv = (void *)&inargs;
2477 ra[0].buf.len = sizeof(inargs);
2478
2479 ioctl.inv.handle = 1;
2480 if (fl->apps->compat)
2481 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
2482 else
2483 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
2484 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302485 ioctl.fds = NULL;
2486 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002487 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002488 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2489 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
c_mtharue1a5ce12017-10-13 20:47:09 +05302490 if (err)
2491 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302492 if (flags == ADSP_MMAP_HEAP_ADDR ||
2493 flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2494 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, phys, size, flags));
c_mtharue1a5ce12017-10-13 20:47:09 +05302495 if (err)
2496 goto bail;
2497 }
2498bail:
2499 return err;
2500}
2501
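/*
 * On subsystem restart, walk the global map list and tear each remote
 * heap mapping down: release it on the DSP/TZ side, capture an ELF
 * ramdump of the region when ramdump collection is enabled, and free the
 * map. A map is put back on the list if the teardown fails.
 */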
2502static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
2503{
2504 struct fastrpc_mmap *match = NULL, *map = NULL;
2505 struct hlist_node *n = NULL;
2506 int err = 0, ret = 0;
2507 struct fastrpc_apps *me = &gfa;
2508 struct ramdump_segment *ramdump_segments_rh = NULL;
2509
2510 do {
2511 match = NULL;
2512 spin_lock(&me->hlock);
2513 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
2514 match = map;
2515 hlist_del_init(&map->hn);
2516 break;
2517 }
2518 spin_unlock(&me->hlock);
2519
2520 if (match) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302521 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match->phys,
2522 match->size, match->flags));
c_mtharue1a5ce12017-10-13 20:47:09 +05302523 if (err)
2524 goto bail;
2525 if (me->channel[0].ramdumpenabled) {
2526 ramdump_segments_rh = kcalloc(1,
2527 sizeof(struct ramdump_segment), GFP_KERNEL);
2528 if (ramdump_segments_rh) {
2529 ramdump_segments_rh->address =
2530 match->phys;
2531 ramdump_segments_rh->size = match->size;
2532 ret = do_elf_ramdump(
2533 me->channel[0].remoteheap_ramdump_dev,
2534 ramdump_segments_rh, 1);
2535 if (ret < 0)
2536 pr_err("ADSPRPC: unable to dump heap");
2537 kfree(ramdump_segments_rh);
2538 }
2539 }
c_mtharu7bd6a422017-10-17 18:15:37 +05302540 fastrpc_mmap_free(match, 0);
c_mtharue1a5ce12017-10-13 20:47:09 +05302541 }
2542 } while (match);
2543bail:
2544 if (err && match)
2545 fastrpc_mmap_add(match);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002546 return err;
2547}
2548
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302549static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl)
2550{
2551 struct fastrpc_apps *me = &gfa;
2552 int session = 0, err = 0;
2553
2554 VERIFY(err, !fastrpc_get_adsp_session(
2555 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
2556 if (err)
2557 goto bail;
2558 if (me->channel[fl->cid].spd[session].pdrcount !=
2559 me->channel[fl->cid].spd[session].prevpdrcount) {
2560 if (fastrpc_mmap_remove_ssr(fl))
2561 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
2562 me->channel[fl->cid].spd[session].prevpdrcount =
2563 me->channel[fl->cid].spd[session].pdrcount;
2564 }
2565 if (!me->channel[fl->cid].spd[session].ispdup) {
2566 VERIFY(err, 0);
2567 if (err) {
2568 err = -ENOTCONN;
2569 goto bail;
2570 }
2571 }
2572bail:
2573 return err;
2574}
2575
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002576static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302577 size_t len, struct fastrpc_mmap **ppmap);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002578
2579static void fastrpc_mmap_add(struct fastrpc_mmap *map);
2580
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05302581static inline void get_fastrpc_ioctl_mmap_64(
2582 struct fastrpc_ioctl_mmap_64 *mmap64,
2583 struct fastrpc_ioctl_mmap *immap)
2584{
2585 immap->fd = mmap64->fd;
2586 immap->flags = mmap64->flags;
2587 immap->vaddrin = (uintptr_t)mmap64->vaddrin;
2588 immap->size = mmap64->size;
2589}
2590
2591static inline void put_fastrpc_ioctl_mmap_64(
2592 struct fastrpc_ioctl_mmap_64 *mmap64,
2593 struct fastrpc_ioctl_mmap *immap)
2594{
2595 mmap64->vaddrout = (uint64_t)immap->vaddrout;
2596}
2597
2598static inline void get_fastrpc_ioctl_munmap_64(
2599 struct fastrpc_ioctl_munmap_64 *munmap64,
2600 struct fastrpc_ioctl_munmap *imunmap)
2601{
2602 imunmap->vaddrout = (uintptr_t)munmap64->vaddrout;
2603 imunmap->size = munmap64->size;
2604}
2605
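/*
 * Unmap ioctl: first look for a kernel-allocated ADSP_MMAP_ADD_PAGES
 * buffer matching the remote address and size and free it; otherwise
 * remove the matching fastrpc_mmap, unmap it on the DSP and free it,
 * restoring the map if the remote unmap fails.
 */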
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002606static int fastrpc_internal_munmap(struct fastrpc_file *fl,
2607 struct fastrpc_ioctl_munmap *ud)
2608{
2609 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302610 struct fastrpc_mmap *map = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302611 struct fastrpc_buf *rbuf = NULL, *free = NULL;
2612 struct hlist_node *n;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002613
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302614 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302615
2616 spin_lock(&fl->hlock);
2617 hlist_for_each_entry_safe(rbuf, n, &fl->remote_bufs, hn_rem) {
2618 if (rbuf->raddr && (rbuf->flags == ADSP_MMAP_ADD_PAGES)) {
2619 if ((rbuf->raddr == ud->vaddrout) &&
2620 (rbuf->size == ud->size)) {
2621 free = rbuf;
2622 break;
2623 }
2624 }
2625 }
2626 spin_unlock(&fl->hlock);
2627
2628 if (free) {
2629 VERIFY(err, !fastrpc_munmap_on_dsp(fl, free->raddr,
2630 free->phys, free->size, free->flags));
2631 if (err)
2632 goto bail;
2633 fastrpc_buf_free(rbuf, 0);
2634 mutex_unlock(&fl->map_mutex);
2635 return err;
2636 }
2637
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302638 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002639 VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302640 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002641 if (err)
2642 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302643 VERIFY(err, !fastrpc_munmap_on_dsp(fl, map->raddr,
2644 map->phys, map->size, map->flags));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002645 if (err)
2646 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302647 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302648 fastrpc_mmap_free(map, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302649 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002650bail:
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302651 if (err && map) {
2652 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002653 fastrpc_mmap_add(map);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302654 mutex_unlock(&fl->fl_map_mutex);
2655 }
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302656 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002657 return err;
2658}
2659
c_mtharu7bd6a422017-10-17 18:15:37 +05302660static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
2661 struct fastrpc_ioctl_munmap_fd *ud) {
2662 int err = 0;
2663 struct fastrpc_mmap *map = NULL;
2664
2665 VERIFY(err, (fl && ud));
2666 if (err)
2667 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302668 mutex_lock(&fl->fl_map_mutex);
Tharun Kumar Merugu09fc6152018-02-16 13:13:12 +05302669 if (fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
2670 pr_err("adsprpc: mapping not found to unmap %d va %llx %x\n",
c_mtharu7bd6a422017-10-17 18:15:37 +05302671 ud->fd, (unsigned long long)ud->va,
2672 (unsigned int)ud->len);
2673 err = -1;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302674 mutex_unlock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302675 goto bail;
2676 }
2677 if (map)
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302678 fastrpc_mmap_free(map, 0);
2679 mutex_unlock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302680bail:
2681 return err;
2682}
2683
2684
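/*
 * Map ioctl: for ADSP_MMAP_ADD_PAGES a kernel-owned DMA buffer is
 * allocated and mapped onto the DSP; for other flags the caller's
 * buffer/fd is wrapped in a fastrpc_mmap (reusing an existing map when
 * one is found) and mapped. The remote address is returned in vaddrout.
 */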
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002685static int fastrpc_internal_mmap(struct fastrpc_file *fl,
2686 struct fastrpc_ioctl_mmap *ud)
2687{
2688
c_mtharue1a5ce12017-10-13 20:47:09 +05302689 struct fastrpc_mmap *map = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302690 struct fastrpc_buf *rbuf = NULL;
2691 unsigned long dma_attr = 0;
2692 uintptr_t raddr = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002693 int err = 0;
2694
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302695 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302696 if (ud->flags == ADSP_MMAP_ADD_PAGES) {
2697 if (ud->vaddrin) {
2698 err = -EINVAL;
2699 pr_err("adsprpc: %s: %s: ERROR: adding user allocated pages is not supported\n",
2700 current->comm, __func__);
2701 goto bail;
2702 }
2703 dma_attr = DMA_ATTR_EXEC_MAPPING |
2704 DMA_ATTR_NO_KERNEL_MAPPING |
2705 DMA_ATTR_FORCE_NON_COHERENT;
2706 err = fastrpc_buf_alloc(fl, ud->size, dma_attr, ud->flags,
2707 1, &rbuf);
2708 if (err)
2709 goto bail;
2710 rbuf->virt = NULL;
2711 err = fastrpc_mmap_on_dsp(fl, ud->flags,
2712 (uintptr_t)rbuf->virt,
2713 rbuf->phys, rbuf->size, &raddr);
2714 if (err)
2715 goto bail;
2716 rbuf->raddr = raddr;
2717 } else {
2718 mutex_lock(&fl->fl_map_mutex);
2719 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
2720 ud->size, ud->flags, 1, &map)) {
2721 mutex_unlock(&fl->fl_map_mutex);
2722 mutex_unlock(&fl->map_mutex);
2723 return 0;
2724 }
2725 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
2726 (uintptr_t)ud->vaddrin, ud->size,
2727 ud->flags, &map));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302728 mutex_unlock(&fl->fl_map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302729 if (err)
2730 goto bail;
2731 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map->va,
2732 map->phys, map->size, &raddr));
2733 if (err)
2734 goto bail;
2735 map->raddr = raddr;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302736 }
Mohammed Nayeem Ur Rahman3ac5d322018-09-24 13:54:08 +05302737 ud->vaddrout = raddr;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002738 bail:
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302739 if (err && map) {
2740 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302741 fastrpc_mmap_free(map, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302742 mutex_unlock(&fl->fl_map_mutex);
2743 }
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302744 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002745 return err;
2746}
2747
2748static void fastrpc_channel_close(struct kref *kref)
2749{
2750 struct fastrpc_apps *me = &gfa;
2751 struct fastrpc_channel_ctx *ctx;
2752 int cid;
2753
2754 ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
2755 cid = ctx - &gcinfo[0];
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05302756 if (!me->glink)
2757 smd_close(ctx->chan);
2758 else
2759 fastrpc_glink_close(ctx->chan, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05302760 ctx->chan = NULL;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302761 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002762 pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
2763 MAJOR(me->dev_no), cid);
2764}
2765
2766static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
2767
2768static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302769 int secure, int sharedcb, struct fastrpc_session_ctx **session)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002770{
2771 struct fastrpc_apps *me = &gfa;
2772 int idx = 0, err = 0;
2773
2774 if (chan->sesscount) {
2775 for (idx = 0; idx < chan->sesscount; ++idx) {
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302776 if ((sharedcb && chan->session[idx].smmu.sharedcb) ||
2777 (!chan->session[idx].used &&
2778 chan->session[idx].smmu.secure
2779 == secure && !sharedcb)) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002780 chan->session[idx].used = 1;
2781 break;
2782 }
2783 }
2784 VERIFY(err, idx < chan->sesscount);
2785 if (err)
2786 goto bail;
2787 chan->session[idx].smmu.faults = 0;
2788 } else {
2789 VERIFY(err, me->dev != NULL);
2790 if (err)
2791 goto bail;
2792 chan->session[0].dev = me->dev;
c_mtharue1a5ce12017-10-13 20:47:09 +05302793 chan->session[0].smmu.dev = me->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002794 }
2795
2796 *session = &chan->session[idx];
2797 bail:
2798 return err;
2799}
2800
c_mtharue1a5ce12017-10-13 20:47:09 +05302801static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
2802 size_t size)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002803{
2804 if (glink_queue_rx_intent(h, NULL, size))
2805 return false;
2806 return true;
2807}
2808
c_mtharue1a5ce12017-10-13 20:47:09 +05302809static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002810 const void *pkt_priv, const void *ptr)
2811{
2812}
2813
c_mtharue1a5ce12017-10-13 20:47:09 +05302814static void fastrpc_glink_notify_rx(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002815 const void *pkt_priv, const void *ptr, size_t size)
2816{
2817 struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302818 struct fastrpc_apps *me = &gfa;
2819 uint32_t index;
c_mtharufdac6892017-10-12 13:09:01 +05302820 int err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002821
c_mtharufdac6892017-10-12 13:09:01 +05302822 VERIFY(err, (rsp && size >= sizeof(*rsp)));
2823 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302824 goto bail;
2825
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302826 index = (uint32_t)((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
2827 VERIFY(err, index < FASTRPC_CTX_MAX);
c_mtharufdac6892017-10-12 13:09:01 +05302828 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302829 goto bail;
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302830
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302831 VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
2832 if (err)
2833 goto bail;
2834
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302835 VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp->ctx & ~3)) &&
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302836 me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
2837 if (err)
2838 goto bail;
2839
2840 context_notify_user(me->ctxtable[index], rsp->retval);
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302841bail:
c_mtharufdac6892017-10-12 13:09:01 +05302842 if (err)
2843 pr_err("adsprpc: invalid response or context\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002844 glink_rx_done(handle, ptr, true);
2845}
2846
c_mtharue1a5ce12017-10-13 20:47:09 +05302847static void fastrpc_glink_notify_state(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002848 unsigned int event)
2849{
2850 struct fastrpc_apps *me = &gfa;
2851 int cid = (int)(uintptr_t)priv;
2852 struct fastrpc_glink_info *link;
2853
2854 if (cid < 0 || cid >= NUM_CHANNELS)
2855 return;
2856 link = &me->channel[cid].link;
2857 switch (event) {
2858 case GLINK_CONNECTED:
2859 link->port_state = FASTRPC_LINK_CONNECTED;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302860 complete(&me->channel[cid].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002861 break;
2862 case GLINK_LOCAL_DISCONNECTED:
2863 link->port_state = FASTRPC_LINK_DISCONNECTED;
2864 break;
2865 case GLINK_REMOTE_DISCONNECTED:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002866 break;
2867 default:
2868 break;
2869 }
2870}
2871
2872static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
2873 struct fastrpc_session_ctx **session)
2874{
2875 int err = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302876 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002877
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302878 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002879 if (!*session)
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302880 err = fastrpc_session_alloc_locked(chan, secure, 0, session);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302881 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002882 return err;
2883}
2884
2885static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
2886 struct fastrpc_session_ctx *session)
2887{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302888 struct fastrpc_apps *me = &gfa;
2889
2890 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002891 session->used = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302892 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002893}
2894
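/*
 * Releases all per-fd state: tells the DSP to tear down the remote
 * process, unlinks the file from the drivers list, then frees the init
 * memory, pending contexts, cached and remote buffers, remaining mmaps
 * and perf entries, drops the channel reference taken at open time and
 * returns the SMMU session(s).
 */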
2895static int fastrpc_file_free(struct fastrpc_file *fl)
2896{
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302897 struct hlist_node *n = NULL;
Tharun Kumar Merugu3e966762018-04-04 10:56:44 +05302898 struct fastrpc_mmap *map = NULL, *lmap = NULL;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302899 struct fastrpc_perf *perf = NULL, *fperf = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002900 int cid;
2901
2902 if (!fl)
2903 return 0;
2904 cid = fl->cid;
2905
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302906 (void)fastrpc_release_current_dsp_process(fl);
2907
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002908 spin_lock(&fl->apps->hlock);
2909 hlist_del_init(&fl->hn);
2910 spin_unlock(&fl->apps->hlock);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05302911 kfree(fl->debug_buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002912
Sathish Ambleyd7fbcbb2017-03-08 10:55:48 -08002913 if (!fl->sctx) {
2914 kfree(fl);
2915 return 0;
2916 }
tharun kumar9f899ea2017-07-03 17:07:03 +05302917 spin_lock(&fl->hlock);
2918 fl->file_close = 1;
2919 spin_unlock(&fl->hlock);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302920 if (!IS_ERR_OR_NULL(fl->init_mem))
2921 fastrpc_buf_free(fl->init_mem, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002922 fastrpc_context_list_dtor(fl);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302923 fastrpc_cached_buf_list_free(fl);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302924 mutex_lock(&fl->fl_map_mutex);
Tharun Kumar Merugu3e966762018-04-04 10:56:44 +05302925 do {
2926 lmap = NULL;
2927 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
2928 hlist_del_init(&map->hn);
2929 lmap = map;
2930 break;
2931 }
2932 fastrpc_mmap_free(lmap, 1);
2933 } while (lmap);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302934 mutex_unlock(&fl->fl_map_mutex);
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05302935 if (fl->refcount && (fl->ssrcount == fl->apps->channel[cid].ssrcount))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002936 kref_put_mutex(&fl->apps->channel[cid].kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302937 fastrpc_channel_close, &fl->apps->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002938 if (fl->sctx)
2939 fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
2940 if (fl->secsctx)
2941 fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302942
2943 mutex_lock(&fl->perf_mutex);
2944 do {
2945 struct hlist_node *pn = NULL;
2946
2947 fperf = NULL;
2948 hlist_for_each_entry_safe(perf, pn, &fl->perf, hn) {
2949 hlist_del_init(&perf->hn);
2950 fperf = perf;
2951 break;
2952 }
2953 kfree(fperf);
2954 } while (fperf);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302955 fastrpc_remote_buf_list_free(fl);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302956 mutex_unlock(&fl->perf_mutex);
2957 mutex_destroy(&fl->perf_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302958 mutex_destroy(&fl->fl_map_mutex);
Tharun Kumar Merugu8714e642018-05-17 15:21:08 +05302959 mutex_destroy(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002960 kfree(fl);
2961 return 0;
2962}
2963
2964static int fastrpc_device_release(struct inode *inode, struct file *file)
2965{
2966 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2967
2968 if (fl) {
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302969 if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
2970 pm_qos_remove_request(&fl->pm_qos_req);
Sathish Ambley1ca68232017-01-19 10:32:55 -08002971 if (fl->debugfs_file != NULL)
2972 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002973 fastrpc_file_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +05302974 file->private_data = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002975 }
2976 return 0;
2977}
2978
2979static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
2980 void *priv)
2981{
2982 struct fastrpc_apps *me = &gfa;
2983 int cid = (int)((uintptr_t)priv);
2984 struct fastrpc_glink_info *link;
2985
2986 if (cid < 0 || cid >= NUM_CHANNELS)
2987 return;
2988
2989 link = &me->channel[cid].link;
2990 switch (cb_info->link_state) {
2991 case GLINK_LINK_STATE_UP:
2992 link->link_state = FASTRPC_LINK_STATE_UP;
2993 complete(&me->channel[cid].work);
2994 break;
2995 case GLINK_LINK_STATE_DOWN:
2996 link->link_state = FASTRPC_LINK_STATE_DOWN;
2997 break;
2998 default:
2999 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
3000 break;
3001 }
3002}
3003
3004static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
3005{
3006 int err = 0;
3007 struct fastrpc_glink_info *link;
3008
3009 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3010 if (err)
3011 goto bail;
3012
3013 link = &me->channel[cid].link;
3014 if (link->link_notify_handle != NULL)
3015 goto bail;
3016
3017 link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
3018 link->link_notify_handle = glink_register_link_state_cb(
3019 &link->link_info,
3020 (void *)((uintptr_t)cid));
3021 VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
3022 if (err) {
3023 link->link_notify_handle = NULL;
3024 goto bail;
3025 }
3026 VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
3027 RPC_TIMEOUT));
3028bail:
3029 return err;
3030}
3031
3032static void fastrpc_glink_close(void *chan, int cid)
3033{
3034 int err = 0;
3035 struct fastrpc_glink_info *link;
3036
3037 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3038 if (err)
3039 return;
3040 link = &gfa.channel[cid].link;
3041
c_mtharu314a4202017-11-15 22:09:17 +05303042 if (link->port_state == FASTRPC_LINK_CONNECTED ||
3043 link->port_state == FASTRPC_LINK_REMOTE_DISCONNECTING) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003044 link->port_state = FASTRPC_LINK_DISCONNECTING;
3045 glink_close(chan);
3046 }
3047}
3048
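/*
 * Opens the glink port for a channel. Requires the link to be up and the
 * port to be fully disconnected, installs the rx/tx-done/state/intent
 * callbacks and stores the handle in me->channel[cid].chan; the CONNECTED
 * transition is reported later through fastrpc_glink_notify_state().
 */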
3049static int fastrpc_glink_open(int cid)
3050{
3051 int err = 0;
3052 void *handle = NULL;
3053 struct fastrpc_apps *me = &gfa;
3054 struct glink_open_config *cfg;
3055 struct fastrpc_glink_info *link;
3056
3057 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3058 if (err)
3059 goto bail;
3060 link = &me->channel[cid].link;
3061 cfg = &me->channel[cid].link.cfg;
3062 VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
3063 if (err)
3064 goto bail;
3065
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05303066 VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
3067 if (err)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003068 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003069
3070 link->port_state = FASTRPC_LINK_CONNECTING;
3071 cfg->priv = (void *)(uintptr_t)cid;
3072 cfg->edge = gcinfo[cid].link.link_info.edge;
3073 cfg->transport = gcinfo[cid].link.link_info.transport;
3074 cfg->name = FASTRPC_GLINK_GUID;
3075 cfg->notify_rx = fastrpc_glink_notify_rx;
3076 cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
3077 cfg->notify_state = fastrpc_glink_notify_state;
3078 cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
3079 handle = glink_open(cfg);
3080 VERIFY(err, !IS_ERR_OR_NULL(handle));
c_mtharu6e1d26b2017-10-09 16:05:24 +05303081 if (err) {
3082 if (link->port_state == FASTRPC_LINK_CONNECTING)
3083 link->port_state = FASTRPC_LINK_DISCONNECTED;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003084 goto bail;
c_mtharu6e1d26b2017-10-09 16:05:24 +05303085 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003086 me->channel[cid].chan = handle;
3087bail:
3088 return err;
3089}
3090
Sathish Ambley1ca68232017-01-19 10:32:55 -08003091static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
3092{
3093 filp->private_data = inode->i_private;
3094 return 0;
3095}
3096
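/*
 * Debugfs read handler. The "global" node (no fastrpc_file attached)
 * dumps per-channel state, the CMA heap range and the global maps; a
 * per-process node dumps that file's session settings, maps, cached
 * buffers and pending/interrupted contexts. Output is capped at
 * DEBUGFS_SIZE bytes.
 */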
3097static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
3098 size_t count, loff_t *position)
3099{
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303100 struct fastrpc_apps *me = &gfa;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003101 struct fastrpc_file *fl = filp->private_data;
3102 struct hlist_node *n;
c_mtharue1a5ce12017-10-13 20:47:09 +05303103 struct fastrpc_buf *buf = NULL;
3104 struct fastrpc_mmap *map = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303105 struct fastrpc_mmap *gmaps = NULL;
c_mtharue1a5ce12017-10-13 20:47:09 +05303106 struct smq_invoke_ctx *ictx = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303107 struct fastrpc_channel_ctx *chan = NULL;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003108 unsigned int len = 0;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303109 int i, j, sess_used = 0, ret = 0;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003110 char *fileinfo = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303111 char single_line[UL_SIZE] = "----------------";
3112 char title[UL_SIZE] = "=========================";
Sathish Ambley1ca68232017-01-19 10:32:55 -08003113
3114 fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
3115 if (!fileinfo)
3116 goto bail;
3117 if (fl == NULL) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303118 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3119 "\n%s %s %s\n", title, " CHANNEL INFO ", title);
3120 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3121 "%-8s|%-9s|%-9s|%-14s|%-9s|%-13s\n",
3122 "susbsys", "refcount", "sesscount", "issubsystemup",
3123 "ssrcount", "session_used");
3124 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3125 "-%s%s%s%s-\n", single_line, single_line,
3126 single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003127 for (i = 0; i < NUM_CHANNELS; i++) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303128 sess_used = 0;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003129 chan = &gcinfo[i];
3130 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303131 DEBUGFS_SIZE - len, "%-8s", chan->subsys);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003132 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303133 DEBUGFS_SIZE - len, "|%-9d",
3134 chan->kref.refcount.counter);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303135 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303136 DEBUGFS_SIZE - len, "|%-9d",
3137 chan->sesscount);
3138 len += scnprintf(fileinfo + len,
3139 DEBUGFS_SIZE - len, "|%-14d",
3140 chan->issubsystemup);
3141 len += scnprintf(fileinfo + len,
3142 DEBUGFS_SIZE - len, "|%-9d",
3143 chan->ssrcount);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003144 for (j = 0; j < chan->sesscount; j++) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303145 sess_used += chan->session[j].used;
3146 }
3147 len += scnprintf(fileinfo + len,
3148 DEBUGFS_SIZE - len, "|%-13d\n", sess_used);
3149
3150 }
3151 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3152 "\n%s%s%s\n", "=============",
3153 " CMA HEAP ", "==============");
3154 len += scnprintf(fileinfo + len,
3155 DEBUGFS_SIZE - len, "%-20s|%-20s\n", "addr", "size");
3156 len += scnprintf(fileinfo + len,
3157 DEBUGFS_SIZE - len, "--%s%s---\n",
3158 single_line, single_line);
3159 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3160 "0x%-18llX", me->range.addr);
3161 len += scnprintf(fileinfo + len,
3162 DEBUGFS_SIZE - len, "|0x%-18llX\n", me->range.size);
3163 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3164 "\n==========%s %s %s===========\n",
3165 title, " GMAPS ", title);
3166 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3167 "%-20s|%-20s|%-20s|%-20s\n",
3168 "fd", "phys", "size", "va");
3169 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3170 "%s%s%s%s%s\n", single_line, single_line,
3171 single_line, single_line, single_line);
3172 hlist_for_each_entry_safe(gmaps, n, &me->maps, hn) {
3173 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3174 "%-20d|0x%-18llX|0x%-18X|0x%-20lX\n\n",
3175 gmaps->fd, gmaps->phys,
3176 (uint32_t)gmaps->size,
3177 gmaps->va);
3178 }
3179 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3180 "%-20s|%-20s|%-20s|%-20s\n",
3181 "len", "refs", "raddr", "flags");
3182 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3183 "%s%s%s%s%s\n", single_line, single_line,
3184 single_line, single_line, single_line);
3185 hlist_for_each_entry_safe(gmaps, n, &me->maps, hn) {
3186 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3187 "0x%-18X|%-20d|%-20lu|%-20u\n",
3188 (uint32_t)gmaps->len, gmaps->refs,
3189 gmaps->raddr, gmaps->flags);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003190 }
3191 } else {
3192 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303193 "\n%s %13s %d\n", "cid", ":", fl->cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003194 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303195 "%s %12s %d\n", "tgid", ":", fl->tgid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003196 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303197 "%s %7s %d\n", "sessionid", ":", fl->sessionid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003198 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303199 "%s %8s %d\n", "ssrcount", ":", fl->ssrcount);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303200 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303201 "%s %8s %d\n", "refcount", ":", fl->refcount);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003202 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303203 "%s %14s %d\n", "pd", ":", fl->pd);
3204 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3205 "%s %9s %s\n", "spdname", ":", fl->spdname);
3206 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3207 "%s %6s %d\n", "file_close", ":", fl->file_close);
3208 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3209 "%s %8s %d\n", "sharedcb", ":", fl->sharedcb);
3210 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3211 "%s %9s %d\n", "profile", ":", fl->profile);
3212 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3213 "%s %3s %d\n", "smmu.coherent", ":",
3214 fl->sctx->smmu.coherent);
3215 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3216 "%s %4s %d\n", "smmu.enabled", ":",
3217 fl->sctx->smmu.enabled);
3218 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3219 "%s %9s %d\n", "smmu.cb", ":", fl->sctx->smmu.cb);
3220 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3221 "%s %5s %d\n", "smmu.secure", ":",
3222 fl->sctx->smmu.secure);
3223 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3224 "%s %5s %d\n", "smmu.faults", ":",
3225 fl->sctx->smmu.faults);
3226 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3227 "%s %s %d\n", "link.link_state",
3228 ":", *&me->channel[fl->cid].link.link_state);
3229
3230 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3231 "\n=======%s %s %s======\n", title,
3232 " LIST OF MAPS ", title);
3233
3234 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3235 "%-20s|%-20s|%-20s\n", "va", "phys", "size");
3236 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3237 "%s%s%s%s%s\n",
3238 single_line, single_line, single_line,
3239 single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003240 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303241 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3242 "0x%-20lX|0x%-20llX|0x%-20zu\n\n",
3243 map->va, map->phys,
3244 map->size);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003245 }
3246 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303247 "%-20s|%-20s|%-20s|%-20s\n",
3248 "len", "refs",
3249 "raddr", "uncached");
3250 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3251 "%s%s%s%s%s\n",
3252 single_line, single_line, single_line,
3253 single_line, single_line);
3254 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
3255 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3256 "%-20zu|%-20d|0x%-20lX|%-20d\n\n",
3257 map->len, map->refs, map->raddr,
3258 map->uncached);
3259 }
3260 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3261 "%-20s|%-20s\n", "secure", "attr");
3262 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3263 "%s%s%s%s%s\n",
3264 single_line, single_line, single_line,
3265 single_line, single_line);
3266 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
3267 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3268 "%-20d|0x%-20lX\n\n",
3269 map->secure, map->attr);
3270 }
3271 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303272 "%s %d\n\n",
3273 "KERNEL MEMORY ALLOCATION:", 1);
3274 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303275 "\n======%s %s %s======\n", title,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303276 " LIST OF CACHED BUFS ", title);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303277 spin_lock(&fl->hlock);
3278 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303279 "%-19s|%-19s|%-19s|%-19s\n",
3280 "virt", "phys", "size", "dma_attr");
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303281 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3282 "%s%s%s%s%s\n", single_line, single_line,
3283 single_line, single_line, single_line);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303284 hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303285 len += scnprintf(fileinfo + len,
3286 DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303287 "0x%-17p|0x%-17llX|%-19zu|0x%-17lX\n",
3288 buf->virt, (uint64_t)buf->phys, buf->size,
3289 buf->dma_attr);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303290 }
3291 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3292 "\n%s %s %s\n", title,
3293 " LIST OF PENDING SMQCONTEXTS ", title);
3294
3295 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3296 "%-20s|%-10s|%-10s|%-10s|%-20s\n",
3297 "sc", "pid", "tgid", "used", "ctxid");
3298 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3299 "%s%s%s%s%s\n", single_line, single_line,
3300 single_line, single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003301 hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
3302 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303303 "0x%-18X|%-10d|%-10d|%-10zu|0x%-20llX\n\n",
3304 ictx->sc, ictx->pid, ictx->tgid,
3305 ictx->used, ictx->ctxid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003306 }
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303307
Sathish Ambley1ca68232017-01-19 10:32:55 -08003308 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303309 "\n%s %s %s\n", title,
3310 " LIST OF INTERRUPTED SMQCONTEXTS ", title);
3311
3312 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3313 "%-20s|%-10s|%-10s|%-10s|%-20s\n",
3314 "sc", "pid", "tgid", "used", "ctxid");
3315 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3316 "%s%s%s%s%s\n", single_line, single_line,
3317 single_line, single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003318 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303319 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3320 "%-20u|%-20d|%-20d|%-20zu|0x%-20llX\n\n",
3321 ictx->sc, ictx->pid, ictx->tgid,
3322 ictx->used, ictx->ctxid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003323 }
3324 spin_unlock(&fl->hlock);
3325 }
3326 if (len > DEBUGFS_SIZE)
3327 len = DEBUGFS_SIZE;
3328 ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
3329 kfree(fileinfo);
3330bail:
3331 return ret;
3332}
3333
3334static const struct file_operations debugfs_fops = {
3335 .open = fastrpc_debugfs_open,
3336 .read = fastrpc_debugfs_read,
3337};
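
/*
 * Brings up the transport for fl->cid on first use. Under smd_mutex this
 * takes (or initializes) the channel reference, opens either the glink
 * port or the legacy SMD channel, waits for the port-open completion,
 * queues two initial rx intents on glink and, for the ADSP channel, cleans
 * up stale remote-heap mappings the first time it reopens after an SSR.
 */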
Sathish Ambley36849af2017-02-02 09:35:55 -08003338static int fastrpc_channel_open(struct fastrpc_file *fl)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003339{
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003340 struct fastrpc_apps *me = &gfa;
Sathish Ambley36849af2017-02-02 09:35:55 -08003341 int cid, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003342
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303343 mutex_lock(&me->smd_mutex);
3344
Sathish Ambley36849af2017-02-02 09:35:55 -08003345 VERIFY(err, fl && fl->sctx);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003346 if (err)
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303347 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08003348 cid = fl->cid;
c_mtharu314a4202017-11-15 22:09:17 +05303349 VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
3350 if (err)
3351 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303352 if (me->channel[cid].ssrcount !=
3353 me->channel[cid].prevssrcount) {
3354 if (!me->channel[cid].issubsystemup) {
3355 VERIFY(err, 0);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303356 if (err) {
3357 err = -ENOTCONN;
c_mtharue1a5ce12017-10-13 20:47:09 +05303358 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303359 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303360 }
3361 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003362 fl->ssrcount = me->channel[cid].ssrcount;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303363 fl->refcount = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003364 if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
c_mtharue1a5ce12017-10-13 20:47:09 +05303365 (me->channel[cid].chan == NULL)) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303366 if (me->glink) {
3367 VERIFY(err, 0 == fastrpc_glink_register(cid, me));
3368 if (err)
3369 goto bail;
3370 VERIFY(err, 0 == fastrpc_glink_open(cid));
3371 } else {
3372 VERIFY(err, !smd_named_open_on_edge(FASTRPC_SMD_GUID,
3373 gcinfo[cid].channel,
3374 (smd_channel_t **)&me->channel[cid].chan,
3375 (void *)(uintptr_t)cid,
3376 smd_event_handler));
3377 }
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05303378 VERIFY(err,
3379 wait_for_completion_timeout(&me->channel[cid].workport,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003380 RPC_TIMEOUT));
3381 if (err) {
c_mtharue1a5ce12017-10-13 20:47:09 +05303382 me->channel[cid].chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003383 goto bail;
3384 }
3385 kref_init(&me->channel[cid].kref);
3386 pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
3387 MAJOR(me->dev_no), cid);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303388 if (me->glink) {
3389 err = glink_queue_rx_intent(me->channel[cid].chan, NULL,
3390 FASTRPC_GLINK_INTENT_LEN);
3391 err |= glink_queue_rx_intent(me->channel[cid].chan,
3392 NULL, FASTRPC_GLINK_INTENT_LEN);
3393 if (err)
3394 pr_warn("adsprpc: initial intent fail for %d err %d\n",
3395 cid, err);
3396 }
Tharun Kumar Merugud86fc8c2018-01-04 16:35:31 +05303397 if (cid == 0 && me->channel[cid].ssrcount !=
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003398 me->channel[cid].prevssrcount) {
c_mtharue1a5ce12017-10-13 20:47:09 +05303399 if (fastrpc_mmap_remove_ssr(fl))
3400 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003401 me->channel[cid].prevssrcount =
3402 me->channel[cid].ssrcount;
3403 }
3404 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003405
3406bail:
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303407 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003408 return err;
3409}
3410
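/*
 * open() handler for the fastrpc device nodes. Records which minor
 * (secure or non-secure) was opened, allocates the per-fd fastrpc_file,
 * creates its debugfs entry and links it into the global drivers list;
 * the channel itself is only attached on the first GETINFO ioctl.
 */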
Sathish Ambley36849af2017-02-02 09:35:55 -08003411static int fastrpc_device_open(struct inode *inode, struct file *filp)
3412{
3413 int err = 0;
Sathish Ambley567012b2017-03-06 11:55:04 -08003414 struct dentry *debugfs_file;
c_mtharue1a5ce12017-10-13 20:47:09 +05303415 struct fastrpc_file *fl = NULL;
Sathish Ambley36849af2017-02-02 09:35:55 -08003416 struct fastrpc_apps *me = &gfa;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303417 char strpid[PID_SIZE];
3418 int buf_size = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003419
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303420 /*
3421 * Indicates which device node was opened:
3422 * MINOR_NUM_DEV or MINOR_NUM_SECURE_DEV
3423 */
3424 int dev_minor = MINOR(inode->i_rdev);
3425
3426 VERIFY(err, ((dev_minor == MINOR_NUM_DEV) ||
3427 (dev_minor == MINOR_NUM_SECURE_DEV)));
3428 if (err) {
3429 pr_err("adsprpc: Invalid dev minor num %d\n", dev_minor);
3430 return err;
3431 }
3432
c_mtharue1a5ce12017-10-13 20:47:09 +05303433 VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
Sathish Ambley36849af2017-02-02 09:35:55 -08003434 if (err)
3435 return err;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303436 snprintf(strpid, PID_SIZE, "%d", current->pid);
3437 buf_size = strlen(current->comm) + strlen("_") + strlen(strpid) + 1;
3438 fl->debug_buf = kzalloc(buf_size, GFP_KERNEL);
	if (!fl->debug_buf) {
		kfree(fl);
		return -ENOMEM;
	}
3439 snprintf(fl->debug_buf, buf_size, "%.10s%s%d",
3440 current->comm, "_", current->pid);
3441 debugfs_file = debugfs_create_file(fl->debug_buf, 0644,
3442 debugfs_root, fl, &debugfs_fops);
3443
Sathish Ambley36849af2017-02-02 09:35:55 -08003444 context_list_ctor(&fl->clst);
3445 spin_lock_init(&fl->hlock);
3446 INIT_HLIST_HEAD(&fl->maps);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303447 INIT_HLIST_HEAD(&fl->perf);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303448 INIT_HLIST_HEAD(&fl->cached_bufs);
3449 INIT_HLIST_HEAD(&fl->remote_bufs);
Sathish Ambley36849af2017-02-02 09:35:55 -08003450 INIT_HLIST_NODE(&fl->hn);
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05303451 fl->sessionid = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003452 fl->tgid = current->tgid;
3453 fl->apps = me;
3454 fl->mode = FASTRPC_MODE_SERIAL;
3455 fl->cid = -1;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303456 fl->dev_minor = dev_minor;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303457 fl->init_mem = NULL;
Sathish Ambley567012b2017-03-06 11:55:04 -08003458 if (debugfs_file != NULL)
3459 fl->debugfs_file = debugfs_file;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303460 fl->qos_request = 0;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303461 fl->refcount = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003462 filp->private_data = fl;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05303463 mutex_init(&fl->map_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303464 mutex_init(&fl->fl_map_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08003465 spin_lock(&me->hlock);
3466 hlist_add_head(&fl->hn, &me->drivers);
3467 spin_unlock(&me->hlock);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303468 mutex_init(&fl->perf_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08003469 return 0;
3470}
3471
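/*
 * First FASTRPC_IOCTL_GETINFO on an fd binds it to a channel id: the
 * non-secure device node is refused access to channels marked
 * SECURE_CHANNEL, after which an SMMU session is allocated for the file.
 * *info reports whether SMMU is enabled for that session.
 */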
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003472static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
3473{
3474 int err = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003475 uint32_t cid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003476
c_mtharue1a5ce12017-10-13 20:47:09 +05303477 VERIFY(err, fl != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003478 if (err)
3479 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08003480 if (fl->cid == -1) {
3481 cid = *info;
3482 VERIFY(err, cid < NUM_CHANNELS);
3483 if (err)
3484 goto bail;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303485 /* Check to see if the device node is non-secure */
zhaochenfc798572018-08-17 15:32:37 +08003486 if (fl->dev_minor == MINOR_NUM_DEV &&
3487 fl->apps->secure_flag == true) {
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303488 /*
3489 * For a non-secure device node, check that the channel
3490 * allows non-secure access. If not, bail: the session
3491 * will not start, cid will remain -1 and the client
3492 * will not be able to invoke any other methods without
3493 * failure.
3494 */
3495 if (fl->apps->channel[cid].secure == SECURE_CHANNEL) {
3496 err = -EPERM;
3497 pr_err("adsprpc: GetInfo failed dev %d, cid %d, secure %d\n",
3498 fl->dev_minor, cid,
3499 fl->apps->channel[cid].secure);
3500 goto bail;
3501 }
3502 }
Sathish Ambley36849af2017-02-02 09:35:55 -08003503 fl->cid = cid;
3504 fl->ssrcount = fl->apps->channel[cid].ssrcount;
3505 VERIFY(err, !fastrpc_session_alloc_locked(
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303506 &fl->apps->channel[cid], 0, fl->sharedcb, &fl->sctx));
Sathish Ambley36849af2017-02-02 09:35:55 -08003507 if (err)
3508 goto bail;
3509 }
Tharun Kumar Merugu80be7d62017-08-02 11:03:22 +05303510 VERIFY(err, fl->sctx != NULL);
3511 if (err)
3512 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003513 *info = (fl->sctx->smmu.enabled ? 1 : 0);
3514bail:
3515 return err;
3516}
3517
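/*
 * Handles FASTRPC_IOCTL_CONTROL requests: FASTRPC_CONTROL_LATENCY adds or
 * updates a PM QoS CPU/DMA latency vote, FASTRPC_CONTROL_SMMU records the
 * shared context-bank preference and FASTRPC_CONTROL_KALLOC reports that
 * kernel-side buffer allocation is supported.
 *
 * Illustrative userspace sketch, limited to the fields this handler
 * actually reads:
 *
 *	struct fastrpc_ioctl_control cp = { 0 };
 *
 *	cp.req = FASTRPC_CONTROL_LATENCY;
 *	cp.lp.enable = FASTRPC_LATENCY_CTRL_ENB;
 *	ioctl(fd, FASTRPC_IOCTL_CONTROL, &cp);
 */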
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303518static int fastrpc_internal_control(struct fastrpc_file *fl,
3519 struct fastrpc_ioctl_control *cp)
3520{
3521 int err = 0;
3522 int latency;
3523
3524 VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
3525 if (err)
3526 goto bail;
3527 VERIFY(err, !IS_ERR_OR_NULL(cp));
3528 if (err)
3529 goto bail;
3530
3531 switch (cp->req) {
3532 case FASTRPC_CONTROL_LATENCY:
3533 latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
3534 fl->apps->latency : PM_QOS_DEFAULT_VALUE;
3535 VERIFY(err, latency != 0);
3536 if (err)
3537 goto bail;
3538 if (!fl->qos_request) {
3539 pm_qos_add_request(&fl->pm_qos_req,
3540 PM_QOS_CPU_DMA_LATENCY, latency);
3541 fl->qos_request = 1;
3542 } else
3543 pm_qos_update_request(&fl->pm_qos_req, latency);
3544 break;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303545 case FASTRPC_CONTROL_SMMU:
3546 fl->sharedcb = cp->smmu.sharedcb;
3547 break;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303548 case FASTRPC_CONTROL_KALLOC:
3549 cp->kalloc.kalloc_support = 1;
3550 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303551 default:
3552 err = -ENOTTY;
3553 break;
3554 }
3555bail:
3556 return err;
3557}
3558
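/*
 * Main ioctl dispatcher. The INVOKE variants share one path and differ
 * only in how much of fastrpc_ioctl_invoke_crc is copied from userspace;
 * MMAP/MUNMAP (plus the _64 and _FD forms), SETMODE, GETPERF, CONTROL,
 * GETINFO and INIT are handled in the switch below. New requests are
 * refused once fastrpc_device_release() has started closing the file.
 */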
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003559static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
3560 unsigned long ioctl_param)
3561{
3562 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07003563 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003564 struct fastrpc_ioctl_mmap mmap;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303565 struct fastrpc_ioctl_mmap_64 mmap64;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003566 struct fastrpc_ioctl_munmap munmap;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303567 struct fastrpc_ioctl_munmap_64 munmap64;
c_mtharu7bd6a422017-10-17 18:15:37 +05303568 struct fastrpc_ioctl_munmap_fd munmap_fd;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08003569 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003570 struct fastrpc_ioctl_perf perf;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303571 struct fastrpc_ioctl_control cp;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003572 } p;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303573 union {
3574 struct fastrpc_ioctl_mmap mmap;
3575 struct fastrpc_ioctl_munmap munmap;
3576 } i;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003577 void *param = (char *)ioctl_param;
3578 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
3579 int size = 0, err = 0;
3580 uint32_t info;
3581
c_mtharue1a5ce12017-10-13 20:47:09 +05303582 p.inv.fds = NULL;
3583 p.inv.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07003584 p.inv.crc = NULL;
tharun kumar9f899ea2017-07-03 17:07:03 +05303585 spin_lock(&fl->hlock);
3586 if (fl->file_close == 1) {
3587 err = -EBADF;
3588 pr_warn("ADSPRPC: fastrpc_device_release is happening, so not sending any new requests to DSP\n");
3589 spin_unlock(&fl->hlock);
3590 goto bail;
3591 }
3592 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003593
3594 switch (ioctl_num) {
3595 case FASTRPC_IOCTL_INVOKE:
3596 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07003597 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003598 case FASTRPC_IOCTL_INVOKE_FD:
3599 if (!size)
3600 size = sizeof(struct fastrpc_ioctl_invoke_fd);
3601 /* fall through */
3602 case FASTRPC_IOCTL_INVOKE_ATTRS:
3603 if (!size)
3604 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07003605 /* fall through */
3606 case FASTRPC_IOCTL_INVOKE_CRC:
3607 if (!size)
3608 size = sizeof(struct fastrpc_ioctl_invoke_crc);
c_mtharue1a5ce12017-10-13 20:47:09 +05303609 K_COPY_FROM_USER(err, 0, &p.inv, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003610 if (err)
3611 goto bail;
3612 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
3613 0, &p.inv)));
3614 if (err)
3615 goto bail;
3616 break;
3617 case FASTRPC_IOCTL_MMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303618 K_COPY_FROM_USER(err, 0, &p.mmap, param,
3619 sizeof(p.mmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303620 if (err)
3621 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003622 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
3623 if (err)
3624 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303625 K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003626 if (err)
3627 goto bail;
3628 break;
3629 case FASTRPC_IOCTL_MUNMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303630 K_COPY_FROM_USER(err, 0, &p.munmap, param,
3631 sizeof(p.munmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303632 if (err)
3633 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003634 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
3635 &p.munmap)));
3636 if (err)
3637 goto bail;
3638 break;
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303639 case FASTRPC_IOCTL_MMAP_64:
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303640 K_COPY_FROM_USER(err, 0, &p.mmap64, param,
3641 sizeof(p.mmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303642 if (err)
3643 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303644 get_fastrpc_ioctl_mmap_64(&p.mmap64, &i.mmap);
3645 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &i.mmap)));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303646 if (err)
3647 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303648 put_fastrpc_ioctl_mmap_64(&p.mmap64, &i.mmap);
3649 K_COPY_TO_USER(err, 0, param, &p.mmap64, sizeof(p.mmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303650 if (err)
3651 goto bail;
3652 break;
3653 case FASTRPC_IOCTL_MUNMAP_64:
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303654 K_COPY_FROM_USER(err, 0, &p.munmap64, param,
3655 sizeof(p.munmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303656 if (err)
3657 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303658 get_fastrpc_ioctl_munmap_64(&p.munmap64, &i.munmap);
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303659 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303660 &i.munmap)));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303661 if (err)
3662 goto bail;
3663 break;
c_mtharu7bd6a422017-10-17 18:15:37 +05303664 case FASTRPC_IOCTL_MUNMAP_FD:
3665 K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
3666 sizeof(p.munmap_fd));
3667 if (err)
3668 goto bail;
3669 VERIFY(err, 0 == (err = fastrpc_internal_munmap_fd(fl,
3670 &p.munmap_fd)));
3671 if (err)
3672 goto bail;
3673 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003674 case FASTRPC_IOCTL_SETMODE:
3675 switch ((uint32_t)ioctl_param) {
3676 case FASTRPC_MODE_PARALLEL:
3677 case FASTRPC_MODE_SERIAL:
3678 fl->mode = (uint32_t)ioctl_param;
3679 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003680 case FASTRPC_MODE_PROFILE:
3681 fl->profile = (uint32_t)ioctl_param;
3682 break;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05303683 case FASTRPC_MODE_SESSION:
3684 fl->sessionid = 1;
3685 fl->tgid |= (1 << SESSION_ID_INDEX);
3686 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003687 default:
3688 err = -ENOTTY;
3689 break;
3690 }
3691 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003692 case FASTRPC_IOCTL_GETPERF:
c_mtharue1a5ce12017-10-13 20:47:09 +05303693 K_COPY_FROM_USER(err, 0, &p.perf,
3694 param, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003695 if (err)
3696 goto bail;
3697 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
3698 if (p.perf.keys) {
3699 char *keys = PERF_KEYS;
3700
c_mtharue1a5ce12017-10-13 20:47:09 +05303701 K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
3702 keys, strlen(keys)+1);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003703 if (err)
3704 goto bail;
3705 }
3706 if (p.perf.data) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303707 struct fastrpc_perf *perf = NULL, *fperf = NULL;
3708 struct hlist_node *n = NULL;
3709
3710 mutex_lock(&fl->perf_mutex);
3711 hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
3712 if (perf->tid == current->pid) {
3713 fperf = perf;
3714 break;
3715 }
3716 }
3717
3718 mutex_unlock(&fl->perf_mutex);
3719
3720 if (fperf) {
3721 K_COPY_TO_USER(err, 0, (void *)p.perf.data,
3722 fperf, sizeof(*fperf));
3723 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003724 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303725 K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003726 if (err)
3727 goto bail;
3728 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303729 case FASTRPC_IOCTL_CONTROL:
c_mtharue1a5ce12017-10-13 20:47:09 +05303730 K_COPY_FROM_USER(err, 0, &p.cp, param,
3731 sizeof(p.cp));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303732 if (err)
3733 goto bail;
3734 VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
3735 if (err)
3736 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303737 if (p.cp.req == FASTRPC_CONTROL_KALLOC) {
3738 K_COPY_TO_USER(err, 0, param, &p.cp, sizeof(p.cp));
3739 if (err)
3740 goto bail;
3741 }
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303742 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003743 case FASTRPC_IOCTL_GETINFO:
c_mtharue1a5ce12017-10-13 20:47:09 +05303744 K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
Sathish Ambley36849af2017-02-02 09:35:55 -08003745 if (err)
3746 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003747 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
3748 if (err)
3749 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303750 K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003751 if (err)
3752 goto bail;
3753 break;
3754 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08003755 p.init.attrs = 0;
3756 p.init.siglen = 0;
3757 size = sizeof(struct fastrpc_ioctl_init);
3758 /* fall through */
3759 case FASTRPC_IOCTL_INIT_ATTRS:
3760 if (!size)
3761 size = sizeof(struct fastrpc_ioctl_init_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +05303762 K_COPY_FROM_USER(err, 0, &p.init, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003763 if (err)
3764 goto bail;
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05303765 VERIFY(err, p.init.init.filelen >= 0 &&
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303766 p.init.init.filelen < INIT_FILELEN_MAX);
3767 if (err)
3768 goto bail;
3769 VERIFY(err, p.init.init.memlen >= 0 &&
3770 p.init.init.memlen < INIT_MEMLEN_MAX);
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05303771 if (err)
3772 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303773 VERIFY(err, 0 == (err = fastrpc_init_process(fl, &p.init)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003774 if (err)
3775 goto bail;
3776 break;
3777
3778 default:
3779 err = -ENOTTY;
3780 pr_info("bad ioctl: %d\n", ioctl_num);
3781 break;
3782 }
3783 bail:
3784 return err;
3785}
3786
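/*
 * Subsystem-restart notifier. SUBSYS_BEFORE_SHUTDOWN bumps the channel's
 * ssrcount, closes the glink/SMD transport and notifies waiting drivers;
 * SUBSYS_RAMDUMP_NOTIFICATION arms the remote-heap ramdump; and
 * SUBSYS_AFTER_POWERUP marks the subsystem as up again.
 */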
3787static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
3788 unsigned long code,
3789 void *data)
3790{
3791 struct fastrpc_apps *me = &gfa;
3792 struct fastrpc_channel_ctx *ctx;
c_mtharue1a5ce12017-10-13 20:47:09 +05303793 struct notif_data *notifdata = data;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003794 int cid;
3795
3796 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
3797 cid = ctx - &me->channel[0];
3798 if (code == SUBSYS_BEFORE_SHUTDOWN) {
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303799 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003800 ctx->ssrcount++;
c_mtharue1a5ce12017-10-13 20:47:09 +05303801 ctx->issubsystemup = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303802 if (ctx->chan) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303803 if (me->glink)
3804 fastrpc_glink_close(ctx->chan, cid);
3805 else
3806 smd_close(ctx->chan);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303807 ctx->chan = NULL;
3808 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
3809 gcinfo[cid].name, MAJOR(me->dev_no), cid);
3810 }
3811 mutex_unlock(&me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05303812 if (cid == 0)
3813 me->staticpd_flags = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003814 fastrpc_notify_drivers(me, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05303815 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
3816 if (me->channel[0].remoteheap_ramdump_dev &&
3817 notifdata->enable_ramdump) {
3818 me->channel[0].ramdumpenabled = 1;
3819 }
3820 } else if (code == SUBSYS_AFTER_POWERUP) {
3821 ctx->issubsystemup = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003822 }
3823
3824 return NOTIFY_DONE;
3825}
3826
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303827static int fastrpc_pdr_notifier_cb(struct notifier_block *pdrnb,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303828 unsigned long code,
3829 void *data)
3830{
3831 struct fastrpc_apps *me = &gfa;
3832 struct fastrpc_static_pd *spd;
3833 struct notif_data *notifdata = data;
3834
3835 spd = container_of(pdrnb, struct fastrpc_static_pd, pdrnb);
3836 if (code == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01) {
3837 mutex_lock(&me->smd_mutex);
3838 spd->pdrcount++;
3839 spd->ispdup = 0;
3840 pr_info("ADSPRPC: Audio PDR notifier %d %s\n",
3841 MAJOR(me->dev_no), spd->spdname);
3842 mutex_unlock(&me->smd_mutex);
3843 if (!strcmp(spd->spdname,
3844 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME))
3845 me->staticpd_flags = 0;
3846 fastrpc_notify_pdr_drivers(me, spd->spdname);
3847 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
3848 if (me->channel[0].remoteheap_ramdump_dev &&
3849 notifdata->enable_ramdump) {
3850 me->channel[0].ramdumpenabled = 1;
3851 }
3852 } else if (code == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
3853 spd->ispdup = 1;
3854 }
3855
3856 return NOTIFY_DONE;
3857}
3858
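/*
 * Service-locator callback. Once the audio or sensors protection-domain
 * entry is found, the PD-restart notifier (spd->pdrnb) is registered for
 * it; if the service is already up, the static PD is marked available
 * immediately.
 */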
3859static int fastrpc_get_service_location_notify(struct notifier_block *nb,
3860 unsigned long opcode, void *data)
3861{
3862 struct fastrpc_static_pd *spd;
3863 struct pd_qmi_client_data *pdr = data;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303864 int curr_state = 0, i = 0;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303865
3866 spd = container_of(nb, struct fastrpc_static_pd, get_service_nb);
3867 if (opcode == LOCATOR_DOWN) {
3868 pr_err("ADSPRPC: Audio PD restart notifier locator down\n");
3869 return NOTIFY_DONE;
3870 }
3871
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303872 for (i = 0; i < pdr->total_domains; i++) {
3873 if ((!strcmp(pdr->domain_list[i].name,
3874 "msm/adsp/audio_pd")) ||
3875 (!strcmp(pdr->domain_list[i].name,
3876 "msm/adsp/sensor_pd"))) {
3877 spd->pdrhandle =
3878 service_notif_register_notifier(
3879 pdr->domain_list[i].name,
3880 pdr->domain_list[i].instance_id,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303881 &spd->pdrnb, &curr_state);
Tharun Kumar Meruguad4beb82018-05-10 19:51:48 +05303882 if (IS_ERR(spd->pdrhandle)) {
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303883 pr_err("ADSPRPC: Unable to register notifier\n");
Tharun Kumar Meruguad4beb82018-05-10 19:51:48 +05303884 } else if (curr_state ==
3885 SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
3886 pr_info("ADSPRPC: STATE_UP_V01 received\n");
3887 spd->ispdup = 1;
3888 } else if (curr_state ==
3889 SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01) {
3890 pr_info("ADSPRPC: STATE_UNINIT_V01 received\n");
3891 }
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303892 break;
3893 }
3894 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303895
3896 return NOTIFY_DONE;
3897}
3898
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003899static const struct file_operations fops = {
3900 .open = fastrpc_device_open,
3901 .release = fastrpc_device_release,
3902 .unlocked_ioctl = fastrpc_device_ioctl,
3903 .compat_ioctl = compat_fastrpc_device_ioctl,
3904};
3905
3906static const struct of_device_id fastrpc_match_table[] = {
3907 { .compatible = "qcom,msm-fastrpc-adsp", },
3908 { .compatible = "qcom,msm-fastrpc-compute", },
3909 { .compatible = "qcom,msm-fastrpc-compute-cb", },
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303910 { .compatible = "qcom,msm-fastrpc-legacy-compute", },
3911 { .compatible = "qcom,msm-fastrpc-legacy-compute-cb", },
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003912 { .compatible = "qcom,msm-adsprpc-mem-region", },
3913 {}
3914};
3915
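/*
 * Probes one SMMU context bank ("qcom,msm-fastrpc-compute-cb"): matches
 * the "label" property to a channel, takes the context-bank number from
 * the iommus phandle, creates an ARM IOMMU mapping of size 0x78000000
 * (based at 0x60000000 for secure banks, 0x80000000 otherwise), tags it
 * with the secure VMID where needed and attaches the device as a new
 * session on that channel.
 */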
3916static int fastrpc_cb_probe(struct device *dev)
3917{
3918 struct fastrpc_channel_ctx *chan;
3919 struct fastrpc_session_ctx *sess;
3920 struct of_phandle_args iommuspec;
3921 const char *name;
3922 unsigned int start = 0x80000000;
3923 int err = 0, i;
3924 int secure_vmid = VMID_CP_PIXEL;
3925
c_mtharue1a5ce12017-10-13 20:47:09 +05303926 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
3927 "label", NULL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003928 if (err)
3929 goto bail;
3930 for (i = 0; i < NUM_CHANNELS; i++) {
3931 if (!gcinfo[i].name)
3932 continue;
3933 if (!strcmp(name, gcinfo[i].name))
3934 break;
3935 }
3936 VERIFY(err, i < NUM_CHANNELS);
3937 if (err)
3938 goto bail;
3939 chan = &gcinfo[i];
3940 VERIFY(err, chan->sesscount < NUM_SESSIONS);
3941 if (err)
3942 goto bail;
3943
3944 VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
3945 "#iommu-cells", 0, &iommuspec));
3946 if (err)
3947 goto bail;
3948 sess = &chan->session[chan->sesscount];
3949 sess->smmu.cb = iommuspec.args[0] & 0xf;
3950 sess->used = 0;
3951 sess->smmu.coherent = of_property_read_bool(dev->of_node,
3952 "dma-coherent");
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303953 sess->smmu.sharedcb = of_property_read_bool(dev->of_node,
3954 "shared-cb");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003955 sess->smmu.secure = of_property_read_bool(dev->of_node,
3956 "qcom,secure-context-bank");
3957 if (sess->smmu.secure)
3958 start = 0x60000000;
3959 VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
3960 arm_iommu_create_mapping(&platform_bus_type,
Tharun Kumar Meruguca183f92017-04-27 17:43:27 +05303961 start, 0x78000000)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003962 if (err)
3963 goto bail;
3964
3965 if (sess->smmu.secure)
3966 iommu_domain_set_attr(sess->smmu.mapping->domain,
3967 DOMAIN_ATTR_SECURE_VMID,
3968 &secure_vmid);
3969
3970 VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
3971 if (err)
3972 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303973 sess->smmu.dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003974 sess->smmu.enabled = 1;
3975 chan->sesscount++;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003976 debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
3977 NULL, &debugfs_fops);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003978bail:
3979 return err;
3980}
3981
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303982static int fastrpc_cb_legacy_probe(struct device *dev)
3983{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303984 struct fastrpc_channel_ctx *chan;
3985 struct fastrpc_session_ctx *first_sess = NULL, *sess = NULL;
3986 const char *name;
3987 unsigned int *sids = NULL, sids_size = 0;
3988 int err = 0, ret = 0, i;
3989
3990 unsigned int start = 0x80000000;
3991
3992 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
3993 "label", NULL)));
3994 if (err)
3995 goto bail;
3996
3997 for (i = 0; i < NUM_CHANNELS; i++) {
3998 if (!gcinfo[i].name)
3999 continue;
4000 if (!strcmp(name, gcinfo[i].name))
4001 break;
4002 }
4003 VERIFY(err, i < NUM_CHANNELS);
4004 if (err)
4005 goto bail;
4006
4007 chan = &gcinfo[i];
4008 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4009 if (err)
4010 goto bail;
4011
4012 first_sess = &chan->session[chan->sesscount];
4013
4014 VERIFY(err, NULL != of_get_property(dev->of_node,
4015 "sids", &sids_size));
4016 if (err)
4017 goto bail;
4018
4019 VERIFY(err, NULL != (sids = kzalloc(sids_size, GFP_KERNEL)));
4020 if (err)
4021 goto bail;
4022 ret = of_property_read_u32_array(dev->of_node, "sids", sids,
4023 sids_size/sizeof(unsigned int));
4024 if (ret)
4025 goto bail;
4026
4027 VERIFY(err, !IS_ERR_OR_NULL(first_sess->smmu.mapping =
4028 arm_iommu_create_mapping(&platform_bus_type,
4029 start, 0x78000000)));
4030 if (err)
4031 goto bail;
4032
4033 VERIFY(err, !arm_iommu_attach_device(dev, first_sess->smmu.mapping));
4034 if (err)
4035 goto bail;
4036
4037
4038 for (i = 0; i < sids_size/sizeof(unsigned int); i++) {
4039 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4040 if (err)
4041 goto bail;
4042 sess = &chan->session[chan->sesscount];
4043 sess->smmu.cb = sids[i];
4044 sess->smmu.dev = dev;
4045 sess->smmu.mapping = first_sess->smmu.mapping;
4046 sess->smmu.enabled = 1;
4047 sess->used = 0;
4048 sess->smmu.coherent = false;
4049 sess->smmu.secure = false;
4050 chan->sesscount++;
4051 }
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304052bail:
4053 kfree(sids);
4054 return err;
4055}
4056
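/*
 * Parses prop_name (e.g. "qcom,adsp-remoteheap-vmid") as a list of VMIDs
 * allowed to access the remote heap and builds a matching read/write/exec
 * permission array; the results are stored in the channel's secure_vm
 * descriptor.
 */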
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304059static void init_secure_vmid_list(struct device *dev, char *prop_name,
4060 struct secure_vm *destvm)
4061{
4062 int err = 0;
4063 u32 len = 0, i = 0;
4064 u32 *rhvmlist = NULL;
4065 u32 *rhvmpermlist = NULL;
4066
4067 if (!of_find_property(dev->of_node, prop_name, &len))
4068 goto bail;
4069 if (len == 0)
4070 goto bail;
4071 len /= sizeof(u32);
4072 VERIFY(err, NULL != (rhvmlist = kcalloc(len, sizeof(u32), GFP_KERNEL)));
4073 if (err)
4074 goto bail;
4075 VERIFY(err, NULL != (rhvmpermlist = kcalloc(len, sizeof(u32),
4076 GFP_KERNEL)));
4077 if (err)
4078 goto bail;
4079 for (i = 0; i < len; i++) {
4080 err = of_property_read_u32_index(dev->of_node, prop_name, i,
4081 &rhvmlist[i]);
4082 rhvmpermlist[i] = PERM_READ | PERM_WRITE | PERM_EXEC;
4083 pr_info("ADSPRPC: Secure VMID = %d", rhvmlist[i]);
4084 if (err) {
4085 pr_err("ADSPRPC: Failed to read VMID\n");
4086 goto bail;
4087 }
4088 }
4089 destvm->vmid = rhvmlist;
4090 destvm->vmperm = rhvmpermlist;
4091 destvm->vmcount = len;
4092bail:
4093 if (err) {
4094 kfree(rhvmlist);
4095 kfree(rhvmpermlist);
4096 }
4097}
4098
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304099static void configure_secure_channels(uint32_t secure_domains)
4100{
4101 struct fastrpc_apps *me = &gfa;
4102 int ii = 0;
4103 /*
4104 * secure_domains contains the bitmask of the secure channels
4105 * Bit 0 - ADSP
4106 * Bit 1 - MDSP
4107 * Bit 2 - SLPI
4108 * Bit 3 - CDSP
4109 */
4110 for (ii = ADSP_DOMAIN_ID; ii <= CDSP_DOMAIN_ID; ++ii) {
4111 int secure = (secure_domains >> ii) & 0x01;
4112
4113 me->channel[ii].secure = secure;
4114 }
4115}
4116
4117
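/*
 * Platform probe. Depending on the compatible string this reads the
 * secure-channel and rpc-latency settings, probes a (legacy) context
 * bank, switches the transport to SMD for legacy targets, claims the
 * ADSP memory region (hyp-assigning it to the remote VMs unless access
 * is restricted), registers the audio/sensors PDR service locators and
 * finally populates the child devices.
 */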
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004118static int fastrpc_probe(struct platform_device *pdev)
4119{
4120 int err = 0;
4121 struct fastrpc_apps *me = &gfa;
4122 struct device *dev = &pdev->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004123 struct device_node *ion_node, *node;
4124 struct platform_device *ion_pdev;
4125 struct cma *cma;
4126 uint32_t val;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304127 int ret = 0;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304128 uint32_t secure_domains;
c_mtharu63ffc012017-11-16 15:26:56 +05304129
4130 if (of_device_is_compatible(dev->of_node,
4131 "qcom,msm-fastrpc-compute")) {
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304132 init_secure_vmid_list(dev, "qcom,adsp-remoteheap-vmid",
4133 &gcinfo[0].rhvm);
c_mtharu63ffc012017-11-16 15:26:56 +05304134
c_mtharu63ffc012017-11-16 15:26:56 +05304135
4136 of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
4137 &me->latency);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304138 if (of_get_property(dev->of_node,
4139 "qcom,secure-domains", NULL) != NULL) {
4140 VERIFY(err, !of_property_read_u32(dev->of_node,
4141 "qcom,secure-domains",
4142 &secure_domains));
zhaochenfc798572018-08-17 15:32:37 +08004143 if (!err) {
4144 me->secure_flag = true;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304145 configure_secure_channels(secure_domains);
zhaochenfc798572018-08-17 15:32:37 +08004146 } else {
4147 me->secure_flag = false;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304148 pr_info("adsprpc: unable to read the domain configuration from dts\n");
zhaochenfc798572018-08-17 15:32:37 +08004149 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304150 }
c_mtharu63ffc012017-11-16 15:26:56 +05304151 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004152 if (of_device_is_compatible(dev->of_node,
4153 "qcom,msm-fastrpc-compute-cb"))
4154 return fastrpc_cb_probe(dev);
4155
4156 if (of_device_is_compatible(dev->of_node,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304157 "qcom,msm-fastrpc-legacy-compute")) {
4158 me->glink = false;
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304159 me->legacy = 1;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304160 }
4161
4162 if (of_device_is_compatible(dev->of_node,
4163 "qcom,msm-fastrpc-legacy-compute-cb")){
4164 return fastrpc_cb_legacy_probe(dev);
4165 }
4166
4167 if (of_device_is_compatible(dev->of_node,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004168 "qcom,msm-adsprpc-mem-region")) {
4169 me->dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004170 ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
4171 if (ion_node) {
4172 for_each_available_child_of_node(ion_node, node) {
4173 if (of_property_read_u32(node, "reg", &val))
4174 continue;
4175 if (val != ION_ADSP_HEAP_ID)
4176 continue;
4177 ion_pdev = of_find_device_by_node(node);
4178 if (!ion_pdev)
4179 break;
4180 cma = dev_get_cma_area(&ion_pdev->dev);
4181 if (cma) {
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304182 me->range.addr = cma_get_base(cma);
4183 me->range.size =
4184 (size_t)cma_get_size(cma);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004185 }
4186 break;
4187 }
4188 }
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304189 if (me->range.addr && !of_property_read_bool(dev->of_node,
Tharun Kumar Merugu6b7a4a22018-01-17 16:08:07 +05304190 "restrict-access")) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004191 int srcVM[1] = {VMID_HLOS};
4192 int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
4193 VMID_ADSP_Q6};
Sathish Ambley84d11862017-05-15 14:36:05 -07004194 int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004195 PERM_READ | PERM_WRITE | PERM_EXEC,
4196 PERM_READ | PERM_WRITE | PERM_EXEC,
4197 PERM_READ | PERM_WRITE | PERM_EXEC,
4198 };
4199
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304200 VERIFY(err, !hyp_assign_phys(me->range.addr,
4201 me->range.size, srcVM, 1,
4202 destVM, destVMperm, 4));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004203 if (err)
4204 goto bail;
4205 }
4206 return 0;
4207 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304208 if (of_property_read_bool(dev->of_node,
4209 "qcom,fastrpc-adsp-audio-pdr")) {
4210 int session;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004211
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304212 VERIFY(err, !fastrpc_get_adsp_session(
4213 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
4214 if (err)
4215 goto spdbail;
4216 me->channel[0].spd[session].get_service_nb.notifier_call =
4217 fastrpc_get_service_location_notify;
4218 ret = get_service_location(
4219 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
4220 AUDIO_PDR_ADSP_SERVICE_NAME,
4221 &me->channel[0].spd[session].get_service_nb);
4222 if (ret)
4223 pr_err("ADSPRPC: Get service location failed: %d\n",
4224 ret);
4225 }
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304226 if (of_property_read_bool(dev->of_node,
4227 "qcom,fastrpc-adsp-sensors-pdr")) {
4228 int session;
4229
4230 VERIFY(err, !fastrpc_get_adsp_session(
4231 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
4232 if (err)
4233 goto spdbail;
4234 me->channel[0].spd[session].get_service_nb.notifier_call =
4235 fastrpc_get_service_location_notify;
4236 ret = get_service_location(
4237 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
4238 SENSORS_PDR_ADSP_SERVICE_NAME,
4239 &me->channel[0].spd[session].get_service_nb);
4240 if (ret)
4241 pr_err("ADSPRPC: Get service location failed: %d\n",
4242 ret);
4243 }
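	/* PDR registration is best-effort: errors above do not fail the probe. */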
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304244spdbail:
4245 err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004246 VERIFY(err, !of_platform_populate(pdev->dev.of_node,
4247 fastrpc_match_table,
4248 NULL, &pdev->dev));
4249 if (err)
4250 goto bail;
4251bail:
4252 return err;
4253}
4254
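/*
 * Undo per-channel setup: drop the transport channel reference, detach each
 * session device from its SMMU mapping and release the mapping, then free
 * the remote-heap VMID tables.
 */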
4255static void fastrpc_deinit(void)
4256{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304257 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004258 struct fastrpc_channel_ctx *chan = gcinfo;
4259 int i, j;
4260
4261 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
4262 if (chan->chan) {
4263 kref_put_mutex(&chan->kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304264 fastrpc_channel_close, &me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05304265 chan->chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004266 }
4267 for (j = 0; j < NUM_SESSIONS; j++) {
4268 struct fastrpc_session_ctx *sess = &chan->session[j];
c_mtharue1a5ce12017-10-13 20:47:09 +05304269 if (sess->smmu.dev) {
4270 arm_iommu_detach_device(sess->smmu.dev);
4271 sess->smmu.dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004272 }
4273 if (sess->smmu.mapping) {
4274 arm_iommu_release_mapping(sess->smmu.mapping);
c_mtharue1a5ce12017-10-13 20:47:09 +05304275 sess->smmu.mapping = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004276 }
4277 }
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304278 kfree(chan->rhvm.vmid);
4279 kfree(chan->rhvm.vmperm);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004280 }
4281}
4282
4283static struct platform_driver fastrpc_driver = {
4284 .probe = fastrpc_probe,
4285 .driver = {
4286 .name = "fastrpc",
4287 .owner = THIS_MODULE,
4288 .of_match_table = fastrpc_match_table,
4289 },
4290};
4291
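/*
 * Module init: register the platform driver, allocate the char device region,
 * create the device class and both device nodes, hook up per-channel
 * subsystem-restart notifiers and create the ION client. Failures unwind
 * through the labelled error path below.
 */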
4292static int __init fastrpc_device_init(void)
4293{
4294 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +05304295 struct device *dev = NULL;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304296 struct device *secure_dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004297 int err = 0, i;
4298
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05304299 debugfs_root = debugfs_create_dir("adsprpc", NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004300 memset(me, 0, sizeof(*me));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004301 fastrpc_init(me);
4302 me->dev = NULL;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304303 me->glink = true;
zhaochenfc798572018-08-17 15:32:37 +08004304 me->secure_flag = false;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004305 VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
4306 if (err)
4307 goto register_bail;
4308 VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
4309 DEVICE_NAME));
4310 if (err)
4311 goto alloc_chrdev_bail;
4312 cdev_init(&me->cdev, &fops);
4313 me->cdev.owner = THIS_MODULE;
4314 VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304315 NUM_DEVICES));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004316 if (err)
4317 goto cdev_init_bail;
4318 me->class = class_create(THIS_MODULE, "fastrpc");
4319 VERIFY(err, !IS_ERR(me->class));
4320 if (err)
4321 goto class_create_bail;
4322 me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304323
4324 /*
4325	 * Create devices and register with sysfs.
4326	 * Create the first (non-secure) device with minor number 0.
4327 */
Sathish Ambley36849af2017-02-02 09:35:55 -08004328 dev = device_create(me->class, NULL,
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304329 MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV),
4330 NULL, DEVICE_NAME);
Sathish Ambley36849af2017-02-02 09:35:55 -08004331 VERIFY(err, !IS_ERR_OR_NULL(dev));
4332 if (err)
4333 goto device_create_bail;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304334
4335	/* Create the secure device node with its own minor number */
4336 secure_dev = device_create(me->class, NULL,
4337 MKDEV(MAJOR(me->dev_no), MINOR_NUM_SECURE_DEV),
4338 NULL, DEVICE_NAME_SECURE);
4339 VERIFY(err, !IS_ERR_OR_NULL(secure_dev));
4340 if (err)
4341 goto device_create_bail;
4342
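	/*
	 * Every channel is exposed through the secure device node by default;
	 * only the CDSP channel is routed to the non-secure node. Each channel
	 * also registers a subsystem-restart notifier.
	 */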
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004343 for (i = 0; i < NUM_CHANNELS; i++) {
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304344 me->channel[i].dev = secure_dev;
4345 if (i == CDSP_DOMAIN_ID)
4346 me->channel[i].dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004347 me->channel[i].ssrcount = 0;
4348 me->channel[i].prevssrcount = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05304349 me->channel[i].issubsystemup = 1;
4350 me->channel[i].ramdumpenabled = 0;
4351 me->channel[i].remoteheap_ramdump_dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004352 me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
4353 me->channel[i].handle = subsys_notif_register_notifier(
4354 gcinfo[i].subsys,
4355 &me->channel[i].nb);
4356 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004357 me->client = msm_ion_client_create(DEVICE_NAME);
4358 VERIFY(err, !IS_ERR_OR_NULL(me->client));
4359 if (err)
4360 goto device_create_bail;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05304361
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004362 return 0;
4363device_create_bail:
4364 for (i = 0; i < NUM_CHANNELS; i++) {
Sathish Ambley36849af2017-02-02 09:35:55 -08004365 if (me->channel[i].handle)
4366 subsys_notif_unregister_notifier(me->channel[i].handle,
4367 &me->channel[i].nb);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004368 }
Sathish Ambley36849af2017-02-02 09:35:55 -08004369 if (!IS_ERR_OR_NULL(dev))
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304370 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4371 MINOR_NUM_DEV));
4372 if (!IS_ERR_OR_NULL(secure_dev))
4373 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4374 MINOR_NUM_SECURE_DEV));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004375 class_destroy(me->class);
4376class_create_bail:
4377 cdev_del(&me->cdev);
4378cdev_init_bail:
4379 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
4380alloc_chrdev_bail:
4381register_bail:
4382 fastrpc_deinit();
4383 return err;
4384}
4385
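/*
 * Module unload: tear down open file state, per-channel resources, both
 * device nodes, the class and cdev, the ION client and the debugfs tree.
 */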
4386static void __exit fastrpc_device_exit(void)
4387{
4388 struct fastrpc_apps *me = &gfa;
4389 int i;
4390
4391 fastrpc_file_list_dtor(me);
4392 fastrpc_deinit();
4393 for (i = 0; i < NUM_CHANNELS; i++) {
4394 if (!gcinfo[i].name)
4395 continue;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004396 subsys_notif_unregister_notifier(me->channel[i].handle,
4397 &me->channel[i].nb);
4398 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304399
4400	/* Destroy the secure and non-secure device nodes */
4401 device_destroy(me->class, MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV));
4402 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4403 MINOR_NUM_SECURE_DEV));
4404
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004405 class_destroy(me->class);
4406 cdev_del(&me->cdev);
4407 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
4408 ion_client_destroy(me->client);
Sathish Ambley1ca68232017-01-19 10:32:55 -08004409 debugfs_remove_recursive(debugfs_root);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004410}
4411
4412late_initcall(fastrpc_device_init);
4413module_exit(fastrpc_device_exit);
4414
4415MODULE_LICENSE("GPL v2");