/*
 * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/msm_ion.h>
#include <soc/qcom/secure_buffer.h>
#include <soc/qcom/glink.h>
#include <soc/qcom/smd.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/service-notifier.h>
#include <soc/qcom/service-locator.h>
#include <linux/scatterlist.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/sort.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <asm/dma-iommu.h>
#include <soc/qcom/scm.h>
#include "adsprpc_compat.h"
#include "adsprpc_shared.h"
#include <soc/qcom/ramdump.h>
#include <linux/debugfs.h>
#include <linux/pm_qos.h>
#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
#define TZ_PIL_AUTH_QDSP6_PROC 1
#define ADSP_MMAP_HEAP_ADDR 4
#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
#define ADSP_MMAP_ADD_PAGES 0x1000
#define FASTRPC_DMAHANDLE_NOMAP (16)

#define FASTRPC_ENOSUCH 39
#define VMID_SSC_Q6 5
#define VMID_ADSP_Q6 6
#define DEBUGFS_SIZE 3072
#define UL_SIZE 25
#define PID_SIZE 10

#define AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME "audio_pdr_adsprpc"
#define AUDIO_PDR_ADSP_SERVICE_NAME "avs/audio"

#define SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME "sensors_pdr_adsprpc"
#define SENSORS_PDR_ADSP_SERVICE_NAME "tms/servreg"

#define RPC_TIMEOUT (5 * HZ)
#define BALIGN 128
#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
#define M_FDLIST (16)
#define M_CRCLIST (64)
#define SESSION_ID_INDEX (30)
#define FASTRPC_CTX_MAGIC (0xbeeddeed)
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_CTXID_MASK (0xFF0)
#define NUM_DEVICES 2 /* adsprpc-smd, adsprpc-smd-secure */
#define MINOR_NUM_DEV 0
#define MINOR_NUM_SECURE_DEV 1
#define NON_SECURE_CHANNEL 0
#define SECURE_CHANNEL 1

#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)

#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)

#define FASTRPC_LINK_STATE_DOWN (0x0)
#define FASTRPC_LINK_STATE_UP (0x1)
#define FASTRPC_LINK_DISCONNECTED (0x0)
#define FASTRPC_LINK_CONNECTING (0x1)
#define FASTRPC_LINK_CONNECTED (0x3)
#define FASTRPC_LINK_DISCONNECTING (0x7)
#define FASTRPC_LINK_REMOTE_DISCONNECTING (0x8)
#define FASTRPC_GLINK_INTENT_LEN (64)
#define FASTRPC_GLINK_INTENT_NUM (16)

#define PERF_KEYS \
	"count:flush:map:copy:glink:getargs:putargs:invalidate:invoke:tid:ptr"
#define FASTRPC_STATIC_HANDLE_KERNEL (1)
#define FASTRPC_STATIC_HANDLE_LISTENER (3)
#define FASTRPC_STATIC_HANDLE_MAX (20)
#define FASTRPC_LATENCY_CTRL_ENB (1)

#define INIT_FILELEN_MAX (2*1024*1024)
#define INIT_MEMLEN_MAX (8*1024*1024)

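/*
 * Profiling helpers: PERF() samples the time before running the statement
 * block passed as 'ff' and adds the elapsed nanoseconds to the per-thread
 * counter selected with GET_COUNTER(); PERF_END is a no-op marker closing
 * the measured block.  See get_args() below for a typical use:
 *	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
 *	<statements>;
 *	PERF_END);
 */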
#define PERF_END (void)0

#define PERF(enb, cnt, ff) \
	{\
		struct timespec startT = {0};\
		int64_t *counter = cnt;\
		if (enb && counter) {\
			getnstimeofday(&startT);\
		} \
		ff ;\
		if (enb && counter) {\
			*counter += getnstimediff(&startT);\
		} \
	}

#define GET_COUNTER(perf_ptr, offset)  \
	(perf_ptr != NULL ?\
		(((offset >= 0) && (offset < PERF_KEY_MAX)) ?\
			(int64_t *)(perf_ptr + offset)\
				: (int64_t *)NULL) : (int64_t *)NULL)

static int fastrpc_glink_open(int cid);
static void fastrpc_glink_close(void *chan, int cid);
static int fastrpc_pdr_notifier_cb(struct notifier_block *nb,
					unsigned long code,
					void *data);
static struct dentry *debugfs_root;
static struct dentry *debugfs_global_file;

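/* Helpers for page-aligned arithmetic on buffer addresses and sizes. */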
static inline uint64_t buf_page_start(uint64_t buf)
{
	uint64_t start = (uint64_t) buf & PAGE_MASK;
	return start;
}

static inline uint64_t buf_page_offset(uint64_t buf)
{
	uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
	return offset;
}

static inline uint64_t buf_num_pages(uint64_t buf, size_t len)
{
	uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
	uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
	uint64_t nPages = end - start + 1;
	return nPages;
}

static inline uint64_t buf_page_size(uint32_t size)
{
	uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;

	return sz > PAGE_SIZE ? sz : PAGE_SIZE;
}

static inline void *uint64_to_ptr(uint64_t addr)
{
	void *ptr = (void *)((uintptr_t)addr);

	return ptr;
}

static inline uint64_t ptr_to_uint64(void *ptr)
{
	uint64_t addr = (uint64_t)((uintptr_t)ptr);

	return addr;
}

struct secure_vm {
	int *vmid;
	int *vmperm;
	int vmcount;
};

struct fastrpc_file;

struct fastrpc_buf {
	struct hlist_node hn;
	struct hlist_node hn_rem;
	struct fastrpc_file *fl;
	void *virt;
	uint64_t phys;
	size_t size;
	unsigned long dma_attr;
	uintptr_t raddr;
	uint32_t flags;
	int remote;
};

struct fastrpc_ctx_lst;

struct overlap {
	uintptr_t start;
	uintptr_t end;
	int raix;
	uintptr_t mstart;
	uintptr_t mend;
	uintptr_t offset;
};

struct smq_invoke_ctx {
	struct hlist_node hn;
	struct completion work;
	int retval;
	int pid;
	int tgid;
	remote_arg_t *lpra;
	remote_arg64_t *rpra;
	remote_arg64_t *lrpra;		/* Local copy of rpra for put_args */
	int *fds;
	struct fastrpc_mmap **maps;
	struct fastrpc_buf *buf;
	struct fastrpc_buf *lbuf;
	size_t used;
	struct fastrpc_file *fl;
	uint32_t sc;
	struct overlap *overs;
	struct overlap **overps;
	struct smq_msg msg;
	unsigned int magic;
	unsigned int *attrs;
	uint32_t *crc;
	uint64_t ctxid;
	void *handle;
	const void *ptr;
};

struct fastrpc_ctx_lst {
	struct hlist_head pending;
	struct hlist_head interrupted;
};

struct fastrpc_smmu {
	struct device *dev;
	struct dma_iommu_mapping *mapping;
	int cb;
	int enabled;
	int faults;
	int secure;
	int coherent;
	int sharedcb;
};

struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};

struct fastrpc_static_pd {
	char *spdname;
	struct notifier_block pdrnb;
	struct notifier_block get_service_nb;
	void *pdrhandle;
	int pdrcount;
	int prevpdrcount;
	int ispdup;
};

struct fastrpc_glink_info {
	int link_state;
	int port_state;
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};

struct fastrpc_channel_ctx {
	char *name;
	char *subsys;
	void *chan;
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct fastrpc_static_pd spd[NUM_SESSIONS];
	struct completion work;
	struct completion workport;
	struct notifier_block nb;
	struct kref kref;
	int channel;
	int sesscount;
	int ssrcount;
	void *handle;
	int prevssrcount;
	int issubsystemup;
	int vmid;
	struct secure_vm rhvm;
	int ramdumpenabled;
	void *remoteheap_ramdump_dev;
	struct fastrpc_glink_info link;
	/* Indicates, if channel is restricted to secure node only */
	int secure;
};

struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;
	uint32_t staticpd_flags;
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;
	spinlock_t hlock;
	struct ion_client *client;
	struct device *dev;
	unsigned int latency;
	bool glink;
	bool legacy;
	bool secure_flag;
	spinlock_t ctxlock;
	struct smq_invoke_ctx *ctxtable[FASTRPC_CTX_MAX];
};

struct fastrpc_mmap {
	struct hlist_node hn;
	struct fastrpc_file *fl;
	struct fastrpc_apps *apps;
	int fd;
	uint32_t flags;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;
	size_t size;
	uintptr_t va;
	size_t len;
	int refs;
	uintptr_t raddr;
	int uncached;
	int secure;
	uintptr_t attr;
};

enum fastrpc_perfkeys {
	PERF_COUNT = 0,
	PERF_FLUSH = 1,
	PERF_MAP = 2,
	PERF_COPY = 3,
	PERF_LINK = 4,
	PERF_GETARGS = 5,
	PERF_PUTARGS = 6,
	PERF_INVARGS = 7,
	PERF_INVOKE = 8,
	PERF_KEY_MAX = 9,
};

struct fastrpc_perf {
	int64_t count;
	int64_t flush;
	int64_t map;
	int64_t copy;
	int64_t link;
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;
	int64_t invoke;
	int64_t tid;
	struct hlist_node hn;
};

struct fastrpc_file {
	struct hlist_node hn;
	spinlock_t hlock;
	struct hlist_head maps;
	struct hlist_head cached_bufs;
	struct hlist_head remote_bufs;
	struct fastrpc_ctx_lst clst;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_buf *init_mem;
	struct fastrpc_session_ctx *secsctx;
	uint32_t mode;
	uint32_t profile;
	int sessionid;
	int tgid;
	int cid;
	int ssrcount;
	int pd;
	char *spdname;
	int file_close;
	int sharedcb;
	struct fastrpc_apps *apps;
	struct hlist_head perf;
	struct dentry *debugfs_file;
	struct mutex perf_mutex;
	struct pm_qos_request pm_qos_req;
	int qos_request;
	struct mutex map_mutex;
	struct mutex fl_map_mutex;
	int refcount;
	/* Identifies the device (MINOR_NUM_DEV / MINOR_NUM_SECURE_DEV) */
	int dev_minor;
	char *debug_buf;
};

static struct fastrpc_apps gfa;

static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.channel = SMD_APPS_QDSP,
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
		.spd = {
			{
				.spdname =
					AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
				.pdrnb.notifier_call =
					fastrpc_pdr_notifier_cb,
			},
			{
				.spdname =
					SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
				.pdrnb.notifier_call =
					fastrpc_pdr_notifier_cb,
			}
		},
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.channel = SMD_APPS_MODEM,
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.channel = SMD_APPS_DSPS,
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};

static int hlosvm[1] = {VMID_HLOS};
static int hlosvmperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

static inline int64_t getnstimediff(struct timespec *start)
{
	int64_t ns;
	struct timespec ts, b;

	getnstimeofday(&ts);
	b = timespec_sub(ts, *start);
	ns = timespec_to_ns(&b);
	return ns;
}

static inline int64_t *getperfcounter(struct fastrpc_file *fl, int key)
{
	int err = 0;
	int64_t *val = NULL;
	struct fastrpc_perf *perf = NULL, *fperf = NULL;
	struct hlist_node *n = NULL;

	VERIFY(err, !IS_ERR_OR_NULL(fl));
	if (err)
		goto bail;

	mutex_lock(&fl->perf_mutex);
	hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
		if (perf->tid == current->pid) {
			fperf = perf;
			break;
		}
	}

	if (IS_ERR_OR_NULL(fperf)) {
		fperf = kzalloc(sizeof(*fperf), GFP_KERNEL);

		VERIFY(err, !IS_ERR_OR_NULL(fperf));
		if (err) {
			mutex_unlock(&fl->perf_mutex);
			kfree(fperf);
			goto bail;
		}

		fperf->tid = current->pid;
		hlist_add_head(&fperf->hn, &fl->perf);
	}

	val = ((int64_t *)fperf) + key;
	mutex_unlock(&fl->perf_mutex);
bail:
	return val;
}


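/*
 * Free (or cache) a DMA buffer: with 'cache' set the buffer is only parked
 * on the file's cached_bufs list for reuse; otherwise it is removed from
 * the remote list if needed, reassigned back to HLOS when the channel uses
 * a secure VMID, and released with dma_free_attrs().
 */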
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->cached_bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (buf->remote) {
		spin_lock(&fl->hlock);
		hlist_del_init(&buf->hn_rem);
		spin_unlock(&fl->hlock);
		buf->remote = 0;
		buf->raddr = 0;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_attrs(fl->sctx->smmu.dev, buf->size, buf->virt,
					buf->phys, buf->dma_attr);
	}
	kfree(buf);
}

static void fastrpc_cached_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
			hlist_del_init(&buf->hn);
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}

static void fastrpc_remote_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->remote_bufs, hn_rem) {
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}

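/*
 * Remote-heap mappings (ADSP_MMAP_HEAP_ADDR / ADSP_MMAP_REMOTE_HEAP_ADDR)
 * are tracked on the global gfa.maps list; all other mappings live on the
 * owning file's maps list.
 */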
static void fastrpc_mmap_add(struct fastrpc_mmap *map)
{
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		struct fastrpc_apps *me = &gfa;

		spin_lock(&me->hlock);
		hlist_add_head(&map->hn, &me->maps);
		spin_unlock(&me->hlock);
	} else {
		struct fastrpc_file *fl = map->fl;

		hlist_add_head(&map->hn, &fl->maps);
	}
}

static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
		uintptr_t va, size_t len, int mflags, int refs,
		struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_mmap *match = NULL, *map = NULL;
	struct hlist_node *n;

	if ((va + len) < va)
		return -EOVERFLOW;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs) {
					if (map->refs + 1 == INT_MAX) {
						spin_unlock(&me->hlock);
						return -ETOOMANYREFS;
					}
					map->refs++;
				}
				match = map;
				break;
			}
		}
		spin_unlock(&me->hlock);
	} else {
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs) {
					if (map->refs + 1 == INT_MAX)
						return -ETOOMANYREFS;
					map->refs++;
				}
				match = map;
				break;
			}
		}
	}
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

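/*
 * Carve memory for remote-heap mappings out of the adsprpc-mem device via
 * dma_alloc_attrs(), returning the physical region and the value handed
 * back by the DMA API.
 */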
static int dma_alloc_memory(dma_addr_t *region_phys, void **vaddr, size_t size,
				unsigned long dma_attrs)
{
	struct fastrpc_apps *me = &gfa;

	if (me->dev == NULL) {
		pr_err("device adsprpc-mem is not initialized\n");
		return -ENODEV;
	}
	*vaddr = dma_alloc_attrs(me->dev, size, region_phys, GFP_KERNEL,
				dma_attrs);
	if (IS_ERR_OR_NULL(*vaddr)) {
		pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx, returned %pK\n",
				current->comm, __func__, size, (*vaddr));
		return -ENOMEM;
	}
	return 0;
}

static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			size_t len, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = NULL, *map;
	struct hlist_node *n;
	struct fastrpc_apps *me = &gfa;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

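/*
 * Drop a reference on a mapping and, once it is unused (or the caller
 * forces it via 'flags'), tear down the backing resources: remote-heap
 * regions are returned with dma_free_attrs(), ION handles are released,
 * SMMU mappings are unmapped, and any secure-VM assignment is reverted
 * back to HLOS.
 */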
static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;

	if (!map)
		return;
	fl = map->fl;
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		spin_unlock(&me->hlock);
		if (map->refs > 0)
			return;
	} else {
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		if (map->refs > 0 && !flags)
			return;
	}
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {

		if (me->dev == NULL) {
			pr_err("failed to free remote heap allocation\n");
			return;
		}
		if (map->phys) {
			unsigned long dma_attrs = DMA_ATTR_SKIP_ZEROING |
						DMA_ATTR_NO_KERNEL_MAPPING;
			dma_free_attrs(me->dev, map->size, (void *)map->va,
					(dma_addr_t)map->phys, dma_attrs);
		}
	} else if (map->flags == FASTRPC_DMAHANDLE_NOMAP) {
		if (!IS_ERR_OR_NULL(map->handle))
			ion_free(fl->apps->client, map->handle);
	} else {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		if (!IS_ERR_OR_NULL(map->handle))
			ion_free(fl->apps->client, map->handle);
		if (sess && sess->smmu.enabled) {
			if (map->size || map->phys)
				msm_dma_unmap_sg(sess->smmu.dev,
					map->table->sgl,
					map->table->nents, DMA_BIDIRECTIONAL,
					map->buf);
		}
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid && map->phys) {
			int srcVM[2] = {VMID_HLOS, vmid};

			hyp_assign_phys(map->phys, buf_page_size(map->size),
				srcVM, 2, destVM, destVMperm, 1);
		}

		if (!IS_ERR_OR_NULL(map->table))
			dma_buf_unmap_attachment(map->attach, map->table,
					DMA_BIDIRECTIONAL);
		if (!IS_ERR_OR_NULL(map->attach))
			dma_buf_detach(map->buf, map->attach);
		if (!IS_ERR_OR_NULL(map->buf))
			dma_buf_put(map->buf);
	}
	kfree(map);
}

static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
			struct fastrpc_session_ctx **session);

static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, size_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = NULL;
	unsigned long attrs;
	dma_addr_t region_phys = 0;
	void *region_vaddr = NULL;
	unsigned long flags;
	int err = 0, vmid;

	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		unsigned long dma_attrs = DMA_ATTR_SKIP_ZEROING |
					DMA_ATTR_NO_KERNEL_MAPPING;

		map->apps = me;
		map->fl = NULL;
		VERIFY(err, !dma_alloc_memory(&region_phys, &region_vaddr,
					len, dma_attrs));
		if (err)
			goto bail;
		map->phys = (uintptr_t)region_phys;
		map->size = len;
		map->va = (uintptr_t)region_vaddr;
	} else if (mflags == FASTRPC_DMAHANDLE_NOMAP) {
		ion_phys_addr_t iphys;

		VERIFY(err, !IS_ERR_OR_NULL(map->handle =
				ion_import_dma_buf_fd(fl->apps->client, fd)));
		if (err)
			goto bail;

		map->uncached = 1;
		map->buf = NULL;
		map->attach = NULL;
		map->table = NULL;
		map->va = 0;
		map->phys = 0;

		err = ion_phys(fl->apps->client, map->handle,
			&iphys, &map->size);
		if (err)
			goto bail;
		map->phys = (uint64_t)iphys;
	} else {
		if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
			pr_info("adsprpc: buffer mapped with persist attr %x\n",
				(unsigned int)map->attr);
			map->refs = 2;
		}
		VERIFY(err, !IS_ERR_OR_NULL(map->handle =
				ion_import_dma_buf_fd(fl->apps->client, fd)));
		if (err)
			goto bail;
		VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
		if (err)
			goto bail;

		map->secure = flags & ION_FLAG_SECURE;
		if (map->secure) {
			if (!fl->secsctx)
				err = fastrpc_session_alloc(chan, 1,
						&fl->secsctx);
			if (err)
				goto bail;
		}
		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		VERIFY(err, !IS_ERR_OR_NULL(sess));
		if (err)
			goto bail;

		map->uncached = !ION_IS_CACHED(flags);
		if (map->attr & FASTRPC_ATTR_NOVA && !sess->smmu.coherent)
			map->uncached = 1;

		VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->attach =
				dma_buf_attach(map->buf, sess->smmu.dev)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
		if (err)
			goto bail;
		if (sess->smmu.enabled) {
			attrs = DMA_ATTR_EXEC_MAPPING;

			if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
				(sess->smmu.coherent && map->uncached))
				attrs |= DMA_ATTR_FORCE_NON_COHERENT;
			else if (map->attr & FASTRPC_ATTR_COHERENT)
				attrs |= DMA_ATTR_FORCE_COHERENT;

			VERIFY(err, map->table->nents ==
				msm_dma_map_sg_attrs(sess->smmu.dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
			if (err)
				goto bail;
		} else {
			VERIFY(err, map->table->nents == 1);
			if (err)
				goto bail;
		}
		map->phys = sg_dma_address(map->table->sgl);

		if (sess->smmu.cb) {
			map->phys += ((uint64_t)sess->smmu.cb << 32);
			map->size = sg_dma_len(map->table->sgl);
		} else {
			map->size = buf_page_size(len);
		}

		vmid = fl->apps->channel[fl->cid].vmid;
		if (!sess->smmu.enabled && !vmid) {
			VERIFY(err, map->phys >= me->range.addr &&
				map->phys + map->size <=
				me->range.addr + me->range.size);
			if (err) {
				pr_err("adsprpc: mmap fail out of range\n");
				goto bail;
			}
		}
		if (vmid) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[2] = {VMID_HLOS, vmid};
			int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

			VERIFY(err, !hyp_assign_phys(map->phys,
					buf_page_size(map->size),
					srcVM, 1, destVM, destVMperm, 2));
			if (err)
				goto bail;
		}
		map->va = va;
	}
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map, 0);
	return err;
}

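/*
 * Allocate a DMA buffer for the file, reusing the smallest cached buffer
 * that fits when 'remote' is not set; remote buffers are tracked on the
 * file's remote_bufs list instead of the cache.
 */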
static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
			unsigned long dma_attr, uint32_t rflags,
			int remote, struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = NULL, *fr = NULL;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	if (!remote) {
		/* find the smallest buffer that fits in the cache */
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
			if (buf->size >= size && (!fr || fr->size > buf->size))
				fr = buf;
		}
		if (fr)
			hlist_del_init(&fr->hn);
		spin_unlock(&fl->hlock);
		if (fr) {
			*obuf = fr;
			return 0;
		}
	}
	buf = NULL;
	VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->dma_attr = dma_attr;
	buf->flags = rflags;
	buf->raddr = 0;
	buf->remote = 0;
	buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
					(dma_addr_t *)&buf->phys,
					GFP_KERNEL, buf->dma_attr);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_cached_buf_list_free(fl);
		buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
					(dma_addr_t *)&buf->phys,
					GFP_KERNEL, buf->dma_attr);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err) {
		err = -ENOMEM;
		pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx\n",
			current->comm, __func__, size);
		goto bail;
	}
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	if (remote) {
		INIT_HLIST_NODE(&buf->hn_rem);
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn_rem, &fl->remote_bufs);
		spin_unlock(&fl->hlock);
		buf->remote = remote;
	}
	*obuf = buf;
 bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}


static int context_restore_interrupted(struct fastrpc_file *fl,
			struct fastrpc_ioctl_invoke_crc *inv,
			struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int overlap_ptr_cmp(const void *a, const void *b)
{
	struct overlap *pa = *((struct overlap **)a);
	struct overlap *pb = *((struct overlap **)b);
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);
	return st == 0 ? ed : st;
}

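/*
 * Sort the in/out buffers by start address and record where they overlap,
 * so that overlapping ranges are only counted and copied once when sizing
 * the copy buffer in get_args().
 */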
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}

#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst),\
			(void const __user *)(src),\
							(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((void __user *)(dst),\
						(src), (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)


static void context_free(struct smq_invoke_ctx *ctx);

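/*
 * Build an invoke context for one RPC call: copy the caller's remote_arg
 * descriptors, fds and attributes, compute buffer overlaps, and reserve a
 * slot in the global context table so the response can be matched back to
 * this context via its ctxid.
 */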
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			struct fastrpc_ioctl_invoke_crc *invokefd,
			struct smq_invoke_ctx **po)
{
	struct fastrpc_apps *me = &gfa;
	int err = 0, bufs, ii, size = 0;
	struct smq_invoke_ctx *ctx = NULL;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	if (me->legacy) {
		ctx->overs = (struct overlap *)(&ctx->fds[bufs]);
		ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
	} else {
		ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
		ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
		ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
	}

	K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}
	ctx->crc = (uint32_t *)invokefd->crc;
	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = fl->tgid;
	init_completion(&ctx->work);
	ctx->magic = FASTRPC_CTX_MAGIC;

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	spin_lock(&me->ctxlock);
	for (ii = 0; ii < FASTRPC_CTX_MAX; ii++) {
		if (!me->ctxtable[ii]) {
			me->ctxtable[ii] = ctx;
			ctx->ctxid = (ptr_to_uint64(ctx) & ~0xFFF)|(ii << 4);
			break;
		}
	}
	spin_unlock(&me->ctxlock);
	VERIFY(err, ii < FASTRPC_CTX_MAX);
	if (err) {
		pr_err("adsprpc: out of context memory\n");
		goto bail;
	}

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}

static void context_save_interrupted(struct smq_invoke_ctx *ctx)
{
	struct fastrpc_ctx_lst *clst = &ctx->fl->clst;

	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	hlist_add_head(&ctx->hn, &clst->interrupted);
	spin_unlock(&ctx->fl->hlock);
}

static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	struct fastrpc_apps *me = &gfa;
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	mutex_lock(&ctx->fl->fl_map_mutex);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i], 0);

	mutex_unlock(&ctx->fl->fl_map_mutex);
	fastrpc_buf_free(ctx->buf, 1);
	fastrpc_buf_free(ctx->lbuf, 1);
	ctx->magic = 0;
	ctx->ctxid = 0;

	spin_lock(&me->ctxlock);
	for (i = 0; i < FASTRPC_CTX_MAX; i++) {
		if (me->ctxtable[i] == ctx) {
			me->ctxtable[i] = NULL;
			break;
		}
	}
	spin_unlock(&me->ctxlock);

	kfree(ctx);
}

static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}


static void fastrpc_notify_users(struct fastrpc_file *me)
{
	struct smq_invoke_ctx *ictx;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
		complete(&ictx->work);
	}
	hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
		complete(&ictx->work);
	}
	spin_unlock(&me->hlock);

}


static void fastrpc_notify_users_staticpd_pdr(struct fastrpc_file *me)
{
	struct smq_invoke_ctx *ictx;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
		if (ictx->msg.pid)
			complete(&ictx->work);
	}
	hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
		if (ictx->msg.pid)
			complete(&ictx->work);
	}
	spin_unlock(&me->hlock);
}


static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
{
	struct fastrpc_file *fl;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		if (fl->cid == cid)
			fastrpc_notify_users(fl);
	}
	spin_unlock(&me->hlock);

}

static void fastrpc_notify_pdr_drivers(struct fastrpc_apps *me, char *spdname)
{
	struct fastrpc_file *fl;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		if (fl->spdname && !strcmp(spdname, fl->spdname))
			fastrpc_notify_users_staticpd_pdr(fl);
	}
	spin_unlock(&me->hlock);

}

static void context_list_ctor(struct fastrpc_ctx_lst *me)
{
	INIT_HLIST_HEAD(&me->interrupted);
	INIT_HLIST_HEAD(&me->pending);
}

static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
{
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct smq_invoke_ctx *ictx = NULL, *ctxfree;
	struct hlist_node *n;

	do {
		ctxfree = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
	do {
		ctxfree = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
}

static int fastrpc_file_free(struct fastrpc_file *fl);
static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
{
	struct fastrpc_file *fl, *free;
	struct hlist_node *n;

	do {
		free = NULL;
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
			hlist_del_init(&fl->hn);
			free = fl;
			break;
		}
		spin_unlock(&me->hlock);
		if (free)
			fastrpc_file_free(free);
	} while (free);
}

1432{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301433 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301434 remote_arg64_t *rpra, *lrpra;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001435 remote_arg_t *lpra = ctx->lpra;
1436 struct smq_invoke_buf *list;
1437 struct smq_phy_page *pages, *ipage;
1438 uint32_t sc = ctx->sc;
1439 int inbufs = REMOTE_SCALARS_INBUFS(sc);
1440 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001441 int handles, bufs = inbufs + outbufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001442 uintptr_t args;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301443 size_t rlen = 0, copylen = 0, metalen = 0, lrpralen = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001444 int i, oix;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001445 int err = 0;
1446 int mflags = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001447 uint64_t *fdlist;
Sathish Ambleybae51902017-07-03 15:00:49 -07001448 uint32_t *crclist;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301449 int64_t *perf_counter = getperfcounter(ctx->fl, PERF_COUNT);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001450
1451 /* calculate size of the metadata */
c_mtharue1a5ce12017-10-13 20:47:09 +05301452 rpra = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001453 list = smq_invoke_buf_start(rpra, sc);
1454 pages = smq_phy_page_start(sc, list);
1455 ipage = pages;
1456
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301457 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001458 for (i = 0; i < bufs; ++i) {
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301459 uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
1460 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001461
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301462 mutex_lock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301463 if (ctx->fds[i] && (ctx->fds[i] != -1)) {
1464 unsigned int attrs = 0;
1465
1466 if (ctx->attrs)
1467 attrs = ctx->attrs[i];
1468
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001469 fastrpc_mmap_create(ctx->fl, ctx->fds[i],
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301470 attrs, buf, len,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001471 mflags, &ctx->maps[i]);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301472 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301473 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001474 ipage += 1;
1475 }
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301476 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001477 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301478 mutex_lock(&ctx->fl->fl_map_mutex);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001479 for (i = bufs; i < bufs + handles; i++) {
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +05301480 int dmaflags = 0;
1481
1482 if (ctx->attrs && (ctx->attrs[i] & FASTRPC_ATTR_NOMAP))
1483 dmaflags = FASTRPC_DMAHANDLE_NOMAP;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001484 VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +05301485 FASTRPC_ATTR_NOVA, 0, 0, dmaflags, &ctx->maps[i]));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301486 if (err) {
1487 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001488 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301489 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001490 ipage += 1;
1491 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301492 mutex_unlock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301493 if (!me->legacy) {
1494 metalen = copylen = (size_t)&ipage[0] +
1495 (sizeof(uint64_t) * M_FDLIST) +
1496 (sizeof(uint32_t) * M_CRCLIST);
1497 } else {
1498 metalen = copylen = (size_t)&ipage[0];
1499 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001500
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301501 /* allocate new local rpra buffer */
1502 lrpralen = (size_t)&list[0];
1503 if (lrpralen) {
1504 err = fastrpc_buf_alloc(ctx->fl, lrpralen, 0, 0, 0, &ctx->lbuf);
1505 if (err)
1506 goto bail;
1507 }
1508 if (ctx->lbuf->virt)
1509 memset(ctx->lbuf->virt, 0, lrpralen);
1510
1511 lrpra = ctx->lbuf->virt;
1512 ctx->lrpra = lrpra;
1513
1514 /* calculate len required for copying */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001515 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1516 int i = ctx->overps[oix]->raix;
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001517 uintptr_t mstart, mend;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301518 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001519
1520 if (!len)
1521 continue;
1522 if (ctx->maps[i])
1523 continue;
1524 if (ctx->overps[oix]->offset == 0)
1525 copylen = ALIGN(copylen, BALIGN);
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001526 mstart = ctx->overps[oix]->mstart;
1527 mend = ctx->overps[oix]->mend;
1528 VERIFY(err, (mend - mstart) <= LONG_MAX);
1529 if (err)
1530 goto bail;
1531 copylen += mend - mstart;
1532 VERIFY(err, copylen >= 0);
1533 if (err)
1534 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001535 }
1536 ctx->used = copylen;
1537
1538 /* allocate new buffer */
1539 if (copylen) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05301540 err = fastrpc_buf_alloc(ctx->fl, copylen, 0, 0, 0, &ctx->buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001541 if (err)
1542 goto bail;
1543 }
Tharun Kumar Merugue3361f92017-06-22 10:45:43 +05301544 if (ctx->buf->virt && metalen <= copylen)
1545 memset(ctx->buf->virt, 0, metalen);
1546
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001547 /* copy metadata */
1548 rpra = ctx->buf->virt;
1549 ctx->rpra = rpra;
1550 list = smq_invoke_buf_start(rpra, sc);
1551 pages = smq_phy_page_start(sc, list);
1552 ipage = pages;
1553 args = (uintptr_t)ctx->buf->virt + metalen;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001554 for (i = 0; i < bufs + handles; ++i) {
1555 if (lpra[i].buf.len)
1556 list[i].num = 1;
1557 else
1558 list[i].num = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001559 list[i].pgidx = ipage - pages;
1560 ipage++;
1561 }
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301562
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001563 /* map ion buffers */
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301564 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301565 for (i = 0; rpra && lrpra && i < inbufs + outbufs; ++i) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001566 struct fastrpc_mmap *map = ctx->maps[i];
1567 uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301568 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001569
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301570 rpra[i].buf.pv = lrpra[i].buf.pv = 0;
1571 rpra[i].buf.len = lrpra[i].buf.len = len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001572 if (!len)
1573 continue;
1574 if (map) {
1575 struct vm_area_struct *vma;
1576 uintptr_t offset;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301577 uint64_t num = buf_num_pages(buf, len);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001578 int idx = list[i].pgidx;
1579
1580 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001581 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001582 } else {
1583 down_read(&current->mm->mmap_sem);
1584 VERIFY(err, NULL != (vma = find_vma(current->mm,
1585 map->va)));
1586 if (err) {
1587 up_read(&current->mm->mmap_sem);
1588 goto bail;
1589 }
1590 offset = buf_page_start(buf) - vma->vm_start;
1591 up_read(&current->mm->mmap_sem);
1592 VERIFY(err, offset < (uintptr_t)map->size);
1593 if (err)
1594 goto bail;
1595 }
1596 pages[idx].addr = map->phys + offset;
1597 pages[idx].size = num << PAGE_SHIFT;
1598 }
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301599 rpra[i].buf.pv = lrpra[i].buf.pv = buf;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001600 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001601 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001602 for (i = bufs; i < bufs + handles; ++i) {
1603 struct fastrpc_mmap *map = ctx->maps[i];
1604
1605 pages[i].addr = map->phys;
1606 pages[i].size = map->size;
1607 }
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301608 if (!me->legacy) {
1609 fdlist = (uint64_t *)&pages[bufs + handles];
1610 for (i = 0; i < M_FDLIST; i++)
1611 fdlist[i] = 0;
1612 crclist = (uint32_t *)&fdlist[M_FDLIST];
1613 memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
1614 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001615
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001616 /* copy non ion buffers */
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301617 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001618 rlen = copylen - metalen;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301619 for (oix = 0; rpra && lrpra && oix < inbufs + outbufs; ++oix) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001620 int i = ctx->overps[oix]->raix;
1621 struct fastrpc_mmap *map = ctx->maps[i];
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301622 size_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001623 uint64_t buf;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301624 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001625
1626 if (!len)
1627 continue;
1628 if (map)
1629 continue;
1630 if (ctx->overps[oix]->offset == 0) {
1631 rlen -= ALIGN(args, BALIGN) - args;
1632 args = ALIGN(args, BALIGN);
1633 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001634 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001635 VERIFY(err, rlen >= mlen);
1636 if (err)
1637 goto bail;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301638 rpra[i].buf.pv = lrpra[i].buf.pv =
1639 (args - ctx->overps[oix]->offset);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001640 pages[list[i].pgidx].addr = ctx->buf->phys -
1641 ctx->overps[oix]->offset +
1642 (copylen - rlen);
1643 pages[list[i].pgidx].addr =
1644 buf_page_start(pages[list[i].pgidx].addr);
1645 buf = rpra[i].buf.pv;
1646 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1647 if (i < inbufs) {
1648 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1649 lpra[i].buf.pv, len);
1650 if (err)
1651 goto bail;
1652 }
1653 args = args + mlen;
1654 rlen -= mlen;
1655 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001656 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001657
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301658 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001659 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1660 int i = ctx->overps[oix]->raix;
1661 struct fastrpc_mmap *map = ctx->maps[i];
1662
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001663 if (map && map->uncached)
1664 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301665 if (ctx->fl->sctx->smmu.coherent &&
1666 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1667 continue;
1668 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1669 continue;
1670
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301671 if (rpra && lrpra && rpra[i].buf.len &&
1672 ctx->overps[oix]->mstart) {
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301673 if (map && map->handle)
1674 msm_ion_do_cache_op(ctx->fl->apps->client,
1675 map->handle,
1676 uint64_to_ptr(rpra[i].buf.pv),
1677 rpra[i].buf.len,
1678 ION_IOC_CLEAN_INV_CACHES);
1679 else
1680 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1681 uint64_to_ptr(rpra[i].buf.pv
1682 + rpra[i].buf.len));
1683 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001684 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001685 PERF_END);
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301686 for (i = bufs; rpra && lrpra && i < bufs + handles; i++) {
1687 rpra[i].dma.fd = lrpra[i].dma.fd = ctx->fds[i];
1688 rpra[i].dma.len = lrpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1689 rpra[i].dma.offset = lrpra[i].dma.offset =
1690 (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001691 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001692
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001693 bail:
1694 return err;
1695}
1696
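/*
 * put_args() propagates the results of a completed invocation back to the
 * caller: output buffers that were copied (not mapped) are copied back to
 * user space, per-argument mappings are released, any maps named in the fd
 * list returned by the DSP are freed, and the CRC list is copied out when
 * the caller asked for it.
 */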
static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
		    remote_arg_t *upra)
{
	struct fastrpc_apps *me = &gfa;
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist = NULL;
	uint32_t *crclist = NULL;

	remote_arg64_t *rpra = ctx->lrpra;
	int i, inbufs, outbufs, handles;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	list = smq_invoke_buf_start(ctx->rpra, sc);
	pages = smq_phy_page_start(sc, list);
	if (!me->legacy) {
		fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
		crclist = (uint32_t *)(fdlist + M_FDLIST);
	}

	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
			K_COPY_TO_USER(err, kernel,
				ctx->lpra[i].buf.pv,
				uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len);
			if (err)
				goto bail;
		} else {
			mutex_lock(&ctx->fl->fl_map_mutex);
			fastrpc_mmap_free(ctx->maps[i], 0);
			mutex_unlock(&ctx->fl->fl_map_mutex);
			ctx->maps[i] = NULL;
		}
	}
	mutex_lock(&ctx->fl->fl_map_mutex);
	if (fdlist && (inbufs + outbufs + handles)) {
		for (i = 0; i < M_FDLIST; i++) {
			if (!fdlist[i])
				break;
			if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
						0, 0, &mmap))
				fastrpc_mmap_free(mmap, 0);
		}
	}
	mutex_unlock(&ctx->fl->fl_map_mutex);
	if (ctx->crc && crclist && rpra)
		K_COPY_TO_USER(err, kernel, ctx->crc,
			crclist, M_CRCLIST*sizeof(uint32_t));

 bail:
	return err;
}

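/*
 * inv_args_pre() runs before the message is sent on non-coherent SMMU
 * contexts. For output buffers that are not cache-line aligned it cleans and
 * invalidates the cache lines at the start and end of each buffer,
 * presumably so that a later invalidation cannot discard adjacent valid data
 * sharing those lines.
 */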
static void inv_args_pre(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->rpra;
	uintptr_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv))
			continue;
		if (!IS_CACHE_ALIGNED((uintptr_t)
				uint64_to_ptr(rpra[i].buf.pv))) {
			if (map && map->handle)
				msm_ion_do_cache_op(ctx->fl->apps->client,
					map->handle,
					uint64_to_ptr(rpra[i].buf.pv),
					sizeof(uintptr_t),
					ION_IOC_CLEAN_INV_CACHES);
			else
				dmac_flush_range(
					uint64_to_ptr(rpra[i].buf.pv), (char *)
					uint64_to_ptr(rpra[i].buf.pv + 1));
		}

		end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
							rpra[i].buf.len);
		if (!IS_CACHE_ALIGNED(end)) {
			if (map && map->handle)
				msm_ion_do_cache_op(ctx->fl->apps->client,
						map->handle,
						uint64_to_ptr(end),
						sizeof(uintptr_t),
						ION_IOC_CLEAN_INV_CACHES);
			else
				dmac_flush_range((char *)end,
					(char *)end + 1);
		}
	}
}

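/*
 * inv_args() runs after the DSP response arrives and invalidates CPU caches
 * for the output buffers, so the caller reads what the DSP wrote rather than
 * stale cache contents. Uncached, coherent, and metadata-page buffers are
 * skipped.
 */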
static void inv_args(struct smq_invoke_ctx *ctx)
{
	int i, inbufs, outbufs;
	uint32_t sc = ctx->sc;
	remote_arg64_t *rpra = ctx->lrpra;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];

		if (map && map->uncached)
			continue;
		if (!rpra[i].buf.len)
			continue;
		if (ctx->fl->sctx->smmu.coherent &&
			!(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
			continue;
		if (map && (map->attr & FASTRPC_ATTR_COHERENT))
			continue;

		if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
				buf_page_start(rpra[i].buf.pv)) {
			continue;
		}
		if (map && map->handle)
			msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
				(char *)uint64_to_ptr(rpra[i].buf.pv),
				rpra[i].buf.len, ION_IOC_INV_CACHES);
		else
			dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
				(char *)uint64_to_ptr(rpra[i].buf.pv
						+ rpra[i].buf.len));
	}

}

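/*
 * fastrpc_invoke_send() builds the smq_msg for a context (caller pid/tid,
 * packed context id, handle, scalars, and the physical page of the argument
 * buffer) and transmits it over G-Link or, on legacy targets, over SMD.
 */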
static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
			       uint32_t kernel, uint32_t handle)
{
	struct smq_msg *msg = &ctx->msg;
	struct fastrpc_file *fl = ctx->fl;
	struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
	int err = 0, len;

	VERIFY(err, NULL != channel_ctx->chan);
	if (err)
		goto bail;
	msg->pid = fl->tgid;
	msg->tid = current->pid;
	if (fl->sessionid)
		msg->tid |= (1 << SESSION_ID_INDEX);
	if (kernel)
		msg->pid = 0;
	msg->invoke.header.ctx = ctx->ctxid | fl->pd;
	msg->invoke.header.handle = handle;
	msg->invoke.header.sc = ctx->sc;
	msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
	msg->invoke.page.size = buf_page_size(ctx->used);
	if (fl->apps->glink) {
		if (fl->ssrcount != channel_ctx->ssrcount) {
			err = -ECONNRESET;
			goto bail;
		}
		VERIFY(err, channel_ctx->link.port_state ==
				FASTRPC_LINK_CONNECTED);
		if (err)
			goto bail;
		err = glink_tx(channel_ctx->chan,
			(void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
			GLINK_TX_REQ_INTENT);
	} else {
		spin_lock(&fl->apps->hlock);
		len = smd_write((smd_channel_t *)
				channel_ctx->chan,
				msg, sizeof(*msg));
		spin_unlock(&fl->apps->hlock);
		VERIFY(err, len == sizeof(*msg));
	}
 bail:
	return err;
}

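/*
 * fastrpc_smd_read_handler() drains response packets from the SMD channel,
 * validates the context index and magic embedded in each response against
 * the global context table, and wakes up the waiting invocation.
 */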
static void fastrpc_smd_read_handler(int cid)
{
	struct fastrpc_apps *me = &gfa;
	struct smq_invoke_rsp rsp = {0};
	int ret = 0, err = 0;
	uint32_t index;

	do {
		ret = smd_read_from_cb(me->channel[cid].chan, &rsp,
					sizeof(rsp));
		if (ret != sizeof(rsp))
			break;

		index = (uint32_t)((rsp.ctx & FASTRPC_CTXID_MASK) >> 4);
		VERIFY(err, index < FASTRPC_CTX_MAX);
		if (err)
			goto bail;

		VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
		if (err)
			goto bail;

		VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp.ctx & ~3)) &&
			me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
		if (err)
			goto bail;

		context_notify_user(me->ctxtable[index], rsp.retval);
	} while (ret == sizeof(rsp));
bail:
	if (err)
		pr_err("adsprpc: invalid response or context\n");

}

static void smd_event_handler(void *priv, unsigned int event)
{
	struct fastrpc_apps *me = &gfa;
	int cid = (int)(uintptr_t)priv;

	switch (event) {
	case SMD_EVENT_OPEN:
		complete(&me->channel[cid].workport);
		break;
	case SMD_EVENT_CLOSE:
		fastrpc_notify_drivers(me, cid);
		break;
	case SMD_EVENT_DATA:
		fastrpc_smd_read_handler(cid);
		break;
	}
}


static void fastrpc_init(struct fastrpc_apps *me)
{
	int i;

	INIT_HLIST_HEAD(&me->drivers);
	INIT_HLIST_HEAD(&me->maps);
	spin_lock_init(&me->hlock);
	spin_lock_init(&me->ctxlock);
	mutex_init(&me->smd_mutex);
	me->channel = &gcinfo[0];
	for (i = 0; i < NUM_CHANNELS; i++) {
		init_completion(&me->channel[i].work);
		init_completion(&me->channel[i].workport);
		me->channel[i].sesscount = 0;
		/* All channels are secure by default except CDSP */
		me->channel[i].secure = SECURE_CHANNEL;
	}
	/* Set CDSP channel to non secure */
	me->channel[CDSP_DOMAIN_ID].secure = NON_SECURE_CHANNEL;
}

static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);

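/*
 * fastrpc_internal_invoke() is the core invocation path shared by the ioctl
 * handler and in-kernel callers: it restores or allocates an invoke context,
 * marshals the arguments, performs the required cache maintenance, sends the
 * message, waits for completion, and copies results back. If a user wait is
 * interrupted, the context is saved so the call can be resumed later.
 */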
static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
				   uint32_t kernel,
				   struct fastrpc_ioctl_invoke_crc *inv)
{
	struct smq_invoke_ctx *ctx = NULL;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;
	int cid = fl->cid;
	int interrupted = 0;
	int err = 0;
	struct timespec invoket = {0};
	int64_t *perf_counter = getperfcounter(fl, PERF_COUNT);

	if (fl->profile)
		getnstimeofday(&invoket);

	if (!kernel) {
		VERIFY(err, invoke->handle != FASTRPC_STATIC_HANDLE_KERNEL);
		if (err) {
			pr_err("adsprpc: ERROR: %s: user application %s trying to send a kernel RPC message to channel %d",
				__func__, current->comm, cid);
			goto bail;
		}
	}

	VERIFY(err, fl->sctx != NULL);
	if (err)
		goto bail;
	VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
	if (err)
		goto bail;

	if (!kernel) {
		VERIFY(err, 0 == context_restore_interrupted(fl, inv,
								&ctx));
		if (err)
			goto bail;
		if (fl->sctx->smmu.faults)
			err = FASTRPC_ENOSUCH;
		if (err)
			goto bail;
		if (ctx)
			goto wait;
	}

	VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
	if (err)
		goto bail;

	if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
		PERF(fl->profile, GET_COUNTER(perf_counter, PERF_GETARGS),
		VERIFY(err, 0 == get_args(kernel, ctx));
		PERF_END);
		if (err)
			goto bail;
	}

	if (!fl->sctx->smmu.coherent) {
		PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
		inv_args_pre(ctx);
		PERF_END);
	}

	PERF(fl->profile, GET_COUNTER(perf_counter, PERF_LINK),
	VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
	PERF_END);

	if (err)
		goto bail;
 wait:
	if (kernel)
		wait_for_completion(&ctx->work);
	else {
		interrupted = wait_for_completion_interruptible(&ctx->work);
		VERIFY(err, 0 == (err = interrupted));
		if (err)
			goto bail;
	}
	if (ctx->handle)
		glink_rx_done(ctx->handle, ctx->ptr, true);
	PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
	if (!fl->sctx->smmu.coherent)
		inv_args(ctx);
	PERF_END);

	VERIFY(err, 0 == (err = ctx->retval));
	if (err)
		goto bail;

	PERF(fl->profile, GET_COUNTER(perf_counter, PERF_PUTARGS),
	VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
	PERF_END);
	if (err)
		goto bail;
 bail:
	if (ctx && interrupted == -ERESTARTSYS)
		context_save_interrupted(ctx);
	else if (ctx)
		context_free(ctx);
	if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
		err = ECONNRESET;

	if (fl->profile && !interrupted) {
		if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER) {
			int64_t *count = GET_COUNTER(perf_counter, PERF_INVOKE);

			if (count)
				*count += getnstimediff(&invoket);
		}
		if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX) {
			int64_t *count = GET_COUNTER(perf_counter, PERF_COUNT);

			if (count)
				*count = *count+1;
		}
	}
	return err;
}

static int fastrpc_get_adsp_session(char *name, int *session)
{
	struct fastrpc_apps *me = &gfa;
	int err = 0, i;

	for (i = 0; i < NUM_SESSIONS; i++) {
		if (!me->channel[0].spd[i].spdname)
			continue;
		if (!strcmp(name, me->channel[0].spd[i].spdname))
			break;
	}
	VERIFY(err, i < NUM_SESSIONS);
	if (err)
		goto bail;
	*session = i;
bail:
	return err;
}

static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl);
static int fastrpc_channel_open(struct fastrpc_file *fl);
static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl);
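/*
 * fastrpc_init_process() creates or attaches to a remote process for this
 * file descriptor. FASTRPC_INIT_ATTACH and FASTRPC_INIT_ATTACH_SENSORS
 * attach to an existing protection domain, FASTRPC_INIT_CREATE spawns a
 * dynamic user PD and donates kernel-allocated init memory to it, and
 * FASTRPC_INIT_CREATE_STATIC brings up a static PD (for example "audiopd"),
 * assigning the remote heap to the remote VM on first use.
 */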
static int fastrpc_init_process(struct fastrpc_file *fl,
				struct fastrpc_ioctl_init_attrs *uproc)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_ioctl_invoke_crc ioctl;
	struct fastrpc_ioctl_init *init = &uproc->init;
	struct smq_phy_page pages[1];
	struct fastrpc_mmap *file = NULL, *mem = NULL;
	struct fastrpc_buf *imem = NULL;
	unsigned long imem_dma_attr = 0;
	char *proc_name = NULL;

	VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
	if (err)
		goto bail;
	if (init->flags == FASTRPC_INIT_ATTACH ||
			init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
		remote_arg_t ra[1];
		int tgid = fl->tgid;

		ra[0].buf.pv = (void *)&tgid;
		ra[0].buf.len = sizeof(tgid);
		ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = NULL;
		ioctl.attrs = NULL;
		ioctl.crc = NULL;
		if (init->flags == FASTRPC_INIT_ATTACH)
			fl->pd = 0;
		else if (init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
			fl->spdname = SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME;
			fl->pd = 2;
		}
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else if (init->flags == FASTRPC_INIT_CREATE) {
		remote_arg_t ra[6];
		int fds[6];
		int mflags = 0;
		int memlen;
		struct {
			int pgid;
			unsigned int namelen;
			unsigned int filelen;
			unsigned int pageslen;
			int attrs;
			int siglen;
		} inbuf;

		inbuf.pgid = fl->tgid;
		inbuf.namelen = strlen(current->comm) + 1;
		inbuf.filelen = init->filelen;
		fl->pd = 1;

		VERIFY(err, access_ok(0, (void __user *)init->file,
			init->filelen));
		if (err)
			goto bail;
		if (init->filelen) {
			mutex_lock(&fl->fl_map_mutex);
			VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
				init->file, init->filelen, mflags, &file));
			mutex_unlock(&fl->fl_map_mutex);
			if (err)
				goto bail;
		}
		inbuf.pageslen = 1;

		VERIFY(err, !init->mem);
		if (err) {
			err = -EINVAL;
			pr_err("adsprpc: %s: %s: ERROR: donated memory allocated in userspace\n",
				current->comm, __func__);
			goto bail;
		}
		memlen = ALIGN(max(1024*1024*3, (int)init->filelen * 4),
						1024*1024);
		imem_dma_attr = DMA_ATTR_EXEC_MAPPING |
				DMA_ATTR_NO_KERNEL_MAPPING |
				DMA_ATTR_FORCE_NON_COHERENT;
		err = fastrpc_buf_alloc(fl, memlen, imem_dma_attr, 0, 0, &imem);
		if (err)
			goto bail;
		fl->init_mem = imem;

		inbuf.pageslen = 1;
		ra[0].buf.pv = (void *)&inbuf;
		ra[0].buf.len = sizeof(inbuf);
		fds[0] = 0;

		ra[1].buf.pv = (void *)current->comm;
		ra[1].buf.len = inbuf.namelen;
		fds[1] = 0;

		ra[2].buf.pv = (void *)init->file;
		ra[2].buf.len = inbuf.filelen;
		fds[2] = init->filefd;

		pages[0].addr = imem->phys;
		pages[0].size = imem->size;
		ra[3].buf.pv = (void *)pages;
		ra[3].buf.len = 1 * sizeof(*pages);
		fds[3] = 0;

		inbuf.attrs = uproc->attrs;
		ra[4].buf.pv = (void *)&(inbuf.attrs);
		ra[4].buf.len = sizeof(inbuf.attrs);
		fds[4] = 0;

		inbuf.siglen = uproc->siglen;
		ra[5].buf.pv = (void *)&(inbuf.siglen);
		ra[5].buf.len = sizeof(inbuf.siglen);
		fds[5] = 0;

		ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
		if (uproc->attrs)
			ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = fds;
		ioctl.attrs = NULL;
		ioctl.crc = NULL;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
		remote_arg_t ra[3];
		uint64_t phys = 0;
		size_t size = 0;
		int fds[3];
		struct {
			int pgid;
			unsigned int namelen;
			unsigned int pageslen;
		} inbuf;

		if (!init->filelen)
			goto bail;

		proc_name = kzalloc(init->filelen, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(proc_name));
		if (err)
			goto bail;
		VERIFY(err, 0 == copy_from_user((void *)proc_name,
			(void __user *)init->file, init->filelen));
		if (err)
			goto bail;

		fl->pd = 1;
		inbuf.pgid = current->tgid;
		inbuf.namelen = init->filelen;
		inbuf.pageslen = 0;

		if (!strcmp(proc_name, "audiopd")) {
			fl->spdname = AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME;
			VERIFY(err, !fastrpc_mmap_remove_pdr(fl));
			if (err)
				goto bail;
		}

		if (!me->staticpd_flags) {
			inbuf.pageslen = 1;
			mutex_lock(&fl->fl_map_mutex);
			VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
				init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
				&mem));
			mutex_unlock(&fl->fl_map_mutex);
			if (err)
				goto bail;
			phys = mem->phys;
			size = mem->size;
			VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
				hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
				me->channel[fl->cid].rhvm.vmperm,
				me->channel[fl->cid].rhvm.vmcount));
			if (err) {
				pr_err("ADSPRPC: hyp_assign_phys fail err %d",
							err);
				pr_err("map->phys %llx, map->size %d\n",
							phys, (int)size);
				goto bail;
			}
			me->staticpd_flags = 1;
		}

		ra[0].buf.pv = (void *)&inbuf;
		ra[0].buf.len = sizeof(inbuf);
		fds[0] = 0;

		ra[1].buf.pv = (void *)proc_name;
		ra[1].buf.len = inbuf.namelen;
		fds[1] = 0;

		pages[0].addr = phys;
		pages[0].size = size;

		ra[2].buf.pv = (void *)pages;
		ra[2].buf.len = sizeof(*pages);
		fds[2] = 0;
		ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;

		ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
		ioctl.inv.pra = ra;
		ioctl.fds = NULL;
		ioctl.attrs = NULL;
		ioctl.crc = NULL;
		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err)
			goto bail;
	} else {
		err = -ENOTTY;
	}
bail:
	kfree(proc_name);
	if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
		me->staticpd_flags = 0;
	if (mem && err) {
		if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
			hyp_assign_phys(mem->phys, (uint64_t)mem->size,
				me->channel[fl->cid].rhvm.vmid,
				me->channel[fl->cid].rhvm.vmcount,
				hlosvm, hlosvmperm, 1);
		mutex_lock(&fl->fl_map_mutex);
		fastrpc_mmap_free(mem, 0);
		mutex_unlock(&fl->fl_map_mutex);
	}
	if (file) {
		mutex_lock(&fl->fl_map_mutex);
		fastrpc_mmap_free(file, 0);
		mutex_unlock(&fl->fl_map_mutex);
	}
	return err;
}

static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_crc ioctl;
	remote_arg_t ra[1];
	int tgid = 0;

	VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
	if (err)
		goto bail;
	VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
	if (err)
		goto bail;
	tgid = fl->tgid;
	ra[0].buf.pv = (void *)&tgid;
	ra[0].buf.len = sizeof(tgid);
	ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
	ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = NULL;
	ioctl.attrs = NULL;
	ioctl.crc = NULL;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
bail:
	return err;
}

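/*
 * fastrpc_mmap_on_dsp() asks the remote process to map a physical range and
 * returns the remote-side address in *raddr. Heap mappings are additionally
 * protected through an SCM call into TZ, and remote-heap mappings are
 * hyp-assigned to the remote VM.
 */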
static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
			       uintptr_t va, uint64_t phys,
			       size_t size, uintptr_t *raddr)
{
	struct fastrpc_ioctl_invoke_crc ioctl;
	struct fastrpc_apps *me = &gfa;
	struct smq_phy_page page;
	int num = 1;
	remote_arg_t ra[3];
	int err = 0;
	struct {
		int pid;
		uint32_t flags;
		uintptr_t vaddrin;
		int num;
	} inargs;
	struct {
		uintptr_t vaddrout;
	} routargs;

	inargs.pid = fl->tgid;
	inargs.vaddrin = (uintptr_t)va;
	inargs.flags = flags;
	inargs.num = fl->apps->compat ? num * sizeof(page) : num;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);
	page.addr = phys;
	page.size = size;
	ra[1].buf.pv = (void *)&page;
	ra[1].buf.len = num * sizeof(page);

	ra[2].buf.pv = (void *)&routargs;
	ra[2].buf.len = sizeof(routargs);

	ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
	if (fl->apps->compat)
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
	else
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
	ioctl.inv.pra = ra;
	ioctl.fds = NULL;
	ioctl.attrs = NULL;
	ioctl.crc = NULL;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	*raddr = (uintptr_t)routargs.vaddrout;
	if (err)
		goto bail;
	if (flags == ADSP_MMAP_HEAP_ADDR) {
		struct scm_desc desc = {0};

		desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
		desc.args[1] = phys;
		desc.args[2] = size;
		desc.arginfo = SCM_ARGS(3);
		err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
			TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
	} else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
				hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
				me->channel[fl->cid].rhvm.vmperm,
				me->channel[fl->cid].rhvm.vmcount));
		if (err)
			goto bail;
	}
bail:
	return err;
}

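/*
 * fastrpc_munmap_on_dsp_rh() undoes the protection applied when a heap or
 * remote-heap range was mapped: for ADSP_MMAP_HEAP_ADDR it fetches the
 * security key from the DSP (falling back to the older remote call when the
 * new one is unsupported) and clears the TZ protection; for remote-heap
 * ranges it reassigns the memory back to HLOS.
 */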
static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, uint64_t phys,
				size_t size, uint32_t flags)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	int tgid = 0;
	int destVM[1] = {VMID_HLOS};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	if (flags == ADSP_MMAP_HEAP_ADDR) {
		struct fastrpc_ioctl_invoke_crc ioctl;
		struct scm_desc desc = {0};
		remote_arg_t ra[2];

		struct {
			uint8_t skey;
		} routargs;

		if (fl == NULL)
			goto bail;
		tgid = fl->tgid;
		ra[0].buf.pv = (void *)&tgid;
		ra[0].buf.len = sizeof(tgid);
		ra[1].buf.pv = (void *)&routargs;
		ra[1].buf.len = sizeof(routargs);

		ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(9, 1, 1);
		ioctl.inv.pra = ra;
		ioctl.fds = NULL;
		ioctl.attrs = NULL;
		ioctl.crc = NULL;

		VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
			FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		if (err == AEE_EUNSUPPORTED) {
			remote_arg_t ra[1];

			pr_warn("ADSPRPC:Failed to get security key with updated remote call, falling back to older method");
			ra[0].buf.pv = (void *)&routargs;
			ra[0].buf.len = sizeof(routargs);
			ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
			ioctl.inv.pra = ra;
			VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
				FASTRPC_MODE_PARALLEL, 1, &ioctl)));
		}
		if (err)
			goto bail;

		desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
		desc.args[1] = phys;
		desc.args[2] = size;
		desc.args[3] = routargs.skey;
		desc.arginfo = SCM_ARGS(4);
		err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
			TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
	} else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
				me->channel[fl->cid].rhvm.vmid,
				me->channel[fl->cid].rhvm.vmcount,
				destVM, destVMperm, 1));
		if (err)
			goto bail;
	}

bail:
	return err;
}

static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl, uintptr_t raddr,
				uint64_t phys, size_t size, uint32_t flags)
{
	struct fastrpc_ioctl_invoke_crc ioctl;
	remote_arg_t ra[1];
	int err = 0;
	struct {
		int pid;
		uintptr_t vaddrout;
		size_t size;
	} inargs;

	inargs.pid = fl->tgid;
	inargs.size = size;
	inargs.vaddrout = raddr;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);

	ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
	if (fl->apps->compat)
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
	else
		ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = NULL;
	ioctl.attrs = NULL;
	ioctl.crc = NULL;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	if (err)
		goto bail;
	if (flags == ADSP_MMAP_HEAP_ADDR ||
			flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, phys, size, flags));
		if (err)
			goto bail;
	}
bail:
	return err;
}

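/*
 * fastrpc_mmap_remove_ssr() tears down all global (remote heap) mappings
 * after a subsystem restart: each map is unmapped on the DSP side, dumped
 * through the ramdump device when ramdumps are enabled, and then freed.
 */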
static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
{
	struct fastrpc_mmap *match = NULL, *map = NULL;
	struct hlist_node *n = NULL;
	int err = 0, ret = 0;
	struct fastrpc_apps *me = &gfa;
	struct ramdump_segment *ramdump_segments_rh = NULL;

	do {
		match = NULL;
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
		spin_unlock(&me->hlock);

		if (match) {
			VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match->phys,
					match->size, match->flags));
			if (err)
				goto bail;
			if (me->channel[0].ramdumpenabled) {
				ramdump_segments_rh = kcalloc(1,
				sizeof(struct ramdump_segment), GFP_KERNEL);
				if (ramdump_segments_rh) {
					ramdump_segments_rh->address =
						match->phys;
					ramdump_segments_rh->size = match->size;
					ret = do_elf_ramdump(
					 me->channel[0].remoteheap_ramdump_dev,
					 ramdump_segments_rh, 1);
					if (ret < 0)
						pr_err("ADSPRPC: unable to dump heap");
					kfree(ramdump_segments_rh);
				}
			}
			fastrpc_mmap_free(match, 0);
		}
	} while (match);
bail:
	if (err && match)
		fastrpc_mmap_add(match);
	return err;
}

static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl)
{
	struct fastrpc_apps *me = &gfa;
	int session = 0, err = 0;

	VERIFY(err, !fastrpc_get_adsp_session(
			AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
	if (err)
		goto bail;
	if (me->channel[fl->cid].spd[session].pdrcount !=
		me->channel[fl->cid].spd[session].prevpdrcount) {
		if (fastrpc_mmap_remove_ssr(fl))
			pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
		me->channel[fl->cid].spd[session].prevpdrcount =
				me->channel[fl->cid].spd[session].pdrcount;
	}
	if (!me->channel[fl->cid].spd[session].ispdup) {
		VERIFY(err, 0);
		if (err) {
			err = -ENOTCONN;
			goto bail;
		}
	}
bail:
	return err;
}

static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       size_t len, struct fastrpc_mmap **ppmap);

static void fastrpc_mmap_add(struct fastrpc_mmap *map);

static inline void get_fastrpc_ioctl_mmap_64(
			struct fastrpc_ioctl_mmap_64 *mmap64,
			struct fastrpc_ioctl_mmap *immap)
{
	immap->fd = mmap64->fd;
	immap->flags = mmap64->flags;
	immap->vaddrin = (uintptr_t)mmap64->vaddrin;
	immap->size = mmap64->size;
}

static inline void put_fastrpc_ioctl_mmap_64(
			struct fastrpc_ioctl_mmap_64 *mmap64,
			struct fastrpc_ioctl_mmap *immap)
{
	mmap64->vaddrout = (uint64_t)immap->vaddrout;
}

static inline void get_fastrpc_ioctl_munmap_64(
			struct fastrpc_ioctl_munmap_64 *munmap64,
			struct fastrpc_ioctl_munmap *imunmap)
{
	imunmap->vaddrout = (uintptr_t)munmap64->vaddrout;
	imunmap->size = munmap64->size;
}

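/*
 * fastrpc_internal_munmap() handles the user unmap request: buffers that
 * were added with ADSP_MMAP_ADD_PAGES are looked up in the remote buffer
 * list and freed, otherwise the matching fastrpc_mmap is removed, unmapped
 * on the DSP and released. On failure the map is added back.
 */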
static int fastrpc_internal_munmap(struct fastrpc_file *fl,
				   struct fastrpc_ioctl_munmap *ud)
{
	int err = 0;
	struct fastrpc_mmap *map = NULL;
	struct fastrpc_buf *rbuf = NULL, *free = NULL;
	struct hlist_node *n;

	mutex_lock(&fl->map_mutex);

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(rbuf, n, &fl->remote_bufs, hn_rem) {
		if (rbuf->raddr && (rbuf->flags == ADSP_MMAP_ADD_PAGES)) {
			if ((rbuf->raddr == ud->vaddrout) &&
				(rbuf->size == ud->size)) {
				free = rbuf;
				break;
			}
		}
	}
	spin_unlock(&fl->hlock);

	if (free) {
		VERIFY(err, !fastrpc_munmap_on_dsp(fl, free->raddr,
			free->phys, free->size, free->flags));
		if (err)
			goto bail;
		fastrpc_buf_free(rbuf, 0);
		mutex_unlock(&fl->map_mutex);
		return err;
	}

	mutex_lock(&fl->fl_map_mutex);
	VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
	mutex_unlock(&fl->fl_map_mutex);
	if (err)
		goto bail;
	VERIFY(err, !fastrpc_munmap_on_dsp(fl, map->raddr,
				map->phys, map->size, map->flags));
	if (err)
		goto bail;
	mutex_lock(&fl->fl_map_mutex);
	fastrpc_mmap_free(map, 0);
	mutex_unlock(&fl->fl_map_mutex);
bail:
	if (err && map) {
		mutex_lock(&fl->fl_map_mutex);
		fastrpc_mmap_add(map);
		mutex_unlock(&fl->fl_map_mutex);
	}
	mutex_unlock(&fl->map_mutex);
	return err;
}

static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
				struct fastrpc_ioctl_munmap_fd *ud) {
	int err = 0;
	struct fastrpc_mmap *map = NULL;

	VERIFY(err, (fl && ud));
	if (err)
		goto bail;
	mutex_lock(&fl->map_mutex);
	mutex_lock(&fl->fl_map_mutex);
	if (fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
		pr_err("adsprpc: mapping not found to unmap %d va %llx %x\n",
			ud->fd, (unsigned long long)ud->va,
			(unsigned int)ud->len);
		err = -1;
		mutex_unlock(&fl->fl_map_mutex);
		mutex_unlock(&fl->map_mutex);
		goto bail;
	}
	if (map)
		fastrpc_mmap_free(map, 0);
	mutex_unlock(&fl->fl_map_mutex);
	mutex_unlock(&fl->map_mutex);
bail:
	return err;
}

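/*
 * fastrpc_internal_mmap() services the mmap ioctl: for ADSP_MMAP_ADD_PAGES
 * it allocates a kernel buffer and maps it on the DSP, while for fd-backed
 * requests it reuses an existing mapping when one is found, otherwise it
 * creates one and maps it remotely, returning the remote address in
 * ud->vaddrout.
 */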
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002745static int fastrpc_internal_mmap(struct fastrpc_file *fl,
2746 struct fastrpc_ioctl_mmap *ud)
2747{
2748
c_mtharue1a5ce12017-10-13 20:47:09 +05302749 struct fastrpc_mmap *map = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302750 struct fastrpc_buf *rbuf = NULL;
2751 unsigned long dma_attr = 0;
2752 uintptr_t raddr = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002753 int err = 0;
2754
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302755 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302756 if (ud->flags == ADSP_MMAP_ADD_PAGES) {
2757 if (ud->vaddrin) {
2758 err = -EINVAL;
2759 pr_err("adsprpc: %s: %s: ERROR: adding user allocated pages is not supported\n",
2760 current->comm, __func__);
2761 goto bail;
2762 }
2763 dma_attr = DMA_ATTR_EXEC_MAPPING |
2764 DMA_ATTR_NO_KERNEL_MAPPING |
2765 DMA_ATTR_FORCE_NON_COHERENT;
2766 err = fastrpc_buf_alloc(fl, ud->size, dma_attr, ud->flags,
2767 1, &rbuf);
2768 if (err)
2769 goto bail;
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302770 err = fastrpc_mmap_on_dsp(fl, ud->flags, 0,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302771 rbuf->phys, rbuf->size, &raddr);
2772 if (err)
2773 goto bail;
2774 rbuf->raddr = raddr;
2775 } else {
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302776
2777 uintptr_t va_to_dsp;
2778
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302779 mutex_lock(&fl->fl_map_mutex);
2780 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
2781 ud->size, ud->flags, 1, &map)) {
Mohammed Nayeem Ur Rahmanaf5f6102019-10-09 13:36:52 +05302782 ud->vaddrout = map->raddr;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302783 mutex_unlock(&fl->fl_map_mutex);
2784 mutex_unlock(&fl->map_mutex);
2785 return 0;
2786 }
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302787
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302788 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
2789 (uintptr_t)ud->vaddrin, ud->size,
2790 ud->flags, &map));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302791 mutex_unlock(&fl->fl_map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302792 if (err)
2793 goto bail;
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302794
2795 if (ud->flags == ADSP_MMAP_HEAP_ADDR ||
2796 ud->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
2797 va_to_dsp = 0;
2798 else
2799 va_to_dsp = (uintptr_t)map->va;
2800 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, va_to_dsp,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302801 map->phys, map->size, &raddr));
2802 if (err)
2803 goto bail;
2804 map->raddr = raddr;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302805 }
Mohammed Nayeem Ur Rahman3ac5d322018-09-24 13:54:08 +05302806 ud->vaddrout = raddr;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002807 bail:
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302808 if (err && map) {
2809 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302810 fastrpc_mmap_free(map, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302811 mutex_unlock(&fl->fl_map_mutex);
2812 }
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302813 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002814 return err;
2815}
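
/*
 * Illustrative user-space sketch of the path above (not part of the
 * driver): requesting a DSP-side mapping of a user buffer through
 * FASTRPC_IOCTL_MMAP. The field names mirror the ud-> accesses in
 * fastrpc_internal_mmap(); the exact struct layout, ioctl macro and
 * header live in the fastrpc UAPI, which is not shown here, so treat
 * the names below as assumptions.
 */
#include <fcntl.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>

static int example_map_user_buffer(int dev_fd, int dmabuf_fd,
                                   void *va, size_t len, uint64_t *raddr)
{
        struct fastrpc_ioctl_mmap req = {
                .fd = dmabuf_fd,          /* ion/dma-buf fd backing the pages */
                .flags = 0,               /* user memory, not ADSP_MMAP_ADD_PAGES */
                .vaddrin = (uintptr_t)va, /* user virtual address to map */
                .size = len,
        };

        if (ioctl(dev_fd, FASTRPC_IOCTL_MMAP, &req) < 0) {
                perror("FASTRPC_IOCTL_MMAP");
                return -1;
        }
        *raddr = req.vaddrout;   /* remote (DSP) address chosen by the driver */
        return 0;
}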
2816
2817static void fastrpc_channel_close(struct kref *kref)
2818{
2819 struct fastrpc_apps *me = &gfa;
2820 struct fastrpc_channel_ctx *ctx;
2821 int cid;
2822
2823 ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
2824 cid = ctx - &gcinfo[0];
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05302825 if (me->glink) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05302826 fastrpc_glink_close(ctx->chan, cid);
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05302827 ctx->chan = NULL;
2828 }
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302829 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002830 pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
2831 MAJOR(me->dev_no), cid);
2832}
2833
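/*
 * Note on locking above: fastrpc_channel_close() is a kref release callback
 * invoked through kref_put_mutex(), which acquires the mutex only when the
 * refcount drops to zero and expects the release function to unlock it;
 * hence the bare mutex_unlock(&me->smd_mutex). Hypothetical sketch of the
 * matching call-site pattern (the real one is in fastrpc_file_free() below):
 */
static void example_put_channel(struct fastrpc_channel_ctx *chan,
                                struct mutex *smd_mutex)
{
        /*
         * Drops one reference; if it was the last, smd_mutex is taken and
         * fastrpc_channel_close() runs with it held and releases it.
         */
        kref_put_mutex(&chan->kref, fastrpc_channel_close, smd_mutex);
}
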
2834static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
2835
2836static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302837 int secure, int sharedcb, struct fastrpc_session_ctx **session)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002838{
2839 struct fastrpc_apps *me = &gfa;
2840 int idx = 0, err = 0;
2841
2842 if (chan->sesscount) {
2843 for (idx = 0; idx < chan->sesscount; ++idx) {
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302844 if ((sharedcb && chan->session[idx].smmu.sharedcb) ||
2845 (!chan->session[idx].used &&
2846 chan->session[idx].smmu.secure
2847 == secure && !sharedcb)) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002848 chan->session[idx].used = 1;
2849 break;
2850 }
2851 }
2852 VERIFY(err, idx < chan->sesscount);
2853 if (err)
2854 goto bail;
2855 chan->session[idx].smmu.faults = 0;
2856 } else {
2857 VERIFY(err, me->dev != NULL);
2858 if (err)
2859 goto bail;
2860 chan->session[0].dev = me->dev;
c_mtharue1a5ce12017-10-13 20:47:09 +05302861 chan->session[0].smmu.dev = me->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002862 }
2863
2864 *session = &chan->session[idx];
2865 bail:
2866 return err;
2867}
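
/*
 * Standalone restatement of the selection rule used in the loop above,
 * illustrative only (the real session/context-bank structures live in this
 * driver): a context bank is chosen either because it is marked shared, or
 * because it is currently unused and its secure attribute matches the
 * request while no shared bank was asked for.
 */
static int example_session_matches(int used, int sess_secure, int sess_sharedcb,
                                   int want_secure, int want_sharedcb)
{
        if (want_sharedcb && sess_sharedcb)
                return 1;
        return !used && (sess_secure == want_secure) && !want_sharedcb;
}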
2868
c_mtharue1a5ce12017-10-13 20:47:09 +05302869static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
2870 size_t size)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002871{
2872 if (glink_queue_rx_intent(h, NULL, size))
2873 return false;
2874 return true;
2875}
2876
c_mtharue1a5ce12017-10-13 20:47:09 +05302877static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002878 const void *pkt_priv, const void *ptr)
2879{
2880}
2881
c_mtharue1a5ce12017-10-13 20:47:09 +05302882static void fastrpc_glink_notify_rx(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002883 const void *pkt_priv, const void *ptr, size_t size)
2884{
2885 struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302886 struct fastrpc_apps *me = &gfa;
2887 uint32_t index;
c_mtharufdac6892017-10-12 13:09:01 +05302888 int err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002889
c_mtharufdac6892017-10-12 13:09:01 +05302890 VERIFY(err, (rsp && size >= sizeof(*rsp)));
2891 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302892 goto bail;
2893
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302894 index = (uint32_t)((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
2895 VERIFY(err, index < FASTRPC_CTX_MAX);
c_mtharufdac6892017-10-12 13:09:01 +05302896 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302897 goto bail;
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302898
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302899 VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
2900 if (err)
2901 goto bail;
2902
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302903 VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp->ctx & ~3)) &&
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302904 me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
2905 if (err)
2906 goto bail;
2907
Mohammed Nayeem Ur Rahman32ba95d2019-07-26 17:31:37 +05302908 me->ctxtable[index]->handle = handle;
2909 me->ctxtable[index]->ptr = ptr;
2910
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302911 context_notify_user(me->ctxtable[index], rsp->retval);
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302912bail:
c_mtharufdac6892017-10-12 13:09:01 +05302913 if (err)
2914 pr_err("adsprpc: invalid response or context\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002915}
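
/*
 * Minimal sketch of the first two validation steps above, pulled out as a
 * helper for clarity (illustrative only; struct smq_invoke_rsp,
 * FASTRPC_CTXID_MASK and FASTRPC_CTX_MAX come from this driver's headers).
 * A response is only trusted if it is large enough and its ctx cookie
 * decodes to a valid slot; the caller must still check the slot's ctxid
 * and magic as done above.
 */
static int example_rsp_to_index(const struct smq_invoke_rsp *rsp, size_t size,
                                uint32_t *index)
{
        if (!rsp || size < sizeof(*rsp))
                return -EINVAL;
        *index = (uint32_t)((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
        if (*index >= FASTRPC_CTX_MAX)
                return -EINVAL;
        return 0;
}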
2916
c_mtharue1a5ce12017-10-13 20:47:09 +05302917static void fastrpc_glink_notify_state(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002918 unsigned int event)
2919{
2920 struct fastrpc_apps *me = &gfa;
2921 int cid = (int)(uintptr_t)priv;
2922 struct fastrpc_glink_info *link;
2923
2924 if (cid < 0 || cid >= NUM_CHANNELS)
2925 return;
2926 link = &me->channel[cid].link;
2927 switch (event) {
2928 case GLINK_CONNECTED:
2929 link->port_state = FASTRPC_LINK_CONNECTED;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302930 complete(&me->channel[cid].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002931 break;
2932 case GLINK_LOCAL_DISCONNECTED:
2933 link->port_state = FASTRPC_LINK_DISCONNECTED;
2934 break;
2935 case GLINK_REMOTE_DISCONNECTED:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002936 break;
2937 default:
2938 break;
2939 }
2940}
2941
2942static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
2943 struct fastrpc_session_ctx **session)
2944{
2945 int err = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302946 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002947
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302948 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002949 if (!*session)
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302950 err = fastrpc_session_alloc_locked(chan, secure, 0, session);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302951 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002952 return err;
2953}
2954
2955static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
2956 struct fastrpc_session_ctx *session)
2957{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302958 struct fastrpc_apps *me = &gfa;
2959
2960 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002961 session->used = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302962 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002963}
2964
2965static int fastrpc_file_free(struct fastrpc_file *fl)
2966{
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302967 struct hlist_node *n = NULL;
Tharun Kumar Merugu3e966762018-04-04 10:56:44 +05302968 struct fastrpc_mmap *map = NULL, *lmap = NULL;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302969 struct fastrpc_perf *perf = NULL, *fperf = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002970 int cid;
2971
2972 if (!fl)
2973 return 0;
2974 cid = fl->cid;
2975
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302976 (void)fastrpc_release_current_dsp_process(fl);
2977
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002978 spin_lock(&fl->apps->hlock);
2979 hlist_del_init(&fl->hn);
2980 spin_unlock(&fl->apps->hlock);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05302981 kfree(fl->debug_buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002982
Sathish Ambleyd7fbcbb2017-03-08 10:55:48 -08002983 if (!fl->sctx) {
2984 kfree(fl);
2985 return 0;
2986 }
tharun kumar9f899ea2017-07-03 17:07:03 +05302987 spin_lock(&fl->hlock);
2988 fl->file_close = 1;
2989 spin_unlock(&fl->hlock);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302990 if (!IS_ERR_OR_NULL(fl->init_mem))
2991 fastrpc_buf_free(fl->init_mem, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002992 fastrpc_context_list_dtor(fl);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302993 fastrpc_cached_buf_list_free(fl);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302994 mutex_lock(&fl->fl_map_mutex);
Tharun Kumar Merugu3e966762018-04-04 10:56:44 +05302995 do {
2996 lmap = NULL;
2997 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
2998 hlist_del_init(&map->hn);
2999 lmap = map;
3000 break;
3001 }
3002 fastrpc_mmap_free(lmap, 1);
3003 } while (lmap);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303004 mutex_unlock(&fl->fl_map_mutex);
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303005 if (fl->refcount && (fl->ssrcount == fl->apps->channel[cid].ssrcount))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003006 kref_put_mutex(&fl->apps->channel[cid].kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303007 fastrpc_channel_close, &fl->apps->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003008 if (fl->sctx)
3009 fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
3010 if (fl->secsctx)
3011 fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303012
3013 mutex_lock(&fl->perf_mutex);
3014 do {
3015 struct hlist_node *pn = NULL;
3016
3017 fperf = NULL;
3018 hlist_for_each_entry_safe(perf, pn, &fl->perf, hn) {
3019 hlist_del_init(&perf->hn);
3020 fperf = perf;
3021 break;
3022 }
3023 kfree(fperf);
3024 } while (fperf);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303025 fastrpc_remote_buf_list_free(fl);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303026 mutex_unlock(&fl->perf_mutex);
3027 mutex_destroy(&fl->perf_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303028 mutex_destroy(&fl->fl_map_mutex);
Tharun Kumar Merugu8714e642018-05-17 15:21:08 +05303029 mutex_destroy(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003030 kfree(fl);
3031 return 0;
3032}
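
/*
 * The teardown above drains its lists with a "pop one, free it, repeat"
 * idiom: unlink the first entry inside the loop, remember it, and keep
 * going until nothing is left. Generic sketch of that pattern with a
 * hypothetical element type (kfree(NULL) is a no-op, so the loop simply
 * terminates once the list is empty):
 */
struct example_node {
        struct hlist_node hn;
};

static void example_drain_list(struct hlist_head *head)
{
        struct example_node *e = NULL, *victim;
        struct hlist_node *n = NULL;

        do {
                victim = NULL;
                hlist_for_each_entry_safe(e, n, head, hn) {
                        hlist_del_init(&e->hn);
                        victim = e;
                        break;
                }
                kfree(victim);
        } while (victim);
}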
3033
3034static int fastrpc_device_release(struct inode *inode, struct file *file)
3035{
3036 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
3037
3038 if (fl) {
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303039 if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
3040 pm_qos_remove_request(&fl->pm_qos_req);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003041 if (fl->debugfs_file != NULL)
3042 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003043 fastrpc_file_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +05303044 file->private_data = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003045 }
3046 return 0;
3047}
3048
3049static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
3050 void *priv)
3051{
3052 struct fastrpc_apps *me = &gfa;
3053 int cid = (int)((uintptr_t)priv);
3054 struct fastrpc_glink_info *link;
3055
3056 if (cid < 0 || cid >= NUM_CHANNELS)
3057 return;
3058
3059 link = &me->channel[cid].link;
3060 switch (cb_info->link_state) {
3061 case GLINK_LINK_STATE_UP:
3062 link->link_state = FASTRPC_LINK_STATE_UP;
3063 complete(&me->channel[cid].work);
3064 break;
3065 case GLINK_LINK_STATE_DOWN:
3066 link->link_state = FASTRPC_LINK_STATE_DOWN;
3067 break;
3068 default:
3069 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
3070 break;
3071 }
3072}
3073
3074static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
3075{
3076 int err = 0;
3077 struct fastrpc_glink_info *link;
3078
3079 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3080 if (err)
3081 goto bail;
3082
3083 link = &me->channel[cid].link;
3084 if (link->link_notify_handle != NULL)
3085 goto bail;
3086
3087 link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
3088 link->link_notify_handle = glink_register_link_state_cb(
3089 &link->link_info,
3090 (void *)((uintptr_t)cid));
3091 VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
3092 if (err) {
3093 link->link_notify_handle = NULL;
3094 goto bail;
3095 }
3096 VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
3097 RPC_TIMEOUT));
3098bail:
3099 return err;
3100}
3101
3102static void fastrpc_glink_close(void *chan, int cid)
3103{
3104 int err = 0;
3105 struct fastrpc_glink_info *link;
3106
3107 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3108 if (err)
3109 return;
3110 link = &gfa.channel[cid].link;
3111
c_mtharu314a4202017-11-15 22:09:17 +05303112 if (link->port_state == FASTRPC_LINK_CONNECTED ||
3113 link->port_state == FASTRPC_LINK_REMOTE_DISCONNECTING) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003114 link->port_state = FASTRPC_LINK_DISCONNECTING;
3115 glink_close(chan);
3116 }
3117}
3118
3119static int fastrpc_glink_open(int cid)
3120{
3121 int err = 0;
3122 void *handle = NULL;
3123 struct fastrpc_apps *me = &gfa;
3124 struct glink_open_config *cfg;
3125 struct fastrpc_glink_info *link;
3126
3127 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3128 if (err)
3129 goto bail;
3130 link = &me->channel[cid].link;
3131 cfg = &me->channel[cid].link.cfg;
3132 VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
3133 if (err)
3134 goto bail;
3135
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05303136 VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
3137 if (err)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003138 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003139
3140 link->port_state = FASTRPC_LINK_CONNECTING;
3141 cfg->priv = (void *)(uintptr_t)cid;
3142 cfg->edge = gcinfo[cid].link.link_info.edge;
3143 cfg->transport = gcinfo[cid].link.link_info.transport;
3144 cfg->name = FASTRPC_GLINK_GUID;
3145 cfg->notify_rx = fastrpc_glink_notify_rx;
3146 cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
3147 cfg->notify_state = fastrpc_glink_notify_state;
3148 cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
3149 handle = glink_open(cfg);
3150 VERIFY(err, !IS_ERR_OR_NULL(handle));
c_mtharu6e1d26b2017-10-09 16:05:24 +05303151 if (err) {
3152 if (link->port_state == FASTRPC_LINK_CONNECTING)
3153 link->port_state = FASTRPC_LINK_DISCONNECTED;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003154 goto bail;
c_mtharu6e1d26b2017-10-09 16:05:24 +05303155 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003156 me->channel[cid].chan = handle;
3157bail:
3158 return err;
3159}
3160
Sathish Ambley1ca68232017-01-19 10:32:55 -08003161static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
3162{
3163 filp->private_data = inode->i_private;
3164 return 0;
3165}
3166
3167static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
3168 size_t count, loff_t *position)
3169{
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303170 struct fastrpc_apps *me = &gfa;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003171 struct fastrpc_file *fl = filp->private_data;
3172 struct hlist_node *n;
c_mtharue1a5ce12017-10-13 20:47:09 +05303173 struct fastrpc_buf *buf = NULL;
3174 struct fastrpc_mmap *map = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303175 struct fastrpc_mmap *gmaps = NULL;
c_mtharue1a5ce12017-10-13 20:47:09 +05303176 struct smq_invoke_ctx *ictx = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303177 struct fastrpc_channel_ctx *chan = NULL;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003178 unsigned int len = 0;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303179 int i, j, sess_used = 0, ret = 0;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003180 char *fileinfo = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303181 char single_line[UL_SIZE] = "----------------";
3182 char title[UL_SIZE] = "=========================";
Sathish Ambley1ca68232017-01-19 10:32:55 -08003183
3184 fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
3185 if (!fileinfo)
3186 goto bail;
3187 if (fl == NULL) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303188 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3189 "\n%s %s %s\n", title, " CHANNEL INFO ", title);
3190 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3191 "%-8s|%-9s|%-9s|%-14s|%-9s|%-13s\n",
3192 "susbsys", "refcount", "sesscount", "issubsystemup",
3193 "ssrcount", "session_used");
3194 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3195 "-%s%s%s%s-\n", single_line, single_line,
3196 single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003197 for (i = 0; i < NUM_CHANNELS; i++) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303198 sess_used = 0;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003199 chan = &gcinfo[i];
3200 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303201 DEBUGFS_SIZE - len, "%-8s", chan->subsys);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003202 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303203 DEBUGFS_SIZE - len, "|%-9d",
3204 chan->kref.refcount.counter);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303205 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303206 DEBUGFS_SIZE - len, "|%-9d",
3207 chan->sesscount);
3208 len += scnprintf(fileinfo + len,
3209 DEBUGFS_SIZE - len, "|%-14d",
3210 chan->issubsystemup);
3211 len += scnprintf(fileinfo + len,
3212 DEBUGFS_SIZE - len, "|%-9d",
3213 chan->ssrcount);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003214 for (j = 0; j < chan->sesscount; j++) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303215 sess_used += chan->session[j].used;
3216 }
3217 len += scnprintf(fileinfo + len,
3218 DEBUGFS_SIZE - len, "|%-13d\n", sess_used);
3219
3220 }
3221 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3222 "\n%s%s%s\n", "=============",
3223 " CMA HEAP ", "==============");
3224 len += scnprintf(fileinfo + len,
3225 DEBUGFS_SIZE - len, "%-20s|%-20s\n", "addr", "size");
3226 len += scnprintf(fileinfo + len,
3227 DEBUGFS_SIZE - len, "--%s%s---\n",
3228 single_line, single_line);
3229 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3230 "0x%-18llX", me->range.addr);
3231 len += scnprintf(fileinfo + len,
3232 DEBUGFS_SIZE - len, "|0x%-18llX\n", me->range.size);
3233 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3234 "\n==========%s %s %s===========\n",
3235 title, " GMAPS ", title);
3236 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3237 "%-20s|%-20s|%-20s|%-20s\n",
3238 "fd", "phys", "size", "va");
3239 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3240 "%s%s%s%s%s\n", single_line, single_line,
3241 single_line, single_line, single_line);
3242 hlist_for_each_entry_safe(gmaps, n, &me->maps, hn) {
3243 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3244 "%-20d|0x%-18llX|0x%-18X|0x%-20lX\n\n",
3245 gmaps->fd, gmaps->phys,
3246 (uint32_t)gmaps->size,
3247 gmaps->va);
3248 }
3249 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3250 "%-20s|%-20s|%-20s|%-20s\n",
3251 "len", "refs", "raddr", "flags");
3252 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3253 "%s%s%s%s%s\n", single_line, single_line,
3254 single_line, single_line, single_line);
3255 hlist_for_each_entry_safe(gmaps, n, &me->maps, hn) {
3256 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3257 "0x%-18X|%-20d|%-20lu|%-20u\n",
3258 (uint32_t)gmaps->len, gmaps->refs,
3259 gmaps->raddr, gmaps->flags);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003260 }
3261 } else {
3262 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303263 "\n%s %13s %d\n", "cid", ":", fl->cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003264 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303265 "%s %12s %d\n", "tgid", ":", fl->tgid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003266 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303267 "%s %7s %d\n", "sessionid", ":", fl->sessionid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003268 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303269 "%s %8s %d\n", "ssrcount", ":", fl->ssrcount);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303270 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303271 "%s %8s %d\n", "refcount", ":", fl->refcount);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003272 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303273 "%s %14s %d\n", "pd", ":", fl->pd);
3274 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3275 "%s %9s %s\n", "spdname", ":", fl->spdname);
3276 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3277 "%s %6s %d\n", "file_close", ":", fl->file_close);
3278 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3279 "%s %8s %d\n", "sharedcb", ":", fl->sharedcb);
3280 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3281 "%s %9s %d\n", "profile", ":", fl->profile);
3282 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3283 "%s %3s %d\n", "smmu.coherent", ":",
3284 fl->sctx->smmu.coherent);
3285 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3286 "%s %4s %d\n", "smmu.enabled", ":",
3287 fl->sctx->smmu.enabled);
3288 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3289 "%s %9s %d\n", "smmu.cb", ":", fl->sctx->smmu.cb);
3290 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3291 "%s %5s %d\n", "smmu.secure", ":",
3292 fl->sctx->smmu.secure);
3293 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3294 "%s %5s %d\n", "smmu.faults", ":",
3295 fl->sctx->smmu.faults);
3296 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3297 "%s %s %d\n", "link.link_state",
3298 ":", *&me->channel[fl->cid].link.link_state);
3299
3300 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3301 "\n=======%s %s %s======\n", title,
3302 " LIST OF MAPS ", title);
3303
3304 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3305 "%-20s|%-20s|%-20s\n", "va", "phys", "size");
3306 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3307 "%s%s%s%s%s\n",
3308 single_line, single_line, single_line,
3309 single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003310 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303311 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3312 "0x%-20lX|0x%-20llX|0x%-20zu\n\n",
3313 map->va, map->phys,
3314 map->size);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003315 }
3316 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303317 "%-20s|%-20s|%-20s|%-20s\n",
3318 "len", "refs",
3319 "raddr", "uncached");
3320 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3321 "%s%s%s%s%s\n",
3322 single_line, single_line, single_line,
3323 single_line, single_line);
3324 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
3325 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3326 "%-20zu|%-20d|0x%-20lX|%-20d\n\n",
3327 map->len, map->refs, map->raddr,
3328 map->uncached);
3329 }
3330 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3331 "%-20s|%-20s\n", "secure", "attr");
3332 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3333 "%s%s%s%s%s\n",
3334 single_line, single_line, single_line,
3335 single_line, single_line);
3336 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
3337 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3338 "%-20d|0x%-20lX\n\n",
3339 map->secure, map->attr);
3340 }
3341 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303342 "%s %d\n\n",
3343 "KERNEL MEMORY ALLOCATION:", 1);
3344 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303345 "\n======%s %s %s======\n", title,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303346 " LIST OF CACHED BUFS ", title);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303347 spin_lock(&fl->hlock);
3348 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303349 "%-19s|%-19s|%-19s|%-19s\n",
3350 "virt", "phys", "size", "dma_attr");
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303351 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3352 "%s%s%s%s%s\n", single_line, single_line,
3353 single_line, single_line, single_line);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303354 hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303355 len += scnprintf(fileinfo + len,
3356 DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303357 "0x%-17p|0x%-17llX|%-19zu|0x%-17lX\n",
3358 buf->virt, (uint64_t)buf->phys, buf->size,
3359 buf->dma_attr);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303360 }
3361 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3362 "\n%s %s %s\n", title,
3363 " LIST OF PENDING SMQCONTEXTS ", title);
3364
3365 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3366 "%-20s|%-10s|%-10s|%-10s|%-20s\n",
3367 "sc", "pid", "tgid", "used", "ctxid");
3368 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3369 "%s%s%s%s%s\n", single_line, single_line,
3370 single_line, single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003371 hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
3372 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303373 "0x%-18X|%-10d|%-10d|%-10zu|0x%-20llX\n\n",
3374 ictx->sc, ictx->pid, ictx->tgid,
3375 ictx->used, ictx->ctxid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003376 }
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303377
Sathish Ambley1ca68232017-01-19 10:32:55 -08003378 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303379 "\n%s %s %s\n", title,
3380 " LIST OF INTERRUPTED SMQCONTEXTS ", title);
3381
3382 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3383 "%-20s|%-10s|%-10s|%-10s|%-20s\n",
3384 "sc", "pid", "tgid", "used", "ctxid");
3385 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3386 "%s%s%s%s%s\n", single_line, single_line,
3387 single_line, single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003388 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303389 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3390 "%-20u|%-20d|%-20d|%-20zu|0x%-20llX\n\n",
3391 ictx->sc, ictx->pid, ictx->tgid,
3392 ictx->used, ictx->ctxid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003393 }
3394 spin_unlock(&fl->hlock);
3395 }
3396 if (len > DEBUGFS_SIZE)
3397 len = DEBUGFS_SIZE;
3398 ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
3399 kfree(fileinfo);
3400bail:
3401 return ret;
3402}
3403
3404static const struct file_operations debugfs_fops = {
3405 .open = fastrpc_debugfs_open,
3406 .read = fastrpc_debugfs_read,
3407};
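
/*
 * The read handler above relies on the scnprintf() accumulation idiom:
 * scnprintf() returns the number of bytes actually written (excluding the
 * trailing NUL) and never writes past the size it is given, so "len" can
 * grow monotonically and the buffer cannot overflow even if the output is
 * truncated; the final clamp to DEBUGFS_SIZE is belt and braces before
 * simple_read_from_buffer() copies to user space. Minimal sketch:
 */
static unsigned int example_fill(char *buf, unsigned int size)
{
        unsigned int len = 0;

        len += scnprintf(buf + len, size - len, "%-8s|%-9s\n",
                         "subsys", "refcount");
        len += scnprintf(buf + len, size - len, "%-8s|%-9d\n", "adsp", 1);
        return len;
}
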
Sathish Ambley36849af2017-02-02 09:35:55 -08003408static int fastrpc_channel_open(struct fastrpc_file *fl)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003409{
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003410 struct fastrpc_apps *me = &gfa;
Tharun Kumar Meruguc42c6e22018-05-29 15:50:46 +05303411 int cid, ii, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003412
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303413 mutex_lock(&me->smd_mutex);
3414
Sathish Ambley36849af2017-02-02 09:35:55 -08003415 VERIFY(err, fl && fl->sctx);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003416 if (err)
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303417 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08003418 cid = fl->cid;
c_mtharu314a4202017-11-15 22:09:17 +05303419 VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
3420 if (err)
3421 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303422 if (me->channel[cid].ssrcount !=
3423 me->channel[cid].prevssrcount) {
3424 if (!me->channel[cid].issubsystemup) {
3425 VERIFY(err, 0);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303426 if (err) {
3427 err = -ENOTCONN;
c_mtharue1a5ce12017-10-13 20:47:09 +05303428 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303429 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303430 }
3431 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003432 fl->ssrcount = me->channel[cid].ssrcount;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303433 fl->refcount = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003434 if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
c_mtharue1a5ce12017-10-13 20:47:09 +05303435 (me->channel[cid].chan == NULL)) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303436 if (me->glink) {
3437 VERIFY(err, 0 == fastrpc_glink_register(cid, me));
3438 if (err)
3439 goto bail;
3440 VERIFY(err, 0 == fastrpc_glink_open(cid));
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05303441 VERIFY(err,
3442 wait_for_completion_timeout(&me->channel[cid].workport,
3443 RPC_TIMEOUT));
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303444 } else {
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05303445 if (me->channel[cid].chan == NULL) {
3446 VERIFY(err, !smd_named_open_on_edge(
3447 FASTRPC_SMD_GUID,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303448 gcinfo[cid].channel,
3449 (smd_channel_t **)&me->channel[cid].chan,
3450 (void *)(uintptr_t)cid,
3451 smd_event_handler));
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05303452 VERIFY(err,
3453 wait_for_completion_timeout(&me->channel[cid].workport,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003454 RPC_TIMEOUT));
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05303455
3456 }
3457 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003458 if (err) {
c_mtharue1a5ce12017-10-13 20:47:09 +05303459 me->channel[cid].chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003460 goto bail;
3461 }
3462 kref_init(&me->channel[cid].kref);
3463 pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
3464 MAJOR(me->dev_no), cid);
Tharun Kumar Meruguc42c6e22018-05-29 15:50:46 +05303465
3466 for (ii = 0; ii < FASTRPC_GLINK_INTENT_NUM && me->glink; ii++)
3467 glink_queue_rx_intent(me->channel[cid].chan, NULL,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303468 FASTRPC_GLINK_INTENT_LEN);
Tharun Kumar Meruguc42c6e22018-05-29 15:50:46 +05303469
Tharun Kumar Merugud86fc8c2018-01-04 16:35:31 +05303470 if (cid == 0 && me->channel[cid].ssrcount !=
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003471 me->channel[cid].prevssrcount) {
c_mtharue1a5ce12017-10-13 20:47:09 +05303472 if (fastrpc_mmap_remove_ssr(fl))
3473 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003474 me->channel[cid].prevssrcount =
3475 me->channel[cid].ssrcount;
3476 }
3477 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003478
3479bail:
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303480 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003481 return err;
3482}
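
/*
 * Condensed sketch of the glink bring-up ordering performed above when a
 * channel is (re)opened: register for link-state callbacks and wait for the
 * link, open the logical port and wait for GLINK_CONNECTED (signalled via
 * the workport completion), then queue a pool of rx intents so the DSP can
 * send responses. Error paths, kref handling and the legacy SMD fallback
 * are omitted; this is illustrative only and reuses helpers and constants
 * defined in this file.
 */
static int example_glink_bringup(struct fastrpc_apps *me, int cid)
{
        int err, ii;

        err = fastrpc_glink_register(cid, me);
        if (err)
                return err;
        err = fastrpc_glink_open(cid);
        if (err)
                return err;
        if (!wait_for_completion_timeout(&me->channel[cid].workport,
                                         RPC_TIMEOUT))
                return -ETIMEDOUT;
        for (ii = 0; ii < FASTRPC_GLINK_INTENT_NUM; ii++)
                glink_queue_rx_intent(me->channel[cid].chan, NULL,
                                      FASTRPC_GLINK_INTENT_LEN);
        return 0;
}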
3483
Sathish Ambley36849af2017-02-02 09:35:55 -08003484static int fastrpc_device_open(struct inode *inode, struct file *filp)
3485{
3486 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05303487 struct fastrpc_file *fl = NULL;
Sathish Ambley36849af2017-02-02 09:35:55 -08003488 struct fastrpc_apps *me = &gfa;
3489
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303490 /*
3491 * Indicates which device node was opened:
3492 * MINOR_NUM_DEV or MINOR_NUM_SECURE_DEV
3493 */
3494 int dev_minor = MINOR(inode->i_rdev);
3495
3496 VERIFY(err, ((dev_minor == MINOR_NUM_DEV) ||
3497 (dev_minor == MINOR_NUM_SECURE_DEV)));
3498 if (err) {
3499 pr_err("adsprpc: Invalid dev minor num %d\n", dev_minor);
3500 return err;
3501 }
3502
c_mtharue1a5ce12017-10-13 20:47:09 +05303503 VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
Sathish Ambley36849af2017-02-02 09:35:55 -08003504 if (err)
3505 return err;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303506
Sathish Ambley36849af2017-02-02 09:35:55 -08003507 context_list_ctor(&fl->clst);
3508 spin_lock_init(&fl->hlock);
3509 INIT_HLIST_HEAD(&fl->maps);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303510 INIT_HLIST_HEAD(&fl->perf);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303511 INIT_HLIST_HEAD(&fl->cached_bufs);
3512 INIT_HLIST_HEAD(&fl->remote_bufs);
Sathish Ambley36849af2017-02-02 09:35:55 -08003513 INIT_HLIST_NODE(&fl->hn);
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05303514 fl->sessionid = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003515 fl->apps = me;
3516 fl->mode = FASTRPC_MODE_SERIAL;
3517 fl->cid = -1;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303518 fl->dev_minor = dev_minor;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303519 fl->init_mem = NULL;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303520 fl->qos_request = 0;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303521 fl->refcount = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003522 filp->private_data = fl;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05303523 mutex_init(&fl->map_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303524 mutex_init(&fl->fl_map_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08003525 spin_lock(&me->hlock);
3526 hlist_add_head(&fl->hn, &me->drivers);
3527 spin_unlock(&me->hlock);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303528 mutex_init(&fl->perf_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08003529 return 0;
3530}
3531
Edgar Flores1a772fa2020-02-07 14:59:29 -08003532static int fastrpc_set_process_info(struct fastrpc_file *fl)
3533{
3534 int err = 0, buf_size = 0;
3535 char strpid[PID_SIZE];
3536
3537 fl->tgid = current->tgid;
3538 snprintf(strpid, PID_SIZE, "%d", current->pid);
3539 buf_size = strlen(current->comm) + strlen("_") + strlen(strpid) + 1;
3540 fl->debug_buf = kzalloc(buf_size, GFP_KERNEL);
3541 if (!fl->debug_buf) {
3542 err = -ENOMEM;
3543 return err;
3544 }
3545 snprintf(fl->debug_buf, buf_size, "%.10s%s%d",
3546 current->comm, "_", current->pid);
3547 fl->debugfs_file = debugfs_create_file(fl->debug_buf, 0644,
3548 debugfs_root, fl, &debugfs_fops);
3549 if (!fl->debugfs_file)
3550 pr_warn("Error: %s: %s: failed to create debugfs file %s\n",
3551 current->comm, __func__, fl->debug_buf);
3552 return err;
3553}
3554
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003555static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
3556{
3557 int err = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003558 uint32_t cid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003559
c_mtharue1a5ce12017-10-13 20:47:09 +05303560 VERIFY(err, fl != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003561 if (err)
3562 goto bail;
Edgar Flores1a772fa2020-02-07 14:59:29 -08003563 err = fastrpc_set_process_info(fl);
3564 if (err)
3565 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08003566 if (fl->cid == -1) {
3567 cid = *info;
3568 VERIFY(err, cid < NUM_CHANNELS);
3569 if (err)
3570 goto bail;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303571 /* Check to see if the device node is non-secure */
zhaochenfc798572018-08-17 15:32:37 +08003572 if (fl->dev_minor == MINOR_NUM_DEV &&
3573 fl->apps->secure_flag == true) {
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303574 /*
3575 * For a non-secure device node, check that the
3576 * channel allows non-secure access. If not, bail:
3577 * the session will not start, cid will remain -1,
3578 * and the client will not be able to invoke any
3579 * other method successfully.
3580 */
3581 if (fl->apps->channel[cid].secure == SECURE_CHANNEL) {
3582 err = -EPERM;
3583 pr_err("adsprpc: GetInfo failed dev %d, cid %d, secure %d\n",
3584 fl->dev_minor, cid,
3585 fl->apps->channel[cid].secure);
3586 goto bail;
3587 }
3588 }
Sathish Ambley36849af2017-02-02 09:35:55 -08003589 fl->cid = cid;
3590 fl->ssrcount = fl->apps->channel[cid].ssrcount;
3591 VERIFY(err, !fastrpc_session_alloc_locked(
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303592 &fl->apps->channel[cid], 0, fl->sharedcb, &fl->sctx));
Sathish Ambley36849af2017-02-02 09:35:55 -08003593 if (err)
3594 goto bail;
3595 }
Tharun Kumar Merugu80be7d62017-08-02 11:03:22 +05303596 VERIFY(err, fl->sctx != NULL);
3597 if (err)
3598 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003599 *info = (fl->sctx->smmu.enabled ? 1 : 0);
3600bail:
3601 return err;
3602}
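
/*
 * Illustrative user-space view of the GETINFO handshake implemented above
 * (sketch only, not part of the driver): the caller passes the desired
 * channel id in and gets back whether the allocated session is SMMU-backed.
 * The FASTRPC_IOCTL_GETINFO macro and its pointer-to-u32 argument come from
 * the fastrpc UAPI header, and the device node path (commonly
 * /dev/adsprpc-smd for the non-secure node) is an assumption on this target.
 */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int example_getinfo(const char *node, uint32_t cid)
{
        uint32_t info = cid;
        int ret, fd = open(node, O_RDWR);

        if (fd < 0)
                return -1;
        ret = ioctl(fd, FASTRPC_IOCTL_GETINFO, &info);
        close(fd);
        if (ret < 0)
                return -1;
        return (int)info;       /* 1: SMMU enabled for the session, 0: not */
}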
3603
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303604static int fastrpc_internal_control(struct fastrpc_file *fl,
3605 struct fastrpc_ioctl_control *cp)
3606{
Mohammed Nayeem Ur Rahman18d633f2019-05-28 15:11:40 +05303607 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303608 int err = 0;
3609 int latency;
3610
3611 VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
3612 if (err)
3613 goto bail;
3614 VERIFY(err, !IS_ERR_OR_NULL(cp));
3615 if (err)
3616 goto bail;
3617
3618 switch (cp->req) {
3619 case FASTRPC_CONTROL_LATENCY:
3620 latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
3621 fl->apps->latency : PM_QOS_DEFAULT_VALUE;
3622 VERIFY(err, latency != 0);
3623 if (err)
3624 goto bail;
3625 if (!fl->qos_request) {
3626 pm_qos_add_request(&fl->pm_qos_req,
3627 PM_QOS_CPU_DMA_LATENCY, latency);
3628 fl->qos_request = 1;
3629 } else
3630 pm_qos_update_request(&fl->pm_qos_req, latency);
3631 break;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303632 case FASTRPC_CONTROL_SMMU:
Mohammed Nayeem Ur Rahman18d633f2019-05-28 15:11:40 +05303633 if (!me->legacy)
3634 fl->sharedcb = cp->smmu.sharedcb;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303635 break;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303636 case FASTRPC_CONTROL_KALLOC:
3637 cp->kalloc.kalloc_support = 1;
3638 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303639 default:
3640 err = -ENOTTY;
3641 break;
3642 }
3643bail:
3644 return err;
3645}
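
/*
 * The FASTRPC_CONTROL_LATENCY case above follows the standard PM QoS
 * pattern: add the request once, update it afterwards, and remove it on
 * release (see fastrpc_device_release()). Minimal sketch of that pattern
 * with the 4.x pm_qos API used in this file (values are in microseconds):
 */
static void example_set_cpu_latency(struct pm_qos_request *req,
                                    int *added, s32 latency_us)
{
        if (!*added) {
                pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY, latency_us);
                *added = 1;
        } else {
                pm_qos_update_request(req, latency_us);
        }
}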
3646
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003647static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
3648 unsigned long ioctl_param)
3649{
3650 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07003651 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003652 struct fastrpc_ioctl_mmap mmap;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303653 struct fastrpc_ioctl_mmap_64 mmap64;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003654 struct fastrpc_ioctl_munmap munmap;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303655 struct fastrpc_ioctl_munmap_64 munmap64;
c_mtharu7bd6a422017-10-17 18:15:37 +05303656 struct fastrpc_ioctl_munmap_fd munmap_fd;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08003657 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003658 struct fastrpc_ioctl_perf perf;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303659 struct fastrpc_ioctl_control cp;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003660 } p;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303661 union {
3662 struct fastrpc_ioctl_mmap mmap;
3663 struct fastrpc_ioctl_munmap munmap;
3664 } i;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003665 void *param = (char *)ioctl_param;
3666 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
3667 int size = 0, err = 0;
3668 uint32_t info;
3669
c_mtharue1a5ce12017-10-13 20:47:09 +05303670 p.inv.fds = NULL;
3671 p.inv.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07003672 p.inv.crc = NULL;
tharun kumar9f899ea2017-07-03 17:07:03 +05303673 spin_lock(&fl->hlock);
3674 if (fl->file_close == 1) {
3675 err = -EBADF;
3676 pr_warn("ADSPRPC: fastrpc_device_release is in progress, not sending any new requests to DSP\n");
3677 spin_unlock(&fl->hlock);
3678 goto bail;
3679 }
3680 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003681
3682 switch (ioctl_num) {
3683 case FASTRPC_IOCTL_INVOKE:
3684 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07003685 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003686 case FASTRPC_IOCTL_INVOKE_FD:
3687 if (!size)
3688 size = sizeof(struct fastrpc_ioctl_invoke_fd);
3689 /* fall through */
3690 case FASTRPC_IOCTL_INVOKE_ATTRS:
3691 if (!size)
3692 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07003693 /* fall through */
3694 case FASTRPC_IOCTL_INVOKE_CRC:
3695 if (!size)
3696 size = sizeof(struct fastrpc_ioctl_invoke_crc);
c_mtharue1a5ce12017-10-13 20:47:09 +05303697 K_COPY_FROM_USER(err, 0, &p.inv, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003698 if (err)
3699 goto bail;
3700 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
3701 0, &p.inv)));
3702 if (err)
3703 goto bail;
3704 break;
3705 case FASTRPC_IOCTL_MMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303706 K_COPY_FROM_USER(err, 0, &p.mmap, param,
3707 sizeof(p.mmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303708 if (err)
3709 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003710 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
3711 if (err)
3712 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303713 K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003714 if (err)
3715 goto bail;
3716 break;
3717 case FASTRPC_IOCTL_MUNMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303718 K_COPY_FROM_USER(err, 0, &p.munmap, param,
3719 sizeof(p.munmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303720 if (err)
3721 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003722 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
3723 &p.munmap)));
3724 if (err)
3725 goto bail;
3726 break;
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303727 case FASTRPC_IOCTL_MMAP_64:
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303728 K_COPY_FROM_USER(err, 0, &p.mmap64, param,
3729 sizeof(p.mmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303730 if (err)
3731 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303732 get_fastrpc_ioctl_mmap_64(&p.mmap64, &i.mmap);
3733 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &i.mmap)));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303734 if (err)
3735 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303736 put_fastrpc_ioctl_mmap_64(&p.mmap64, &i.mmap);
3737 K_COPY_TO_USER(err, 0, param, &p.mmap64, sizeof(p.mmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303738 if (err)
3739 goto bail;
3740 break;
3741 case FASTRPC_IOCTL_MUNMAP_64:
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303742 K_COPY_FROM_USER(err, 0, &p.munmap64, param,
3743 sizeof(p.munmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303744 if (err)
3745 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303746 get_fastrpc_ioctl_munmap_64(&p.munmap64, &i.munmap);
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303747 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303748 &i.munmap)));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303749 if (err)
3750 goto bail;
3751 break;
c_mtharu7bd6a422017-10-17 18:15:37 +05303752 case FASTRPC_IOCTL_MUNMAP_FD:
3753 K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
3754 sizeof(p.munmap_fd));
3755 if (err)
3756 goto bail;
3757 VERIFY(err, 0 == (err = fastrpc_internal_munmap_fd(fl,
3758 &p.munmap_fd)));
3759 if (err)
3760 goto bail;
3761 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003762 case FASTRPC_IOCTL_SETMODE:
3763 switch ((uint32_t)ioctl_param) {
3764 case FASTRPC_MODE_PARALLEL:
3765 case FASTRPC_MODE_SERIAL:
3766 fl->mode = (uint32_t)ioctl_param;
3767 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003768 case FASTRPC_MODE_PROFILE:
3769 fl->profile = (uint32_t)ioctl_param;
3770 break;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05303771 case FASTRPC_MODE_SESSION:
3772 fl->sessionid = 1;
3773 fl->tgid |= (1 << SESSION_ID_INDEX);
3774 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003775 default:
3776 err = -ENOTTY;
3777 break;
3778 }
3779 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003780 case FASTRPC_IOCTL_GETPERF:
c_mtharue1a5ce12017-10-13 20:47:09 +05303781 K_COPY_FROM_USER(err, 0, &p.perf,
3782 param, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003783 if (err)
3784 goto bail;
3785 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
3786 if (p.perf.keys) {
3787 char *keys = PERF_KEYS;
3788
c_mtharue1a5ce12017-10-13 20:47:09 +05303789 K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
3790 keys, strlen(keys)+1);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003791 if (err)
3792 goto bail;
3793 }
3794 if (p.perf.data) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303795 struct fastrpc_perf *perf = NULL, *fperf = NULL;
3796 struct hlist_node *n = NULL;
3797
3798 mutex_lock(&fl->perf_mutex);
3799 hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
3800 if (perf->tid == current->pid) {
3801 fperf = perf;
3802 break;
3803 }
3804 }
3805
3806 mutex_unlock(&fl->perf_mutex);
3807
3808 if (fperf) {
3809 K_COPY_TO_USER(err, 0, (void *)p.perf.data,
3810 fperf, sizeof(*fperf));
3811 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003812 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303813 K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003814 if (err)
3815 goto bail;
3816 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303817 case FASTRPC_IOCTL_CONTROL:
c_mtharue1a5ce12017-10-13 20:47:09 +05303818 K_COPY_FROM_USER(err, 0, &p.cp, param,
3819 sizeof(p.cp));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303820 if (err)
3821 goto bail;
3822 VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
3823 if (err)
3824 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303825 if (p.cp.req == FASTRPC_CONTROL_KALLOC) {
3826 K_COPY_TO_USER(err, 0, param, &p.cp, sizeof(p.cp));
3827 if (err)
3828 goto bail;
3829 }
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303830 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003831 case FASTRPC_IOCTL_GETINFO:
c_mtharue1a5ce12017-10-13 20:47:09 +05303832 K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
Sathish Ambley36849af2017-02-02 09:35:55 -08003833 if (err)
3834 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003835 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
3836 if (err)
3837 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303838 K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003839 if (err)
3840 goto bail;
3841 break;
3842 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08003843 p.init.attrs = 0;
3844 p.init.siglen = 0;
3845 size = sizeof(struct fastrpc_ioctl_init);
3846 /* fall through */
3847 case FASTRPC_IOCTL_INIT_ATTRS:
3848 if (!size)
3849 size = sizeof(struct fastrpc_ioctl_init_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +05303850 K_COPY_FROM_USER(err, 0, &p.init, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003851 if (err)
3852 goto bail;
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05303853 VERIFY(err, p.init.init.filelen >= 0 &&
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303854 p.init.init.filelen < INIT_FILELEN_MAX);
3855 if (err)
3856 goto bail;
3857 VERIFY(err, p.init.init.memlen >= 0 &&
3858 p.init.init.memlen < INIT_MEMLEN_MAX);
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05303859 if (err)
3860 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303861 VERIFY(err, 0 == (err = fastrpc_init_process(fl, &p.init)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003862 if (err)
3863 goto bail;
3864 break;
3865
3866 default:
3867 err = -ENOTTY;
3868 pr_info("bad ioctl: %d\n", ioctl_num);
3869 break;
3870 }
3871 bail:
3872 return err;
3873}
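
/*
 * The INVOKE cases in the ioctl handler above use deliberate switch
 * fall-through: the first matching case records the size of its (smaller)
 * ABI struct and later cases only set a size if none was chosen yet, so a
 * single copy_from_user of "size" bytes serves all four invoke variants.
 * Standalone restatement of just that size selection (illustrative only):
 */
static size_t example_invoke_copy_size(unsigned int ioctl_num)
{
        size_t size = 0;

        switch (ioctl_num) {
        case FASTRPC_IOCTL_INVOKE:
                size = sizeof(struct fastrpc_ioctl_invoke);
                /* fall through */
        case FASTRPC_IOCTL_INVOKE_FD:
                if (!size)
                        size = sizeof(struct fastrpc_ioctl_invoke_fd);
                /* fall through */
        case FASTRPC_IOCTL_INVOKE_ATTRS:
                if (!size)
                        size = sizeof(struct fastrpc_ioctl_invoke_attrs);
                /* fall through */
        case FASTRPC_IOCTL_INVOKE_CRC:
                if (!size)
                        size = sizeof(struct fastrpc_ioctl_invoke_crc);
                break;
        default:
                break;
        }
        return size;
}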
3874
3875static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
3876 unsigned long code,
3877 void *data)
3878{
3879 struct fastrpc_apps *me = &gfa;
3880 struct fastrpc_channel_ctx *ctx;
c_mtharue1a5ce12017-10-13 20:47:09 +05303881 struct notif_data *notifdata = data;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003882 int cid;
3883
3884 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
3885 cid = ctx - &me->channel[0];
3886 if (code == SUBSYS_BEFORE_SHUTDOWN) {
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303887 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003888 ctx->ssrcount++;
c_mtharue1a5ce12017-10-13 20:47:09 +05303889 ctx->issubsystemup = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303890 if (ctx->chan) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303891 if (me->glink)
3892 fastrpc_glink_close(ctx->chan, cid);
3893 else
3894 smd_close(ctx->chan);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303895 ctx->chan = NULL;
3896 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
3897 gcinfo[cid].name, MAJOR(me->dev_no), cid);
3898 }
3899 mutex_unlock(&me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05303900 if (cid == 0)
3901 me->staticpd_flags = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003902 fastrpc_notify_drivers(me, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05303903 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
3904 if (me->channel[0].remoteheap_ramdump_dev &&
3905 notifdata->enable_ramdump) {
3906 me->channel[0].ramdumpenabled = 1;
3907 }
3908 } else if (code == SUBSYS_AFTER_POWERUP) {
3909 ctx->issubsystemup = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003910 }
3911
3912 return NOTIFY_DONE;
3913}
3914
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303915static int fastrpc_pdr_notifier_cb(struct notifier_block *pdrnb,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303916 unsigned long code,
3917 void *data)
3918{
3919 struct fastrpc_apps *me = &gfa;
3920 struct fastrpc_static_pd *spd;
3921 struct notif_data *notifdata = data;
3922
3923 spd = container_of(pdrnb, struct fastrpc_static_pd, pdrnb);
3924 if (code == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01) {
3925 mutex_lock(&me->smd_mutex);
3926 spd->pdrcount++;
3927 spd->ispdup = 0;
3928 pr_info("ADSPRPC: Audio PDR notifier %d %s\n",
3929 MAJOR(me->dev_no), spd->spdname);
3930 mutex_unlock(&me->smd_mutex);
3931 if (!strcmp(spd->spdname,
3932 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME))
3933 me->staticpd_flags = 0;
3934 fastrpc_notify_pdr_drivers(me, spd->spdname);
3935 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
3936 if (me->channel[0].remoteheap_ramdump_dev &&
3937 notifdata->enable_ramdump) {
3938 me->channel[0].ramdumpenabled = 1;
3939 }
3940 } else if (code == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
3941 spd->ispdup = 1;
3942 }
3943
3944 return NOTIFY_DONE;
3945}
3946
3947static int fastrpc_get_service_location_notify(struct notifier_block *nb,
3948 unsigned long opcode, void *data)
3949{
3950 struct fastrpc_static_pd *spd;
3951 struct pd_qmi_client_data *pdr = data;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303952 int curr_state = 0, i = 0;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303953
3954 spd = container_of(nb, struct fastrpc_static_pd, get_service_nb);
3955 if (opcode == LOCATOR_DOWN) {
3956 pr_err("ADSPRPC: Audio PD restart notifier locator down\n");
3957 return NOTIFY_DONE;
3958 }
3959
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303960 for (i = 0; i < pdr->total_domains; i++) {
3961 if ((!strcmp(pdr->domain_list[i].name,
3962 "msm/adsp/audio_pd")) ||
3963 (!strcmp(pdr->domain_list[i].name,
3964 "msm/adsp/sensor_pd"))) {
3965 spd->pdrhandle =
3966 service_notif_register_notifier(
3967 pdr->domain_list[i].name,
3968 pdr->domain_list[i].instance_id,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303969 &spd->pdrnb, &curr_state);
Tharun Kumar Meruguad4beb82018-05-10 19:51:48 +05303970 if (IS_ERR(spd->pdrhandle)) {
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303971 pr_err("ADSPRPC: Unable to register notifier\n");
Tharun Kumar Meruguad4beb82018-05-10 19:51:48 +05303972 } else if (curr_state ==
3973 SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
3974 pr_info("ADSPRPC: STATE_UP_V01 received\n");
3975 spd->ispdup = 1;
3976 } else if (curr_state ==
3977 SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01) {
3978 pr_info("ADSPRPC: STATE_UNINIT_V01 received\n");
3979 }
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303980 break;
3981 }
3982 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303983
3984 return NOTIFY_DONE;
3985}
3986
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003987static const struct file_operations fops = {
3988 .open = fastrpc_device_open,
3989 .release = fastrpc_device_release,
3990 .unlocked_ioctl = fastrpc_device_ioctl,
3991 .compat_ioctl = compat_fastrpc_device_ioctl,
3992};
3993
3994static const struct of_device_id fastrpc_match_table[] = {
3995 { .compatible = "qcom,msm-fastrpc-adsp", },
3996 { .compatible = "qcom,msm-fastrpc-compute", },
3997 { .compatible = "qcom,msm-fastrpc-compute-cb", },
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303998 { .compatible = "qcom,msm-fastrpc-legacy-compute", },
3999 { .compatible = "qcom,msm-fastrpc-legacy-compute-cb", },
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004000 { .compatible = "qcom,msm-adsprpc-mem-region", },
4001 {}
4002};
4003
4004static int fastrpc_cb_probe(struct device *dev)
4005{
4006 struct fastrpc_channel_ctx *chan;
4007 struct fastrpc_session_ctx *sess;
4008 struct of_phandle_args iommuspec;
4009 const char *name;
4010 unsigned int start = 0x80000000;
4011 int err = 0, i;
4012 int secure_vmid = VMID_CP_PIXEL;
4013
c_mtharue1a5ce12017-10-13 20:47:09 +05304014 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
4015 "label", NULL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004016 if (err)
4017 goto bail;
4018 for (i = 0; i < NUM_CHANNELS; i++) {
4019 if (!gcinfo[i].name)
4020 continue;
4021 if (!strcmp(name, gcinfo[i].name))
4022 break;
4023 }
4024 VERIFY(err, i < NUM_CHANNELS);
4025 if (err)
4026 goto bail;
4027 chan = &gcinfo[i];
4028 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4029 if (err)
4030 goto bail;
4031
4032 VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
4033 "#iommu-cells", 0, &iommuspec));
4034 if (err)
4035 goto bail;
4036 sess = &chan->session[chan->sesscount];
4037 sess->smmu.cb = iommuspec.args[0] & 0xf;
4038 sess->used = 0;
4039 sess->smmu.coherent = of_property_read_bool(dev->of_node,
4040 "dma-coherent");
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304041 sess->smmu.sharedcb = of_property_read_bool(dev->of_node,
4042 "shared-cb");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004043 sess->smmu.secure = of_property_read_bool(dev->of_node,
4044 "qcom,secure-context-bank");
4045 if (sess->smmu.secure)
4046 start = 0x60000000;
4047 VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
4048 arm_iommu_create_mapping(&platform_bus_type,
Tharun Kumar Meruguca183f92017-04-27 17:43:27 +05304049 start, 0x78000000)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004050 if (err)
4051 goto bail;
4052
4053 if (sess->smmu.secure)
4054 iommu_domain_set_attr(sess->smmu.mapping->domain,
4055 DOMAIN_ATTR_SECURE_VMID,
4056 &secure_vmid);
4057
4058 VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
4059 if (err)
4060 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05304061 sess->smmu.dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004062 sess->smmu.enabled = 1;
4063 chan->sesscount++;
Sathish Ambley1ca68232017-01-19 10:32:55 -08004064 debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
4065 NULL, &debugfs_fops);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004066bail:
4067 return err;
4068}
4069
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304070static int fastrpc_cb_legacy_probe(struct device *dev)
4071{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304072 struct fastrpc_channel_ctx *chan;
4073 struct fastrpc_session_ctx *first_sess = NULL, *sess = NULL;
4074 const char *name;
4075 unsigned int *sids = NULL, sids_size = 0;
4076 int err = 0, ret = 0, i;
4077
4078 unsigned int start = 0x80000000;
4079
4080 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
4081 "label", NULL)));
4082 if (err)
4083 goto bail;
4084
4085 for (i = 0; i < NUM_CHANNELS; i++) {
4086 if (!gcinfo[i].name)
4087 continue;
4088 if (!strcmp(name, gcinfo[i].name))
4089 break;
4090 }
4091 VERIFY(err, i < NUM_CHANNELS);
4092 if (err)
4093 goto bail;
4094
4095 chan = &gcinfo[i];
4096 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4097 if (err)
4098 goto bail;
4099
4100 first_sess = &chan->session[chan->sesscount];
4101
4102 VERIFY(err, NULL != of_get_property(dev->of_node,
4103 "sids", &sids_size));
4104 if (err)
4105 goto bail;
4106
4107 VERIFY(err, NULL != (sids = kzalloc(sids_size, GFP_KERNEL)));
4108 if (err)
4109 goto bail;
4110	VERIFY(err, !(ret = of_property_read_u32_array(dev->of_node, "sids",
4111				sids, sids_size/sizeof(unsigned int))));
4112	if (err)
4113		goto bail;
4114
4115 VERIFY(err, !IS_ERR_OR_NULL(first_sess->smmu.mapping =
4116 arm_iommu_create_mapping(&platform_bus_type,
4117 start, 0x78000000)));
4118 if (err)
4119 goto bail;
4120
4121 VERIFY(err, !arm_iommu_attach_device(dev, first_sess->smmu.mapping));
4122 if (err)
4123 goto bail;
4124
4126 for (i = 0; i < sids_size/sizeof(unsigned int); i++) {
4127 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4128 if (err)
4129 goto bail;
4130 sess = &chan->session[chan->sesscount];
4131 sess->smmu.cb = sids[i];
4132 sess->smmu.dev = dev;
4133 sess->smmu.mapping = first_sess->smmu.mapping;
4134 sess->smmu.enabled = 1;
4135 sess->used = 0;
4136 sess->smmu.coherent = false;
4137 sess->smmu.secure = false;
4138 chan->sesscount++;
4139 }
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304140bail:
4141 kfree(sids);
4142 return err;
4143}
4146
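/*
 * Read the remote-heap VMID list from the given DT property and build a
 * matching read/write/execute permission list; both are stored in the
 * supplied struct secure_vm.
 */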
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304147static void init_secure_vmid_list(struct device *dev, char *prop_name,
4148 struct secure_vm *destvm)
4149{
4150 int err = 0;
4151 u32 len = 0, i = 0;
4152 u32 *rhvmlist = NULL;
4153 u32 *rhvmpermlist = NULL;
4154
4155 if (!of_find_property(dev->of_node, prop_name, &len))
4156 goto bail;
4157 if (len == 0)
4158 goto bail;
4159 len /= sizeof(u32);
4160 VERIFY(err, NULL != (rhvmlist = kcalloc(len, sizeof(u32), GFP_KERNEL)));
4161 if (err)
4162 goto bail;
4163 VERIFY(err, NULL != (rhvmpermlist = kcalloc(len, sizeof(u32),
4164 GFP_KERNEL)));
4165 if (err)
4166 goto bail;
4167 for (i = 0; i < len; i++) {
4168 err = of_property_read_u32_index(dev->of_node, prop_name, i,
4169 &rhvmlist[i]);
4170 rhvmpermlist[i] = PERM_READ | PERM_WRITE | PERM_EXEC;
4171		pr_info("ADSPRPC: Secure VMID = %d\n", rhvmlist[i]);
4172 if (err) {
4173 pr_err("ADSPRPC: Failed to read VMID\n");
4174 goto bail;
4175 }
4176 }
4177 destvm->vmid = rhvmlist;
4178 destvm->vmperm = rhvmpermlist;
4179 destvm->vmcount = len;
4180bail:
4181 if (err) {
4182 kfree(rhvmlist);
4183 kfree(rhvmpermlist);
4184 }
4185}
4186
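/* Mark each channel secure or non-secure based on the DT-supplied bitmask */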
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304187static void configure_secure_channels(uint32_t secure_domains)
4188{
4189 struct fastrpc_apps *me = &gfa;
4190 int ii = 0;
4191 /*
4192 * secure_domains contains the bitmask of the secure channels
4193 * Bit 0 - ADSP
4194 * Bit 1 - MDSP
4195 * Bit 2 - SLPI
4196 * Bit 3 - CDSP
4197 */
4198 for (ii = ADSP_DOMAIN_ID; ii <= CDSP_DOMAIN_ID; ++ii) {
4199 int secure = (secure_domains >> ii) & 0x01;
4200
4201 me->channel[ii].secure = secure;
4202 }
4203}
4205
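/*
 * Platform probe: dispatch on the compatible string to set up the compute
 * channels, context banks, the shared memory region, and the service
 * locator notifiers for the audio and sensors PDs on the ADSP.
 */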
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004206static int fastrpc_probe(struct platform_device *pdev)
4207{
4208 int err = 0;
4209 struct fastrpc_apps *me = &gfa;
4210 struct device *dev = &pdev->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004211 struct device_node *ion_node, *node;
4212 struct platform_device *ion_pdev;
4213 struct cma *cma;
4214 uint32_t val;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304215 int ret = 0;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304216 uint32_t secure_domains;
c_mtharu63ffc012017-11-16 15:26:56 +05304217
4218 if (of_device_is_compatible(dev->of_node,
4219 "qcom,msm-fastrpc-compute")) {
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304220 init_secure_vmid_list(dev, "qcom,adsp-remoteheap-vmid",
4221 &gcinfo[0].rhvm);
c_mtharu63ffc012017-11-16 15:26:56 +05304222
4224 of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
4225 &me->latency);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304226 if (of_get_property(dev->of_node,
4227 "qcom,secure-domains", NULL) != NULL) {
4228 VERIFY(err, !of_property_read_u32(dev->of_node,
4229 "qcom,secure-domains",
4230 &secure_domains));
zhaochenfc798572018-08-17 15:32:37 +08004231 if (!err) {
4232 me->secure_flag = true;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304233 configure_secure_channels(secure_domains);
zhaochenfc798572018-08-17 15:32:37 +08004234 } else {
4235 me->secure_flag = false;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304236 pr_info("adsprpc: unable to read the domain configuration from dts\n");
zhaochenfc798572018-08-17 15:32:37 +08004237 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304238 }
c_mtharu63ffc012017-11-16 15:26:56 +05304239 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004240 if (of_device_is_compatible(dev->of_node,
4241 "qcom,msm-fastrpc-compute-cb"))
4242 return fastrpc_cb_probe(dev);
4243
4244 if (of_device_is_compatible(dev->of_node,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304245 "qcom,msm-fastrpc-legacy-compute")) {
4246 me->glink = false;
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304247 me->legacy = 1;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304248 }
4249
4250 if (of_device_is_compatible(dev->of_node,
4251		"qcom,msm-fastrpc-legacy-compute-cb"))
4252		return fastrpc_cb_legacy_probe(dev);
4254
4255 if (of_device_is_compatible(dev->of_node,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004256 "qcom,msm-adsprpc-mem-region")) {
4257 me->dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004258 ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
4259 if (ion_node) {
4260 for_each_available_child_of_node(ion_node, node) {
4261 if (of_property_read_u32(node, "reg", &val))
4262 continue;
4263 if (val != ION_ADSP_HEAP_ID)
4264 continue;
4265 ion_pdev = of_find_device_by_node(node);
4266 if (!ion_pdev)
4267 break;
4268 cma = dev_get_cma_area(&ion_pdev->dev);
4269 if (cma) {
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304270 me->range.addr = cma_get_base(cma);
4271 me->range.size =
4272 (size_t)cma_get_size(cma);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004273 }
4274 break;
4275 }
4276 }
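		/*
		 * Share the CMA region with the modem, SSC and ADSP Q6
		 * VMIDs unless the DT restricts access to it.
		 */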
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304277 if (me->range.addr && !of_property_read_bool(dev->of_node,
Tharun Kumar Merugu6b7a4a22018-01-17 16:08:07 +05304278 "restrict-access")) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004279 int srcVM[1] = {VMID_HLOS};
4280 int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
4281 VMID_ADSP_Q6};
Sathish Ambley84d11862017-05-15 14:36:05 -07004282 int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004283 PERM_READ | PERM_WRITE | PERM_EXEC,
4284 PERM_READ | PERM_WRITE | PERM_EXEC,
4285 PERM_READ | PERM_WRITE | PERM_EXEC,
4286 };
4287
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304288 VERIFY(err, !hyp_assign_phys(me->range.addr,
4289 me->range.size, srcVM, 1,
4290 destVM, destVMperm, 4));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004291 if (err)
4292 goto bail;
4293 }
4294 return 0;
4295 }
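	/* Track the audio PD on the ADSP through the service locator */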
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304296 if (of_property_read_bool(dev->of_node,
4297 "qcom,fastrpc-adsp-audio-pdr")) {
4298 int session;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004299
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304300 VERIFY(err, !fastrpc_get_adsp_session(
4301 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
4302 if (err)
4303 goto spdbail;
4304 me->channel[0].spd[session].get_service_nb.notifier_call =
4305 fastrpc_get_service_location_notify;
4306 ret = get_service_location(
4307 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
4308 AUDIO_PDR_ADSP_SERVICE_NAME,
4309 &me->channel[0].spd[session].get_service_nb);
4310 if (ret)
4311 pr_err("ADSPRPC: Get service location failed: %d\n",
4312 ret);
4313 }
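	/* Likewise for the sensors PD on the ADSP */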
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304314 if (of_property_read_bool(dev->of_node,
4315 "qcom,fastrpc-adsp-sensors-pdr")) {
4316 int session;
4317
4318 VERIFY(err, !fastrpc_get_adsp_session(
4319 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
4320 if (err)
4321 goto spdbail;
4322 me->channel[0].spd[session].get_service_nb.notifier_call =
4323 fastrpc_get_service_location_notify;
4324 ret = get_service_location(
4325 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
4326 SENSORS_PDR_ADSP_SERVICE_NAME,
4327 &me->channel[0].spd[session].get_service_nb);
4328 if (ret)
4329 pr_err("ADSPRPC: Get service location failed: %d\n",
4330 ret);
4331 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304332spdbail:
4333 err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004334 VERIFY(err, !of_platform_populate(pdev->dev.of_node,
4335 fastrpc_match_table,
4336 NULL, &pdev->dev));
4337 if (err)
4338 goto bail;
4339bail:
4340 return err;
4341}
4342
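/*
 * Undo channel setup: drop transport channel references, detach devices and
 * release SMMU mappings for every session, and free the remote-heap VMID
 * lists.
 */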
4343static void fastrpc_deinit(void)
4344{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304345 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004346 struct fastrpc_channel_ctx *chan = gcinfo;
4347 int i, j;
4348
4349 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
4350 if (chan->chan) {
4351 kref_put_mutex(&chan->kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304352 fastrpc_channel_close, &me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05304353 chan->chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004354 }
4355 for (j = 0; j < NUM_SESSIONS; j++) {
4356 struct fastrpc_session_ctx *sess = &chan->session[j];
c_mtharue1a5ce12017-10-13 20:47:09 +05304357 if (sess->smmu.dev) {
4358 arm_iommu_detach_device(sess->smmu.dev);
4359 sess->smmu.dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004360 }
4361 if (sess->smmu.mapping) {
4362 arm_iommu_release_mapping(sess->smmu.mapping);
c_mtharue1a5ce12017-10-13 20:47:09 +05304363 sess->smmu.mapping = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004364 }
4365 }
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304366 kfree(chan->rhvm.vmid);
4367 kfree(chan->rhvm.vmperm);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004368 }
4369}
4370
4371static struct platform_driver fastrpc_driver = {
4372 .probe = fastrpc_probe,
4373 .driver = {
4374 .name = "fastrpc",
4375 .owner = THIS_MODULE,
4376 .of_match_table = fastrpc_match_table,
4377 },
4378};
4379
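/*
 * Module init: register the platform driver, create the character device
 * region plus the non-secure and secure device nodes, and register SSR
 * notifiers for every channel.
 */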
4380static int __init fastrpc_device_init(void)
4381{
4382 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +05304383 struct device *dev = NULL;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304384 struct device *secure_dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004385 int err = 0, i;
4386
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05304387 debugfs_root = debugfs_create_dir("adsprpc", NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004388 memset(me, 0, sizeof(*me));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004389 fastrpc_init(me);
4390 me->dev = NULL;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304391 me->glink = true;
zhaochenfc798572018-08-17 15:32:37 +08004392 me->secure_flag = false;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004393 VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
4394 if (err)
4395 goto register_bail;
4396 VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
4397 DEVICE_NAME));
4398 if (err)
4399 goto alloc_chrdev_bail;
4400 cdev_init(&me->cdev, &fops);
4401 me->cdev.owner = THIS_MODULE;
4402 VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304403 NUM_DEVICES));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004404 if (err)
4405 goto cdev_init_bail;
4406 me->class = class_create(THIS_MODULE, "fastrpc");
4407 VERIFY(err, !IS_ERR(me->class));
4408 if (err)
4409 goto class_create_bail;
4410 me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304411
4412 /*
4413 * Create devices and register with sysfs
4414 * Create first device with minor number 0
4415 */
Sathish Ambley36849af2017-02-02 09:35:55 -08004416 dev = device_create(me->class, NULL,
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304417 MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV),
4418 NULL, DEVICE_NAME);
Sathish Ambley36849af2017-02-02 09:35:55 -08004419 VERIFY(err, !IS_ERR_OR_NULL(dev));
4420 if (err)
4421 goto device_create_bail;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304422
4423	/* Create the secure device node with its dedicated minor number */
4424 secure_dev = device_create(me->class, NULL,
4425 MKDEV(MAJOR(me->dev_no), MINOR_NUM_SECURE_DEV),
4426 NULL, DEVICE_NAME_SECURE);
4427 VERIFY(err, !IS_ERR_OR_NULL(secure_dev));
4428 if (err)
4429 goto device_create_bail;
4430
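	/* All channels default to the secure node; CDSP uses the non-secure one */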
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004431 for (i = 0; i < NUM_CHANNELS; i++) {
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304432 me->channel[i].dev = secure_dev;
4433 if (i == CDSP_DOMAIN_ID)
4434 me->channel[i].dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004435 me->channel[i].ssrcount = 0;
4436 me->channel[i].prevssrcount = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05304437 me->channel[i].issubsystemup = 1;
4438 me->channel[i].ramdumpenabled = 0;
4439 me->channel[i].remoteheap_ramdump_dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004440 me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
4441 me->channel[i].handle = subsys_notif_register_notifier(
4442 gcinfo[i].subsys,
4443 &me->channel[i].nb);
4444 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004445 me->client = msm_ion_client_create(DEVICE_NAME);
4446 VERIFY(err, !IS_ERR_OR_NULL(me->client));
4447 if (err)
4448 goto device_create_bail;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05304449
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004450 return 0;
4451device_create_bail:
4452 for (i = 0; i < NUM_CHANNELS; i++) {
Sathish Ambley36849af2017-02-02 09:35:55 -08004453 if (me->channel[i].handle)
4454 subsys_notif_unregister_notifier(me->channel[i].handle,
4455 &me->channel[i].nb);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004456 }
Sathish Ambley36849af2017-02-02 09:35:55 -08004457 if (!IS_ERR_OR_NULL(dev))
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304458 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4459 MINOR_NUM_DEV));
4460 if (!IS_ERR_OR_NULL(secure_dev))
4461 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4462 MINOR_NUM_SECURE_DEV));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004463 class_destroy(me->class);
4464class_create_bail:
4465 cdev_del(&me->cdev);
4466cdev_init_bail:
4467 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
4468alloc_chrdev_bail:
4469register_bail:
4470	fastrpc_deinit();
	debugfs_remove_recursive(debugfs_root);
4471	return err;
4472}
4473
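/*
 * Module exit: release open file state and channels, destroy the device
 * nodes, and tear down the ION client and debugfs entries.
 */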
4474static void __exit fastrpc_device_exit(void)
4475{
4476 struct fastrpc_apps *me = &gfa;
4477 int i;
4478
4479 fastrpc_file_list_dtor(me);
4480 fastrpc_deinit();
4481 for (i = 0; i < NUM_CHANNELS; i++) {
4482 if (!gcinfo[i].name)
4483 continue;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004484 subsys_notif_unregister_notifier(me->channel[i].handle,
4485 &me->channel[i].nb);
4486 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304487
4488	/* Destroy the secure and non-secure devices */
4489 device_destroy(me->class, MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV));
4490 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4491 MINOR_NUM_SECURE_DEV));
4492
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004493 class_destroy(me->class);
4494 cdev_del(&me->cdev);
4495 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
4496 ion_client_destroy(me->client);
Sathish Ambley1ca68232017-01-19 10:32:55 -08004497 debugfs_remove_recursive(debugfs_root);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004498}
4499
4500late_initcall(fastrpc_device_init);
4501module_exit(fastrpc_device_exit);
4502
4503MODULE_LICENSE("GPL v2");