/*
 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/msm_ion.h>
#include <soc/qcom/secure_buffer.h>
#include <soc/qcom/glink.h>
#include <soc/qcom/smd.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/service-notifier.h>
#include <soc/qcom/service-locator.h>
#include <linux/scatterlist.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/sort.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <asm/dma-iommu.h>
#include <soc/qcom/scm.h>
#include "adsprpc_compat.h"
#include "adsprpc_shared.h"
#include <soc/qcom/ramdump.h>
#include <linux/debugfs.h>
#include <linux/pm_qos.h>
#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
#define TZ_PIL_AUTH_QDSP6_PROC 1
#define ADSP_MMAP_HEAP_ADDR 4
#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
#define ADSP_MMAP_ADD_PAGES 0x1000
#define FASTRPC_DMAHANDLE_NOMAP (16)

#define FASTRPC_ENOSUCH 39
#define VMID_SSC_Q6 5
#define VMID_ADSP_Q6 6
#define DEBUGFS_SIZE 3072
#define UL_SIZE 25
#define PID_SIZE 10

#define AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME "audio_pdr_adsprpc"
#define AUDIO_PDR_ADSP_SERVICE_NAME "avs/audio"

#define SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME "sensors_pdr_adsprpc"
#define SENSORS_PDR_ADSP_SERVICE_NAME "tms/servreg"

#define RPC_TIMEOUT (5 * HZ)
#define BALIGN 128
#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp */
#define NUM_SESSIONS 9 /* 8 compute, 1 cpz */
#define M_FDLIST (16)
#define M_CRCLIST (64)
#define SESSION_ID_INDEX (30)
#define FASTRPC_CTX_MAGIC (0xbeeddeed)
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_CTXID_MASK (0xFF0)
#define NUM_DEVICES 2 /* adsprpc-smd, adsprpc-smd-secure */
#define MINOR_NUM_DEV 0
#define MINOR_NUM_SECURE_DEV 1
#define NON_SECURE_CHANNEL 0
#define SECURE_CHANNEL 1

#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)

#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)

#define FASTRPC_LINK_STATE_DOWN (0x0)
#define FASTRPC_LINK_STATE_UP (0x1)
#define FASTRPC_LINK_DISCONNECTED (0x0)
#define FASTRPC_LINK_CONNECTING (0x1)
#define FASTRPC_LINK_CONNECTED (0x3)
#define FASTRPC_LINK_DISCONNECTING (0x7)
#define FASTRPC_LINK_REMOTE_DISCONNECTING (0x8)
#define FASTRPC_GLINK_INTENT_LEN (64)
#define FASTRPC_GLINK_INTENT_NUM (16)

#define PERF_KEYS \
	"count:flush:map:copy:glink:getargs:putargs:invalidate:invoke:tid:ptr"
#define FASTRPC_STATIC_HANDLE_KERNEL (1)
#define FASTRPC_STATIC_HANDLE_LISTENER (3)
#define FASTRPC_STATIC_HANDLE_MAX (20)
#define FASTRPC_LATENCY_CTRL_ENB (1)

#define INIT_FILELEN_MAX (2*1024*1024)
#define INIT_MEMLEN_MAX (8*1024*1024)

#define PERF_END (void)0

#define PERF(enb, cnt, ff) \
	{\
		struct timespec startT = {0};\
		int64_t *counter = cnt;\
		if (enb && counter) {\
			getnstimeofday(&startT);\
		} \
		ff ;\
		if (enb && counter) {\
			*counter += getnstimediff(&startT);\
		} \
	}

#define GET_COUNTER(perf_ptr, offset) \
	(perf_ptr != NULL ?\
		(((offset >= 0) && (offset < PERF_KEY_MAX)) ?\
			(int64_t *)(perf_ptr + offset)\
				: (int64_t *)NULL) : (int64_t *)NULL)

static int fastrpc_glink_open(int cid);
static void fastrpc_glink_close(void *chan, int cid);
static int fastrpc_pdr_notifier_cb(struct notifier_block *nb,
					unsigned long code,
					void *data);
static struct dentry *debugfs_root;
static struct dentry *debugfs_global_file;

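/*
 * Page arithmetic helpers: translate buffer addresses and lengths into
 * page-aligned starts, offsets, page counts and rounded-up sizes.
 */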
static inline uint64_t buf_page_start(uint64_t buf)
{
	uint64_t start = (uint64_t) buf & PAGE_MASK;
	return start;
}

static inline uint64_t buf_page_offset(uint64_t buf)
{
	uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
	return offset;
}

static inline uint64_t buf_num_pages(uint64_t buf, size_t len)
{
	uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
	uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
	uint64_t nPages = end - start + 1;
	return nPages;
}

static inline uint64_t buf_page_size(uint32_t size)
{
	uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;

	return sz > PAGE_SIZE ? sz : PAGE_SIZE;
}

static inline void *uint64_to_ptr(uint64_t addr)
{
	void *ptr = (void *)((uintptr_t)addr);

	return ptr;
}

static inline uint64_t ptr_to_uint64(void *ptr)
{
	uint64_t addr = (uint64_t)((uintptr_t)ptr);

	return addr;
}

struct secure_vm {
	int *vmid;
	int *vmperm;
	int vmcount;
};

struct fastrpc_file;

struct fastrpc_buf {
	struct hlist_node hn;
	struct hlist_node hn_rem;
	struct fastrpc_file *fl;
	void *virt;
	uint64_t phys;
	size_t size;
	unsigned long dma_attr;
	uintptr_t raddr;
	uint32_t flags;
	int remote;
};

struct fastrpc_ctx_lst;

struct overlap {
	uintptr_t start;
	uintptr_t end;
	int raix;
	uintptr_t mstart;
	uintptr_t mend;
	uintptr_t offset;
};

struct smq_invoke_ctx {
	struct hlist_node hn;
	struct completion work;
	int retval;
	int pid;
	int tgid;
	remote_arg_t *lpra;
	remote_arg64_t *rpra;
	remote_arg64_t *lrpra;		/* Local copy of rpra for put_args */
	int *fds;
	struct fastrpc_mmap **maps;
	struct fastrpc_buf *buf;
	struct fastrpc_buf *lbuf;
	size_t used;
	struct fastrpc_file *fl;
	uint32_t sc;
	struct overlap *overs;
	struct overlap **overps;
	struct smq_msg msg;
	unsigned int magic;
	unsigned int *attrs;
	uint32_t *crc;
	uint64_t ctxid;
};

struct fastrpc_ctx_lst {
	struct hlist_head pending;
	struct hlist_head interrupted;
};

struct fastrpc_smmu {
	struct device *dev;
	struct dma_iommu_mapping *mapping;
	int cb;
	int enabled;
	int faults;
	int secure;
	int coherent;
	int sharedcb;
};

struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};

struct fastrpc_static_pd {
	char *spdname;
	struct notifier_block pdrnb;
	struct notifier_block get_service_nb;
	void *pdrhandle;
	int pdrcount;
	int prevpdrcount;
	int ispdup;
};

struct fastrpc_glink_info {
	int link_state;
	int port_state;
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};

struct fastrpc_channel_ctx {
	char *name;
	char *subsys;
	void *chan;
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct fastrpc_static_pd spd[NUM_SESSIONS];
	struct completion work;
	struct completion workport;
	struct notifier_block nb;
	struct kref kref;
	int channel;
	int sesscount;
	int ssrcount;
	void *handle;
	int prevssrcount;
	int issubsystemup;
	int vmid;
	struct secure_vm rhvm;
	int ramdumpenabled;
	void *remoteheap_ramdump_dev;
	struct fastrpc_glink_info link;
	/* Indicates, if channel is restricted to secure node only */
	int secure;
};

struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;
	uint32_t staticpd_flags;
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;
	spinlock_t hlock;
	struct ion_client *client;
	struct device *dev;
	unsigned int latency;
	bool glink;
	bool legacy;
	bool secure_flag;
	spinlock_t ctxlock;
	struct smq_invoke_ctx *ctxtable[FASTRPC_CTX_MAX];
};

struct fastrpc_mmap {
	struct hlist_node hn;
	struct fastrpc_file *fl;
	struct fastrpc_apps *apps;
	int fd;
	uint32_t flags;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;
	size_t size;
	uintptr_t va;
	size_t len;
	int refs;
	uintptr_t raddr;
	int uncached;
	int secure;
	uintptr_t attr;
};

enum fastrpc_perfkeys {
	PERF_COUNT = 0,
	PERF_FLUSH = 1,
	PERF_MAP = 2,
	PERF_COPY = 3,
	PERF_LINK = 4,
	PERF_GETARGS = 5,
	PERF_PUTARGS = 6,
	PERF_INVARGS = 7,
	PERF_INVOKE = 8,
	PERF_KEY_MAX = 9,
};

struct fastrpc_perf {
	int64_t count;
	int64_t flush;
	int64_t map;
	int64_t copy;
	int64_t link;
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;
	int64_t invoke;
	int64_t tid;
	struct hlist_node hn;
};

struct fastrpc_file {
	struct hlist_node hn;
	spinlock_t hlock;
	struct hlist_head maps;
	struct hlist_head cached_bufs;
	struct hlist_head remote_bufs;
	struct fastrpc_ctx_lst clst;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_buf *init_mem;
	struct fastrpc_session_ctx *secsctx;
	uint32_t mode;
	uint32_t profile;
	int sessionid;
	int tgid;
	int cid;
	int ssrcount;
	int pd;
	char *spdname;
	int file_close;
	int sharedcb;
	struct fastrpc_apps *apps;
	struct hlist_head perf;
	struct dentry *debugfs_file;
	struct mutex perf_mutex;
	struct pm_qos_request pm_qos_req;
	int qos_request;
	struct mutex map_mutex;
	struct mutex fl_map_mutex;
	int refcount;
	/* Identifies the device (MINOR_NUM_DEV / MINOR_NUM_SECURE_DEV) */
	int dev_minor;
	char *debug_buf;
};

static struct fastrpc_apps gfa;

static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.channel = SMD_APPS_QDSP,
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
		.spd = {
			{
				.spdname =
					AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
				.pdrnb.notifier_call =
						fastrpc_pdr_notifier_cb,
			},
			{
				.spdname =
				SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
				.pdrnb.notifier_call =
						fastrpc_pdr_notifier_cb,
			}
		},
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.channel = SMD_APPS_MODEM,
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.channel = SMD_APPS_DSPS,
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};

static int hlosvm[1] = {VMID_HLOS};
static int hlosvmperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

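/* Nanoseconds elapsed since *start; used by the PERF() profiling macro. */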
static inline int64_t getnstimediff(struct timespec *start)
{
	int64_t ns;
	struct timespec ts, b;

	getnstimeofday(&ts);
	b = timespec_sub(ts, *start);
	ns = timespec_to_ns(&b);
	return ns;
}

static inline int64_t *getperfcounter(struct fastrpc_file *fl, int key)
{
	int err = 0;
	int64_t *val = NULL;
	struct fastrpc_perf *perf = NULL, *fperf = NULL;
	struct hlist_node *n = NULL;

	VERIFY(err, !IS_ERR_OR_NULL(fl));
	if (err)
		goto bail;

	mutex_lock(&fl->perf_mutex);
	hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
		if (perf->tid == current->pid) {
			fperf = perf;
			break;
		}
	}

	if (IS_ERR_OR_NULL(fperf)) {
		fperf = kzalloc(sizeof(*fperf), GFP_KERNEL);

		VERIFY(err, !IS_ERR_OR_NULL(fperf));
		if (err) {
			mutex_unlock(&fl->perf_mutex);
			kfree(fperf);
			goto bail;
		}

		fperf->tid = current->pid;
		hlist_add_head(&fperf->hn, &fl->perf);
	}

	val = ((int64_t *)fperf) + key;
	mutex_unlock(&fl->perf_mutex);
bail:
	return val;
}

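/*
 * Free a fastrpc buffer. When 'cache' is set the buffer is parked on the
 * file's cached_bufs list for reuse; otherwise it is dropped from the
 * remote list (if present), reassigned back to HLOS when the channel has
 * a VM id, and its DMA memory is released.
 */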
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->cached_bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (buf->remote) {
		spin_lock(&fl->hlock);
		hlist_del_init(&buf->hn_rem);
		spin_unlock(&fl->hlock);
		buf->remote = 0;
		buf->raddr = 0;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_attrs(fl->sctx->smmu.dev, buf->size, buf->virt,
					buf->phys, buf->dma_attr);
	}
	kfree(buf);
}

static void fastrpc_cached_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
			hlist_del_init(&buf->hn);
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}

static void fastrpc_remote_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->remote_bufs, hn_rem) {
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}

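/*
 * Track a mapping: ADSP_MMAP_HEAP_ADDR / ADSP_MMAP_REMOTE_HEAP_ADDR
 * mappings go on the global list under gfa.hlock, everything else on the
 * owning file's list.
 */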
static void fastrpc_mmap_add(struct fastrpc_mmap *map)
{
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		struct fastrpc_apps *me = &gfa;

		spin_lock(&me->hlock);
		hlist_add_head(&map->hn, &me->maps);
		spin_unlock(&me->hlock);
	} else {
		struct fastrpc_file *fl = map->fl;

		hlist_add_head(&map->hn, &fl->maps);
	}
}

static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
		uintptr_t va, size_t len, int mflags, int refs,
		struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_mmap *match = NULL, *map = NULL;
	struct hlist_node *n;

	if ((va + len) < va)
		return -EOVERFLOW;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs)
					map->refs++;
				match = map;
				break;
			}
		}
		spin_unlock(&me->hlock);
	} else {
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs)
					map->refs++;
				match = map;
				break;
			}
		}
	}
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

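/*
 * Allocate DMA memory from the global adsprpc-mem device; used to back
 * ADSP_MMAP_HEAP_ADDR / ADSP_MMAP_REMOTE_HEAP_ADDR regions.
 */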
static int dma_alloc_memory(dma_addr_t *region_phys, void **vaddr, size_t size,
					unsigned long dma_attrs)
{
	struct fastrpc_apps *me = &gfa;

	if (me->dev == NULL) {
		pr_err("device adsprpc-mem is not initialized\n");
		return -ENODEV;
	}
	*vaddr = dma_alloc_attrs(me->dev, size, region_phys, GFP_KERNEL,
								dma_attrs);
	if (IS_ERR_OR_NULL(*vaddr)) {
		pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx, returned %pK\n",
				current->comm, __func__, size, (*vaddr));
		return -ENOMEM;
	}
	return 0;
}

static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       size_t len, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = NULL, *map;
	struct hlist_node *n;
	struct fastrpc_apps *me = &gfa;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

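/*
 * Drop a reference on a mapping and, on the last reference (or when a
 * forced free is requested via 'flags'), release the backing memory:
 * remote-heap DMA memory, a bare ION handle, or the dma-buf attachment
 * together with its SMMU mapping and hypervisor assignment.
 */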
static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;

	if (!map)
		return;
	fl = map->fl;
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		spin_unlock(&me->hlock);
		if (map->refs > 0)
			return;
	} else {
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		if (map->refs > 0 && !flags)
			return;
	}
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		unsigned long dma_attrs = 0;

		if (me->dev == NULL) {
			pr_err("failed to free remote heap allocation\n");
			return;
		}
		if (map->phys) {
			dma_attrs |=
			DMA_ATTR_SKIP_ZEROING | DMA_ATTR_NO_KERNEL_MAPPING;
			dma_free_attrs(me->dev, map->size, (void *)map->va,
					(dma_addr_t)map->phys, dma_attrs);
		}
	} else if (map->flags == FASTRPC_DMAHANDLE_NOMAP) {
		if (!IS_ERR_OR_NULL(map->handle))
			ion_free(fl->apps->client, map->handle);
	} else {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		if (!IS_ERR_OR_NULL(map->handle))
			ion_free(fl->apps->client, map->handle);
		if (sess && sess->smmu.enabled) {
			if (map->size || map->phys)
				msm_dma_unmap_sg(sess->smmu.dev,
					map->table->sgl,
					map->table->nents, DMA_BIDIRECTIONAL,
					map->buf);
		}
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid && map->phys) {
			int srcVM[2] = {VMID_HLOS, vmid};

			hyp_assign_phys(map->phys, buf_page_size(map->size),
				srcVM, 2, destVM, destVMperm, 1);
		}

		if (!IS_ERR_OR_NULL(map->table))
			dma_buf_unmap_attachment(map->attach, map->table,
					DMA_BIDIRECTIONAL);
		if (!IS_ERR_OR_NULL(map->attach))
			dma_buf_detach(map->buf, map->attach);
		if (!IS_ERR_OR_NULL(map->buf))
			dma_buf_put(map->buf);
	}
	kfree(map);
}

static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
			struct fastrpc_session_ctx **session);

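/*
 * Create (or reuse) a mapping for a buffer fd. Remote heap requests are
 * allocated from the adsprpc-mem device, FASTRPC_DMAHANDLE_NOMAP only
 * resolves the physical address of the ION handle, and the normal path
 * attaches the dma-buf, maps it through the session SMMU and, when the
 * channel has a VM id, hyp-assigns the pages to the DSP.
 */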
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, size_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = NULL;
	unsigned long attrs;
	dma_addr_t region_phys = 0;
	void *region_vaddr = NULL;
	unsigned long flags;
	int err = 0, vmid;

	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		unsigned long dma_attrs = DMA_ATTR_SKIP_ZEROING |
					DMA_ATTR_NO_KERNEL_MAPPING;

		map->apps = me;
		map->fl = NULL;
		VERIFY(err, !dma_alloc_memory(&region_phys, &region_vaddr,
					len, dma_attrs));
		if (err)
			goto bail;
		map->phys = (uintptr_t)region_phys;
		map->size = len;
		map->va = (uintptr_t)region_vaddr;
	} else if (mflags == FASTRPC_DMAHANDLE_NOMAP) {
		ion_phys_addr_t iphys;

		VERIFY(err, !IS_ERR_OR_NULL(map->handle =
				ion_import_dma_buf_fd(fl->apps->client, fd)));
		if (err)
			goto bail;

		map->uncached = 1;
		map->buf = NULL;
		map->attach = NULL;
		map->table = NULL;
		map->va = 0;
		map->phys = 0;

		err = ion_phys(fl->apps->client, map->handle,
			&iphys, &map->size);
		if (err)
			goto bail;
		map->phys = (uint64_t)iphys;
	} else {
		if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
			pr_info("adsprpc: buffer mapped with persist attr %x\n",
				(unsigned int)map->attr);
			map->refs = 2;
		}
		VERIFY(err, !IS_ERR_OR_NULL(map->handle =
				ion_import_dma_buf_fd(fl->apps->client, fd)));
		if (err)
			goto bail;
		VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
		if (err)
			goto bail;

		map->secure = flags & ION_FLAG_SECURE;
		if (map->secure) {
			if (!fl->secsctx)
				err = fastrpc_session_alloc(chan, 1,
							&fl->secsctx);
			if (err)
				goto bail;
		}
		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		VERIFY(err, !IS_ERR_OR_NULL(sess));
		if (err)
			goto bail;

		map->uncached = !ION_IS_CACHED(flags);
		if (map->attr & FASTRPC_ATTR_NOVA && !sess->smmu.coherent)
			map->uncached = 1;

		VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->attach =
				dma_buf_attach(map->buf, sess->smmu.dev)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
		if (err)
			goto bail;
		if (sess->smmu.enabled) {
			attrs = DMA_ATTR_EXEC_MAPPING;

			if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
				(sess->smmu.coherent && map->uncached))
				attrs |= DMA_ATTR_FORCE_NON_COHERENT;
			else if (map->attr & FASTRPC_ATTR_COHERENT)
				attrs |= DMA_ATTR_FORCE_COHERENT;

			VERIFY(err, map->table->nents ==
				msm_dma_map_sg_attrs(sess->smmu.dev,
					map->table->sgl, map->table->nents,
					DMA_BIDIRECTIONAL, map->buf, attrs));
			if (err)
				goto bail;
		} else {
			VERIFY(err, map->table->nents == 1);
			if (err)
				goto bail;
		}
		map->phys = sg_dma_address(map->table->sgl);

		if (sess->smmu.cb) {
			map->phys += ((uint64_t)sess->smmu.cb << 32);
			map->size = sg_dma_len(map->table->sgl);
		} else {
			map->size = buf_page_size(len);
		}

		vmid = fl->apps->channel[fl->cid].vmid;
		if (!sess->smmu.enabled && !vmid) {
			VERIFY(err, map->phys >= me->range.addr &&
			map->phys + map->size <=
			me->range.addr + me->range.size);
			if (err) {
				pr_err("adsprpc: mmap fail out of range\n");
				goto bail;
			}
		}
		if (vmid) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[2] = {VMID_HLOS, vmid};
			int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

			VERIFY(err, !hyp_assign_phys(map->phys,
					buf_page_size(map->size),
					srcVM, 1, destVM, destVMperm, 2));
			if (err)
				goto bail;
		}
		map->va = va;
	}
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map, 0);
	return err;
}

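/*
 * Allocate a DMA buffer for an RPC call. Non-remote requests are first
 * served from the file's cached_bufs free list; on allocation failure the
 * cache is flushed and the allocation retried once.
 */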
static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
			unsigned long dma_attr, uint32_t rflags,
			int remote, struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = NULL, *fr = NULL;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	if (!remote) {
		/* find the smallest buffer that fits in the cache */
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
			if (buf->size >= size && (!fr || fr->size > buf->size))
				fr = buf;
		}
		if (fr)
			hlist_del_init(&fr->hn);
		spin_unlock(&fl->hlock);
		if (fr) {
			*obuf = fr;
			return 0;
		}
	}
	buf = NULL;
	VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->dma_attr = dma_attr;
	buf->flags = rflags;
	buf->raddr = 0;
	buf->remote = 0;
	buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
						(dma_addr_t *)&buf->phys,
						GFP_KERNEL, buf->dma_attr);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_cached_buf_list_free(fl);
		buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
						(dma_addr_t *)&buf->phys,
						GFP_KERNEL, buf->dma_attr);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err) {
		err = -ENOMEM;
		pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx\n",
			current->comm, __func__, size);
		goto bail;
	}
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	if (remote) {
		INIT_HLIST_NODE(&buf->hn_rem);
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn_rem, &fl->remote_bufs);
		spin_unlock(&fl->hlock);
		buf->remote = remote;
	}
	*obuf = buf;
 bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}


static int context_restore_interrupted(struct fastrpc_file *fl,
				struct fastrpc_ioctl_invoke_crc *inv,
				struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int overlap_ptr_cmp(const void *a, const void *b)
{
	struct overlap *pa = *((struct overlap **)a);
	struct overlap *pb = *((struct overlap **)b);
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);
	return st == 0 ? ed : st;
}

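/*
 * Compute, for each input/output buffer, the sub-range that does not
 * overlap an earlier (lower-starting) buffer, so overlapping regions are
 * copied into the message buffer only once.
 */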
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}

#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst),\
			(void const __user *)(src),\
							(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((void __user *)(dst),\
						(src), (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)


static void context_free(struct smq_invoke_ctx *ctx);

static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			 struct fastrpc_ioctl_invoke_crc *invokefd,
			 struct smq_invoke_ctx **po)
{
	struct fastrpc_apps *me = &gfa;
	int err = 0, bufs, ii, size = 0;
	struct smq_invoke_ctx *ctx = NULL;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	if (me->legacy) {
		ctx->overs = (struct overlap *)(&ctx->fds[bufs]);
		ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
	} else {
		ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
		ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
		ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
	}

	K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}
	ctx->crc = (uint32_t *)invokefd->crc;
	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = fl->tgid;
	init_completion(&ctx->work);
	ctx->magic = FASTRPC_CTX_MAGIC;

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	spin_lock(&me->ctxlock);
	for (ii = 0; ii < FASTRPC_CTX_MAX; ii++) {
		if (!me->ctxtable[ii]) {
			me->ctxtable[ii] = ctx;
			ctx->ctxid = (ptr_to_uint64(ctx) & ~0xFFF)|(ii << 4);
			break;
		}
	}
	spin_unlock(&me->ctxlock);
	VERIFY(err, ii < FASTRPC_CTX_MAX);
	if (err) {
		pr_err("adsprpc: out of context memory\n");
		goto bail;
	}

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}

static void context_save_interrupted(struct smq_invoke_ctx *ctx)
{
	struct fastrpc_ctx_lst *clst = &ctx->fl->clst;

	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	hlist_add_head(&ctx->hn, &clst->interrupted);
	spin_unlock(&ctx->fl->hlock);
}

static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	struct fastrpc_apps *me = &gfa;
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	mutex_lock(&ctx->fl->fl_map_mutex);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i], 0);

	mutex_unlock(&ctx->fl->fl_map_mutex);
	fastrpc_buf_free(ctx->buf, 1);
	fastrpc_buf_free(ctx->lbuf, 1);
	ctx->magic = 0;
	ctx->ctxid = 0;

	spin_lock(&me->ctxlock);
	for (i = 0; i < FASTRPC_CTX_MAX; i++) {
		if (me->ctxtable[i] == ctx) {
			me->ctxtable[i] = NULL;
			break;
		}
	}
	spin_unlock(&me->ctxlock);

	kfree(ctx);
}

static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}


static void fastrpc_notify_users(struct fastrpc_file *me)
{
	struct smq_invoke_ctx *ictx;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
		complete(&ictx->work);
	}
	hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
		complete(&ictx->work);
	}
	spin_unlock(&me->hlock);
}


static void fastrpc_notify_users_staticpd_pdr(struct fastrpc_file *me)
{
	struct smq_invoke_ctx *ictx;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
		if (ictx->msg.pid)
			complete(&ictx->work);
	}
	hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
		if (ictx->msg.pid)
			complete(&ictx->work);
	}
	spin_unlock(&me->hlock);
}


static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
{
	struct fastrpc_file *fl;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		if (fl->cid == cid)
			fastrpc_notify_users(fl);
	}
	spin_unlock(&me->hlock);
}

static void fastrpc_notify_pdr_drivers(struct fastrpc_apps *me, char *spdname)
{
	struct fastrpc_file *fl;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		if (fl->spdname && !strcmp(spdname, fl->spdname))
			fastrpc_notify_users_staticpd_pdr(fl);
	}
	spin_unlock(&me->hlock);
}

static void context_list_ctor(struct fastrpc_ctx_lst *me)
{
	INIT_HLIST_HEAD(&me->interrupted);
	INIT_HLIST_HEAD(&me->pending);
}

static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
{
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct smq_invoke_ctx *ictx = NULL, *ctxfree;
	struct hlist_node *n;

	do {
		ctxfree = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
	do {
		ctxfree = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
}

static int fastrpc_file_free(struct fastrpc_file *fl);
static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
{
	struct fastrpc_file *fl, *free;
	struct hlist_node *n;

	do {
		free = NULL;
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
			hlist_del_init(&fl->hn);
			free = fl;
			break;
		}
		spin_unlock(&me->hlock);
		if (free)
			fastrpc_file_free(free);
	} while (free);
}

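/*
 * get_args: build the remote argument descriptors for an invocation.
 * Maps every fd-backed buffer, sizes the metadata and copy regions,
 * allocates the local and message buffers, and fills in the page lists
 * handed to the DSP.
 */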
static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
{
	struct fastrpc_apps *me = &gfa;
	remote_arg64_t *rpra, *lrpra;
	remote_arg_t *lpra = ctx->lpra;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages, *ipage;
	uint32_t sc = ctx->sc;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	int handles, bufs = inbufs + outbufs;
	uintptr_t args;
	size_t rlen = 0, copylen = 0, metalen = 0, lrpralen = 0;
	int i, oix;
	int err = 0;
	int mflags = 0;
	uint64_t *fdlist;
	uint32_t *crclist;
	int64_t *perf_counter = getperfcounter(ctx->fl, PERF_COUNT);

	/* calculate size of the metadata */
	rpra = NULL;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;

	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
	for (i = 0; i < bufs; ++i) {
		uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
		size_t len = lpra[i].buf.len;

		mutex_lock(&ctx->fl->fl_map_mutex);
		if (ctx->fds[i] && (ctx->fds[i] != -1)) {
			unsigned int attrs = 0;

			if (ctx->attrs)
				attrs = ctx->attrs[i];

			fastrpc_mmap_create(ctx->fl, ctx->fds[i],
					attrs, buf, len,
					mflags, &ctx->maps[i]);
		}
		mutex_unlock(&ctx->fl->fl_map_mutex);
		ipage += 1;
	}
	PERF_END);
	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
	mutex_lock(&ctx->fl->fl_map_mutex);
	for (i = bufs; i < bufs + handles; i++) {
		int dmaflags = 0;

		if (ctx->attrs && (ctx->attrs[i] & FASTRPC_ATTR_NOMAP))
			dmaflags = FASTRPC_DMAHANDLE_NOMAP;
		VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
				FASTRPC_ATTR_NOVA, 0, 0, dmaflags, &ctx->maps[i]));
		if (err) {
			mutex_unlock(&ctx->fl->fl_map_mutex);
			goto bail;
		}
		ipage += 1;
	}
	mutex_unlock(&ctx->fl->fl_map_mutex);
	if (!me->legacy) {
		metalen = copylen = (size_t)&ipage[0] +
				(sizeof(uint64_t) * M_FDLIST) +
				(sizeof(uint32_t) * M_CRCLIST);
	} else {
		metalen = copylen = (size_t)&ipage[0];
	}

	/* allocate new local rpra buffer */
	lrpralen = (size_t)&list[0];
	if (lrpralen) {
		err = fastrpc_buf_alloc(ctx->fl, lrpralen, 0, 0, 0, &ctx->lbuf);
		if (err)
			goto bail;
	}
	if (ctx->lbuf->virt)
		memset(ctx->lbuf->virt, 0, lrpralen);

	lrpra = ctx->lbuf->virt;
	ctx->lrpra = lrpra;

	/* calculate len required for copying */
	for (oix = 0; oix < inbufs + outbufs; ++oix) {
		int i = ctx->overps[oix]->raix;
		uintptr_t mstart, mend;
		size_t len = lpra[i].buf.len;

		if (!len)
			continue;
		if (ctx->maps[i])
			continue;
		if (ctx->overps[oix]->offset == 0)
			copylen = ALIGN(copylen, BALIGN);
		mstart = ctx->overps[oix]->mstart;
		mend = ctx->overps[oix]->mend;
		VERIFY(err, (mend - mstart) <= LONG_MAX);
		if (err)
			goto bail;
		copylen += mend - mstart;
		VERIFY(err, copylen >= 0);
		if (err)
			goto bail;
	}
	ctx->used = copylen;

	/* allocate new buffer */
	if (copylen) {
		err = fastrpc_buf_alloc(ctx->fl, copylen, 0, 0, 0, &ctx->buf);
		if (err)
			goto bail;
	}
	if (ctx->buf->virt && metalen <= copylen)
		memset(ctx->buf->virt, 0, metalen);

	/* copy metadata */
	rpra = ctx->buf->virt;
	ctx->rpra = rpra;
	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	ipage = pages;
	args = (uintptr_t)ctx->buf->virt + metalen;
	for (i = 0; i < bufs + handles; ++i) {
		if (lpra[i].buf.len)
			list[i].num = 1;
		else
			list[i].num = 0;
		list[i].pgidx = ipage - pages;
		ipage++;
	}

	/* map ion buffers */
	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
	for (i = 0; rpra && lrpra && i < inbufs + outbufs; ++i) {
		struct fastrpc_mmap *map = ctx->maps[i];
		uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
		size_t len = lpra[i].buf.len;

		rpra[i].buf.pv = lrpra[i].buf.pv = 0;
		rpra[i].buf.len = lrpra[i].buf.len = len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001563 if (!len)
1564 continue;
1565 if (map) {
1566 struct vm_area_struct *vma;
1567 uintptr_t offset;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301568 uint64_t num = buf_num_pages(buf, len);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001569 int idx = list[i].pgidx;
1570
1571 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001572 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001573 } else {
1574 down_read(&current->mm->mmap_sem);
1575 VERIFY(err, NULL != (vma = find_vma(current->mm,
1576 map->va)));
1577 if (err) {
1578 up_read(&current->mm->mmap_sem);
1579 goto bail;
1580 }
1581 offset = buf_page_start(buf) - vma->vm_start;
1582 up_read(&current->mm->mmap_sem);
1583 VERIFY(err, offset < (uintptr_t)map->size);
1584 if (err)
1585 goto bail;
1586 }
1587 pages[idx].addr = map->phys + offset;
1588 pages[idx].size = num << PAGE_SHIFT;
1589 }
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301590 rpra[i].buf.pv = lrpra[i].buf.pv = buf;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001591 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001592 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001593 for (i = bufs; i < bufs + handles; ++i) {
1594 struct fastrpc_mmap *map = ctx->maps[i];
1595
1596 pages[i].addr = map->phys;
1597 pages[i].size = map->size;
1598 }
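	/*
	 * Non-legacy transports reserve an fd list and a CRC list after the
	 * page list; clear them here so the remote side can fill them in.
	 * Both are consumed later in put_args().
	 */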
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301599 if (!me->legacy) {
1600 fdlist = (uint64_t *)&pages[bufs + handles];
1601 for (i = 0; i < M_FDLIST; i++)
1602 fdlist[i] = 0;
1603 crclist = (uint32_t *)&fdlist[M_FDLIST];
1604 memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
1605 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001606
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001607 /* copy non ion buffers */
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301608 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001609 rlen = copylen - metalen;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301610 for (oix = 0; rpra && lrpra && oix < inbufs + outbufs; ++oix) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001611 int i = ctx->overps[oix]->raix;
1612 struct fastrpc_mmap *map = ctx->maps[i];
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301613 size_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001614 uint64_t buf;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301615 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001616
1617 if (!len)
1618 continue;
1619 if (map)
1620 continue;
1621 if (ctx->overps[oix]->offset == 0) {
1622 rlen -= ALIGN(args, BALIGN) - args;
1623 args = ALIGN(args, BALIGN);
1624 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001625 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001626 VERIFY(err, rlen >= mlen);
1627 if (err)
1628 goto bail;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301629 rpra[i].buf.pv = lrpra[i].buf.pv =
1630 (args - ctx->overps[oix]->offset);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001631 pages[list[i].pgidx].addr = ctx->buf->phys -
1632 ctx->overps[oix]->offset +
1633 (copylen - rlen);
1634 pages[list[i].pgidx].addr =
1635 buf_page_start(pages[list[i].pgidx].addr);
1636 buf = rpra[i].buf.pv;
1637 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1638 if (i < inbufs) {
1639 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1640 lpra[i].buf.pv, len);
1641 if (err)
1642 goto bail;
1643 }
1644 args = args + mlen;
1645 rlen -= mlen;
1646 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001647 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001648
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301649 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001650 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1651 int i = ctx->overps[oix]->raix;
1652 struct fastrpc_mmap *map = ctx->maps[i];
1653
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001654 if (map && map->uncached)
1655 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301656 if (ctx->fl->sctx->smmu.coherent &&
1657 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1658 continue;
1659 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1660 continue;
1661
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301662 if (rpra && lrpra && rpra[i].buf.len &&
1663 ctx->overps[oix]->mstart) {
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301664 if (map && map->handle)
1665 msm_ion_do_cache_op(ctx->fl->apps->client,
1666 map->handle,
1667 uint64_to_ptr(rpra[i].buf.pv),
1668 rpra[i].buf.len,
1669 ION_IOC_CLEAN_INV_CACHES);
1670 else
1671 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1672 uint64_to_ptr(rpra[i].buf.pv
1673 + rpra[i].buf.len));
1674 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001675 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001676 PERF_END);
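	/* Fill the DMA descriptors (fd, length, offset) for the in/out handles */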
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301677 for (i = bufs; rpra && lrpra && i < bufs + handles; i++) {
1678 rpra[i].dma.fd = lrpra[i].dma.fd = ctx->fds[i];
1679 rpra[i].dma.len = lrpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1680 rpra[i].dma.offset = lrpra[i].dma.offset =
1681 (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001682 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001683
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001684 bail:
1685 return err;
1686}
1687
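/*
 * Propagate the results of a completed remote call back to the caller:
 * copy unmapped output buffers to user memory, release the maps of mapped
 * output buffers and of any fds the DSP returned in the fd list, and copy
 * the CRC list to user space when the caller asked for it.
 */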
1688static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
1689 remote_arg_t *upra)
1690{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301691 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001692 uint32_t sc = ctx->sc;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001693 struct smq_invoke_buf *list;
1694 struct smq_phy_page *pages;
1695 struct fastrpc_mmap *mmap;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301696 uint64_t *fdlist = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001697 uint32_t *crclist = NULL;
1698
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301699 remote_arg64_t *rpra = ctx->lrpra;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001700 int i, inbufs, outbufs, handles;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001701 int err = 0;
1702
1703 inbufs = REMOTE_SCALARS_INBUFS(sc);
1704 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001705 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1706 list = smq_invoke_buf_start(ctx->rpra, sc);
1707 pages = smq_phy_page_start(sc, list);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301708 if (!me->legacy) {
1709 fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
1710 crclist = (uint32_t *)(fdlist + M_FDLIST);
1711 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001712
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001713 for (i = inbufs; i < inbufs + outbufs; ++i) {
1714 if (!ctx->maps[i]) {
1715 K_COPY_TO_USER(err, kernel,
1716 ctx->lpra[i].buf.pv,
1717 uint64_to_ptr(rpra[i].buf.pv),
1718 rpra[i].buf.len);
1719 if (err)
1720 goto bail;
1721 } else {
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301722 mutex_lock(&ctx->fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05301723 fastrpc_mmap_free(ctx->maps[i], 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301724 mutex_unlock(&ctx->fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05301725 ctx->maps[i] = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001726 }
1727 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301728 mutex_lock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301729 if (fdlist && (inbufs + outbufs + handles)) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001730 for (i = 0; i < M_FDLIST; i++) {
1731 if (!fdlist[i])
1732 break;
1733 if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
Sathish Ambleyae5ee542017-01-16 22:24:23 -08001734 0, 0, &mmap))
c_mtharu7bd6a422017-10-17 18:15:37 +05301735 fastrpc_mmap_free(mmap, 0);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001736 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001737 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301738 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambleybae51902017-07-03 15:00:49 -07001739 if (ctx->crc && crclist && rpra)
c_mtharue1a5ce12017-10-13 20:47:09 +05301740 K_COPY_TO_USER(err, kernel, ctx->crc,
Sathish Ambleybae51902017-07-03 15:00:49 -07001741 crclist, M_CRCLIST*sizeof(uint32_t));
1742
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001743 bail:
1744 return err;
1745}
1746
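/*
 * Flush the cache lines that straddle the unaligned start and end of each
 * output buffer before the call is sent, so a later write-back from the
 * CPU cannot overwrite data the DSP writes into those lines.
 */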
1747static void inv_args_pre(struct smq_invoke_ctx *ctx)
1748{
1749 int i, inbufs, outbufs;
1750 uint32_t sc = ctx->sc;
1751 remote_arg64_t *rpra = ctx->rpra;
1752 uintptr_t end;
1753
1754 inbufs = REMOTE_SCALARS_INBUFS(sc);
1755 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1756 for (i = inbufs; i < inbufs + outbufs; ++i) {
1757 struct fastrpc_mmap *map = ctx->maps[i];
1758
1759 if (map && map->uncached)
1760 continue;
1761 if (!rpra[i].buf.len)
1762 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301763 if (ctx->fl->sctx->smmu.coherent &&
1764 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1765 continue;
1766 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1767 continue;
1768
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001769 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1770 buf_page_start(rpra[i].buf.pv))
1771 continue;
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301772 if (!IS_CACHE_ALIGNED((uintptr_t)
1773 uint64_to_ptr(rpra[i].buf.pv))) {
1774 if (map && map->handle)
1775 msm_ion_do_cache_op(ctx->fl->apps->client,
1776 map->handle,
1777 uint64_to_ptr(rpra[i].buf.pv),
1778 sizeof(uintptr_t),
1779 ION_IOC_CLEAN_INV_CACHES);
1780 else
1781 dmac_flush_range(
1782 uint64_to_ptr(rpra[i].buf.pv), (char *)
1783 uint64_to_ptr(rpra[i].buf.pv + 1));
1784 }
1785
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001786 end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
1787 rpra[i].buf.len);
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301788 if (!IS_CACHE_ALIGNED(end)) {
1789 if (map && map->handle)
1790 msm_ion_do_cache_op(ctx->fl->apps->client,
1791 map->handle,
1792 uint64_to_ptr(end),
1793 sizeof(uintptr_t),
1794 ION_IOC_CLEAN_INV_CACHES);
1795 else
1796 dmac_flush_range((char *)end,
1797 (char *)end + 1);
1798 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001799 }
1800}
1801
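/*
 * Invalidate the cached contents of the output buffers after the remote
 * call completes so the CPU observes what the DSP wrote; uncached,
 * coherent and metadata-sharing buffers are skipped.
 */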
1802static void inv_args(struct smq_invoke_ctx *ctx)
1803{
1804 int i, inbufs, outbufs;
1805 uint32_t sc = ctx->sc;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301806 remote_arg64_t *rpra = ctx->lrpra;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001807
1808 inbufs = REMOTE_SCALARS_INBUFS(sc);
1809 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1810 for (i = inbufs; i < inbufs + outbufs; ++i) {
1811 struct fastrpc_mmap *map = ctx->maps[i];
1812
1813 if (map && map->uncached)
1814 continue;
1815 if (!rpra[i].buf.len)
1816 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301817 if (ctx->fl->sctx->smmu.coherent &&
1818 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1819 continue;
1820 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1821 continue;
1822
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001823 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1824 buf_page_start(rpra[i].buf.pv)) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001825 continue;
1826 }
1827 if (map && map->handle)
1828 msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
1829 (char *)uint64_to_ptr(rpra[i].buf.pv),
1830 rpra[i].buf.len, ION_IOC_INV_CACHES);
1831 else
1832 dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
1833 (char *)uint64_to_ptr(rpra[i].buf.pv
1834 + rpra[i].buf.len));
1835 }
1836
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001837}
1838
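/*
 * Build the smq_msg for this context (tgid, session-tagged tid, context id
 * combined with the PD type, handle, scalars and the physical page of the
 * argument buffer) and transmit it over glink, or over SMD on legacy
 * targets.
 */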
1839static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
1840 uint32_t kernel, uint32_t handle)
1841{
1842 struct smq_msg *msg = &ctx->msg;
1843 struct fastrpc_file *fl = ctx->fl;
1844 struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301845 int err = 0, len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001846
c_mtharue1a5ce12017-10-13 20:47:09 +05301847 VERIFY(err, NULL != channel_ctx->chan);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001848 if (err)
1849 goto bail;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301850 msg->pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001851 msg->tid = current->pid;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301852 if (fl->sessionid)
1853 msg->tid |= (1 << SESSION_ID_INDEX);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001854 if (kernel)
1855 msg->pid = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301856 msg->invoke.header.ctx = ctx->ctxid | fl->pd;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001857 msg->invoke.header.handle = handle;
1858 msg->invoke.header.sc = ctx->sc;
1859 msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
1860 msg->invoke.page.size = buf_page_size(ctx->used);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301861 if (fl->apps->glink) {
1862 if (fl->ssrcount != channel_ctx->ssrcount) {
1863 err = -ECONNRESET;
1864 goto bail;
1865 }
1866 VERIFY(err, channel_ctx->link.port_state ==
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001867 FASTRPC_LINK_CONNECTED);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301868 if (err)
1869 goto bail;
1870 err = glink_tx(channel_ctx->chan,
1871 (void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
1872 GLINK_TX_REQ_INTENT);
1873 } else {
1874 spin_lock(&fl->apps->hlock);
1875 len = smd_write((smd_channel_t *)
1876 channel_ctx->chan,
1877 msg, sizeof(*msg));
1878 spin_unlock(&fl->apps->hlock);
1879 VERIFY(err, len == sizeof(*msg));
1880 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001881 bail:
1882 return err;
1883}
1884
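/*
 * Drain response packets from the SMD channel, validate the context index
 * encoded in rsp.ctx against the global context table and complete the
 * waiting invocation with the remote return value.
 */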
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301885static void fastrpc_smd_read_handler(int cid)
1886{
1887 struct fastrpc_apps *me = &gfa;
1888 struct smq_invoke_rsp rsp = {0};
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301889 int ret = 0, err = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301890 uint32_t index;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301891
1892 do {
1893 ret = smd_read_from_cb(me->channel[cid].chan, &rsp,
1894 sizeof(rsp));
1895 if (ret != sizeof(rsp))
1896 break;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301897
1898 index = (uint32_t)((rsp.ctx & FASTRPC_CTXID_MASK) >> 4);
1899 VERIFY(err, index < FASTRPC_CTX_MAX);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301900 if (err)
1901 goto bail;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301902
1903 VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
1904 if (err)
1905 goto bail;
1906
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05301907 VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp.ctx & ~3)) &&
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301908 me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
1909 if (err)
1910 goto bail;
1911
1912 context_notify_user(me->ctxtable[index], rsp.retval);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301913 } while (ret == sizeof(rsp));
1914bail:
1915 if (err)
1916 pr_err("adsprpc: invalid response or context\n");
1917
1918}
1919
1920static void smd_event_handler(void *priv, unsigned int event)
1921{
1922 struct fastrpc_apps *me = &gfa;
1923 int cid = (int)(uintptr_t)priv;
1924
1925 switch (event) {
1926 case SMD_EVENT_OPEN:
1927 complete(&me->channel[cid].workport);
1928 break;
1929 case SMD_EVENT_CLOSE:
1930 fastrpc_notify_drivers(me, cid);
1931 break;
1932 case SMD_EVENT_DATA:
1933 fastrpc_smd_read_handler(cid);
1934 break;
1935 }
1936}
1937
1938
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001939static void fastrpc_init(struct fastrpc_apps *me)
1940{
1941 int i;
1942
1943 INIT_HLIST_HEAD(&me->drivers);
Tharun Kumar Merugubcd6fbf2018-01-04 17:49:34 +05301944 INIT_HLIST_HEAD(&me->maps);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001945 spin_lock_init(&me->hlock);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301946 spin_lock_init(&me->ctxlock);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05301947 mutex_init(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001948 me->channel = &gcinfo[0];
1949 for (i = 0; i < NUM_CHANNELS; i++) {
1950 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301951 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001952 me->channel[i].sesscount = 0;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05301953 /* All channels are secure by default except CDSP */
1954 me->channel[i].secure = SECURE_CHANNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001955 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05301956 /* Set CDSP channel to non secure */
1957 me->channel[CDSP_DOMAIN_ID].secure = NON_SECURE_CHANNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001958}
1959
1960static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1961
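/*
 * Core invocation path: reject user calls to the kernel static handle,
 * restore a previously interrupted context or allocate a new one, marshal
 * the arguments (get_args), do pre-invoke cache maintenance, send the
 * message and wait for the DSP to respond, then invalidate caches, check
 * the remote return value and unmarshal the outputs (put_args).
 */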
1962static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
1963 uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07001964 struct fastrpc_ioctl_invoke_crc *inv)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001965{
c_mtharue1a5ce12017-10-13 20:47:09 +05301966 struct smq_invoke_ctx *ctx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001967 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
1968 int cid = fl->cid;
1969 int interrupted = 0;
1970 int err = 0;
Maria Yu757199c2017-09-22 16:05:49 +08001971 struct timespec invoket = {0};
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301972 int64_t *perf_counter = getperfcounter(fl, PERF_COUNT);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001973
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001974 if (fl->profile)
1975 getnstimeofday(&invoket);
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301976
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05301977 if (!kernel) {
1978 VERIFY(err, invoke->handle != FASTRPC_STATIC_HANDLE_KERNEL);
1979 if (err) {
1980			pr_err("adsprpc: ERROR: %s: user application %s trying to send a kernel RPC message to channel %d\n",
1981 __func__, current->comm, cid);
1982 goto bail;
1983 }
1984 }
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301985
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301986 VERIFY(err, fl->sctx != NULL);
1987 if (err)
1988 goto bail;
1989 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1990 if (err)
1991 goto bail;
1992
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001993 if (!kernel) {
1994 VERIFY(err, 0 == context_restore_interrupted(fl, inv,
1995 &ctx));
1996 if (err)
1997 goto bail;
1998 if (fl->sctx->smmu.faults)
1999 err = FASTRPC_ENOSUCH;
2000 if (err)
2001 goto bail;
2002 if (ctx)
2003 goto wait;
2004 }
2005
2006 VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
2007 if (err)
2008 goto bail;
2009
2010 if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302011 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_GETARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002012 VERIFY(err, 0 == get_args(kernel, ctx));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002013 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002014 if (err)
2015 goto bail;
2016 }
2017
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302018 if (!fl->sctx->smmu.coherent) {
2019 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002020 inv_args_pre(ctx);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302021 PERF_END);
2022 }
2023
2024 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_LINK),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002025 VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002026 PERF_END);
2027
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002028 if (err)
2029 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002030 wait:
2031 if (kernel)
2032 wait_for_completion(&ctx->work);
2033 else {
2034 interrupted = wait_for_completion_interruptible(&ctx->work);
2035 VERIFY(err, 0 == (err = interrupted));
2036 if (err)
2037 goto bail;
2038 }
Sathish Ambleyc432b502017-06-05 12:03:42 -07002039
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302040 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
Sathish Ambleyc432b502017-06-05 12:03:42 -07002041 if (!fl->sctx->smmu.coherent)
2042 inv_args(ctx);
2043 PERF_END);
2044
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002045 VERIFY(err, 0 == (err = ctx->retval));
2046 if (err)
2047 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002048
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302049 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_PUTARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002050 VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002051 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002052 if (err)
2053 goto bail;
2054 bail:
2055 if (ctx && interrupted == -ERESTARTSYS)
2056 context_save_interrupted(ctx);
2057 else if (ctx)
2058 context_free(ctx);
2059 if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
2060		err = -ECONNRESET;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002061
2062 if (fl->profile && !interrupted) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302063 if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER) {
2064 int64_t *count = GET_COUNTER(perf_counter, PERF_INVOKE);
2065
2066 if (count)
2067 *count += getnstimediff(&invoket);
2068 }
2069 if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX) {
2070 int64_t *count = GET_COUNTER(perf_counter, PERF_COUNT);
2071
2072 if (count)
2073 *count = *count+1;
2074 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002075 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002076 return err;
2077}
2078
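/* Map a protection-domain service name to its session slot on channel 0 */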
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302079static int fastrpc_get_adsp_session(char *name, int *session)
2080{
2081 struct fastrpc_apps *me = &gfa;
2082 int err = 0, i;
2083
2084 for (i = 0; i < NUM_SESSIONS; i++) {
2085 if (!me->channel[0].spd[i].spdname)
2086 continue;
2087 if (!strcmp(name, me->channel[0].spd[i].spdname))
2088 break;
2089 }
2090 VERIFY(err, i < NUM_SESSIONS);
2091 if (err)
2092 goto bail;
2093 *session = i;
2094bail:
2095 return err;
2096}
2097
2098static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl);
Sathish Ambley36849af2017-02-02 09:35:55 -08002099static int fastrpc_channel_open(struct fastrpc_file *fl);
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302100static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl);
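
/*
 * Create or attach the remote process for this file, depending on
 * init->flags: ATTACH/ATTACH_SENSORS attach to the guest-OS or sensors PD,
 * CREATE spawns a dynamic user PD (donating kernel-allocated init memory
 * and passing the ELF file), and CREATE_STATIC brings up a named static PD
 * such as "audiopd", hyp-assigning the remote heap on first use.
 */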
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002101static int fastrpc_init_process(struct fastrpc_file *fl,
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002102 struct fastrpc_ioctl_init_attrs *uproc)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002103{
2104 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302105 struct fastrpc_apps *me = &gfa;
Sathish Ambleybae51902017-07-03 15:00:49 -07002106 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002107 struct fastrpc_ioctl_init *init = &uproc->init;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002108 struct smq_phy_page pages[1];
c_mtharue1a5ce12017-10-13 20:47:09 +05302109 struct fastrpc_mmap *file = NULL, *mem = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302110 struct fastrpc_buf *imem = NULL;
2111 unsigned long imem_dma_attr = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302112 char *proc_name = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002113
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302114 VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
Sathish Ambley36849af2017-02-02 09:35:55 -08002115 if (err)
2116 goto bail;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302117 if (init->flags == FASTRPC_INIT_ATTACH ||
2118 init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002119 remote_arg_t ra[1];
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302120 int tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002121
2122 ra[0].buf.pv = (void *)&tgid;
2123 ra[0].buf.len = sizeof(tgid);
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302124 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002125 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
2126 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302127 ioctl.fds = NULL;
2128 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002129 ioctl.crc = NULL;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302130 if (init->flags == FASTRPC_INIT_ATTACH)
2131 fl->pd = 0;
2132 else if (init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
2133 fl->spdname = SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME;
2134 fl->pd = 2;
2135 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002136 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2137 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2138 if (err)
2139 goto bail;
2140 } else if (init->flags == FASTRPC_INIT_CREATE) {
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002141 remote_arg_t ra[6];
2142 int fds[6];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002143 int mflags = 0;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302144 int memlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002145 struct {
2146 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302147 unsigned int namelen;
2148 unsigned int filelen;
2149 unsigned int pageslen;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002150 int attrs;
2151 int siglen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002152 } inbuf;
2153
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302154 inbuf.pgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002155 inbuf.namelen = strlen(current->comm) + 1;
2156 inbuf.filelen = init->filelen;
2157 fl->pd = 1;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302158
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05302159 VERIFY(err, access_ok(0, (void __user *)init->file,
2160 init->filelen));
2161 if (err)
2162 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002163 if (init->filelen) {
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302164 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002165 VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
2166 init->file, init->filelen, mflags, &file));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302167 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002168 if (err)
2169 goto bail;
2170 }
2171 inbuf.pageslen = 1;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302172
2173 VERIFY(err, !init->mem);
2174 if (err) {
2175 err = -EINVAL;
2176 pr_err("adsprpc: %s: %s: ERROR: donated memory allocated in userspace\n",
2177 current->comm, __func__);
2178 goto bail;
2179 }
2180 memlen = ALIGN(max(1024*1024*3, (int)init->filelen * 4),
2181 1024*1024);
2182 imem_dma_attr = DMA_ATTR_EXEC_MAPPING |
2183 DMA_ATTR_NO_KERNEL_MAPPING |
2184 DMA_ATTR_FORCE_NON_COHERENT;
2185 err = fastrpc_buf_alloc(fl, memlen, imem_dma_attr, 0, 0, &imem);
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05302186 if (err)
2187 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302188 fl->init_mem = imem;
2189
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002190 inbuf.pageslen = 1;
2191 ra[0].buf.pv = (void *)&inbuf;
2192 ra[0].buf.len = sizeof(inbuf);
2193 fds[0] = 0;
2194
2195 ra[1].buf.pv = (void *)current->comm;
2196 ra[1].buf.len = inbuf.namelen;
2197 fds[1] = 0;
2198
2199 ra[2].buf.pv = (void *)init->file;
2200 ra[2].buf.len = inbuf.filelen;
2201 fds[2] = init->filefd;
2202
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302203 pages[0].addr = imem->phys;
2204 pages[0].size = imem->size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002205 ra[3].buf.pv = (void *)pages;
2206 ra[3].buf.len = 1 * sizeof(*pages);
2207 fds[3] = 0;
2208
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002209 inbuf.attrs = uproc->attrs;
2210 ra[4].buf.pv = (void *)&(inbuf.attrs);
2211 ra[4].buf.len = sizeof(inbuf.attrs);
2212 fds[4] = 0;
2213
2214 inbuf.siglen = uproc->siglen;
2215 ra[5].buf.pv = (void *)&(inbuf.siglen);
2216 ra[5].buf.len = sizeof(inbuf.siglen);
2217 fds[5] = 0;
2218
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302219 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002220 ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002221 if (uproc->attrs)
2222 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002223 ioctl.inv.pra = ra;
2224 ioctl.fds = fds;
c_mtharue1a5ce12017-10-13 20:47:09 +05302225 ioctl.attrs = NULL;
2226 ioctl.crc = NULL;
2227 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2228 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2229 if (err)
2230 goto bail;
2231 } else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
2232 remote_arg_t ra[3];
2233 uint64_t phys = 0;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302234 size_t size = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302235 int fds[3];
2236 struct {
2237 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302238 unsigned int namelen;
2239 unsigned int pageslen;
c_mtharue1a5ce12017-10-13 20:47:09 +05302240 } inbuf;
2241
2242 if (!init->filelen)
2243 goto bail;
2244
2245 proc_name = kzalloc(init->filelen, GFP_KERNEL);
2246 VERIFY(err, !IS_ERR_OR_NULL(proc_name));
2247 if (err)
2248 goto bail;
2249 VERIFY(err, 0 == copy_from_user((void *)proc_name,
2250 (void __user *)init->file, init->filelen));
2251 if (err)
2252 goto bail;
2253
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302254 fl->pd = 1;
c_mtharue1a5ce12017-10-13 20:47:09 +05302255 inbuf.pgid = current->tgid;
c_mtharu81a0aa72017-11-07 16:13:21 +05302256 inbuf.namelen = init->filelen;
c_mtharue1a5ce12017-10-13 20:47:09 +05302257 inbuf.pageslen = 0;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302258
2259 if (!strcmp(proc_name, "audiopd")) {
2260 fl->spdname = AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME;
2261 VERIFY(err, !fastrpc_mmap_remove_pdr(fl));
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05302262 if (err)
2263 goto bail;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302264 }
2265
c_mtharue1a5ce12017-10-13 20:47:09 +05302266 if (!me->staticpd_flags) {
2267 inbuf.pageslen = 1;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302268 mutex_lock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302269 VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
2270 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
2271 &mem));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302272 mutex_unlock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302273 if (err)
2274 goto bail;
2275 phys = mem->phys;
2276 size = mem->size;
2277 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302278 hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
2279 me->channel[fl->cid].rhvm.vmperm,
2280 me->channel[fl->cid].rhvm.vmcount));
c_mtharue1a5ce12017-10-13 20:47:09 +05302281 if (err) {
2282				pr_err("ADSPRPC: hyp_assign_phys fail err %d\n",
2283 err);
2284 pr_err("map->phys %llx, map->size %d\n",
2285 phys, (int)size);
2286 goto bail;
2287 }
2288 me->staticpd_flags = 1;
2289 }
2290
2291 ra[0].buf.pv = (void *)&inbuf;
2292 ra[0].buf.len = sizeof(inbuf);
2293 fds[0] = 0;
2294
2295 ra[1].buf.pv = (void *)proc_name;
2296 ra[1].buf.len = inbuf.namelen;
2297 fds[1] = 0;
2298
2299 pages[0].addr = phys;
2300 pages[0].size = size;
2301
2302 ra[2].buf.pv = (void *)pages;
2303 ra[2].buf.len = sizeof(*pages);
2304 fds[2] = 0;
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302305 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
c_mtharue1a5ce12017-10-13 20:47:09 +05302306
2307 ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
2308 ioctl.inv.pra = ra;
2309 ioctl.fds = NULL;
2310 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002311 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002312 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2313 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2314 if (err)
2315 goto bail;
2316 } else {
2317 err = -ENOTTY;
2318 }
2319bail:
c_mtharud91205a2017-11-07 16:01:06 +05302320 kfree(proc_name);
c_mtharue1a5ce12017-10-13 20:47:09 +05302321 if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
2322 me->staticpd_flags = 0;
2323 if (mem && err) {
2324 if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
2325 hyp_assign_phys(mem->phys, (uint64_t)mem->size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302326 me->channel[fl->cid].rhvm.vmid,
2327 me->channel[fl->cid].rhvm.vmcount,
2328 hlosvm, hlosvmperm, 1);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302329 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302330 fastrpc_mmap_free(mem, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302331 mutex_unlock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302332 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302333 if (file) {
2334 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302335 fastrpc_mmap_free(file, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302336 mutex_unlock(&fl->fl_map_mutex);
2337 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002338 return err;
2339}
2340
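/* Ask the DSP to tear down the remote process associated with this tgid */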
2341static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
2342{
2343 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07002344 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002345 remote_arg_t ra[1];
2346 int tgid = 0;
2347
Sathish Ambley36849af2017-02-02 09:35:55 -08002348 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
2349 if (err)
2350 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302351 VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002352 if (err)
2353 goto bail;
2354 tgid = fl->tgid;
2355 ra[0].buf.pv = (void *)&tgid;
2356 ra[0].buf.len = sizeof(tgid);
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302357 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002358 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
2359 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302360 ioctl.fds = NULL;
2361 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002362 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002363 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2364 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2365bail:
2366 return err;
2367}
2368
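/*
 * Map the given physical range on the DSP and return the remote address.
 * ADSP_MMAP_HEAP_ADDR ranges are additionally protected through an SCM
 * call; ADSP_MMAP_REMOTE_HEAP_ADDR ranges are hyp-assigned to the remote
 * VM.
 */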
2369static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302370 uintptr_t va, uint64_t phys,
2371 size_t size, uintptr_t *raddr)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002372{
Sathish Ambleybae51902017-07-03 15:00:49 -07002373 struct fastrpc_ioctl_invoke_crc ioctl;
c_mtharu63ffc012017-11-16 15:26:56 +05302374 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002375 struct smq_phy_page page;
2376 int num = 1;
2377 remote_arg_t ra[3];
2378 int err = 0;
2379 struct {
2380 int pid;
2381 uint32_t flags;
2382 uintptr_t vaddrin;
2383 int num;
2384 } inargs;
2385 struct {
2386 uintptr_t vaddrout;
2387 } routargs;
2388
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302389 inargs.pid = fl->tgid;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302390 inargs.vaddrin = (uintptr_t)va;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002391 inargs.flags = flags;
2392 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
2393 ra[0].buf.pv = (void *)&inargs;
2394 ra[0].buf.len = sizeof(inargs);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302395 page.addr = phys;
2396 page.size = size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002397 ra[1].buf.pv = (void *)&page;
2398 ra[1].buf.len = num * sizeof(page);
2399
2400 ra[2].buf.pv = (void *)&routargs;
2401 ra[2].buf.len = sizeof(routargs);
2402
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302403 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002404 if (fl->apps->compat)
2405 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
2406 else
2407 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
2408 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302409 ioctl.fds = NULL;
2410 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002411 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002412 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2413 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302414 *raddr = (uintptr_t)routargs.vaddrout;
c_mtharue1a5ce12017-10-13 20:47:09 +05302415 if (err)
2416 goto bail;
2417 if (flags == ADSP_MMAP_HEAP_ADDR) {
2418 struct scm_desc desc = {0};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002419
c_mtharue1a5ce12017-10-13 20:47:09 +05302420 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302421 desc.args[1] = phys;
2422 desc.args[2] = size;
c_mtharue1a5ce12017-10-13 20:47:09 +05302423 desc.arginfo = SCM_ARGS(3);
2424 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
2425 TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
2426 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302427 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302428 hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
2429 me->channel[fl->cid].rhvm.vmperm,
2430 me->channel[fl->cid].rhvm.vmcount));
c_mtharue1a5ce12017-10-13 20:47:09 +05302431 if (err)
2432 goto bail;
2433 }
2434bail:
2435 return err;
2436}
2437
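/*
 * Undo the protection applied when a heap mapping was created: for
 * ADSP_MMAP_HEAP_ADDR fetch a key (skey) from the DSP and issue the SCM
 * unprotect call, for ADSP_MMAP_REMOTE_HEAP_ADDR hyp-assign the pages back
 * to HLOS.
 */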
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302438static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, uint64_t phys,
2439 size_t size, uint32_t flags)
c_mtharue1a5ce12017-10-13 20:47:09 +05302440{
2441 int err = 0;
c_mtharu63ffc012017-11-16 15:26:56 +05302442 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +05302443 int destVM[1] = {VMID_HLOS};
2444 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
2445
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302446 if (flags == ADSP_MMAP_HEAP_ADDR) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302447 struct fastrpc_ioctl_invoke_crc ioctl;
2448 struct scm_desc desc = {0};
2449 remote_arg_t ra[1];
2451 struct {
2452 uint8_t skey;
2453 } routargs;
2454
2455 ra[0].buf.pv = (void *)&routargs;
2456 ra[0].buf.len = sizeof(routargs);
2457
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302458 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
c_mtharue1a5ce12017-10-13 20:47:09 +05302459 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
2460 ioctl.inv.pra = ra;
2461 ioctl.fds = NULL;
2462 ioctl.attrs = NULL;
2463 ioctl.crc = NULL;
2464 if (fl == NULL)
2465 goto bail;
2466
2467 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2468 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2469 if (err)
2470 goto bail;
2471 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302472 desc.args[1] = phys;
2473 desc.args[2] = size;
c_mtharue1a5ce12017-10-13 20:47:09 +05302474 desc.args[3] = routargs.skey;
2475 desc.arginfo = SCM_ARGS(4);
2476 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
2477 TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302478 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2479 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302480 me->channel[fl->cid].rhvm.vmid,
2481 me->channel[fl->cid].rhvm.vmcount,
2482 destVM, destVMperm, 1));
c_mtharue1a5ce12017-10-13 20:47:09 +05302483 if (err)
2484 goto bail;
2485 }
2486
2487bail:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002488 return err;
2489}
2490
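/*
 * Unmap raddr/size on the DSP and, for heap mappings, restore memory
 * ownership and protection via fastrpc_munmap_on_dsp_rh().
 */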
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302491static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl, uintptr_t raddr,
2492 uint64_t phys, size_t size, uint32_t flags)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002493{
Sathish Ambleybae51902017-07-03 15:00:49 -07002494 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002495 remote_arg_t ra[1];
2496 int err = 0;
2497 struct {
2498 int pid;
2499 uintptr_t vaddrout;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302500 size_t size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002501 } inargs;
2502
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302503 inargs.pid = fl->tgid;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302504 inargs.size = size;
2505 inargs.vaddrout = raddr;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002506 ra[0].buf.pv = (void *)&inargs;
2507 ra[0].buf.len = sizeof(inargs);
2508
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302509 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002510 if (fl->apps->compat)
2511 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
2512 else
2513 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
2514 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302515 ioctl.fds = NULL;
2516 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002517 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002518 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2519 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
c_mtharue1a5ce12017-10-13 20:47:09 +05302520 if (err)
2521 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302522 if (flags == ADSP_MMAP_HEAP_ADDR ||
2523 flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2524 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, phys, size, flags));
c_mtharue1a5ce12017-10-13 20:47:09 +05302525 if (err)
2526 goto bail;
2527 }
2528bail:
2529 return err;
2530}
2531
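/*
 * On subsystem restart, walk the global list of remote-heap mappings,
 * un-protect each one, dump it through the ramdump device when enabled and
 * free it; the mapping is put back on the list if cleanup fails.
 */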
2532static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
2533{
2534 struct fastrpc_mmap *match = NULL, *map = NULL;
2535 struct hlist_node *n = NULL;
2536 int err = 0, ret = 0;
2537 struct fastrpc_apps *me = &gfa;
2538 struct ramdump_segment *ramdump_segments_rh = NULL;
2539
2540 do {
2541 match = NULL;
2542 spin_lock(&me->hlock);
2543 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
2544 match = map;
2545 hlist_del_init(&map->hn);
2546 break;
2547 }
2548 spin_unlock(&me->hlock);
2549
2550 if (match) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302551 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match->phys,
2552 match->size, match->flags));
c_mtharue1a5ce12017-10-13 20:47:09 +05302553 if (err)
2554 goto bail;
2555 if (me->channel[0].ramdumpenabled) {
2556 ramdump_segments_rh = kcalloc(1,
2557 sizeof(struct ramdump_segment), GFP_KERNEL);
2558 if (ramdump_segments_rh) {
2559 ramdump_segments_rh->address =
2560 match->phys;
2561 ramdump_segments_rh->size = match->size;
2562 ret = do_elf_ramdump(
2563 me->channel[0].remoteheap_ramdump_dev,
2564 ramdump_segments_rh, 1);
2565 if (ret < 0)
2566						pr_err("ADSPRPC: unable to dump heap\n");
2567 kfree(ramdump_segments_rh);
2568 }
2569 }
c_mtharu7bd6a422017-10-17 18:15:37 +05302570 fastrpc_mmap_free(match, 0);
c_mtharue1a5ce12017-10-13 20:47:09 +05302571 }
2572 } while (match);
2573bail:
2574 if (err && match)
2575 fastrpc_mmap_add(match);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002576 return err;
2577}
2578
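/*
 * On audio PD restart, unmap the remote heap if the PD restart count has
 * changed since the last check, and fail with -ENOTCONN while the PD is
 * not up.
 */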
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302579static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl)
2580{
2581 struct fastrpc_apps *me = &gfa;
2582 int session = 0, err = 0;
2583
2584 VERIFY(err, !fastrpc_get_adsp_session(
2585 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
2586 if (err)
2587 goto bail;
2588 if (me->channel[fl->cid].spd[session].pdrcount !=
2589 me->channel[fl->cid].spd[session].prevpdrcount) {
2590 if (fastrpc_mmap_remove_ssr(fl))
2591 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
2592 me->channel[fl->cid].spd[session].prevpdrcount =
2593 me->channel[fl->cid].spd[session].pdrcount;
2594 }
2595 if (!me->channel[fl->cid].spd[session].ispdup) {
2596 VERIFY(err, 0);
2597 if (err) {
2598 err = -ENOTCONN;
2599 goto bail;
2600 }
2601 }
2602bail:
2603 return err;
2604}
2605
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002606static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302607 size_t len, struct fastrpc_mmap **ppmap);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002608
2609static void fastrpc_mmap_add(struct fastrpc_mmap *map);
2610
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05302611static inline void get_fastrpc_ioctl_mmap_64(
2612 struct fastrpc_ioctl_mmap_64 *mmap64,
2613 struct fastrpc_ioctl_mmap *immap)
2614{
2615 immap->fd = mmap64->fd;
2616 immap->flags = mmap64->flags;
2617 immap->vaddrin = (uintptr_t)mmap64->vaddrin;
2618 immap->size = mmap64->size;
2619}
2620
2621static inline void put_fastrpc_ioctl_mmap_64(
2622 struct fastrpc_ioctl_mmap_64 *mmap64,
2623 struct fastrpc_ioctl_mmap *immap)
2624{
2625 mmap64->vaddrout = (uint64_t)immap->vaddrout;
2626}
2627
2628static inline void get_fastrpc_ioctl_munmap_64(
2629 struct fastrpc_ioctl_munmap_64 *munmap64,
2630 struct fastrpc_ioctl_munmap *imunmap)
2631{
2632 imunmap->vaddrout = (uintptr_t)munmap64->vaddrout;
2633 imunmap->size = munmap64->size;
2634}
2635
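/*
 * Handle the munmap ioctl: first look for a kernel-allocated
 * ADSP_MMAP_ADD_PAGES buffer matching the address and size, otherwise
 * remove the matching user mapping, unmap it on the DSP and free it; the
 * mapping is restored if the remote unmap fails.
 */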
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002636static int fastrpc_internal_munmap(struct fastrpc_file *fl,
2637 struct fastrpc_ioctl_munmap *ud)
2638{
2639 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302640 struct fastrpc_mmap *map = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302641 struct fastrpc_buf *rbuf = NULL, *free = NULL;
2642 struct hlist_node *n;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002643
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302644 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302645
2646 spin_lock(&fl->hlock);
2647 hlist_for_each_entry_safe(rbuf, n, &fl->remote_bufs, hn_rem) {
2648 if (rbuf->raddr && (rbuf->flags == ADSP_MMAP_ADD_PAGES)) {
2649 if ((rbuf->raddr == ud->vaddrout) &&
2650 (rbuf->size == ud->size)) {
2651 free = rbuf;
2652 break;
2653 }
2654 }
2655 }
2656 spin_unlock(&fl->hlock);
2657
2658 if (free) {
2659 VERIFY(err, !fastrpc_munmap_on_dsp(fl, free->raddr,
2660 free->phys, free->size, free->flags));
2661 if (err)
2662 goto bail;
2663 fastrpc_buf_free(rbuf, 0);
2664 mutex_unlock(&fl->map_mutex);
2665 return err;
2666 }
2667
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302668 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002669 VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302670 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002671 if (err)
2672 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302673 VERIFY(err, !fastrpc_munmap_on_dsp(fl, map->raddr,
2674 map->phys, map->size, map->flags));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002675 if (err)
2676 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302677 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302678 fastrpc_mmap_free(map, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302679 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002680bail:
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302681 if (err && map) {
2682 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002683 fastrpc_mmap_add(map);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302684 mutex_unlock(&fl->fl_map_mutex);
2685 }
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302686 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002687 return err;
2688}
2689
c_mtharu7bd6a422017-10-17 18:15:37 +05302690static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
2691 struct fastrpc_ioctl_munmap_fd *ud) {
2692 int err = 0;
2693 struct fastrpc_mmap *map = NULL;
2694
2695 VERIFY(err, (fl && ud));
2696 if (err)
2697 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302698 mutex_lock(&fl->fl_map_mutex);
Tharun Kumar Merugu09fc6152018-02-16 13:13:12 +05302699 if (fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
2700 pr_err("adsprpc: mapping not found to unmap %d va %llx %x\n",
c_mtharu7bd6a422017-10-17 18:15:37 +05302701 ud->fd, (unsigned long long)ud->va,
2702 (unsigned int)ud->len);
2703 err = -1;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302704 mutex_unlock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302705 goto bail;
2706 }
2707 if (map)
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302708 fastrpc_mmap_free(map, 0);
2709 mutex_unlock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302710bail:
2711 return err;
2712}
2713
2714
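/*
 * Handle the mmap ioctl: ADSP_MMAP_ADD_PAGES allocates a kernel DMA buffer
 * and maps it on the DSP; any other flag creates (or reuses) a
 * fastrpc_mmap for the caller's buffer or dma-buf fd and maps that. The
 * DSP-side address is returned in vaddrout.
 */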
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002715static int fastrpc_internal_mmap(struct fastrpc_file *fl,
2716 struct fastrpc_ioctl_mmap *ud)
2717{
2718
c_mtharue1a5ce12017-10-13 20:47:09 +05302719 struct fastrpc_mmap *map = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302720 struct fastrpc_buf *rbuf = NULL;
2721 unsigned long dma_attr = 0;
2722 uintptr_t raddr = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002723 int err = 0;
2724
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302725 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302726 if (ud->flags == ADSP_MMAP_ADD_PAGES) {
2727 if (ud->vaddrin) {
2728 err = -EINVAL;
2729 pr_err("adsprpc: %s: %s: ERROR: adding user allocated pages is not supported\n",
2730 current->comm, __func__);
2731 goto bail;
2732 }
2733 dma_attr = DMA_ATTR_EXEC_MAPPING |
2734 DMA_ATTR_NO_KERNEL_MAPPING |
2735 DMA_ATTR_FORCE_NON_COHERENT;
2736 err = fastrpc_buf_alloc(fl, ud->size, dma_attr, ud->flags,
2737 1, &rbuf);
2738 if (err)
2739 goto bail;
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302740 err = fastrpc_mmap_on_dsp(fl, ud->flags, 0,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302741 rbuf->phys, rbuf->size, &raddr);
2742 if (err)
2743 goto bail;
2744 rbuf->raddr = raddr;
2745 } else {
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302746
2747 uintptr_t va_to_dsp;
2748
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302749 mutex_lock(&fl->fl_map_mutex);
2750 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
2751 ud->size, ud->flags, 1, &map)) {
2752 mutex_unlock(&fl->fl_map_mutex);
2753 mutex_unlock(&fl->map_mutex);
2754 return 0;
2755 }
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302756
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302757 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
2758 (uintptr_t)ud->vaddrin, ud->size,
2759 ud->flags, &map));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302760 mutex_unlock(&fl->fl_map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302761 if (err)
2762 goto bail;
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302763
2764 if (ud->flags == ADSP_MMAP_HEAP_ADDR ||
2765 ud->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
2766 va_to_dsp = 0;
2767 else
2768 va_to_dsp = (uintptr_t)map->va;
2769 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, va_to_dsp,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302770 map->phys, map->size, &raddr));
2771 if (err)
2772 goto bail;
2773 map->raddr = raddr;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302774 }
Mohammed Nayeem Ur Rahman3ac5d322018-09-24 13:54:08 +05302775 ud->vaddrout = raddr;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002776 bail:
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302777 if (err && map) {
2778 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302779 fastrpc_mmap_free(map, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302780 mutex_unlock(&fl->fl_map_mutex);
2781 }
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302782 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002783 return err;
2784}
2785
2786static void fastrpc_channel_close(struct kref *kref)
2787{
2788 struct fastrpc_apps *me = &gfa;
2789 struct fastrpc_channel_ctx *ctx;
2790 int cid;
2791
2792 ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
2793 cid = ctx - &gcinfo[0];
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05302794 if (!me->glink)
2795 smd_close(ctx->chan);
2796 else
2797 fastrpc_glink_close(ctx->chan, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05302798 ctx->chan = NULL;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302799 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002800 pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
2801 MAJOR(me->dev_no), cid);
2802}
2803
2804static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
2805
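/*
 * Pick an SMMU context-bank session on the channel that is free and
 * matches the requested security type (or shares a context bank), falling
 * back to the default device when the channel has no sessions.
 */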
2806static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302807 int secure, int sharedcb, struct fastrpc_session_ctx **session)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002808{
2809 struct fastrpc_apps *me = &gfa;
2810 int idx = 0, err = 0;
2811
2812 if (chan->sesscount) {
2813 for (idx = 0; idx < chan->sesscount; ++idx) {
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302814 if ((sharedcb && chan->session[idx].smmu.sharedcb) ||
2815 (!chan->session[idx].used &&
2816 chan->session[idx].smmu.secure
2817 == secure && !sharedcb)) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002818 chan->session[idx].used = 1;
2819 break;
2820 }
2821 }
2822 VERIFY(err, idx < chan->sesscount);
2823 if (err)
2824 goto bail;
2825 chan->session[idx].smmu.faults = 0;
2826 } else {
2827 VERIFY(err, me->dev != NULL);
2828 if (err)
2829 goto bail;
2830 chan->session[0].dev = me->dev;
c_mtharue1a5ce12017-10-13 20:47:09 +05302831 chan->session[0].smmu.dev = me->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002832 }
2833
2834 *session = &chan->session[idx];
2835 bail:
2836 return err;
2837}
2838
c_mtharue1a5ce12017-10-13 20:47:09 +05302839static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
2840 size_t size)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002841{
2842 if (glink_queue_rx_intent(h, NULL, size))
2843 return false;
2844 return true;
2845}
2846
c_mtharue1a5ce12017-10-13 20:47:09 +05302847static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002848 const void *pkt_priv, const void *ptr)
2849{
2850}
2851
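/*
 * fastrpc_glink_notify_rx() - glink receive callback.  Validates that the
 * packet holds a complete smq_invoke_rsp, derives the context table index
 * from rsp->ctx, sanity-checks the stored context (pointer, ctxid and magic)
 * and completes the pending invoke via context_notify_user() before
 * acknowledging the packet with glink_rx_done().
 */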
c_mtharue1a5ce12017-10-13 20:47:09 +05302852static void fastrpc_glink_notify_rx(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002853 const void *pkt_priv, const void *ptr, size_t size)
2854{
2855 struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302856 struct fastrpc_apps *me = &gfa;
2857 uint32_t index;
c_mtharufdac6892017-10-12 13:09:01 +05302858 int err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002859
c_mtharufdac6892017-10-12 13:09:01 +05302860 VERIFY(err, (rsp && size >= sizeof(*rsp)));
2861 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302862 goto bail;
2863
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302864 index = (uint32_t)((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
2865 VERIFY(err, index < FASTRPC_CTX_MAX);
c_mtharufdac6892017-10-12 13:09:01 +05302866 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302867 goto bail;
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302868
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302869 VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
2870 if (err)
2871 goto bail;
2872
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302873 VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp->ctx & ~3)) &&
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302874 me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
2875 if (err)
2876 goto bail;
2877
2878 context_notify_user(me->ctxtable[index], rsp->retval);
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302879bail:
c_mtharufdac6892017-10-12 13:09:01 +05302880 if (err)
2881 pr_err("adsprpc: invalid response or context\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002882 glink_rx_done(handle, ptr, true);
2883}
2884
c_mtharue1a5ce12017-10-13 20:47:09 +05302885static void fastrpc_glink_notify_state(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002886 unsigned int event)
2887{
2888 struct fastrpc_apps *me = &gfa;
2889 int cid = (int)(uintptr_t)priv;
2890 struct fastrpc_glink_info *link;
2891
2892 if (cid < 0 || cid >= NUM_CHANNELS)
2893 return;
2894 link = &me->channel[cid].link;
2895 switch (event) {
2896 case GLINK_CONNECTED:
2897 link->port_state = FASTRPC_LINK_CONNECTED;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302898 complete(&me->channel[cid].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002899 break;
2900 case GLINK_LOCAL_DISCONNECTED:
2901 link->port_state = FASTRPC_LINK_DISCONNECTED;
2902 break;
2903 case GLINK_REMOTE_DISCONNECTED:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002904 break;
2905 default:
2906 break;
2907 }
2908}
2909
2910static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
2911 struct fastrpc_session_ctx **session)
2912{
2913 int err = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302914 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002915
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302916 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002917 if (!*session)
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302918 err = fastrpc_session_alloc_locked(chan, secure, 0, session);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302919 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002920 return err;
2921}
2922
2923static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
2924 struct fastrpc_session_ctx *session)
2925{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302926 struct fastrpc_apps *me = &gfa;
2927
2928 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002929 session->used = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302930 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002931}
2932
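/*
 * fastrpc_file_free() - final teardown of a client fd.  Releases the DSP-side
 * process, unlinks the file from the drivers list, frees init memory, pending
 * contexts, cached/remote buffers, all mappings and perf records, drops the
 * channel kref it holds (which may close the transport) and returns the SMMU
 * sessions.
 */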
2933static int fastrpc_file_free(struct fastrpc_file *fl)
2934{
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302935 struct hlist_node *n = NULL;
Tharun Kumar Merugu3e966762018-04-04 10:56:44 +05302936 struct fastrpc_mmap *map = NULL, *lmap = NULL;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302937 struct fastrpc_perf *perf = NULL, *fperf = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002938 int cid;
2939
2940 if (!fl)
2941 return 0;
2942 cid = fl->cid;
2943
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302944 (void)fastrpc_release_current_dsp_process(fl);
2945
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002946 spin_lock(&fl->apps->hlock);
2947 hlist_del_init(&fl->hn);
2948 spin_unlock(&fl->apps->hlock);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05302949 kfree(fl->debug_buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002950
Sathish Ambleyd7fbcbb2017-03-08 10:55:48 -08002951 if (!fl->sctx) {
2952 kfree(fl);
2953 return 0;
2954 }
tharun kumar9f899ea2017-07-03 17:07:03 +05302955 spin_lock(&fl->hlock);
2956 fl->file_close = 1;
2957 spin_unlock(&fl->hlock);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302958 if (!IS_ERR_OR_NULL(fl->init_mem))
2959 fastrpc_buf_free(fl->init_mem, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002960 fastrpc_context_list_dtor(fl);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302961 fastrpc_cached_buf_list_free(fl);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302962 mutex_lock(&fl->fl_map_mutex);
Tharun Kumar Merugu3e966762018-04-04 10:56:44 +05302963 do {
2964 lmap = NULL;
2965 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
2966 hlist_del_init(&map->hn);
2967 lmap = map;
2968 break;
2969 }
2970 fastrpc_mmap_free(lmap, 1);
2971 } while (lmap);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302972 mutex_unlock(&fl->fl_map_mutex);
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05302973 if (fl->refcount && (fl->ssrcount == fl->apps->channel[cid].ssrcount))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002974 kref_put_mutex(&fl->apps->channel[cid].kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302975 fastrpc_channel_close, &fl->apps->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002976 if (fl->sctx)
2977 fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
2978 if (fl->secsctx)
2979 fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302980
2981 mutex_lock(&fl->perf_mutex);
2982 do {
2983 struct hlist_node *pn = NULL;
2984
2985 fperf = NULL;
2986 hlist_for_each_entry_safe(perf, pn, &fl->perf, hn) {
2987 hlist_del_init(&perf->hn);
2988 fperf = perf;
2989 break;
2990 }
2991 kfree(fperf);
2992 } while (fperf);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302993 fastrpc_remote_buf_list_free(fl);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302994 mutex_unlock(&fl->perf_mutex);
2995 mutex_destroy(&fl->perf_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302996 mutex_destroy(&fl->fl_map_mutex);
Tharun Kumar Merugu8714e642018-05-17 15:21:08 +05302997 mutex_destroy(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002998 kfree(fl);
2999 return 0;
3000}
3001
3002static int fastrpc_device_release(struct inode *inode, struct file *file)
3003{
3004 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
3005
3006 if (fl) {
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303007 if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
3008 pm_qos_remove_request(&fl->pm_qos_req);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003009 if (fl->debugfs_file != NULL)
3010 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003011 fastrpc_file_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +05303012 file->private_data = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003013 }
3014 return 0;
3015}
3016
3017static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
3018 void *priv)
3019{
3020 struct fastrpc_apps *me = &gfa;
3021 int cid = (int)((uintptr_t)priv);
3022 struct fastrpc_glink_info *link;
3023
3024 if (cid < 0 || cid >= NUM_CHANNELS)
3025 return;
3026
3027 link = &me->channel[cid].link;
3028 switch (cb_info->link_state) {
3029 case GLINK_LINK_STATE_UP:
3030 link->link_state = FASTRPC_LINK_STATE_UP;
3031 complete(&me->channel[cid].work);
3032 break;
3033 case GLINK_LINK_STATE_DOWN:
3034 link->link_state = FASTRPC_LINK_STATE_DOWN;
3035 break;
3036 default:
3037 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
3038 break;
3039 }
3040}
3041
3042static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
3043{
3044 int err = 0;
3045 struct fastrpc_glink_info *link;
3046
3047 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3048 if (err)
3049 goto bail;
3050
3051 link = &me->channel[cid].link;
3052 if (link->link_notify_handle != NULL)
3053 goto bail;
3054
3055 link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
3056 link->link_notify_handle = glink_register_link_state_cb(
3057 &link->link_info,
3058 (void *)((uintptr_t)cid));
3059 VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
3060 if (err) {
3061 link->link_notify_handle = NULL;
3062 goto bail;
3063 }
3064 VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
3065 RPC_TIMEOUT));
3066bail:
3067 return err;
3068}
3069
3070static void fastrpc_glink_close(void *chan, int cid)
3071{
3072 int err = 0;
3073 struct fastrpc_glink_info *link;
3074
3075 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3076 if (err)
3077 return;
3078 link = &gfa.channel[cid].link;
3079
c_mtharu314a4202017-11-15 22:09:17 +05303080 if (link->port_state == FASTRPC_LINK_CONNECTED ||
3081 link->port_state == FASTRPC_LINK_REMOTE_DISCONNECTING) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003082 link->port_state = FASTRPC_LINK_DISCONNECTING;
3083 glink_close(chan);
3084 }
3085}
3086
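/*
 * fastrpc_glink_open() - open the glink port for a channel once the link is
 * up.  Fills the glink_open_config with the per-channel edge/transport and
 * the rx/tx/state callbacks above, then stores the returned handle in
 * me->channel[cid].chan.  Connection completion is signalled later through
 * fastrpc_glink_notify_state().
 */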
3087static int fastrpc_glink_open(int cid)
3088{
3089 int err = 0;
3090 void *handle = NULL;
3091 struct fastrpc_apps *me = &gfa;
3092 struct glink_open_config *cfg;
3093 struct fastrpc_glink_info *link;
3094
3095 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3096 if (err)
3097 goto bail;
3098 link = &me->channel[cid].link;
3099 cfg = &me->channel[cid].link.cfg;
3100 VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
3101 if (err)
3102 goto bail;
3103
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05303104 VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
3105 if (err)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003106 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003107
3108 link->port_state = FASTRPC_LINK_CONNECTING;
3109 cfg->priv = (void *)(uintptr_t)cid;
3110 cfg->edge = gcinfo[cid].link.link_info.edge;
3111 cfg->transport = gcinfo[cid].link.link_info.transport;
3112 cfg->name = FASTRPC_GLINK_GUID;
3113 cfg->notify_rx = fastrpc_glink_notify_rx;
3114 cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
3115 cfg->notify_state = fastrpc_glink_notify_state;
3116 cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
3117 handle = glink_open(cfg);
3118 VERIFY(err, !IS_ERR_OR_NULL(handle));
c_mtharu6e1d26b2017-10-09 16:05:24 +05303119 if (err) {
3120 if (link->port_state == FASTRPC_LINK_CONNECTING)
3121 link->port_state = FASTRPC_LINK_DISCONNECTED;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003122 goto bail;
c_mtharu6e1d26b2017-10-09 16:05:24 +05303123 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003124 me->channel[cid].chan = handle;
3125bail:
3126 return err;
3127}
3128
Sathish Ambley1ca68232017-01-19 10:32:55 -08003129static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
3130{
3131 filp->private_data = inode->i_private;
3132 return 0;
3133}
3134
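/*
 * fastrpc_debugfs_read() - dump driver state to debugfs.  For the "global"
 * node (no private fastrpc_file) it prints per-channel refcounts, the CMA
 * heap range and the global maps; for a per-process node it prints the
 * client's session attributes, maps, cached buffers and pending/interrupted
 * smq contexts.  Output is capped at DEBUGFS_SIZE bytes.
 */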
3135static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
3136 size_t count, loff_t *position)
3137{
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303138 struct fastrpc_apps *me = &gfa;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003139 struct fastrpc_file *fl = filp->private_data;
3140 struct hlist_node *n;
c_mtharue1a5ce12017-10-13 20:47:09 +05303141 struct fastrpc_buf *buf = NULL;
3142 struct fastrpc_mmap *map = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303143 struct fastrpc_mmap *gmaps = NULL;
c_mtharue1a5ce12017-10-13 20:47:09 +05303144 struct smq_invoke_ctx *ictx = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303145 struct fastrpc_channel_ctx *chan = NULL;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003146 unsigned int len = 0;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303147 int i, j, sess_used = 0, ret = 0;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003148 char *fileinfo = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303149 char single_line[UL_SIZE] = "----------------";
3150 char title[UL_SIZE] = "=========================";
Sathish Ambley1ca68232017-01-19 10:32:55 -08003151
3152 fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
3153 if (!fileinfo)
3154 goto bail;
3155 if (fl == NULL) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303156 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3157 "\n%s %s %s\n", title, " CHANNEL INFO ", title);
3158 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3159 "%-8s|%-9s|%-9s|%-14s|%-9s|%-13s\n",
3160			"subsys", "refcount", "sesscount", "issubsystemup",
3161 "ssrcount", "session_used");
3162 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3163 "-%s%s%s%s-\n", single_line, single_line,
3164 single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003165 for (i = 0; i < NUM_CHANNELS; i++) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303166 sess_used = 0;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003167 chan = &gcinfo[i];
3168 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303169 DEBUGFS_SIZE - len, "%-8s", chan->subsys);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003170 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303171 DEBUGFS_SIZE - len, "|%-9d",
3172 chan->kref.refcount.counter);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303173 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303174 DEBUGFS_SIZE - len, "|%-9d",
3175 chan->sesscount);
3176 len += scnprintf(fileinfo + len,
3177 DEBUGFS_SIZE - len, "|%-14d",
3178 chan->issubsystemup);
3179 len += scnprintf(fileinfo + len,
3180 DEBUGFS_SIZE - len, "|%-9d",
3181 chan->ssrcount);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003182 for (j = 0; j < chan->sesscount; j++) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303183 sess_used += chan->session[j].used;
3184 }
3185 len += scnprintf(fileinfo + len,
3186 DEBUGFS_SIZE - len, "|%-13d\n", sess_used);
3187
3188 }
3189 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3190 "\n%s%s%s\n", "=============",
3191 " CMA HEAP ", "==============");
3192 len += scnprintf(fileinfo + len,
3193 DEBUGFS_SIZE - len, "%-20s|%-20s\n", "addr", "size");
3194 len += scnprintf(fileinfo + len,
3195 DEBUGFS_SIZE - len, "--%s%s---\n",
3196 single_line, single_line);
3197 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3198 "0x%-18llX", me->range.addr);
3199 len += scnprintf(fileinfo + len,
3200 DEBUGFS_SIZE - len, "|0x%-18llX\n", me->range.size);
3201 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3202 "\n==========%s %s %s===========\n",
3203 title, " GMAPS ", title);
3204 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3205 "%-20s|%-20s|%-20s|%-20s\n",
3206 "fd", "phys", "size", "va");
3207 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3208 "%s%s%s%s%s\n", single_line, single_line,
3209 single_line, single_line, single_line);
3210 hlist_for_each_entry_safe(gmaps, n, &me->maps, hn) {
3211 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3212 "%-20d|0x%-18llX|0x%-18X|0x%-20lX\n\n",
3213 gmaps->fd, gmaps->phys,
3214 (uint32_t)gmaps->size,
3215 gmaps->va);
3216 }
3217 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3218 "%-20s|%-20s|%-20s|%-20s\n",
3219 "len", "refs", "raddr", "flags");
3220 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3221 "%s%s%s%s%s\n", single_line, single_line,
3222 single_line, single_line, single_line);
3223 hlist_for_each_entry_safe(gmaps, n, &me->maps, hn) {
3224 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3225 "0x%-18X|%-20d|%-20lu|%-20u\n",
3226 (uint32_t)gmaps->len, gmaps->refs,
3227 gmaps->raddr, gmaps->flags);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003228 }
3229 } else {
3230 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303231 "\n%s %13s %d\n", "cid", ":", fl->cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003232 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303233 "%s %12s %d\n", "tgid", ":", fl->tgid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003234 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303235 "%s %7s %d\n", "sessionid", ":", fl->sessionid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003236 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303237 "%s %8s %d\n", "ssrcount", ":", fl->ssrcount);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303238 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303239 "%s %8s %d\n", "refcount", ":", fl->refcount);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003240 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303241 "%s %14s %d\n", "pd", ":", fl->pd);
3242 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3243 "%s %9s %s\n", "spdname", ":", fl->spdname);
3244 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3245 "%s %6s %d\n", "file_close", ":", fl->file_close);
3246 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3247 "%s %8s %d\n", "sharedcb", ":", fl->sharedcb);
3248 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3249 "%s %9s %d\n", "profile", ":", fl->profile);
3250 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3251 "%s %3s %d\n", "smmu.coherent", ":",
3252 fl->sctx->smmu.coherent);
3253 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3254 "%s %4s %d\n", "smmu.enabled", ":",
3255 fl->sctx->smmu.enabled);
3256 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3257 "%s %9s %d\n", "smmu.cb", ":", fl->sctx->smmu.cb);
3258 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3259 "%s %5s %d\n", "smmu.secure", ":",
3260 fl->sctx->smmu.secure);
3261 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3262 "%s %5s %d\n", "smmu.faults", ":",
3263 fl->sctx->smmu.faults);
3264 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3265 "%s %s %d\n", "link.link_state",
3266 ":", *&me->channel[fl->cid].link.link_state);
3267
3268 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3269 "\n=======%s %s %s======\n", title,
3270 " LIST OF MAPS ", title);
3271
3272 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3273 "%-20s|%-20s|%-20s\n", "va", "phys", "size");
3274 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3275 "%s%s%s%s%s\n",
3276 single_line, single_line, single_line,
3277 single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003278 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303279 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3280 "0x%-20lX|0x%-20llX|0x%-20zu\n\n",
3281 map->va, map->phys,
3282 map->size);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003283 }
3284 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303285 "%-20s|%-20s|%-20s|%-20s\n",
3286 "len", "refs",
3287 "raddr", "uncached");
3288 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3289 "%s%s%s%s%s\n",
3290 single_line, single_line, single_line,
3291 single_line, single_line);
3292 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
3293 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3294 "%-20zu|%-20d|0x%-20lX|%-20d\n\n",
3295 map->len, map->refs, map->raddr,
3296 map->uncached);
3297 }
3298 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3299 "%-20s|%-20s\n", "secure", "attr");
3300 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3301 "%s%s%s%s%s\n",
3302 single_line, single_line, single_line,
3303 single_line, single_line);
3304 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
3305 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3306 "%-20d|0x%-20lX\n\n",
3307 map->secure, map->attr);
3308 }
3309 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303310 "%s %d\n\n",
3311 "KERNEL MEMORY ALLOCATION:", 1);
3312 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303313 "\n======%s %s %s======\n", title,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303314 " LIST OF CACHED BUFS ", title);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303315 spin_lock(&fl->hlock);
3316 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303317 "%-19s|%-19s|%-19s|%-19s\n",
3318 "virt", "phys", "size", "dma_attr");
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303319 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3320 "%s%s%s%s%s\n", single_line, single_line,
3321 single_line, single_line, single_line);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303322 hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303323 len += scnprintf(fileinfo + len,
3324 DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303325 "0x%-17p|0x%-17llX|%-19zu|0x%-17lX\n",
3326 buf->virt, (uint64_t)buf->phys, buf->size,
3327 buf->dma_attr);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303328 }
3329 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3330 "\n%s %s %s\n", title,
3331 " LIST OF PENDING SMQCONTEXTS ", title);
3332
3333 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3334 "%-20s|%-10s|%-10s|%-10s|%-20s\n",
3335 "sc", "pid", "tgid", "used", "ctxid");
3336 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3337 "%s%s%s%s%s\n", single_line, single_line,
3338 single_line, single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003339 hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
3340 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303341 "0x%-18X|%-10d|%-10d|%-10zu|0x%-20llX\n\n",
3342 ictx->sc, ictx->pid, ictx->tgid,
3343 ictx->used, ictx->ctxid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003344 }
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303345
Sathish Ambley1ca68232017-01-19 10:32:55 -08003346 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303347 "\n%s %s %s\n", title,
3348 " LIST OF INTERRUPTED SMQCONTEXTS ", title);
3349
3350 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3351 "%-20s|%-10s|%-10s|%-10s|%-20s\n",
3352 "sc", "pid", "tgid", "used", "ctxid");
3353 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3354 "%s%s%s%s%s\n", single_line, single_line,
3355 single_line, single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003356 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303357 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3358 "%-20u|%-20d|%-20d|%-20zu|0x%-20llX\n\n",
3359 ictx->sc, ictx->pid, ictx->tgid,
3360 ictx->used, ictx->ctxid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003361 }
3362 spin_unlock(&fl->hlock);
3363 }
3364 if (len > DEBUGFS_SIZE)
3365 len = DEBUGFS_SIZE;
3366 ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
3367 kfree(fileinfo);
3368bail:
3369 return ret;
3370}
3371
3372static const struct file_operations debugfs_fops = {
3373 .open = fastrpc_debugfs_open,
3374 .read = fastrpc_debugfs_read,
3375};
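/*
 * fastrpc_channel_open() - bring up the remote transport for fl->cid on first
 * use.  Under smd_mutex it takes a channel reference, opens glink or SMD as
 * configured, waits up to RPC_TIMEOUT for the port to connect, pre-queues rx
 * intents for glink and, after an ADSP SSR, unmaps the stale remote heap.
 */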
Sathish Ambley36849af2017-02-02 09:35:55 -08003376static int fastrpc_channel_open(struct fastrpc_file *fl)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003377{
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003378 struct fastrpc_apps *me = &gfa;
Tharun Kumar Meruguc42c6e22018-05-29 15:50:46 +05303379 int cid, ii, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003380
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303381 mutex_lock(&me->smd_mutex);
3382
Sathish Ambley36849af2017-02-02 09:35:55 -08003383 VERIFY(err, fl && fl->sctx);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003384 if (err)
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303385 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08003386 cid = fl->cid;
c_mtharu314a4202017-11-15 22:09:17 +05303387 VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
3388 if (err)
3389 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303390 if (me->channel[cid].ssrcount !=
3391 me->channel[cid].prevssrcount) {
3392 if (!me->channel[cid].issubsystemup) {
3393 VERIFY(err, 0);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303394 if (err) {
3395 err = -ENOTCONN;
c_mtharue1a5ce12017-10-13 20:47:09 +05303396 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303397 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303398 }
3399 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003400 fl->ssrcount = me->channel[cid].ssrcount;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303401 fl->refcount = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003402 if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
c_mtharue1a5ce12017-10-13 20:47:09 +05303403 (me->channel[cid].chan == NULL)) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303404 if (me->glink) {
3405 VERIFY(err, 0 == fastrpc_glink_register(cid, me));
3406 if (err)
3407 goto bail;
3408 VERIFY(err, 0 == fastrpc_glink_open(cid));
3409 } else {
3410 VERIFY(err, !smd_named_open_on_edge(FASTRPC_SMD_GUID,
3411 gcinfo[cid].channel,
3412 (smd_channel_t **)&me->channel[cid].chan,
3413 (void *)(uintptr_t)cid,
3414 smd_event_handler));
3415 }
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05303416 VERIFY(err,
3417 wait_for_completion_timeout(&me->channel[cid].workport,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003418 RPC_TIMEOUT));
3419 if (err) {
c_mtharue1a5ce12017-10-13 20:47:09 +05303420 me->channel[cid].chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003421 goto bail;
3422 }
3423 kref_init(&me->channel[cid].kref);
3424 pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
3425 MAJOR(me->dev_no), cid);
Tharun Kumar Meruguc42c6e22018-05-29 15:50:46 +05303426
3427 for (ii = 0; ii < FASTRPC_GLINK_INTENT_NUM && me->glink; ii++)
3428 glink_queue_rx_intent(me->channel[cid].chan, NULL,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303429 FASTRPC_GLINK_INTENT_LEN);
Tharun Kumar Meruguc42c6e22018-05-29 15:50:46 +05303430
Tharun Kumar Merugud86fc8c2018-01-04 16:35:31 +05303431 if (cid == 0 && me->channel[cid].ssrcount !=
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003432 me->channel[cid].prevssrcount) {
c_mtharue1a5ce12017-10-13 20:47:09 +05303433 if (fastrpc_mmap_remove_ssr(fl))
3434 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003435 me->channel[cid].prevssrcount =
3436 me->channel[cid].ssrcount;
3437 }
3438 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003439
3440bail:
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303441 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003442 return err;
3443}
3444
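/*
 * fastrpc_device_open() - per-fd setup.  Validates the device minor (normal
 * vs secure node), allocates the fastrpc_file, creates a "<comm>_<pid>"
 * debugfs entry, initializes the map/buffer/perf lists and locks, and links
 * the file into the global drivers list.  The channel id stays -1 until
 * FASTRPC_IOCTL_GETINFO selects one.
 */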
Sathish Ambley36849af2017-02-02 09:35:55 -08003445static int fastrpc_device_open(struct inode *inode, struct file *filp)
3446{
3447 int err = 0;
Sathish Ambley567012b2017-03-06 11:55:04 -08003448 struct dentry *debugfs_file;
c_mtharue1a5ce12017-10-13 20:47:09 +05303449 struct fastrpc_file *fl = NULL;
Sathish Ambley36849af2017-02-02 09:35:55 -08003450 struct fastrpc_apps *me = &gfa;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303451 char strpid[PID_SIZE];
3452 int buf_size = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003453
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303454 /*
3455 * Indicates the device node opened
3456 * MINOR_NUM_DEV or MINOR_NUM_SECURE_DEV
3457 */
3458 int dev_minor = MINOR(inode->i_rdev);
3459
3460 VERIFY(err, ((dev_minor == MINOR_NUM_DEV) ||
3461 (dev_minor == MINOR_NUM_SECURE_DEV)));
3462 if (err) {
3463 pr_err("adsprpc: Invalid dev minor num %d\n", dev_minor);
3464 return err;
3465 }
3466
c_mtharue1a5ce12017-10-13 20:47:09 +05303467 VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
Sathish Ambley36849af2017-02-02 09:35:55 -08003468 if (err)
3469 return err;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303470 snprintf(strpid, PID_SIZE, "%d", current->pid);
Mohammed Nayeem Ur Rahman2d65b4a2018-10-10 16:34:37 +05303471 buf_size = strlen(current->comm) + strlen("_") + strlen(strpid) + 1;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303472	fl->debug_buf = kzalloc(buf_size, GFP_KERNEL);
	if (!fl->debug_buf) {
		kfree(fl);
		return -ENOMEM;
	}
3473 snprintf(fl->debug_buf, UL_SIZE, "%.10s%s%d",
3474 current->comm, "_", current->pid);
3475 debugfs_file = debugfs_create_file(fl->debug_buf, 0644,
3476 debugfs_root, fl, &debugfs_fops);
3477
Sathish Ambley36849af2017-02-02 09:35:55 -08003478 context_list_ctor(&fl->clst);
3479 spin_lock_init(&fl->hlock);
3480 INIT_HLIST_HEAD(&fl->maps);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303481 INIT_HLIST_HEAD(&fl->perf);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303482 INIT_HLIST_HEAD(&fl->cached_bufs);
3483 INIT_HLIST_HEAD(&fl->remote_bufs);
Sathish Ambley36849af2017-02-02 09:35:55 -08003484 INIT_HLIST_NODE(&fl->hn);
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05303485 fl->sessionid = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003486 fl->tgid = current->tgid;
3487 fl->apps = me;
3488 fl->mode = FASTRPC_MODE_SERIAL;
3489 fl->cid = -1;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303490 fl->dev_minor = dev_minor;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303491 fl->init_mem = NULL;
Sathish Ambley567012b2017-03-06 11:55:04 -08003492 if (debugfs_file != NULL)
3493 fl->debugfs_file = debugfs_file;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303494 fl->qos_request = 0;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303495 fl->refcount = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003496 filp->private_data = fl;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05303497 mutex_init(&fl->map_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303498 mutex_init(&fl->fl_map_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08003499 spin_lock(&me->hlock);
3500 hlist_add_head(&fl->hn, &me->drivers);
3501 spin_unlock(&me->hlock);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303502 mutex_init(&fl->perf_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08003503 return 0;
3504}
3505
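/*
 * fastrpc_get_info() - handle FASTRPC_IOCTL_GETINFO.  On the first call the
 * requested channel id is bound to the fd; for the non-secure device node a
 * secure-only channel is refused with -EPERM.  An SMMU session is then
 * allocated and *info reports whether the SMMU is enabled for it.
 */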
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003506static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
3507{
3508 int err = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003509 uint32_t cid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003510
c_mtharue1a5ce12017-10-13 20:47:09 +05303511 VERIFY(err, fl != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003512 if (err)
3513 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08003514 if (fl->cid == -1) {
3515 cid = *info;
3516 VERIFY(err, cid < NUM_CHANNELS);
3517 if (err)
3518 goto bail;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303519 /* Check to see if the device node is non-secure */
zhaochenfc798572018-08-17 15:32:37 +08003520 if (fl->dev_minor == MINOR_NUM_DEV &&
3521 fl->apps->secure_flag == true) {
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303522 /*
3523 * For non secure device node check and make sure that
3524 * the channel allows non-secure access
3525 * If not, bail. Session will not start.
3526 * cid will remain -1 and client will not be able to
3527 * invoke any other methods without failure
3528 */
3529 if (fl->apps->channel[cid].secure == SECURE_CHANNEL) {
3530 err = -EPERM;
3531 pr_err("adsprpc: GetInfo failed dev %d, cid %d, secure %d\n",
3532 fl->dev_minor, cid,
3533 fl->apps->channel[cid].secure);
3534 goto bail;
3535 }
3536 }
Sathish Ambley36849af2017-02-02 09:35:55 -08003537 fl->cid = cid;
3538 fl->ssrcount = fl->apps->channel[cid].ssrcount;
3539 VERIFY(err, !fastrpc_session_alloc_locked(
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303540 &fl->apps->channel[cid], 0, fl->sharedcb, &fl->sctx));
Sathish Ambley36849af2017-02-02 09:35:55 -08003541 if (err)
3542 goto bail;
3543 }
Tharun Kumar Merugu80be7d62017-08-02 11:03:22 +05303544 VERIFY(err, fl->sctx != NULL);
3545 if (err)
3546 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003547 *info = (fl->sctx->smmu.enabled ? 1 : 0);
3548bail:
3549 return err;
3550}
3551
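/*
 * fastrpc_internal_control() - handle FASTRPC_IOCTL_CONTROL requests:
 *  - FASTRPC_CONTROL_LATENCY: add or update a PM_QOS_CPU_DMA_LATENCY vote
 *    using the DT-provided qcom,rpc-latency-us value;
 *  - FASTRPC_CONTROL_SMMU: record the shared context bank request (ignored
 *    on legacy targets);
 *  - FASTRPC_CONTROL_KALLOC: report that kernel memory allocation is
 *    supported.
 *
 * Illustrative user-space sketch of a latency vote (field names taken from
 * the cp->lp usage below; the exact uapi layout lives in adsprpc_shared.h,
 * and fd is assumed to be an open fastrpc node):
 *
 *	struct fastrpc_ioctl_control cp = {0};
 *
 *	cp.req = FASTRPC_CONTROL_LATENCY;
 *	cp.lp.enable = FASTRPC_LATENCY_CTRL_ENB;
 *	ioctl(fd, FASTRPC_IOCTL_CONTROL, &cp);
 */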
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303552static int fastrpc_internal_control(struct fastrpc_file *fl,
3553 struct fastrpc_ioctl_control *cp)
3554{
Mohammed Nayeem Ur Rahman18d633f2019-05-28 15:11:40 +05303555 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303556 int err = 0;
3557 int latency;
3558
3559 VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
3560 if (err)
3561 goto bail;
3562 VERIFY(err, !IS_ERR_OR_NULL(cp));
3563 if (err)
3564 goto bail;
3565
3566 switch (cp->req) {
3567 case FASTRPC_CONTROL_LATENCY:
3568 latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
3569 fl->apps->latency : PM_QOS_DEFAULT_VALUE;
3570 VERIFY(err, latency != 0);
3571 if (err)
3572 goto bail;
3573 if (!fl->qos_request) {
3574 pm_qos_add_request(&fl->pm_qos_req,
3575 PM_QOS_CPU_DMA_LATENCY, latency);
3576 fl->qos_request = 1;
3577 } else
3578 pm_qos_update_request(&fl->pm_qos_req, latency);
3579 break;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303580 case FASTRPC_CONTROL_SMMU:
Mohammed Nayeem Ur Rahman18d633f2019-05-28 15:11:40 +05303581 if (!me->legacy)
3582 fl->sharedcb = cp->smmu.sharedcb;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303583 break;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303584 case FASTRPC_CONTROL_KALLOC:
3585 cp->kalloc.kalloc_support = 1;
3586 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303587 default:
3588 err = -ENOTTY;
3589 break;
3590 }
3591bail:
3592 return err;
3593}
3594
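/*
 * fastrpc_device_ioctl() - ioctl dispatcher for the fastrpc device nodes.
 * Copies each request from user space, refuses new work once the fd is being
 * released (file_close), and routes to the invoke, mmap/munmap, init,
 * setmode, perf, control and getinfo handlers, copying results back on
 * success.
 *
 * Hedged user-space sketch of the usual call order.  The node name below is
 * an assumption (nodes are created from gcinfo[].name elsewhere in the
 * driver) and the uapi structs are defined in adsprpc_shared.h:
 *
 *	int fd = open("/dev/adsprpc-smd", O_RDONLY);	// assumed node name
 *	uint32_t info = 0;				// requested channel id
 *	ioctl(fd, FASTRPC_IOCTL_GETINFO, &info);	// bind fd to a channel
 *	struct fastrpc_ioctl_init_attrs init = { 0 };	// setup elided
 *	ioctl(fd, FASTRPC_IOCTL_INIT_ATTRS, &init);	// create the DSP process
 *	struct fastrpc_ioctl_invoke_crc inv = { 0 };	// setup elided
 *	ioctl(fd, FASTRPC_IOCTL_INVOKE_CRC, &inv);	// issue a remote call
 */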
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003595static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
3596 unsigned long ioctl_param)
3597{
3598 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07003599 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003600 struct fastrpc_ioctl_mmap mmap;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303601 struct fastrpc_ioctl_mmap_64 mmap64;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003602 struct fastrpc_ioctl_munmap munmap;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303603 struct fastrpc_ioctl_munmap_64 munmap64;
c_mtharu7bd6a422017-10-17 18:15:37 +05303604 struct fastrpc_ioctl_munmap_fd munmap_fd;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08003605 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003606 struct fastrpc_ioctl_perf perf;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303607 struct fastrpc_ioctl_control cp;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003608 } p;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303609 union {
3610 struct fastrpc_ioctl_mmap mmap;
3611 struct fastrpc_ioctl_munmap munmap;
3612 } i;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003613 void *param = (char *)ioctl_param;
3614 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
3615 int size = 0, err = 0;
3616 uint32_t info;
3617
c_mtharue1a5ce12017-10-13 20:47:09 +05303618 p.inv.fds = NULL;
3619 p.inv.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07003620 p.inv.crc = NULL;
tharun kumar9f899ea2017-07-03 17:07:03 +05303621 spin_lock(&fl->hlock);
3622 if (fl->file_close == 1) {
3623		err = -EBADF;
3624		pr_warn("ADSPRPC: fastrpc_device_release is happening, so not sending any new requests to DSP\n");
3625 spin_unlock(&fl->hlock);
3626 goto bail;
3627 }
3628 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003629
3630 switch (ioctl_num) {
3631 case FASTRPC_IOCTL_INVOKE:
3632 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07003633 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003634 case FASTRPC_IOCTL_INVOKE_FD:
3635 if (!size)
3636 size = sizeof(struct fastrpc_ioctl_invoke_fd);
3637 /* fall through */
3638 case FASTRPC_IOCTL_INVOKE_ATTRS:
3639 if (!size)
3640 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07003641 /* fall through */
3642 case FASTRPC_IOCTL_INVOKE_CRC:
3643 if (!size)
3644 size = sizeof(struct fastrpc_ioctl_invoke_crc);
c_mtharue1a5ce12017-10-13 20:47:09 +05303645 K_COPY_FROM_USER(err, 0, &p.inv, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003646 if (err)
3647 goto bail;
3648 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
3649 0, &p.inv)));
3650 if (err)
3651 goto bail;
3652 break;
3653 case FASTRPC_IOCTL_MMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303654 K_COPY_FROM_USER(err, 0, &p.mmap, param,
3655 sizeof(p.mmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303656 if (err)
3657 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003658 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
3659 if (err)
3660 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303661 K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003662 if (err)
3663 goto bail;
3664 break;
3665 case FASTRPC_IOCTL_MUNMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303666 K_COPY_FROM_USER(err, 0, &p.munmap, param,
3667 sizeof(p.munmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303668 if (err)
3669 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003670 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
3671 &p.munmap)));
3672 if (err)
3673 goto bail;
3674 break;
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303675 case FASTRPC_IOCTL_MMAP_64:
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303676 K_COPY_FROM_USER(err, 0, &p.mmap64, param,
3677 sizeof(p.mmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303678 if (err)
3679 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303680 get_fastrpc_ioctl_mmap_64(&p.mmap64, &i.mmap);
3681 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &i.mmap)));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303682 if (err)
3683 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303684 put_fastrpc_ioctl_mmap_64(&p.mmap64, &i.mmap);
3685 K_COPY_TO_USER(err, 0, param, &p.mmap64, sizeof(p.mmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303686 if (err)
3687 goto bail;
3688 break;
3689 case FASTRPC_IOCTL_MUNMAP_64:
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303690 K_COPY_FROM_USER(err, 0, &p.munmap64, param,
3691 sizeof(p.munmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303692 if (err)
3693 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303694 get_fastrpc_ioctl_munmap_64(&p.munmap64, &i.munmap);
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303695 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303696 &i.munmap)));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303697 if (err)
3698 goto bail;
3699 break;
c_mtharu7bd6a422017-10-17 18:15:37 +05303700 case FASTRPC_IOCTL_MUNMAP_FD:
3701 K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
3702 sizeof(p.munmap_fd));
3703 if (err)
3704 goto bail;
3705 VERIFY(err, 0 == (err = fastrpc_internal_munmap_fd(fl,
3706 &p.munmap_fd)));
3707 if (err)
3708 goto bail;
3709 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003710 case FASTRPC_IOCTL_SETMODE:
3711 switch ((uint32_t)ioctl_param) {
3712 case FASTRPC_MODE_PARALLEL:
3713 case FASTRPC_MODE_SERIAL:
3714 fl->mode = (uint32_t)ioctl_param;
3715 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003716 case FASTRPC_MODE_PROFILE:
3717 fl->profile = (uint32_t)ioctl_param;
3718 break;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05303719 case FASTRPC_MODE_SESSION:
3720 fl->sessionid = 1;
3721 fl->tgid |= (1 << SESSION_ID_INDEX);
3722 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003723 default:
3724 err = -ENOTTY;
3725 break;
3726 }
3727 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003728 case FASTRPC_IOCTL_GETPERF:
c_mtharue1a5ce12017-10-13 20:47:09 +05303729 K_COPY_FROM_USER(err, 0, &p.perf,
3730 param, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003731 if (err)
3732 goto bail;
3733 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
3734 if (p.perf.keys) {
3735 char *keys = PERF_KEYS;
3736
c_mtharue1a5ce12017-10-13 20:47:09 +05303737 K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
3738 keys, strlen(keys)+1);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003739 if (err)
3740 goto bail;
3741 }
3742 if (p.perf.data) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303743 struct fastrpc_perf *perf = NULL, *fperf = NULL;
3744 struct hlist_node *n = NULL;
3745
3746 mutex_lock(&fl->perf_mutex);
3747 hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
3748 if (perf->tid == current->pid) {
3749 fperf = perf;
3750 break;
3751 }
3752 }
3753
3754 mutex_unlock(&fl->perf_mutex);
3755
3756 if (fperf) {
3757 K_COPY_TO_USER(err, 0, (void *)p.perf.data,
3758 fperf, sizeof(*fperf));
3759 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003760 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303761 K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003762 if (err)
3763 goto bail;
3764 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303765 case FASTRPC_IOCTL_CONTROL:
c_mtharue1a5ce12017-10-13 20:47:09 +05303766 K_COPY_FROM_USER(err, 0, &p.cp, param,
3767 sizeof(p.cp));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303768 if (err)
3769 goto bail;
3770 VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
3771 if (err)
3772 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303773 if (p.cp.req == FASTRPC_CONTROL_KALLOC) {
3774 K_COPY_TO_USER(err, 0, param, &p.cp, sizeof(p.cp));
3775 if (err)
3776 goto bail;
3777 }
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303778 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003779 case FASTRPC_IOCTL_GETINFO:
c_mtharue1a5ce12017-10-13 20:47:09 +05303780 K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
Sathish Ambley36849af2017-02-02 09:35:55 -08003781 if (err)
3782 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003783 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
3784 if (err)
3785 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303786 K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003787 if (err)
3788 goto bail;
3789 break;
3790 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08003791 p.init.attrs = 0;
3792 p.init.siglen = 0;
3793 size = sizeof(struct fastrpc_ioctl_init);
3794 /* fall through */
3795 case FASTRPC_IOCTL_INIT_ATTRS:
3796 if (!size)
3797 size = sizeof(struct fastrpc_ioctl_init_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +05303798 K_COPY_FROM_USER(err, 0, &p.init, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003799 if (err)
3800 goto bail;
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05303801 VERIFY(err, p.init.init.filelen >= 0 &&
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303802 p.init.init.filelen < INIT_FILELEN_MAX);
3803 if (err)
3804 goto bail;
3805 VERIFY(err, p.init.init.memlen >= 0 &&
3806 p.init.init.memlen < INIT_MEMLEN_MAX);
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05303807 if (err)
3808 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303809 VERIFY(err, 0 == (err = fastrpc_init_process(fl, &p.init)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003810 if (err)
3811 goto bail;
3812 break;
3813
3814 default:
3815 err = -ENOTTY;
3816 pr_info("bad ioctl: %d\n", ioctl_num);
3817 break;
3818 }
3819 bail:
3820 return err;
3821}
3822
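/*
 * fastrpc_restart_notifier_cb() - subsystem restart notifier.  On
 * SUBSYS_BEFORE_SHUTDOWN it bumps the channel ssrcount, closes the transport
 * and wakes up waiting clients; on SUBSYS_RAMDUMP_NOTIFICATION it arms the
 * remote-heap ramdump; on SUBSYS_AFTER_POWERUP it marks the subsystem up.
 */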
3823static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
3824 unsigned long code,
3825 void *data)
3826{
3827 struct fastrpc_apps *me = &gfa;
3828 struct fastrpc_channel_ctx *ctx;
c_mtharue1a5ce12017-10-13 20:47:09 +05303829 struct notif_data *notifdata = data;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003830 int cid;
3831
3832 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
3833 cid = ctx - &me->channel[0];
3834 if (code == SUBSYS_BEFORE_SHUTDOWN) {
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303835 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003836 ctx->ssrcount++;
c_mtharue1a5ce12017-10-13 20:47:09 +05303837 ctx->issubsystemup = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303838 if (ctx->chan) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303839 if (me->glink)
3840 fastrpc_glink_close(ctx->chan, cid);
3841 else
3842 smd_close(ctx->chan);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303843 ctx->chan = NULL;
3844 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
3845 gcinfo[cid].name, MAJOR(me->dev_no), cid);
3846 }
3847 mutex_unlock(&me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05303848 if (cid == 0)
3849 me->staticpd_flags = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003850 fastrpc_notify_drivers(me, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05303851 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
3852 if (me->channel[0].remoteheap_ramdump_dev &&
3853 notifdata->enable_ramdump) {
3854 me->channel[0].ramdumpenabled = 1;
3855 }
3856 } else if (code == SUBSYS_AFTER_POWERUP) {
3857 ctx->issubsystemup = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003858 }
3859
3860 return NOTIFY_DONE;
3861}
3862
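/*
 * fastrpc_pdr_notifier_cb() - protection-domain restart notifier for the
 * audio/sensors static PDs.  A SERVICE_STATE_DOWN event bumps pdrcount,
 * clears ispdup and notifies clients bound to that PD; STATE_UP marks the
 * PD available again.
 */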
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303863static int fastrpc_pdr_notifier_cb(struct notifier_block *pdrnb,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303864 unsigned long code,
3865 void *data)
3866{
3867 struct fastrpc_apps *me = &gfa;
3868 struct fastrpc_static_pd *spd;
3869 struct notif_data *notifdata = data;
3870
3871 spd = container_of(pdrnb, struct fastrpc_static_pd, pdrnb);
3872 if (code == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01) {
3873 mutex_lock(&me->smd_mutex);
3874 spd->pdrcount++;
3875 spd->ispdup = 0;
3876 pr_info("ADSPRPC: Audio PDR notifier %d %s\n",
3877 MAJOR(me->dev_no), spd->spdname);
3878 mutex_unlock(&me->smd_mutex);
3879 if (!strcmp(spd->spdname,
3880 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME))
3881 me->staticpd_flags = 0;
3882 fastrpc_notify_pdr_drivers(me, spd->spdname);
3883 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
3884 if (me->channel[0].remoteheap_ramdump_dev &&
3885 notifdata->enable_ramdump) {
3886 me->channel[0].ramdumpenabled = 1;
3887 }
3888 } else if (code == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
3889 spd->ispdup = 1;
3890 }
3891
3892 return NOTIFY_DONE;
3893}
3894
3895static int fastrpc_get_service_location_notify(struct notifier_block *nb,
3896 unsigned long opcode, void *data)
3897{
3898 struct fastrpc_static_pd *spd;
3899 struct pd_qmi_client_data *pdr = data;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303900 int curr_state = 0, i = 0;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303901
3902 spd = container_of(nb, struct fastrpc_static_pd, get_service_nb);
3903 if (opcode == LOCATOR_DOWN) {
3904 pr_err("ADSPRPC: Audio PD restart notifier locator down\n");
3905 return NOTIFY_DONE;
3906 }
3907
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303908 for (i = 0; i < pdr->total_domains; i++) {
3909 if ((!strcmp(pdr->domain_list[i].name,
3910 "msm/adsp/audio_pd")) ||
3911 (!strcmp(pdr->domain_list[i].name,
3912 "msm/adsp/sensor_pd"))) {
3913 spd->pdrhandle =
3914 service_notif_register_notifier(
3915 pdr->domain_list[i].name,
3916 pdr->domain_list[i].instance_id,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303917 &spd->pdrnb, &curr_state);
Tharun Kumar Meruguad4beb82018-05-10 19:51:48 +05303918 if (IS_ERR(spd->pdrhandle)) {
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303919 pr_err("ADSPRPC: Unable to register notifier\n");
Tharun Kumar Meruguad4beb82018-05-10 19:51:48 +05303920 } else if (curr_state ==
3921 SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
3922 pr_info("ADSPRPC: STATE_UP_V01 received\n");
3923 spd->ispdup = 1;
3924 } else if (curr_state ==
3925 SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01) {
3926 pr_info("ADSPRPC: STATE_UNINIT_V01 received\n");
3927 }
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303928 break;
3929 }
3930 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303931
3932 return NOTIFY_DONE;
3933}
3934
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003935static const struct file_operations fops = {
3936 .open = fastrpc_device_open,
3937 .release = fastrpc_device_release,
3938 .unlocked_ioctl = fastrpc_device_ioctl,
3939 .compat_ioctl = compat_fastrpc_device_ioctl,
3940};
3941
3942static const struct of_device_id fastrpc_match_table[] = {
3943 { .compatible = "qcom,msm-fastrpc-adsp", },
3944 { .compatible = "qcom,msm-fastrpc-compute", },
3945 { .compatible = "qcom,msm-fastrpc-compute-cb", },
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303946 { .compatible = "qcom,msm-fastrpc-legacy-compute", },
3947 { .compatible = "qcom,msm-fastrpc-legacy-compute-cb", },
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003948 { .compatible = "qcom,msm-adsprpc-mem-region", },
3949 {}
3950};
3951
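/*
 * fastrpc_cb_probe() - probe one "qcom,msm-fastrpc-compute-cb" context bank.
 * Matches the "label" property against gcinfo, reads the iommus specifier
 * for the context bank, creates a 0x78000000-byte ARM IOMMU mapping starting
 * at 0x80000000 (0x60000000 for secure context banks), attaches the device
 * and registers the session; it also creates the "global" debugfs node.
 */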
3952static int fastrpc_cb_probe(struct device *dev)
3953{
3954 struct fastrpc_channel_ctx *chan;
3955 struct fastrpc_session_ctx *sess;
3956 struct of_phandle_args iommuspec;
3957 const char *name;
3958 unsigned int start = 0x80000000;
3959 int err = 0, i;
3960 int secure_vmid = VMID_CP_PIXEL;
3961
c_mtharue1a5ce12017-10-13 20:47:09 +05303962 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
3963 "label", NULL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003964 if (err)
3965 goto bail;
3966 for (i = 0; i < NUM_CHANNELS; i++) {
3967 if (!gcinfo[i].name)
3968 continue;
3969 if (!strcmp(name, gcinfo[i].name))
3970 break;
3971 }
3972 VERIFY(err, i < NUM_CHANNELS);
3973 if (err)
3974 goto bail;
3975 chan = &gcinfo[i];
3976 VERIFY(err, chan->sesscount < NUM_SESSIONS);
3977 if (err)
3978 goto bail;
3979
3980 VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
3981 "#iommu-cells", 0, &iommuspec));
3982 if (err)
3983 goto bail;
3984 sess = &chan->session[chan->sesscount];
3985 sess->smmu.cb = iommuspec.args[0] & 0xf;
3986 sess->used = 0;
3987 sess->smmu.coherent = of_property_read_bool(dev->of_node,
3988 "dma-coherent");
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303989 sess->smmu.sharedcb = of_property_read_bool(dev->of_node,
3990 "shared-cb");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003991 sess->smmu.secure = of_property_read_bool(dev->of_node,
3992 "qcom,secure-context-bank");
3993 if (sess->smmu.secure)
3994 start = 0x60000000;
3995 VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
3996 arm_iommu_create_mapping(&platform_bus_type,
Tharun Kumar Meruguca183f92017-04-27 17:43:27 +05303997 start, 0x78000000)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003998 if (err)
3999 goto bail;
4000
4001 if (sess->smmu.secure)
4002 iommu_domain_set_attr(sess->smmu.mapping->domain,
4003 DOMAIN_ATTR_SECURE_VMID,
4004 &secure_vmid);
4005
4006 VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
4007 if (err)
4008 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05304009 sess->smmu.dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004010 sess->smmu.enabled = 1;
4011 chan->sesscount++;
Sathish Ambley1ca68232017-01-19 10:32:55 -08004012 debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
4013 NULL, &debugfs_fops);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004014bail:
4015 return err;
4016}
4017
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304018static int fastrpc_cb_legacy_probe(struct device *dev)
4019{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304020 struct fastrpc_channel_ctx *chan;
4021 struct fastrpc_session_ctx *first_sess = NULL, *sess = NULL;
4022 const char *name;
4023 unsigned int *sids = NULL, sids_size = 0;
4024 int err = 0, ret = 0, i;
4025
4026 unsigned int start = 0x80000000;
4027
4028 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
4029 "label", NULL)));
4030 if (err)
4031 goto bail;
4032
4033 for (i = 0; i < NUM_CHANNELS; i++) {
4034 if (!gcinfo[i].name)
4035 continue;
4036 if (!strcmp(name, gcinfo[i].name))
4037 break;
4038 }
4039 VERIFY(err, i < NUM_CHANNELS);
4040 if (err)
4041 goto bail;
4042
4043 chan = &gcinfo[i];
4044 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4045 if (err)
4046 goto bail;
4047
4048 first_sess = &chan->session[chan->sesscount];
4049
4050 VERIFY(err, NULL != of_get_property(dev->of_node,
4051 "sids", &sids_size));
4052 if (err)
4053 goto bail;
4054
4055 VERIFY(err, NULL != (sids = kzalloc(sids_size, GFP_KERNEL)));
4056 if (err)
4057 goto bail;
4058 ret = of_property_read_u32_array(dev->of_node, "sids", sids,
4059 sids_size/sizeof(unsigned int));
4060 if (ret)
4061 goto bail;
4062
4063 VERIFY(err, !IS_ERR_OR_NULL(first_sess->smmu.mapping =
4064 arm_iommu_create_mapping(&platform_bus_type,
4065 start, 0x78000000)));
4066 if (err)
4067 goto bail;
4068
4069 VERIFY(err, !arm_iommu_attach_device(dev, first_sess->smmu.mapping));
4070 if (err)
4071 goto bail;
4072
4073
4074 for (i = 0; i < sids_size/sizeof(unsigned int); i++) {
4075 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4076 if (err)
4077 goto bail;
4078 sess = &chan->session[chan->sesscount];
4079 sess->smmu.cb = sids[i];
4080 sess->smmu.dev = dev;
4081 sess->smmu.mapping = first_sess->smmu.mapping;
4082 sess->smmu.enabled = 1;
4083 sess->used = 0;
4084 sess->smmu.coherent = false;
4085 sess->smmu.secure = false;
4086 chan->sesscount++;
4087 }
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304088bail:
4089 kfree(sids);
4090 return err;
4091}
4092
4093
4094
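/*
 * init_secure_vmid_list() - parse a DT property (e.g.
 * "qcom,adsp-remoteheap-vmid") into a secure_vm descriptor: the VMID list is
 * copied as-is and every entry is given read/write/exec permissions for
 * later hyp_assign_phys() calls.
 */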
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304095static void init_secure_vmid_list(struct device *dev, char *prop_name,
4096 struct secure_vm *destvm)
4097{
4098 int err = 0;
4099 u32 len = 0, i = 0;
4100 u32 *rhvmlist = NULL;
4101 u32 *rhvmpermlist = NULL;
4102
4103 if (!of_find_property(dev->of_node, prop_name, &len))
4104 goto bail;
4105 if (len == 0)
4106 goto bail;
4107 len /= sizeof(u32);
4108 VERIFY(err, NULL != (rhvmlist = kcalloc(len, sizeof(u32), GFP_KERNEL)));
4109 if (err)
4110 goto bail;
4111 VERIFY(err, NULL != (rhvmpermlist = kcalloc(len, sizeof(u32),
4112 GFP_KERNEL)));
4113 if (err)
4114 goto bail;
4115 for (i = 0; i < len; i++) {
4116 err = of_property_read_u32_index(dev->of_node, prop_name, i,
4117 &rhvmlist[i]);
4118 rhvmpermlist[i] = PERM_READ | PERM_WRITE | PERM_EXEC;
4119 pr_info("ADSPRPC: Secure VMID = %d", rhvmlist[i]);
4120 if (err) {
4121 pr_err("ADSPRPC: Failed to read VMID\n");
4122 goto bail;
4123 }
4124 }
4125 destvm->vmid = rhvmlist;
4126 destvm->vmperm = rhvmpermlist;
4127 destvm->vmcount = len;
4128bail:
4129 if (err) {
4130 kfree(rhvmlist);
4131 kfree(rhvmpermlist);
4132 }
4133}
4134
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304135static void configure_secure_channels(uint32_t secure_domains)
4136{
4137 struct fastrpc_apps *me = &gfa;
4138 int ii = 0;
4139 /*
4140 * secure_domains contains the bitmask of the secure channels
4141 * Bit 0 - ADSP
4142 * Bit 1 - MDSP
4143 * Bit 2 - SLPI
4144 * Bit 3 - CDSP
4145 */
4146 for (ii = ADSP_DOMAIN_ID; ii <= CDSP_DOMAIN_ID; ++ii) {
4147 int secure = (secure_domains >> ii) & 0x01;
4148
4149 me->channel[ii].secure = secure;
4150 }
4151}
4152
4153
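/*
 * fastrpc_probe() - platform driver probe.  Depending on the compatible
 * string it either reads the compute-node configuration (remote heap VMIDs,
 * RPC latency, secure-domains bitmask), probes a context bank, switches to
 * the legacy SMD transport, or records the ADSP CMA mem-region and assigns
 * it to the remote VMIDs when access is not restricted.
 */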
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004154static int fastrpc_probe(struct platform_device *pdev)
4155{
4156 int err = 0;
4157 struct fastrpc_apps *me = &gfa;
4158 struct device *dev = &pdev->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004159 struct device_node *ion_node, *node;
4160 struct platform_device *ion_pdev;
4161 struct cma *cma;
4162 uint32_t val;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304163 int ret = 0;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304164 uint32_t secure_domains;
c_mtharu63ffc012017-11-16 15:26:56 +05304165
4166 if (of_device_is_compatible(dev->of_node,
4167 "qcom,msm-fastrpc-compute")) {
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304168 init_secure_vmid_list(dev, "qcom,adsp-remoteheap-vmid",
4169 &gcinfo[0].rhvm);
c_mtharu63ffc012017-11-16 15:26:56 +05304170
c_mtharu63ffc012017-11-16 15:26:56 +05304171
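		/*
		 * Optional latency hint in microseconds; presumably consumed
		 * by the driver's PM QoS voting (see the pm_qos include).
		 */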
4172 of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
4173 &me->latency);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304174 if (of_get_property(dev->of_node,
4175 "qcom,secure-domains", NULL) != NULL) {
4176 VERIFY(err, !of_property_read_u32(dev->of_node,
4177 "qcom,secure-domains",
4178 &secure_domains));
zhaochenfc798572018-08-17 15:32:37 +08004179 if (!err) {
4180 me->secure_flag = true;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304181 configure_secure_channels(secure_domains);
zhaochenfc798572018-08-17 15:32:37 +08004182 } else {
4183 me->secure_flag = false;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304184 pr_info("adsprpc: unable to read the domain configuration from dts\n");
zhaochenfc798572018-08-17 15:32:37 +08004185 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304186 }
c_mtharu63ffc012017-11-16 15:26:56 +05304187 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004188 if (of_device_is_compatible(dev->of_node,
4189 "qcom,msm-fastrpc-compute-cb"))
4190 return fastrpc_cb_probe(dev);
4191
4192 if (of_device_is_compatible(dev->of_node,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304193 "qcom,msm-fastrpc-legacy-compute")) {
4194 me->glink = false;
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304195 me->legacy = 1;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304196 }
4197
4198 if (of_device_is_compatible(dev->of_node,
 4199		    "qcom,msm-fastrpc-legacy-compute-cb")) {
4200 return fastrpc_cb_legacy_probe(dev);
4201 }
4202
4203 if (of_device_is_compatible(dev->of_node,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004204 "qcom,msm-adsprpc-mem-region")) {
4205 me->dev = dev;
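		/*
		 * Look up the ADSP ION heap and record the base address and
		 * size of its CMA area in me->range.
		 */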
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004206 ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
4207 if (ion_node) {
4208 for_each_available_child_of_node(ion_node, node) {
4209 if (of_property_read_u32(node, "reg", &val))
4210 continue;
4211 if (val != ION_ADSP_HEAP_ID)
4212 continue;
4213 ion_pdev = of_find_device_by_node(node);
4214 if (!ion_pdev)
4215 break;
4216 cma = dev_get_cma_area(&ion_pdev->dev);
4217 if (cma) {
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304218 me->range.addr = cma_get_base(cma);
4219 me->range.size =
4220 (size_t)cma_get_size(cma);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004221 }
4222 break;
4223 }
4224 }
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304225 if (me->range.addr && !of_property_read_bool(dev->of_node,
Tharun Kumar Merugu6b7a4a22018-01-17 16:08:07 +05304226 "restrict-access")) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004227 int srcVM[1] = {VMID_HLOS};
4228 int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
4229 VMID_ADSP_Q6};
Sathish Ambley84d11862017-05-15 14:36:05 -07004230 int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004231 PERM_READ | PERM_WRITE | PERM_EXEC,
4232 PERM_READ | PERM_WRITE | PERM_EXEC,
4233 PERM_READ | PERM_WRITE | PERM_EXEC,
4234 };
4235
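			/*
			 * Reassign the region from HLOS-only ownership so
			 * that the MSS, SSC and ADSP Q6 VMs can also access
			 * it with read/write/execute permissions.
			 */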
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304236 VERIFY(err, !hyp_assign_phys(me->range.addr,
4237 me->range.size, srcVM, 1,
4238 destVM, destVMperm, 4));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004239 if (err)
4240 goto bail;
4241 }
4242 return 0;
4243 }
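	/*
	 * The two blocks below register with the service locator so the
	 * driver is notified of the state of the audio and sensors
	 * protection domains on the ADSP (PDR support).
	 */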
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304244 if (of_property_read_bool(dev->of_node,
4245 "qcom,fastrpc-adsp-audio-pdr")) {
4246 int session;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004247
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304248 VERIFY(err, !fastrpc_get_adsp_session(
4249 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
4250 if (err)
4251 goto spdbail;
4252 me->channel[0].spd[session].get_service_nb.notifier_call =
4253 fastrpc_get_service_location_notify;
4254 ret = get_service_location(
4255 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
4256 AUDIO_PDR_ADSP_SERVICE_NAME,
4257 &me->channel[0].spd[session].get_service_nb);
4258 if (ret)
 4259			pr_err("ADSPRPC: audio PDR get service location failed: %d\n",
 4260				ret);
4261 }
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304262 if (of_property_read_bool(dev->of_node,
4263 "qcom,fastrpc-adsp-sensors-pdr")) {
4264 int session;
4265
4266 VERIFY(err, !fastrpc_get_adsp_session(
4267 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
4268 if (err)
4269 goto spdbail;
4270 me->channel[0].spd[session].get_service_nb.notifier_call =
4271 fastrpc_get_service_location_notify;
4272 ret = get_service_location(
4273 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
4274 SENSORS_PDR_ADSP_SERVICE_NAME,
4275 &me->channel[0].spd[session].get_service_nb);
4276 if (ret)
 4277			pr_err("ADSPRPC: sensors PDR get service location failed: %d\n",
 4278				ret);
4279 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304280spdbail:
4281 err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004282 VERIFY(err, !of_platform_populate(pdev->dev.of_node,
4283 fastrpc_match_table,
4284 NULL, &pdev->dev));
4285 if (err)
4286 goto bail;
4287bail:
4288 return err;
4289}
4290
4291static void fastrpc_deinit(void)
4292{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304293 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004294 struct fastrpc_channel_ctx *chan = gcinfo;
4295 int i, j;
4296
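	/*
	 * Per-channel teardown: drop the transport channel reference,
	 * detach and release each session's SMMU mapping, then free the
	 * remote-heap VMID and permission lists.
	 */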
4297 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
4298 if (chan->chan) {
4299 kref_put_mutex(&chan->kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304300 fastrpc_channel_close, &me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05304301 chan->chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004302 }
4303 for (j = 0; j < NUM_SESSIONS; j++) {
4304 struct fastrpc_session_ctx *sess = &chan->session[j];
c_mtharue1a5ce12017-10-13 20:47:09 +05304305 if (sess->smmu.dev) {
4306 arm_iommu_detach_device(sess->smmu.dev);
4307 sess->smmu.dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004308 }
4309 if (sess->smmu.mapping) {
4310 arm_iommu_release_mapping(sess->smmu.mapping);
c_mtharue1a5ce12017-10-13 20:47:09 +05304311 sess->smmu.mapping = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004312 }
4313 }
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304314 kfree(chan->rhvm.vmid);
4315 kfree(chan->rhvm.vmperm);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004316 }
4317}
4318
4319static struct platform_driver fastrpc_driver = {
4320 .probe = fastrpc_probe,
4321 .driver = {
4322 .name = "fastrpc",
4323 .owner = THIS_MODULE,
4324 .of_match_table = fastrpc_match_table,
4325 },
4326};
4327
4328static int __init fastrpc_device_init(void)
4329{
4330 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +05304331 struct device *dev = NULL;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304332 struct device *secure_dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004333 int err = 0, i;
4334
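	/* Create the "adsprpc" debugfs directory for driver debug entries. */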
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05304335 debugfs_root = debugfs_create_dir("adsprpc", NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004336 memset(me, 0, sizeof(*me));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004337 fastrpc_init(me);
4338 me->dev = NULL;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304339 me->glink = true;
zhaochenfc798572018-08-17 15:32:37 +08004340 me->secure_flag = false;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004341 VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
4342 if (err)
4343 goto register_bail;
4344 VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
4345 DEVICE_NAME));
4346 if (err)
4347 goto alloc_chrdev_bail;
4348 cdev_init(&me->cdev, &fops);
4349 me->cdev.owner = THIS_MODULE;
4350 VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304351 NUM_DEVICES));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004352 if (err)
4353 goto cdev_init_bail;
4354 me->class = class_create(THIS_MODULE, "fastrpc");
4355 VERIFY(err, !IS_ERR(me->class));
4356 if (err)
4357 goto class_create_bail;
4358 me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304359
4360 /*
 4361	 * Create the character devices and register them with sysfs.
 4362	 * The first device is created with minor number MINOR_NUM_DEV.
4363 */
Sathish Ambley36849af2017-02-02 09:35:55 -08004364 dev = device_create(me->class, NULL,
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304365 MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV),
4366 NULL, DEVICE_NAME);
Sathish Ambley36849af2017-02-02 09:35:55 -08004367 VERIFY(err, !IS_ERR_OR_NULL(dev));
4368 if (err)
4369 goto device_create_bail;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304370
 4371	/* Create the secure device node with minor MINOR_NUM_SECURE_DEV */
4372 secure_dev = device_create(me->class, NULL,
4373 MKDEV(MAJOR(me->dev_no), MINOR_NUM_SECURE_DEV),
4374 NULL, DEVICE_NAME_SECURE);
4375 VERIFY(err, !IS_ERR_OR_NULL(secure_dev));
4376 if (err)
4377 goto device_create_bail;
4378
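	/*
	 * Point every channel at the secure device node by default (the
	 * CDSP channel uses the default node), reset its SSR bookkeeping
	 * and register a subsystem-restart notifier for it.
	 */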
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004379 for (i = 0; i < NUM_CHANNELS; i++) {
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304380 me->channel[i].dev = secure_dev;
4381 if (i == CDSP_DOMAIN_ID)
4382 me->channel[i].dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004383 me->channel[i].ssrcount = 0;
4384 me->channel[i].prevssrcount = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05304385 me->channel[i].issubsystemup = 1;
4386 me->channel[i].ramdumpenabled = 0;
4387 me->channel[i].remoteheap_ramdump_dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004388 me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
4389 me->channel[i].handle = subsys_notif_register_notifier(
4390 gcinfo[i].subsys,
4391 &me->channel[i].nb);
4392 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004393 me->client = msm_ion_client_create(DEVICE_NAME);
4394 VERIFY(err, !IS_ERR_OR_NULL(me->client));
4395 if (err)
4396 goto device_create_bail;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05304397
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004398 return 0;
4399device_create_bail:
4400 for (i = 0; i < NUM_CHANNELS; i++) {
Sathish Ambley36849af2017-02-02 09:35:55 -08004401 if (me->channel[i].handle)
4402 subsys_notif_unregister_notifier(me->channel[i].handle,
4403 &me->channel[i].nb);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004404 }
Sathish Ambley36849af2017-02-02 09:35:55 -08004405 if (!IS_ERR_OR_NULL(dev))
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304406 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4407 MINOR_NUM_DEV));
4408 if (!IS_ERR_OR_NULL(secure_dev))
4409 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4410 MINOR_NUM_SECURE_DEV));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004411 class_destroy(me->class);
4412class_create_bail:
4413 cdev_del(&me->cdev);
4414cdev_init_bail:
4415 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
4416alloc_chrdev_bail:
4417register_bail:
4418 fastrpc_deinit();
4419 return err;
4420}
4421
4422static void __exit fastrpc_device_exit(void)
4423{
4424 struct fastrpc_apps *me = &gfa;
4425 int i;
4426
4427 fastrpc_file_list_dtor(me);
4428 fastrpc_deinit();
4429 for (i = 0; i < NUM_CHANNELS; i++) {
4430 if (!gcinfo[i].name)
4431 continue;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004432 subsys_notif_unregister_notifier(me->channel[i].handle,
4433 &me->channel[i].nb);
4434 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304435
 4436	/* Destroy the secure and non-secure devices */
4437 device_destroy(me->class, MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV));
4438 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4439 MINOR_NUM_SECURE_DEV));
4440
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004441 class_destroy(me->class);
4442 cdev_del(&me->cdev);
4443 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
4444 ion_client_destroy(me->client);
Sathish Ambley1ca68232017-01-19 10:32:55 -08004445 debugfs_remove_recursive(debugfs_root);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004446}
4447
4448late_initcall(fastrpc_device_init);
4449module_exit(fastrpc_device_exit);
4450
4451MODULE_LICENSE("GPL v2");