blob: 63368cfc2ac216e1450a5da06f32a7e634cd5d01 [file] [log] [blame]
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001/*
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +053029#include <soc/qcom/smd.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070030#include <soc/qcom/subsystem_notif.h>
31#include <soc/qcom/subsystem_restart.h>
Tharun Kumar Merugudf860662018-01-17 19:59:50 +053032#include <soc/qcom/service-notifier.h>
33#include <soc/qcom/service-locator.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070034#include <linux/scatterlist.h>
35#include <linux/fs.h>
36#include <linux/uaccess.h>
37#include <linux/device.h>
38#include <linux/of.h>
39#include <linux/of_address.h>
40#include <linux/of_platform.h>
41#include <linux/dma-contiguous.h>
42#include <linux/cma.h>
43#include <linux/iommu.h>
44#include <linux/kref.h>
45#include <linux/sort.h>
46#include <linux/msm_dma_iommu_mapping.h>
47#include <asm/dma-iommu.h>
c_mtharue1a5ce12017-10-13 20:47:09 +053048#include <soc/qcom/scm.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070049#include "adsprpc_compat.h"
50#include "adsprpc_shared.h"
c_mtharue1a5ce12017-10-13 20:47:09 +053051#include <soc/qcom/ramdump.h>
Sathish Ambley1ca68232017-01-19 10:32:55 -080052#include <linux/debugfs.h>
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053053#include <linux/pm_qos.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070054#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
55#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
56#define TZ_PIL_AUTH_QDSP6_PROC 1
c_mtharue1a5ce12017-10-13 20:47:09 +053057#define ADSP_MMAP_HEAP_ADDR 4
58#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
Tharun Kumar Merugue073de72018-07-30 23:57:47 +053059#define ADSP_MMAP_ADD_PAGES 0x1000
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +053060#define FASTRPC_DMAHANDLE_NOMAP (16)
61
Sathish Ambley69e1ab02016-10-18 10:28:15 -070062#define FASTRPC_ENOSUCH 39
63#define VMID_SSC_Q6 5
64#define VMID_ADSP_Q6 6
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +053065#define DEBUGFS_SIZE 3072
66#define UL_SIZE 25
67#define PID_SIZE 10
Sathish Ambley69e1ab02016-10-18 10:28:15 -070068
Tharun Kumar Merugudf860662018-01-17 19:59:50 +053069#define AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME "audio_pdr_adsprpc"
70#define AUDIO_PDR_ADSP_SERVICE_NAME "avs/audio"
71
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +053072#define SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME "sensors_pdr_adsprpc"
73#define SENSORS_PDR_ADSP_SERVICE_NAME "tms/servreg"
74
Sathish Ambley69e1ab02016-10-18 10:28:15 -070075#define RPC_TIMEOUT (5 * HZ)
76#define BALIGN 128
77#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
78#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
Sathish Ambleybae51902017-07-03 15:00:49 -070079#define M_FDLIST (16)
80#define M_CRCLIST (64)
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +053081#define SESSION_ID_INDEX (30)
c_mtharufdac6892017-10-12 13:09:01 +053082#define FASTRPC_CTX_MAGIC (0xbeeddeed)
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +053083#define FASTRPC_CTX_MAX (256)
84#define FASTRPC_CTXID_MASK (0xFF0)
Tharun Kumar Merugud996b262018-07-18 22:28:53 +053085#define NUM_DEVICES 2 /* adsprpc-smd, adsprpc-smd-secure */
86#define MINOR_NUM_DEV 0
87#define MINOR_NUM_SECURE_DEV 1
88#define NON_SECURE_CHANNEL 0
89#define SECURE_CHANNEL 1
90
91#define ADSP_DOMAIN_ID (0)
92#define MDSP_DOMAIN_ID (1)
93#define SDSP_DOMAIN_ID (2)
94#define CDSP_DOMAIN_ID (3)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070095
96#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
97
98#define FASTRPC_LINK_STATE_DOWN (0x0)
99#define FASTRPC_LINK_STATE_UP (0x1)
100#define FASTRPC_LINK_DISCONNECTED (0x0)
101#define FASTRPC_LINK_CONNECTING (0x1)
102#define FASTRPC_LINK_CONNECTED (0x3)
103#define FASTRPC_LINK_DISCONNECTING (0x7)
c_mtharu314a4202017-11-15 22:09:17 +0530104#define FASTRPC_LINK_REMOTE_DISCONNECTING (0x8)
105#define FASTRPC_GLINK_INTENT_LEN (64)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700106
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530107#define PERF_KEYS \
108 "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke:tid:ptr"
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +0530109#define FASTRPC_STATIC_HANDLE_KERNEL (1)
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800110#define FASTRPC_STATIC_HANDLE_LISTENER (3)
111#define FASTRPC_STATIC_HANDLE_MAX (20)
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +0530112#define FASTRPC_LATENCY_CTRL_ENB (1)
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800113
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +0530114#define INIT_FILELEN_MAX (2*1024*1024)
115#define INIT_MEMLEN_MAX (8*1024*1024)
116
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800117#define PERF_END (void)0
118
119#define PERF(enb, cnt, ff) \
120 {\
121 struct timespec startT = {0};\
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530122 int64_t *counter = cnt;\
123 if (enb && counter) {\
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800124 getnstimeofday(&startT);\
125 } \
126 ff ;\
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530127 if (enb && counter) {\
128 *counter += getnstimediff(&startT);\
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800129 } \
130 }
131
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530132#define GET_COUNTER(perf_ptr, offset) \
133 (perf_ptr != NULL ?\
134 (((offset >= 0) && (offset < PERF_KEY_MAX)) ?\
135 (int64_t *)(perf_ptr + offset)\
136 : (int64_t *)NULL) : (int64_t *)NULL)
137
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700138static int fastrpc_glink_open(int cid);
139static void fastrpc_glink_close(void *chan, int cid);
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +0530140static int fastrpc_pdr_notifier_cb(struct notifier_block *nb,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530141 unsigned long code,
142 void *data);
Sathish Ambley1ca68232017-01-19 10:32:55 -0800143static struct dentry *debugfs_root;
144static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700145
146static inline uint64_t buf_page_start(uint64_t buf)
147{
148 uint64_t start = (uint64_t) buf & PAGE_MASK;
149 return start;
150}
151
152static inline uint64_t buf_page_offset(uint64_t buf)
153{
154 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
155 return offset;
156}
157
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530158static inline uint64_t buf_num_pages(uint64_t buf, size_t len)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700159{
160 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
161 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530162 uint64_t nPages = end - start + 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700163 return nPages;
164}
165
166static inline uint64_t buf_page_size(uint32_t size)
167{
168 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
169
170 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
171}
172
173static inline void *uint64_to_ptr(uint64_t addr)
174{
175 void *ptr = (void *)((uintptr_t)addr);
176
177 return ptr;
178}
179
180static inline uint64_t ptr_to_uint64(void *ptr)
181{
182 uint64_t addr = (uint64_t)((uintptr_t)ptr);
183
184 return addr;
185}
186
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +0530187struct secure_vm {
188 int *vmid;
189 int *vmperm;
190 int vmcount;
191};
192
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700193struct fastrpc_file;
194
195struct fastrpc_buf {
196 struct hlist_node hn;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530197 struct hlist_node hn_rem;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700198 struct fastrpc_file *fl;
199 void *virt;
200 uint64_t phys;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530201 size_t size;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530202 unsigned long dma_attr;
203 uintptr_t raddr;
204 uint32_t flags;
205 int remote;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700206};
207
208struct fastrpc_ctx_lst;
209
210struct overlap {
211 uintptr_t start;
212 uintptr_t end;
213 int raix;
214 uintptr_t mstart;
215 uintptr_t mend;
216 uintptr_t offset;
217};
218
219struct smq_invoke_ctx {
220 struct hlist_node hn;
221 struct completion work;
222 int retval;
223 int pid;
224 int tgid;
225 remote_arg_t *lpra;
226 remote_arg64_t *rpra;
227 int *fds;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700228 struct fastrpc_mmap **maps;
229 struct fastrpc_buf *buf;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530230 size_t used;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700231 struct fastrpc_file *fl;
232 uint32_t sc;
233 struct overlap *overs;
234 struct overlap **overps;
235 struct smq_msg msg;
c_mtharufdac6892017-10-12 13:09:01 +0530236 unsigned int magic;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530237 unsigned int *attrs;
238 uint32_t *crc;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +0530239 uint64_t ctxid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700240};
241
242struct fastrpc_ctx_lst {
243 struct hlist_head pending;
244 struct hlist_head interrupted;
245};
246
247struct fastrpc_smmu {
c_mtharue1a5ce12017-10-13 20:47:09 +0530248 struct device *dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700249 struct dma_iommu_mapping *mapping;
250 int cb;
251 int enabled;
252 int faults;
253 int secure;
254 int coherent;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +0530255 int sharedcb;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700256};
257
258struct fastrpc_session_ctx {
259 struct device *dev;
260 struct fastrpc_smmu smmu;
261 int used;
262};
263
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530264struct fastrpc_static_pd {
265 char *spdname;
266 struct notifier_block pdrnb;
267 struct notifier_block get_service_nb;
268 void *pdrhandle;
269 int pdrcount;
270 int prevpdrcount;
271 int ispdup;
272};
273
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700274struct fastrpc_glink_info {
275 int link_state;
276 int port_state;
277 struct glink_open_config cfg;
278 struct glink_link_info link_info;
279 void *link_notify_handle;
280};
281
282struct fastrpc_channel_ctx {
283 char *name;
284 char *subsys;
285 void *chan;
286 struct device *dev;
287 struct fastrpc_session_ctx session[NUM_SESSIONS];
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530288 struct fastrpc_static_pd spd[NUM_SESSIONS];
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700289 struct completion work;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +0530290 struct completion workport;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700291 struct notifier_block nb;
292 struct kref kref;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530293 int channel;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700294 int sesscount;
295 int ssrcount;
296 void *handle;
297 int prevssrcount;
c_mtharue1a5ce12017-10-13 20:47:09 +0530298 int issubsystemup;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700299 int vmid;
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +0530300 struct secure_vm rhvm;
c_mtharue1a5ce12017-10-13 20:47:09 +0530301 int ramdumpenabled;
302 void *remoteheap_ramdump_dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700303 struct fastrpc_glink_info link;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +0530304 /* Indicates, if channel is restricted to secure node only */
305 int secure;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700306};
307
308struct fastrpc_apps {
309 struct fastrpc_channel_ctx *channel;
310 struct cdev cdev;
311 struct class *class;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +0530312 struct mutex smd_mutex;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700313 struct smq_phy_page range;
314 struct hlist_head maps;
c_mtharue1a5ce12017-10-13 20:47:09 +0530315 uint32_t staticpd_flags;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700316 dev_t dev_no;
317 int compat;
318 struct hlist_head drivers;
319 spinlock_t hlock;
320 struct ion_client *client;
321 struct device *dev;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +0530322 unsigned int latency;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530323 bool glink;
324 bool legacy;
zhaochenfc798572018-08-17 15:32:37 +0800325 bool secure_flag;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +0530326 spinlock_t ctxlock;
327 struct smq_invoke_ctx *ctxtable[FASTRPC_CTX_MAX];
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700328};
329
330struct fastrpc_mmap {
331 struct hlist_node hn;
332 struct fastrpc_file *fl;
333 struct fastrpc_apps *apps;
334 int fd;
335 uint32_t flags;
336 struct dma_buf *buf;
337 struct sg_table *table;
338 struct dma_buf_attachment *attach;
339 struct ion_handle *handle;
340 uint64_t phys;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530341 size_t size;
342 uintptr_t va;
343 size_t len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700344 int refs;
345 uintptr_t raddr;
346 int uncached;
347 int secure;
348 uintptr_t attr;
349};
350
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530351enum fastrpc_perfkeys {
352 PERF_COUNT = 0,
353 PERF_FLUSH = 1,
354 PERF_MAP = 2,
355 PERF_COPY = 3,
356 PERF_LINK = 4,
357 PERF_GETARGS = 5,
358 PERF_PUTARGS = 6,
359 PERF_INVARGS = 7,
360 PERF_INVOKE = 8,
361 PERF_KEY_MAX = 9,
362};
363
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800364struct fastrpc_perf {
365 int64_t count;
366 int64_t flush;
367 int64_t map;
368 int64_t copy;
369 int64_t link;
370 int64_t getargs;
371 int64_t putargs;
372 int64_t invargs;
373 int64_t invoke;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530374 int64_t tid;
375 struct hlist_node hn;
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800376};
377
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700378struct fastrpc_file {
379 struct hlist_node hn;
380 spinlock_t hlock;
381 struct hlist_head maps;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530382 struct hlist_head cached_bufs;
383 struct hlist_head remote_bufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700384 struct fastrpc_ctx_lst clst;
385 struct fastrpc_session_ctx *sctx;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530386 struct fastrpc_buf *init_mem;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700387 struct fastrpc_session_ctx *secsctx;
388 uint32_t mode;
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800389 uint32_t profile;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +0530390 int sessionid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700391 int tgid;
392 int cid;
393 int ssrcount;
394 int pd;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530395 char *spdname;
tharun kumar9f899ea2017-07-03 17:07:03 +0530396 int file_close;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +0530397 int sharedcb;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700398 struct fastrpc_apps *apps;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530399 struct hlist_head perf;
Sathish Ambley1ca68232017-01-19 10:32:55 -0800400 struct dentry *debugfs_file;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530401 struct mutex perf_mutex;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +0530402 struct pm_qos_request pm_qos_req;
403 int qos_request;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +0530404 struct mutex map_mutex;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +0530405 struct mutex fl_map_mutex;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +0530406 int refcount;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +0530407 /* Identifies the device (MINOR_NUM_DEV / MINOR_NUM_SECURE_DEV) */
408 int dev_minor;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +0530409 char *debug_buf;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700410};
411
412static struct fastrpc_apps gfa;
413
414static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
415 {
416 .name = "adsprpc-smd",
417 .subsys = "adsp",
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530418 .channel = SMD_APPS_QDSP,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700419 .link.link_info.edge = "lpass",
420 .link.link_info.transport = "smem",
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530421 .spd = {
422 {
423 .spdname =
424 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
425 .pdrnb.notifier_call =
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +0530426 fastrpc_pdr_notifier_cb,
427 },
428 {
429 .spdname =
430 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
431 .pdrnb.notifier_call =
432 fastrpc_pdr_notifier_cb,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530433 }
434 },
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700435 },
436 {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700437 .name = "mdsprpc-smd",
438 .subsys = "modem",
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530439 .channel = SMD_APPS_MODEM,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700440 .link.link_info.edge = "mpss",
441 .link.link_info.transport = "smem",
442 },
443 {
Sathish Ambley36849af2017-02-02 09:35:55 -0800444 .name = "sdsprpc-smd",
445 .subsys = "slpi",
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530446 .channel = SMD_APPS_DSPS,
Sathish Ambley36849af2017-02-02 09:35:55 -0800447 .link.link_info.edge = "dsps",
448 .link.link_info.transport = "smem",
Sathish Ambley36849af2017-02-02 09:35:55 -0800449 },
450 {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700451 .name = "cdsprpc-smd",
452 .subsys = "cdsp",
453 .link.link_info.edge = "cdsp",
454 .link.link_info.transport = "smem",
455 },
456};
457
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +0530458static int hlosvm[1] = {VMID_HLOS};
459static int hlosvmperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
460
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800461static inline int64_t getnstimediff(struct timespec *start)
462{
463 int64_t ns;
464 struct timespec ts, b;
465
466 getnstimeofday(&ts);
467 b = timespec_sub(ts, *start);
468 ns = timespec_to_ns(&b);
469 return ns;
470}
471
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530472static inline int64_t *getperfcounter(struct fastrpc_file *fl, int key)
473{
474 int err = 0;
475 int64_t *val = NULL;
476 struct fastrpc_perf *perf = NULL, *fperf = NULL;
477 struct hlist_node *n = NULL;
478
479 VERIFY(err, !IS_ERR_OR_NULL(fl));
480 if (err)
481 goto bail;
482
483 mutex_lock(&fl->perf_mutex);
484 hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
485 if (perf->tid == current->pid) {
486 fperf = perf;
487 break;
488 }
489 }
490
491 if (IS_ERR_OR_NULL(fperf)) {
492 fperf = kzalloc(sizeof(*fperf), GFP_KERNEL);
493
494 VERIFY(err, !IS_ERR_OR_NULL(fperf));
495 if (err) {
496 mutex_unlock(&fl->perf_mutex);
497 kfree(fperf);
498 goto bail;
499 }
500
501 fperf->tid = current->pid;
502 hlist_add_head(&fperf->hn, &fl->perf);
503 }
504
505 val = ((int64_t *)fperf) + key;
506 mutex_unlock(&fl->perf_mutex);
507bail:
508 return val;
509}
510
511
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700512static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
513{
c_mtharue1a5ce12017-10-13 20:47:09 +0530514 struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700515 int vmid;
516
517 if (!fl)
518 return;
519 if (cache) {
520 spin_lock(&fl->hlock);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530521 hlist_add_head(&buf->hn, &fl->cached_bufs);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700522 spin_unlock(&fl->hlock);
523 return;
524 }
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530525 if (buf->remote) {
526 spin_lock(&fl->hlock);
527 hlist_del_init(&buf->hn_rem);
528 spin_unlock(&fl->hlock);
529 buf->remote = 0;
530 buf->raddr = 0;
531 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700532 if (!IS_ERR_OR_NULL(buf->virt)) {
533 int destVM[1] = {VMID_HLOS};
534 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
535
536 if (fl->sctx->smmu.cb)
537 buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
538 vmid = fl->apps->channel[fl->cid].vmid;
539 if (vmid) {
540 int srcVM[2] = {VMID_HLOS, vmid};
541
542 hyp_assign_phys(buf->phys, buf_page_size(buf->size),
543 srcVM, 2, destVM, destVMperm, 1);
544 }
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530545 dma_free_attrs(fl->sctx->smmu.dev, buf->size, buf->virt,
546 buf->phys, buf->dma_attr);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700547 }
548 kfree(buf);
549}
550
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530551static void fastrpc_cached_buf_list_free(struct fastrpc_file *fl)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700552{
553 struct fastrpc_buf *buf, *free;
554
555 do {
556 struct hlist_node *n;
557
c_mtharue1a5ce12017-10-13 20:47:09 +0530558 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700559 spin_lock(&fl->hlock);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530560 hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700561 hlist_del_init(&buf->hn);
562 free = buf;
563 break;
564 }
565 spin_unlock(&fl->hlock);
566 if (free)
567 fastrpc_buf_free(free, 0);
568 } while (free);
569}
570
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530571static void fastrpc_remote_buf_list_free(struct fastrpc_file *fl)
572{
573 struct fastrpc_buf *buf, *free;
574
575 do {
576 struct hlist_node *n;
577
578 free = NULL;
579 spin_lock(&fl->hlock);
580 hlist_for_each_entry_safe(buf, n, &fl->remote_bufs, hn_rem) {
581 free = buf;
582 break;
583 }
584 spin_unlock(&fl->hlock);
585 if (free)
586 fastrpc_buf_free(free, 0);
587 } while (free);
588}
589
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700590static void fastrpc_mmap_add(struct fastrpc_mmap *map)
591{
c_mtharue1a5ce12017-10-13 20:47:09 +0530592 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
593 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
594 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700595
c_mtharue1a5ce12017-10-13 20:47:09 +0530596 spin_lock(&me->hlock);
597 hlist_add_head(&map->hn, &me->maps);
598 spin_unlock(&me->hlock);
599 } else {
600 struct fastrpc_file *fl = map->fl;
601
c_mtharue1a5ce12017-10-13 20:47:09 +0530602 hlist_add_head(&map->hn, &fl->maps);
c_mtharue1a5ce12017-10-13 20:47:09 +0530603 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700604}
605
c_mtharue1a5ce12017-10-13 20:47:09 +0530606static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530607 uintptr_t va, size_t len, int mflags, int refs,
c_mtharue1a5ce12017-10-13 20:47:09 +0530608 struct fastrpc_mmap **ppmap)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700609{
c_mtharue1a5ce12017-10-13 20:47:09 +0530610 struct fastrpc_apps *me = &gfa;
611 struct fastrpc_mmap *match = NULL, *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700612 struct hlist_node *n;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530613
614 if ((va + len) < va)
615 return -EOVERFLOW;
c_mtharue1a5ce12017-10-13 20:47:09 +0530616 if (mflags == ADSP_MMAP_HEAP_ADDR ||
617 mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
618 spin_lock(&me->hlock);
619 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
620 if (va >= map->va &&
621 va + len <= map->va + map->len &&
622 map->fd == fd) {
623 if (refs)
624 map->refs++;
625 match = map;
626 break;
627 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700628 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530629 spin_unlock(&me->hlock);
630 } else {
c_mtharue1a5ce12017-10-13 20:47:09 +0530631 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
632 if (va >= map->va &&
633 va + len <= map->va + map->len &&
634 map->fd == fd) {
635 if (refs)
636 map->refs++;
637 match = map;
638 break;
639 }
640 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700641 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700642 if (match) {
643 *ppmap = match;
644 return 0;
645 }
646 return -ENOTTY;
647}
648
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +0530649static int dma_alloc_memory(dma_addr_t *region_phys, void **vaddr, size_t size,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530650 unsigned long dma_attrs)
c_mtharue1a5ce12017-10-13 20:47:09 +0530651{
652 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +0530653
654 if (me->dev == NULL) {
655 pr_err("device adsprpc-mem is not initialized\n");
656 return -ENODEV;
657 }
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +0530658 *vaddr = dma_alloc_attrs(me->dev, size, region_phys, GFP_KERNEL,
Tharun Kumar Merugu48d5ff32018-04-16 19:24:16 +0530659 dma_attrs);
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +0530660 if (IS_ERR_OR_NULL(*vaddr)) {
661 pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx, returned %pK\n",
662 current->comm, __func__, size, (*vaddr));
c_mtharue1a5ce12017-10-13 20:47:09 +0530663 return -ENOMEM;
664 }
665 return 0;
666}
667
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700668static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530669 size_t len, struct fastrpc_mmap **ppmap)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700670{
c_mtharue1a5ce12017-10-13 20:47:09 +0530671 struct fastrpc_mmap *match = NULL, *map;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700672 struct hlist_node *n;
673 struct fastrpc_apps *me = &gfa;
674
675 spin_lock(&me->hlock);
676 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
677 if (map->raddr == va &&
678 map->raddr + map->len == va + len &&
679 map->refs == 1) {
680 match = map;
681 hlist_del_init(&map->hn);
682 break;
683 }
684 }
685 spin_unlock(&me->hlock);
686 if (match) {
687 *ppmap = match;
688 return 0;
689 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700690 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
691 if (map->raddr == va &&
692 map->raddr + map->len == va + len &&
693 map->refs == 1) {
694 match = map;
695 hlist_del_init(&map->hn);
696 break;
697 }
698 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700699 if (match) {
700 *ppmap = match;
701 return 0;
702 }
703 return -ENOTTY;
704}
705
c_mtharu7bd6a422017-10-17 18:15:37 +0530706static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700707{
c_mtharue1a5ce12017-10-13 20:47:09 +0530708 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700709 struct fastrpc_file *fl;
710 int vmid;
711 struct fastrpc_session_ctx *sess;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700712
713 if (!map)
714 return;
715 fl = map->fl;
c_mtharue1a5ce12017-10-13 20:47:09 +0530716 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
717 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
718 spin_lock(&me->hlock);
719 map->refs--;
720 if (!map->refs)
721 hlist_del_init(&map->hn);
722 spin_unlock(&me->hlock);
c_mtharu7bd6a422017-10-17 18:15:37 +0530723 if (map->refs > 0)
724 return;
c_mtharue1a5ce12017-10-13 20:47:09 +0530725 } else {
c_mtharue1a5ce12017-10-13 20:47:09 +0530726 map->refs--;
727 if (!map->refs)
728 hlist_del_init(&map->hn);
c_mtharu7bd6a422017-10-17 18:15:37 +0530729 if (map->refs > 0 && !flags)
730 return;
c_mtharue1a5ce12017-10-13 20:47:09 +0530731 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530732 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
733 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
Tharun Kumar Merugu48d5ff32018-04-16 19:24:16 +0530734 unsigned long dma_attrs = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700735
c_mtharue1a5ce12017-10-13 20:47:09 +0530736 if (me->dev == NULL) {
737 pr_err("failed to free remote heap allocation\n");
738 return;
739 }
740 if (map->phys) {
Tharun Kumar Merugu48d5ff32018-04-16 19:24:16 +0530741 dma_attrs |=
742 DMA_ATTR_SKIP_ZEROING | DMA_ATTR_NO_KERNEL_MAPPING;
743 dma_free_attrs(me->dev, map->size, (void *)map->va,
744 (dma_addr_t)map->phys, dma_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +0530745 }
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +0530746 } else if (map->flags == FASTRPC_DMAHANDLE_NOMAP) {
747 if (!IS_ERR_OR_NULL(map->handle))
748 ion_free(fl->apps->client, map->handle);
c_mtharue1a5ce12017-10-13 20:47:09 +0530749 } else {
750 int destVM[1] = {VMID_HLOS};
751 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
752
753 if (map->secure)
754 sess = fl->secsctx;
755 else
756 sess = fl->sctx;
757
758 if (!IS_ERR_OR_NULL(map->handle))
759 ion_free(fl->apps->client, map->handle);
760 if (sess && sess->smmu.enabled) {
761 if (map->size || map->phys)
762 msm_dma_unmap_sg(sess->smmu.dev,
763 map->table->sgl,
764 map->table->nents, DMA_BIDIRECTIONAL,
765 map->buf);
766 }
767 vmid = fl->apps->channel[fl->cid].vmid;
768 if (vmid && map->phys) {
769 int srcVM[2] = {VMID_HLOS, vmid};
770
771 hyp_assign_phys(map->phys, buf_page_size(map->size),
772 srcVM, 2, destVM, destVMperm, 1);
773 }
774
775 if (!IS_ERR_OR_NULL(map->table))
776 dma_buf_unmap_attachment(map->attach, map->table,
777 DMA_BIDIRECTIONAL);
778 if (!IS_ERR_OR_NULL(map->attach))
779 dma_buf_detach(map->buf, map->attach);
780 if (!IS_ERR_OR_NULL(map->buf))
781 dma_buf_put(map->buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700782 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700783 kfree(map);
784}
785
786static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
787 struct fastrpc_session_ctx **session);
788
789static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530790 unsigned int attr, uintptr_t va, size_t len, int mflags,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700791 struct fastrpc_mmap **ppmap)
792{
c_mtharue1a5ce12017-10-13 20:47:09 +0530793 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700794 struct fastrpc_session_ctx *sess;
795 struct fastrpc_apps *apps = fl->apps;
796 int cid = fl->cid;
797 struct fastrpc_channel_ctx *chan = &apps->channel[cid];
c_mtharue1a5ce12017-10-13 20:47:09 +0530798 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700799 unsigned long attrs;
c_mtharuf931ff92017-11-30 19:35:30 +0530800 dma_addr_t region_phys = 0;
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +0530801 void *region_vaddr = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700802 unsigned long flags;
803 int err = 0, vmid;
804
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800805 if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700806 return 0;
807 map = kzalloc(sizeof(*map), GFP_KERNEL);
808 VERIFY(err, !IS_ERR_OR_NULL(map));
809 if (err)
810 goto bail;
811 INIT_HLIST_NODE(&map->hn);
812 map->flags = mflags;
813 map->refs = 1;
814 map->fl = fl;
815 map->fd = fd;
816 map->attr = attr;
c_mtharue1a5ce12017-10-13 20:47:09 +0530817 if (mflags == ADSP_MMAP_HEAP_ADDR ||
818 mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530819 unsigned long dma_attrs = DMA_ATTR_SKIP_ZEROING |
820 DMA_ATTR_NO_KERNEL_MAPPING;
821
c_mtharue1a5ce12017-10-13 20:47:09 +0530822 map->apps = me;
823 map->fl = NULL;
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +0530824 VERIFY(err, !dma_alloc_memory(&region_phys, &region_vaddr,
825 len, dma_attrs));
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700826 if (err)
827 goto bail;
c_mtharuf931ff92017-11-30 19:35:30 +0530828 map->phys = (uintptr_t)region_phys;
c_mtharue1a5ce12017-10-13 20:47:09 +0530829 map->size = len;
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +0530830 map->va = (uintptr_t)region_vaddr;
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +0530831 } else if (mflags == FASTRPC_DMAHANDLE_NOMAP) {
832 ion_phys_addr_t iphys;
833
834 VERIFY(err, !IS_ERR_OR_NULL(map->handle =
835 ion_import_dma_buf_fd(fl->apps->client, fd)));
836 if (err)
837 goto bail;
838
839 map->uncached = 1;
840 map->buf = NULL;
841 map->attach = NULL;
842 map->table = NULL;
843 map->va = 0;
844 map->phys = 0;
845
846 err = ion_phys(fl->apps->client, map->handle,
847 &iphys, &map->size);
848 if (err)
849 goto bail;
850 map->phys = (uint64_t)iphys;
c_mtharue1a5ce12017-10-13 20:47:09 +0530851 } else {
c_mtharu7bd6a422017-10-17 18:15:37 +0530852 if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
853 pr_info("adsprpc: buffer mapped with persist attr %x\n",
854 (unsigned int)map->attr);
855 map->refs = 2;
856 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530857 VERIFY(err, !IS_ERR_OR_NULL(map->handle =
858 ion_import_dma_buf_fd(fl->apps->client, fd)));
859 if (err)
860 goto bail;
861 VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
862 &flags));
863 if (err)
864 goto bail;
865
c_mtharue1a5ce12017-10-13 20:47:09 +0530866 map->secure = flags & ION_FLAG_SECURE;
867 if (map->secure) {
868 if (!fl->secsctx)
869 err = fastrpc_session_alloc(chan, 1,
870 &fl->secsctx);
871 if (err)
872 goto bail;
873 }
874 if (map->secure)
875 sess = fl->secsctx;
876 else
877 sess = fl->sctx;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +0530878
c_mtharue1a5ce12017-10-13 20:47:09 +0530879 VERIFY(err, !IS_ERR_OR_NULL(sess));
880 if (err)
881 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +0530882
883 map->uncached = !ION_IS_CACHED(flags);
884 if (map->attr & FASTRPC_ATTR_NOVA && !sess->smmu.coherent)
885 map->uncached = 1;
886
c_mtharue1a5ce12017-10-13 20:47:09 +0530887 VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
888 if (err)
889 goto bail;
890 VERIFY(err, !IS_ERR_OR_NULL(map->attach =
891 dma_buf_attach(map->buf, sess->smmu.dev)));
892 if (err)
893 goto bail;
894 VERIFY(err, !IS_ERR_OR_NULL(map->table =
895 dma_buf_map_attachment(map->attach,
896 DMA_BIDIRECTIONAL)));
897 if (err)
898 goto bail;
899 if (sess->smmu.enabled) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700900 attrs = DMA_ATTR_EXEC_MAPPING;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +0530901
902 if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
903 (sess->smmu.coherent && map->uncached))
904 attrs |= DMA_ATTR_FORCE_NON_COHERENT;
905 else if (map->attr & FASTRPC_ATTR_COHERENT)
906 attrs |= DMA_ATTR_FORCE_COHERENT;
907
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700908 VERIFY(err, map->table->nents ==
c_mtharue1a5ce12017-10-13 20:47:09 +0530909 msm_dma_map_sg_attrs(sess->smmu.dev,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700910 map->table->sgl, map->table->nents,
911 DMA_BIDIRECTIONAL, map->buf, attrs));
c_mtharue1a5ce12017-10-13 20:47:09 +0530912 if (err)
913 goto bail;
914 } else {
915 VERIFY(err, map->table->nents == 1);
916 if (err)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700917 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +0530918 }
919 map->phys = sg_dma_address(map->table->sgl);
Tharun Kumar Merugu93f319a2018-02-01 17:35:42 +0530920
c_mtharue1a5ce12017-10-13 20:47:09 +0530921 if (sess->smmu.cb) {
922 map->phys += ((uint64_t)sess->smmu.cb << 32);
923 map->size = sg_dma_len(map->table->sgl);
924 } else {
925 map->size = buf_page_size(len);
926 }
Tharun Kumar Merugu93f319a2018-02-01 17:35:42 +0530927
c_mtharue1a5ce12017-10-13 20:47:09 +0530928 vmid = fl->apps->channel[fl->cid].vmid;
Tharun Kumar Merugu93f319a2018-02-01 17:35:42 +0530929 if (!sess->smmu.enabled && !vmid) {
930 VERIFY(err, map->phys >= me->range.addr &&
931 map->phys + map->size <=
932 me->range.addr + me->range.size);
933 if (err) {
934 pr_err("adsprpc: mmap fail out of range\n");
935 goto bail;
936 }
937 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530938 if (vmid) {
939 int srcVM[1] = {VMID_HLOS};
940 int destVM[2] = {VMID_HLOS, vmid};
941 int destVMperm[2] = {PERM_READ | PERM_WRITE,
942 PERM_READ | PERM_WRITE | PERM_EXEC};
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700943
c_mtharue1a5ce12017-10-13 20:47:09 +0530944 VERIFY(err, !hyp_assign_phys(map->phys,
945 buf_page_size(map->size),
946 srcVM, 1, destVM, destVMperm, 2));
947 if (err)
948 goto bail;
949 }
950 map->va = va;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700951 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700952 map->len = len;
953
954 fastrpc_mmap_add(map);
955 *ppmap = map;
956
957bail:
958 if (err && map)
c_mtharu7bd6a422017-10-17 18:15:37 +0530959 fastrpc_mmap_free(map, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700960 return err;
961}
962
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530963static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530964 unsigned long dma_attr, uint32_t rflags,
965 int remote, struct fastrpc_buf **obuf)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700966{
967 int err = 0, vmid;
c_mtharue1a5ce12017-10-13 20:47:09 +0530968 struct fastrpc_buf *buf = NULL, *fr = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700969 struct hlist_node *n;
970
971 VERIFY(err, size > 0);
972 if (err)
973 goto bail;
974
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530975 if (!remote) {
976 /* find the smallest buffer that fits in the cache */
977 spin_lock(&fl->hlock);
978 hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
979 if (buf->size >= size && (!fr || fr->size > buf->size))
980 fr = buf;
981 }
982 if (fr)
983 hlist_del_init(&fr->hn);
984 spin_unlock(&fl->hlock);
985 if (fr) {
986 *obuf = fr;
987 return 0;
988 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700989 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530990 buf = NULL;
991 VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700992 if (err)
993 goto bail;
994 INIT_HLIST_NODE(&buf->hn);
995 buf->fl = fl;
c_mtharue1a5ce12017-10-13 20:47:09 +0530996 buf->virt = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700997 buf->phys = 0;
998 buf->size = size;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530999 buf->dma_attr = dma_attr;
1000 buf->flags = rflags;
1001 buf->raddr = 0;
1002 buf->remote = 0;
1003 buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
1004 (dma_addr_t *)&buf->phys,
1005 GFP_KERNEL, buf->dma_attr);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001006 if (IS_ERR_OR_NULL(buf->virt)) {
1007 /* free cache and retry */
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05301008 fastrpc_cached_buf_list_free(fl);
1009 buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
1010 (dma_addr_t *)&buf->phys,
1011 GFP_KERNEL, buf->dma_attr);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001012 VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
1013 }
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05301014 if (err) {
1015 err = -ENOMEM;
1016 pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx\n",
1017 current->comm, __func__, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001018 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05301019 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001020 if (fl->sctx->smmu.cb)
1021 buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
1022 vmid = fl->apps->channel[fl->cid].vmid;
1023 if (vmid) {
1024 int srcVM[1] = {VMID_HLOS};
1025 int destVM[2] = {VMID_HLOS, vmid};
1026 int destVMperm[2] = {PERM_READ | PERM_WRITE,
1027 PERM_READ | PERM_WRITE | PERM_EXEC};
1028
1029 VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
1030 srcVM, 1, destVM, destVMperm, 2));
1031 if (err)
1032 goto bail;
1033 }
1034
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05301035 if (remote) {
1036 INIT_HLIST_NODE(&buf->hn_rem);
1037 spin_lock(&fl->hlock);
1038 hlist_add_head(&buf->hn_rem, &fl->remote_bufs);
1039 spin_unlock(&fl->hlock);
1040 buf->remote = remote;
1041 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001042 *obuf = buf;
1043 bail:
1044 if (err && buf)
1045 fastrpc_buf_free(buf, 0);
1046 return err;
1047}
1048
1049
1050static int context_restore_interrupted(struct fastrpc_file *fl,
Sathish Ambleybae51902017-07-03 15:00:49 -07001051 struct fastrpc_ioctl_invoke_crc *inv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001052 struct smq_invoke_ctx **po)
1053{
1054 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05301055 struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001056 struct hlist_node *n;
1057 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
1058
1059 spin_lock(&fl->hlock);
1060 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
1061 if (ictx->pid == current->pid) {
1062 if (invoke->sc != ictx->sc || ictx->fl != fl)
1063 err = -1;
1064 else {
1065 ctx = ictx;
1066 hlist_del_init(&ctx->hn);
1067 hlist_add_head(&ctx->hn, &fl->clst.pending);
1068 }
1069 break;
1070 }
1071 }
1072 spin_unlock(&fl->hlock);
1073 if (ctx)
1074 *po = ctx;
1075 return err;
1076}
1077
1078#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
1079static int overlap_ptr_cmp(const void *a, const void *b)
1080{
1081 struct overlap *pa = *((struct overlap **)a);
1082 struct overlap *pb = *((struct overlap **)b);
1083 /* sort with lowest starting buffer first */
1084 int st = CMP(pa->start, pb->start);
1085 /* sort with highest ending buffer first */
1086 int ed = CMP(pb->end, pa->end);
1087 return st == 0 ? ed : st;
1088}
1089
Sathish Ambley9466d672017-01-25 10:51:55 -08001090static int context_build_overlap(struct smq_invoke_ctx *ctx)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001091{
Sathish Ambley9466d672017-01-25 10:51:55 -08001092 int i, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001093 remote_arg_t *lpra = ctx->lpra;
1094 int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
1095 int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
1096 int nbufs = inbufs + outbufs;
1097 struct overlap max;
1098
1099 for (i = 0; i < nbufs; ++i) {
1100 ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
1101 ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
Sathish Ambley9466d672017-01-25 10:51:55 -08001102 if (lpra[i].buf.len) {
1103 VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
1104 if (err)
1105 goto bail;
1106 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001107 ctx->overs[i].raix = i;
1108 ctx->overps[i] = &ctx->overs[i];
1109 }
c_mtharue1a5ce12017-10-13 20:47:09 +05301110 sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001111 max.start = 0;
1112 max.end = 0;
1113 for (i = 0; i < nbufs; ++i) {
1114 if (ctx->overps[i]->start < max.end) {
1115 ctx->overps[i]->mstart = max.end;
1116 ctx->overps[i]->mend = ctx->overps[i]->end;
1117 ctx->overps[i]->offset = max.end -
1118 ctx->overps[i]->start;
1119 if (ctx->overps[i]->end > max.end) {
1120 max.end = ctx->overps[i]->end;
1121 } else {
1122 ctx->overps[i]->mend = 0;
1123 ctx->overps[i]->mstart = 0;
1124 }
1125 } else {
1126 ctx->overps[i]->mend = ctx->overps[i]->end;
1127 ctx->overps[i]->mstart = ctx->overps[i]->start;
1128 ctx->overps[i]->offset = 0;
1129 max = *ctx->overps[i];
1130 }
1131 }
Sathish Ambley9466d672017-01-25 10:51:55 -08001132bail:
1133 return err;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001134}
1135
1136#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
1137 do {\
1138 if (!(kernel))\
c_mtharue1a5ce12017-10-13 20:47:09 +05301139 VERIFY(err, 0 == copy_from_user((dst),\
1140 (void const __user *)(src),\
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001141 (size)));\
1142 else\
1143 memmove((dst), (src), (size));\
1144 } while (0)
1145
1146#define K_COPY_TO_USER(err, kernel, dst, src, size) \
1147 do {\
1148 if (!(kernel))\
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301149 VERIFY(err, 0 == copy_to_user((void __user *)(dst),\
c_mtharue1a5ce12017-10-13 20:47:09 +05301150 (src), (size)));\
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001151 else\
1152 memmove((dst), (src), (size));\
1153 } while (0)
1154
1155
1156static void context_free(struct smq_invoke_ctx *ctx);
1157
1158static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07001159 struct fastrpc_ioctl_invoke_crc *invokefd,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001160 struct smq_invoke_ctx **po)
1161{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301162 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301163 int err = 0, bufs, ii, size = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05301164 struct smq_invoke_ctx *ctx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001165 struct fastrpc_ctx_lst *clst = &fl->clst;
1166 struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;
1167
1168 bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
1169 size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
1170 sizeof(*ctx->fds) * (bufs) +
1171 sizeof(*ctx->attrs) * (bufs) +
1172 sizeof(*ctx->overs) * (bufs) +
1173 sizeof(*ctx->overps) * (bufs);
1174
c_mtharue1a5ce12017-10-13 20:47:09 +05301175 VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001176 if (err)
1177 goto bail;
1178
1179 INIT_HLIST_NODE(&ctx->hn);
1180 hlist_add_fake(&ctx->hn);
1181 ctx->fl = fl;
1182 ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
1183 ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
1184 ctx->fds = (int *)(&ctx->lpra[bufs]);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301185 if (me->legacy) {
1186 ctx->overs = (struct overlap *)(&ctx->fds[bufs]);
1187 ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
1188 } else {
1189 ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
1190 ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
1191 ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
1192 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001193
c_mtharue1a5ce12017-10-13 20:47:09 +05301194 K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001195 bufs * sizeof(*ctx->lpra));
1196 if (err)
1197 goto bail;
1198
1199 if (invokefd->fds) {
1200 K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
1201 bufs * sizeof(*ctx->fds));
1202 if (err)
1203 goto bail;
1204 }
1205 if (invokefd->attrs) {
1206 K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
1207 bufs * sizeof(*ctx->attrs));
1208 if (err)
1209 goto bail;
1210 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001211 ctx->crc = (uint32_t *)invokefd->crc;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001212 ctx->sc = invoke->sc;
Sathish Ambley9466d672017-01-25 10:51:55 -08001213 if (bufs) {
1214 VERIFY(err, 0 == context_build_overlap(ctx));
1215 if (err)
1216 goto bail;
1217 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001218 ctx->retval = -1;
1219 ctx->pid = current->pid;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301220 ctx->tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001221 init_completion(&ctx->work);
c_mtharufdac6892017-10-12 13:09:01 +05301222 ctx->magic = FASTRPC_CTX_MAGIC;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001223
1224 spin_lock(&fl->hlock);
1225 hlist_add_head(&ctx->hn, &clst->pending);
1226 spin_unlock(&fl->hlock);
1227
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301228 spin_lock(&me->ctxlock);
1229 for (ii = 0; ii < FASTRPC_CTX_MAX; ii++) {
1230 if (!me->ctxtable[ii]) {
1231 me->ctxtable[ii] = ctx;
1232 ctx->ctxid = (ptr_to_uint64(ctx) & ~0xFFF)|(ii << 4);
1233 break;
1234 }
1235 }
1236 spin_unlock(&me->ctxlock);
1237 VERIFY(err, ii < FASTRPC_CTX_MAX);
1238 if (err) {
1239 pr_err("adsprpc: out of context memory\n");
1240 goto bail;
1241 }
1242
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001243 *po = ctx;
1244bail:
1245 if (ctx && err)
1246 context_free(ctx);
1247 return err;
1248}
1249
1250static void context_save_interrupted(struct smq_invoke_ctx *ctx)
1251{
1252 struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
1253
1254 spin_lock(&ctx->fl->hlock);
1255 hlist_del_init(&ctx->hn);
1256 hlist_add_head(&ctx->hn, &clst->interrupted);
1257 spin_unlock(&ctx->fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001258}
1259
1260static void context_free(struct smq_invoke_ctx *ctx)
1261{
1262 int i;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301263 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001264 int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
1265 REMOTE_SCALARS_OUTBUFS(ctx->sc);
1266 spin_lock(&ctx->fl->hlock);
1267 hlist_del_init(&ctx->hn);
1268 spin_unlock(&ctx->fl->hlock);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301269 mutex_lock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001270 for (i = 0; i < nbufs; ++i)
c_mtharu7bd6a422017-10-17 18:15:37 +05301271 fastrpc_mmap_free(ctx->maps[i], 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301272
1273 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001274 fastrpc_buf_free(ctx->buf, 1);
c_mtharufdac6892017-10-12 13:09:01 +05301275 ctx->magic = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301276 ctx->ctxid = 0;
1277
1278 spin_lock(&me->ctxlock);
1279 for (i = 0; i < FASTRPC_CTX_MAX; i++) {
1280 if (me->ctxtable[i] == ctx) {
1281 me->ctxtable[i] = NULL;
1282 break;
1283 }
1284 }
1285 spin_unlock(&me->ctxlock);
1286
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001287 kfree(ctx);
1288}
1289
1290static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
1291{
1292 ctx->retval = retval;
1293 complete(&ctx->work);
1294}
1295
1296
1297static void fastrpc_notify_users(struct fastrpc_file *me)
1298{
1299 struct smq_invoke_ctx *ictx;
1300 struct hlist_node *n;
1301
1302 spin_lock(&me->hlock);
1303 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1304 complete(&ictx->work);
1305 }
1306 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1307 complete(&ictx->work);
1308 }
1309 spin_unlock(&me->hlock);
1310
1311}
1312
Tharun Kumar Merugu77dd5872018-04-02 12:48:17 +05301313
1314static void fastrpc_notify_users_staticpd_pdr(struct fastrpc_file *me)
1315{
1316 struct smq_invoke_ctx *ictx;
1317 struct hlist_node *n;
1318
1319 spin_lock(&me->hlock);
1320 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1321 if (ictx->msg.pid)
1322 complete(&ictx->work);
1323 }
1324 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1325 if (ictx->msg.pid)
1326 complete(&ictx->work);
1327 }
1328 spin_unlock(&me->hlock);
1329}
1330
1331
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001332static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
1333{
1334 struct fastrpc_file *fl;
1335 struct hlist_node *n;
1336
1337 spin_lock(&me->hlock);
1338 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1339 if (fl->cid == cid)
1340 fastrpc_notify_users(fl);
1341 }
1342 spin_unlock(&me->hlock);
1343
1344}
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05301345
1346static void fastrpc_notify_pdr_drivers(struct fastrpc_apps *me, char *spdname)
1347{
1348 struct fastrpc_file *fl;
1349 struct hlist_node *n;
1350
1351 spin_lock(&me->hlock);
1352 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1353 if (fl->spdname && !strcmp(spdname, fl->spdname))
Tharun Kumar Merugu77dd5872018-04-02 12:48:17 +05301354 fastrpc_notify_users_staticpd_pdr(fl);
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05301355 }
1356 spin_unlock(&me->hlock);
1357
1358}
1359
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001360static void context_list_ctor(struct fastrpc_ctx_lst *me)
1361{
1362 INIT_HLIST_HEAD(&me->interrupted);
1363 INIT_HLIST_HEAD(&me->pending);
1364}
1365
1366static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
1367{
1368 struct fastrpc_ctx_lst *clst = &fl->clst;
c_mtharue1a5ce12017-10-13 20:47:09 +05301369 struct smq_invoke_ctx *ictx = NULL, *ctxfree;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001370 struct hlist_node *n;
1371
1372 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301373 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001374 spin_lock(&fl->hlock);
1375 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
1376 hlist_del_init(&ictx->hn);
1377 ctxfree = ictx;
1378 break;
1379 }
1380 spin_unlock(&fl->hlock);
1381 if (ctxfree)
1382 context_free(ctxfree);
1383 } while (ctxfree);
1384 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301385 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001386 spin_lock(&fl->hlock);
1387 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
1388 hlist_del_init(&ictx->hn);
1389 ctxfree = ictx;
1390 break;
1391 }
1392 spin_unlock(&fl->hlock);
1393 if (ctxfree)
1394 context_free(ctxfree);
1395 } while (ctxfree);
1396}
1397
1398static int fastrpc_file_free(struct fastrpc_file *fl);
1399static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
1400{
1401 struct fastrpc_file *fl, *free;
1402 struct hlist_node *n;
1403
1404 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301405 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001406 spin_lock(&me->hlock);
1407 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1408 hlist_del_init(&fl->hn);
1409 free = fl;
1410 break;
1411 }
1412 spin_unlock(&me->hlock);
1413 if (free)
1414 fastrpc_file_free(free);
1415 } while (free);
1416}
1417
1418static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
1419{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301420 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001421 remote_arg64_t *rpra;
1422 remote_arg_t *lpra = ctx->lpra;
1423 struct smq_invoke_buf *list;
1424 struct smq_phy_page *pages, *ipage;
1425 uint32_t sc = ctx->sc;
1426 int inbufs = REMOTE_SCALARS_INBUFS(sc);
1427 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001428 int handles, bufs = inbufs + outbufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001429 uintptr_t args;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301430 size_t rlen = 0, copylen = 0, metalen = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001431 int i, oix;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001432 int err = 0;
1433 int mflags = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001434 uint64_t *fdlist;
Sathish Ambleybae51902017-07-03 15:00:49 -07001435 uint32_t *crclist;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301436 int64_t *perf_counter = getperfcounter(ctx->fl, PERF_COUNT);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001437
1438 /* calculate size of the metadata */
c_mtharue1a5ce12017-10-13 20:47:09 +05301439 rpra = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001440 list = smq_invoke_buf_start(rpra, sc);
1441 pages = smq_phy_page_start(sc, list);
1442 ipage = pages;
1443
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301444 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001445 for (i = 0; i < bufs; ++i) {
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301446 uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
1447 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001448
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301449 mutex_lock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301450 if (ctx->fds[i] && (ctx->fds[i] != -1)) {
1451 unsigned int attrs = 0;
1452
1453 if (ctx->attrs)
1454 attrs = ctx->attrs[i];
1455
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001456 fastrpc_mmap_create(ctx->fl, ctx->fds[i],
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301457 attrs, buf, len,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001458 mflags, &ctx->maps[i]);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301459 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301460 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001461 ipage += 1;
1462 }
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301463 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001464 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301465 mutex_lock(&ctx->fl->fl_map_mutex);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001466 for (i = bufs; i < bufs + handles; i++) {
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +05301467 int dmaflags = 0;
1468
1469 if (ctx->attrs && (ctx->attrs[i] & FASTRPC_ATTR_NOMAP))
1470 dmaflags = FASTRPC_DMAHANDLE_NOMAP;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001471 VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +05301472 FASTRPC_ATTR_NOVA, 0, 0, dmaflags, &ctx->maps[i]));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301473 if (err) {
1474 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001475 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301476 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001477 ipage += 1;
1478 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301479 mutex_unlock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301480 if (!me->legacy) {
1481 metalen = copylen = (size_t)&ipage[0] +
1482 (sizeof(uint64_t) * M_FDLIST) +
1483 (sizeof(uint32_t) * M_CRCLIST);
1484 } else {
1485 metalen = copylen = (size_t)&ipage[0];
1486 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001487
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001488 /* calculate len requreed for copying */
1489 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1490 int i = ctx->overps[oix]->raix;
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001491 uintptr_t mstart, mend;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301492 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001493
1494 if (!len)
1495 continue;
1496 if (ctx->maps[i])
1497 continue;
1498 if (ctx->overps[oix]->offset == 0)
1499 copylen = ALIGN(copylen, BALIGN);
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001500 mstart = ctx->overps[oix]->mstart;
1501 mend = ctx->overps[oix]->mend;
1502 VERIFY(err, (mend - mstart) <= LONG_MAX);
1503 if (err)
1504 goto bail;
1505 copylen += mend - mstart;
1506 VERIFY(err, copylen >= 0);
1507 if (err)
1508 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001509 }
1510 ctx->used = copylen;
1511
1512 /* allocate new buffer */
1513 if (copylen) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05301514 err = fastrpc_buf_alloc(ctx->fl, copylen, 0, 0, 0, &ctx->buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001515 if (err)
1516 goto bail;
1517 }
Tharun Kumar Merugue3361f92017-06-22 10:45:43 +05301518 if (ctx->buf->virt && metalen <= copylen)
1519 memset(ctx->buf->virt, 0, metalen);
1520
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001521 /* copy metadata */
1522 rpra = ctx->buf->virt;
1523 ctx->rpra = rpra;
1524 list = smq_invoke_buf_start(rpra, sc);
1525 pages = smq_phy_page_start(sc, list);
1526 ipage = pages;
1527 args = (uintptr_t)ctx->buf->virt + metalen;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001528 for (i = 0; i < bufs + handles; ++i) {
1529 if (lpra[i].buf.len)
1530 list[i].num = 1;
1531 else
1532 list[i].num = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001533 list[i].pgidx = ipage - pages;
1534 ipage++;
1535 }
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301536
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001537 /* map ion buffers */
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301538 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301539 for (i = 0; rpra && i < inbufs + outbufs; ++i) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001540 struct fastrpc_mmap *map = ctx->maps[i];
1541 uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301542 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001543
1544 rpra[i].buf.pv = 0;
1545 rpra[i].buf.len = len;
1546 if (!len)
1547 continue;
1548 if (map) {
1549 struct vm_area_struct *vma;
1550 uintptr_t offset;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301551 uint64_t num = buf_num_pages(buf, len);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001552 int idx = list[i].pgidx;
1553
1554 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001555 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001556 } else {
1557 down_read(&current->mm->mmap_sem);
1558 VERIFY(err, NULL != (vma = find_vma(current->mm,
1559 map->va)));
1560 if (err) {
1561 up_read(&current->mm->mmap_sem);
1562 goto bail;
1563 }
1564 offset = buf_page_start(buf) - vma->vm_start;
1565 up_read(&current->mm->mmap_sem);
1566 VERIFY(err, offset < (uintptr_t)map->size);
1567 if (err)
1568 goto bail;
1569 }
1570 pages[idx].addr = map->phys + offset;
1571 pages[idx].size = num << PAGE_SHIFT;
1572 }
1573 rpra[i].buf.pv = buf;
1574 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001575 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001576 for (i = bufs; i < bufs + handles; ++i) {
1577 struct fastrpc_mmap *map = ctx->maps[i];
1578
1579 pages[i].addr = map->phys;
1580 pages[i].size = map->size;
1581 }
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301582 if (!me->legacy) {
1583 fdlist = (uint64_t *)&pages[bufs + handles];
1584 for (i = 0; i < M_FDLIST; i++)
1585 fdlist[i] = 0;
1586 crclist = (uint32_t *)&fdlist[M_FDLIST];
1587 memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
1588 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001589
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001590 /* copy non ion buffers */
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301591 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001592 rlen = copylen - metalen;
Tharun Kumar Meruguc230bd72018-01-29 18:02:42 +05301593 for (oix = 0; rpra && oix < inbufs + outbufs; ++oix) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001594 int i = ctx->overps[oix]->raix;
1595 struct fastrpc_mmap *map = ctx->maps[i];
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301596 size_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001597 uint64_t buf;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301598 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001599
1600 if (!len)
1601 continue;
1602 if (map)
1603 continue;
1604 if (ctx->overps[oix]->offset == 0) {
1605 rlen -= ALIGN(args, BALIGN) - args;
1606 args = ALIGN(args, BALIGN);
1607 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001608 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001609 VERIFY(err, rlen >= mlen);
1610 if (err)
1611 goto bail;
1612 rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
1613 pages[list[i].pgidx].addr = ctx->buf->phys -
1614 ctx->overps[oix]->offset +
1615 (copylen - rlen);
1616 pages[list[i].pgidx].addr =
1617 buf_page_start(pages[list[i].pgidx].addr);
1618 buf = rpra[i].buf.pv;
1619 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1620 if (i < inbufs) {
1621 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1622 lpra[i].buf.pv, len);
1623 if (err)
1624 goto bail;
1625 }
1626 args = args + mlen;
1627 rlen -= mlen;
1628 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001629 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001630
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301631 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001632 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1633 int i = ctx->overps[oix]->raix;
1634 struct fastrpc_mmap *map = ctx->maps[i];
1635
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001636 if (map && map->uncached)
1637 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301638 if (ctx->fl->sctx->smmu.coherent &&
1639 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1640 continue;
1641 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1642 continue;
1643
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301644 if (rpra && rpra[i].buf.len && ctx->overps[oix]->mstart) {
1645 if (map && map->handle)
1646 msm_ion_do_cache_op(ctx->fl->apps->client,
1647 map->handle,
1648 uint64_to_ptr(rpra[i].buf.pv),
1649 rpra[i].buf.len,
1650 ION_IOC_CLEAN_INV_CACHES);
1651 else
1652 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1653 uint64_to_ptr(rpra[i].buf.pv
1654 + rpra[i].buf.len));
1655 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001656 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001657 PERF_END);
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301658 for (i = bufs; rpra && i < bufs + handles; i++) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001659 rpra[i].dma.fd = ctx->fds[i];
1660 rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1661 rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001662 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001663
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001664 bail:
1665 return err;
1666}
1667
1668static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
1669 remote_arg_t *upra)
1670{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301671 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001672 uint32_t sc = ctx->sc;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001673 struct smq_invoke_buf *list;
1674 struct smq_phy_page *pages;
1675 struct fastrpc_mmap *mmap;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301676 uint64_t *fdlist = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001677 uint32_t *crclist = NULL;
1678
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001679 remote_arg64_t *rpra = ctx->rpra;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001680 int i, inbufs, outbufs, handles;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001681 int err = 0;
1682
1683 inbufs = REMOTE_SCALARS_INBUFS(sc);
1684 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001685 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1686 list = smq_invoke_buf_start(ctx->rpra, sc);
1687 pages = smq_phy_page_start(sc, list);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301688 if (!me->legacy) {
1689 fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
1690 crclist = (uint32_t *)(fdlist + M_FDLIST);
1691 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001692
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001693 for (i = inbufs; i < inbufs + outbufs; ++i) {
1694 if (!ctx->maps[i]) {
1695 K_COPY_TO_USER(err, kernel,
1696 ctx->lpra[i].buf.pv,
1697 uint64_to_ptr(rpra[i].buf.pv),
1698 rpra[i].buf.len);
1699 if (err)
1700 goto bail;
1701 } else {
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301702 mutex_lock(&ctx->fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05301703 fastrpc_mmap_free(ctx->maps[i], 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301704 mutex_unlock(&ctx->fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05301705 ctx->maps[i] = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001706 }
1707 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301708 mutex_lock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301709 if (fdlist && (inbufs + outbufs + handles)) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001710 for (i = 0; i < M_FDLIST; i++) {
1711 if (!fdlist[i])
1712 break;
1713 if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
Sathish Ambleyae5ee542017-01-16 22:24:23 -08001714 0, 0, &mmap))
c_mtharu7bd6a422017-10-17 18:15:37 +05301715 fastrpc_mmap_free(mmap, 0);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001716 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001717 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301718 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambleybae51902017-07-03 15:00:49 -07001719 if (ctx->crc && crclist && rpra)
c_mtharue1a5ce12017-10-13 20:47:09 +05301720 K_COPY_TO_USER(err, kernel, ctx->crc,
Sathish Ambleybae51902017-07-03 15:00:49 -07001721 crclist, M_CRCLIST*sizeof(uint32_t));
1722
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001723 bail:
1724 return err;
1725}
1726
1727static void inv_args_pre(struct smq_invoke_ctx *ctx)
1728{
1729 int i, inbufs, outbufs;
1730 uint32_t sc = ctx->sc;
1731 remote_arg64_t *rpra = ctx->rpra;
1732 uintptr_t end;
1733
1734 inbufs = REMOTE_SCALARS_INBUFS(sc);
1735 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1736 for (i = inbufs; i < inbufs + outbufs; ++i) {
1737 struct fastrpc_mmap *map = ctx->maps[i];
1738
1739 if (map && map->uncached)
1740 continue;
1741 if (!rpra[i].buf.len)
1742 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301743 if (ctx->fl->sctx->smmu.coherent &&
1744 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1745 continue;
1746 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1747 continue;
1748
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001749 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1750 buf_page_start(rpra[i].buf.pv))
1751 continue;
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301752 if (!IS_CACHE_ALIGNED((uintptr_t)
1753 uint64_to_ptr(rpra[i].buf.pv))) {
1754 if (map && map->handle)
1755 msm_ion_do_cache_op(ctx->fl->apps->client,
1756 map->handle,
1757 uint64_to_ptr(rpra[i].buf.pv),
1758 sizeof(uintptr_t),
1759 ION_IOC_CLEAN_INV_CACHES);
1760 else
1761 dmac_flush_range(
1762 uint64_to_ptr(rpra[i].buf.pv), (char *)
1763 uint64_to_ptr(rpra[i].buf.pv + 1));
1764 }
1765
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001766 end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
1767 rpra[i].buf.len);
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301768 if (!IS_CACHE_ALIGNED(end)) {
1769 if (map && map->handle)
1770 msm_ion_do_cache_op(ctx->fl->apps->client,
1771 map->handle,
1772 uint64_to_ptr(end),
1773 sizeof(uintptr_t),
1774 ION_IOC_CLEAN_INV_CACHES);
1775 else
1776 dmac_flush_range((char *)end,
1777 (char *)end + 1);
1778 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001779 }
1780}
1781
1782static void inv_args(struct smq_invoke_ctx *ctx)
1783{
1784 int i, inbufs, outbufs;
1785 uint32_t sc = ctx->sc;
1786 remote_arg64_t *rpra = ctx->rpra;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001787
1788 inbufs = REMOTE_SCALARS_INBUFS(sc);
1789 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1790 for (i = inbufs; i < inbufs + outbufs; ++i) {
1791 struct fastrpc_mmap *map = ctx->maps[i];
1792
1793 if (map && map->uncached)
1794 continue;
1795 if (!rpra[i].buf.len)
1796 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301797 if (ctx->fl->sctx->smmu.coherent &&
1798 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1799 continue;
1800 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1801 continue;
1802
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001803 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1804 buf_page_start(rpra[i].buf.pv)) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001805 continue;
1806 }
1807 if (map && map->handle)
1808 msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
1809 (char *)uint64_to_ptr(rpra[i].buf.pv),
1810 rpra[i].buf.len, ION_IOC_INV_CACHES);
1811 else
1812 dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
1813 (char *)uint64_to_ptr(rpra[i].buf.pv
1814 + rpra[i].buf.len));
1815 }
1816
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001817}
1818
1819static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
1820 uint32_t kernel, uint32_t handle)
1821{
1822 struct smq_msg *msg = &ctx->msg;
1823 struct fastrpc_file *fl = ctx->fl;
1824 struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301825 int err = 0, len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001826
c_mtharue1a5ce12017-10-13 20:47:09 +05301827 VERIFY(err, NULL != channel_ctx->chan);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001828 if (err)
1829 goto bail;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301830 msg->pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001831 msg->tid = current->pid;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301832 if (fl->sessionid)
1833 msg->tid |= (1 << SESSION_ID_INDEX);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001834 if (kernel)
1835 msg->pid = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301836 msg->invoke.header.ctx = ctx->ctxid | fl->pd;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001837 msg->invoke.header.handle = handle;
1838 msg->invoke.header.sc = ctx->sc;
1839 msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
1840 msg->invoke.page.size = buf_page_size(ctx->used);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301841 if (fl->apps->glink) {
1842 if (fl->ssrcount != channel_ctx->ssrcount) {
1843 err = -ECONNRESET;
1844 goto bail;
1845 }
1846 VERIFY(err, channel_ctx->link.port_state ==
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001847 FASTRPC_LINK_CONNECTED);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301848 if (err)
1849 goto bail;
1850 err = glink_tx(channel_ctx->chan,
1851 (void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
1852 GLINK_TX_REQ_INTENT);
1853 } else {
1854 spin_lock(&fl->apps->hlock);
1855 len = smd_write((smd_channel_t *)
1856 channel_ctx->chan,
1857 msg, sizeof(*msg));
1858 spin_unlock(&fl->apps->hlock);
1859 VERIFY(err, len == sizeof(*msg));
1860 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001861 bail:
1862 return err;
1863}
1864
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301865static void fastrpc_smd_read_handler(int cid)
1866{
1867 struct fastrpc_apps *me = &gfa;
1868 struct smq_invoke_rsp rsp = {0};
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301869 int ret = 0, err = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301870 uint32_t index;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301871
1872 do {
1873 ret = smd_read_from_cb(me->channel[cid].chan, &rsp,
1874 sizeof(rsp));
1875 if (ret != sizeof(rsp))
1876 break;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301877
1878 index = (uint32_t)((rsp.ctx & FASTRPC_CTXID_MASK) >> 4);
1879 VERIFY(err, index < FASTRPC_CTX_MAX);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301880 if (err)
1881 goto bail;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301882
1883 VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
1884 if (err)
1885 goto bail;
1886
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05301887 VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp.ctx & ~3)) &&
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301888 me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
1889 if (err)
1890 goto bail;
1891
1892 context_notify_user(me->ctxtable[index], rsp.retval);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301893 } while (ret == sizeof(rsp));
1894bail:
1895 if (err)
1896 pr_err("adsprpc: invalid response or context\n");
1897
1898}
1899
1900static void smd_event_handler(void *priv, unsigned int event)
1901{
1902 struct fastrpc_apps *me = &gfa;
1903 int cid = (int)(uintptr_t)priv;
1904
1905 switch (event) {
1906 case SMD_EVENT_OPEN:
1907 complete(&me->channel[cid].workport);
1908 break;
1909 case SMD_EVENT_CLOSE:
1910 fastrpc_notify_drivers(me, cid);
1911 break;
1912 case SMD_EVENT_DATA:
1913 fastrpc_smd_read_handler(cid);
1914 break;
1915 }
1916}
1917
1918
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001919static void fastrpc_init(struct fastrpc_apps *me)
1920{
1921 int i;
1922
1923 INIT_HLIST_HEAD(&me->drivers);
Tharun Kumar Merugubcd6fbf2018-01-04 17:49:34 +05301924 INIT_HLIST_HEAD(&me->maps);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001925 spin_lock_init(&me->hlock);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301926 spin_lock_init(&me->ctxlock);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05301927 mutex_init(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001928 me->channel = &gcinfo[0];
1929 for (i = 0; i < NUM_CHANNELS; i++) {
1930 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301931 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001932 me->channel[i].sesscount = 0;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05301933 /* All channels are secure by default except CDSP */
1934 me->channel[i].secure = SECURE_CHANNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001935 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05301936 /* Set CDSP channel to non secure */
1937 me->channel[CDSP_DOMAIN_ID].secure = NON_SECURE_CHANNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001938}
1939
1940static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1941
1942static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
1943 uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07001944 struct fastrpc_ioctl_invoke_crc *inv)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001945{
c_mtharue1a5ce12017-10-13 20:47:09 +05301946 struct smq_invoke_ctx *ctx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001947 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
1948 int cid = fl->cid;
1949 int interrupted = 0;
1950 int err = 0;
Maria Yu757199c2017-09-22 16:05:49 +08001951 struct timespec invoket = {0};
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301952 int64_t *perf_counter = getperfcounter(fl, PERF_COUNT);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001953
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001954 if (fl->profile)
1955 getnstimeofday(&invoket);
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301956
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05301957 if (!kernel) {
1958 VERIFY(err, invoke->handle != FASTRPC_STATIC_HANDLE_KERNEL);
1959 if (err) {
1960 pr_err("adsprpc: ERROR: %s: user application %s trying to send a kernel RPC message to channel %d",
1961 __func__, current->comm, cid);
1962 goto bail;
1963 }
1964 }
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301965
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301966 VERIFY(err, fl->sctx != NULL);
1967 if (err)
1968 goto bail;
1969 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1970 if (err)
1971 goto bail;
1972
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001973 if (!kernel) {
1974 VERIFY(err, 0 == context_restore_interrupted(fl, inv,
1975 &ctx));
1976 if (err)
1977 goto bail;
1978 if (fl->sctx->smmu.faults)
1979 err = FASTRPC_ENOSUCH;
1980 if (err)
1981 goto bail;
1982 if (ctx)
1983 goto wait;
1984 }
1985
1986 VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
1987 if (err)
1988 goto bail;
1989
1990 if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301991 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_GETARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001992 VERIFY(err, 0 == get_args(kernel, ctx));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001993 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001994 if (err)
1995 goto bail;
1996 }
1997
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301998 if (!fl->sctx->smmu.coherent) {
1999 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002000 inv_args_pre(ctx);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302001 PERF_END);
2002 }
2003
2004 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_LINK),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002005 VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002006 PERF_END);
2007
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002008 if (err)
2009 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002010 wait:
2011 if (kernel)
2012 wait_for_completion(&ctx->work);
2013 else {
2014 interrupted = wait_for_completion_interruptible(&ctx->work);
2015 VERIFY(err, 0 == (err = interrupted));
2016 if (err)
2017 goto bail;
2018 }
Sathish Ambleyc432b502017-06-05 12:03:42 -07002019
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302020 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
Sathish Ambleyc432b502017-06-05 12:03:42 -07002021 if (!fl->sctx->smmu.coherent)
2022 inv_args(ctx);
2023 PERF_END);
2024
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002025 VERIFY(err, 0 == (err = ctx->retval));
2026 if (err)
2027 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002028
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302029 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_PUTARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002030 VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002031 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002032 if (err)
2033 goto bail;
2034 bail:
2035 if (ctx && interrupted == -ERESTARTSYS)
2036 context_save_interrupted(ctx);
2037 else if (ctx)
2038 context_free(ctx);
2039 if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
2040 err = ECONNRESET;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002041
2042 if (fl->profile && !interrupted) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302043 if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER) {
2044 int64_t *count = GET_COUNTER(perf_counter, PERF_INVOKE);
2045
2046 if (count)
2047 *count += getnstimediff(&invoket);
2048 }
2049 if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX) {
2050 int64_t *count = GET_COUNTER(perf_counter, PERF_COUNT);
2051
2052 if (count)
2053 *count = *count+1;
2054 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002055 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002056 return err;
2057}
2058
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302059static int fastrpc_get_adsp_session(char *name, int *session)
2060{
2061 struct fastrpc_apps *me = &gfa;
2062 int err = 0, i;
2063
2064 for (i = 0; i < NUM_SESSIONS; i++) {
2065 if (!me->channel[0].spd[i].spdname)
2066 continue;
2067 if (!strcmp(name, me->channel[0].spd[i].spdname))
2068 break;
2069 }
2070 VERIFY(err, i < NUM_SESSIONS);
2071 if (err)
2072 goto bail;
2073 *session = i;
2074bail:
2075 return err;
2076}
2077
2078static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl);
Sathish Ambley36849af2017-02-02 09:35:55 -08002079static int fastrpc_channel_open(struct fastrpc_file *fl);
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302080static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002081static int fastrpc_init_process(struct fastrpc_file *fl,
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002082 struct fastrpc_ioctl_init_attrs *uproc)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002083{
2084 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302085 struct fastrpc_apps *me = &gfa;
Sathish Ambleybae51902017-07-03 15:00:49 -07002086 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002087 struct fastrpc_ioctl_init *init = &uproc->init;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002088 struct smq_phy_page pages[1];
c_mtharue1a5ce12017-10-13 20:47:09 +05302089 struct fastrpc_mmap *file = NULL, *mem = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302090 struct fastrpc_buf *imem = NULL;
2091 unsigned long imem_dma_attr = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302092 char *proc_name = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002093
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302094 VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
Sathish Ambley36849af2017-02-02 09:35:55 -08002095 if (err)
2096 goto bail;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302097 if (init->flags == FASTRPC_INIT_ATTACH ||
2098 init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002099 remote_arg_t ra[1];
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302100 int tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002101
2102 ra[0].buf.pv = (void *)&tgid;
2103 ra[0].buf.len = sizeof(tgid);
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302104 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002105 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
2106 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302107 ioctl.fds = NULL;
2108 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002109 ioctl.crc = NULL;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302110 if (init->flags == FASTRPC_INIT_ATTACH)
2111 fl->pd = 0;
2112 else if (init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
2113 fl->spdname = SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME;
2114 fl->pd = 2;
2115 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002116 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2117 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2118 if (err)
2119 goto bail;
2120 } else if (init->flags == FASTRPC_INIT_CREATE) {
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002121 remote_arg_t ra[6];
2122 int fds[6];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002123 int mflags = 0;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302124 int memlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002125 struct {
2126 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302127 unsigned int namelen;
2128 unsigned int filelen;
2129 unsigned int pageslen;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002130 int attrs;
2131 int siglen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002132 } inbuf;
2133
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302134 inbuf.pgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002135 inbuf.namelen = strlen(current->comm) + 1;
2136 inbuf.filelen = init->filelen;
2137 fl->pd = 1;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302138
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05302139 VERIFY(err, access_ok(0, (void __user *)init->file,
2140 init->filelen));
2141 if (err)
2142 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002143 if (init->filelen) {
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302144 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002145 VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
2146 init->file, init->filelen, mflags, &file));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302147 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002148 if (err)
2149 goto bail;
2150 }
2151 inbuf.pageslen = 1;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302152
2153 VERIFY(err, !init->mem);
2154 if (err) {
2155 err = -EINVAL;
2156 pr_err("adsprpc: %s: %s: ERROR: donated memory allocated in userspace\n",
2157 current->comm, __func__);
2158 goto bail;
2159 }
2160 memlen = ALIGN(max(1024*1024*3, (int)init->filelen * 4),
2161 1024*1024);
2162 imem_dma_attr = DMA_ATTR_EXEC_MAPPING |
2163 DMA_ATTR_NO_KERNEL_MAPPING |
2164 DMA_ATTR_FORCE_NON_COHERENT;
2165 err = fastrpc_buf_alloc(fl, memlen, imem_dma_attr, 0, 0, &imem);
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05302166 if (err)
2167 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302168 fl->init_mem = imem;
2169
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002170 inbuf.pageslen = 1;
2171 ra[0].buf.pv = (void *)&inbuf;
2172 ra[0].buf.len = sizeof(inbuf);
2173 fds[0] = 0;
2174
2175 ra[1].buf.pv = (void *)current->comm;
2176 ra[1].buf.len = inbuf.namelen;
2177 fds[1] = 0;
2178
2179 ra[2].buf.pv = (void *)init->file;
2180 ra[2].buf.len = inbuf.filelen;
2181 fds[2] = init->filefd;
2182
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302183 pages[0].addr = imem->phys;
2184 pages[0].size = imem->size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002185 ra[3].buf.pv = (void *)pages;
2186 ra[3].buf.len = 1 * sizeof(*pages);
2187 fds[3] = 0;
2188
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002189 inbuf.attrs = uproc->attrs;
2190 ra[4].buf.pv = (void *)&(inbuf.attrs);
2191 ra[4].buf.len = sizeof(inbuf.attrs);
2192 fds[4] = 0;
2193
2194 inbuf.siglen = uproc->siglen;
2195 ra[5].buf.pv = (void *)&(inbuf.siglen);
2196 ra[5].buf.len = sizeof(inbuf.siglen);
2197 fds[5] = 0;
2198
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302199 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002200 ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002201 if (uproc->attrs)
2202 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002203 ioctl.inv.pra = ra;
2204 ioctl.fds = fds;
c_mtharue1a5ce12017-10-13 20:47:09 +05302205 ioctl.attrs = NULL;
2206 ioctl.crc = NULL;
2207 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2208 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2209 if (err)
2210 goto bail;
2211 } else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
2212 remote_arg_t ra[3];
2213 uint64_t phys = 0;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302214 size_t size = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302215 int fds[3];
2216 struct {
2217 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302218 unsigned int namelen;
2219 unsigned int pageslen;
c_mtharue1a5ce12017-10-13 20:47:09 +05302220 } inbuf;
2221
2222 if (!init->filelen)
2223 goto bail;
2224
2225 proc_name = kzalloc(init->filelen, GFP_KERNEL);
2226 VERIFY(err, !IS_ERR_OR_NULL(proc_name));
2227 if (err)
2228 goto bail;
2229 VERIFY(err, 0 == copy_from_user((void *)proc_name,
2230 (void __user *)init->file, init->filelen));
2231 if (err)
2232 goto bail;
2233
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302234 fl->pd = 1;
c_mtharue1a5ce12017-10-13 20:47:09 +05302235 inbuf.pgid = current->tgid;
c_mtharu81a0aa72017-11-07 16:13:21 +05302236 inbuf.namelen = init->filelen;
c_mtharue1a5ce12017-10-13 20:47:09 +05302237 inbuf.pageslen = 0;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302238
2239 if (!strcmp(proc_name, "audiopd")) {
2240 fl->spdname = AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME;
2241 VERIFY(err, !fastrpc_mmap_remove_pdr(fl));
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05302242 if (err)
2243 goto bail;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302244 }
2245
c_mtharue1a5ce12017-10-13 20:47:09 +05302246 if (!me->staticpd_flags) {
2247 inbuf.pageslen = 1;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302248 mutex_lock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302249 VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
2250 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
2251 &mem));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302252 mutex_unlock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302253 if (err)
2254 goto bail;
2255 phys = mem->phys;
2256 size = mem->size;
2257 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302258 hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
2259 me->channel[fl->cid].rhvm.vmperm,
2260 me->channel[fl->cid].rhvm.vmcount));
c_mtharue1a5ce12017-10-13 20:47:09 +05302261 if (err) {
2262 pr_err("ADSPRPC: hyp_assign_phys fail err %d",
2263 err);
2264 pr_err("map->phys %llx, map->size %d\n",
2265 phys, (int)size);
2266 goto bail;
2267 }
2268 me->staticpd_flags = 1;
2269 }
2270
2271 ra[0].buf.pv = (void *)&inbuf;
2272 ra[0].buf.len = sizeof(inbuf);
2273 fds[0] = 0;
2274
2275 ra[1].buf.pv = (void *)proc_name;
2276 ra[1].buf.len = inbuf.namelen;
2277 fds[1] = 0;
2278
2279 pages[0].addr = phys;
2280 pages[0].size = size;
2281
2282 ra[2].buf.pv = (void *)pages;
2283 ra[2].buf.len = sizeof(*pages);
2284 fds[2] = 0;
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302285 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
c_mtharue1a5ce12017-10-13 20:47:09 +05302286
2287 ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
2288 ioctl.inv.pra = ra;
2289 ioctl.fds = NULL;
2290 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002291 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002292 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2293 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2294 if (err)
2295 goto bail;
2296 } else {
2297 err = -ENOTTY;
2298 }
2299bail:
c_mtharud91205a2017-11-07 16:01:06 +05302300 kfree(proc_name);
c_mtharue1a5ce12017-10-13 20:47:09 +05302301 if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
2302 me->staticpd_flags = 0;
2303 if (mem && err) {
2304 if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
2305 hyp_assign_phys(mem->phys, (uint64_t)mem->size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302306 me->channel[fl->cid].rhvm.vmid,
2307 me->channel[fl->cid].rhvm.vmcount,
2308 hlosvm, hlosvmperm, 1);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302309 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302310 fastrpc_mmap_free(mem, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302311 mutex_unlock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302312 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302313 if (file) {
2314 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302315 fastrpc_mmap_free(file, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302316 mutex_unlock(&fl->fl_map_mutex);
2317 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002318 return err;
2319}
2320
2321static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
2322{
2323 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07002324 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002325 remote_arg_t ra[1];
2326 int tgid = 0;
2327
Sathish Ambley36849af2017-02-02 09:35:55 -08002328 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
2329 if (err)
2330 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302331 VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002332 if (err)
2333 goto bail;
2334 tgid = fl->tgid;
2335 ra[0].buf.pv = (void *)&tgid;
2336 ra[0].buf.len = sizeof(tgid);
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302337 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002338 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
2339 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302340 ioctl.fds = NULL;
2341 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002342 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002343 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2344 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2345bail:
2346 return err;
2347}
2348
2349static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302350 uintptr_t va, uint64_t phys,
2351 size_t size, uintptr_t *raddr)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002352{
Sathish Ambleybae51902017-07-03 15:00:49 -07002353 struct fastrpc_ioctl_invoke_crc ioctl;
c_mtharu63ffc012017-11-16 15:26:56 +05302354 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002355 struct smq_phy_page page;
2356 int num = 1;
2357 remote_arg_t ra[3];
2358 int err = 0;
2359 struct {
2360 int pid;
2361 uint32_t flags;
2362 uintptr_t vaddrin;
2363 int num;
2364 } inargs;
2365 struct {
2366 uintptr_t vaddrout;
2367 } routargs;
2368
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302369 inargs.pid = fl->tgid;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302370 inargs.vaddrin = (uintptr_t)va;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002371 inargs.flags = flags;
2372 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
2373 ra[0].buf.pv = (void *)&inargs;
2374 ra[0].buf.len = sizeof(inargs);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302375 page.addr = phys;
2376 page.size = size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002377 ra[1].buf.pv = (void *)&page;
2378 ra[1].buf.len = num * sizeof(page);
2379
2380 ra[2].buf.pv = (void *)&routargs;
2381 ra[2].buf.len = sizeof(routargs);
2382
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302383 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002384 if (fl->apps->compat)
2385 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
2386 else
2387 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
2388 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302389 ioctl.fds = NULL;
2390 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002391 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002392 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2393 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302394 *raddr = (uintptr_t)routargs.vaddrout;
c_mtharue1a5ce12017-10-13 20:47:09 +05302395 if (err)
2396 goto bail;
2397 if (flags == ADSP_MMAP_HEAP_ADDR) {
2398 struct scm_desc desc = {0};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002399
c_mtharue1a5ce12017-10-13 20:47:09 +05302400 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302401 desc.args[1] = phys;
2402 desc.args[2] = size;
c_mtharue1a5ce12017-10-13 20:47:09 +05302403 desc.arginfo = SCM_ARGS(3);
2404 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
2405 TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
2406 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302407 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302408 hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
2409 me->channel[fl->cid].rhvm.vmperm,
2410 me->channel[fl->cid].rhvm.vmcount));
c_mtharue1a5ce12017-10-13 20:47:09 +05302411 if (err)
2412 goto bail;
2413 }
2414bail:
2415 return err;
2416}
2417
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302418static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, uint64_t phys,
2419 size_t size, uint32_t flags)
c_mtharue1a5ce12017-10-13 20:47:09 +05302420{
2421 int err = 0;
c_mtharu63ffc012017-11-16 15:26:56 +05302422 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +05302423 int destVM[1] = {VMID_HLOS};
2424 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
2425
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302426 if (flags == ADSP_MMAP_HEAP_ADDR) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302427 struct fastrpc_ioctl_invoke_crc ioctl;
2428 struct scm_desc desc = {0};
2429 remote_arg_t ra[1];
2430 int err = 0;
2431 struct {
2432 uint8_t skey;
2433 } routargs;
2434
2435 ra[0].buf.pv = (void *)&routargs;
2436 ra[0].buf.len = sizeof(routargs);
2437
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302438 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
c_mtharue1a5ce12017-10-13 20:47:09 +05302439 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
2440 ioctl.inv.pra = ra;
2441 ioctl.fds = NULL;
2442 ioctl.attrs = NULL;
2443 ioctl.crc = NULL;
2444 if (fl == NULL)
2445 goto bail;
2446
2447 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2448 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2449 if (err)
2450 goto bail;
2451 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302452 desc.args[1] = phys;
2453 desc.args[2] = size;
c_mtharue1a5ce12017-10-13 20:47:09 +05302454 desc.args[3] = routargs.skey;
2455 desc.arginfo = SCM_ARGS(4);
2456 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
2457 TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302458 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2459 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302460 me->channel[fl->cid].rhvm.vmid,
2461 me->channel[fl->cid].rhvm.vmcount,
2462 destVM, destVMperm, 1));
c_mtharue1a5ce12017-10-13 20:47:09 +05302463 if (err)
2464 goto bail;
2465 }
2466
2467bail:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002468 return err;
2469}
2470
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302471static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl, uintptr_t raddr,
2472 uint64_t phys, size_t size, uint32_t flags)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002473{
Sathish Ambleybae51902017-07-03 15:00:49 -07002474 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002475 remote_arg_t ra[1];
2476 int err = 0;
2477 struct {
2478 int pid;
2479 uintptr_t vaddrout;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302480 size_t size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002481 } inargs;
2482
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302483 inargs.pid = fl->tgid;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302484 inargs.size = size;
2485 inargs.vaddrout = raddr;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002486 ra[0].buf.pv = (void *)&inargs;
2487 ra[0].buf.len = sizeof(inargs);
2488
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302489 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002490 if (fl->apps->compat)
2491 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
2492 else
2493 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
2494 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302495 ioctl.fds = NULL;
2496 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002497 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002498 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2499 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
c_mtharue1a5ce12017-10-13 20:47:09 +05302500 if (err)
2501 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302502 if (flags == ADSP_MMAP_HEAP_ADDR ||
2503 flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2504 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, phys, size, flags));
c_mtharue1a5ce12017-10-13 20:47:09 +05302505 if (err)
2506 goto bail;
2507 }
2508bail:
2509 return err;
2510}
2511
2512static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
2513{
2514 struct fastrpc_mmap *match = NULL, *map = NULL;
2515 struct hlist_node *n = NULL;
2516 int err = 0, ret = 0;
2517 struct fastrpc_apps *me = &gfa;
2518 struct ramdump_segment *ramdump_segments_rh = NULL;
2519
2520 do {
2521 match = NULL;
2522 spin_lock(&me->hlock);
2523 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
2524 match = map;
2525 hlist_del_init(&map->hn);
2526 break;
2527 }
2528 spin_unlock(&me->hlock);
2529
2530 if (match) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302531 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match->phys,
2532 match->size, match->flags));
c_mtharue1a5ce12017-10-13 20:47:09 +05302533 if (err)
2534 goto bail;
2535 if (me->channel[0].ramdumpenabled) {
2536 ramdump_segments_rh = kcalloc(1,
2537 sizeof(struct ramdump_segment), GFP_KERNEL);
2538 if (ramdump_segments_rh) {
2539 ramdump_segments_rh->address =
2540 match->phys;
2541 ramdump_segments_rh->size = match->size;
2542 ret = do_elf_ramdump(
2543 me->channel[0].remoteheap_ramdump_dev,
2544 ramdump_segments_rh, 1);
2545 if (ret < 0)
2546 pr_err("ADSPRPC: unable to dump heap");
2547 kfree(ramdump_segments_rh);
2548 }
2549 }
c_mtharu7bd6a422017-10-17 18:15:37 +05302550 fastrpc_mmap_free(match, 0);
c_mtharue1a5ce12017-10-13 20:47:09 +05302551 }
2552 } while (match);
2553bail:
2554 if (err && match)
2555 fastrpc_mmap_add(match);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002556 return err;
2557}
2558
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302559static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl)
2560{
2561 struct fastrpc_apps *me = &gfa;
2562 int session = 0, err = 0;
2563
2564 VERIFY(err, !fastrpc_get_adsp_session(
2565 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
2566 if (err)
2567 goto bail;
2568 if (me->channel[fl->cid].spd[session].pdrcount !=
2569 me->channel[fl->cid].spd[session].prevpdrcount) {
2570 if (fastrpc_mmap_remove_ssr(fl))
2571 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
2572 me->channel[fl->cid].spd[session].prevpdrcount =
2573 me->channel[fl->cid].spd[session].pdrcount;
2574 }
2575 if (!me->channel[fl->cid].spd[session].ispdup) {
2576 VERIFY(err, 0);
2577 if (err) {
2578 err = -ENOTCONN;
2579 goto bail;
2580 }
2581 }
2582bail:
2583 return err;
2584}
2585
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002586static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302587 size_t len, struct fastrpc_mmap **ppmap);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002588
2589static void fastrpc_mmap_add(struct fastrpc_mmap *map);
2590
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05302591static inline void get_fastrpc_ioctl_mmap_64(
2592 struct fastrpc_ioctl_mmap_64 *mmap64,
2593 struct fastrpc_ioctl_mmap *immap)
2594{
2595 immap->fd = mmap64->fd;
2596 immap->flags = mmap64->flags;
2597 immap->vaddrin = (uintptr_t)mmap64->vaddrin;
2598 immap->size = mmap64->size;
2599}
2600
2601static inline void put_fastrpc_ioctl_mmap_64(
2602 struct fastrpc_ioctl_mmap_64 *mmap64,
2603 struct fastrpc_ioctl_mmap *immap)
2604{
2605 mmap64->vaddrout = (uint64_t)immap->vaddrout;
2606}
2607
2608static inline void get_fastrpc_ioctl_munmap_64(
2609 struct fastrpc_ioctl_munmap_64 *munmap64,
2610 struct fastrpc_ioctl_munmap *imunmap)
2611{
2612 imunmap->vaddrout = (uintptr_t)munmap64->vaddrout;
2613 imunmap->size = munmap64->size;
2614}
2615
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002616static int fastrpc_internal_munmap(struct fastrpc_file *fl,
2617 struct fastrpc_ioctl_munmap *ud)
2618{
2619 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302620 struct fastrpc_mmap *map = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302621 struct fastrpc_buf *rbuf = NULL, *free = NULL;
2622 struct hlist_node *n;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002623
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302624 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302625
2626 spin_lock(&fl->hlock);
2627 hlist_for_each_entry_safe(rbuf, n, &fl->remote_bufs, hn_rem) {
2628 if (rbuf->raddr && (rbuf->flags == ADSP_MMAP_ADD_PAGES)) {
2629 if ((rbuf->raddr == ud->vaddrout) &&
2630 (rbuf->size == ud->size)) {
2631 free = rbuf;
2632 break;
2633 }
2634 }
2635 }
2636 spin_unlock(&fl->hlock);
2637
2638 if (free) {
2639 VERIFY(err, !fastrpc_munmap_on_dsp(fl, free->raddr,
2640 free->phys, free->size, free->flags));
2641 if (err)
2642 goto bail;
2643 fastrpc_buf_free(rbuf, 0);
2644 mutex_unlock(&fl->map_mutex);
2645 return err;
2646 }
2647
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302648 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002649 VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302650 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002651 if (err)
2652 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302653 VERIFY(err, !fastrpc_munmap_on_dsp(fl, map->raddr,
2654 map->phys, map->size, map->flags));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002655 if (err)
2656 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302657 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302658 fastrpc_mmap_free(map, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302659 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002660bail:
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302661 if (err && map) {
2662 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002663 fastrpc_mmap_add(map);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302664 mutex_unlock(&fl->fl_map_mutex);
2665 }
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302666 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002667 return err;
2668}
2669
c_mtharu7bd6a422017-10-17 18:15:37 +05302670static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
2671 struct fastrpc_ioctl_munmap_fd *ud) {
2672 int err = 0;
2673 struct fastrpc_mmap *map = NULL;
2674
2675 VERIFY(err, (fl && ud));
2676 if (err)
2677 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302678 mutex_lock(&fl->fl_map_mutex);
Tharun Kumar Merugu09fc6152018-02-16 13:13:12 +05302679 if (fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
2680 pr_err("adsprpc: mapping not found to unmap %d va %llx %x\n",
c_mtharu7bd6a422017-10-17 18:15:37 +05302681 ud->fd, (unsigned long long)ud->va,
2682 (unsigned int)ud->len);
2683 err = -1;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302684 mutex_unlock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302685 goto bail;
2686 }
2687 if (map)
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302688 fastrpc_mmap_free(map, 0);
2689 mutex_unlock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302690bail:
2691 return err;
2692}
2693
2694
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002695static int fastrpc_internal_mmap(struct fastrpc_file *fl,
2696 struct fastrpc_ioctl_mmap *ud)
2697{
2698
c_mtharue1a5ce12017-10-13 20:47:09 +05302699 struct fastrpc_mmap *map = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302700 struct fastrpc_buf *rbuf = NULL;
2701 unsigned long dma_attr = 0;
2702 uintptr_t raddr = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002703 int err = 0;
2704
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302705 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302706 if (ud->flags == ADSP_MMAP_ADD_PAGES) {
2707 if (ud->vaddrin) {
2708 err = -EINVAL;
2709 pr_err("adsprpc: %s: %s: ERROR: adding user allocated pages is not supported\n",
2710 current->comm, __func__);
2711 goto bail;
2712 }
2713 dma_attr = DMA_ATTR_EXEC_MAPPING |
2714 DMA_ATTR_NO_KERNEL_MAPPING |
2715 DMA_ATTR_FORCE_NON_COHERENT;
2716 err = fastrpc_buf_alloc(fl, ud->size, dma_attr, ud->flags,
2717 1, &rbuf);
2718 if (err)
2719 goto bail;
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302720 err = fastrpc_mmap_on_dsp(fl, ud->flags, 0,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302721 rbuf->phys, rbuf->size, &raddr);
2722 if (err)
2723 goto bail;
2724 rbuf->raddr = raddr;
2725 } else {
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302726
2727 uintptr_t va_to_dsp;
2728
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302729 mutex_lock(&fl->fl_map_mutex);
2730 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
2731 ud->size, ud->flags, 1, &map)) {
2732 mutex_unlock(&fl->fl_map_mutex);
2733 mutex_unlock(&fl->map_mutex);
2734 return 0;
2735 }
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302736
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302737 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
2738 (uintptr_t)ud->vaddrin, ud->size,
2739 ud->flags, &map));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302740 mutex_unlock(&fl->fl_map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302741 if (err)
2742 goto bail;
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302743
2744 if (ud->flags == ADSP_MMAP_HEAP_ADDR ||
2745 ud->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
2746 va_to_dsp = 0;
2747 else
2748 va_to_dsp = (uintptr_t)map->va;
2749 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, va_to_dsp,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302750 map->phys, map->size, &raddr));
2751 if (err)
2752 goto bail;
2753 map->raddr = raddr;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302754 }
Mohammed Nayeem Ur Rahman3ac5d322018-09-24 13:54:08 +05302755 ud->vaddrout = raddr;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002756 bail:
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302757 if (err && map) {
2758 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302759 fastrpc_mmap_free(map, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302760 mutex_unlock(&fl->fl_map_mutex);
2761 }
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302762 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002763 return err;
2764}
2765
2766static void fastrpc_channel_close(struct kref *kref)
2767{
2768 struct fastrpc_apps *me = &gfa;
2769 struct fastrpc_channel_ctx *ctx;
2770 int cid;
2771
2772 ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
2773 cid = ctx - &gcinfo[0];
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05302774 if (!me->glink)
2775 smd_close(ctx->chan);
2776 else
2777 fastrpc_glink_close(ctx->chan, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05302778 ctx->chan = NULL;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302779 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002780 pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
2781 MAJOR(me->dev_no), cid);
2782}
2783
2784static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
2785
2786static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302787 int secure, int sharedcb, struct fastrpc_session_ctx **session)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002788{
2789 struct fastrpc_apps *me = &gfa;
2790 int idx = 0, err = 0;
2791
2792 if (chan->sesscount) {
2793 for (idx = 0; idx < chan->sesscount; ++idx) {
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302794 if ((sharedcb && chan->session[idx].smmu.sharedcb) ||
2795 (!chan->session[idx].used &&
2796 chan->session[idx].smmu.secure
2797 == secure && !sharedcb)) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002798 chan->session[idx].used = 1;
2799 break;
2800 }
2801 }
2802 VERIFY(err, idx < chan->sesscount);
2803 if (err)
2804 goto bail;
2805 chan->session[idx].smmu.faults = 0;
2806 } else {
2807 VERIFY(err, me->dev != NULL);
2808 if (err)
2809 goto bail;
2810 chan->session[0].dev = me->dev;
c_mtharue1a5ce12017-10-13 20:47:09 +05302811 chan->session[0].smmu.dev = me->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002812 }
2813
2814 *session = &chan->session[idx];
2815 bail:
2816 return err;
2817}
2818
c_mtharue1a5ce12017-10-13 20:47:09 +05302819static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
2820 size_t size)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002821{
2822 if (glink_queue_rx_intent(h, NULL, size))
2823 return false;
2824 return true;
2825}
2826
c_mtharue1a5ce12017-10-13 20:47:09 +05302827static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002828 const void *pkt_priv, const void *ptr)
2829{
2830}
2831
c_mtharue1a5ce12017-10-13 20:47:09 +05302832static void fastrpc_glink_notify_rx(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002833 const void *pkt_priv, const void *ptr, size_t size)
2834{
2835 struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302836 struct fastrpc_apps *me = &gfa;
2837 uint32_t index;
c_mtharufdac6892017-10-12 13:09:01 +05302838 int err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002839
c_mtharufdac6892017-10-12 13:09:01 +05302840 VERIFY(err, (rsp && size >= sizeof(*rsp)));
2841 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302842 goto bail;
2843
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302844 index = (uint32_t)((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
2845 VERIFY(err, index < FASTRPC_CTX_MAX);
c_mtharufdac6892017-10-12 13:09:01 +05302846 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302847 goto bail;
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302848
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302849 VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
2850 if (err)
2851 goto bail;
2852
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302853 VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp->ctx & ~3)) &&
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302854 me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
2855 if (err)
2856 goto bail;
2857
2858 context_notify_user(me->ctxtable[index], rsp->retval);
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302859bail:
c_mtharufdac6892017-10-12 13:09:01 +05302860 if (err)
2861 pr_err("adsprpc: invalid response or context\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002862 glink_rx_done(handle, ptr, true);
2863}
2864
c_mtharue1a5ce12017-10-13 20:47:09 +05302865static void fastrpc_glink_notify_state(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002866 unsigned int event)
2867{
2868 struct fastrpc_apps *me = &gfa;
2869 int cid = (int)(uintptr_t)priv;
2870 struct fastrpc_glink_info *link;
2871
2872 if (cid < 0 || cid >= NUM_CHANNELS)
2873 return;
2874 link = &me->channel[cid].link;
2875 switch (event) {
2876 case GLINK_CONNECTED:
2877 link->port_state = FASTRPC_LINK_CONNECTED;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302878 complete(&me->channel[cid].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002879 break;
2880 case GLINK_LOCAL_DISCONNECTED:
2881 link->port_state = FASTRPC_LINK_DISCONNECTED;
2882 break;
2883 case GLINK_REMOTE_DISCONNECTED:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002884 break;
2885 default:
2886 break;
2887 }
2888}
2889
2890static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
2891 struct fastrpc_session_ctx **session)
2892{
2893 int err = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302894 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002895
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302896 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002897 if (!*session)
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302898 err = fastrpc_session_alloc_locked(chan, secure, 0, session);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302899 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002900 return err;
2901}
2902
2903static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
2904 struct fastrpc_session_ctx *session)
2905{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302906 struct fastrpc_apps *me = &gfa;
2907
2908 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002909 session->used = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302910 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002911}
2912
2913static int fastrpc_file_free(struct fastrpc_file *fl)
2914{
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302915 struct hlist_node *n = NULL;
Tharun Kumar Merugu3e966762018-04-04 10:56:44 +05302916 struct fastrpc_mmap *map = NULL, *lmap = NULL;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302917 struct fastrpc_perf *perf = NULL, *fperf = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002918 int cid;
2919
2920 if (!fl)
2921 return 0;
2922 cid = fl->cid;
2923
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302924 (void)fastrpc_release_current_dsp_process(fl);
2925
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002926 spin_lock(&fl->apps->hlock);
2927 hlist_del_init(&fl->hn);
2928 spin_unlock(&fl->apps->hlock);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05302929 kfree(fl->debug_buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002930
Sathish Ambleyd7fbcbb2017-03-08 10:55:48 -08002931 if (!fl->sctx) {
2932 kfree(fl);
2933 return 0;
2934 }
tharun kumar9f899ea2017-07-03 17:07:03 +05302935 spin_lock(&fl->hlock);
2936 fl->file_close = 1;
2937 spin_unlock(&fl->hlock);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302938 if (!IS_ERR_OR_NULL(fl->init_mem))
2939 fastrpc_buf_free(fl->init_mem, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002940 fastrpc_context_list_dtor(fl);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302941 fastrpc_cached_buf_list_free(fl);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302942 mutex_lock(&fl->fl_map_mutex);
Tharun Kumar Merugu3e966762018-04-04 10:56:44 +05302943 do {
2944 lmap = NULL;
2945 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
2946 hlist_del_init(&map->hn);
2947 lmap = map;
2948 break;
2949 }
2950 fastrpc_mmap_free(lmap, 1);
2951 } while (lmap);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302952 mutex_unlock(&fl->fl_map_mutex);
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05302953 if (fl->refcount && (fl->ssrcount == fl->apps->channel[cid].ssrcount))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002954 kref_put_mutex(&fl->apps->channel[cid].kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302955 fastrpc_channel_close, &fl->apps->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002956 if (fl->sctx)
2957 fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
2958 if (fl->secsctx)
2959 fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302960
2961 mutex_lock(&fl->perf_mutex);
2962 do {
2963 struct hlist_node *pn = NULL;
2964
2965 fperf = NULL;
2966 hlist_for_each_entry_safe(perf, pn, &fl->perf, hn) {
2967 hlist_del_init(&perf->hn);
2968 fperf = perf;
2969 break;
2970 }
2971 kfree(fperf);
2972 } while (fperf);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302973 fastrpc_remote_buf_list_free(fl);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302974 mutex_unlock(&fl->perf_mutex);
2975 mutex_destroy(&fl->perf_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302976 mutex_destroy(&fl->fl_map_mutex);
Tharun Kumar Merugu8714e642018-05-17 15:21:08 +05302977 mutex_destroy(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002978 kfree(fl);
2979 return 0;
2980}
2981
2982static int fastrpc_device_release(struct inode *inode, struct file *file)
2983{
2984 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2985
2986 if (fl) {
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302987 if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
2988 pm_qos_remove_request(&fl->pm_qos_req);
Sathish Ambley1ca68232017-01-19 10:32:55 -08002989 if (fl->debugfs_file != NULL)
2990 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002991 fastrpc_file_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +05302992 file->private_data = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002993 }
2994 return 0;
2995}
2996
2997static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
2998 void *priv)
2999{
3000 struct fastrpc_apps *me = &gfa;
3001 int cid = (int)((uintptr_t)priv);
3002 struct fastrpc_glink_info *link;
3003
3004 if (cid < 0 || cid >= NUM_CHANNELS)
3005 return;
3006
3007 link = &me->channel[cid].link;
3008 switch (cb_info->link_state) {
3009 case GLINK_LINK_STATE_UP:
3010 link->link_state = FASTRPC_LINK_STATE_UP;
3011 complete(&me->channel[cid].work);
3012 break;
3013 case GLINK_LINK_STATE_DOWN:
3014 link->link_state = FASTRPC_LINK_STATE_DOWN;
3015 break;
3016 default:
3017 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
3018 break;
3019 }
3020}
3021
3022static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
3023{
3024 int err = 0;
3025 struct fastrpc_glink_info *link;
3026
3027 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3028 if (err)
3029 goto bail;
3030
3031 link = &me->channel[cid].link;
3032 if (link->link_notify_handle != NULL)
3033 goto bail;
3034
3035 link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
3036 link->link_notify_handle = glink_register_link_state_cb(
3037 &link->link_info,
3038 (void *)((uintptr_t)cid));
3039 VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
3040 if (err) {
3041 link->link_notify_handle = NULL;
3042 goto bail;
3043 }
3044 VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
3045 RPC_TIMEOUT));
3046bail:
3047 return err;
3048}
3049
3050static void fastrpc_glink_close(void *chan, int cid)
3051{
3052 int err = 0;
3053 struct fastrpc_glink_info *link;
3054
3055 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3056 if (err)
3057 return;
3058 link = &gfa.channel[cid].link;
3059
c_mtharu314a4202017-11-15 22:09:17 +05303060 if (link->port_state == FASTRPC_LINK_CONNECTED ||
3061 link->port_state == FASTRPC_LINK_REMOTE_DISCONNECTING) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003062 link->port_state = FASTRPC_LINK_DISCONNECTING;
3063 glink_close(chan);
3064 }
3065}
3066
3067static int fastrpc_glink_open(int cid)
3068{
3069 int err = 0;
3070 void *handle = NULL;
3071 struct fastrpc_apps *me = &gfa;
3072 struct glink_open_config *cfg;
3073 struct fastrpc_glink_info *link;
3074
3075 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3076 if (err)
3077 goto bail;
3078 link = &me->channel[cid].link;
3079 cfg = &me->channel[cid].link.cfg;
3080 VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
3081 if (err)
3082 goto bail;
3083
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05303084 VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
3085 if (err)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003086 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003087
3088 link->port_state = FASTRPC_LINK_CONNECTING;
3089 cfg->priv = (void *)(uintptr_t)cid;
3090 cfg->edge = gcinfo[cid].link.link_info.edge;
3091 cfg->transport = gcinfo[cid].link.link_info.transport;
3092 cfg->name = FASTRPC_GLINK_GUID;
3093 cfg->notify_rx = fastrpc_glink_notify_rx;
3094 cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
3095 cfg->notify_state = fastrpc_glink_notify_state;
3096 cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
3097 handle = glink_open(cfg);
3098 VERIFY(err, !IS_ERR_OR_NULL(handle));
c_mtharu6e1d26b2017-10-09 16:05:24 +05303099 if (err) {
3100 if (link->port_state == FASTRPC_LINK_CONNECTING)
3101 link->port_state = FASTRPC_LINK_DISCONNECTED;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003102 goto bail;
c_mtharu6e1d26b2017-10-09 16:05:24 +05303103 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003104 me->channel[cid].chan = handle;
3105bail:
3106 return err;
3107}
3108
Sathish Ambley1ca68232017-01-19 10:32:55 -08003109static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
3110{
3111 filp->private_data = inode->i_private;
3112 return 0;
3113}
3114
3115static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
3116 size_t count, loff_t *position)
3117{
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303118 struct fastrpc_apps *me = &gfa;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003119 struct fastrpc_file *fl = filp->private_data;
3120 struct hlist_node *n;
c_mtharue1a5ce12017-10-13 20:47:09 +05303121 struct fastrpc_buf *buf = NULL;
3122 struct fastrpc_mmap *map = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303123 struct fastrpc_mmap *gmaps = NULL;
c_mtharue1a5ce12017-10-13 20:47:09 +05303124 struct smq_invoke_ctx *ictx = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303125 struct fastrpc_channel_ctx *chan = NULL;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003126 unsigned int len = 0;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303127 int i, j, sess_used = 0, ret = 0;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003128 char *fileinfo = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303129 char single_line[UL_SIZE] = "----------------";
3130 char title[UL_SIZE] = "=========================";
Sathish Ambley1ca68232017-01-19 10:32:55 -08003131
3132 fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
3133 if (!fileinfo)
3134 goto bail;
3135 if (fl == NULL) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303136 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3137 "\n%s %s %s\n", title, " CHANNEL INFO ", title);
3138 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3139 "%-8s|%-9s|%-9s|%-14s|%-9s|%-13s\n",
3140 "susbsys", "refcount", "sesscount", "issubsystemup",
3141 "ssrcount", "session_used");
3142 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3143 "-%s%s%s%s-\n", single_line, single_line,
3144 single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003145 for (i = 0; i < NUM_CHANNELS; i++) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303146 sess_used = 0;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003147 chan = &gcinfo[i];
3148 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303149 DEBUGFS_SIZE - len, "%-8s", chan->subsys);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003150 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303151 DEBUGFS_SIZE - len, "|%-9d",
3152 chan->kref.refcount.counter);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303153 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303154 DEBUGFS_SIZE - len, "|%-9d",
3155 chan->sesscount);
3156 len += scnprintf(fileinfo + len,
3157 DEBUGFS_SIZE - len, "|%-14d",
3158 chan->issubsystemup);
3159 len += scnprintf(fileinfo + len,
3160 DEBUGFS_SIZE - len, "|%-9d",
3161 chan->ssrcount);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003162 for (j = 0; j < chan->sesscount; j++) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303163 sess_used += chan->session[j].used;
3164 }
3165 len += scnprintf(fileinfo + len,
3166 DEBUGFS_SIZE - len, "|%-13d\n", sess_used);
3167
3168 }
3169 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3170 "\n%s%s%s\n", "=============",
3171 " CMA HEAP ", "==============");
3172 len += scnprintf(fileinfo + len,
3173 DEBUGFS_SIZE - len, "%-20s|%-20s\n", "addr", "size");
3174 len += scnprintf(fileinfo + len,
3175 DEBUGFS_SIZE - len, "--%s%s---\n",
3176 single_line, single_line);
3177 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3178 "0x%-18llX", me->range.addr);
3179 len += scnprintf(fileinfo + len,
3180 DEBUGFS_SIZE - len, "|0x%-18llX\n", me->range.size);
3181 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3182 "\n==========%s %s %s===========\n",
3183 title, " GMAPS ", title);
3184 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3185 "%-20s|%-20s|%-20s|%-20s\n",
3186 "fd", "phys", "size", "va");
3187 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3188 "%s%s%s%s%s\n", single_line, single_line,
3189 single_line, single_line, single_line);
3190 hlist_for_each_entry_safe(gmaps, n, &me->maps, hn) {
3191 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3192 "%-20d|0x%-18llX|0x%-18X|0x%-20lX\n\n",
3193 gmaps->fd, gmaps->phys,
3194 (uint32_t)gmaps->size,
3195 gmaps->va);
3196 }
3197 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3198 "%-20s|%-20s|%-20s|%-20s\n",
3199 "len", "refs", "raddr", "flags");
3200 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3201 "%s%s%s%s%s\n", single_line, single_line,
3202 single_line, single_line, single_line);
3203 hlist_for_each_entry_safe(gmaps, n, &me->maps, hn) {
3204 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3205 "0x%-18X|%-20d|%-20lu|%-20u\n",
3206 (uint32_t)gmaps->len, gmaps->refs,
3207 gmaps->raddr, gmaps->flags);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003208 }
3209 } else {
3210 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303211 "\n%s %13s %d\n", "cid", ":", fl->cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003212 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303213 "%s %12s %d\n", "tgid", ":", fl->tgid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003214 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303215 "%s %7s %d\n", "sessionid", ":", fl->sessionid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003216 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303217 "%s %8s %d\n", "ssrcount", ":", fl->ssrcount);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303218 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303219 "%s %8s %d\n", "refcount", ":", fl->refcount);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003220 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303221 "%s %14s %d\n", "pd", ":", fl->pd);
3222 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3223 "%s %9s %s\n", "spdname", ":", fl->spdname);
3224 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3225 "%s %6s %d\n", "file_close", ":", fl->file_close);
3226 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3227 "%s %8s %d\n", "sharedcb", ":", fl->sharedcb);
3228 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3229 "%s %9s %d\n", "profile", ":", fl->profile);
3230 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3231 "%s %3s %d\n", "smmu.coherent", ":",
3232 fl->sctx->smmu.coherent);
3233 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3234 "%s %4s %d\n", "smmu.enabled", ":",
3235 fl->sctx->smmu.enabled);
3236 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3237 "%s %9s %d\n", "smmu.cb", ":", fl->sctx->smmu.cb);
3238 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3239 "%s %5s %d\n", "smmu.secure", ":",
3240 fl->sctx->smmu.secure);
3241 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3242 "%s %5s %d\n", "smmu.faults", ":",
3243 fl->sctx->smmu.faults);
3244 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3245 "%s %s %d\n", "link.link_state",
3246 ":", *&me->channel[fl->cid].link.link_state);
3247
3248 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3249 "\n=======%s %s %s======\n", title,
3250 " LIST OF MAPS ", title);
3251
3252 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3253 "%-20s|%-20s|%-20s\n", "va", "phys", "size");
3254 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3255 "%s%s%s%s%s\n",
3256 single_line, single_line, single_line,
3257 single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003258 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303259 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3260 "0x%-20lX|0x%-20llX|0x%-20zu\n\n",
3261 map->va, map->phys,
3262 map->size);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003263 }
3264 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303265 "%-20s|%-20s|%-20s|%-20s\n",
3266 "len", "refs",
3267 "raddr", "uncached");
3268 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3269 "%s%s%s%s%s\n",
3270 single_line, single_line, single_line,
3271 single_line, single_line);
3272 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
3273 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3274 "%-20zu|%-20d|0x%-20lX|%-20d\n\n",
3275 map->len, map->refs, map->raddr,
3276 map->uncached);
3277 }
3278 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3279 "%-20s|%-20s\n", "secure", "attr");
3280 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3281 "%s%s%s%s%s\n",
3282 single_line, single_line, single_line,
3283 single_line, single_line);
3284 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
3285 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3286 "%-20d|0x%-20lX\n\n",
3287 map->secure, map->attr);
3288 }
3289 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303290 "%s %d\n\n",
3291 "KERNEL MEMORY ALLOCATION:", 1);
3292 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303293 "\n======%s %s %s======\n", title,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303294 " LIST OF CACHED BUFS ", title);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303295 spin_lock(&fl->hlock);
3296 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303297 "%-19s|%-19s|%-19s|%-19s\n",
3298 "virt", "phys", "size", "dma_attr");
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303299 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3300 "%s%s%s%s%s\n", single_line, single_line,
3301 single_line, single_line, single_line);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303302 hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303303 len += scnprintf(fileinfo + len,
3304 DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303305 "0x%-17p|0x%-17llX|%-19zu|0x%-17lX\n",
3306 buf->virt, (uint64_t)buf->phys, buf->size,
3307 buf->dma_attr);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303308 }
3309 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3310 "\n%s %s %s\n", title,
3311 " LIST OF PENDING SMQCONTEXTS ", title);
3312
3313 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3314 "%-20s|%-10s|%-10s|%-10s|%-20s\n",
3315 "sc", "pid", "tgid", "used", "ctxid");
3316 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3317 "%s%s%s%s%s\n", single_line, single_line,
3318 single_line, single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003319 hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
3320 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303321 "0x%-18X|%-10d|%-10d|%-10zu|0x%-20llX\n\n",
3322 ictx->sc, ictx->pid, ictx->tgid,
3323 ictx->used, ictx->ctxid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003324 }
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303325
Sathish Ambley1ca68232017-01-19 10:32:55 -08003326 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303327 "\n%s %s %s\n", title,
3328 " LIST OF INTERRUPTED SMQCONTEXTS ", title);
3329
3330 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3331 "%-20s|%-10s|%-10s|%-10s|%-20s\n",
3332 "sc", "pid", "tgid", "used", "ctxid");
3333 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3334 "%s%s%s%s%s\n", single_line, single_line,
3335 single_line, single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003336 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303337 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3338 "%-20u|%-20d|%-20d|%-20zu|0x%-20llX\n\n",
3339 ictx->sc, ictx->pid, ictx->tgid,
3340 ictx->used, ictx->ctxid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003341 }
3342 spin_unlock(&fl->hlock);
3343 }
3344 if (len > DEBUGFS_SIZE)
3345 len = DEBUGFS_SIZE;
3346 ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
3347 kfree(fileinfo);
3348bail:
3349 return ret;
3350}
3351
3352static const struct file_operations debugfs_fops = {
3353 .open = fastrpc_debugfs_open,
3354 .read = fastrpc_debugfs_read,
3355};
Sathish Ambley36849af2017-02-02 09:35:55 -08003356static int fastrpc_channel_open(struct fastrpc_file *fl)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003357{
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003358 struct fastrpc_apps *me = &gfa;
Sathish Ambley36849af2017-02-02 09:35:55 -08003359 int cid, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003360
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303361 mutex_lock(&me->smd_mutex);
3362
Sathish Ambley36849af2017-02-02 09:35:55 -08003363 VERIFY(err, fl && fl->sctx);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003364 if (err)
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303365 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08003366 cid = fl->cid;
c_mtharu314a4202017-11-15 22:09:17 +05303367 VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
3368 if (err)
3369 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303370 if (me->channel[cid].ssrcount !=
3371 me->channel[cid].prevssrcount) {
3372 if (!me->channel[cid].issubsystemup) {
3373 VERIFY(err, 0);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303374 if (err) {
3375 err = -ENOTCONN;
c_mtharue1a5ce12017-10-13 20:47:09 +05303376 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303377 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303378 }
3379 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003380 fl->ssrcount = me->channel[cid].ssrcount;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303381 fl->refcount = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003382 if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
c_mtharue1a5ce12017-10-13 20:47:09 +05303383 (me->channel[cid].chan == NULL)) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303384 if (me->glink) {
3385 VERIFY(err, 0 == fastrpc_glink_register(cid, me));
3386 if (err)
3387 goto bail;
3388 VERIFY(err, 0 == fastrpc_glink_open(cid));
3389 } else {
3390 VERIFY(err, !smd_named_open_on_edge(FASTRPC_SMD_GUID,
3391 gcinfo[cid].channel,
3392 (smd_channel_t **)&me->channel[cid].chan,
3393 (void *)(uintptr_t)cid,
3394 smd_event_handler));
3395 }
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05303396 VERIFY(err,
3397 wait_for_completion_timeout(&me->channel[cid].workport,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003398 RPC_TIMEOUT));
3399 if (err) {
c_mtharue1a5ce12017-10-13 20:47:09 +05303400 me->channel[cid].chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003401 goto bail;
3402 }
3403 kref_init(&me->channel[cid].kref);
3404 pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
3405 MAJOR(me->dev_no), cid);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303406 if (me->glink) {
3407 err = glink_queue_rx_intent(me->channel[cid].chan, NULL,
3408 FASTRPC_GLINK_INTENT_LEN);
3409 err |= glink_queue_rx_intent(me->channel[cid].chan,
3410 NULL, FASTRPC_GLINK_INTENT_LEN);
3411 if (err)
3412 pr_warn("adsprpc: initial intent fail for %d err %d\n",
3413 cid, err);
3414 }
Tharun Kumar Merugud86fc8c2018-01-04 16:35:31 +05303415 if (cid == 0 && me->channel[cid].ssrcount !=
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003416 me->channel[cid].prevssrcount) {
c_mtharue1a5ce12017-10-13 20:47:09 +05303417 if (fastrpc_mmap_remove_ssr(fl))
3418 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003419 me->channel[cid].prevssrcount =
3420 me->channel[cid].ssrcount;
3421 }
3422 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003423
3424bail:
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303425 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003426 return err;
3427}
3428
Sathish Ambley36849af2017-02-02 09:35:55 -08003429static int fastrpc_device_open(struct inode *inode, struct file *filp)
3430{
3431 int err = 0;
Sathish Ambley567012b2017-03-06 11:55:04 -08003432 struct dentry *debugfs_file;
c_mtharue1a5ce12017-10-13 20:47:09 +05303433 struct fastrpc_file *fl = NULL;
Sathish Ambley36849af2017-02-02 09:35:55 -08003434 struct fastrpc_apps *me = &gfa;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303435 char strpid[PID_SIZE];
3436 int buf_size = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003437
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303438 /*
3439 * Indicates the device node opened
3440 * MINOR_NUM_DEV or MINOR_NUM_SECURE_DEV
3441 */
3442 int dev_minor = MINOR(inode->i_rdev);
3443
3444 VERIFY(err, ((dev_minor == MINOR_NUM_DEV) ||
3445 (dev_minor == MINOR_NUM_SECURE_DEV)));
3446 if (err) {
3447 pr_err("adsprpc: Invalid dev minor num %d\n", dev_minor);
3448 return err;
3449 }
3450
c_mtharue1a5ce12017-10-13 20:47:09 +05303451 VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
Sathish Ambley36849af2017-02-02 09:35:55 -08003452 if (err)
3453 return err;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303454 snprintf(strpid, PID_SIZE, "%d", current->pid);
Mohammed Nayeem Ur Rahman2d65b4a2018-10-10 16:34:37 +05303455 buf_size = strlen(current->comm) + strlen("_") + strlen(strpid) + 1;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303456 fl->debug_buf = kzalloc(buf_size, GFP_KERNEL);
3457 snprintf(fl->debug_buf, UL_SIZE, "%.10s%s%d",
3458 current->comm, "_", current->pid);
3459 debugfs_file = debugfs_create_file(fl->debug_buf, 0644,
3460 debugfs_root, fl, &debugfs_fops);
3461
Sathish Ambley36849af2017-02-02 09:35:55 -08003462 context_list_ctor(&fl->clst);
3463 spin_lock_init(&fl->hlock);
3464 INIT_HLIST_HEAD(&fl->maps);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303465 INIT_HLIST_HEAD(&fl->perf);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303466 INIT_HLIST_HEAD(&fl->cached_bufs);
3467 INIT_HLIST_HEAD(&fl->remote_bufs);
Sathish Ambley36849af2017-02-02 09:35:55 -08003468 INIT_HLIST_NODE(&fl->hn);
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05303469 fl->sessionid = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003470 fl->tgid = current->tgid;
3471 fl->apps = me;
3472 fl->mode = FASTRPC_MODE_SERIAL;
3473 fl->cid = -1;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303474 fl->dev_minor = dev_minor;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303475 fl->init_mem = NULL;
Sathish Ambley567012b2017-03-06 11:55:04 -08003476 if (debugfs_file != NULL)
3477 fl->debugfs_file = debugfs_file;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303478 fl->qos_request = 0;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303479 fl->refcount = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003480 filp->private_data = fl;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05303481 mutex_init(&fl->map_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303482 mutex_init(&fl->fl_map_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08003483 spin_lock(&me->hlock);
3484 hlist_add_head(&fl->hn, &me->drivers);
3485 spin_unlock(&me->hlock);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303486 mutex_init(&fl->perf_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08003487 return 0;
3488}
3489
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003490static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
3491{
3492 int err = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003493 uint32_t cid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003494
c_mtharue1a5ce12017-10-13 20:47:09 +05303495 VERIFY(err, fl != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003496 if (err)
3497 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08003498 if (fl->cid == -1) {
3499 cid = *info;
3500 VERIFY(err, cid < NUM_CHANNELS);
3501 if (err)
3502 goto bail;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303503 /* Check to see if the device node is non-secure */
zhaochenfc798572018-08-17 15:32:37 +08003504 if (fl->dev_minor == MINOR_NUM_DEV &&
3505 fl->apps->secure_flag == true) {
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303506 /*
3507 * For non secure device node check and make sure that
3508 * the channel allows non-secure access
3509 * If not, bail. Session will not start.
3510 * cid will remain -1 and client will not be able to
3511 * invoke any other methods without failure
3512 */
3513 if (fl->apps->channel[cid].secure == SECURE_CHANNEL) {
3514 err = -EPERM;
3515 pr_err("adsprpc: GetInfo failed dev %d, cid %d, secure %d\n",
3516 fl->dev_minor, cid,
3517 fl->apps->channel[cid].secure);
3518 goto bail;
3519 }
3520 }
Sathish Ambley36849af2017-02-02 09:35:55 -08003521 fl->cid = cid;
3522 fl->ssrcount = fl->apps->channel[cid].ssrcount;
3523 VERIFY(err, !fastrpc_session_alloc_locked(
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303524 &fl->apps->channel[cid], 0, fl->sharedcb, &fl->sctx));
Sathish Ambley36849af2017-02-02 09:35:55 -08003525 if (err)
3526 goto bail;
3527 }
Tharun Kumar Merugu80be7d62017-08-02 11:03:22 +05303528 VERIFY(err, fl->sctx != NULL);
3529 if (err)
3530 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003531 *info = (fl->sctx->smmu.enabled ? 1 : 0);
3532bail:
3533 return err;
3534}
3535
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303536static int fastrpc_internal_control(struct fastrpc_file *fl,
3537 struct fastrpc_ioctl_control *cp)
3538{
3539 int err = 0;
3540 int latency;
3541
3542 VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
3543 if (err)
3544 goto bail;
3545 VERIFY(err, !IS_ERR_OR_NULL(cp));
3546 if (err)
3547 goto bail;
3548
3549 switch (cp->req) {
3550 case FASTRPC_CONTROL_LATENCY:
3551 latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
3552 fl->apps->latency : PM_QOS_DEFAULT_VALUE;
3553 VERIFY(err, latency != 0);
3554 if (err)
3555 goto bail;
3556 if (!fl->qos_request) {
3557 pm_qos_add_request(&fl->pm_qos_req,
3558 PM_QOS_CPU_DMA_LATENCY, latency);
3559 fl->qos_request = 1;
3560 } else
3561 pm_qos_update_request(&fl->pm_qos_req, latency);
3562 break;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303563 case FASTRPC_CONTROL_SMMU:
3564 fl->sharedcb = cp->smmu.sharedcb;
3565 break;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303566 case FASTRPC_CONTROL_KALLOC:
3567 cp->kalloc.kalloc_support = 1;
3568 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303569 default:
3570 err = -ENOTTY;
3571 break;
3572 }
3573bail:
3574 return err;
3575}
3576
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003577static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
3578 unsigned long ioctl_param)
3579{
3580 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07003581 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003582 struct fastrpc_ioctl_mmap mmap;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303583 struct fastrpc_ioctl_mmap_64 mmap64;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003584 struct fastrpc_ioctl_munmap munmap;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303585 struct fastrpc_ioctl_munmap_64 munmap64;
c_mtharu7bd6a422017-10-17 18:15:37 +05303586 struct fastrpc_ioctl_munmap_fd munmap_fd;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08003587 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003588 struct fastrpc_ioctl_perf perf;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303589 struct fastrpc_ioctl_control cp;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003590 } p;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303591 union {
3592 struct fastrpc_ioctl_mmap mmap;
3593 struct fastrpc_ioctl_munmap munmap;
3594 } i;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003595 void *param = (char *)ioctl_param;
3596 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
3597 int size = 0, err = 0;
3598 uint32_t info;
3599
c_mtharue1a5ce12017-10-13 20:47:09 +05303600 p.inv.fds = NULL;
3601 p.inv.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07003602 p.inv.crc = NULL;
tharun kumar9f899ea2017-07-03 17:07:03 +05303603 spin_lock(&fl->hlock);
3604 if (fl->file_close == 1) {
3605 err = EBADF;
3606 pr_warn("ADSPRPC: fastrpc_device_release is happening, So not sending any new requests to DSP");
3607 spin_unlock(&fl->hlock);
3608 goto bail;
3609 }
3610 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003611
3612 switch (ioctl_num) {
3613 case FASTRPC_IOCTL_INVOKE:
3614 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07003615 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003616 case FASTRPC_IOCTL_INVOKE_FD:
3617 if (!size)
3618 size = sizeof(struct fastrpc_ioctl_invoke_fd);
3619 /* fall through */
3620 case FASTRPC_IOCTL_INVOKE_ATTRS:
3621 if (!size)
3622 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07003623 /* fall through */
3624 case FASTRPC_IOCTL_INVOKE_CRC:
3625 if (!size)
3626 size = sizeof(struct fastrpc_ioctl_invoke_crc);
c_mtharue1a5ce12017-10-13 20:47:09 +05303627 K_COPY_FROM_USER(err, 0, &p.inv, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003628 if (err)
3629 goto bail;
3630 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
3631 0, &p.inv)));
3632 if (err)
3633 goto bail;
3634 break;
3635 case FASTRPC_IOCTL_MMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303636 K_COPY_FROM_USER(err, 0, &p.mmap, param,
3637 sizeof(p.mmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303638 if (err)
3639 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003640 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
3641 if (err)
3642 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303643 K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003644 if (err)
3645 goto bail;
3646 break;
3647 case FASTRPC_IOCTL_MUNMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303648 K_COPY_FROM_USER(err, 0, &p.munmap, param,
3649 sizeof(p.munmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303650 if (err)
3651 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003652 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
3653 &p.munmap)));
3654 if (err)
3655 goto bail;
3656 break;
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303657 case FASTRPC_IOCTL_MMAP_64:
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303658 K_COPY_FROM_USER(err, 0, &p.mmap64, param,
3659 sizeof(p.mmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303660 if (err)
3661 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303662 get_fastrpc_ioctl_mmap_64(&p.mmap64, &i.mmap);
3663 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &i.mmap)));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303664 if (err)
3665 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303666 put_fastrpc_ioctl_mmap_64(&p.mmap64, &i.mmap);
3667 K_COPY_TO_USER(err, 0, param, &p.mmap64, sizeof(p.mmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303668 if (err)
3669 goto bail;
3670 break;
3671 case FASTRPC_IOCTL_MUNMAP_64:
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303672 K_COPY_FROM_USER(err, 0, &p.munmap64, param,
3673 sizeof(p.munmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303674 if (err)
3675 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303676 get_fastrpc_ioctl_munmap_64(&p.munmap64, &i.munmap);
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303677 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303678 &i.munmap)));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303679 if (err)
3680 goto bail;
3681 break;
c_mtharu7bd6a422017-10-17 18:15:37 +05303682 case FASTRPC_IOCTL_MUNMAP_FD:
3683 K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
3684 sizeof(p.munmap_fd));
3685 if (err)
3686 goto bail;
3687 VERIFY(err, 0 == (err = fastrpc_internal_munmap_fd(fl,
3688 &p.munmap_fd)));
3689 if (err)
3690 goto bail;
3691 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003692 case FASTRPC_IOCTL_SETMODE:
3693 switch ((uint32_t)ioctl_param) {
3694 case FASTRPC_MODE_PARALLEL:
3695 case FASTRPC_MODE_SERIAL:
3696 fl->mode = (uint32_t)ioctl_param;
3697 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003698 case FASTRPC_MODE_PROFILE:
3699 fl->profile = (uint32_t)ioctl_param;
3700 break;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05303701 case FASTRPC_MODE_SESSION:
3702 fl->sessionid = 1;
3703 fl->tgid |= (1 << SESSION_ID_INDEX);
3704 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003705 default:
3706 err = -ENOTTY;
3707 break;
3708 }
3709 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003710 case FASTRPC_IOCTL_GETPERF:
c_mtharue1a5ce12017-10-13 20:47:09 +05303711 K_COPY_FROM_USER(err, 0, &p.perf,
3712 param, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003713 if (err)
3714 goto bail;
3715 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
3716 if (p.perf.keys) {
3717 char *keys = PERF_KEYS;
3718
c_mtharue1a5ce12017-10-13 20:47:09 +05303719 K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
3720 keys, strlen(keys)+1);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003721 if (err)
3722 goto bail;
3723 }
3724 if (p.perf.data) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303725 struct fastrpc_perf *perf = NULL, *fperf = NULL;
3726 struct hlist_node *n = NULL;
3727
3728 mutex_lock(&fl->perf_mutex);
3729 hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
3730 if (perf->tid == current->pid) {
3731 fperf = perf;
3732 break;
3733 }
3734 }
3735
3736 mutex_unlock(&fl->perf_mutex);
3737
3738 if (fperf) {
3739 K_COPY_TO_USER(err, 0, (void *)p.perf.data,
3740 fperf, sizeof(*fperf));
3741 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003742 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303743 K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003744 if (err)
3745 goto bail;
3746 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303747 case FASTRPC_IOCTL_CONTROL:
c_mtharue1a5ce12017-10-13 20:47:09 +05303748 K_COPY_FROM_USER(err, 0, &p.cp, param,
3749 sizeof(p.cp));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303750 if (err)
3751 goto bail;
3752 VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
3753 if (err)
3754 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303755 if (p.cp.req == FASTRPC_CONTROL_KALLOC) {
3756 K_COPY_TO_USER(err, 0, param, &p.cp, sizeof(p.cp));
3757 if (err)
3758 goto bail;
3759 }
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303760 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003761 case FASTRPC_IOCTL_GETINFO:
c_mtharue1a5ce12017-10-13 20:47:09 +05303762 K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
Sathish Ambley36849af2017-02-02 09:35:55 -08003763 if (err)
3764 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003765 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
3766 if (err)
3767 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303768 K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003769 if (err)
3770 goto bail;
3771 break;
3772 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08003773 p.init.attrs = 0;
3774 p.init.siglen = 0;
3775 size = sizeof(struct fastrpc_ioctl_init);
3776 /* fall through */
3777 case FASTRPC_IOCTL_INIT_ATTRS:
3778 if (!size)
3779 size = sizeof(struct fastrpc_ioctl_init_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +05303780 K_COPY_FROM_USER(err, 0, &p.init, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003781 if (err)
3782 goto bail;
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05303783 VERIFY(err, p.init.init.filelen >= 0 &&
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303784 p.init.init.filelen < INIT_FILELEN_MAX);
3785 if (err)
3786 goto bail;
3787 VERIFY(err, p.init.init.memlen >= 0 &&
3788 p.init.init.memlen < INIT_MEMLEN_MAX);
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05303789 if (err)
3790 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303791 VERIFY(err, 0 == (err = fastrpc_init_process(fl, &p.init)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003792 if (err)
3793 goto bail;
3794 break;
3795
3796 default:
3797 err = -ENOTTY;
3798 pr_info("bad ioctl: %d\n", ioctl_num);
3799 break;
3800 }
3801 bail:
3802 return err;
3803}
3804
3805static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
3806 unsigned long code,
3807 void *data)
3808{
3809 struct fastrpc_apps *me = &gfa;
3810 struct fastrpc_channel_ctx *ctx;
c_mtharue1a5ce12017-10-13 20:47:09 +05303811 struct notif_data *notifdata = data;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003812 int cid;
3813
3814 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
3815 cid = ctx - &me->channel[0];
3816 if (code == SUBSYS_BEFORE_SHUTDOWN) {
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303817 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003818 ctx->ssrcount++;
c_mtharue1a5ce12017-10-13 20:47:09 +05303819 ctx->issubsystemup = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303820 if (ctx->chan) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303821 if (me->glink)
3822 fastrpc_glink_close(ctx->chan, cid);
3823 else
3824 smd_close(ctx->chan);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303825 ctx->chan = NULL;
3826 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
3827 gcinfo[cid].name, MAJOR(me->dev_no), cid);
3828 }
3829 mutex_unlock(&me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05303830 if (cid == 0)
3831 me->staticpd_flags = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003832 fastrpc_notify_drivers(me, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05303833 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
3834 if (me->channel[0].remoteheap_ramdump_dev &&
3835 notifdata->enable_ramdump) {
3836 me->channel[0].ramdumpenabled = 1;
3837 }
3838 } else if (code == SUBSYS_AFTER_POWERUP) {
3839 ctx->issubsystemup = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003840 }
3841
3842 return NOTIFY_DONE;
3843}
3844
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303845static int fastrpc_pdr_notifier_cb(struct notifier_block *pdrnb,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303846 unsigned long code,
3847 void *data)
3848{
3849 struct fastrpc_apps *me = &gfa;
3850 struct fastrpc_static_pd *spd;
3851 struct notif_data *notifdata = data;
3852
3853 spd = container_of(pdrnb, struct fastrpc_static_pd, pdrnb);
3854 if (code == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01) {
3855 mutex_lock(&me->smd_mutex);
3856 spd->pdrcount++;
3857 spd->ispdup = 0;
3858 pr_info("ADSPRPC: Audio PDR notifier %d %s\n",
3859 MAJOR(me->dev_no), spd->spdname);
3860 mutex_unlock(&me->smd_mutex);
3861 if (!strcmp(spd->spdname,
3862 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME))
3863 me->staticpd_flags = 0;
3864 fastrpc_notify_pdr_drivers(me, spd->spdname);
3865 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
3866 if (me->channel[0].remoteheap_ramdump_dev &&
3867 notifdata->enable_ramdump) {
3868 me->channel[0].ramdumpenabled = 1;
3869 }
3870 } else if (code == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
3871 spd->ispdup = 1;
3872 }
3873
3874 return NOTIFY_DONE;
3875}
3876
3877static int fastrpc_get_service_location_notify(struct notifier_block *nb,
3878 unsigned long opcode, void *data)
3879{
3880 struct fastrpc_static_pd *spd;
3881 struct pd_qmi_client_data *pdr = data;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303882 int curr_state = 0, i = 0;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303883
3884 spd = container_of(nb, struct fastrpc_static_pd, get_service_nb);
3885 if (opcode == LOCATOR_DOWN) {
3886 pr_err("ADSPRPC: Audio PD restart notifier locator down\n");
3887 return NOTIFY_DONE;
3888 }
3889
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303890 for (i = 0; i < pdr->total_domains; i++) {
3891 if ((!strcmp(pdr->domain_list[i].name,
3892 "msm/adsp/audio_pd")) ||
3893 (!strcmp(pdr->domain_list[i].name,
3894 "msm/adsp/sensor_pd"))) {
3895 spd->pdrhandle =
3896 service_notif_register_notifier(
3897 pdr->domain_list[i].name,
3898 pdr->domain_list[i].instance_id,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303899 &spd->pdrnb, &curr_state);
Tharun Kumar Meruguad4beb82018-05-10 19:51:48 +05303900 if (IS_ERR(spd->pdrhandle)) {
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303901 pr_err("ADSPRPC: Unable to register notifier\n");
Tharun Kumar Meruguad4beb82018-05-10 19:51:48 +05303902 } else if (curr_state ==
3903 SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
3904 pr_info("ADSPRPC: STATE_UP_V01 received\n");
3905 spd->ispdup = 1;
3906 } else if (curr_state ==
3907 SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01) {
3908 pr_info("ADSPRPC: STATE_UNINIT_V01 received\n");
3909 }
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303910 break;
3911 }
3912 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303913
3914 return NOTIFY_DONE;
3915}
3916
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003917static const struct file_operations fops = {
3918 .open = fastrpc_device_open,
3919 .release = fastrpc_device_release,
3920 .unlocked_ioctl = fastrpc_device_ioctl,
3921 .compat_ioctl = compat_fastrpc_device_ioctl,
3922};
3923
3924static const struct of_device_id fastrpc_match_table[] = {
3925 { .compatible = "qcom,msm-fastrpc-adsp", },
3926 { .compatible = "qcom,msm-fastrpc-compute", },
3927 { .compatible = "qcom,msm-fastrpc-compute-cb", },
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303928 { .compatible = "qcom,msm-fastrpc-legacy-compute", },
3929 { .compatible = "qcom,msm-fastrpc-legacy-compute-cb", },
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003930 { .compatible = "qcom,msm-adsprpc-mem-region", },
3931 {}
3932};
3933
3934static int fastrpc_cb_probe(struct device *dev)
3935{
3936 struct fastrpc_channel_ctx *chan;
3937 struct fastrpc_session_ctx *sess;
3938 struct of_phandle_args iommuspec;
3939 const char *name;
3940 unsigned int start = 0x80000000;
3941 int err = 0, i;
3942 int secure_vmid = VMID_CP_PIXEL;
3943
c_mtharue1a5ce12017-10-13 20:47:09 +05303944 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
3945 "label", NULL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003946 if (err)
3947 goto bail;
3948 for (i = 0; i < NUM_CHANNELS; i++) {
3949 if (!gcinfo[i].name)
3950 continue;
3951 if (!strcmp(name, gcinfo[i].name))
3952 break;
3953 }
3954 VERIFY(err, i < NUM_CHANNELS);
3955 if (err)
3956 goto bail;
3957 chan = &gcinfo[i];
3958 VERIFY(err, chan->sesscount < NUM_SESSIONS);
3959 if (err)
3960 goto bail;
3961
3962 VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
3963 "#iommu-cells", 0, &iommuspec));
3964 if (err)
3965 goto bail;
3966 sess = &chan->session[chan->sesscount];
3967 sess->smmu.cb = iommuspec.args[0] & 0xf;
3968 sess->used = 0;
3969 sess->smmu.coherent = of_property_read_bool(dev->of_node,
3970 "dma-coherent");
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303971 sess->smmu.sharedcb = of_property_read_bool(dev->of_node,
3972 "shared-cb");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003973 sess->smmu.secure = of_property_read_bool(dev->of_node,
3974 "qcom,secure-context-bank");
3975 if (sess->smmu.secure)
3976 start = 0x60000000;
3977 VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
3978 arm_iommu_create_mapping(&platform_bus_type,
Tharun Kumar Meruguca183f92017-04-27 17:43:27 +05303979 start, 0x78000000)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003980 if (err)
3981 goto bail;
3982
3983 if (sess->smmu.secure)
3984 iommu_domain_set_attr(sess->smmu.mapping->domain,
3985 DOMAIN_ATTR_SECURE_VMID,
3986 &secure_vmid);
3987
3988 VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
3989 if (err)
3990 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303991 sess->smmu.dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003992 sess->smmu.enabled = 1;
3993 chan->sesscount++;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003994 debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
3995 NULL, &debugfs_fops);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003996bail:
3997 return err;
3998}
3999
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304000static int fastrpc_cb_legacy_probe(struct device *dev)
4001{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304002 struct fastrpc_channel_ctx *chan;
4003 struct fastrpc_session_ctx *first_sess = NULL, *sess = NULL;
4004 const char *name;
4005 unsigned int *sids = NULL, sids_size = 0;
4006 int err = 0, ret = 0, i;
4007
4008 unsigned int start = 0x80000000;
4009
4010 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
4011 "label", NULL)));
4012 if (err)
4013 goto bail;
4014
4015 for (i = 0; i < NUM_CHANNELS; i++) {
4016 if (!gcinfo[i].name)
4017 continue;
4018 if (!strcmp(name, gcinfo[i].name))
4019 break;
4020 }
4021 VERIFY(err, i < NUM_CHANNELS);
4022 if (err)
4023 goto bail;
4024
4025 chan = &gcinfo[i];
4026 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4027 if (err)
4028 goto bail;
4029
4030 first_sess = &chan->session[chan->sesscount];
4031
4032 VERIFY(err, NULL != of_get_property(dev->of_node,
4033 "sids", &sids_size));
4034 if (err)
4035 goto bail;
4036
4037 VERIFY(err, NULL != (sids = kzalloc(sids_size, GFP_KERNEL)));
4038 if (err)
4039 goto bail;
4040 ret = of_property_read_u32_array(dev->of_node, "sids", sids,
4041 sids_size/sizeof(unsigned int));
4042 if (ret)
4043 goto bail;
4044
4045 VERIFY(err, !IS_ERR_OR_NULL(first_sess->smmu.mapping =
4046 arm_iommu_create_mapping(&platform_bus_type,
4047 start, 0x78000000)));
4048 if (err)
4049 goto bail;
4050
4051 VERIFY(err, !arm_iommu_attach_device(dev, first_sess->smmu.mapping));
4052 if (err)
4053 goto bail;
4054
4055
4056 for (i = 0; i < sids_size/sizeof(unsigned int); i++) {
4057 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4058 if (err)
4059 goto bail;
4060 sess = &chan->session[chan->sesscount];
4061 sess->smmu.cb = sids[i];
4062 sess->smmu.dev = dev;
4063 sess->smmu.mapping = first_sess->smmu.mapping;
4064 sess->smmu.enabled = 1;
4065 sess->used = 0;
4066 sess->smmu.coherent = false;
4067 sess->smmu.secure = false;
4068 chan->sesscount++;
4069 }
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304070bail:
4071 kfree(sids);
4072 return err;
4073}
4074
4075
4076
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304077static void init_secure_vmid_list(struct device *dev, char *prop_name,
4078 struct secure_vm *destvm)
4079{
4080 int err = 0;
4081 u32 len = 0, i = 0;
4082 u32 *rhvmlist = NULL;
4083 u32 *rhvmpermlist = NULL;
4084
4085 if (!of_find_property(dev->of_node, prop_name, &len))
4086 goto bail;
4087 if (len == 0)
4088 goto bail;
4089 len /= sizeof(u32);
4090 VERIFY(err, NULL != (rhvmlist = kcalloc(len, sizeof(u32), GFP_KERNEL)));
4091 if (err)
4092 goto bail;
4093 VERIFY(err, NULL != (rhvmpermlist = kcalloc(len, sizeof(u32),
4094 GFP_KERNEL)));
4095 if (err)
4096 goto bail;
4097 for (i = 0; i < len; i++) {
4098 err = of_property_read_u32_index(dev->of_node, prop_name, i,
4099 &rhvmlist[i]);
4100 rhvmpermlist[i] = PERM_READ | PERM_WRITE | PERM_EXEC;
4101 pr_info("ADSPRPC: Secure VMID = %d", rhvmlist[i]);
4102 if (err) {
4103 pr_err("ADSPRPC: Failed to read VMID\n");
4104 goto bail;
4105 }
4106 }
4107 destvm->vmid = rhvmlist;
4108 destvm->vmperm = rhvmpermlist;
4109 destvm->vmcount = len;
4110bail:
4111 if (err) {
4112 kfree(rhvmlist);
4113 kfree(rhvmpermlist);
4114 }
4115}
4116
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304117static void configure_secure_channels(uint32_t secure_domains)
4118{
4119 struct fastrpc_apps *me = &gfa;
4120 int ii = 0;
4121 /*
4122 * secure_domains contains the bitmask of the secure channels
4123 * Bit 0 - ADSP
4124 * Bit 1 - MDSP
4125 * Bit 2 - SLPI
4126 * Bit 3 - CDSP
4127 */
4128 for (ii = ADSP_DOMAIN_ID; ii <= CDSP_DOMAIN_ID; ++ii) {
4129 int secure = (secure_domains >> ii) & 0x01;
4130
4131 me->channel[ii].secure = secure;
4132 }
4133}
4134
4135
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004136static int fastrpc_probe(struct platform_device *pdev)
4137{
4138 int err = 0;
4139 struct fastrpc_apps *me = &gfa;
4140 struct device *dev = &pdev->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004141 struct device_node *ion_node, *node;
4142 struct platform_device *ion_pdev;
4143 struct cma *cma;
4144 uint32_t val;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304145 int ret = 0;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304146 uint32_t secure_domains;
c_mtharu63ffc012017-11-16 15:26:56 +05304147
4148 if (of_device_is_compatible(dev->of_node,
4149 "qcom,msm-fastrpc-compute")) {
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304150 init_secure_vmid_list(dev, "qcom,adsp-remoteheap-vmid",
4151 &gcinfo[0].rhvm);
c_mtharu63ffc012017-11-16 15:26:56 +05304152
c_mtharu63ffc012017-11-16 15:26:56 +05304153
4154 of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
4155 &me->latency);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304156 if (of_get_property(dev->of_node,
4157 "qcom,secure-domains", NULL) != NULL) {
4158 VERIFY(err, !of_property_read_u32(dev->of_node,
4159 "qcom,secure-domains",
4160 &secure_domains));
zhaochenfc798572018-08-17 15:32:37 +08004161 if (!err) {
4162 me->secure_flag = true;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304163 configure_secure_channels(secure_domains);
zhaochenfc798572018-08-17 15:32:37 +08004164 } else {
4165 me->secure_flag = false;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304166 pr_info("adsprpc: unable to read the domain configuration from dts\n");
zhaochenfc798572018-08-17 15:32:37 +08004167 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304168 }
c_mtharu63ffc012017-11-16 15:26:56 +05304169 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004170 if (of_device_is_compatible(dev->of_node,
4171 "qcom,msm-fastrpc-compute-cb"))
4172 return fastrpc_cb_probe(dev);
4173
4174 if (of_device_is_compatible(dev->of_node,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304175 "qcom,msm-fastrpc-legacy-compute")) {
4176 me->glink = false;
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304177 me->legacy = 1;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304178 }
4179
4180 if (of_device_is_compatible(dev->of_node,
4181 "qcom,msm-fastrpc-legacy-compute-cb")){
4182 return fastrpc_cb_legacy_probe(dev);
4183 }
4184
4185 if (of_device_is_compatible(dev->of_node,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004186 "qcom,msm-adsprpc-mem-region")) {
4187 me->dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004188 ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
4189 if (ion_node) {
4190 for_each_available_child_of_node(ion_node, node) {
4191 if (of_property_read_u32(node, "reg", &val))
4192 continue;
4193 if (val != ION_ADSP_HEAP_ID)
4194 continue;
4195 ion_pdev = of_find_device_by_node(node);
4196 if (!ion_pdev)
4197 break;
4198 cma = dev_get_cma_area(&ion_pdev->dev);
4199 if (cma) {
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304200 me->range.addr = cma_get_base(cma);
4201 me->range.size =
4202 (size_t)cma_get_size(cma);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004203 }
4204 break;
4205 }
4206 }
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304207 if (me->range.addr && !of_property_read_bool(dev->of_node,
Tharun Kumar Merugu6b7a4a22018-01-17 16:08:07 +05304208 "restrict-access")) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004209 int srcVM[1] = {VMID_HLOS};
4210 int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
4211 VMID_ADSP_Q6};
Sathish Ambley84d11862017-05-15 14:36:05 -07004212 int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004213 PERM_READ | PERM_WRITE | PERM_EXEC,
4214 PERM_READ | PERM_WRITE | PERM_EXEC,
4215 PERM_READ | PERM_WRITE | PERM_EXEC,
4216 };
4217
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304218 VERIFY(err, !hyp_assign_phys(me->range.addr,
4219 me->range.size, srcVM, 1,
4220 destVM, destVMperm, 4));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004221 if (err)
4222 goto bail;
4223 }
4224 return 0;
4225 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304226 if (of_property_read_bool(dev->of_node,
4227 "qcom,fastrpc-adsp-audio-pdr")) {
4228 int session;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004229
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304230 VERIFY(err, !fastrpc_get_adsp_session(
4231 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
4232 if (err)
4233 goto spdbail;
4234 me->channel[0].spd[session].get_service_nb.notifier_call =
4235 fastrpc_get_service_location_notify;
4236 ret = get_service_location(
4237 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
4238 AUDIO_PDR_ADSP_SERVICE_NAME,
4239 &me->channel[0].spd[session].get_service_nb);
4240 if (ret)
4241 pr_err("ADSPRPC: Get service location failed: %d\n",
4242 ret);
4243 }
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304244 if (of_property_read_bool(dev->of_node,
4245 "qcom,fastrpc-adsp-sensors-pdr")) {
4246 int session;
4247
4248 VERIFY(err, !fastrpc_get_adsp_session(
4249 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
4250 if (err)
4251 goto spdbail;
4252 me->channel[0].spd[session].get_service_nb.notifier_call =
4253 fastrpc_get_service_location_notify;
4254 ret = get_service_location(
4255 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
4256 SENSORS_PDR_ADSP_SERVICE_NAME,
4257 &me->channel[0].spd[session].get_service_nb);
4258 if (ret)
4259 pr_err("ADSPRPC: Get service location failed: %d\n",
4260 ret);
4261 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304262spdbail:
4263 err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004264 VERIFY(err, !of_platform_populate(pdev->dev.of_node,
4265 fastrpc_match_table,
4266 NULL, &pdev->dev));
4267 if (err)
4268 goto bail;
4269bail:
4270 return err;
4271}
4272
4273static void fastrpc_deinit(void)
4274{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304275 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004276 struct fastrpc_channel_ctx *chan = gcinfo;
4277 int i, j;
4278
4279 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
4280 if (chan->chan) {
4281 kref_put_mutex(&chan->kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304282 fastrpc_channel_close, &me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05304283 chan->chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004284 }
4285 for (j = 0; j < NUM_SESSIONS; j++) {
4286 struct fastrpc_session_ctx *sess = &chan->session[j];
c_mtharue1a5ce12017-10-13 20:47:09 +05304287 if (sess->smmu.dev) {
4288 arm_iommu_detach_device(sess->smmu.dev);
4289 sess->smmu.dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004290 }
4291 if (sess->smmu.mapping) {
4292 arm_iommu_release_mapping(sess->smmu.mapping);
c_mtharue1a5ce12017-10-13 20:47:09 +05304293 sess->smmu.mapping = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004294 }
4295 }
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304296 kfree(chan->rhvm.vmid);
4297 kfree(chan->rhvm.vmperm);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004298 }
4299}
4300
4301static struct platform_driver fastrpc_driver = {
4302 .probe = fastrpc_probe,
4303 .driver = {
4304 .name = "fastrpc",
4305 .owner = THIS_MODULE,
4306 .of_match_table = fastrpc_match_table,
4307 },
4308};
4309
4310static int __init fastrpc_device_init(void)
4311{
4312 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +05304313 struct device *dev = NULL;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304314 struct device *secure_dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004315 int err = 0, i;
4316
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05304317 debugfs_root = debugfs_create_dir("adsprpc", NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004318 memset(me, 0, sizeof(*me));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004319 fastrpc_init(me);
4320 me->dev = NULL;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304321 me->glink = true;
zhaochenfc798572018-08-17 15:32:37 +08004322 me->secure_flag = false;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004323 VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
4324 if (err)
4325 goto register_bail;
4326 VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
4327 DEVICE_NAME));
4328 if (err)
4329 goto alloc_chrdev_bail;
4330 cdev_init(&me->cdev, &fops);
4331 me->cdev.owner = THIS_MODULE;
4332 VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304333 NUM_DEVICES));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004334 if (err)
4335 goto cdev_init_bail;
4336 me->class = class_create(THIS_MODULE, "fastrpc");
4337 VERIFY(err, !IS_ERR(me->class));
4338 if (err)
4339 goto class_create_bail;
4340 me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304341
4342 /*
4343 * Create devices and register with sysfs
4344 * Create first device with minor number 0
4345 */
Sathish Ambley36849af2017-02-02 09:35:55 -08004346 dev = device_create(me->class, NULL,
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304347 MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV),
4348 NULL, DEVICE_NAME);
Sathish Ambley36849af2017-02-02 09:35:55 -08004349 VERIFY(err, !IS_ERR_OR_NULL(dev));
4350 if (err)
4351 goto device_create_bail;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304352
4353 /* Create secure device with minor number for secure device */
4354 secure_dev = device_create(me->class, NULL,
4355 MKDEV(MAJOR(me->dev_no), MINOR_NUM_SECURE_DEV),
4356 NULL, DEVICE_NAME_SECURE);
4357 VERIFY(err, !IS_ERR_OR_NULL(secure_dev));
4358 if (err)
4359 goto device_create_bail;
4360
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004361 for (i = 0; i < NUM_CHANNELS; i++) {
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304362 me->channel[i].dev = secure_dev;
4363 if (i == CDSP_DOMAIN_ID)
4364 me->channel[i].dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004365 me->channel[i].ssrcount = 0;
4366 me->channel[i].prevssrcount = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05304367 me->channel[i].issubsystemup = 1;
4368 me->channel[i].ramdumpenabled = 0;
4369 me->channel[i].remoteheap_ramdump_dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004370 me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
4371 me->channel[i].handle = subsys_notif_register_notifier(
4372 gcinfo[i].subsys,
4373 &me->channel[i].nb);
4374 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004375 me->client = msm_ion_client_create(DEVICE_NAME);
4376 VERIFY(err, !IS_ERR_OR_NULL(me->client));
4377 if (err)
4378 goto device_create_bail;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05304379
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004380 return 0;
4381device_create_bail:
4382 for (i = 0; i < NUM_CHANNELS; i++) {
Sathish Ambley36849af2017-02-02 09:35:55 -08004383 if (me->channel[i].handle)
4384 subsys_notif_unregister_notifier(me->channel[i].handle,
4385 &me->channel[i].nb);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004386 }
Sathish Ambley36849af2017-02-02 09:35:55 -08004387 if (!IS_ERR_OR_NULL(dev))
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304388 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4389 MINOR_NUM_DEV));
4390 if (!IS_ERR_OR_NULL(secure_dev))
4391 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4392 MINOR_NUM_SECURE_DEV));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004393 class_destroy(me->class);
4394class_create_bail:
4395 cdev_del(&me->cdev);
4396cdev_init_bail:
4397 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
4398alloc_chrdev_bail:
4399register_bail:
4400 fastrpc_deinit();
4401 return err;
4402}
4403
4404static void __exit fastrpc_device_exit(void)
4405{
4406 struct fastrpc_apps *me = &gfa;
4407 int i;
4408
4409 fastrpc_file_list_dtor(me);
4410 fastrpc_deinit();
4411 for (i = 0; i < NUM_CHANNELS; i++) {
4412 if (!gcinfo[i].name)
4413 continue;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004414 subsys_notif_unregister_notifier(me->channel[i].handle,
4415 &me->channel[i].nb);
4416 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304417
4418 /* Destroy the secure and non secure devices */
4419 device_destroy(me->class, MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV));
4420 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4421 MINOR_NUM_SECURE_DEV));
4422
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004423 class_destroy(me->class);
4424 cdev_del(&me->cdev);
4425 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
4426 ion_client_destroy(me->client);
Sathish Ambley1ca68232017-01-19 10:32:55 -08004427 debugfs_remove_recursive(debugfs_root);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004428}
4429
4430late_initcall(fastrpc_device_init);
4431module_exit(fastrpc_device_exit);
4432
4433MODULE_LICENSE("GPL v2");