Sathish Ambley69e1ab02016-10-18 10:28:15 -07001/*
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +053029#include <soc/qcom/smd.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070030#include <soc/qcom/subsystem_notif.h>
31#include <soc/qcom/subsystem_restart.h>
Tharun Kumar Merugudf860662018-01-17 19:59:50 +053032#include <soc/qcom/service-notifier.h>
33#include <soc/qcom/service-locator.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070034#include <linux/scatterlist.h>
35#include <linux/fs.h>
36#include <linux/uaccess.h>
37#include <linux/device.h>
38#include <linux/of.h>
39#include <linux/of_address.h>
40#include <linux/of_platform.h>
41#include <linux/dma-contiguous.h>
42#include <linux/cma.h>
43#include <linux/iommu.h>
44#include <linux/kref.h>
45#include <linux/sort.h>
46#include <linux/msm_dma_iommu_mapping.h>
47#include <asm/dma-iommu.h>
c_mtharue1a5ce12017-10-13 20:47:09 +053048#include <soc/qcom/scm.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070049#include "adsprpc_compat.h"
50#include "adsprpc_shared.h"
c_mtharue1a5ce12017-10-13 20:47:09 +053051#include <soc/qcom/ramdump.h>
Sathish Ambley1ca68232017-01-19 10:32:55 -080052#include <linux/debugfs.h>
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053053#include <linux/pm_qos.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070054#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
55#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
56#define TZ_PIL_AUTH_QDSP6_PROC 1
c_mtharue1a5ce12017-10-13 20:47:09 +053057#define ADSP_MMAP_HEAP_ADDR 4
58#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
Tharun Kumar Merugue073de72018-07-30 23:57:47 +053059#define ADSP_MMAP_ADD_PAGES 0x1000
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +053060#define FASTRPC_DMAHANDLE_NOMAP (16)
61
Sathish Ambley69e1ab02016-10-18 10:28:15 -070062#define FASTRPC_ENOSUCH 39
63#define VMID_SSC_Q6 5
64#define VMID_ADSP_Q6 6
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +053065#define DEBUGFS_SIZE 3072
66#define UL_SIZE 25
67#define PID_SIZE 10
Sathish Ambley69e1ab02016-10-18 10:28:15 -070068
Tharun Kumar Merugudf860662018-01-17 19:59:50 +053069#define AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME "audio_pdr_adsprpc"
70#define AUDIO_PDR_ADSP_SERVICE_NAME "avs/audio"
71
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +053072#define SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME "sensors_pdr_adsprpc"
73#define SENSORS_PDR_ADSP_SERVICE_NAME "tms/servreg"
74
Sathish Ambley69e1ab02016-10-18 10:28:15 -070075#define RPC_TIMEOUT (5 * HZ)
76#define BALIGN 128
77#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
78#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
Sathish Ambleybae51902017-07-03 15:00:49 -070079#define M_FDLIST (16)
80#define M_CRCLIST (64)
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +053081#define SESSION_ID_INDEX (30)
c_mtharufdac6892017-10-12 13:09:01 +053082#define FASTRPC_CTX_MAGIC (0xbeeddeed)
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +053083#define FASTRPC_CTX_MAX (256)
84#define FASTRPC_CTXID_MASK (0xFF0)
Tharun Kumar Merugud996b262018-07-18 22:28:53 +053085#define NUM_DEVICES 2 /* adsprpc-smd, adsprpc-smd-secure */
86#define MINOR_NUM_DEV 0
87#define MINOR_NUM_SECURE_DEV 1
88#define NON_SECURE_CHANNEL 0
89#define SECURE_CHANNEL 1
90
91#define ADSP_DOMAIN_ID (0)
92#define MDSP_DOMAIN_ID (1)
93#define SDSP_DOMAIN_ID (2)
94#define CDSP_DOMAIN_ID (3)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070095
96#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
97
98#define FASTRPC_LINK_STATE_DOWN (0x0)
99#define FASTRPC_LINK_STATE_UP (0x1)
100#define FASTRPC_LINK_DISCONNECTED (0x0)
101#define FASTRPC_LINK_CONNECTING (0x1)
102#define FASTRPC_LINK_CONNECTED (0x3)
103#define FASTRPC_LINK_DISCONNECTING (0x7)
c_mtharu314a4202017-11-15 22:09:17 +0530104#define FASTRPC_LINK_REMOTE_DISCONNECTING (0x8)
105#define FASTRPC_GLINK_INTENT_LEN (64)
Tharun Kumar Meruguc42c6e22018-05-29 15:50:46 +0530106#define FASTRPC_GLINK_INTENT_NUM (16)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700107
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530108#define PERF_KEYS \
109 "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke:tid:ptr"
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +0530110#define FASTRPC_STATIC_HANDLE_KERNEL (1)
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800111#define FASTRPC_STATIC_HANDLE_LISTENER (3)
112#define FASTRPC_STATIC_HANDLE_MAX (20)
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +0530113#define FASTRPC_LATENCY_CTRL_ENB (1)
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800114
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +0530115#define INIT_FILELEN_MAX (2*1024*1024)
116#define INIT_MEMLEN_MAX (8*1024*1024)
117
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800118#define PERF_END (void)0
119
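/*
 * PERF(enb, cnt, ff): when profiling is enabled and a counter pointer is
 * supplied, execute the statement ff and accumulate the elapsed time in
 * nanoseconds into *cnt; otherwise just execute ff.
 */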
120#define PERF(enb, cnt, ff) \
121 {\
122 struct timespec startT = {0};\
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530123 int64_t *counter = cnt;\
124 if (enb && counter) {\
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800125 getnstimeofday(&startT);\
126 } \
127 ff ;\
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530128 if (enb && counter) {\
129 *counter += getnstimediff(&startT);\
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800130 } \
131 }
132
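/*
 * GET_COUNTER(perf_ptr, offset): pointer to the int64_t counter at "offset"
 * within a per-thread fastrpc_perf block, or NULL if perf_ptr is NULL or the
 * offset is outside [0, PERF_KEY_MAX).
 */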
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530133#define GET_COUNTER(perf_ptr, offset) \
134 (perf_ptr != NULL ?\
135 (((offset >= 0) && (offset < PERF_KEY_MAX)) ?\
136 (int64_t *)(perf_ptr + offset)\
137 : (int64_t *)NULL) : (int64_t *)NULL)
138
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700139static int fastrpc_glink_open(int cid);
140static void fastrpc_glink_close(void *chan, int cid);
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +0530141static int fastrpc_pdr_notifier_cb(struct notifier_block *nb,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530142 unsigned long code,
143 void *data);
Sathish Ambley1ca68232017-01-19 10:32:55 -0800144static struct dentry *debugfs_root;
145static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700146
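/* Page-arithmetic helpers used when building the page lists sent to the DSP. */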
147static inline uint64_t buf_page_start(uint64_t buf)
148{
149 uint64_t start = (uint64_t) buf & PAGE_MASK;
150 return start;
151}
152
153static inline uint64_t buf_page_offset(uint64_t buf)
154{
155 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
156 return offset;
157}
158
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530159static inline uint64_t buf_num_pages(uint64_t buf, size_t len)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700160{
161 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
162 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530163 uint64_t nPages = end - start + 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700164 return nPages;
165}
166
167static inline uint64_t buf_page_size(uint32_t size)
168{
169 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
170
171 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
172}
173
174static inline void *uint64_to_ptr(uint64_t addr)
175{
176 void *ptr = (void *)((uintptr_t)addr);
177
178 return ptr;
179}
180
181static inline uint64_t ptr_to_uint64(void *ptr)
182{
183 uint64_t addr = (uint64_t)((uintptr_t)ptr);
184
185 return addr;
186}
187
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +0530188struct secure_vm {
189 int *vmid;
190 int *vmperm;
191 int vmcount;
192};
193
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700194struct fastrpc_file;
195
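/*
 * A DMA buffer owned by a client file: parked on cached_bufs for reuse, or
 * tracked on remote_bufs (with its DSP-side address in raddr) once it has
 * been mapped to the remote processor.
 */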
196struct fastrpc_buf {
197 struct hlist_node hn;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530198 struct hlist_node hn_rem;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700199 struct fastrpc_file *fl;
200 void *virt;
201 uint64_t phys;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530202 size_t size;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530203 unsigned long dma_attr;
204 uintptr_t raddr;
205 uint32_t flags;
206 int remote;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700207};
208
209struct fastrpc_ctx_lst;
210
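/*
 * Describes how one buffer argument overlaps earlier arguments; filled in by
 * context_build_overlap() so get_args() copies each byte range only once.
 */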
211struct overlap {
212 uintptr_t start;
213 uintptr_t end;
214 int raix;
215 uintptr_t mstart;
216 uintptr_t mend;
217 uintptr_t offset;
218};
219
220struct smq_invoke_ctx {
221 struct hlist_node hn;
222 struct completion work;
223 int retval;
224 int pid;
225 int tgid;
226 remote_arg_t *lpra;
227 remote_arg64_t *rpra;
228 int *fds;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700229 struct fastrpc_mmap **maps;
230 struct fastrpc_buf *buf;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530231 size_t used;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700232 struct fastrpc_file *fl;
233 uint32_t sc;
234 struct overlap *overs;
235 struct overlap **overps;
236 struct smq_msg msg;
c_mtharufdac6892017-10-12 13:09:01 +0530237 unsigned int magic;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530238 unsigned int *attrs;
239 uint32_t *crc;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +0530240 uint64_t ctxid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700241};
242
243struct fastrpc_ctx_lst {
244 struct hlist_head pending;
245 struct hlist_head interrupted;
246};
247
248struct fastrpc_smmu {
c_mtharue1a5ce12017-10-13 20:47:09 +0530249 struct device *dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700250 struct dma_iommu_mapping *mapping;
251 int cb;
252 int enabled;
253 int faults;
254 int secure;
255 int coherent;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +0530256 int sharedcb;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700257};
258
259struct fastrpc_session_ctx {
260 struct device *dev;
261 struct fastrpc_smmu smmu;
262 int used;
263};
264
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530265struct fastrpc_static_pd {
266 char *spdname;
267 struct notifier_block pdrnb;
268 struct notifier_block get_service_nb;
269 void *pdrhandle;
270 int pdrcount;
271 int prevpdrcount;
272 int ispdup;
273};
274
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700275struct fastrpc_glink_info {
276 int link_state;
277 int port_state;
278 struct glink_open_config cfg;
279 struct glink_link_info link_info;
280 void *link_notify_handle;
281};
282
283struct fastrpc_channel_ctx {
284 char *name;
285 char *subsys;
286 void *chan;
287 struct device *dev;
288 struct fastrpc_session_ctx session[NUM_SESSIONS];
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530289 struct fastrpc_static_pd spd[NUM_SESSIONS];
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700290 struct completion work;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +0530291 struct completion workport;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700292 struct notifier_block nb;
293 struct kref kref;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530294 int channel;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700295 int sesscount;
296 int ssrcount;
297 void *handle;
298 int prevssrcount;
c_mtharue1a5ce12017-10-13 20:47:09 +0530299 int issubsystemup;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700300 int vmid;
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +0530301 struct secure_vm rhvm;
c_mtharue1a5ce12017-10-13 20:47:09 +0530302 int ramdumpenabled;
303 void *remoteheap_ramdump_dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700304 struct fastrpc_glink_info link;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +0530305	/* Indicates whether the channel is restricted to the secure device node */
306 int secure;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700307};
308
309struct fastrpc_apps {
310 struct fastrpc_channel_ctx *channel;
311 struct cdev cdev;
312 struct class *class;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +0530313 struct mutex smd_mutex;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700314 struct smq_phy_page range;
315 struct hlist_head maps;
c_mtharue1a5ce12017-10-13 20:47:09 +0530316 uint32_t staticpd_flags;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700317 dev_t dev_no;
318 int compat;
319 struct hlist_head drivers;
320 spinlock_t hlock;
321 struct ion_client *client;
322 struct device *dev;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +0530323 unsigned int latency;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530324 bool glink;
325 bool legacy;
zhaochenfc798572018-08-17 15:32:37 +0800326 bool secure_flag;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +0530327 spinlock_t ctxlock;
328 struct smq_invoke_ctx *ctxtable[FASTRPC_CTX_MAX];
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700329};
330
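/*
 * One mapping visible to the DSP: an imported ION/dma-buf backed by an SMMU
 * mapping, or a remote-heap region allocated from the adsprpc-mem device.
 */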
331struct fastrpc_mmap {
332 struct hlist_node hn;
333 struct fastrpc_file *fl;
334 struct fastrpc_apps *apps;
335 int fd;
336 uint32_t flags;
337 struct dma_buf *buf;
338 struct sg_table *table;
339 struct dma_buf_attachment *attach;
340 struct ion_handle *handle;
341 uint64_t phys;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530342 size_t size;
343 uintptr_t va;
344 size_t len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700345 int refs;
346 uintptr_t raddr;
347 int uncached;
348 int secure;
349 uintptr_t attr;
350};
351
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530352enum fastrpc_perfkeys {
353 PERF_COUNT = 0,
354 PERF_FLUSH = 1,
355 PERF_MAP = 2,
356 PERF_COPY = 3,
357 PERF_LINK = 4,
358 PERF_GETARGS = 5,
359 PERF_PUTARGS = 6,
360 PERF_INVARGS = 7,
361 PERF_INVOKE = 8,
362 PERF_KEY_MAX = 9,
363};
364
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800365struct fastrpc_perf {
366 int64_t count;
367 int64_t flush;
368 int64_t map;
369 int64_t copy;
370 int64_t link;
371 int64_t getargs;
372 int64_t putargs;
373 int64_t invargs;
374 int64_t invoke;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530375 int64_t tid;
376 struct hlist_node hn;
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800377};
378
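/* Per-client state (one per file descriptor): maps, buffers, contexts, session. */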
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700379struct fastrpc_file {
380 struct hlist_node hn;
381 spinlock_t hlock;
382 struct hlist_head maps;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530383 struct hlist_head cached_bufs;
384 struct hlist_head remote_bufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700385 struct fastrpc_ctx_lst clst;
386 struct fastrpc_session_ctx *sctx;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530387 struct fastrpc_buf *init_mem;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700388 struct fastrpc_session_ctx *secsctx;
389 uint32_t mode;
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800390 uint32_t profile;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +0530391 int sessionid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700392 int tgid;
393 int cid;
394 int ssrcount;
395 int pd;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530396 char *spdname;
tharun kumar9f899ea2017-07-03 17:07:03 +0530397 int file_close;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +0530398 int sharedcb;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700399 struct fastrpc_apps *apps;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530400 struct hlist_head perf;
Sathish Ambley1ca68232017-01-19 10:32:55 -0800401 struct dentry *debugfs_file;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530402 struct mutex perf_mutex;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +0530403 struct pm_qos_request pm_qos_req;
404 int qos_request;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +0530405 struct mutex map_mutex;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +0530406 struct mutex fl_map_mutex;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +0530407 int refcount;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +0530408 /* Identifies the device (MINOR_NUM_DEV / MINOR_NUM_SECURE_DEV) */
409 int dev_minor;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +0530410 char *debug_buf;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700411};
412
413static struct fastrpc_apps gfa;
414
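/*
 * Static per-channel configuration: device name, subsystem, SMD channel,
 * glink edge/transport and static process-domain (PD) notifier entries.
 */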
415static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
416 {
417 .name = "adsprpc-smd",
418 .subsys = "adsp",
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530419 .channel = SMD_APPS_QDSP,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700420 .link.link_info.edge = "lpass",
421 .link.link_info.transport = "smem",
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530422 .spd = {
423 {
424 .spdname =
425 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
426 .pdrnb.notifier_call =
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +0530427 fastrpc_pdr_notifier_cb,
428 },
429 {
430 .spdname =
431 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
432 .pdrnb.notifier_call =
433 fastrpc_pdr_notifier_cb,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530434 }
435 },
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700436 },
437 {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700438 .name = "mdsprpc-smd",
439 .subsys = "modem",
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530440 .channel = SMD_APPS_MODEM,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700441 .link.link_info.edge = "mpss",
442 .link.link_info.transport = "smem",
443 },
444 {
Sathish Ambley36849af2017-02-02 09:35:55 -0800445 .name = "sdsprpc-smd",
446 .subsys = "slpi",
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530447 .channel = SMD_APPS_DSPS,
Sathish Ambley36849af2017-02-02 09:35:55 -0800448 .link.link_info.edge = "dsps",
449 .link.link_info.transport = "smem",
Sathish Ambley36849af2017-02-02 09:35:55 -0800450 },
451 {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700452 .name = "cdsprpc-smd",
453 .subsys = "cdsp",
454 .link.link_info.edge = "cdsp",
455 .link.link_info.transport = "smem",
456 },
457};
458
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +0530459static int hlosvm[1] = {VMID_HLOS};
460static int hlosvmperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
461
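/* Nanoseconds elapsed since *start; used by the PERF() profiling macro. */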
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800462static inline int64_t getnstimediff(struct timespec *start)
463{
464 int64_t ns;
465 struct timespec ts, b;
466
467 getnstimeofday(&ts);
468 b = timespec_sub(ts, *start);
469 ns = timespec_to_ns(&b);
470 return ns;
471}
472
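/*
 * Find (or lazily allocate) the calling thread's fastrpc_perf record under
 * perf_mutex and return a pointer to the counter selected by key.
 */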
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530473static inline int64_t *getperfcounter(struct fastrpc_file *fl, int key)
474{
475 int err = 0;
476 int64_t *val = NULL;
477 struct fastrpc_perf *perf = NULL, *fperf = NULL;
478 struct hlist_node *n = NULL;
479
480 VERIFY(err, !IS_ERR_OR_NULL(fl));
481 if (err)
482 goto bail;
483
484 mutex_lock(&fl->perf_mutex);
485 hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
486 if (perf->tid == current->pid) {
487 fperf = perf;
488 break;
489 }
490 }
491
492 if (IS_ERR_OR_NULL(fperf)) {
493 fperf = kzalloc(sizeof(*fperf), GFP_KERNEL);
494
495 VERIFY(err, !IS_ERR_OR_NULL(fperf));
496 if (err) {
497 mutex_unlock(&fl->perf_mutex);
498 kfree(fperf);
499 goto bail;
500 }
501
502 fperf->tid = current->pid;
503 hlist_add_head(&fperf->hn, &fl->perf);
504 }
505
506 val = ((int64_t *)fperf) + key;
507 mutex_unlock(&fl->perf_mutex);
508bail:
509 return val;
510}
511
512
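/*
 * Free or recycle a buffer: with "cache" set it is parked on cached_bufs for
 * reuse; otherwise ownership is restored to HLOS if it had been hyp-assigned
 * and the DMA memory is released.
 */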
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700513static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
514{
c_mtharue1a5ce12017-10-13 20:47:09 +0530515 struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700516 int vmid;
517
518 if (!fl)
519 return;
520 if (cache) {
521 spin_lock(&fl->hlock);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530522 hlist_add_head(&buf->hn, &fl->cached_bufs);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700523 spin_unlock(&fl->hlock);
524 return;
525 }
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530526 if (buf->remote) {
527 spin_lock(&fl->hlock);
528 hlist_del_init(&buf->hn_rem);
529 spin_unlock(&fl->hlock);
530 buf->remote = 0;
531 buf->raddr = 0;
532 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700533 if (!IS_ERR_OR_NULL(buf->virt)) {
534 int destVM[1] = {VMID_HLOS};
535 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
536
537 if (fl->sctx->smmu.cb)
538 buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
539 vmid = fl->apps->channel[fl->cid].vmid;
540 if (vmid) {
541 int srcVM[2] = {VMID_HLOS, vmid};
542
543 hyp_assign_phys(buf->phys, buf_page_size(buf->size),
544 srcVM, 2, destVM, destVMperm, 1);
545 }
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530546 dma_free_attrs(fl->sctx->smmu.dev, buf->size, buf->virt,
547 buf->phys, buf->dma_attr);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700548 }
549 kfree(buf);
550}
551
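/* Drain fl->cached_bufs and free every buffer on it. */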
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530552static void fastrpc_cached_buf_list_free(struct fastrpc_file *fl)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700553{
554 struct fastrpc_buf *buf, *free;
555
556 do {
557 struct hlist_node *n;
558
c_mtharue1a5ce12017-10-13 20:47:09 +0530559 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700560 spin_lock(&fl->hlock);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530561 hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700562 hlist_del_init(&buf->hn);
563 free = buf;
564 break;
565 }
566 spin_unlock(&fl->hlock);
567 if (free)
568 fastrpc_buf_free(free, 0);
569 } while (free);
570}
571
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530572static void fastrpc_remote_buf_list_free(struct fastrpc_file *fl)
573{
574 struct fastrpc_buf *buf, *free;
575
576 do {
577 struct hlist_node *n;
578
579 free = NULL;
580 spin_lock(&fl->hlock);
581 hlist_for_each_entry_safe(buf, n, &fl->remote_bufs, hn_rem) {
582 free = buf;
583 break;
584 }
585 spin_unlock(&fl->hlock);
586 if (free)
587 fastrpc_buf_free(free, 0);
588 } while (free);
589}
590
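/*
 * Publish a mapping: remote-heap maps go on the global list under hlock,
 * everything else on the owning file's maps list.
 */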
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700591static void fastrpc_mmap_add(struct fastrpc_mmap *map)
592{
c_mtharue1a5ce12017-10-13 20:47:09 +0530593 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
594 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
595 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700596
c_mtharue1a5ce12017-10-13 20:47:09 +0530597 spin_lock(&me->hlock);
598 hlist_add_head(&map->hn, &me->maps);
599 spin_unlock(&me->hlock);
600 } else {
601 struct fastrpc_file *fl = map->fl;
602
c_mtharue1a5ce12017-10-13 20:47:09 +0530603 hlist_add_head(&map->hn, &fl->maps);
c_mtharue1a5ce12017-10-13 20:47:09 +0530604 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700605}
606
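/*
 * Look for an existing mapping of fd covering [va, va + len), optionally
 * taking a reference; returns -ENOTTY when no match is found.
 */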
c_mtharue1a5ce12017-10-13 20:47:09 +0530607static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530608 uintptr_t va, size_t len, int mflags, int refs,
c_mtharue1a5ce12017-10-13 20:47:09 +0530609 struct fastrpc_mmap **ppmap)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700610{
c_mtharue1a5ce12017-10-13 20:47:09 +0530611 struct fastrpc_apps *me = &gfa;
612 struct fastrpc_mmap *match = NULL, *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700613 struct hlist_node *n;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530614
615 if ((va + len) < va)
616 return -EOVERFLOW;
c_mtharue1a5ce12017-10-13 20:47:09 +0530617 if (mflags == ADSP_MMAP_HEAP_ADDR ||
618 mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
619 spin_lock(&me->hlock);
620 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
621 if (va >= map->va &&
622 va + len <= map->va + map->len &&
623 map->fd == fd) {
624 if (refs)
625 map->refs++;
626 match = map;
627 break;
628 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700629 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530630 spin_unlock(&me->hlock);
631 } else {
c_mtharue1a5ce12017-10-13 20:47:09 +0530632 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
633 if (va >= map->va &&
634 va + len <= map->va + map->len &&
635 map->fd == fd) {
636 if (refs)
637 map->refs++;
638 match = map;
639 break;
640 }
641 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700642 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700643 if (match) {
644 *ppmap = match;
645 return 0;
646 }
647 return -ENOTTY;
648}
649
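/* Allocate DMA memory for remote-heap mappings from the adsprpc-mem device. */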
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +0530650static int dma_alloc_memory(dma_addr_t *region_phys, void **vaddr, size_t size,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530651 unsigned long dma_attrs)
c_mtharue1a5ce12017-10-13 20:47:09 +0530652{
653 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +0530654
655 if (me->dev == NULL) {
656 pr_err("device adsprpc-mem is not initialized\n");
657 return -ENODEV;
658 }
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +0530659 *vaddr = dma_alloc_attrs(me->dev, size, region_phys, GFP_KERNEL,
Tharun Kumar Merugu48d5ff32018-04-16 19:24:16 +0530660 dma_attrs);
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +0530661 if (IS_ERR_OR_NULL(*vaddr)) {
662 pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx, returned %pK\n",
663 current->comm, __func__, size, (*vaddr));
c_mtharue1a5ce12017-10-13 20:47:09 +0530664 return -ENOMEM;
665 }
666 return 0;
667}
668
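/*
 * Find a mapping by its remote address and length, unlink it if it holds the
 * last reference, and hand it back to the caller for freeing.
 */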
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700669static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530670 size_t len, struct fastrpc_mmap **ppmap)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700671{
c_mtharue1a5ce12017-10-13 20:47:09 +0530672 struct fastrpc_mmap *match = NULL, *map;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700673 struct hlist_node *n;
674 struct fastrpc_apps *me = &gfa;
675
676 spin_lock(&me->hlock);
677 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
678 if (map->raddr == va &&
679 map->raddr + map->len == va + len &&
680 map->refs == 1) {
681 match = map;
682 hlist_del_init(&map->hn);
683 break;
684 }
685 }
686 spin_unlock(&me->hlock);
687 if (match) {
688 *ppmap = match;
689 return 0;
690 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700691 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
692 if (map->raddr == va &&
693 map->raddr + map->len == va + len &&
694 map->refs == 1) {
695 match = map;
696 hlist_del_init(&map->hn);
697 break;
698 }
699 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700700 if (match) {
701 *ppmap = match;
702 return 0;
703 }
704 return -ENOTTY;
705}
706
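/*
 * Drop a reference on a mapping and, on the last put (or when "flags" forces
 * it), release the backing memory (remote-heap DMA, a bare ION handle, or a
 * dma-buf attachment) and restore HLOS ownership if it was hyp-assigned.
 */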
c_mtharu7bd6a422017-10-17 18:15:37 +0530707static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700708{
c_mtharue1a5ce12017-10-13 20:47:09 +0530709 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700710 struct fastrpc_file *fl;
711 int vmid;
712 struct fastrpc_session_ctx *sess;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700713
714 if (!map)
715 return;
716 fl = map->fl;
c_mtharue1a5ce12017-10-13 20:47:09 +0530717 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
718 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
719 spin_lock(&me->hlock);
720 map->refs--;
721 if (!map->refs)
722 hlist_del_init(&map->hn);
723 spin_unlock(&me->hlock);
c_mtharu7bd6a422017-10-17 18:15:37 +0530724 if (map->refs > 0)
725 return;
c_mtharue1a5ce12017-10-13 20:47:09 +0530726 } else {
c_mtharue1a5ce12017-10-13 20:47:09 +0530727 map->refs--;
728 if (!map->refs)
729 hlist_del_init(&map->hn);
c_mtharu7bd6a422017-10-17 18:15:37 +0530730 if (map->refs > 0 && !flags)
731 return;
c_mtharue1a5ce12017-10-13 20:47:09 +0530732 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530733 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
734 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
Tharun Kumar Merugu48d5ff32018-04-16 19:24:16 +0530735 unsigned long dma_attrs = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700736
c_mtharue1a5ce12017-10-13 20:47:09 +0530737 if (me->dev == NULL) {
738 pr_err("failed to free remote heap allocation\n");
739 return;
740 }
741 if (map->phys) {
Tharun Kumar Merugu48d5ff32018-04-16 19:24:16 +0530742 dma_attrs |=
743 DMA_ATTR_SKIP_ZEROING | DMA_ATTR_NO_KERNEL_MAPPING;
744 dma_free_attrs(me->dev, map->size, (void *)map->va,
745 (dma_addr_t)map->phys, dma_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +0530746 }
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +0530747 } else if (map->flags == FASTRPC_DMAHANDLE_NOMAP) {
748 if (!IS_ERR_OR_NULL(map->handle))
749 ion_free(fl->apps->client, map->handle);
c_mtharue1a5ce12017-10-13 20:47:09 +0530750 } else {
751 int destVM[1] = {VMID_HLOS};
752 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
753
754 if (map->secure)
755 sess = fl->secsctx;
756 else
757 sess = fl->sctx;
758
759 if (!IS_ERR_OR_NULL(map->handle))
760 ion_free(fl->apps->client, map->handle);
761 if (sess && sess->smmu.enabled) {
762 if (map->size || map->phys)
763 msm_dma_unmap_sg(sess->smmu.dev,
764 map->table->sgl,
765 map->table->nents, DMA_BIDIRECTIONAL,
766 map->buf);
767 }
768 vmid = fl->apps->channel[fl->cid].vmid;
769 if (vmid && map->phys) {
770 int srcVM[2] = {VMID_HLOS, vmid};
771
772 hyp_assign_phys(map->phys, buf_page_size(map->size),
773 srcVM, 2, destVM, destVMperm, 1);
774 }
775
776 if (!IS_ERR_OR_NULL(map->table))
777 dma_buf_unmap_attachment(map->attach, map->table,
778 DMA_BIDIRECTIONAL);
779 if (!IS_ERR_OR_NULL(map->attach))
780 dma_buf_detach(map->buf, map->attach);
781 if (!IS_ERR_OR_NULL(map->buf))
782 dma_buf_put(map->buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700783 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700784 kfree(map);
785}
786
787static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
788 struct fastrpc_session_ctx **session);
789
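/*
 * Create (or reuse) a mapping for fd: remote-heap requests get fresh DMA
 * memory, FASTRPC_DMAHANDLE_NOMAP handles only resolve the physical address,
 * and normal buffers are imported as dma-bufs, mapped through the session
 * SMMU and hyp-assigned to the channel VM when one is configured.
 */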
790static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530791 unsigned int attr, uintptr_t va, size_t len, int mflags,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700792 struct fastrpc_mmap **ppmap)
793{
c_mtharue1a5ce12017-10-13 20:47:09 +0530794 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700795 struct fastrpc_session_ctx *sess;
796 struct fastrpc_apps *apps = fl->apps;
797 int cid = fl->cid;
798 struct fastrpc_channel_ctx *chan = &apps->channel[cid];
c_mtharue1a5ce12017-10-13 20:47:09 +0530799 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700800 unsigned long attrs;
c_mtharuf931ff92017-11-30 19:35:30 +0530801 dma_addr_t region_phys = 0;
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +0530802 void *region_vaddr = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700803 unsigned long flags;
804 int err = 0, vmid;
805
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800806 if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700807 return 0;
808 map = kzalloc(sizeof(*map), GFP_KERNEL);
809 VERIFY(err, !IS_ERR_OR_NULL(map));
810 if (err)
811 goto bail;
812 INIT_HLIST_NODE(&map->hn);
813 map->flags = mflags;
814 map->refs = 1;
815 map->fl = fl;
816 map->fd = fd;
817 map->attr = attr;
c_mtharue1a5ce12017-10-13 20:47:09 +0530818 if (mflags == ADSP_MMAP_HEAP_ADDR ||
819 mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530820 unsigned long dma_attrs = DMA_ATTR_SKIP_ZEROING |
821 DMA_ATTR_NO_KERNEL_MAPPING;
822
c_mtharue1a5ce12017-10-13 20:47:09 +0530823 map->apps = me;
824 map->fl = NULL;
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +0530825 VERIFY(err, !dma_alloc_memory(&region_phys, &region_vaddr,
826 len, dma_attrs));
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700827 if (err)
828 goto bail;
c_mtharuf931ff92017-11-30 19:35:30 +0530829 map->phys = (uintptr_t)region_phys;
c_mtharue1a5ce12017-10-13 20:47:09 +0530830 map->size = len;
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +0530831 map->va = (uintptr_t)region_vaddr;
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +0530832 } else if (mflags == FASTRPC_DMAHANDLE_NOMAP) {
833 ion_phys_addr_t iphys;
834
835 VERIFY(err, !IS_ERR_OR_NULL(map->handle =
836 ion_import_dma_buf_fd(fl->apps->client, fd)));
837 if (err)
838 goto bail;
839
840 map->uncached = 1;
841 map->buf = NULL;
842 map->attach = NULL;
843 map->table = NULL;
844 map->va = 0;
845 map->phys = 0;
846
847 err = ion_phys(fl->apps->client, map->handle,
848 &iphys, &map->size);
849 if (err)
850 goto bail;
851 map->phys = (uint64_t)iphys;
c_mtharue1a5ce12017-10-13 20:47:09 +0530852 } else {
c_mtharu7bd6a422017-10-17 18:15:37 +0530853 if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
854 pr_info("adsprpc: buffer mapped with persist attr %x\n",
855 (unsigned int)map->attr);
856 map->refs = 2;
857 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530858 VERIFY(err, !IS_ERR_OR_NULL(map->handle =
859 ion_import_dma_buf_fd(fl->apps->client, fd)));
860 if (err)
861 goto bail;
862 VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
863 &flags));
864 if (err)
865 goto bail;
866
c_mtharue1a5ce12017-10-13 20:47:09 +0530867 map->secure = flags & ION_FLAG_SECURE;
868 if (map->secure) {
869 if (!fl->secsctx)
870 err = fastrpc_session_alloc(chan, 1,
871 &fl->secsctx);
872 if (err)
873 goto bail;
874 }
875 if (map->secure)
876 sess = fl->secsctx;
877 else
878 sess = fl->sctx;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +0530879
c_mtharue1a5ce12017-10-13 20:47:09 +0530880 VERIFY(err, !IS_ERR_OR_NULL(sess));
881 if (err)
882 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +0530883
884 map->uncached = !ION_IS_CACHED(flags);
885 if (map->attr & FASTRPC_ATTR_NOVA && !sess->smmu.coherent)
886 map->uncached = 1;
887
c_mtharue1a5ce12017-10-13 20:47:09 +0530888 VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
889 if (err)
890 goto bail;
891 VERIFY(err, !IS_ERR_OR_NULL(map->attach =
892 dma_buf_attach(map->buf, sess->smmu.dev)));
893 if (err)
894 goto bail;
895 VERIFY(err, !IS_ERR_OR_NULL(map->table =
896 dma_buf_map_attachment(map->attach,
897 DMA_BIDIRECTIONAL)));
898 if (err)
899 goto bail;
900 if (sess->smmu.enabled) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700901 attrs = DMA_ATTR_EXEC_MAPPING;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +0530902
903 if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
904 (sess->smmu.coherent && map->uncached))
905 attrs |= DMA_ATTR_FORCE_NON_COHERENT;
906 else if (map->attr & FASTRPC_ATTR_COHERENT)
907 attrs |= DMA_ATTR_FORCE_COHERENT;
908
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700909 VERIFY(err, map->table->nents ==
c_mtharue1a5ce12017-10-13 20:47:09 +0530910 msm_dma_map_sg_attrs(sess->smmu.dev,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700911 map->table->sgl, map->table->nents,
912 DMA_BIDIRECTIONAL, map->buf, attrs));
c_mtharue1a5ce12017-10-13 20:47:09 +0530913 if (err)
914 goto bail;
915 } else {
916 VERIFY(err, map->table->nents == 1);
917 if (err)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700918 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +0530919 }
920 map->phys = sg_dma_address(map->table->sgl);
Tharun Kumar Merugu93f319a2018-02-01 17:35:42 +0530921
c_mtharue1a5ce12017-10-13 20:47:09 +0530922 if (sess->smmu.cb) {
923 map->phys += ((uint64_t)sess->smmu.cb << 32);
924 map->size = sg_dma_len(map->table->sgl);
925 } else {
926 map->size = buf_page_size(len);
927 }
Tharun Kumar Merugu93f319a2018-02-01 17:35:42 +0530928
c_mtharue1a5ce12017-10-13 20:47:09 +0530929 vmid = fl->apps->channel[fl->cid].vmid;
Tharun Kumar Merugu93f319a2018-02-01 17:35:42 +0530930 if (!sess->smmu.enabled && !vmid) {
931 VERIFY(err, map->phys >= me->range.addr &&
932 map->phys + map->size <=
933 me->range.addr + me->range.size);
934 if (err) {
 935				pr_err("adsprpc: mmap failed, physical address out of range\n");
936 goto bail;
937 }
938 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530939 if (vmid) {
940 int srcVM[1] = {VMID_HLOS};
941 int destVM[2] = {VMID_HLOS, vmid};
942 int destVMperm[2] = {PERM_READ | PERM_WRITE,
943 PERM_READ | PERM_WRITE | PERM_EXEC};
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700944
c_mtharue1a5ce12017-10-13 20:47:09 +0530945 VERIFY(err, !hyp_assign_phys(map->phys,
946 buf_page_size(map->size),
947 srcVM, 1, destVM, destVMperm, 2));
948 if (err)
949 goto bail;
950 }
951 map->va = va;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700952 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700953 map->len = len;
954
955 fastrpc_mmap_add(map);
956 *ppmap = map;
957
958bail:
959 if (err && map)
c_mtharu7bd6a422017-10-17 18:15:37 +0530960 fastrpc_mmap_free(map, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700961 return err;
962}
963
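/*
 * Allocate a buffer for an RPC call: reuse the smallest cached buffer that
 * fits (unless a remote buffer is requested), otherwise allocate new DMA
 * memory, retrying once after flushing the cache on failure.
 */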
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530964static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530965 unsigned long dma_attr, uint32_t rflags,
966 int remote, struct fastrpc_buf **obuf)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700967{
968 int err = 0, vmid;
c_mtharue1a5ce12017-10-13 20:47:09 +0530969 struct fastrpc_buf *buf = NULL, *fr = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700970 struct hlist_node *n;
971
972 VERIFY(err, size > 0);
973 if (err)
974 goto bail;
975
Tharun Kumar Merugue073de72018-07-30 23:57:47 +0530976 if (!remote) {
977 /* find the smallest buffer that fits in the cache */
978 spin_lock(&fl->hlock);
979 hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
980 if (buf->size >= size && (!fr || fr->size > buf->size))
981 fr = buf;
982 }
983 if (fr)
984 hlist_del_init(&fr->hn);
985 spin_unlock(&fl->hlock);
986 if (fr) {
987 *obuf = fr;
988 return 0;
989 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700990 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530991 buf = NULL;
992 VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700993 if (err)
994 goto bail;
995 INIT_HLIST_NODE(&buf->hn);
996 buf->fl = fl;
c_mtharue1a5ce12017-10-13 20:47:09 +0530997 buf->virt = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700998 buf->phys = 0;
999 buf->size = size;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05301000 buf->dma_attr = dma_attr;
1001 buf->flags = rflags;
1002 buf->raddr = 0;
1003 buf->remote = 0;
1004 buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
1005 (dma_addr_t *)&buf->phys,
1006 GFP_KERNEL, buf->dma_attr);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001007 if (IS_ERR_OR_NULL(buf->virt)) {
1008		/* free cached buffers and retry */
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05301009 fastrpc_cached_buf_list_free(fl);
1010 buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
1011 (dma_addr_t *)&buf->phys,
1012 GFP_KERNEL, buf->dma_attr);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001013 VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
1014 }
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05301015 if (err) {
1016 err = -ENOMEM;
1017 pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx\n",
1018 current->comm, __func__, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001019 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05301020 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001021 if (fl->sctx->smmu.cb)
1022 buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
1023 vmid = fl->apps->channel[fl->cid].vmid;
1024 if (vmid) {
1025 int srcVM[1] = {VMID_HLOS};
1026 int destVM[2] = {VMID_HLOS, vmid};
1027 int destVMperm[2] = {PERM_READ | PERM_WRITE,
1028 PERM_READ | PERM_WRITE | PERM_EXEC};
1029
1030 VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
1031 srcVM, 1, destVM, destVMperm, 2));
1032 if (err)
1033 goto bail;
1034 }
1035
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05301036 if (remote) {
1037 INIT_HLIST_NODE(&buf->hn_rem);
1038 spin_lock(&fl->hlock);
1039 hlist_add_head(&buf->hn_rem, &fl->remote_bufs);
1040 spin_unlock(&fl->hlock);
1041 buf->remote = remote;
1042 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001043 *obuf = buf;
1044 bail:
1045 if (err && buf)
1046 fastrpc_buf_free(buf, 0);
1047 return err;
1048}
1049
1050
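/*
 * If the calling thread has a context that was interrupted for the same
 * scalars, move it back to the pending list and reuse it.
 */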
1051static int context_restore_interrupted(struct fastrpc_file *fl,
Sathish Ambleybae51902017-07-03 15:00:49 -07001052 struct fastrpc_ioctl_invoke_crc *inv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001053 struct smq_invoke_ctx **po)
1054{
1055 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05301056 struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001057 struct hlist_node *n;
1058 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
1059
1060 spin_lock(&fl->hlock);
1061 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
1062 if (ictx->pid == current->pid) {
1063 if (invoke->sc != ictx->sc || ictx->fl != fl)
1064 err = -1;
1065 else {
1066 ctx = ictx;
1067 hlist_del_init(&ctx->hn);
1068 hlist_add_head(&ctx->hn, &fl->clst.pending);
1069 }
1070 break;
1071 }
1072 }
1073 spin_unlock(&fl->hlock);
1074 if (ctx)
1075 *po = ctx;
1076 return err;
1077}
1078
1079#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
1080static int overlap_ptr_cmp(const void *a, const void *b)
1081{
1082 struct overlap *pa = *((struct overlap **)a);
1083 struct overlap *pb = *((struct overlap **)b);
1084 /* sort with lowest starting buffer first */
1085 int st = CMP(pa->start, pb->start);
1086 /* sort with highest ending buffer first */
1087 int ed = CMP(pb->end, pa->end);
1088 return st == 0 ? ed : st;
1089}
1090
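/*
 * Sort the buffer arguments by start address and record, for each one, the
 * sub-range (mstart..mend) not already covered by an earlier buffer so that
 * overlapping regions are copied only once.
 */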
Sathish Ambley9466d672017-01-25 10:51:55 -08001091static int context_build_overlap(struct smq_invoke_ctx *ctx)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001092{
Sathish Ambley9466d672017-01-25 10:51:55 -08001093 int i, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001094 remote_arg_t *lpra = ctx->lpra;
1095 int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
1096 int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
1097 int nbufs = inbufs + outbufs;
1098 struct overlap max;
1099
1100 for (i = 0; i < nbufs; ++i) {
1101 ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
1102 ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
Sathish Ambley9466d672017-01-25 10:51:55 -08001103 if (lpra[i].buf.len) {
1104 VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
1105 if (err)
1106 goto bail;
1107 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001108 ctx->overs[i].raix = i;
1109 ctx->overps[i] = &ctx->overs[i];
1110 }
c_mtharue1a5ce12017-10-13 20:47:09 +05301111 sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001112 max.start = 0;
1113 max.end = 0;
1114 for (i = 0; i < nbufs; ++i) {
1115 if (ctx->overps[i]->start < max.end) {
1116 ctx->overps[i]->mstart = max.end;
1117 ctx->overps[i]->mend = ctx->overps[i]->end;
1118 ctx->overps[i]->offset = max.end -
1119 ctx->overps[i]->start;
1120 if (ctx->overps[i]->end > max.end) {
1121 max.end = ctx->overps[i]->end;
1122 } else {
1123 ctx->overps[i]->mend = 0;
1124 ctx->overps[i]->mstart = 0;
1125 }
1126 } else {
1127 ctx->overps[i]->mend = ctx->overps[i]->end;
1128 ctx->overps[i]->mstart = ctx->overps[i]->start;
1129 ctx->overps[i]->offset = 0;
1130 max = *ctx->overps[i];
1131 }
1132 }
Sathish Ambley9466d672017-01-25 10:51:55 -08001133bail:
1134 return err;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001135}
1136
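/*
 * K_COPY_FROM_USER / K_COPY_TO_USER: copy_{from,to}_user for user-space
 * callers, plain memmove when the request originates in the kernel.
 */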
1137#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
1138 do {\
1139 if (!(kernel))\
c_mtharue1a5ce12017-10-13 20:47:09 +05301140 VERIFY(err, 0 == copy_from_user((dst),\
1141 (void const __user *)(src),\
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001142 (size)));\
1143 else\
1144 memmove((dst), (src), (size));\
1145 } while (0)
1146
1147#define K_COPY_TO_USER(err, kernel, dst, src, size) \
1148 do {\
1149 if (!(kernel))\
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301150 VERIFY(err, 0 == copy_to_user((void __user *)(dst),\
c_mtharue1a5ce12017-10-13 20:47:09 +05301151 (src), (size)));\
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001152 else\
1153 memmove((dst), (src), (size));\
1154 } while (0)
1155
1156
1157static void context_free(struct smq_invoke_ctx *ctx);
1158
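/*
 * Allocate an invoke context, copy the argument descriptors, fds, attrs and
 * crc pointer from the caller, build the overlap table, and reserve a slot
 * in the global ctxtable whose index is encoded in the context id used to
 * route replies.
 */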
1159static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07001160 struct fastrpc_ioctl_invoke_crc *invokefd,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001161 struct smq_invoke_ctx **po)
1162{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301163 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301164 int err = 0, bufs, ii, size = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05301165 struct smq_invoke_ctx *ctx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001166 struct fastrpc_ctx_lst *clst = &fl->clst;
1167 struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;
1168
1169 bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
1170 size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
1171 sizeof(*ctx->fds) * (bufs) +
1172 sizeof(*ctx->attrs) * (bufs) +
1173 sizeof(*ctx->overs) * (bufs) +
1174 sizeof(*ctx->overps) * (bufs);
1175
c_mtharue1a5ce12017-10-13 20:47:09 +05301176 VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001177 if (err)
1178 goto bail;
1179
1180 INIT_HLIST_NODE(&ctx->hn);
1181 hlist_add_fake(&ctx->hn);
1182 ctx->fl = fl;
1183 ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
1184 ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
1185 ctx->fds = (int *)(&ctx->lpra[bufs]);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301186 if (me->legacy) {
1187 ctx->overs = (struct overlap *)(&ctx->fds[bufs]);
1188 ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
1189 } else {
1190 ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
1191 ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
1192 ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
1193 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001194
c_mtharue1a5ce12017-10-13 20:47:09 +05301195 K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001196 bufs * sizeof(*ctx->lpra));
1197 if (err)
1198 goto bail;
1199
1200 if (invokefd->fds) {
1201 K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
1202 bufs * sizeof(*ctx->fds));
1203 if (err)
1204 goto bail;
1205 }
1206 if (invokefd->attrs) {
1207 K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
1208 bufs * sizeof(*ctx->attrs));
1209 if (err)
1210 goto bail;
1211 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001212 ctx->crc = (uint32_t *)invokefd->crc;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001213 ctx->sc = invoke->sc;
Sathish Ambley9466d672017-01-25 10:51:55 -08001214 if (bufs) {
1215 VERIFY(err, 0 == context_build_overlap(ctx));
1216 if (err)
1217 goto bail;
1218 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001219 ctx->retval = -1;
1220 ctx->pid = current->pid;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301221 ctx->tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001222 init_completion(&ctx->work);
c_mtharufdac6892017-10-12 13:09:01 +05301223 ctx->magic = FASTRPC_CTX_MAGIC;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001224
1225 spin_lock(&fl->hlock);
1226 hlist_add_head(&ctx->hn, &clst->pending);
1227 spin_unlock(&fl->hlock);
1228
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301229 spin_lock(&me->ctxlock);
1230 for (ii = 0; ii < FASTRPC_CTX_MAX; ii++) {
1231 if (!me->ctxtable[ii]) {
1232 me->ctxtable[ii] = ctx;
1233 ctx->ctxid = (ptr_to_uint64(ctx) & ~0xFFF)|(ii << 4);
1234 break;
1235 }
1236 }
1237 spin_unlock(&me->ctxlock);
1238 VERIFY(err, ii < FASTRPC_CTX_MAX);
1239 if (err) {
1240 pr_err("adsprpc: out of context memory\n");
1241 goto bail;
1242 }
1243
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001244 *po = ctx;
1245bail:
1246 if (ctx && err)
1247 context_free(ctx);
1248 return err;
1249}
1250
1251static void context_save_interrupted(struct smq_invoke_ctx *ctx)
1252{
1253 struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
1254
1255 spin_lock(&ctx->fl->hlock);
1256 hlist_del_init(&ctx->hn);
1257 hlist_add_head(&ctx->hn, &clst->interrupted);
1258 spin_unlock(&ctx->fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001259}
1260
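/*
 * Tear down a context: drop its argument mappings, return the copy buffer to
 * the cache, and clear its ctxtable slot.
 */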
1261static void context_free(struct smq_invoke_ctx *ctx)
1262{
1263 int i;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301264 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001265 int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
1266 REMOTE_SCALARS_OUTBUFS(ctx->sc);
1267 spin_lock(&ctx->fl->hlock);
1268 hlist_del_init(&ctx->hn);
1269 spin_unlock(&ctx->fl->hlock);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301270 mutex_lock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001271 for (i = 0; i < nbufs; ++i)
c_mtharu7bd6a422017-10-17 18:15:37 +05301272 fastrpc_mmap_free(ctx->maps[i], 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301273
1274 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001275 fastrpc_buf_free(ctx->buf, 1);
c_mtharufdac6892017-10-12 13:09:01 +05301276 ctx->magic = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301277 ctx->ctxid = 0;
1278
1279 spin_lock(&me->ctxlock);
1280 for (i = 0; i < FASTRPC_CTX_MAX; i++) {
1281 if (me->ctxtable[i] == ctx) {
1282 me->ctxtable[i] = NULL;
1283 break;
1284 }
1285 }
1286 spin_unlock(&me->ctxlock);
1287
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001288 kfree(ctx);
1289}
1290
1291static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
1292{
1293 ctx->retval = retval;
1294 complete(&ctx->work);
1295}
1296
1297
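/* Complete every pending and interrupted context on a file so waiters return. */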
1298static void fastrpc_notify_users(struct fastrpc_file *me)
1299{
1300 struct smq_invoke_ctx *ictx;
1301 struct hlist_node *n;
1302
1303 spin_lock(&me->hlock);
1304 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1305 complete(&ictx->work);
1306 }
1307 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1308 complete(&ictx->work);
1309 }
1310 spin_unlock(&me->hlock);
1311
1312}
1313
Tharun Kumar Merugu77dd5872018-04-02 12:48:17 +05301314
1315static void fastrpc_notify_users_staticpd_pdr(struct fastrpc_file *me)
1316{
1317 struct smq_invoke_ctx *ictx;
1318 struct hlist_node *n;
1319
1320 spin_lock(&me->hlock);
1321 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1322 if (ictx->msg.pid)
1323 complete(&ictx->work);
1324 }
1325 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1326 if (ictx->msg.pid)
1327 complete(&ictx->work);
1328 }
1329 spin_unlock(&me->hlock);
1330}
1331
1332
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001333static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
1334{
1335 struct fastrpc_file *fl;
1336 struct hlist_node *n;
1337
1338 spin_lock(&me->hlock);
1339 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1340 if (fl->cid == cid)
1341 fastrpc_notify_users(fl);
1342 }
1343 spin_unlock(&me->hlock);
1344
1345}
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05301346
1347static void fastrpc_notify_pdr_drivers(struct fastrpc_apps *me, char *spdname)
1348{
1349 struct fastrpc_file *fl;
1350 struct hlist_node *n;
1351
1352 spin_lock(&me->hlock);
1353 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1354 if (fl->spdname && !strcmp(spdname, fl->spdname))
Tharun Kumar Merugu77dd5872018-04-02 12:48:17 +05301355 fastrpc_notify_users_staticpd_pdr(fl);
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05301356 }
1357 spin_unlock(&me->hlock);
1358
1359}
1360
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001361static void context_list_ctor(struct fastrpc_ctx_lst *me)
1362{
1363 INIT_HLIST_HEAD(&me->interrupted);
1364 INIT_HLIST_HEAD(&me->pending);
1365}
1366
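/* Free every interrupted and pending context still queued on a file. */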
1367static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
1368{
1369 struct fastrpc_ctx_lst *clst = &fl->clst;
c_mtharue1a5ce12017-10-13 20:47:09 +05301370 struct smq_invoke_ctx *ictx = NULL, *ctxfree;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001371 struct hlist_node *n;
1372
1373 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301374 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001375 spin_lock(&fl->hlock);
1376 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
1377 hlist_del_init(&ictx->hn);
1378 ctxfree = ictx;
1379 break;
1380 }
1381 spin_unlock(&fl->hlock);
1382 if (ctxfree)
1383 context_free(ctxfree);
1384 } while (ctxfree);
1385 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301386 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001387 spin_lock(&fl->hlock);
1388 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
1389 hlist_del_init(&ictx->hn);
1390 ctxfree = ictx;
1391 break;
1392 }
1393 spin_unlock(&fl->hlock);
1394 if (ctxfree)
1395 context_free(ctxfree);
1396 } while (ctxfree);
1397}
1398
1399static int fastrpc_file_free(struct fastrpc_file *fl);
1400static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
1401{
1402 struct fastrpc_file *fl, *free;
1403 struct hlist_node *n;
1404
1405 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301406 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001407 spin_lock(&me->hlock);
1408 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1409 hlist_del_init(&fl->hn);
1410 free = fl;
1411 break;
1412 }
1413 spin_unlock(&me->hlock);
1414 if (free)
1415 fastrpc_file_free(free);
1416 } while (free);
1417}
1418
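/*
 * Marshal the arguments for an invoke: map fd-backed buffers, size and
 * allocate a single metadata/copy buffer, then fill in the remote argument
 * and page descriptors for the DSP.
 */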
1419static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
1420{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301421 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001422 remote_arg64_t *rpra;
1423 remote_arg_t *lpra = ctx->lpra;
1424 struct smq_invoke_buf *list;
1425 struct smq_phy_page *pages, *ipage;
1426 uint32_t sc = ctx->sc;
1427 int inbufs = REMOTE_SCALARS_INBUFS(sc);
1428 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001429 int handles, bufs = inbufs + outbufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001430 uintptr_t args;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301431 size_t rlen = 0, copylen = 0, metalen = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001432 int i, oix;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001433 int err = 0;
1434 int mflags = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001435 uint64_t *fdlist;
Sathish Ambleybae51902017-07-03 15:00:49 -07001436 uint32_t *crclist;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301437 int64_t *perf_counter = getperfcounter(ctx->fl, PERF_COUNT);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001438
1439 /* calculate size of the metadata */
c_mtharue1a5ce12017-10-13 20:47:09 +05301440 rpra = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001441 list = smq_invoke_buf_start(rpra, sc);
1442 pages = smq_phy_page_start(sc, list);
1443 ipage = pages;
1444
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301445 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001446 for (i = 0; i < bufs; ++i) {
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301447 uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
1448 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001449
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301450 mutex_lock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301451 if (ctx->fds[i] && (ctx->fds[i] != -1)) {
1452 unsigned int attrs = 0;
1453
1454 if (ctx->attrs)
1455 attrs = ctx->attrs[i];
1456
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001457 fastrpc_mmap_create(ctx->fl, ctx->fds[i],
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301458 attrs, buf, len,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001459 mflags, &ctx->maps[i]);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301460 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301461 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001462 ipage += 1;
1463 }
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301464 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001465 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301466 mutex_lock(&ctx->fl->fl_map_mutex);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001467 for (i = bufs; i < bufs + handles; i++) {
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +05301468 int dmaflags = 0;
1469
1470 if (ctx->attrs && (ctx->attrs[i] & FASTRPC_ATTR_NOMAP))
1471 dmaflags = FASTRPC_DMAHANDLE_NOMAP;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001472 VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +05301473 FASTRPC_ATTR_NOVA, 0, 0, dmaflags, &ctx->maps[i]));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301474 if (err) {
1475 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001476 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301477 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001478 ipage += 1;
1479 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301480 mutex_unlock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301481 if (!me->legacy) {
1482 metalen = copylen = (size_t)&ipage[0] +
1483 (sizeof(uint64_t) * M_FDLIST) +
1484 (sizeof(uint32_t) * M_CRCLIST);
1485 } else {
1486 metalen = copylen = (size_t)&ipage[0];
1487 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001488
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001489	/* calculate length required for copying */
1490 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1491 int i = ctx->overps[oix]->raix;
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001492 uintptr_t mstart, mend;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301493 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001494
1495 if (!len)
1496 continue;
1497 if (ctx->maps[i])
1498 continue;
1499 if (ctx->overps[oix]->offset == 0)
1500 copylen = ALIGN(copylen, BALIGN);
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001501 mstart = ctx->overps[oix]->mstart;
1502 mend = ctx->overps[oix]->mend;
1503 VERIFY(err, (mend - mstart) <= LONG_MAX);
1504 if (err)
1505 goto bail;
1506 copylen += mend - mstart;
1507 VERIFY(err, copylen >= 0);
1508 if (err)
1509 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001510 }
1511 ctx->used = copylen;
1512
1513 /* allocate new buffer */
1514 if (copylen) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05301515 err = fastrpc_buf_alloc(ctx->fl, copylen, 0, 0, 0, &ctx->buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001516 if (err)
1517 goto bail;
1518 }
Tharun Kumar Merugue3361f92017-06-22 10:45:43 +05301519 if (ctx->buf->virt && metalen <= copylen)
1520 memset(ctx->buf->virt, 0, metalen);
1521
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001522 /* copy metadata */
1523 rpra = ctx->buf->virt;
1524 ctx->rpra = rpra;
1525 list = smq_invoke_buf_start(rpra, sc);
1526 pages = smq_phy_page_start(sc, list);
1527 ipage = pages;
1528 args = (uintptr_t)ctx->buf->virt + metalen;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001529 for (i = 0; i < bufs + handles; ++i) {
1530 if (lpra[i].buf.len)
1531 list[i].num = 1;
1532 else
1533 list[i].num = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001534 list[i].pgidx = ipage - pages;
1535 ipage++;
1536 }
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301537
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001538 /* map ion buffers */
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301539 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301540 for (i = 0; rpra && i < inbufs + outbufs; ++i) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001541 struct fastrpc_mmap *map = ctx->maps[i];
1542 uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301543 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001544
1545 rpra[i].buf.pv = 0;
1546 rpra[i].buf.len = len;
1547 if (!len)
1548 continue;
1549 if (map) {
1550 struct vm_area_struct *vma;
1551 uintptr_t offset;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301552 uint64_t num = buf_num_pages(buf, len);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001553 int idx = list[i].pgidx;
1554
1555 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001556 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001557 } else {
1558 down_read(&current->mm->mmap_sem);
1559 VERIFY(err, NULL != (vma = find_vma(current->mm,
1560 map->va)));
1561 if (err) {
1562 up_read(&current->mm->mmap_sem);
1563 goto bail;
1564 }
1565 offset = buf_page_start(buf) - vma->vm_start;
1566 up_read(&current->mm->mmap_sem);
1567 VERIFY(err, offset < (uintptr_t)map->size);
1568 if (err)
1569 goto bail;
1570 }
1571 pages[idx].addr = map->phys + offset;
1572 pages[idx].size = num << PAGE_SHIFT;
1573 }
1574 rpra[i].buf.pv = buf;
1575 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001576 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001577 for (i = bufs; i < bufs + handles; ++i) {
1578 struct fastrpc_mmap *map = ctx->maps[i];
1579
1580 pages[i].addr = map->phys;
1581 pages[i].size = map->size;
1582 }
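	/* clear the fd and crc lists that the dsp fills in on return (non-legacy only) */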
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301583 if (!me->legacy) {
1584 fdlist = (uint64_t *)&pages[bufs + handles];
1585 for (i = 0; i < M_FDLIST; i++)
1586 fdlist[i] = 0;
1587 crclist = (uint32_t *)&fdlist[M_FDLIST];
1588 memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
1589 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001590
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001591	/* copy non-ion buffers */
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301592 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001593 rlen = copylen - metalen;
Tharun Kumar Meruguc230bd72018-01-29 18:02:42 +05301594 for (oix = 0; rpra && oix < inbufs + outbufs; ++oix) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001595 int i = ctx->overps[oix]->raix;
1596 struct fastrpc_mmap *map = ctx->maps[i];
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301597 size_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001598 uint64_t buf;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301599 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001600
1601 if (!len)
1602 continue;
1603 if (map)
1604 continue;
1605 if (ctx->overps[oix]->offset == 0) {
1606 rlen -= ALIGN(args, BALIGN) - args;
1607 args = ALIGN(args, BALIGN);
1608 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001609 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001610 VERIFY(err, rlen >= mlen);
1611 if (err)
1612 goto bail;
1613 rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
1614 pages[list[i].pgidx].addr = ctx->buf->phys -
1615 ctx->overps[oix]->offset +
1616 (copylen - rlen);
1617 pages[list[i].pgidx].addr =
1618 buf_page_start(pages[list[i].pgidx].addr);
1619 buf = rpra[i].buf.pv;
1620 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1621 if (i < inbufs) {
1622 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1623 lpra[i].buf.pv, len);
1624 if (err)
1625 goto bail;
1626 }
1627 args = args + mlen;
1628 rlen -= mlen;
1629 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001630 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001631
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301632 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001633 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1634 int i = ctx->overps[oix]->raix;
1635 struct fastrpc_mmap *map = ctx->maps[i];
1636
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001637 if (map && map->uncached)
1638 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301639 if (ctx->fl->sctx->smmu.coherent &&
1640 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1641 continue;
1642 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1643 continue;
1644
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301645 if (rpra && rpra[i].buf.len && ctx->overps[oix]->mstart) {
1646 if (map && map->handle)
1647 msm_ion_do_cache_op(ctx->fl->apps->client,
1648 map->handle,
1649 uint64_to_ptr(rpra[i].buf.pv),
1650 rpra[i].buf.len,
1651 ION_IOC_CLEAN_INV_CACHES);
1652 else
1653 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1654 uint64_to_ptr(rpra[i].buf.pv
1655 + rpra[i].buf.len));
1656 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001657 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001658 PERF_END);
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301659 for (i = bufs; rpra && i < bufs + handles; i++) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001660 rpra[i].dma.fd = ctx->fds[i];
1661 rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1662 rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001663 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001664
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001665 bail:
1666 return err;
1667}
1668
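/*
 * Copy output buffers back to the user, release the per-invoke mappings, free
 * any maps whose fds the DSP returned in the fd list, and hand the CRC list
 * back to the caller when one was requested.
 */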
1669static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
1670 remote_arg_t *upra)
1671{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301672 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001673 uint32_t sc = ctx->sc;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001674 struct smq_invoke_buf *list;
1675 struct smq_phy_page *pages;
1676 struct fastrpc_mmap *mmap;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301677 uint64_t *fdlist = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001678 uint32_t *crclist = NULL;
1679
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001680 remote_arg64_t *rpra = ctx->rpra;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001681 int i, inbufs, outbufs, handles;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001682 int err = 0;
1683
1684 inbufs = REMOTE_SCALARS_INBUFS(sc);
1685 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001686 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1687 list = smq_invoke_buf_start(ctx->rpra, sc);
1688 pages = smq_phy_page_start(sc, list);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301689 if (!me->legacy) {
1690 fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
1691 crclist = (uint32_t *)(fdlist + M_FDLIST);
1692 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001693
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001694 for (i = inbufs; i < inbufs + outbufs; ++i) {
1695 if (!ctx->maps[i]) {
1696 K_COPY_TO_USER(err, kernel,
1697 ctx->lpra[i].buf.pv,
1698 uint64_to_ptr(rpra[i].buf.pv),
1699 rpra[i].buf.len);
1700 if (err)
1701 goto bail;
1702 } else {
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301703 mutex_lock(&ctx->fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05301704 fastrpc_mmap_free(ctx->maps[i], 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301705 mutex_unlock(&ctx->fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05301706 ctx->maps[i] = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001707 }
1708 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301709 mutex_lock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301710 if (fdlist && (inbufs + outbufs + handles)) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001711 for (i = 0; i < M_FDLIST; i++) {
1712 if (!fdlist[i])
1713 break;
1714 if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
Sathish Ambleyae5ee542017-01-16 22:24:23 -08001715 0, 0, &mmap))
c_mtharu7bd6a422017-10-17 18:15:37 +05301716 fastrpc_mmap_free(mmap, 0);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001717 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001718 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301719 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambleybae51902017-07-03 15:00:49 -07001720 if (ctx->crc && crclist && rpra)
c_mtharue1a5ce12017-10-13 20:47:09 +05301721 K_COPY_TO_USER(err, kernel, ctx->crc,
Sathish Ambleybae51902017-07-03 15:00:49 -07001722 crclist, M_CRCLIST*sizeof(uint32_t));
1723
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001724 bail:
1725 return err;
1726}
1727
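/*
 * Pre-invoke cache maintenance: for cached, non-coherent output buffers, clean
 * and invalidate the cache lines straddling any unaligned start or end of the
 * buffer, presumably so that partially shared lines are not written back over
 * data the DSP is about to produce.
 */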
1728static void inv_args_pre(struct smq_invoke_ctx *ctx)
1729{
1730 int i, inbufs, outbufs;
1731 uint32_t sc = ctx->sc;
1732 remote_arg64_t *rpra = ctx->rpra;
1733 uintptr_t end;
1734
1735 inbufs = REMOTE_SCALARS_INBUFS(sc);
1736 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1737 for (i = inbufs; i < inbufs + outbufs; ++i) {
1738 struct fastrpc_mmap *map = ctx->maps[i];
1739
1740 if (map && map->uncached)
1741 continue;
1742 if (!rpra[i].buf.len)
1743 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301744 if (ctx->fl->sctx->smmu.coherent &&
1745 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1746 continue;
1747 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1748 continue;
1749
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001750 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1751 buf_page_start(rpra[i].buf.pv))
1752 continue;
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301753 if (!IS_CACHE_ALIGNED((uintptr_t)
1754 uint64_to_ptr(rpra[i].buf.pv))) {
1755 if (map && map->handle)
1756 msm_ion_do_cache_op(ctx->fl->apps->client,
1757 map->handle,
1758 uint64_to_ptr(rpra[i].buf.pv),
1759 sizeof(uintptr_t),
1760 ION_IOC_CLEAN_INV_CACHES);
1761 else
1762 dmac_flush_range(
1763 uint64_to_ptr(rpra[i].buf.pv), (char *)
1764 uint64_to_ptr(rpra[i].buf.pv + 1));
1765 }
1766
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001767 end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
1768 rpra[i].buf.len);
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301769 if (!IS_CACHE_ALIGNED(end)) {
1770 if (map && map->handle)
1771 msm_ion_do_cache_op(ctx->fl->apps->client,
1772 map->handle,
1773 uint64_to_ptr(end),
1774 sizeof(uintptr_t),
1775 ION_IOC_CLEAN_INV_CACHES);
1776 else
1777 dmac_flush_range((char *)end,
1778 (char *)end + 1);
1779 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001780 }
1781}
1782
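/*
 * Post-invoke cache maintenance: invalidate the CPU caches covering each
 * cached, non-coherent output buffer so the CPU observes the data written by
 * the DSP; buffers sharing a page with the metadata are skipped.
 */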
1783static void inv_args(struct smq_invoke_ctx *ctx)
1784{
1785 int i, inbufs, outbufs;
1786 uint32_t sc = ctx->sc;
1787 remote_arg64_t *rpra = ctx->rpra;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001788
1789 inbufs = REMOTE_SCALARS_INBUFS(sc);
1790 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1791 for (i = inbufs; i < inbufs + outbufs; ++i) {
1792 struct fastrpc_mmap *map = ctx->maps[i];
1793
1794 if (map && map->uncached)
1795 continue;
1796 if (!rpra[i].buf.len)
1797 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301798 if (ctx->fl->sctx->smmu.coherent &&
1799 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1800 continue;
1801 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1802 continue;
1803
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001804 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1805 buf_page_start(rpra[i].buf.pv)) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001806 continue;
1807 }
1808 if (map && map->handle)
1809 msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
1810 (char *)uint64_to_ptr(rpra[i].buf.pv),
1811 rpra[i].buf.len, ION_IOC_INV_CACHES);
1812 else
1813 dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
1814 (char *)uint64_to_ptr(rpra[i].buf.pv
1815 + rpra[i].buf.len));
1816 }
1817
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001818}
1819
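/*
 * Build the smq_msg for this invocation (pid/tid, context id, handle, scalars
 * and the physical page of the argument buffer) and transmit it to the remote
 * processor over glink when available, otherwise over smd.
 */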
1820static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
1821 uint32_t kernel, uint32_t handle)
1822{
1823 struct smq_msg *msg = &ctx->msg;
1824 struct fastrpc_file *fl = ctx->fl;
1825 struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301826 int err = 0, len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001827
c_mtharue1a5ce12017-10-13 20:47:09 +05301828 VERIFY(err, NULL != channel_ctx->chan);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001829 if (err)
1830 goto bail;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301831 msg->pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001832 msg->tid = current->pid;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301833 if (fl->sessionid)
1834 msg->tid |= (1 << SESSION_ID_INDEX);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001835 if (kernel)
1836 msg->pid = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301837 msg->invoke.header.ctx = ctx->ctxid | fl->pd;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001838 msg->invoke.header.handle = handle;
1839 msg->invoke.header.sc = ctx->sc;
1840 msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
1841 msg->invoke.page.size = buf_page_size(ctx->used);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301842 if (fl->apps->glink) {
1843 if (fl->ssrcount != channel_ctx->ssrcount) {
1844 err = -ECONNRESET;
1845 goto bail;
1846 }
1847 VERIFY(err, channel_ctx->link.port_state ==
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001848 FASTRPC_LINK_CONNECTED);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301849 if (err)
1850 goto bail;
1851 err = glink_tx(channel_ctx->chan,
1852 (void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
1853 GLINK_TX_REQ_INTENT);
1854 } else {
1855 spin_lock(&fl->apps->hlock);
1856 len = smd_write((smd_channel_t *)
1857 channel_ctx->chan,
1858 msg, sizeof(*msg));
1859 spin_unlock(&fl->apps->hlock);
1860 VERIFY(err, len == sizeof(*msg));
1861 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001862 bail:
1863 return err;
1864}
1865
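/*
 * SMD receive path: drain response packets from the channel, validate each
 * context index and magic, and wake the thread waiting on that invocation.
 */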
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301866static void fastrpc_smd_read_handler(int cid)
1867{
1868 struct fastrpc_apps *me = &gfa;
1869 struct smq_invoke_rsp rsp = {0};
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301870 int ret = 0, err = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301871 uint32_t index;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301872
1873 do {
1874 ret = smd_read_from_cb(me->channel[cid].chan, &rsp,
1875 sizeof(rsp));
1876 if (ret != sizeof(rsp))
1877 break;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301878
1879 index = (uint32_t)((rsp.ctx & FASTRPC_CTXID_MASK) >> 4);
1880 VERIFY(err, index < FASTRPC_CTX_MAX);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301881 if (err)
1882 goto bail;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301883
1884 VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
1885 if (err)
1886 goto bail;
1887
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05301888 VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp.ctx & ~3)) &&
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301889 me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
1890 if (err)
1891 goto bail;
1892
1893 context_notify_user(me->ctxtable[index], rsp.retval);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301894 } while (ret == sizeof(rsp));
1895bail:
1896 if (err)
1897 pr_err("adsprpc: invalid response or context\n");
1898
1899}
1900
1901static void smd_event_handler(void *priv, unsigned int event)
1902{
1903 struct fastrpc_apps *me = &gfa;
1904 int cid = (int)(uintptr_t)priv;
1905
1906 switch (event) {
1907 case SMD_EVENT_OPEN:
1908 complete(&me->channel[cid].workport);
1909 break;
1910 case SMD_EVENT_CLOSE:
1911 fastrpc_notify_drivers(me, cid);
1912 break;
1913 case SMD_EVENT_DATA:
1914 fastrpc_smd_read_handler(cid);
1915 break;
1916 }
1917}
1918
1919
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001920static void fastrpc_init(struct fastrpc_apps *me)
1921{
1922 int i;
1923
1924 INIT_HLIST_HEAD(&me->drivers);
Tharun Kumar Merugubcd6fbf2018-01-04 17:49:34 +05301925 INIT_HLIST_HEAD(&me->maps);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001926 spin_lock_init(&me->hlock);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301927 spin_lock_init(&me->ctxlock);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05301928 mutex_init(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001929 me->channel = &gcinfo[0];
1930 for (i = 0; i < NUM_CHANNELS; i++) {
1931 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301932 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001933 me->channel[i].sesscount = 0;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05301934 /* All channels are secure by default except CDSP */
1935 me->channel[i].secure = SECURE_CHANNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001936 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05301937	/* Set the CDSP channel to non-secure */
1938 me->channel[CDSP_DOMAIN_ID].secure = NON_SECURE_CHANNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001939}
1940
1941static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1942
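/*
 * Core invoke path shared by user ioctls and kernel-initiated calls: restore
 * an interrupted context or allocate a new one, marshal the arguments
 * (get_args), do pre-invoke cache maintenance, send the message, wait for the
 * DSP response, invalidate caches, copy results back (put_args), and update
 * the optional perf counters.
 */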
1943static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
1944 uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07001945 struct fastrpc_ioctl_invoke_crc *inv)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001946{
c_mtharue1a5ce12017-10-13 20:47:09 +05301947 struct smq_invoke_ctx *ctx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001948 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
1949 int cid = fl->cid;
1950 int interrupted = 0;
1951 int err = 0;
Maria Yu757199c2017-09-22 16:05:49 +08001952 struct timespec invoket = {0};
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301953 int64_t *perf_counter = getperfcounter(fl, PERF_COUNT);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001954
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001955 if (fl->profile)
1956 getnstimeofday(&invoket);
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301957
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05301958 if (!kernel) {
1959 VERIFY(err, invoke->handle != FASTRPC_STATIC_HANDLE_KERNEL);
1960 if (err) {
1961 pr_err("adsprpc: ERROR: %s: user application %s trying to send a kernel RPC message to channel %d",
1962 __func__, current->comm, cid);
1963 goto bail;
1964 }
1965 }
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301966
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301967 VERIFY(err, fl->sctx != NULL);
1968 if (err)
1969 goto bail;
1970 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1971 if (err)
1972 goto bail;
1973
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001974 if (!kernel) {
1975 VERIFY(err, 0 == context_restore_interrupted(fl, inv,
1976 &ctx));
1977 if (err)
1978 goto bail;
1979 if (fl->sctx->smmu.faults)
1980 err = FASTRPC_ENOSUCH;
1981 if (err)
1982 goto bail;
1983 if (ctx)
1984 goto wait;
1985 }
1986
1987 VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
1988 if (err)
1989 goto bail;
1990
1991 if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301992 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_GETARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001993 VERIFY(err, 0 == get_args(kernel, ctx));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001994 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001995 if (err)
1996 goto bail;
1997 }
1998
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301999 if (!fl->sctx->smmu.coherent) {
2000 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002001 inv_args_pre(ctx);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302002 PERF_END);
2003 }
2004
2005 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_LINK),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002006 VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002007 PERF_END);
2008
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002009 if (err)
2010 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002011 wait:
2012 if (kernel)
2013 wait_for_completion(&ctx->work);
2014 else {
2015 interrupted = wait_for_completion_interruptible(&ctx->work);
2016 VERIFY(err, 0 == (err = interrupted));
2017 if (err)
2018 goto bail;
2019 }
Sathish Ambleyc432b502017-06-05 12:03:42 -07002020
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302021 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
Sathish Ambleyc432b502017-06-05 12:03:42 -07002022 if (!fl->sctx->smmu.coherent)
2023 inv_args(ctx);
2024 PERF_END);
2025
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002026 VERIFY(err, 0 == (err = ctx->retval));
2027 if (err)
2028 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002029
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302030 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_PUTARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002031 VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002032 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002033 if (err)
2034 goto bail;
2035 bail:
2036 if (ctx && interrupted == -ERESTARTSYS)
2037 context_save_interrupted(ctx);
2038 else if (ctx)
2039 context_free(ctx);
2040 if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
2041 err = ECONNRESET;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002042
2043 if (fl->profile && !interrupted) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302044 if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER) {
2045 int64_t *count = GET_COUNTER(perf_counter, PERF_INVOKE);
2046
2047 if (count)
2048 *count += getnstimediff(&invoket);
2049 }
2050 if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX) {
2051 int64_t *count = GET_COUNTER(perf_counter, PERF_COUNT);
2052
2053 if (count)
2054 *count = *count+1;
2055 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002056 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002057 return err;
2058}
2059
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302060static int fastrpc_get_adsp_session(char *name, int *session)
2061{
2062 struct fastrpc_apps *me = &gfa;
2063 int err = 0, i;
2064
2065 for (i = 0; i < NUM_SESSIONS; i++) {
2066 if (!me->channel[0].spd[i].spdname)
2067 continue;
2068 if (!strcmp(name, me->channel[0].spd[i].spdname))
2069 break;
2070 }
2071 VERIFY(err, i < NUM_SESSIONS);
2072 if (err)
2073 goto bail;
2074 *session = i;
2075bail:
2076 return err;
2077}
2078
2079static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl);
Sathish Ambley36849af2017-02-02 09:35:55 -08002080static int fastrpc_channel_open(struct fastrpc_file *fl);
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302081static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl);
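/*
 * Handle FASTRPC_IOCTL_INIT: attach to an existing protection domain
 * (FASTRPC_INIT_ATTACH / ATTACH_SENSORS), spawn a dynamic user PD
 * (FASTRPC_INIT_CREATE) using kernel-allocated donation memory, or bring up a
 * static PD such as audiopd (FASTRPC_INIT_CREATE_STATIC), in each case via a
 * remote call on the kernel static handle.
 */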
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002082static int fastrpc_init_process(struct fastrpc_file *fl,
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002083 struct fastrpc_ioctl_init_attrs *uproc)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002084{
2085 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302086 struct fastrpc_apps *me = &gfa;
Sathish Ambleybae51902017-07-03 15:00:49 -07002087 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002088 struct fastrpc_ioctl_init *init = &uproc->init;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002089 struct smq_phy_page pages[1];
c_mtharue1a5ce12017-10-13 20:47:09 +05302090 struct fastrpc_mmap *file = NULL, *mem = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302091 struct fastrpc_buf *imem = NULL;
2092 unsigned long imem_dma_attr = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302093 char *proc_name = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002094
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302095 VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
Sathish Ambley36849af2017-02-02 09:35:55 -08002096 if (err)
2097 goto bail;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302098 if (init->flags == FASTRPC_INIT_ATTACH ||
2099 init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002100 remote_arg_t ra[1];
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302101 int tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002102
2103 ra[0].buf.pv = (void *)&tgid;
2104 ra[0].buf.len = sizeof(tgid);
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302105 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002106 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
2107 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302108 ioctl.fds = NULL;
2109 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002110 ioctl.crc = NULL;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302111 if (init->flags == FASTRPC_INIT_ATTACH)
2112 fl->pd = 0;
2113 else if (init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
2114 fl->spdname = SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME;
2115 fl->pd = 2;
2116 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002117 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2118 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2119 if (err)
2120 goto bail;
2121 } else if (init->flags == FASTRPC_INIT_CREATE) {
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002122 remote_arg_t ra[6];
2123 int fds[6];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002124 int mflags = 0;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302125 int memlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002126 struct {
2127 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302128 unsigned int namelen;
2129 unsigned int filelen;
2130 unsigned int pageslen;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002131 int attrs;
2132 int siglen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002133 } inbuf;
2134
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302135 inbuf.pgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002136 inbuf.namelen = strlen(current->comm) + 1;
2137 inbuf.filelen = init->filelen;
2138 fl->pd = 1;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302139
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05302140 VERIFY(err, access_ok(0, (void __user *)init->file,
2141 init->filelen));
2142 if (err)
2143 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002144 if (init->filelen) {
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302145 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002146 VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
2147 init->file, init->filelen, mflags, &file));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302148 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002149 if (err)
2150 goto bail;
2151 }
2152 inbuf.pageslen = 1;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302153
2154 VERIFY(err, !init->mem);
2155 if (err) {
2156 err = -EINVAL;
2157 pr_err("adsprpc: %s: %s: ERROR: donated memory allocated in userspace\n",
2158 current->comm, __func__);
2159 goto bail;
2160 }
2161 memlen = ALIGN(max(1024*1024*3, (int)init->filelen * 4),
2162 1024*1024);
2163 imem_dma_attr = DMA_ATTR_EXEC_MAPPING |
2164 DMA_ATTR_NO_KERNEL_MAPPING |
2165 DMA_ATTR_FORCE_NON_COHERENT;
2166 err = fastrpc_buf_alloc(fl, memlen, imem_dma_attr, 0, 0, &imem);
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05302167 if (err)
2168 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302169 fl->init_mem = imem;
2170
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002171 inbuf.pageslen = 1;
2172 ra[0].buf.pv = (void *)&inbuf;
2173 ra[0].buf.len = sizeof(inbuf);
2174 fds[0] = 0;
2175
2176 ra[1].buf.pv = (void *)current->comm;
2177 ra[1].buf.len = inbuf.namelen;
2178 fds[1] = 0;
2179
2180 ra[2].buf.pv = (void *)init->file;
2181 ra[2].buf.len = inbuf.filelen;
2182 fds[2] = init->filefd;
2183
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302184 pages[0].addr = imem->phys;
2185 pages[0].size = imem->size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002186 ra[3].buf.pv = (void *)pages;
2187 ra[3].buf.len = 1 * sizeof(*pages);
2188 fds[3] = 0;
2189
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002190 inbuf.attrs = uproc->attrs;
2191 ra[4].buf.pv = (void *)&(inbuf.attrs);
2192 ra[4].buf.len = sizeof(inbuf.attrs);
2193 fds[4] = 0;
2194
2195 inbuf.siglen = uproc->siglen;
2196 ra[5].buf.pv = (void *)&(inbuf.siglen);
2197 ra[5].buf.len = sizeof(inbuf.siglen);
2198 fds[5] = 0;
2199
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302200 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002201 ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002202 if (uproc->attrs)
2203 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002204 ioctl.inv.pra = ra;
2205 ioctl.fds = fds;
c_mtharue1a5ce12017-10-13 20:47:09 +05302206 ioctl.attrs = NULL;
2207 ioctl.crc = NULL;
2208 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2209 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2210 if (err)
2211 goto bail;
2212 } else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
2213 remote_arg_t ra[3];
2214 uint64_t phys = 0;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302215 size_t size = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302216 int fds[3];
2217 struct {
2218 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302219 unsigned int namelen;
2220 unsigned int pageslen;
c_mtharue1a5ce12017-10-13 20:47:09 +05302221 } inbuf;
2222
2223 if (!init->filelen)
2224 goto bail;
2225
2226 proc_name = kzalloc(init->filelen, GFP_KERNEL);
2227 VERIFY(err, !IS_ERR_OR_NULL(proc_name));
2228 if (err)
2229 goto bail;
2230 VERIFY(err, 0 == copy_from_user((void *)proc_name,
2231 (void __user *)init->file, init->filelen));
2232 if (err)
2233 goto bail;
2234
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302235 fl->pd = 1;
c_mtharue1a5ce12017-10-13 20:47:09 +05302236 inbuf.pgid = current->tgid;
c_mtharu81a0aa72017-11-07 16:13:21 +05302237 inbuf.namelen = init->filelen;
c_mtharue1a5ce12017-10-13 20:47:09 +05302238 inbuf.pageslen = 0;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302239
2240 if (!strcmp(proc_name, "audiopd")) {
2241 fl->spdname = AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME;
2242 VERIFY(err, !fastrpc_mmap_remove_pdr(fl));
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05302243 if (err)
2244 goto bail;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302245 }
2246
c_mtharue1a5ce12017-10-13 20:47:09 +05302247 if (!me->staticpd_flags) {
2248 inbuf.pageslen = 1;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302249 mutex_lock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302250 VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
2251 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
2252 &mem));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302253 mutex_unlock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302254 if (err)
2255 goto bail;
2256 phys = mem->phys;
2257 size = mem->size;
2258 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302259 hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
2260 me->channel[fl->cid].rhvm.vmperm,
2261 me->channel[fl->cid].rhvm.vmcount));
c_mtharue1a5ce12017-10-13 20:47:09 +05302262 if (err) {
2263 pr_err("ADSPRPC: hyp_assign_phys fail err %d",
2264 err);
2265 pr_err("map->phys %llx, map->size %d\n",
2266 phys, (int)size);
2267 goto bail;
2268 }
2269 me->staticpd_flags = 1;
2270 }
2271
2272 ra[0].buf.pv = (void *)&inbuf;
2273 ra[0].buf.len = sizeof(inbuf);
2274 fds[0] = 0;
2275
2276 ra[1].buf.pv = (void *)proc_name;
2277 ra[1].buf.len = inbuf.namelen;
2278 fds[1] = 0;
2279
2280 pages[0].addr = phys;
2281 pages[0].size = size;
2282
2283 ra[2].buf.pv = (void *)pages;
2284 ra[2].buf.len = sizeof(*pages);
2285 fds[2] = 0;
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302286 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
c_mtharue1a5ce12017-10-13 20:47:09 +05302287
2288 ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
2289 ioctl.inv.pra = ra;
2290 ioctl.fds = NULL;
2291 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002292 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002293 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2294 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2295 if (err)
2296 goto bail;
2297 } else {
2298 err = -ENOTTY;
2299 }
2300bail:
c_mtharud91205a2017-11-07 16:01:06 +05302301 kfree(proc_name);
c_mtharue1a5ce12017-10-13 20:47:09 +05302302 if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
2303 me->staticpd_flags = 0;
2304 if (mem && err) {
2305 if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
2306 hyp_assign_phys(mem->phys, (uint64_t)mem->size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302307 me->channel[fl->cid].rhvm.vmid,
2308 me->channel[fl->cid].rhvm.vmcount,
2309 hlosvm, hlosvmperm, 1);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302310 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302311 fastrpc_mmap_free(mem, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302312 mutex_unlock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302313 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302314 if (file) {
2315 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302316 fastrpc_mmap_free(file, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302317 mutex_unlock(&fl->fl_map_mutex);
2318 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002319 return err;
2320}
2321
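/*
 * Ask the DSP to tear down the process group associated with this client
 * before the file state is released.
 */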
2322static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
2323{
2324 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07002325 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002326 remote_arg_t ra[1];
2327 int tgid = 0;
2328
Sathish Ambley36849af2017-02-02 09:35:55 -08002329 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
2330 if (err)
2331 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302332 VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002333 if (err)
2334 goto bail;
2335 tgid = fl->tgid;
2336 ra[0].buf.pv = (void *)&tgid;
2337 ra[0].buf.len = sizeof(tgid);
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302338 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002339 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
2340 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302341 ioctl.fds = NULL;
2342 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002343 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002344 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2345 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2346bail:
2347 return err;
2348}
2349
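/*
 * Register a buffer with the DSP through the remote mmap call and return the
 * remote-side address. ADSP_MMAP_HEAP_ADDR regions are additionally protected
 * through an SCM call, and ADSP_MMAP_REMOTE_HEAP_ADDR regions are hyp-assigned
 * to the remote heap VM.
 */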
2350static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302351 uintptr_t va, uint64_t phys,
2352 size_t size, uintptr_t *raddr)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002353{
Sathish Ambleybae51902017-07-03 15:00:49 -07002354 struct fastrpc_ioctl_invoke_crc ioctl;
c_mtharu63ffc012017-11-16 15:26:56 +05302355 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002356 struct smq_phy_page page;
2357 int num = 1;
2358 remote_arg_t ra[3];
2359 int err = 0;
2360 struct {
2361 int pid;
2362 uint32_t flags;
2363 uintptr_t vaddrin;
2364 int num;
2365 } inargs;
2366 struct {
2367 uintptr_t vaddrout;
2368 } routargs;
2369
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302370 inargs.pid = fl->tgid;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302371 inargs.vaddrin = (uintptr_t)va;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002372 inargs.flags = flags;
2373 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
2374 ra[0].buf.pv = (void *)&inargs;
2375 ra[0].buf.len = sizeof(inargs);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302376 page.addr = phys;
2377 page.size = size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002378 ra[1].buf.pv = (void *)&page;
2379 ra[1].buf.len = num * sizeof(page);
2380
2381 ra[2].buf.pv = (void *)&routargs;
2382 ra[2].buf.len = sizeof(routargs);
2383
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302384 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002385 if (fl->apps->compat)
2386 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
2387 else
2388 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
2389 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302390 ioctl.fds = NULL;
2391 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002392 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002393 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2394 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302395 *raddr = (uintptr_t)routargs.vaddrout;
c_mtharue1a5ce12017-10-13 20:47:09 +05302396 if (err)
2397 goto bail;
2398 if (flags == ADSP_MMAP_HEAP_ADDR) {
2399 struct scm_desc desc = {0};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002400
c_mtharue1a5ce12017-10-13 20:47:09 +05302401 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302402 desc.args[1] = phys;
2403 desc.args[2] = size;
c_mtharue1a5ce12017-10-13 20:47:09 +05302404 desc.arginfo = SCM_ARGS(3);
2405 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
2406 TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
2407 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302408 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302409 hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
2410 me->channel[fl->cid].rhvm.vmperm,
2411 me->channel[fl->cid].rhvm.vmcount));
c_mtharue1a5ce12017-10-13 20:47:09 +05302412 if (err)
2413 goto bail;
2414 }
2415bail:
2416 return err;
2417}
2418
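/*
 * Undo the protection applied when a heap mapping was created: fetch the
 * secure key from the DSP and clear the TZ protection for ADSP_MMAP_HEAP_ADDR,
 * or hyp-assign ADSP_MMAP_REMOTE_HEAP_ADDR memory back to HLOS.
 */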
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302419static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, uint64_t phys,
2420 size_t size, uint32_t flags)
c_mtharue1a5ce12017-10-13 20:47:09 +05302421{
2422 int err = 0;
c_mtharu63ffc012017-11-16 15:26:56 +05302423 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +05302424 int destVM[1] = {VMID_HLOS};
2425 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
2426
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302427 if (flags == ADSP_MMAP_HEAP_ADDR) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302428 struct fastrpc_ioctl_invoke_crc ioctl;
2429 struct scm_desc desc = {0};
2430 remote_arg_t ra[1];
2431 int err = 0;
2432 struct {
2433 uint8_t skey;
2434 } routargs;
2435
2436 ra[0].buf.pv = (void *)&routargs;
2437 ra[0].buf.len = sizeof(routargs);
2438
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302439 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
c_mtharue1a5ce12017-10-13 20:47:09 +05302440 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
2441 ioctl.inv.pra = ra;
2442 ioctl.fds = NULL;
2443 ioctl.attrs = NULL;
2444 ioctl.crc = NULL;
2445 if (fl == NULL)
2446 goto bail;
2447
2448 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2449 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2450 if (err)
2451 goto bail;
2452 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302453 desc.args[1] = phys;
2454 desc.args[2] = size;
c_mtharue1a5ce12017-10-13 20:47:09 +05302455 desc.args[3] = routargs.skey;
2456 desc.arginfo = SCM_ARGS(4);
2457 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
2458 TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302459 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2460 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302461 me->channel[fl->cid].rhvm.vmid,
2462 me->channel[fl->cid].rhvm.vmcount,
2463 destVM, destVMperm, 1));
c_mtharue1a5ce12017-10-13 20:47:09 +05302464 if (err)
2465 goto bail;
2466 }
2467
2468bail:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002469 return err;
2470}
2471
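/*
 * Remote munmap: ask the DSP to drop the mapping at raddr, then perform the
 * heap-specific cleanup above for heap and remote-heap flags.
 */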
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302472static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl, uintptr_t raddr,
2473 uint64_t phys, size_t size, uint32_t flags)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002474{
Sathish Ambleybae51902017-07-03 15:00:49 -07002475 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002476 remote_arg_t ra[1];
2477 int err = 0;
2478 struct {
2479 int pid;
2480 uintptr_t vaddrout;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302481 size_t size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002482 } inargs;
2483
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302484 inargs.pid = fl->tgid;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302485 inargs.size = size;
2486 inargs.vaddrout = raddr;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002487 ra[0].buf.pv = (void *)&inargs;
2488 ra[0].buf.len = sizeof(inargs);
2489
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302490 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002491 if (fl->apps->compat)
2492 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
2493 else
2494 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
2495 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302496 ioctl.fds = NULL;
2497 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002498 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002499 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2500 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
c_mtharue1a5ce12017-10-13 20:47:09 +05302501 if (err)
2502 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302503 if (flags == ADSP_MMAP_HEAP_ADDR ||
2504 flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2505 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, phys, size, flags));
c_mtharue1a5ce12017-10-13 20:47:09 +05302506 if (err)
2507 goto bail;
2508 }
2509bail:
2510 return err;
2511}
2512
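/*
 * Subsystem-restart cleanup: pull each entry off the global map list, release
 * its remote-heap protection, optionally capture an ELF ramdump of the region,
 * and free the map.
 */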
2513static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
2514{
2515 struct fastrpc_mmap *match = NULL, *map = NULL;
2516 struct hlist_node *n = NULL;
2517 int err = 0, ret = 0;
2518 struct fastrpc_apps *me = &gfa;
2519 struct ramdump_segment *ramdump_segments_rh = NULL;
2520
2521 do {
2522 match = NULL;
2523 spin_lock(&me->hlock);
2524 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
2525 match = map;
2526 hlist_del_init(&map->hn);
2527 break;
2528 }
2529 spin_unlock(&me->hlock);
2530
2531 if (match) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302532 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match->phys,
2533 match->size, match->flags));
c_mtharue1a5ce12017-10-13 20:47:09 +05302534 if (err)
2535 goto bail;
2536 if (me->channel[0].ramdumpenabled) {
2537 ramdump_segments_rh = kcalloc(1,
2538 sizeof(struct ramdump_segment), GFP_KERNEL);
2539 if (ramdump_segments_rh) {
2540 ramdump_segments_rh->address =
2541 match->phys;
2542 ramdump_segments_rh->size = match->size;
2543 ret = do_elf_ramdump(
2544 me->channel[0].remoteheap_ramdump_dev,
2545 ramdump_segments_rh, 1);
2546 if (ret < 0)
2547 pr_err("ADSPRPC: unable to dump heap");
2548 kfree(ramdump_segments_rh);
2549 }
2550 }
c_mtharu7bd6a422017-10-17 18:15:37 +05302551 fastrpc_mmap_free(match, 0);
c_mtharue1a5ce12017-10-13 20:47:09 +05302552 }
2553 } while (match);
2554bail:
2555 if (err && match)
2556 fastrpc_mmap_add(match);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002557 return err;
2558}
2559
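/*
 * Protection-domain-restart cleanup for the audio PD: if the PD has restarted
 * since the last check, drop stale remote-heap mappings, and fail with
 * -ENOTCONN while the PD is down.
 */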
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302560static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl)
2561{
2562 struct fastrpc_apps *me = &gfa;
2563 int session = 0, err = 0;
2564
2565 VERIFY(err, !fastrpc_get_adsp_session(
2566 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
2567 if (err)
2568 goto bail;
2569 if (me->channel[fl->cid].spd[session].pdrcount !=
2570 me->channel[fl->cid].spd[session].prevpdrcount) {
2571 if (fastrpc_mmap_remove_ssr(fl))
2572 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
2573 me->channel[fl->cid].spd[session].prevpdrcount =
2574 me->channel[fl->cid].spd[session].pdrcount;
2575 }
2576 if (!me->channel[fl->cid].spd[session].ispdup) {
2577 VERIFY(err, 0);
2578 if (err) {
2579 err = -ENOTCONN;
2580 goto bail;
2581 }
2582 }
2583bail:
2584 return err;
2585}
2586
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002587static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302588 size_t len, struct fastrpc_mmap **ppmap);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002589
2590static void fastrpc_mmap_add(struct fastrpc_mmap *map);
2591
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05302592static inline void get_fastrpc_ioctl_mmap_64(
2593 struct fastrpc_ioctl_mmap_64 *mmap64,
2594 struct fastrpc_ioctl_mmap *immap)
2595{
2596 immap->fd = mmap64->fd;
2597 immap->flags = mmap64->flags;
2598 immap->vaddrin = (uintptr_t)mmap64->vaddrin;
2599 immap->size = mmap64->size;
2600}
2601
2602static inline void put_fastrpc_ioctl_mmap_64(
2603 struct fastrpc_ioctl_mmap_64 *mmap64,
2604 struct fastrpc_ioctl_mmap *immap)
2605{
2606 mmap64->vaddrout = (uint64_t)immap->vaddrout;
2607}
2608
2609static inline void get_fastrpc_ioctl_munmap_64(
2610 struct fastrpc_ioctl_munmap_64 *munmap64,
2611 struct fastrpc_ioctl_munmap *imunmap)
2612{
2613 imunmap->vaddrout = (uintptr_t)munmap64->vaddrout;
2614 imunmap->size = munmap64->size;
2615}
2616
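/*
 * Handle FASTRPC_IOCTL_MUNMAP: first look for a kernel-allocated
 * ADSP_MMAP_ADD_PAGES buffer matching the address and size; otherwise remove
 * the user mapping, unmap it on the DSP and free it, restoring the map on
 * failure.
 */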
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002617static int fastrpc_internal_munmap(struct fastrpc_file *fl,
2618 struct fastrpc_ioctl_munmap *ud)
2619{
2620 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302621 struct fastrpc_mmap *map = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302622 struct fastrpc_buf *rbuf = NULL, *free = NULL;
2623 struct hlist_node *n;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002624
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302625 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302626
2627 spin_lock(&fl->hlock);
2628 hlist_for_each_entry_safe(rbuf, n, &fl->remote_bufs, hn_rem) {
2629 if (rbuf->raddr && (rbuf->flags == ADSP_MMAP_ADD_PAGES)) {
2630 if ((rbuf->raddr == ud->vaddrout) &&
2631 (rbuf->size == ud->size)) {
2632 free = rbuf;
2633 break;
2634 }
2635 }
2636 }
2637 spin_unlock(&fl->hlock);
2638
2639 if (free) {
2640 VERIFY(err, !fastrpc_munmap_on_dsp(fl, free->raddr,
2641 free->phys, free->size, free->flags));
2642 if (err)
2643 goto bail;
2644 fastrpc_buf_free(rbuf, 0);
2645 mutex_unlock(&fl->map_mutex);
2646 return err;
2647 }
2648
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302649 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002650 VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302651 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002652 if (err)
2653 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302654 VERIFY(err, !fastrpc_munmap_on_dsp(fl, map->raddr,
2655 map->phys, map->size, map->flags));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002656 if (err)
2657 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302658 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302659 fastrpc_mmap_free(map, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302660 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002661bail:
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302662 if (err && map) {
2663 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002664 fastrpc_mmap_add(map);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302665 mutex_unlock(&fl->fl_map_mutex);
2666 }
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302667 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002668 return err;
2669}
2670
c_mtharu7bd6a422017-10-17 18:15:37 +05302671static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
2672 struct fastrpc_ioctl_munmap_fd *ud) {
2673 int err = 0;
2674 struct fastrpc_mmap *map = NULL;
2675
2676 VERIFY(err, (fl && ud));
2677 if (err)
2678 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302679 mutex_lock(&fl->fl_map_mutex);
Tharun Kumar Merugu09fc6152018-02-16 13:13:12 +05302680 if (fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
2681 pr_err("adsprpc: mapping not found to unmap %d va %llx %x\n",
c_mtharu7bd6a422017-10-17 18:15:37 +05302682 ud->fd, (unsigned long long)ud->va,
2683 (unsigned int)ud->len);
2684 err = -1;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302685 mutex_unlock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302686 goto bail;
2687 }
2688 if (map)
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302689 fastrpc_mmap_free(map, 0);
2690 mutex_unlock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302691bail:
2692 return err;
2693}
2694
2695
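/*
 * Handle FASTRPC_IOCTL_MMAP: for ADSP_MMAP_ADD_PAGES allocate a kernel buffer
 * and register it with the DSP; otherwise reuse or create a fastrpc_mmap for
 * the caller's buffer and map it remotely, returning the DSP-side address in
 * vaddrout.
 */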
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002696static int fastrpc_internal_mmap(struct fastrpc_file *fl,
2697 struct fastrpc_ioctl_mmap *ud)
2698{
2699
c_mtharue1a5ce12017-10-13 20:47:09 +05302700 struct fastrpc_mmap *map = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302701 struct fastrpc_buf *rbuf = NULL;
2702 unsigned long dma_attr = 0;
2703 uintptr_t raddr = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002704 int err = 0;
2705
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302706 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302707 if (ud->flags == ADSP_MMAP_ADD_PAGES) {
2708 if (ud->vaddrin) {
2709 err = -EINVAL;
2710 pr_err("adsprpc: %s: %s: ERROR: adding user allocated pages is not supported\n",
2711 current->comm, __func__);
2712 goto bail;
2713 }
2714 dma_attr = DMA_ATTR_EXEC_MAPPING |
2715 DMA_ATTR_NO_KERNEL_MAPPING |
2716 DMA_ATTR_FORCE_NON_COHERENT;
2717 err = fastrpc_buf_alloc(fl, ud->size, dma_attr, ud->flags,
2718 1, &rbuf);
2719 if (err)
2720 goto bail;
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302721 err = fastrpc_mmap_on_dsp(fl, ud->flags, 0,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302722 rbuf->phys, rbuf->size, &raddr);
2723 if (err)
2724 goto bail;
2725 rbuf->raddr = raddr;
2726 } else {
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302727
2728 uintptr_t va_to_dsp;
2729
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302730 mutex_lock(&fl->fl_map_mutex);
2731 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
2732 ud->size, ud->flags, 1, &map)) {
2733 mutex_unlock(&fl->fl_map_mutex);
2734 mutex_unlock(&fl->map_mutex);
2735 return 0;
2736 }
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302737
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302738 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
2739 (uintptr_t)ud->vaddrin, ud->size,
2740 ud->flags, &map));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302741 mutex_unlock(&fl->fl_map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302742 if (err)
2743 goto bail;
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302744
2745 if (ud->flags == ADSP_MMAP_HEAP_ADDR ||
2746 ud->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
2747 va_to_dsp = 0;
2748 else
2749 va_to_dsp = (uintptr_t)map->va;
2750 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, va_to_dsp,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302751 map->phys, map->size, &raddr));
2752 if (err)
2753 goto bail;
2754 map->raddr = raddr;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302755 }
Mohammed Nayeem Ur Rahman3ac5d322018-09-24 13:54:08 +05302756 ud->vaddrout = raddr;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002757 bail:
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302758 if (err && map) {
2759 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302760 fastrpc_mmap_free(map, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302761 mutex_unlock(&fl->fl_map_mutex);
2762 }
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302763 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002764 return err;
2765}
2766
2767static void fastrpc_channel_close(struct kref *kref)
2768{
2769 struct fastrpc_apps *me = &gfa;
2770 struct fastrpc_channel_ctx *ctx;
2771 int cid;
2772
2773 ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
2774 cid = ctx - &gcinfo[0];
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05302775 if (!me->glink)
2776 smd_close(ctx->chan);
2777 else
2778 fastrpc_glink_close(ctx->chan, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05302779 ctx->chan = NULL;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302780 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002781 pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
2782 MAJOR(me->dev_no), cid);
2783}
2784
2785static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
2786
2787static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302788 int secure, int sharedcb, struct fastrpc_session_ctx **session)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002789{
2790 struct fastrpc_apps *me = &gfa;
2791 int idx = 0, err = 0;
2792
2793 if (chan->sesscount) {
2794 for (idx = 0; idx < chan->sesscount; ++idx) {
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302795 if ((sharedcb && chan->session[idx].smmu.sharedcb) ||
2796 (!chan->session[idx].used &&
2797 chan->session[idx].smmu.secure
2798 == secure && !sharedcb)) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002799 chan->session[idx].used = 1;
2800 break;
2801 }
2802 }
2803 VERIFY(err, idx < chan->sesscount);
2804 if (err)
2805 goto bail;
2806 chan->session[idx].smmu.faults = 0;
2807 } else {
2808 VERIFY(err, me->dev != NULL);
2809 if (err)
2810 goto bail;
2811 chan->session[0].dev = me->dev;
c_mtharue1a5ce12017-10-13 20:47:09 +05302812 chan->session[0].smmu.dev = me->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002813 }
2814
2815 *session = &chan->session[idx];
2816 bail:
2817 return err;
2818}
2819
c_mtharue1a5ce12017-10-13 20:47:09 +05302820static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
2821 size_t size)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002822{
2823 if (glink_queue_rx_intent(h, NULL, size))
2824 return false;
2825 return true;
2826}
2827
c_mtharue1a5ce12017-10-13 20:47:09 +05302828static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002829 const void *pkt_priv, const void *ptr)
2830{
2831}
2832
c_mtharue1a5ce12017-10-13 20:47:09 +05302833static void fastrpc_glink_notify_rx(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002834 const void *pkt_priv, const void *ptr, size_t size)
2835{
2836 struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302837 struct fastrpc_apps *me = &gfa;
2838 uint32_t index;
c_mtharufdac6892017-10-12 13:09:01 +05302839 int err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002840
c_mtharufdac6892017-10-12 13:09:01 +05302841 VERIFY(err, (rsp && size >= sizeof(*rsp)));
2842 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302843 goto bail;
2844
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302845 index = (uint32_t)((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
2846 VERIFY(err, index < FASTRPC_CTX_MAX);
c_mtharufdac6892017-10-12 13:09:01 +05302847 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302848 goto bail;
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302849
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302850 VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
2851 if (err)
2852 goto bail;
2853
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302854 VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp->ctx & ~3)) &&
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302855 me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
2856 if (err)
2857 goto bail;
2858
2859 context_notify_user(me->ctxtable[index], rsp->retval);
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302860bail:
c_mtharufdac6892017-10-12 13:09:01 +05302861 if (err)
2862 pr_err("adsprpc: invalid response or context\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002863 glink_rx_done(handle, ptr, true);
2864}
2865
c_mtharue1a5ce12017-10-13 20:47:09 +05302866static void fastrpc_glink_notify_state(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002867 unsigned int event)
2868{
2869 struct fastrpc_apps *me = &gfa;
2870 int cid = (int)(uintptr_t)priv;
2871 struct fastrpc_glink_info *link;
2872
2873 if (cid < 0 || cid >= NUM_CHANNELS)
2874 return;
2875 link = &me->channel[cid].link;
2876 switch (event) {
2877 case GLINK_CONNECTED:
2878 link->port_state = FASTRPC_LINK_CONNECTED;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302879 complete(&me->channel[cid].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002880 break;
2881 case GLINK_LOCAL_DISCONNECTED:
2882 link->port_state = FASTRPC_LINK_DISCONNECTED;
2883 break;
2884 case GLINK_REMOTE_DISCONNECTED:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002885 break;
2886 default:
2887 break;
2888 }
2889}
2890
2891static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
2892 struct fastrpc_session_ctx **session)
2893{
2894 int err = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302895 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002896
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302897 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002898 if (!*session)
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302899 err = fastrpc_session_alloc_locked(chan, secure, 0, session);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302900 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002901 return err;
2902}
2903
2904static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
2905 struct fastrpc_session_ctx *session)
2906{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302907 struct fastrpc_apps *me = &gfa;
2908
2909 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002910 session->used = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302911 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002912}
2913
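/*
 * Tear down all per-fd state: release the DSP-side process, free the init
 * memory, pending contexts and cached buffers, unmap every remaining mmap,
 * free the perf counters, drop the channel reference and return the SMMU
 * session(s).
 */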
2914static int fastrpc_file_free(struct fastrpc_file *fl)
2915{
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302916 struct hlist_node *n = NULL;
Tharun Kumar Merugu3e966762018-04-04 10:56:44 +05302917 struct fastrpc_mmap *map = NULL, *lmap = NULL;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302918 struct fastrpc_perf *perf = NULL, *fperf = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002919 int cid;
2920
2921 if (!fl)
2922 return 0;
2923 cid = fl->cid;
2924
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302925 (void)fastrpc_release_current_dsp_process(fl);
2926
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002927 spin_lock(&fl->apps->hlock);
2928 hlist_del_init(&fl->hn);
2929 spin_unlock(&fl->apps->hlock);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05302930 kfree(fl->debug_buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002931
Sathish Ambleyd7fbcbb2017-03-08 10:55:48 -08002932 if (!fl->sctx) {
2933 kfree(fl);
2934 return 0;
2935 }
tharun kumar9f899ea2017-07-03 17:07:03 +05302936 spin_lock(&fl->hlock);
2937 fl->file_close = 1;
2938 spin_unlock(&fl->hlock);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302939 if (!IS_ERR_OR_NULL(fl->init_mem))
2940 fastrpc_buf_free(fl->init_mem, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002941 fastrpc_context_list_dtor(fl);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302942 fastrpc_cached_buf_list_free(fl);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302943 mutex_lock(&fl->fl_map_mutex);
Tharun Kumar Merugu3e966762018-04-04 10:56:44 +05302944 do {
2945 lmap = NULL;
2946 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
2947 hlist_del_init(&map->hn);
2948 lmap = map;
2949 break;
2950 }
2951 fastrpc_mmap_free(lmap, 1);
2952 } while (lmap);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302953 mutex_unlock(&fl->fl_map_mutex);
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05302954 if (fl->refcount && (fl->ssrcount == fl->apps->channel[cid].ssrcount))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002955 kref_put_mutex(&fl->apps->channel[cid].kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302956 fastrpc_channel_close, &fl->apps->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002957 if (fl->sctx)
2958 fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
2959 if (fl->secsctx)
2960 fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302961
2962 mutex_lock(&fl->perf_mutex);
2963 do {
2964 struct hlist_node *pn = NULL;
2965
2966 fperf = NULL;
2967 hlist_for_each_entry_safe(perf, pn, &fl->perf, hn) {
2968 hlist_del_init(&perf->hn);
2969 fperf = perf;
2970 break;
2971 }
2972 kfree(fperf);
2973 } while (fperf);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302974 fastrpc_remote_buf_list_free(fl);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302975 mutex_unlock(&fl->perf_mutex);
2976 mutex_destroy(&fl->perf_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302977 mutex_destroy(&fl->fl_map_mutex);
Tharun Kumar Merugu8714e642018-05-17 15:21:08 +05302978 mutex_destroy(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002979 kfree(fl);
2980 return 0;
2981}
2982
2983static int fastrpc_device_release(struct inode *inode, struct file *file)
2984{
2985 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2986
2987 if (fl) {
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302988 if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
2989 pm_qos_remove_request(&fl->pm_qos_req);
Sathish Ambley1ca68232017-01-19 10:32:55 -08002990 if (fl->debugfs_file != NULL)
2991 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002992 fastrpc_file_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +05302993 file->private_data = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002994 }
2995 return 0;
2996}
2997
2998static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
2999 void *priv)
3000{
3001 struct fastrpc_apps *me = &gfa;
3002 int cid = (int)((uintptr_t)priv);
3003 struct fastrpc_glink_info *link;
3004
3005 if (cid < 0 || cid >= NUM_CHANNELS)
3006 return;
3007
3008 link = &me->channel[cid].link;
3009 switch (cb_info->link_state) {
3010 case GLINK_LINK_STATE_UP:
3011 link->link_state = FASTRPC_LINK_STATE_UP;
3012 complete(&me->channel[cid].work);
3013 break;
3014 case GLINK_LINK_STATE_DOWN:
3015 link->link_state = FASTRPC_LINK_STATE_DOWN;
3016 break;
3017 default:
3018 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
3019 break;
3020 }
3021}
3022
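/*
 * Register for GLINK link-state callbacks on this channel and wait up to
 * RPC_TIMEOUT for the link to come up before the port is opened.
 */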
3023static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
3024{
3025 int err = 0;
3026 struct fastrpc_glink_info *link;
3027
3028 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3029 if (err)
3030 goto bail;
3031
3032 link = &me->channel[cid].link;
3033 if (link->link_notify_handle != NULL)
3034 goto bail;
3035
3036 link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
3037 link->link_notify_handle = glink_register_link_state_cb(
3038 &link->link_info,
3039 (void *)((uintptr_t)cid));
3040 VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
3041 if (err) {
3042 link->link_notify_handle = NULL;
3043 goto bail;
3044 }
3045 VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
3046 RPC_TIMEOUT));
3047bail:
3048 return err;
3049}
3050
3051static void fastrpc_glink_close(void *chan, int cid)
3052{
3053 int err = 0;
3054 struct fastrpc_glink_info *link;
3055
3056 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3057 if (err)
3058 return;
3059 link = &gfa.channel[cid].link;
3060
c_mtharu314a4202017-11-15 22:09:17 +05303061 if (link->port_state == FASTRPC_LINK_CONNECTED ||
3062 link->port_state == FASTRPC_LINK_REMOTE_DISCONNECTING) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003063 link->port_state = FASTRPC_LINK_DISCONNECTING;
3064 glink_close(chan);
3065 }
3066}
3067
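/*
 * Open the GLINK port for a channel once the link is up: fill in the
 * glink_open_config with the per-channel edge/transport and the notify
 * callbacks above, then stash the returned handle in me->channel[cid].chan.
 */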
3068static int fastrpc_glink_open(int cid)
3069{
3070 int err = 0;
3071 void *handle = NULL;
3072 struct fastrpc_apps *me = &gfa;
3073 struct glink_open_config *cfg;
3074 struct fastrpc_glink_info *link;
3075
3076 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3077 if (err)
3078 goto bail;
3079 link = &me->channel[cid].link;
3080 cfg = &me->channel[cid].link.cfg;
3081 VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
3082 if (err)
3083 goto bail;
3084
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05303085 VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
3086 if (err)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003087 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003088
3089 link->port_state = FASTRPC_LINK_CONNECTING;
3090 cfg->priv = (void *)(uintptr_t)cid;
3091 cfg->edge = gcinfo[cid].link.link_info.edge;
3092 cfg->transport = gcinfo[cid].link.link_info.transport;
3093 cfg->name = FASTRPC_GLINK_GUID;
3094 cfg->notify_rx = fastrpc_glink_notify_rx;
3095 cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
3096 cfg->notify_state = fastrpc_glink_notify_state;
3097 cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
3098 handle = glink_open(cfg);
3099 VERIFY(err, !IS_ERR_OR_NULL(handle));
c_mtharu6e1d26b2017-10-09 16:05:24 +05303100 if (err) {
3101 if (link->port_state == FASTRPC_LINK_CONNECTING)
3102 link->port_state = FASTRPC_LINK_DISCONNECTED;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003103 goto bail;
c_mtharu6e1d26b2017-10-09 16:05:24 +05303104 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003105 me->channel[cid].chan = handle;
3106bail:
3107 return err;
3108}
3109
Sathish Ambley1ca68232017-01-19 10:32:55 -08003110static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
3111{
3112 filp->private_data = inode->i_private;
3113 return 0;
3114}
3115
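/*
 * debugfs read handler. The "global" node (no fastrpc_file attached) dumps
 * per-channel refcounts, the CMA range and global maps; a per-process node
 * dumps that file's session attributes, maps, cached buffers and
 * pending/interrupted contexts. Output is clamped to DEBUGFS_SIZE.
 */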
3116static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
3117 size_t count, loff_t *position)
3118{
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303119 struct fastrpc_apps *me = &gfa;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003120 struct fastrpc_file *fl = filp->private_data;
3121 struct hlist_node *n;
c_mtharue1a5ce12017-10-13 20:47:09 +05303122 struct fastrpc_buf *buf = NULL;
3123 struct fastrpc_mmap *map = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303124 struct fastrpc_mmap *gmaps = NULL;
c_mtharue1a5ce12017-10-13 20:47:09 +05303125 struct smq_invoke_ctx *ictx = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303126 struct fastrpc_channel_ctx *chan = NULL;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003127 unsigned int len = 0;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303128 int i, j, sess_used = 0, ret = 0;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003129 char *fileinfo = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303130 char single_line[UL_SIZE] = "----------------";
3131 char title[UL_SIZE] = "=========================";
Sathish Ambley1ca68232017-01-19 10:32:55 -08003132
3133 fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
3134 if (!fileinfo)
3135 goto bail;
3136 if (fl == NULL) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303137 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3138 "\n%s %s %s\n", title, " CHANNEL INFO ", title);
3139 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3140 "%-8s|%-9s|%-9s|%-14s|%-9s|%-13s\n",
3141 "susbsys", "refcount", "sesscount", "issubsystemup",
3142 "ssrcount", "session_used");
3143 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3144 "-%s%s%s%s-\n", single_line, single_line,
3145 single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003146 for (i = 0; i < NUM_CHANNELS; i++) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303147 sess_used = 0;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003148 chan = &gcinfo[i];
3149 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303150 DEBUGFS_SIZE - len, "%-8s", chan->subsys);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003151 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303152 DEBUGFS_SIZE - len, "|%-9d",
3153 chan->kref.refcount.counter);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303154 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303155 DEBUGFS_SIZE - len, "|%-9d",
3156 chan->sesscount);
3157 len += scnprintf(fileinfo + len,
3158 DEBUGFS_SIZE - len, "|%-14d",
3159 chan->issubsystemup);
3160 len += scnprintf(fileinfo + len,
3161 DEBUGFS_SIZE - len, "|%-9d",
3162 chan->ssrcount);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003163 for (j = 0; j < chan->sesscount; j++) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303164 sess_used += chan->session[j].used;
3165 }
3166 len += scnprintf(fileinfo + len,
3167 DEBUGFS_SIZE - len, "|%-13d\n", sess_used);
3168
3169 }
3170 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3171 "\n%s%s%s\n", "=============",
3172 " CMA HEAP ", "==============");
3173 len += scnprintf(fileinfo + len,
3174 DEBUGFS_SIZE - len, "%-20s|%-20s\n", "addr", "size");
3175 len += scnprintf(fileinfo + len,
3176 DEBUGFS_SIZE - len, "--%s%s---\n",
3177 single_line, single_line);
3178 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3179 "0x%-18llX", me->range.addr);
3180 len += scnprintf(fileinfo + len,
3181 DEBUGFS_SIZE - len, "|0x%-18llX\n", me->range.size);
3182 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3183 "\n==========%s %s %s===========\n",
3184 title, " GMAPS ", title);
3185 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3186 "%-20s|%-20s|%-20s|%-20s\n",
3187 "fd", "phys", "size", "va");
3188 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3189 "%s%s%s%s%s\n", single_line, single_line,
3190 single_line, single_line, single_line);
3191 hlist_for_each_entry_safe(gmaps, n, &me->maps, hn) {
3192 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3193 "%-20d|0x%-18llX|0x%-18X|0x%-20lX\n\n",
3194 gmaps->fd, gmaps->phys,
3195 (uint32_t)gmaps->size,
3196 gmaps->va);
3197 }
3198 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3199 "%-20s|%-20s|%-20s|%-20s\n",
3200 "len", "refs", "raddr", "flags");
3201 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3202 "%s%s%s%s%s\n", single_line, single_line,
3203 single_line, single_line, single_line);
3204 hlist_for_each_entry_safe(gmaps, n, &me->maps, hn) {
3205 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3206 "0x%-18X|%-20d|%-20lu|%-20u\n",
3207 (uint32_t)gmaps->len, gmaps->refs,
3208 gmaps->raddr, gmaps->flags);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003209 }
3210 } else {
3211 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303212 "\n%s %13s %d\n", "cid", ":", fl->cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003213 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303214 "%s %12s %d\n", "tgid", ":", fl->tgid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003215 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303216 "%s %7s %d\n", "sessionid", ":", fl->sessionid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003217 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303218 "%s %8s %d\n", "ssrcount", ":", fl->ssrcount);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303219 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303220 "%s %8s %d\n", "refcount", ":", fl->refcount);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003221 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303222 "%s %14s %d\n", "pd", ":", fl->pd);
3223 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3224 "%s %9s %s\n", "spdname", ":", fl->spdname);
3225 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3226 "%s %6s %d\n", "file_close", ":", fl->file_close);
3227 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3228 "%s %8s %d\n", "sharedcb", ":", fl->sharedcb);
3229 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3230 "%s %9s %d\n", "profile", ":", fl->profile);
3231 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3232 "%s %3s %d\n", "smmu.coherent", ":",
3233 fl->sctx->smmu.coherent);
3234 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3235 "%s %4s %d\n", "smmu.enabled", ":",
3236 fl->sctx->smmu.enabled);
3237 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3238 "%s %9s %d\n", "smmu.cb", ":", fl->sctx->smmu.cb);
3239 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3240 "%s %5s %d\n", "smmu.secure", ":",
3241 fl->sctx->smmu.secure);
3242 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3243 "%s %5s %d\n", "smmu.faults", ":",
3244 fl->sctx->smmu.faults);
3245 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3246 "%s %s %d\n", "link.link_state",
3247 ":", *&me->channel[fl->cid].link.link_state);
3248
3249 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3250 "\n=======%s %s %s======\n", title,
3251 " LIST OF MAPS ", title);
3252
3253 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3254 "%-20s|%-20s|%-20s\n", "va", "phys", "size");
3255 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3256 "%s%s%s%s%s\n",
3257 single_line, single_line, single_line,
3258 single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003259 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303260 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3261 "0x%-20lX|0x%-20llX|0x%-20zu\n\n",
3262 map->va, map->phys,
3263 map->size);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003264 }
3265 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303266 "%-20s|%-20s|%-20s|%-20s\n",
3267 "len", "refs",
3268 "raddr", "uncached");
3269 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3270 "%s%s%s%s%s\n",
3271 single_line, single_line, single_line,
3272 single_line, single_line);
3273 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
3274 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3275 "%-20zu|%-20d|0x%-20lX|%-20d\n\n",
3276 map->len, map->refs, map->raddr,
3277 map->uncached);
3278 }
3279 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3280 "%-20s|%-20s\n", "secure", "attr");
3281 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3282 "%s%s%s%s%s\n",
3283 single_line, single_line, single_line,
3284 single_line, single_line);
3285 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
3286 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3287 "%-20d|0x%-20lX\n\n",
3288 map->secure, map->attr);
3289 }
3290 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303291 "%s %d\n\n",
3292 "KERNEL MEMORY ALLOCATION:", 1);
3293 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303294 "\n======%s %s %s======\n", title,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303295 " LIST OF CACHED BUFS ", title);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303296 spin_lock(&fl->hlock);
3297 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303298 "%-19s|%-19s|%-19s|%-19s\n",
3299 "virt", "phys", "size", "dma_attr");
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303300 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3301 "%s%s%s%s%s\n", single_line, single_line,
3302 single_line, single_line, single_line);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303303 hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303304 len += scnprintf(fileinfo + len,
3305 DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303306 "0x%-17p|0x%-17llX|%-19zu|0x%-17lX\n",
3307 buf->virt, (uint64_t)buf->phys, buf->size,
3308 buf->dma_attr);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303309 }
3310 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3311 "\n%s %s %s\n", title,
3312 " LIST OF PENDING SMQCONTEXTS ", title);
3313
3314 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3315 "%-20s|%-10s|%-10s|%-10s|%-20s\n",
3316 "sc", "pid", "tgid", "used", "ctxid");
3317 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3318 "%s%s%s%s%s\n", single_line, single_line,
3319 single_line, single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003320 hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
3321 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303322 "0x%-18X|%-10d|%-10d|%-10zu|0x%-20llX\n\n",
3323 ictx->sc, ictx->pid, ictx->tgid,
3324 ictx->used, ictx->ctxid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003325 }
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303326
Sathish Ambley1ca68232017-01-19 10:32:55 -08003327 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303328 "\n%s %s %s\n", title,
3329 " LIST OF INTERRUPTED SMQCONTEXTS ", title);
3330
3331 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3332 "%-20s|%-10s|%-10s|%-10s|%-20s\n",
3333 "sc", "pid", "tgid", "used", "ctxid");
3334 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3335 "%s%s%s%s%s\n", single_line, single_line,
3336 single_line, single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003337 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303338 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3339 "%-20u|%-20d|%-20d|%-20zu|0x%-20llX\n\n",
3340 ictx->sc, ictx->pid, ictx->tgid,
3341 ictx->used, ictx->ctxid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003342 }
3343 spin_unlock(&fl->hlock);
3344 }
3345 if (len > DEBUGFS_SIZE)
3346 len = DEBUGFS_SIZE;
3347 ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
3348 kfree(fileinfo);
3349bail:
3350 return ret;
3351}
3352
3353static const struct file_operations debugfs_fops = {
3354 .open = fastrpc_debugfs_open,
3355 .read = fastrpc_debugfs_read,
3356};
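/*
 * Bring up (or reuse) the remote transport for the file's channel: take a
 * channel reference, open the GLINK port or legacy SMD edge on first use,
 * wait for the port to connect, queue RX intents and, after an ADSP SSR,
 * unmap the stale remote heap.
 */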
Sathish Ambley36849af2017-02-02 09:35:55 -08003357static int fastrpc_channel_open(struct fastrpc_file *fl)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003358{
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003359 struct fastrpc_apps *me = &gfa;
Tharun Kumar Meruguc42c6e22018-05-29 15:50:46 +05303360 int cid, ii, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003361
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303362 mutex_lock(&me->smd_mutex);
3363
Sathish Ambley36849af2017-02-02 09:35:55 -08003364 VERIFY(err, fl && fl->sctx);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003365 if (err)
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303366 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08003367 cid = fl->cid;
c_mtharu314a4202017-11-15 22:09:17 +05303368 VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
3369 if (err)
3370 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303371 if (me->channel[cid].ssrcount !=
3372 me->channel[cid].prevssrcount) {
3373 if (!me->channel[cid].issubsystemup) {
3374 VERIFY(err, 0);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303375 if (err) {
3376 err = -ENOTCONN;
c_mtharue1a5ce12017-10-13 20:47:09 +05303377 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303378 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303379 }
3380 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003381 fl->ssrcount = me->channel[cid].ssrcount;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303382 fl->refcount = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003383 if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
c_mtharue1a5ce12017-10-13 20:47:09 +05303384 (me->channel[cid].chan == NULL)) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303385 if (me->glink) {
3386 VERIFY(err, 0 == fastrpc_glink_register(cid, me));
3387 if (err)
3388 goto bail;
3389 VERIFY(err, 0 == fastrpc_glink_open(cid));
3390 } else {
3391 VERIFY(err, !smd_named_open_on_edge(FASTRPC_SMD_GUID,
3392 gcinfo[cid].channel,
3393 (smd_channel_t **)&me->channel[cid].chan,
3394 (void *)(uintptr_t)cid,
3395 smd_event_handler));
3396 }
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05303397 VERIFY(err,
3398 wait_for_completion_timeout(&me->channel[cid].workport,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003399 RPC_TIMEOUT));
3400 if (err) {
c_mtharue1a5ce12017-10-13 20:47:09 +05303401 me->channel[cid].chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003402 goto bail;
3403 }
3404 kref_init(&me->channel[cid].kref);
3405 pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
3406 MAJOR(me->dev_no), cid);
Tharun Kumar Meruguc42c6e22018-05-29 15:50:46 +05303407
3408 for (ii = 0; ii < FASTRPC_GLINK_INTENT_NUM && me->glink; ii++)
3409 glink_queue_rx_intent(me->channel[cid].chan, NULL,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303410 FASTRPC_GLINK_INTENT_LEN);
Tharun Kumar Meruguc42c6e22018-05-29 15:50:46 +05303411
Tharun Kumar Merugud86fc8c2018-01-04 16:35:31 +05303412 if (cid == 0 && me->channel[cid].ssrcount !=
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003413 me->channel[cid].prevssrcount) {
c_mtharue1a5ce12017-10-13 20:47:09 +05303414 if (fastrpc_mmap_remove_ssr(fl))
3415 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003416 me->channel[cid].prevssrcount =
3417 me->channel[cid].ssrcount;
3418 }
3419 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003420
3421bail:
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303422 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003423 return err;
3424}
3425
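/*
 * open() handler for both the default and the secure device node: allocate
 * the per-fd fastrpc_file, create a debugfs entry named "<comm>_<pid>",
 * initialise the map/buffer/context lists and mutexes, and defer channel
 * selection to the first FASTRPC_IOCTL_GETINFO (cid starts out as -1).
 */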
Sathish Ambley36849af2017-02-02 09:35:55 -08003426static int fastrpc_device_open(struct inode *inode, struct file *filp)
3427{
3428 int err = 0;
Sathish Ambley567012b2017-03-06 11:55:04 -08003429 struct dentry *debugfs_file;
c_mtharue1a5ce12017-10-13 20:47:09 +05303430 struct fastrpc_file *fl = NULL;
Sathish Ambley36849af2017-02-02 09:35:55 -08003431 struct fastrpc_apps *me = &gfa;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303432 char strpid[PID_SIZE];
3433 int buf_size = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003434
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303435 /*
3436 * Indicates which device node was opened:
3437 * MINOR_NUM_DEV or MINOR_NUM_SECURE_DEV
3438 */
3439 int dev_minor = MINOR(inode->i_rdev);
3440
3441 VERIFY(err, ((dev_minor == MINOR_NUM_DEV) ||
3442 (dev_minor == MINOR_NUM_SECURE_DEV)));
3443 if (err) {
3444 pr_err("adsprpc: Invalid dev minor num %d\n", dev_minor);
3445 return err;
3446 }
3447
c_mtharue1a5ce12017-10-13 20:47:09 +05303448 VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
Sathish Ambley36849af2017-02-02 09:35:55 -08003449 if (err)
3450 return err;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303451 snprintf(strpid, PID_SIZE, "%d", current->pid);
Mohammed Nayeem Ur Rahman2d65b4a2018-10-10 16:34:37 +05303452 buf_size = strlen(current->comm) + strlen("_") + strlen(strpid) + 1;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303453 fl->debug_buf = kzalloc(buf_size, GFP_KERNEL);
	if (!fl->debug_buf) {
		kfree(fl);
		return -ENOMEM;
	}
3454 snprintf(fl->debug_buf, buf_size, "%.10s%s%d",
3455 current->comm, "_", current->pid);
3456 debugfs_file = debugfs_create_file(fl->debug_buf, 0644,
3457 debugfs_root, fl, &debugfs_fops);
3458
Sathish Ambley36849af2017-02-02 09:35:55 -08003459 context_list_ctor(&fl->clst);
3460 spin_lock_init(&fl->hlock);
3461 INIT_HLIST_HEAD(&fl->maps);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303462 INIT_HLIST_HEAD(&fl->perf);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303463 INIT_HLIST_HEAD(&fl->cached_bufs);
3464 INIT_HLIST_HEAD(&fl->remote_bufs);
Sathish Ambley36849af2017-02-02 09:35:55 -08003465 INIT_HLIST_NODE(&fl->hn);
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05303466 fl->sessionid = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003467 fl->tgid = current->tgid;
3468 fl->apps = me;
3469 fl->mode = FASTRPC_MODE_SERIAL;
3470 fl->cid = -1;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303471 fl->dev_minor = dev_minor;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303472 fl->init_mem = NULL;
Sathish Ambley567012b2017-03-06 11:55:04 -08003473 if (debugfs_file != NULL)
3474 fl->debugfs_file = debugfs_file;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303475 fl->qos_request = 0;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303476 fl->refcount = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003477 filp->private_data = fl;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05303478 mutex_init(&fl->map_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303479 mutex_init(&fl->fl_map_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08003480 spin_lock(&me->hlock);
3481 hlist_add_head(&fl->hn, &me->drivers);
3482 spin_unlock(&me->hlock);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303483 mutex_init(&fl->perf_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08003484 return 0;
3485}
3486
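/*
 * The first GETINFO on an fd binds it to a channel: secure channels are
 * refused when the fd was opened on the non-secure node, the SSR count is
 * recorded, an SMMU session is allocated, and the caller is told whether
 * the SMMU is enabled for that session.
 */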
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003487static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
3488{
3489 int err = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003490 uint32_t cid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003491
c_mtharue1a5ce12017-10-13 20:47:09 +05303492 VERIFY(err, fl != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003493 if (err)
3494 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08003495 if (fl->cid == -1) {
3496 cid = *info;
3497 VERIFY(err, cid < NUM_CHANNELS);
3498 if (err)
3499 goto bail;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303500 /* Check to see if the device node is non-secure */
zhaochenfc798572018-08-17 15:32:37 +08003501 if (fl->dev_minor == MINOR_NUM_DEV &&
3502 fl->apps->secure_flag == true) {
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303503 /*
3504 * For non secure device node check and make sure that
3505 * the channel allows non-secure access
3506 * If not, bail. Session will not start.
3507 * cid will remain -1 and client will not be able to
3508 * invoke any other methods without failure
3509 */
3510 if (fl->apps->channel[cid].secure == SECURE_CHANNEL) {
3511 err = -EPERM;
3512 pr_err("adsprpc: GetInfo failed dev %d, cid %d, secure %d\n",
3513 fl->dev_minor, cid,
3514 fl->apps->channel[cid].secure);
3515 goto bail;
3516 }
3517 }
Sathish Ambley36849af2017-02-02 09:35:55 -08003518 fl->cid = cid;
3519 fl->ssrcount = fl->apps->channel[cid].ssrcount;
3520 VERIFY(err, !fastrpc_session_alloc_locked(
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303521 &fl->apps->channel[cid], 0, fl->sharedcb, &fl->sctx));
Sathish Ambley36849af2017-02-02 09:35:55 -08003522 if (err)
3523 goto bail;
3524 }
Tharun Kumar Merugu80be7d62017-08-02 11:03:22 +05303525 VERIFY(err, fl->sctx != NULL);
3526 if (err)
3527 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003528 *info = (fl->sctx->smmu.enabled ? 1 : 0);
3529bail:
3530 return err;
3531}
3532
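/*
 * FASTRPC_IOCTL_CONTROL backend: add or update a PM QoS CPU/DMA latency
 * request, select a shared context bank (non-legacy targets only), or
 * report that kernel buffer allocation is supported.
 */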
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303533static int fastrpc_internal_control(struct fastrpc_file *fl,
3534 struct fastrpc_ioctl_control *cp)
3535{
Mohammed Nayeem Ur Rahman18d633f2019-05-28 15:11:40 +05303536 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303537 int err = 0;
3538 int latency;
3539
3540 VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
3541 if (err)
3542 goto bail;
3543 VERIFY(err, !IS_ERR_OR_NULL(cp));
3544 if (err)
3545 goto bail;
3546
3547 switch (cp->req) {
3548 case FASTRPC_CONTROL_LATENCY:
3549 latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
3550 fl->apps->latency : PM_QOS_DEFAULT_VALUE;
3551 VERIFY(err, latency != 0);
3552 if (err)
3553 goto bail;
3554 if (!fl->qos_request) {
3555 pm_qos_add_request(&fl->pm_qos_req,
3556 PM_QOS_CPU_DMA_LATENCY, latency);
3557 fl->qos_request = 1;
3558 } else
3559 pm_qos_update_request(&fl->pm_qos_req, latency);
3560 break;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303561 case FASTRPC_CONTROL_SMMU:
Mohammed Nayeem Ur Rahman18d633f2019-05-28 15:11:40 +05303562 if (!me->legacy)
3563 fl->sharedcb = cp->smmu.sharedcb;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303564 break;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303565 case FASTRPC_CONTROL_KALLOC:
3566 cp->kalloc.kalloc_support = 1;
3567 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303568 default:
3569 err = -ENOTTY;
3570 break;
3571 }
3572bail:
3573 return err;
3574}
3575
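/*
 * Main ioctl dispatcher; new requests are refused once the fd is being
 * released (file_close set). A minimal userspace sketch of the usual call
 * order follows; it is illustrative only and assumes the uapi definitions
 * from adsprpc_shared.h and the non-secure device node name:
 *
 *	int fd = open("/dev/adsprpc-smd", O_RDWR);
 *	uint32_t domain = 0;			  // 0 == ADSP channel id
 *	ioctl(fd, FASTRPC_IOCTL_GETINFO, &domain);  // bind fd to the channel
 *	ioctl(fd, FASTRPC_IOCTL_SETMODE, FASTRPC_MODE_PARALLEL);
 *	// ...FASTRPC_IOCTL_INIT / FASTRPC_IOCTL_INVOKE as needed...
 *	close(fd);
 */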
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003576static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
3577 unsigned long ioctl_param)
3578{
3579 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07003580 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003581 struct fastrpc_ioctl_mmap mmap;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303582 struct fastrpc_ioctl_mmap_64 mmap64;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003583 struct fastrpc_ioctl_munmap munmap;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303584 struct fastrpc_ioctl_munmap_64 munmap64;
c_mtharu7bd6a422017-10-17 18:15:37 +05303585 struct fastrpc_ioctl_munmap_fd munmap_fd;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08003586 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003587 struct fastrpc_ioctl_perf perf;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303588 struct fastrpc_ioctl_control cp;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003589 } p;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303590 union {
3591 struct fastrpc_ioctl_mmap mmap;
3592 struct fastrpc_ioctl_munmap munmap;
3593 } i;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003594 void *param = (char *)ioctl_param;
3595 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
3596 int size = 0, err = 0;
3597 uint32_t info;
3598
c_mtharue1a5ce12017-10-13 20:47:09 +05303599 p.inv.fds = NULL;
3600 p.inv.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07003601 p.inv.crc = NULL;
tharun kumar9f899ea2017-07-03 17:07:03 +05303602 spin_lock(&fl->hlock);
3603 if (fl->file_close == 1) {
3604 err = -EBADF;
3605 pr_warn("ADSPRPC: fastrpc_device_release is happening, so not sending any new requests to DSP\n");
3606 spin_unlock(&fl->hlock);
3607 goto bail;
3608 }
3609 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003610
3611 switch (ioctl_num) {
3612 case FASTRPC_IOCTL_INVOKE:
3613 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07003614 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003615 case FASTRPC_IOCTL_INVOKE_FD:
3616 if (!size)
3617 size = sizeof(struct fastrpc_ioctl_invoke_fd);
3618 /* fall through */
3619 case FASTRPC_IOCTL_INVOKE_ATTRS:
3620 if (!size)
3621 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07003622 /* fall through */
3623 case FASTRPC_IOCTL_INVOKE_CRC:
3624 if (!size)
3625 size = sizeof(struct fastrpc_ioctl_invoke_crc);
c_mtharue1a5ce12017-10-13 20:47:09 +05303626 K_COPY_FROM_USER(err, 0, &p.inv, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003627 if (err)
3628 goto bail;
3629 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
3630 0, &p.inv)));
3631 if (err)
3632 goto bail;
3633 break;
3634 case FASTRPC_IOCTL_MMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303635 K_COPY_FROM_USER(err, 0, &p.mmap, param,
3636 sizeof(p.mmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303637 if (err)
3638 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003639 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
3640 if (err)
3641 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303642 K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003643 if (err)
3644 goto bail;
3645 break;
3646 case FASTRPC_IOCTL_MUNMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303647 K_COPY_FROM_USER(err, 0, &p.munmap, param,
3648 sizeof(p.munmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303649 if (err)
3650 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003651 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
3652 &p.munmap)));
3653 if (err)
3654 goto bail;
3655 break;
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303656 case FASTRPC_IOCTL_MMAP_64:
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303657 K_COPY_FROM_USER(err, 0, &p.mmap64, param,
3658 sizeof(p.mmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303659 if (err)
3660 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303661 get_fastrpc_ioctl_mmap_64(&p.mmap64, &i.mmap);
3662 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &i.mmap)));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303663 if (err)
3664 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303665 put_fastrpc_ioctl_mmap_64(&p.mmap64, &i.mmap);
3666 K_COPY_TO_USER(err, 0, param, &p.mmap64, sizeof(p.mmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303667 if (err)
3668 goto bail;
3669 break;
3670 case FASTRPC_IOCTL_MUNMAP_64:
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303671 K_COPY_FROM_USER(err, 0, &p.munmap64, param,
3672 sizeof(p.munmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303673 if (err)
3674 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303675 get_fastrpc_ioctl_munmap_64(&p.munmap64, &i.munmap);
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303676 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303677 &i.munmap)));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303678 if (err)
3679 goto bail;
3680 break;
c_mtharu7bd6a422017-10-17 18:15:37 +05303681 case FASTRPC_IOCTL_MUNMAP_FD:
3682 K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
3683 sizeof(p.munmap_fd));
3684 if (err)
3685 goto bail;
3686 VERIFY(err, 0 == (err = fastrpc_internal_munmap_fd(fl,
3687 &p.munmap_fd)));
3688 if (err)
3689 goto bail;
3690 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003691 case FASTRPC_IOCTL_SETMODE:
3692 switch ((uint32_t)ioctl_param) {
3693 case FASTRPC_MODE_PARALLEL:
3694 case FASTRPC_MODE_SERIAL:
3695 fl->mode = (uint32_t)ioctl_param;
3696 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003697 case FASTRPC_MODE_PROFILE:
3698 fl->profile = (uint32_t)ioctl_param;
3699 break;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05303700 case FASTRPC_MODE_SESSION:
3701 fl->sessionid = 1;
3702 fl->tgid |= (1 << SESSION_ID_INDEX);
3703 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003704 default:
3705 err = -ENOTTY;
3706 break;
3707 }
3708 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003709 case FASTRPC_IOCTL_GETPERF:
c_mtharue1a5ce12017-10-13 20:47:09 +05303710 K_COPY_FROM_USER(err, 0, &p.perf,
3711 param, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003712 if (err)
3713 goto bail;
3714 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
3715 if (p.perf.keys) {
3716 char *keys = PERF_KEYS;
3717
c_mtharue1a5ce12017-10-13 20:47:09 +05303718 K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
3719 keys, strlen(keys)+1);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003720 if (err)
3721 goto bail;
3722 }
3723 if (p.perf.data) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303724 struct fastrpc_perf *perf = NULL, *fperf = NULL;
3725 struct hlist_node *n = NULL;
3726
3727 mutex_lock(&fl->perf_mutex);
3728 hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
3729 if (perf->tid == current->pid) {
3730 fperf = perf;
3731 break;
3732 }
3733 }
3734
3735 mutex_unlock(&fl->perf_mutex);
3736
3737 if (fperf) {
3738 K_COPY_TO_USER(err, 0, (void *)p.perf.data,
3739 fperf, sizeof(*fperf));
3740 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003741 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303742 K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003743 if (err)
3744 goto bail;
3745 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303746 case FASTRPC_IOCTL_CONTROL:
c_mtharue1a5ce12017-10-13 20:47:09 +05303747 K_COPY_FROM_USER(err, 0, &p.cp, param,
3748 sizeof(p.cp));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303749 if (err)
3750 goto bail;
3751 VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
3752 if (err)
3753 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303754 if (p.cp.req == FASTRPC_CONTROL_KALLOC) {
3755 K_COPY_TO_USER(err, 0, param, &p.cp, sizeof(p.cp));
3756 if (err)
3757 goto bail;
3758 }
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303759 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003760 case FASTRPC_IOCTL_GETINFO:
c_mtharue1a5ce12017-10-13 20:47:09 +05303761 K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
Sathish Ambley36849af2017-02-02 09:35:55 -08003762 if (err)
3763 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003764 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
3765 if (err)
3766 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303767 K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003768 if (err)
3769 goto bail;
3770 break;
3771 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08003772 p.init.attrs = 0;
3773 p.init.siglen = 0;
3774 size = sizeof(struct fastrpc_ioctl_init);
3775 /* fall through */
3776 case FASTRPC_IOCTL_INIT_ATTRS:
3777 if (!size)
3778 size = sizeof(struct fastrpc_ioctl_init_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +05303779 K_COPY_FROM_USER(err, 0, &p.init, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003780 if (err)
3781 goto bail;
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05303782 VERIFY(err, p.init.init.filelen >= 0 &&
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303783 p.init.init.filelen < INIT_FILELEN_MAX);
3784 if (err)
3785 goto bail;
3786 VERIFY(err, p.init.init.memlen >= 0 &&
3787 p.init.init.memlen < INIT_MEMLEN_MAX);
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05303788 if (err)
3789 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303790 VERIFY(err, 0 == (err = fastrpc_init_process(fl, &p.init)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003791 if (err)
3792 goto bail;
3793 break;
3794
3795 default:
3796 err = -ENOTTY;
3797 pr_info("bad ioctl: %d\n", ioctl_num);
3798 break;
3799 }
3800 bail:
3801 return err;
3802}
3803
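/*
 * Subsystem-restart notifier. SUBSYS_BEFORE_SHUTDOWN bumps the SSR count,
 * closes the transport and notifies waiting clients via
 * fastrpc_notify_drivers(); RAMDUMP notifications arm the remote-heap
 * ramdump and SUBSYS_AFTER_POWERUP marks the subsystem up again.
 */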
3804static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
3805 unsigned long code,
3806 void *data)
3807{
3808 struct fastrpc_apps *me = &gfa;
3809 struct fastrpc_channel_ctx *ctx;
c_mtharue1a5ce12017-10-13 20:47:09 +05303810 struct notif_data *notifdata = data;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003811 int cid;
3812
3813 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
3814 cid = ctx - &me->channel[0];
3815 if (code == SUBSYS_BEFORE_SHUTDOWN) {
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303816 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003817 ctx->ssrcount++;
c_mtharue1a5ce12017-10-13 20:47:09 +05303818 ctx->issubsystemup = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303819 if (ctx->chan) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303820 if (me->glink)
3821 fastrpc_glink_close(ctx->chan, cid);
3822 else
3823 smd_close(ctx->chan);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303824 ctx->chan = NULL;
3825 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
3826 gcinfo[cid].name, MAJOR(me->dev_no), cid);
3827 }
3828 mutex_unlock(&me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05303829 if (cid == 0)
3830 me->staticpd_flags = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003831 fastrpc_notify_drivers(me, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05303832 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
3833 if (me->channel[0].remoteheap_ramdump_dev &&
3834 notifdata->enable_ramdump) {
3835 me->channel[0].ramdumpenabled = 1;
3836 }
3837 } else if (code == SUBSYS_AFTER_POWERUP) {
3838 ctx->issubsystemup = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003839 }
3840
3841 return NOTIFY_DONE;
3842}
3843
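/*
 * Protection-domain restart notifier for the static audio/sensors PDs:
 * track the PD state, clear the static-PD flag when the audio PD goes
 * down, and forward the event to clients of that PD.
 */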
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303844static int fastrpc_pdr_notifier_cb(struct notifier_block *pdrnb,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303845 unsigned long code,
3846 void *data)
3847{
3848 struct fastrpc_apps *me = &gfa;
3849 struct fastrpc_static_pd *spd;
3850 struct notif_data *notifdata = data;
3851
3852 spd = container_of(pdrnb, struct fastrpc_static_pd, pdrnb);
3853 if (code == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01) {
3854 mutex_lock(&me->smd_mutex);
3855 spd->pdrcount++;
3856 spd->ispdup = 0;
3857 pr_info("ADSPRPC: Audio PDR notifier %d %s\n",
3858 MAJOR(me->dev_no), spd->spdname);
3859 mutex_unlock(&me->smd_mutex);
3860 if (!strcmp(spd->spdname,
3861 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME))
3862 me->staticpd_flags = 0;
3863 fastrpc_notify_pdr_drivers(me, spd->spdname);
3864 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
3865 if (me->channel[0].remoteheap_ramdump_dev &&
3866 notifdata->enable_ramdump) {
3867 me->channel[0].ramdumpenabled = 1;
3868 }
3869 } else if (code == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
3870 spd->ispdup = 1;
3871 }
3872
3873 return NOTIFY_DONE;
3874}
3875
3876static int fastrpc_get_service_location_notify(struct notifier_block *nb,
3877 unsigned long opcode, void *data)
3878{
3879 struct fastrpc_static_pd *spd;
3880 struct pd_qmi_client_data *pdr = data;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303881 int curr_state = 0, i = 0;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303882
3883 spd = container_of(nb, struct fastrpc_static_pd, get_service_nb);
3884 if (opcode == LOCATOR_DOWN) {
3885 pr_err("ADSPRPC: Audio PD restart notifier locator down\n");
3886 return NOTIFY_DONE;
3887 }
3888
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303889 for (i = 0; i < pdr->total_domains; i++) {
3890 if ((!strcmp(pdr->domain_list[i].name,
3891 "msm/adsp/audio_pd")) ||
3892 (!strcmp(pdr->domain_list[i].name,
3893 "msm/adsp/sensor_pd"))) {
3894 spd->pdrhandle =
3895 service_notif_register_notifier(
3896 pdr->domain_list[i].name,
3897 pdr->domain_list[i].instance_id,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303898 &spd->pdrnb, &curr_state);
Tharun Kumar Meruguad4beb82018-05-10 19:51:48 +05303899 if (IS_ERR(spd->pdrhandle)) {
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303900 pr_err("ADSPRPC: Unable to register notifier\n");
Tharun Kumar Meruguad4beb82018-05-10 19:51:48 +05303901 } else if (curr_state ==
3902 SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
3903 pr_info("ADSPRPC: STATE_UP_V01 received\n");
3904 spd->ispdup = 1;
3905 } else if (curr_state ==
3906 SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01) {
3907 pr_info("ADSPRPC: STATE_UNINIT_V01 received\n");
3908 }
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303909 break;
3910 }
3911 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303912
3913 return NOTIFY_DONE;
3914}
3915
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003916static const struct file_operations fops = {
3917 .open = fastrpc_device_open,
3918 .release = fastrpc_device_release,
3919 .unlocked_ioctl = fastrpc_device_ioctl,
3920 .compat_ioctl = compat_fastrpc_device_ioctl,
3921};
3922
3923static const struct of_device_id fastrpc_match_table[] = {
3924 { .compatible = "qcom,msm-fastrpc-adsp", },
3925 { .compatible = "qcom,msm-fastrpc-compute", },
3926 { .compatible = "qcom,msm-fastrpc-compute-cb", },
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303927 { .compatible = "qcom,msm-fastrpc-legacy-compute", },
3928 { .compatible = "qcom,msm-fastrpc-legacy-compute-cb", },
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003929 { .compatible = "qcom,msm-adsprpc-mem-region", },
3930 {}
3931};
3932
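/*
 * Probe one SMMU context bank ("qcom,msm-fastrpc-compute-cb"): match the
 * label against a channel, read the context-bank properties (coherency,
 * shared-cb, secure), create and attach an ARM IOMMU mapping for it and
 * register the session with the channel.
 */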
3933static int fastrpc_cb_probe(struct device *dev)
3934{
3935 struct fastrpc_channel_ctx *chan;
3936 struct fastrpc_session_ctx *sess;
3937 struct of_phandle_args iommuspec;
3938 const char *name;
3939 unsigned int start = 0x80000000;
3940 int err = 0, i;
3941 int secure_vmid = VMID_CP_PIXEL;
3942
c_mtharue1a5ce12017-10-13 20:47:09 +05303943 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
3944 "label", NULL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003945 if (err)
3946 goto bail;
3947 for (i = 0; i < NUM_CHANNELS; i++) {
3948 if (!gcinfo[i].name)
3949 continue;
3950 if (!strcmp(name, gcinfo[i].name))
3951 break;
3952 }
3953 VERIFY(err, i < NUM_CHANNELS);
3954 if (err)
3955 goto bail;
3956 chan = &gcinfo[i];
3957 VERIFY(err, chan->sesscount < NUM_SESSIONS);
3958 if (err)
3959 goto bail;
3960
3961 VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
3962 "#iommu-cells", 0, &iommuspec));
3963 if (err)
3964 goto bail;
3965 sess = &chan->session[chan->sesscount];
3966 sess->smmu.cb = iommuspec.args[0] & 0xf;
3967 sess->used = 0;
3968 sess->smmu.coherent = of_property_read_bool(dev->of_node,
3969 "dma-coherent");
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303970 sess->smmu.sharedcb = of_property_read_bool(dev->of_node,
3971 "shared-cb");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003972 sess->smmu.secure = of_property_read_bool(dev->of_node,
3973 "qcom,secure-context-bank");
3974 if (sess->smmu.secure)
3975 start = 0x60000000;
3976 VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
3977 arm_iommu_create_mapping(&platform_bus_type,
Tharun Kumar Meruguca183f92017-04-27 17:43:27 +05303978 start, 0x78000000)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003979 if (err)
3980 goto bail;
3981
3982 if (sess->smmu.secure)
3983 iommu_domain_set_attr(sess->smmu.mapping->domain,
3984 DOMAIN_ATTR_SECURE_VMID,
3985 &secure_vmid);
3986
3987 VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
3988 if (err)
3989 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303990 sess->smmu.dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003991 sess->smmu.enabled = 1;
3992 chan->sesscount++;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003993 debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
3994 NULL, &debugfs_fops);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003995bail:
3996 return err;
3997}
3998
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303999static int fastrpc_cb_legacy_probe(struct device *dev)
4000{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304001 struct fastrpc_channel_ctx *chan;
4002 struct fastrpc_session_ctx *first_sess = NULL, *sess = NULL;
4003 const char *name;
4004 unsigned int *sids = NULL, sids_size = 0;
4005 int err = 0, ret = 0, i;
4006
4007 unsigned int start = 0x80000000;
4008
4009 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
4010 "label", NULL)));
4011 if (err)
4012 goto bail;
4013
4014 for (i = 0; i < NUM_CHANNELS; i++) {
4015 if (!gcinfo[i].name)
4016 continue;
4017 if (!strcmp(name, gcinfo[i].name))
4018 break;
4019 }
4020 VERIFY(err, i < NUM_CHANNELS);
4021 if (err)
4022 goto bail;
4023
4024 chan = &gcinfo[i];
4025 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4026 if (err)
4027 goto bail;
4028
4029 first_sess = &chan->session[chan->sesscount];
4030
4031 VERIFY(err, NULL != of_get_property(dev->of_node,
4032 "sids", &sids_size));
4033 if (err)
4034 goto bail;
4035
4036 VERIFY(err, NULL != (sids = kzalloc(sids_size, GFP_KERNEL)));
4037 if (err)
4038 goto bail;
4039 ret = of_property_read_u32_array(dev->of_node, "sids", sids,
4040 sids_size/sizeof(unsigned int));
4041 if (ret)
4042 goto bail;
4043
4044 VERIFY(err, !IS_ERR_OR_NULL(first_sess->smmu.mapping =
4045 arm_iommu_create_mapping(&platform_bus_type,
4046 start, 0x78000000)));
4047 if (err)
4048 goto bail;
4049
4050 VERIFY(err, !arm_iommu_attach_device(dev, first_sess->smmu.mapping));
4051 if (err)
4052 goto bail;
4053
4054
4055 for (i = 0; i < sids_size/sizeof(unsigned int); i++) {
4056 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4057 if (err)
4058 goto bail;
4059 sess = &chan->session[chan->sesscount];
4060 sess->smmu.cb = sids[i];
4061 sess->smmu.dev = dev;
4062 sess->smmu.mapping = first_sess->smmu.mapping;
4063 sess->smmu.enabled = 1;
4064 sess->used = 0;
4065 sess->smmu.coherent = false;
4066 sess->smmu.secure = false;
4067 chan->sesscount++;
4068 }
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304069bail:
4070 kfree(sids);
4071 return err;
4072}
4073
4074
4075
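/*
 * Parse the list of secure VMIDs for the remote heap from the given DT
 * property and build matching R/W/X permission entries; on failure the
 * partially built lists are freed and the destination is left untouched.
 */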
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304076static void init_secure_vmid_list(struct device *dev, char *prop_name,
4077 struct secure_vm *destvm)
4078{
4079 int err = 0;
4080 u32 len = 0, i = 0;
4081 u32 *rhvmlist = NULL;
4082 u32 *rhvmpermlist = NULL;
4083
4084 if (!of_find_property(dev->of_node, prop_name, &len))
4085 goto bail;
4086 if (len == 0)
4087 goto bail;
4088 len /= sizeof(u32);
4089 VERIFY(err, NULL != (rhvmlist = kcalloc(len, sizeof(u32), GFP_KERNEL)));
4090 if (err)
4091 goto bail;
4092 VERIFY(err, NULL != (rhvmpermlist = kcalloc(len, sizeof(u32),
4093 GFP_KERNEL)));
4094 if (err)
4095 goto bail;
4096 for (i = 0; i < len; i++) {
4097 err = of_property_read_u32_index(dev->of_node, prop_name, i,
4098 &rhvmlist[i]);
4099 rhvmpermlist[i] = PERM_READ | PERM_WRITE | PERM_EXEC;
4100 pr_info("ADSPRPC: Secure VMID = %d", rhvmlist[i]);
4101 if (err) {
4102 pr_err("ADSPRPC: Failed to read VMID\n");
4103 goto bail;
4104 }
4105 }
4106 destvm->vmid = rhvmlist;
4107 destvm->vmperm = rhvmpermlist;
4108 destvm->vmcount = len;
4109bail:
4110 if (err) {
4111 kfree(rhvmlist);
4112 kfree(rhvmpermlist);
4113 }
4114}
4115
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304116static void configure_secure_channels(uint32_t secure_domains)
4117{
4118 struct fastrpc_apps *me = &gfa;
4119 int ii = 0;
4120 /*
4121 * secure_domains contains the bitmask of the secure channels
4122 * Bit 0 - ADSP
4123 * Bit 1 - MDSP
4124 * Bit 2 - SLPI
4125 * Bit 3 - CDSP
4126 */
4127 for (ii = ADSP_DOMAIN_ID; ii <= CDSP_DOMAIN_ID; ++ii) {
4128 int secure = (secure_domains >> ii) & 0x01;
4129
4130 me->channel[ii].secure = secure;
4131 }
4132}
4133
4134
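/*
 * Platform probe, dispatched on the compatible string: the compute node
 * seeds secure VMIDs, RPC latency and secure-domain configuration; the
 * context-bank nodes go to the cb probe helpers; the mem-region node
 * records the ADSP CMA range and, unless restricted, hyp-assigns it to the
 * remote VMs; finally the audio/sensors PDR service locators are set up.
 */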
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004135static int fastrpc_probe(struct platform_device *pdev)
4136{
4137 int err = 0;
4138 struct fastrpc_apps *me = &gfa;
4139 struct device *dev = &pdev->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004140 struct device_node *ion_node, *node;
4141 struct platform_device *ion_pdev;
4142 struct cma *cma;
4143 uint32_t val;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304144 int ret = 0;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304145 uint32_t secure_domains;
c_mtharu63ffc012017-11-16 15:26:56 +05304146
4147 if (of_device_is_compatible(dev->of_node,
4148 "qcom,msm-fastrpc-compute")) {
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304149 init_secure_vmid_list(dev, "qcom,adsp-remoteheap-vmid",
4150 &gcinfo[0].rhvm);
c_mtharu63ffc012017-11-16 15:26:56 +05304151
c_mtharu63ffc012017-11-16 15:26:56 +05304152
4153 of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
4154 &me->latency);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304155 if (of_get_property(dev->of_node,
4156 "qcom,secure-domains", NULL) != NULL) {
4157 VERIFY(err, !of_property_read_u32(dev->of_node,
4158 "qcom,secure-domains",
4159 &secure_domains));
zhaochenfc798572018-08-17 15:32:37 +08004160 if (!err) {
4161 me->secure_flag = true;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304162 configure_secure_channels(secure_domains);
zhaochenfc798572018-08-17 15:32:37 +08004163 } else {
4164 me->secure_flag = false;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304165 pr_info("adsprpc: unable to read the domain configuration from dts\n");
zhaochenfc798572018-08-17 15:32:37 +08004166 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304167 }
c_mtharu63ffc012017-11-16 15:26:56 +05304168 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004169 if (of_device_is_compatible(dev->of_node,
4170 "qcom,msm-fastrpc-compute-cb"))
4171 return fastrpc_cb_probe(dev);
4172
4173 if (of_device_is_compatible(dev->of_node,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304174 "qcom,msm-fastrpc-legacy-compute")) {
4175 me->glink = false;
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304176 me->legacy = 1;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304177 }
4178
4179 if (of_device_is_compatible(dev->of_node,
4180 "qcom,msm-fastrpc-legacy-compute-cb")){
4181 return fastrpc_cb_legacy_probe(dev);
4182 }
4183
4184 if (of_device_is_compatible(dev->of_node,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004185 "qcom,msm-adsprpc-mem-region")) {
4186 me->dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004187 ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
4188 if (ion_node) {
4189 for_each_available_child_of_node(ion_node, node) {
4190 if (of_property_read_u32(node, "reg", &val))
4191 continue;
4192 if (val != ION_ADSP_HEAP_ID)
4193 continue;
4194 ion_pdev = of_find_device_by_node(node);
4195 if (!ion_pdev)
4196 break;
4197 cma = dev_get_cma_area(&ion_pdev->dev);
4198 if (cma) {
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304199 me->range.addr = cma_get_base(cma);
4200 me->range.size =
4201 (size_t)cma_get_size(cma);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004202 }
4203 break;
4204 }
4205 }
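		/*
		 * If a CMA area was found and the node does not set
		 * "restrict-access", hyp-assign the whole region from HLOS to
		 * HLOS plus the MSS, SSC and ADSP Q6 VMIDs with RWX
		 * permissions.
		 */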
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304206 if (me->range.addr && !of_property_read_bool(dev->of_node,
Tharun Kumar Merugu6b7a4a22018-01-17 16:08:07 +05304207 "restrict-access")) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004208 int srcVM[1] = {VMID_HLOS};
4209 int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
4210 VMID_ADSP_Q6};
Sathish Ambley84d11862017-05-15 14:36:05 -07004211 int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004212 PERM_READ | PERM_WRITE | PERM_EXEC,
4213 PERM_READ | PERM_WRITE | PERM_EXEC,
4214 PERM_READ | PERM_WRITE | PERM_EXEC,
4215 };
4216
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304217 VERIFY(err, !hyp_assign_phys(me->range.addr,
4218 me->range.size, srcVM, 1,
4219 destVM, destVMperm, 4));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004220 if (err)
4221 goto bail;
4222 }
4223 return 0;
4224 }
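	/*
	 * Optional protection-domain restart (PDR) support: the audio and
	 * sensors blocks below each register a service-locator notifier on
	 * channel 0 so the driver is told where the corresponding ADSP
	 * service is hosted. A locator failure is only logged and does not
	 * fail the probe.
	 */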
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304225 if (of_property_read_bool(dev->of_node,
4226 "qcom,fastrpc-adsp-audio-pdr")) {
4227 int session;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004228
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304229 VERIFY(err, !fastrpc_get_adsp_session(
4230 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
4231 if (err)
4232 goto spdbail;
4233 me->channel[0].spd[session].get_service_nb.notifier_call =
4234 fastrpc_get_service_location_notify;
4235 ret = get_service_location(
4236 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
4237 AUDIO_PDR_ADSP_SERVICE_NAME,
4238 &me->channel[0].spd[session].get_service_nb);
4239 if (ret)
4240 pr_err("ADSPRPC: Get service location failed: %d\n",
4241 ret);
4242 }
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304243 if (of_property_read_bool(dev->of_node,
4244 "qcom,fastrpc-adsp-sensors-pdr")) {
4245 int session;
4246
4247 VERIFY(err, !fastrpc_get_adsp_session(
4248 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
4249 if (err)
4250 goto spdbail;
4251 me->channel[0].spd[session].get_service_nb.notifier_call =
4252 fastrpc_get_service_location_notify;
4253 ret = get_service_location(
4254 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
4255 SENSORS_PDR_ADSP_SERVICE_NAME,
4256 &me->channel[0].spd[session].get_service_nb);
4257 if (ret)
4258 pr_err("ADSPRPC: Get service location failed: %d\n",
4259 ret);
4260 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304261spdbail:
4262 err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004263 VERIFY(err, !of_platform_populate(pdev->dev.of_node,
4264 fastrpc_match_table,
4265 NULL, &pdev->dev));
4266 if (err)
4267 goto bail;
4268bail:
4269 return err;
4270}
4271
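/*
 * fastrpc_deinit() - drop each channel's transport reference, detach and
 * release every session's IOMMU mapping, and free the remote-heap VMID
 * tables set up at probe time.
 */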
4272static void fastrpc_deinit(void)
4273{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304274 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004275 struct fastrpc_channel_ctx *chan = gcinfo;
4276 int i, j;
4277
4278 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
4279 if (chan->chan) {
4280 kref_put_mutex(&chan->kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304281 fastrpc_channel_close, &me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05304282 chan->chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004283 }
4284 for (j = 0; j < NUM_SESSIONS; j++) {
4285 struct fastrpc_session_ctx *sess = &chan->session[j];
c_mtharue1a5ce12017-10-13 20:47:09 +05304286 if (sess->smmu.dev) {
4287 arm_iommu_detach_device(sess->smmu.dev);
4288 sess->smmu.dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004289 }
4290 if (sess->smmu.mapping) {
4291 arm_iommu_release_mapping(sess->smmu.mapping);
c_mtharue1a5ce12017-10-13 20:47:09 +05304292 sess->smmu.mapping = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004293 }
4294 }
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304295 kfree(chan->rhvm.vmid);
4296 kfree(chan->rhvm.vmperm);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004297 }
4298}
4299
4300static struct platform_driver fastrpc_driver = {
4301 .probe = fastrpc_probe,
4302 .driver = {
4303 .name = "fastrpc",
4304 .owner = THIS_MODULE,
4305 .of_match_table = fastrpc_match_table,
4306 },
4307};
4308
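/*
 * Module init: register the platform driver, allocate a char-dev region with
 * NUM_CHANNELS minors, create the "fastrpc" class with one non-secure node
 * (MINOR_NUM_DEV) and one secure node (MINOR_NUM_SECURE_DEV), hook a
 * subsystem-restart notifier onto every channel and create the ION client.
 * Each failure point unwinds everything set up before it.
 */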
4309static int __init fastrpc_device_init(void)
4310{
4311 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +05304312 struct device *dev = NULL;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304313 struct device *secure_dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004314 int err = 0, i;
4315
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05304316 debugfs_root = debugfs_create_dir("adsprpc", NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004317 memset(me, 0, sizeof(*me));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004318 fastrpc_init(me);
4319 me->dev = NULL;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304320 me->glink = true;
zhaochenfc798572018-08-17 15:32:37 +08004321 me->secure_flag = false;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004322 VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
4323 if (err)
4324 goto register_bail;
4325 VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
4326 DEVICE_NAME));
4327 if (err)
4328 goto alloc_chrdev_bail;
4329 cdev_init(&me->cdev, &fops);
4330 me->cdev.owner = THIS_MODULE;
4331 VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304332 NUM_DEVICES));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004333 if (err)
4334 goto cdev_init_bail;
4335 me->class = class_create(THIS_MODULE, "fastrpc");
4336 VERIFY(err, !IS_ERR(me->class));
4337 if (err)
4338 goto class_create_bail;
4339 me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304340
4341 /*
4342 * Create device nodes and register them with sysfs.
4343 * The first, non-secure device uses minor number MINOR_NUM_DEV.
4344 */
Sathish Ambley36849af2017-02-02 09:35:55 -08004345 dev = device_create(me->class, NULL,
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304346 MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV),
4347 NULL, DEVICE_NAME);
Sathish Ambley36849af2017-02-02 09:35:55 -08004348 VERIFY(err, !IS_ERR_OR_NULL(dev));
4349 if (err)
4350 goto device_create_bail;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304351
4352 /* Create the secure device with minor number MINOR_NUM_SECURE_DEV */
4353 secure_dev = device_create(me->class, NULL,
4354 MKDEV(MAJOR(me->dev_no), MINOR_NUM_SECURE_DEV),
4355 NULL, DEVICE_NAME_SECURE);
4356 VERIFY(err, !IS_ERR_OR_NULL(secure_dev));
4357 if (err)
4358 goto device_create_bail;
4359
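	/*
	 * By default every channel is exposed through the secure device node;
	 * only the CDSP channel is routed to the non-secure node.
	 */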
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004360 for (i = 0; i < NUM_CHANNELS; i++) {
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304361 me->channel[i].dev = secure_dev;
4362 if (i == CDSP_DOMAIN_ID)
4363 me->channel[i].dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004364 me->channel[i].ssrcount = 0;
4365 me->channel[i].prevssrcount = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05304366 me->channel[i].issubsystemup = 1;
4367 me->channel[i].ramdumpenabled = 0;
4368 me->channel[i].remoteheap_ramdump_dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004369 me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
4370 me->channel[i].handle = subsys_notif_register_notifier(
4371 gcinfo[i].subsys,
4372 &me->channel[i].nb);
4373 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004374 me->client = msm_ion_client_create(DEVICE_NAME);
4375 VERIFY(err, !IS_ERR_OR_NULL(me->client));
4376 if (err)
4377 goto device_create_bail;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05304378
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004379 return 0;
4380device_create_bail:
4381 for (i = 0; i < NUM_CHANNELS; i++) {
Sathish Ambley36849af2017-02-02 09:35:55 -08004382 if (me->channel[i].handle)
4383 subsys_notif_unregister_notifier(me->channel[i].handle,
4384 &me->channel[i].nb);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004385 }
Sathish Ambley36849af2017-02-02 09:35:55 -08004386 if (!IS_ERR_OR_NULL(dev))
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304387 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4388 MINOR_NUM_DEV));
4389 if (!IS_ERR_OR_NULL(secure_dev))
4390 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4391 MINOR_NUM_SECURE_DEV));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004392 class_destroy(me->class);
4393class_create_bail:
4394 cdev_del(&me->cdev);
4395cdev_init_bail:
4396 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
4397alloc_chrdev_bail:
4398register_bail:
4399 fastrpc_deinit();
4400 return err;
4401}
4402
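/*
 * Module exit: tear down in roughly the reverse order of init. Drain open
 * file state, release channels and SMMU mappings, drop the subsystem-restart
 * notifiers, destroy both device nodes, the class, the cdev, the chrdev
 * region, the ION client and the debugfs root.
 */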
4403static void __exit fastrpc_device_exit(void)
4404{
4405 struct fastrpc_apps *me = &gfa;
4406 int i;
4407
4408 fastrpc_file_list_dtor(me);
4409 fastrpc_deinit();
4410 for (i = 0; i < NUM_CHANNELS; i++) {
4411 if (!gcinfo[i].name)
4412 continue;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004413 subsys_notif_unregister_notifier(me->channel[i].handle,
4414 &me->channel[i].nb);
4415 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304416
4417 /* Destroy the secure and non-secure device nodes */
4418 device_destroy(me->class, MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV));
4419 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4420 MINOR_NUM_SECURE_DEV));
4421
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004422 class_destroy(me->class);
4423 cdev_del(&me->cdev);
4424 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
4425 ion_client_destroy(me->client);
Sathish Ambley1ca68232017-01-19 10:32:55 -08004426 debugfs_remove_recursive(debugfs_root);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004427}
4428
4429late_initcall(fastrpc_device_init);
4430module_exit(fastrpc_device_exit);
4431
4432MODULE_LICENSE("GPL v2");