/*
 * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/msm_ion.h>
#include <soc/qcom/secure_buffer.h>
#include <soc/qcom/glink.h>
#include <soc/qcom/smd.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/service-notifier.h>
#include <soc/qcom/service-locator.h>
#include <linux/scatterlist.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/sort.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <asm/dma-iommu.h>
#include <soc/qcom/scm.h>
#include "adsprpc_compat.h"
#include "adsprpc_shared.h"
#include <soc/qcom/ramdump.h>
#include <linux/debugfs.h>
#include <linux/pm_qos.h>
#include <linux/stat.h>

#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
#define TZ_PIL_AUTH_QDSP6_PROC 1
#define ADSP_MMAP_HEAP_ADDR 4
#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
#define ADSP_MMAP_ADD_PAGES 0x1000
#define FASTRPC_DMAHANDLE_NOMAP (16)

#define FASTRPC_ENOSUCH 39
#define VMID_SSC_Q6 5
#define VMID_ADSP_Q6 6
#define DEBUGFS_SIZE 3072
#define UL_SIZE 25
#define PID_SIZE 10

#define AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME "audio_pdr_adsprpc"
#define AUDIO_PDR_ADSP_SERVICE_NAME "avs/audio"

#define SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME "sensors_pdr_adsprpc"
#define SENSORS_PDR_ADSP_SERVICE_NAME "tms/servreg"

#define RPC_TIMEOUT (5 * HZ)
#define BALIGN 128
#define NUM_CHANNELS 4	/* adsp, mdsp, slpi, cdsp */
#define NUM_SESSIONS 9	/* 8 compute, 1 cpz */
#define M_FDLIST (16)
#define M_CRCLIST (64)
#define SESSION_ID_INDEX (30)
#define FASTRPC_CTX_MAGIC (0xbeeddeed)
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_CTXID_MASK (0xFF0)
#define NUM_DEVICES 2	/* adsprpc-smd, adsprpc-smd-secure */
#define MINOR_NUM_DEV 0
#define MINOR_NUM_SECURE_DEV 1
#define NON_SECURE_CHANNEL 0
#define SECURE_CHANNEL 1

#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)

#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)

#define FASTRPC_LINK_STATE_DOWN (0x0)
#define FASTRPC_LINK_STATE_UP (0x1)
#define FASTRPC_LINK_DISCONNECTED (0x0)
#define FASTRPC_LINK_CONNECTING (0x1)
#define FASTRPC_LINK_CONNECTED (0x3)
#define FASTRPC_LINK_DISCONNECTING (0x7)
#define FASTRPC_LINK_REMOTE_DISCONNECTING (0x8)
#define FASTRPC_GLINK_INTENT_LEN (64)
#define FASTRPC_GLINK_INTENT_NUM (16)

#define PERF_KEYS \
	"count:flush:map:copy:rpmsg:getargs:putargs:invalidate:invoke:tid:ptr"
#define FASTRPC_STATIC_HANDLE_PROCESS_GROUP (1)
#define FASTRPC_STATIC_HANDLE_DSP_UTILITIES (2)
#define FASTRPC_STATIC_HANDLE_LISTENER (3)
#define FASTRPC_STATIC_HANDLE_MAX (20)
#define FASTRPC_LATENCY_CTRL_ENB (1)

#define MAX_SIZE_LIMIT (0x78000000)
#define INIT_FILELEN_MAX (2*1024*1024)
#define INIT_MEMLEN_MAX (8*1024*1024)

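/*
 * Profiling helpers: PERF() measures the wall-clock time spent in the
 * expression ff and accumulates it into the counter cnt whenever
 * profiling (enb) is enabled; GET_COUNTER() returns the counter slot
 * for a given PERF_* key, or NULL when the key is out of range.
 */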
#define PERF_END (void)0

#define PERF(enb, cnt, ff) \
	{\
		struct timespec startT = {0};\
		int64_t *counter = cnt;\
		if (enb && counter) {\
			getnstimeofday(&startT);\
		} \
		ff ;\
		if (enb && counter) {\
			*counter += getnstimediff(&startT);\
		} \
	}

#define GET_COUNTER(perf_ptr, offset)  \
	(perf_ptr != NULL ?\
		(((offset >= 0) && (offset < PERF_KEY_MAX)) ?\
			(int64_t *)(perf_ptr + offset)\
				: (int64_t *)NULL) : (int64_t *)NULL)

static int fastrpc_glink_open(int cid);
static void fastrpc_glink_close(void *chan, int cid);
static int fastrpc_pdr_notifier_cb(struct notifier_block *nb,
					unsigned long code,
					void *data);
static struct dentry *debugfs_root;
static struct dentry *debugfs_global_file;

static inline uint64_t buf_page_start(uint64_t buf)
{
	uint64_t start = (uint64_t) buf & PAGE_MASK;
	return start;
}

static inline uint64_t buf_page_offset(uint64_t buf)
{
	uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
	return offset;
}

static inline uint64_t buf_num_pages(uint64_t buf, size_t len)
{
	uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
	uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
	uint64_t nPages = end - start + 1;
	return nPages;
}

static inline uint64_t buf_page_size(uint32_t size)
{
	uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;

	return sz > PAGE_SIZE ? sz : PAGE_SIZE;
}

static inline void *uint64_to_ptr(uint64_t addr)
{
	void *ptr = (void *)((uintptr_t)addr);

	return ptr;
}

static inline uint64_t ptr_to_uint64(void *ptr)
{
	uint64_t addr = (uint64_t)((uintptr_t)ptr);

	return addr;
}

struct secure_vm {
	int *vmid;
	int *vmperm;
	int vmcount;
};

struct fastrpc_file;

struct fastrpc_buf {
	struct hlist_node hn;
	struct hlist_node hn_rem;
	struct fastrpc_file *fl;
	void *virt;
	uint64_t phys;
	size_t size;
	unsigned long dma_attr;
	uintptr_t raddr;
	uint32_t flags;
	int remote;
};

struct fastrpc_ctx_lst;

struct overlap {
	uintptr_t start;
	uintptr_t end;
	int raix;
	uintptr_t mstart;
	uintptr_t mend;
	uintptr_t offset;
};

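/*
 * Per-call state for one remote invocation: the marshalled arguments,
 * the mappings and buffers backing them, and the completion the
 * invoking thread waits on until the DSP posts a response.
 */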
struct smq_invoke_ctx {
	struct hlist_node hn;
	struct completion work;
	int retval;
	int pid;
	int tgid;
	remote_arg_t *lpra;
	remote_arg64_t *rpra;
	remote_arg64_t *lrpra;		/* Local copy of rpra for put_args */
	int *fds;
	struct fastrpc_mmap **maps;
	struct fastrpc_buf *buf;
	struct fastrpc_buf *lbuf;
	size_t used;
	struct fastrpc_file *fl;
	uint32_t sc;
	struct overlap *overs;
	struct overlap **overps;
	struct smq_msg msg;
	unsigned int magic;
	unsigned int *attrs;
	uint32_t *crc;
	uint64_t ctxid;
	void *handle;
	const void *ptr;
};

struct fastrpc_ctx_lst {
	struct hlist_head pending;
	struct hlist_head interrupted;
};

struct fastrpc_smmu {
	struct device *dev;
	struct dma_iommu_mapping *mapping;
	int cb;
	int enabled;
	int faults;
	int secure;
	int coherent;
	int sharedcb;
};

struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};

struct fastrpc_static_pd {
	char *spdname;
	struct notifier_block pdrnb;
	struct notifier_block get_service_nb;
	void *pdrhandle;
	int pdrcount;
	int prevpdrcount;
	int ispdup;
};

struct fastrpc_glink_info {
	int link_state;
	int port_state;
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};

struct fastrpc_dsp_capabilities {
	uint32_t is_cached;	/* Flag if dsp attributes are cached */
	uint32_t dsp_attributes[FASTRPC_MAX_DSP_ATTRIBUTES];
};

struct fastrpc_channel_ctx {
	char *name;
	char *subsys;
	void *chan;
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct fastrpc_static_pd spd[NUM_SESSIONS];
	struct completion work;
	struct completion workport;
	struct notifier_block nb;
	struct kref kref;
	int channel;
	int sesscount;
	int ssrcount;
	void *handle;
	int prevssrcount;
	int issubsystemup;
	int vmid;
	struct secure_vm rhvm;
	int ramdumpenabled;
	void *remoteheap_ramdump_dev;
	struct fastrpc_glink_info link;
	/* Indicates, if channel is restricted to secure node only */
	int secure;
	struct fastrpc_dsp_capabilities dsp_cap_kernel;
	/* Indicates whether the channel supports unsigned PD */
	bool unsigned_support;
};

struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;
	uint32_t staticpd_flags;
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;
	spinlock_t hlock;
	struct ion_client *client;
	struct device *dev;
	unsigned int latency;
	bool glink;
	bool legacy;
	bool secure_flag;
	spinlock_t ctxlock;
	struct smq_invoke_ctx *ctxtable[FASTRPC_CTX_MAX];
};

struct fastrpc_mmap {
	struct hlist_node hn;
	struct fastrpc_file *fl;
	struct fastrpc_apps *apps;
	int fd;
	uint32_t flags;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;
	size_t size;
	uintptr_t va;
	size_t len;
	int refs;
	uintptr_t raddr;
	int uncached;
	int secure;
	uintptr_t attr;
};

enum fastrpc_perfkeys {
	PERF_COUNT = 0,
	PERF_FLUSH = 1,
	PERF_MAP = 2,
	PERF_COPY = 3,
	PERF_LINK = 4,
	PERF_GETARGS = 5,
	PERF_PUTARGS = 6,
	PERF_INVARGS = 7,
	PERF_INVOKE = 8,
	PERF_KEY_MAX = 9,
};

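/*
 * Per-thread profiling counters. getperfcounter() indexes into this
 * structure with the PERF_* keys, so the int64_t members must stay in
 * the same order as enum fastrpc_perfkeys.
 */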
struct fastrpc_perf {
	int64_t count;
	int64_t flush;
	int64_t map;
	int64_t copy;
	int64_t link;
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;
	int64_t invoke;
	int64_t tid;
	struct hlist_node hn;
};

struct fastrpc_file {
	struct hlist_node hn;
	spinlock_t hlock;
	struct hlist_head maps;
	struct hlist_head cached_bufs;
	struct hlist_head remote_bufs;
	struct fastrpc_ctx_lst clst;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_buf *init_mem;
	struct fastrpc_session_ctx *secsctx;
	uint32_t mode;
	uint32_t profile;
	int sessionid;
	int tgid;
	int cid;
	int ssrcount;
	int pd;
	char *spdname;
	int file_close;
	int sharedcb;
	struct fastrpc_apps *apps;
	struct hlist_head perf;
	struct dentry *debugfs_file;
	struct mutex perf_mutex;
	struct pm_qos_request pm_qos_req;
	int qos_request;
	struct mutex pm_qos_mutex;
	struct mutex map_mutex;
	struct mutex fl_map_mutex;
	int refcount;
	/* Identifies the device (MINOR_NUM_DEV / MINOR_NUM_SECURE_DEV) */
	int dev_minor;
	char *debug_buf;
};

static struct fastrpc_apps gfa;

static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.channel = SMD_APPS_QDSP,
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
		.spd = {
			{
				.spdname =
					AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
				.pdrnb.notifier_call =
						fastrpc_pdr_notifier_cb,
			},
			{
				.spdname =
					SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
				.pdrnb.notifier_call =
						fastrpc_pdr_notifier_cb,
			}
		},
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.channel = SMD_APPS_MODEM,
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.channel = SMD_APPS_DSPS,
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};

static int hlosvm[1] = {VMID_HLOS};
static int hlosvmperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

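/*
 * getnstimediff() returns the nanoseconds elapsed since *start;
 * getperfcounter() returns the calling thread's counter slot for the
 * given PERF_* key, allocating a fastrpc_perf entry for the thread on
 * first use (under fl->perf_mutex).
 */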
static inline int64_t getnstimediff(struct timespec *start)
{
	int64_t ns;
	struct timespec ts, b;

	getnstimeofday(&ts);
	b = timespec_sub(ts, *start);
	ns = timespec_to_ns(&b);
	return ns;
}

static inline int64_t *getperfcounter(struct fastrpc_file *fl, int key)
{
	int err = 0;
	int64_t *val = NULL;
	struct fastrpc_perf *perf = NULL, *fperf = NULL;
	struct hlist_node *n = NULL;

	VERIFY(err, !IS_ERR_OR_NULL(fl));
	if (err)
		goto bail;

	mutex_lock(&fl->perf_mutex);
	hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
		if (perf->tid == current->pid) {
			fperf = perf;
			break;
		}
	}

	if (IS_ERR_OR_NULL(fperf)) {
		fperf = kzalloc(sizeof(*fperf), GFP_KERNEL);

		VERIFY(err, !IS_ERR_OR_NULL(fperf));
		if (err) {
			mutex_unlock(&fl->perf_mutex);
			kfree(fperf);
			goto bail;
		}

		fperf->tid = current->pid;
		hlist_add_head(&fperf->hn, &fl->perf);
	}

	val = ((int64_t *)fperf) + key;
	mutex_unlock(&fl->perf_mutex);
bail:
	return val;
}

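/*
 * Release a fastrpc buffer: either park it on the file's cached list
 * for reuse, or unhook it from the remote list, restore HLOS ownership
 * if it had been hyp-assigned to a remote VM, and free the DMA memory.
 */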
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
	int vmid, err = 0;

	if (!fl)
		return;
	if (cache) {
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->cached_bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (buf->remote) {
		spin_lock(&fl->hlock);
		hlist_del_init(&buf->hn_rem);
		spin_unlock(&fl->hlock);
		buf->remote = 0;
		buf->raddr = 0;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, fl->sctx != NULL);
		if (err)
			goto bail;
		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_attrs(fl->sctx->smmu.dev, buf->size, buf->virt,
					buf->phys, buf->dma_attr);
	}
bail:
	kfree(buf);
}

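/*
 * Drain and free every buffer on the file's cached / remote buffer
 * lists; entries are popped one at a time under fl->hlock and released
 * outside the lock.
 */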
static void fastrpc_cached_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
			hlist_del_init(&buf->hn);
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}

static void fastrpc_remote_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->remote_bufs, hn_rem) {
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}

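/*
 * Mapping bookkeeping: remote-heap mappings live on the global
 * gfa.maps list (protected by gfa.hlock), all others on the owning
 * file's list. fastrpc_mmap_find() matches an existing mapping that
 * covers [va, va + len) for the same fd and optionally takes a
 * reference on it.
 */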
static void fastrpc_mmap_add(struct fastrpc_mmap *map)
{
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		struct fastrpc_apps *me = &gfa;

		spin_lock(&me->hlock);
		hlist_add_head(&map->hn, &me->maps);
		spin_unlock(&me->hlock);
	} else {
		struct fastrpc_file *fl = map->fl;

		hlist_add_head(&map->hn, &fl->maps);
	}
}

static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
		uintptr_t va, size_t len, int mflags, int refs,
		struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_mmap *match = NULL, *map = NULL;
	struct hlist_node *n;

	if ((va + len) < va)
		return -EOVERFLOW;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs) {
					if (map->refs + 1 == INT_MAX) {
						spin_unlock(&me->hlock);
						return -ETOOMANYREFS;
					}
					map->refs++;
				}
				match = map;
				break;
			}
		}
		spin_unlock(&me->hlock);
	} else {
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs) {
					if (map->refs + 1 == INT_MAX)
						return -ETOOMANYREFS;
					map->refs++;
				}
				match = map;
				break;
			}
		}
	}
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

static int dma_alloc_memory(dma_addr_t *region_phys, void **vaddr, size_t size,
			unsigned long dma_attrs)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;

	if (me->dev == NULL) {
		pr_err("device adsprpc-mem is not initialized\n");
		return -ENODEV;
	}
	VERIFY(err, size > 0 && size < MAX_SIZE_LIMIT);
	if (err) {
		err = -EFAULT;
		pr_err("adsprpc: %s: invalid allocation size 0x%zx\n",
			__func__, size);
		return err;
	}
	*vaddr = dma_alloc_attrs(me->dev, size, region_phys, GFP_KERNEL,
								dma_attrs);
	if (IS_ERR_OR_NULL(*vaddr)) {
		pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx, returned %pK\n",
				current->comm, __func__, size, (*vaddr));
		return -ENOMEM;
	}
	return 0;
}

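/*
 * Find a mapping by its remote address and length that has a single
 * reference left, unlink it from the global or per-file list, and hand
 * it back to the caller for freeing.
 */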
static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       size_t len, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = NULL, *map;
	struct hlist_node *n;
	struct fastrpc_apps *me = &gfa;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl;
	int vmid, cid = -1, err = 0;
	struct fastrpc_session_ctx *sess;

	if (!map)
		return;
	fl = map->fl;
	/* remote heap and dynamic loading memory
	 * maps expected to initialize with NULL
	 */
	if (!fl && !(map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR))
		return;
	if (fl && !(map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)) {
		cid = fl->cid;
		VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
		if (err) {
			err = -ECHRNG;
			pr_err("adsprpc: ERROR:%s, Invalid channel id: %d, err:%d",
				__func__, cid, err);
			return;
		}
	}
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		spin_unlock(&me->hlock);
		if (map->refs > 0)
			return;
	} else {
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		if (map->refs > 0 && !flags)
			return;
	}
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {

		if (me->dev == NULL) {
			pr_err("failed to free remote heap allocation\n");
			return;
		}
		if (map->phys) {
			unsigned long dma_attrs = DMA_ATTR_SKIP_ZEROING |
						DMA_ATTR_NO_KERNEL_MAPPING;
			dma_free_attrs(me->dev, map->size, (void *)map->va,
					(dma_addr_t)map->phys, dma_attrs);
		}
	} else if (map->flags == FASTRPC_DMAHANDLE_NOMAP) {
		if (!IS_ERR_OR_NULL(map->handle))
			ion_free(fl->apps->client, map->handle);
	} else {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		if (!IS_ERR_OR_NULL(map->handle))
			ion_free(fl->apps->client, map->handle);
		if (sess && sess->smmu.enabled) {
			if (map->size || map->phys)
				msm_dma_unmap_sg(sess->smmu.dev,
					map->table->sgl,
					map->table->nents, DMA_BIDIRECTIONAL,
					map->buf);
		}
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid && map->phys) {
			int srcVM[2] = {VMID_HLOS, vmid};

			hyp_assign_phys(map->phys, buf_page_size(map->size),
				srcVM, 2, destVM, destVMperm, 1);
		}

		if (!IS_ERR_OR_NULL(map->table))
			dma_buf_unmap_attachment(map->attach, map->table,
					DMA_BIDIRECTIONAL);
		if (!IS_ERR_OR_NULL(map->attach))
			dma_buf_detach(map->buf, map->attach);
		if (!IS_ERR_OR_NULL(map->buf))
			dma_buf_put(map->buf);
	}
	kfree(map);
}

static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
			struct fastrpc_session_ctx **session);

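/*
 * Create a mapping for the buffer described by fd/va/len, reusing an
 * existing one when possible. Remote-heap requests are allocated from
 * the adsprpc-mem device; everything else is imported from ION,
 * attached and mapped through the session's SMMU, and hyp-assigned to
 * the channel's VM when one is configured.
 */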
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, size_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	struct fastrpc_mmap *map = NULL;
	struct fastrpc_channel_ctx *chan = NULL;
	unsigned long attrs;
	dma_addr_t region_phys = 0;
	void *region_vaddr = NULL;
	unsigned long flags;
	int err = 0, vmid, cid = -1;

	cid = fl->cid;
	VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
	if (err) {
		err = -ECHRNG;
		goto bail;
	}
	chan = &apps->channel[cid];
	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		unsigned long dma_attrs = DMA_ATTR_SKIP_ZEROING |
					DMA_ATTR_NO_KERNEL_MAPPING;

		map->apps = me;
		map->fl = NULL;
		VERIFY(err, !dma_alloc_memory(&region_phys, &region_vaddr,
					len, dma_attrs));
		if (err)
			goto bail;
		map->phys = (uintptr_t)region_phys;
		map->size = len;
		map->va = (uintptr_t)region_vaddr;
	} else if (mflags == FASTRPC_DMAHANDLE_NOMAP) {
		ion_phys_addr_t iphys;

		VERIFY(err, !IS_ERR_OR_NULL(map->handle =
				ion_import_dma_buf_fd(fl->apps->client, fd)));
		if (err)
			goto bail;

		map->uncached = 1;
		map->buf = NULL;
		map->attach = NULL;
		map->table = NULL;
		map->va = 0;
		map->phys = 0;

		err = ion_phys(fl->apps->client, map->handle,
			&iphys, &map->size);
		if (err)
			goto bail;
		map->phys = (uint64_t)iphys;
	} else {
		if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
			pr_info("adsprpc: buffer mapped with persist attr %x\n",
				(unsigned int)map->attr);
			map->refs = 2;
		}
		VERIFY(err, !IS_ERR_OR_NULL(map->handle =
				ion_import_dma_buf_fd(fl->apps->client, fd)));
		if (err)
			goto bail;
		VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
		if (err)
			goto bail;

		map->secure = flags & ION_FLAG_SECURE;
		if (map->secure) {
			if (!fl->secsctx)
				err = fastrpc_session_alloc(chan, 1,
							&fl->secsctx);
			if (err)
				goto bail;
		}
		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		VERIFY(err, !IS_ERR_OR_NULL(sess));
		if (err)
			goto bail;

		map->uncached = !ION_IS_CACHED(flags);
		if (map->attr & FASTRPC_ATTR_NOVA && !sess->smmu.coherent)
			map->uncached = 1;

		VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->attach =
				dma_buf_attach(map->buf, sess->smmu.dev)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
		if (err)
			goto bail;
		if (sess->smmu.enabled) {
			attrs = DMA_ATTR_EXEC_MAPPING;

			if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
				(sess->smmu.coherent && map->uncached))
				attrs |= DMA_ATTR_FORCE_NON_COHERENT;
			else if (map->attr & FASTRPC_ATTR_COHERENT)
				attrs |= DMA_ATTR_FORCE_COHERENT;

			VERIFY(err, map->table->nents ==
				msm_dma_map_sg_attrs(sess->smmu.dev,
					map->table->sgl, map->table->nents,
					DMA_BIDIRECTIONAL, map->buf, attrs));
			if (err)
				goto bail;
		} else {
			VERIFY(err, map->table->nents == 1);
			if (err)
				goto bail;
		}
		map->phys = sg_dma_address(map->table->sgl);

		if (sess->smmu.cb) {
			map->phys += ((uint64_t)sess->smmu.cb << 32);
			map->size = sg_dma_len(map->table->sgl);
		} else {
			map->size = buf_page_size(len);
		}

		VERIFY(err, map->size >= len && map->size < MAX_SIZE_LIMIT);
		if (err) {
			err = -EFAULT;
			goto bail;
		}

		vmid = fl->apps->channel[fl->cid].vmid;
		if (!sess->smmu.enabled && !vmid) {
			VERIFY(err, map->phys >= me->range.addr &&
				map->phys + map->size <=
				me->range.addr + me->range.size);
			if (err) {
				pr_err("adsprpc: mmap fail out of range\n");
				goto bail;
			}
		}
		if (vmid) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[2] = {VMID_HLOS, vmid};
			int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

			VERIFY(err, !hyp_assign_phys(map->phys,
					buf_page_size(map->size),
					srcVM, 1, destVM, destVMperm, 2));
			if (err)
				goto bail;
		}
		map->va = va;
	}
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map, 0);
	return err;
}

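/*
 * Allocate a DMA buffer for the given file. Non-remote requests first
 * try to reuse the smallest cached buffer that fits; on allocation
 * failure the cache is flushed and the allocation retried. Remote
 * buffers are tracked on fl->remote_bufs instead of the cache.
 */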
static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
			unsigned long dma_attr, uint32_t rflags,
			int remote, struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = NULL, *fr = NULL;
	struct hlist_node *n;

	VERIFY(err, size > 0 && size < MAX_SIZE_LIMIT);
	if (err) {
		err = -EFAULT;
		goto bail;
	}

	if (!remote) {
		/* find the smallest buffer that fits in the cache */
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
			if (buf->size >= size && (!fr || fr->size > buf->size))
				fr = buf;
		}
		if (fr)
			hlist_del_init(&fr->hn);
		spin_unlock(&fl->hlock);
		if (fr) {
			*obuf = fr;
			return 0;
		}
	}
	buf = NULL;
	VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->dma_attr = dma_attr;
	buf->flags = rflags;
	buf->raddr = 0;
	buf->remote = 0;
	VERIFY(err, fl && fl->sctx != NULL);
	if (err) {
		err = -EBADR;
		goto bail;
	}
	buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
						(dma_addr_t *)&buf->phys,
						GFP_KERNEL, buf->dma_attr);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_cached_buf_list_free(fl);
		buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
						(dma_addr_t *)&buf->phys,
						GFP_KERNEL, buf->dma_attr);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err) {
		err = -ENOMEM;
		pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx\n",
			current->comm, __func__, size);
		goto bail;
	}
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	if (remote) {
		INIT_HLIST_NODE(&buf->hn_rem);
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn_rem, &fl->remote_bufs);
		spin_unlock(&fl->hlock);
		buf->remote = remote;
	}
	*obuf = buf;
 bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}

static int context_restore_interrupted(struct fastrpc_file *fl,
				struct fastrpc_ioctl_invoke_crc *inv,
				struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int overlap_ptr_cmp(const void *a, const void *b)
{
	struct overlap *pa = *((struct overlap **)a);
	struct overlap *pb = *((struct overlap **)b);
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);
	return st == 0 ? ed : st;
}

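/*
 * Sort the in/out buffers by start address and compute, for each one,
 * the sub-range (mstart..mend) not already covered by an earlier
 * buffer, so that argument marshalling copies each byte only once.
 */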
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}

#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst),\
					(void const __user *)(src),\
					(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((void __user *)(dst),\
					(src), (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

1228
1229static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07001230 struct fastrpc_ioctl_invoke_crc *invokefd,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001231 struct smq_invoke_ctx **po)
1232{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301233 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301234 int err = 0, bufs, ii, size = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05301235 struct smq_invoke_ctx *ctx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001236 struct fastrpc_ctx_lst *clst = &fl->clst;
1237 struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;
Jeya R8fa59d62020-11-04 20:42:59 +05301238 unsigned long irq_flags = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001239
1240 bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
1241 size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
1242 sizeof(*ctx->fds) * (bufs) +
1243 sizeof(*ctx->attrs) * (bufs) +
1244 sizeof(*ctx->overs) * (bufs) +
1245 sizeof(*ctx->overps) * (bufs);
1246
c_mtharue1a5ce12017-10-13 20:47:09 +05301247 VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001248 if (err)
1249 goto bail;
1250
1251 INIT_HLIST_NODE(&ctx->hn);
1252 hlist_add_fake(&ctx->hn);
1253 ctx->fl = fl;
1254 ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
1255 ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
1256 ctx->fds = (int *)(&ctx->lpra[bufs]);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301257 if (me->legacy) {
1258 ctx->overs = (struct overlap *)(&ctx->fds[bufs]);
1259 ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
1260 } else {
1261 ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
1262 ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
1263 ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
1264 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001265
c_mtharue1a5ce12017-10-13 20:47:09 +05301266 K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001267 bufs * sizeof(*ctx->lpra));
1268 if (err)
1269 goto bail;
1270
1271 if (invokefd->fds) {
1272 K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
1273 bufs * sizeof(*ctx->fds));
1274 if (err)
1275 goto bail;
1276 }
1277 if (invokefd->attrs) {
1278 K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
1279 bufs * sizeof(*ctx->attrs));
1280 if (err)
1281 goto bail;
1282 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001283 ctx->crc = (uint32_t *)invokefd->crc;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001284 ctx->sc = invoke->sc;
Sathish Ambley9466d672017-01-25 10:51:55 -08001285 if (bufs) {
1286 VERIFY(err, 0 == context_build_overlap(ctx));
1287 if (err)
1288 goto bail;
1289 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001290 ctx->retval = -1;
1291 ctx->pid = current->pid;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301292 ctx->tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001293 init_completion(&ctx->work);
c_mtharufdac6892017-10-12 13:09:01 +05301294 ctx->magic = FASTRPC_CTX_MAGIC;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001295
1296 spin_lock(&fl->hlock);
1297 hlist_add_head(&ctx->hn, &clst->pending);
1298 spin_unlock(&fl->hlock);
1299
Jeya R8fa59d62020-11-04 20:42:59 +05301300 spin_lock_irqsave(&me->ctxlock, irq_flags);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301301 for (ii = 0; ii < FASTRPC_CTX_MAX; ii++) {
1302 if (!me->ctxtable[ii]) {
1303 me->ctxtable[ii] = ctx;
1304 ctx->ctxid = (ptr_to_uint64(ctx) & ~0xFFF)|(ii << 4);
1305 break;
1306 }
1307 }
Jeya R8fa59d62020-11-04 20:42:59 +05301308 spin_unlock_irqrestore(&me->ctxlock, irq_flags);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301309 VERIFY(err, ii < FASTRPC_CTX_MAX);
1310 if (err) {
1311 pr_err("adsprpc: out of context memory\n");
1312 goto bail;
1313 }
1314
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001315 *po = ctx;
1316bail:
1317 if (ctx && err)
1318 context_free(ctx);
1319 return err;
1320}
1321
static void context_save_interrupted(struct smq_invoke_ctx *ctx)
{
	struct fastrpc_ctx_lst *clst = &ctx->fl->clst;

	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	hlist_add_head(&ctx->hn, &clst->interrupted);
	spin_unlock(&ctx->fl->hlock);
}

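/*
 * Tear down an invoke context: drop its buffer mappings, return its
 * buffers to the cache, clear its ctxtable slot and, if a glink rx
 * buffer is still pending for it, complete that rx before freeing.
 */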
static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	struct fastrpc_apps *me = &gfa;
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
	unsigned long irq_flags = 0;
	void *handle = NULL;
	const void *ptr = NULL;

	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	mutex_lock(&ctx->fl->fl_map_mutex);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i], 0);

	mutex_unlock(&ctx->fl->fl_map_mutex);
	fastrpc_buf_free(ctx->buf, 1);
	fastrpc_buf_free(ctx->lbuf, 1);
	ctx->magic = 0;
	ctx->ctxid = 0;

	spin_lock_irqsave(&me->ctxlock, irq_flags);
	for (i = 0; i < FASTRPC_CTX_MAX; i++) {
		if (me->ctxtable[i] == ctx) {
			handle = me->ctxtable[i]->handle;
			ptr = me->ctxtable[i]->ptr;
			me->ctxtable[i] = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&me->ctxlock, irq_flags);
	if (handle) {
		glink_rx_done(handle, ptr, true);
		handle = NULL;
	}

	kfree(ctx);
}

static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}

static void fastrpc_notify_users(struct fastrpc_file *me)
{
	struct smq_invoke_ctx *ictx;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
		complete(&ictx->work);
	}
	hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
		complete(&ictx->work);
	}
	spin_unlock(&me->hlock);
}

static void fastrpc_notify_users_staticpd_pdr(struct fastrpc_file *me)
{
	struct smq_invoke_ctx *ictx;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
		if (ictx->msg.pid)
			complete(&ictx->work);
	}
	hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
		if (ictx->msg.pid)
			complete(&ictx->work);
	}
	spin_unlock(&me->hlock);
}

static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
{
	struct fastrpc_file *fl;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		if (fl->cid == cid)
			fastrpc_notify_users(fl);
	}
	spin_unlock(&me->hlock);
}

static void fastrpc_notify_pdr_drivers(struct fastrpc_apps *me, char *spdname)
{
	struct fastrpc_file *fl;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		if (fl->spdname && !strcmp(spdname, fl->spdname))
			fastrpc_notify_users_staticpd_pdr(fl);
	}
	spin_unlock(&me->hlock);
}

Sathish Ambley69e1ab02016-10-18 10:28:15 -07001442static void context_list_ctor(struct fastrpc_ctx_lst *me)
1443{
1444 INIT_HLIST_HEAD(&me->interrupted);
1445 INIT_HLIST_HEAD(&me->pending);
1446}
1447
1448static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
1449{
1450 struct fastrpc_ctx_lst *clst = &fl->clst;
c_mtharue1a5ce12017-10-13 20:47:09 +05301451 struct smq_invoke_ctx *ictx = NULL, *ctxfree;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001452 struct hlist_node *n;
1453
1454 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301455 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001456 spin_lock(&fl->hlock);
1457 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
1458 hlist_del_init(&ictx->hn);
1459 ctxfree = ictx;
1460 break;
1461 }
1462 spin_unlock(&fl->hlock);
1463 if (ctxfree)
1464 context_free(ctxfree);
1465 } while (ctxfree);
1466 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301467 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001468 spin_lock(&fl->hlock);
1469 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
1470 hlist_del_init(&ictx->hn);
1471 ctxfree = ictx;
1472 break;
1473 }
1474 spin_unlock(&fl->hlock);
1475 if (ctxfree)
1476 context_free(ctxfree);
1477 } while (ctxfree);
1478}
1479
1480static int fastrpc_file_free(struct fastrpc_file *fl);
1481static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
1482{
1483 struct fastrpc_file *fl, *free;
1484 struct hlist_node *n;
1485
1486 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301487 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001488 spin_lock(&me->hlock);
1489 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1490 hlist_del_init(&fl->hn);
1491 free = fl;
1492 break;
1493 }
1494 spin_unlock(&me->hlock);
1495 if (free)
1496 fastrpc_file_free(free);
1497 } while (free);
1498}
1499
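/*
 * Marshal the arguments of an invocation into the buffer shared with
 * the DSP (ctx->buf).  Rough metadata layout for the non-legacy
 * transport:
 *
 *   remote_arg64_t        rpra[bufs + handles]
 *   struct smq_invoke_buf list[bufs + handles]
 *   struct smq_phy_page   pages[bufs + handles]
 *   uint64_t              fdlist[M_FDLIST]
 *   uint32_t              crclist[M_CRCLIST]
 *   <copied non-ion buffers, BALIGN aligned, starting at metalen>
 *
 * ion-backed buffers are mapped with fastrpc_mmap_create() and passed
 * by their physical pages instead of being copied.  A local copy of
 * the remote args (ctx->lbuf / ctx->lrpra) is also kept so later
 * stages read from local memory rather than the shared buffer.
 */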
1500static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
1501{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301502 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301503 remote_arg64_t *rpra, *lrpra;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001504 remote_arg_t *lpra = ctx->lpra;
1505 struct smq_invoke_buf *list;
1506 struct smq_phy_page *pages, *ipage;
1507 uint32_t sc = ctx->sc;
1508 int inbufs = REMOTE_SCALARS_INBUFS(sc);
1509 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001510 int handles, bufs = inbufs + outbufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001511 uintptr_t args;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301512 size_t rlen = 0, copylen = 0, metalen = 0, lrpralen = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001513 int i, oix;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001514 int err = 0;
1515 int mflags = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001516 uint64_t *fdlist;
Sathish Ambleybae51902017-07-03 15:00:49 -07001517 uint32_t *crclist;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301518 int64_t *perf_counter = getperfcounter(ctx->fl, PERF_COUNT);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001519
1520 /* calculate size of the metadata */
c_mtharue1a5ce12017-10-13 20:47:09 +05301521 rpra = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001522 list = smq_invoke_buf_start(rpra, sc);
1523 pages = smq_phy_page_start(sc, list);
1524 ipage = pages;
1525
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301526 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001527 for (i = 0; i < bufs; ++i) {
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301528 uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
1529 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001530
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301531 mutex_lock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301532 if (ctx->fds[i] && (ctx->fds[i] != -1)) {
1533 unsigned int attrs = 0;
1534
1535 if (ctx->attrs)
1536 attrs = ctx->attrs[i];
1537
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001538 fastrpc_mmap_create(ctx->fl, ctx->fds[i],
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301539 attrs, buf, len,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001540 mflags, &ctx->maps[i]);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301541 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301542 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001543 ipage += 1;
1544 }
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301545 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001546 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301547 mutex_lock(&ctx->fl->fl_map_mutex);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001548 for (i = bufs; i < bufs + handles; i++) {
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +05301549 int dmaflags = 0;
1550
1551 if (ctx->attrs && (ctx->attrs[i] & FASTRPC_ATTR_NOMAP))
1552 dmaflags = FASTRPC_DMAHANDLE_NOMAP;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001553 VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +05301554 FASTRPC_ATTR_NOVA, 0, 0, dmaflags, &ctx->maps[i]));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301555 if (err) {
1556 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001557 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301558 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001559 ipage += 1;
1560 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301561 mutex_unlock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301562 if (!me->legacy) {
1563 metalen = copylen = (size_t)&ipage[0] +
1564 (sizeof(uint64_t) * M_FDLIST) +
1565 (sizeof(uint32_t) * M_CRCLIST);
1566 } else {
1567 metalen = copylen = (size_t)&ipage[0];
1568 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001569
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301570 /* allocate new local rpra buffer */
1571 lrpralen = (size_t)&list[0];
1572 if (lrpralen) {
1573 err = fastrpc_buf_alloc(ctx->fl, lrpralen, 0, 0, 0, &ctx->lbuf);
1574 if (err)
1575 goto bail;
1576 }
1577 if (ctx->lbuf->virt)
1578 memset(ctx->lbuf->virt, 0, lrpralen);
1579
1580 lrpra = ctx->lbuf->virt;
1581 ctx->lrpra = lrpra;
1582
1583 /* calculate len required for copying */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001584 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1585 int i = ctx->overps[oix]->raix;
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001586 uintptr_t mstart, mend;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301587 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001588
1589 if (!len)
1590 continue;
1591 if (ctx->maps[i])
1592 continue;
1593 if (ctx->overps[oix]->offset == 0)
1594 copylen = ALIGN(copylen, BALIGN);
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001595 mstart = ctx->overps[oix]->mstart;
1596 mend = ctx->overps[oix]->mend;
1597 VERIFY(err, (mend - mstart) <= LONG_MAX);
1598 if (err)
1599 goto bail;
1600 copylen += mend - mstart;
1601 VERIFY(err, copylen >= 0);
1602 if (err)
1603 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001604 }
1605 ctx->used = copylen;
1606
1607 /* allocate new buffer */
1608 if (copylen) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05301609 err = fastrpc_buf_alloc(ctx->fl, copylen, 0, 0, 0, &ctx->buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001610 if (err)
1611 goto bail;
1612 }
Tharun Kumar Merugue3361f92017-06-22 10:45:43 +05301613 if (ctx->buf->virt && metalen <= copylen)
1614 memset(ctx->buf->virt, 0, metalen);
1615
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001616 /* copy metadata */
1617 rpra = ctx->buf->virt;
1618 ctx->rpra = rpra;
1619 list = smq_invoke_buf_start(rpra, sc);
1620 pages = smq_phy_page_start(sc, list);
1621 ipage = pages;
1622 args = (uintptr_t)ctx->buf->virt + metalen;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001623 for (i = 0; i < bufs + handles; ++i) {
1624 if (lpra[i].buf.len)
1625 list[i].num = 1;
1626 else
1627 list[i].num = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001628 list[i].pgidx = ipage - pages;
1629 ipage++;
1630 }
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301631
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001632 /* map ion buffers */
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301633 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301634 for (i = 0; rpra && lrpra && i < inbufs + outbufs; ++i) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001635 struct fastrpc_mmap *map = ctx->maps[i];
1636 uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301637 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001638
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301639 rpra[i].buf.pv = lrpra[i].buf.pv = 0;
1640 rpra[i].buf.len = lrpra[i].buf.len = len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001641 if (!len)
1642 continue;
1643 if (map) {
1644 struct vm_area_struct *vma;
1645 uintptr_t offset;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301646 uint64_t num = buf_num_pages(buf, len);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001647 int idx = list[i].pgidx;
1648
1649 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001650 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001651 } else {
1652 down_read(&current->mm->mmap_sem);
1653 VERIFY(err, NULL != (vma = find_vma(current->mm,
1654 map->va)));
1655 if (err) {
1656 up_read(&current->mm->mmap_sem);
1657 goto bail;
1658 }
1659 offset = buf_page_start(buf) - vma->vm_start;
1660 up_read(&current->mm->mmap_sem);
1661 VERIFY(err, offset < (uintptr_t)map->size);
1662 if (err)
1663 goto bail;
1664 }
1665 pages[idx].addr = map->phys + offset;
1666 pages[idx].size = num << PAGE_SHIFT;
1667 }
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301668 rpra[i].buf.pv = lrpra[i].buf.pv = buf;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001669 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001670 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001671 for (i = bufs; i < bufs + handles; ++i) {
1672 struct fastrpc_mmap *map = ctx->maps[i];
Jeya R4c7abf22020-07-23 16:00:50 +05301673 if (map) {
1674 pages[i].addr = map->phys;
1675 pages[i].size = map->size;
1676 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001677 }
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301678 if (!me->legacy) {
1679 fdlist = (uint64_t *)&pages[bufs + handles];
1680 for (i = 0; i < M_FDLIST; i++)
1681 fdlist[i] = 0;
1682 crclist = (uint32_t *)&fdlist[M_FDLIST];
1683 memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
1684 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001685
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001686	/* copy non-ion buffers */
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301687 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001688 rlen = copylen - metalen;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301689 for (oix = 0; rpra && lrpra && oix < inbufs + outbufs; ++oix) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001690 int i = ctx->overps[oix]->raix;
1691 struct fastrpc_mmap *map = ctx->maps[i];
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301692 size_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001693 uint64_t buf;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301694 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001695
1696 if (!len)
1697 continue;
1698 if (map)
1699 continue;
1700 if (ctx->overps[oix]->offset == 0) {
1701 rlen -= ALIGN(args, BALIGN) - args;
1702 args = ALIGN(args, BALIGN);
1703 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001704 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001705 VERIFY(err, rlen >= mlen);
1706 if (err)
1707 goto bail;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301708 rpra[i].buf.pv = lrpra[i].buf.pv =
1709 (args - ctx->overps[oix]->offset);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001710 pages[list[i].pgidx].addr = ctx->buf->phys -
1711 ctx->overps[oix]->offset +
1712 (copylen - rlen);
1713 pages[list[i].pgidx].addr =
1714 buf_page_start(pages[list[i].pgidx].addr);
1715 buf = rpra[i].buf.pv;
1716 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1717 if (i < inbufs) {
1718 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1719 lpra[i].buf.pv, len);
1720 if (err)
1721 goto bail;
1722 }
1723 args = args + mlen;
1724 rlen -= mlen;
1725 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001726 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001727
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301728 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001729 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1730 int i = ctx->overps[oix]->raix;
1731 struct fastrpc_mmap *map = ctx->maps[i];
1732
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001733 if (map && map->uncached)
1734 continue;
Jeya R984a1a32021-01-18 15:38:07 +05301735 if (ctx->fl->sctx && ctx->fl->sctx->smmu.coherent &&
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301736 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1737 continue;
1738 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1739 continue;
1740
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301741 if (rpra && lrpra && rpra[i].buf.len &&
1742 ctx->overps[oix]->mstart) {
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301743 if (map && map->handle)
1744 msm_ion_do_cache_op(ctx->fl->apps->client,
1745 map->handle,
1746 uint64_to_ptr(rpra[i].buf.pv),
1747 rpra[i].buf.len,
1748 ION_IOC_CLEAN_INV_CACHES);
1749 else
1750 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1751 uint64_to_ptr(rpra[i].buf.pv
1752 + rpra[i].buf.len));
1753 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001754 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001755 PERF_END);
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301756 for (i = bufs; rpra && lrpra && i < bufs + handles; i++) {
Jeya R4c7abf22020-07-23 16:00:50 +05301757 if (ctx->fds)
1758 rpra[i].dma.fd = lrpra[i].dma.fd = ctx->fds[i];
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301759 rpra[i].dma.len = lrpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1760 rpra[i].dma.offset = lrpra[i].dma.offset =
1761 (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001762 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001763
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001764 bail:
1765 return err;
1766}
1767
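/*
 * Unmarshal the results of an invocation: copy output buffers back to
 * the caller, release the per-call mmaps, free any maps the DSP
 * flagged for release through fdlist, and copy CRCs back when the
 * caller asked for them.
 */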
1768static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
1769 remote_arg_t *upra)
1770{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301771 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001772 uint32_t sc = ctx->sc;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001773 struct smq_invoke_buf *list;
1774 struct smq_phy_page *pages;
1775 struct fastrpc_mmap *mmap;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301776 uint64_t *fdlist = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001777 uint32_t *crclist = NULL;
1778
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301779 remote_arg64_t *rpra = ctx->lrpra;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001780 int i, inbufs, outbufs, handles;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001781 int err = 0;
1782
1783 inbufs = REMOTE_SCALARS_INBUFS(sc);
1784 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001785 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1786 list = smq_invoke_buf_start(ctx->rpra, sc);
1787 pages = smq_phy_page_start(sc, list);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301788 if (!me->legacy) {
1789 fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
1790 crclist = (uint32_t *)(fdlist + M_FDLIST);
1791 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001792
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001793 for (i = inbufs; i < inbufs + outbufs; ++i) {
1794 if (!ctx->maps[i]) {
1795 K_COPY_TO_USER(err, kernel,
1796 ctx->lpra[i].buf.pv,
1797 uint64_to_ptr(rpra[i].buf.pv),
1798 rpra[i].buf.len);
1799 if (err)
1800 goto bail;
1801 } else {
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301802 mutex_lock(&ctx->fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05301803 fastrpc_mmap_free(ctx->maps[i], 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301804 mutex_unlock(&ctx->fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05301805 ctx->maps[i] = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001806 }
1807 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301808 mutex_lock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301809 if (fdlist && (inbufs + outbufs + handles)) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001810 for (i = 0; i < M_FDLIST; i++) {
1811 if (!fdlist[i])
1812 break;
1813 if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
Sathish Ambleyae5ee542017-01-16 22:24:23 -08001814 0, 0, &mmap))
c_mtharu7bd6a422017-10-17 18:15:37 +05301815 fastrpc_mmap_free(mmap, 0);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001816 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001817 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301818 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambleybae51902017-07-03 15:00:49 -07001819 if (ctx->crc && crclist && rpra)
c_mtharue1a5ce12017-10-13 20:47:09 +05301820 K_COPY_TO_USER(err, kernel, ctx->crc,
Sathish Ambleybae51902017-07-03 15:00:49 -07001821 crclist, M_CRCLIST*sizeof(uint32_t));
1822
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001823 bail:
1824 return err;
1825}
1826
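/*
 * Cache maintenance before sending the invocation: for cached output
 * buffers whose start or end is not cache-line aligned, clean and
 * invalidate the partial first/last cache line so that dirty CPU
 * lines straddling the buffer edges cannot later overwrite data
 * written by the DSP.
 */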
1827static void inv_args_pre(struct smq_invoke_ctx *ctx)
1828{
1829 int i, inbufs, outbufs;
1830 uint32_t sc = ctx->sc;
1831 remote_arg64_t *rpra = ctx->rpra;
1832 uintptr_t end;
1833
1834 inbufs = REMOTE_SCALARS_INBUFS(sc);
1835 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1836 for (i = inbufs; i < inbufs + outbufs; ++i) {
1837 struct fastrpc_mmap *map = ctx->maps[i];
1838
1839 if (map && map->uncached)
1840 continue;
1841 if (!rpra[i].buf.len)
1842 continue;
Jeya R984a1a32021-01-18 15:38:07 +05301843 if (ctx->fl && ctx->fl->sctx && ctx->fl->sctx->smmu.coherent &&
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301844 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1845 continue;
1846 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1847 continue;
1848
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001849 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1850 buf_page_start(rpra[i].buf.pv))
1851 continue;
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301852 if (!IS_CACHE_ALIGNED((uintptr_t)
1853 uint64_to_ptr(rpra[i].buf.pv))) {
1854 if (map && map->handle)
1855 msm_ion_do_cache_op(ctx->fl->apps->client,
1856 map->handle,
1857 uint64_to_ptr(rpra[i].buf.pv),
1858 sizeof(uintptr_t),
1859 ION_IOC_CLEAN_INV_CACHES);
1860 else
1861 dmac_flush_range(
1862 uint64_to_ptr(rpra[i].buf.pv), (char *)
1863 uint64_to_ptr(rpra[i].buf.pv + 1));
1864 }
1865
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001866 end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
1867 rpra[i].buf.len);
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301868 if (!IS_CACHE_ALIGNED(end)) {
1869 if (map && map->handle)
1870 msm_ion_do_cache_op(ctx->fl->apps->client,
1871 map->handle,
1872 uint64_to_ptr(end),
1873 sizeof(uintptr_t),
1874 ION_IOC_CLEAN_INV_CACHES);
1875 else
1876 dmac_flush_range((char *)end,
1877 (char *)end + 1);
1878 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001879 }
1880}
1881
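/*
 * Cache maintenance after the invocation completes: invalidate the
 * CPU caches covering cached output buffers so the CPU observes the
 * data the DSP wrote.
 */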
1882static void inv_args(struct smq_invoke_ctx *ctx)
1883{
1884 int i, inbufs, outbufs;
1885 uint32_t sc = ctx->sc;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301886 remote_arg64_t *rpra = ctx->lrpra;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001887
1888 inbufs = REMOTE_SCALARS_INBUFS(sc);
1889 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1890 for (i = inbufs; i < inbufs + outbufs; ++i) {
1891 struct fastrpc_mmap *map = ctx->maps[i];
1892
1893 if (map && map->uncached)
1894 continue;
1895 if (!rpra[i].buf.len)
1896 continue;
Jeya R984a1a32021-01-18 15:38:07 +05301897 if (ctx->fl && ctx->fl->sctx && ctx->fl->sctx->smmu.coherent &&
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301898 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1899 continue;
1900 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1901 continue;
1902
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001903 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1904 buf_page_start(rpra[i].buf.pv)) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001905 continue;
1906 }
1907 if (map && map->handle)
1908 msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
1909 (char *)uint64_to_ptr(rpra[i].buf.pv),
1910 rpra[i].buf.len, ION_IOC_INV_CACHES);
1911 else
1912 dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
1913 (char *)uint64_to_ptr(rpra[i].buf.pv
1914 + rpra[i].buf.len));
1915 }
1916
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001917}
1918
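/*
 * Fill in the smq_msg for this context and transmit it to the DSP
 * over glink, or over SMD on legacy channels.  Fails with -ECHRNG if
 * the channel is invalid or closed and with -ECONNRESET if the
 * channel was restarted since the file was opened.
 */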
1919static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
1920 uint32_t kernel, uint32_t handle)
1921{
1922 struct smq_msg *msg = &ctx->msg;
1923 struct fastrpc_file *fl = ctx->fl;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05301924 int err = 0, len, cid = -1;
1925 struct fastrpc_channel_ctx *channel_ctx = NULL;
1926
1927 cid = fl->cid;
1928 VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
1929 if (err) {
1930 err = -ECHRNG;
1931 goto bail;
1932 }
1933 channel_ctx = &fl->apps->channel[fl->cid];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001934
c_mtharue1a5ce12017-10-13 20:47:09 +05301935 VERIFY(err, NULL != channel_ctx->chan);
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05301936 if (err) {
1937 err = -ECHRNG;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001938 goto bail;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05301939 }
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301940 msg->pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001941 msg->tid = current->pid;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301942 if (fl->sessionid)
1943 msg->tid |= (1 << SESSION_ID_INDEX);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001944 if (kernel)
1945 msg->pid = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301946 msg->invoke.header.ctx = ctx->ctxid | fl->pd;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001947 msg->invoke.header.handle = handle;
1948 msg->invoke.header.sc = ctx->sc;
1949 msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
1950 msg->invoke.page.size = buf_page_size(ctx->used);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301951 if (fl->apps->glink) {
1952 if (fl->ssrcount != channel_ctx->ssrcount) {
1953 err = -ECONNRESET;
1954 goto bail;
1955 }
1956 VERIFY(err, channel_ctx->link.port_state ==
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001957 FASTRPC_LINK_CONNECTED);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301958 if (err)
1959 goto bail;
1960 err = glink_tx(channel_ctx->chan,
1961 (void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
1962 GLINK_TX_REQ_INTENT);
1963 } else {
1964 spin_lock(&fl->apps->hlock);
1965 len = smd_write((smd_channel_t *)
1966 channel_ctx->chan,
1967 msg, sizeof(*msg));
1968 spin_unlock(&fl->apps->hlock);
1969 VERIFY(err, len == sizeof(*msg));
1970 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001971 bail:
1972 return err;
1973}
1974
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301975static void fastrpc_smd_read_handler(int cid)
1976{
1977 struct fastrpc_apps *me = &gfa;
1978 struct smq_invoke_rsp rsp = {0};
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301979 int ret = 0, err = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301980 uint32_t index;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301981
1982 do {
1983 ret = smd_read_from_cb(me->channel[cid].chan, &rsp,
1984 sizeof(rsp));
1985 if (ret != sizeof(rsp))
1986 break;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301987
1988 index = (uint32_t)((rsp.ctx & FASTRPC_CTXID_MASK) >> 4);
1989 VERIFY(err, index < FASTRPC_CTX_MAX);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301990 if (err)
1991 goto bail;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301992
1993 VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
1994 if (err)
1995 goto bail;
1996
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05301997 VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp.ctx & ~3)) &&
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301998 me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
1999 if (err)
2000 goto bail;
2001
2002 context_notify_user(me->ctxtable[index], rsp.retval);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05302003 } while (ret == sizeof(rsp));
2004bail:
2005 if (err)
2006 pr_err("adsprpc: invalid response or context\n");
2007
2008}
2009
2010static void smd_event_handler(void *priv, unsigned int event)
2011{
2012 struct fastrpc_apps *me = &gfa;
2013 int cid = (int)(uintptr_t)priv;
2014
2015 switch (event) {
2016 case SMD_EVENT_OPEN:
2017 complete(&me->channel[cid].workport);
2018 break;
2019 case SMD_EVENT_CLOSE:
2020 fastrpc_notify_drivers(me, cid);
2021 break;
2022 case SMD_EVENT_DATA:
2023 fastrpc_smd_read_handler(cid);
2024 break;
2025 }
2026}
2027
2028
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002029static void fastrpc_init(struct fastrpc_apps *me)
2030{
2031 int i;
2032
2033 INIT_HLIST_HEAD(&me->drivers);
Tharun Kumar Merugubcd6fbf2018-01-04 17:49:34 +05302034 INIT_HLIST_HEAD(&me->maps);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002035 spin_lock_init(&me->hlock);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302036 spin_lock_init(&me->ctxlock);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302037 mutex_init(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002038 me->channel = &gcinfo[0];
2039 for (i = 0; i < NUM_CHANNELS; i++) {
2040 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302041 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002042 me->channel[i].sesscount = 0;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05302043 /* All channels are secure by default except CDSP */
2044 me->channel[i].secure = SECURE_CHANNEL;
Jeya Rf4b99852020-11-22 13:03:16 +05302045 me->channel[i].unsigned_support = false;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002046 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05302047	/* Set CDSP channel to non-secure */
2048	me->channel[CDSP_DOMAIN_ID].secure = NON_SECURE_CHANNEL;
Jeya Rf4b99852020-11-22 13:03:16 +05302049	/* Set CDSP channel unsigned_support to true */
2050 me->channel[CDSP_DOMAIN_ID].unsigned_support = true;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002051}
2052
2053static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
2054
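/*
 * Core invoke path.  Restores an interrupted context or allocates a
 * new one, marshals the arguments (get_args), performs pre-call cache
 * maintenance, sends the message to the DSP and waits for completion,
 * then invalidates caches and copies results back (put_args).  A user
 * call interrupted by a signal is saved on the interrupted list so it
 * can be restarted later.
 */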
2055static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
2056 uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07002057 struct fastrpc_ioctl_invoke_crc *inv)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002058{
c_mtharue1a5ce12017-10-13 20:47:09 +05302059 struct smq_invoke_ctx *ctx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002060 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05302061 int err = 0, cid = -1, interrupted = 0;
Maria Yu757199c2017-09-22 16:05:49 +08002062 struct timespec invoket = {0};
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05302063 int64_t *perf_counter = NULL;
2064
2065 cid = fl->cid;
2066 VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
2067 if (err) {
2068 err = -ECHRNG;
2069 goto bail;
2070 }
2071 VERIFY(err, fl->sctx != NULL);
2072 if (err) {
2073 err = -EBADR;
2074 goto bail;
2075 }
2076 perf_counter = getperfcounter(fl, PERF_COUNT);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002077
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002078 if (fl->profile)
2079 getnstimeofday(&invoket);
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05302080
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302081 if (!kernel) {
Tharun Kumar Meruguebe00202019-04-05 03:34:28 +05302082 VERIFY(err, invoke->handle !=
2083 FASTRPC_STATIC_HANDLE_PROCESS_GROUP);
2084 VERIFY(err, invoke->handle !=
2085 FASTRPC_STATIC_HANDLE_DSP_UTILITIES);
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302086 if (err) {
Tharun Kumar Meruguebe00202019-04-05 03:34:28 +05302087 pr_err("adsprpc: ERROR: %s: user application %s trying to send a kernel RPC message to channel %d, handle 0x%x\n",
2088 __func__, current->comm, cid, invoke->handle);
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302089 goto bail;
2090 }
2091 }
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302092
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002093 if (!kernel) {
2094 VERIFY(err, 0 == context_restore_interrupted(fl, inv,
2095 &ctx));
2096 if (err)
2097 goto bail;
2098 if (fl->sctx->smmu.faults)
2099 err = FASTRPC_ENOSUCH;
2100 if (err)
2101 goto bail;
2102 if (ctx)
2103 goto wait;
2104 }
2105
2106 VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
2107 if (err)
2108 goto bail;
2109
2110 if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302111 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_GETARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002112 VERIFY(err, 0 == get_args(kernel, ctx));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002113 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002114 if (err)
2115 goto bail;
2116 }
2117
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302118 if (!fl->sctx->smmu.coherent) {
2119 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002120 inv_args_pre(ctx);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302121 PERF_END);
2122 }
2123
2124 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_LINK),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002125 VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002126 PERF_END);
2127
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002128 if (err)
2129 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002130 wait:
2131 if (kernel)
2132 wait_for_completion(&ctx->work);
2133 else {
2134 interrupted = wait_for_completion_interruptible(&ctx->work);
2135 VERIFY(err, 0 == (err = interrupted));
2136 if (err)
2137 goto bail;
2138 }
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302139 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
Sathish Ambleyc432b502017-06-05 12:03:42 -07002140 if (!fl->sctx->smmu.coherent)
2141 inv_args(ctx);
2142 PERF_END);
2143
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002144 VERIFY(err, 0 == (err = ctx->retval));
2145 if (err)
2146 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002147
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302148 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_PUTARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002149 VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002150 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002151 if (err)
2152 goto bail;
2153 bail:
2154 if (ctx && interrupted == -ERESTARTSYS)
2155 context_save_interrupted(ctx);
2156 else if (ctx)
2157 context_free(ctx);
2158 if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
2159 err = ECONNRESET;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002160
2161 if (fl->profile && !interrupted) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302162 if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER) {
2163 int64_t *count = GET_COUNTER(perf_counter, PERF_INVOKE);
2164
2165 if (count)
2166 *count += getnstimediff(&invoket);
2167 }
2168 if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX) {
2169 int64_t *count = GET_COUNTER(perf_counter, PERF_COUNT);
2170
2171 if (count)
2172 *count = *count+1;
2173 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002174 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002175 return err;
2176}
2177
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302178static int fastrpc_get_adsp_session(char *name, int *session)
2179{
2180 struct fastrpc_apps *me = &gfa;
2181 int err = 0, i;
2182
2183 for (i = 0; i < NUM_SESSIONS; i++) {
2184 if (!me->channel[0].spd[i].spdname)
2185 continue;
2186 if (!strcmp(name, me->channel[0].spd[i].spdname))
2187 break;
2188 }
2189 VERIFY(err, i < NUM_SESSIONS);
2190 if (err)
2191 goto bail;
2192 *session = i;
2193bail:
2194 return err;
2195}
2196
2197static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl);
Sathish Ambley36849af2017-02-02 09:35:55 -08002198static int fastrpc_channel_open(struct fastrpc_file *fl);
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302199static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl);
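/*
 * Create or attach the remote process for this file:
 *  - FASTRPC_INIT_ATTACH / FASTRPC_INIT_ATTACH_SENSORS attach to an
 *    already running PD on the DSP.
 *  - FASTRPC_INIT_CREATE spawns a dynamic user PD, donating a
 *    kernel-allocated buffer (fl->init_mem) for the process image.
 *  - FASTRPC_INIT_CREATE_STATIC brings up a static PD (for example
 *    "audiopd") backed by remote-heap memory assigned to the DSP.
 */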
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002200static int fastrpc_init_process(struct fastrpc_file *fl,
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002201 struct fastrpc_ioctl_init_attrs *uproc)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002202{
2203 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302204 struct fastrpc_apps *me = &gfa;
Sathish Ambleybae51902017-07-03 15:00:49 -07002205 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002206 struct fastrpc_ioctl_init *init = &uproc->init;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002207 struct smq_phy_page pages[1];
c_mtharue1a5ce12017-10-13 20:47:09 +05302208 struct fastrpc_mmap *file = NULL, *mem = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302209 struct fastrpc_buf *imem = NULL;
2210 unsigned long imem_dma_attr = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302211 char *proc_name = NULL;
Jeya Rf4b99852020-11-22 13:03:16 +05302212 int unsigned_request = (uproc->attrs & FASTRPC_MODE_UNSIGNED_MODULE);
2213 int cid = fl->cid;
2214 struct fastrpc_channel_ctx *chan = &me->channel[cid];
2215
2216 if (chan->unsigned_support &&
2217 fl->dev_minor == MINOR_NUM_DEV) {
2218		/* Make sure third-party applications can */
2219		/* spawn only an unsigned PD when the */
2220		/* channel is configured as secure. */
2221 if (chan->secure && !unsigned_request) {
2222 err = -ECONNREFUSED;
2223 goto bail;
2224 }
2225 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002226
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302227 VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
Sathish Ambley36849af2017-02-02 09:35:55 -08002228 if (err)
2229 goto bail;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302230 if (init->flags == FASTRPC_INIT_ATTACH ||
2231 init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002232 remote_arg_t ra[1];
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302233 int tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002234
2235 ra[0].buf.pv = (void *)&tgid;
2236 ra[0].buf.len = sizeof(tgid);
Tharun Kumar Meruguebe00202019-04-05 03:34:28 +05302237 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_PROCESS_GROUP;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002238 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
2239 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302240 ioctl.fds = NULL;
2241 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002242 ioctl.crc = NULL;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302243 if (init->flags == FASTRPC_INIT_ATTACH)
2244 fl->pd = 0;
2245 else if (init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
2246 fl->spdname = SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME;
2247 fl->pd = 2;
2248 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002249 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2250 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2251 if (err)
2252 goto bail;
2253 } else if (init->flags == FASTRPC_INIT_CREATE) {
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002254 remote_arg_t ra[6];
2255 int fds[6];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002256 int mflags = 0;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302257 int memlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002258 struct {
2259 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302260 unsigned int namelen;
2261 unsigned int filelen;
2262 unsigned int pageslen;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002263 int attrs;
2264 int siglen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002265 } inbuf;
2266
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302267 inbuf.pgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002268 inbuf.namelen = strlen(current->comm) + 1;
2269 inbuf.filelen = init->filelen;
2270 fl->pd = 1;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302271
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05302272 VERIFY(err, access_ok(0, (void __user *)init->file,
2273 init->filelen));
2274 if (err)
2275 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002276 if (init->filelen) {
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302277 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002278 VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
2279 init->file, init->filelen, mflags, &file));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302280 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002281 if (err)
2282 goto bail;
2283 }
2284 inbuf.pageslen = 1;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302285
2286 VERIFY(err, !init->mem);
2287 if (err) {
2288 err = -EINVAL;
2289 pr_err("adsprpc: %s: %s: ERROR: donated memory allocated in userspace\n",
2290 current->comm, __func__);
2291 goto bail;
2292 }
2293 memlen = ALIGN(max(1024*1024*3, (int)init->filelen * 4),
2294 1024*1024);
2295 imem_dma_attr = DMA_ATTR_EXEC_MAPPING |
2296 DMA_ATTR_NO_KERNEL_MAPPING |
2297 DMA_ATTR_FORCE_NON_COHERENT;
2298 err = fastrpc_buf_alloc(fl, memlen, imem_dma_attr, 0, 0, &imem);
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05302299 if (err)
2300 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302301 fl->init_mem = imem;
2302
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002303 inbuf.pageslen = 1;
2304 ra[0].buf.pv = (void *)&inbuf;
2305 ra[0].buf.len = sizeof(inbuf);
2306 fds[0] = 0;
2307
2308 ra[1].buf.pv = (void *)current->comm;
2309 ra[1].buf.len = inbuf.namelen;
2310 fds[1] = 0;
2311
2312 ra[2].buf.pv = (void *)init->file;
2313 ra[2].buf.len = inbuf.filelen;
2314 fds[2] = init->filefd;
2315
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302316 pages[0].addr = imem->phys;
2317 pages[0].size = imem->size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002318 ra[3].buf.pv = (void *)pages;
2319 ra[3].buf.len = 1 * sizeof(*pages);
2320 fds[3] = 0;
2321
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002322 inbuf.attrs = uproc->attrs;
2323 ra[4].buf.pv = (void *)&(inbuf.attrs);
2324 ra[4].buf.len = sizeof(inbuf.attrs);
2325 fds[4] = 0;
2326
2327 inbuf.siglen = uproc->siglen;
2328 ra[5].buf.pv = (void *)&(inbuf.siglen);
2329 ra[5].buf.len = sizeof(inbuf.siglen);
2330 fds[5] = 0;
2331
Tharun Kumar Meruguebe00202019-04-05 03:34:28 +05302332 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_PROCESS_GROUP;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002333 ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002334 if (uproc->attrs)
2335 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002336 ioctl.inv.pra = ra;
2337 ioctl.fds = fds;
c_mtharue1a5ce12017-10-13 20:47:09 +05302338 ioctl.attrs = NULL;
2339 ioctl.crc = NULL;
2340 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2341 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2342 if (err)
2343 goto bail;
2344 } else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
2345 remote_arg_t ra[3];
2346 uint64_t phys = 0;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302347 size_t size = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302348 int fds[3];
2349 struct {
2350 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302351 unsigned int namelen;
2352 unsigned int pageslen;
c_mtharue1a5ce12017-10-13 20:47:09 +05302353 } inbuf;
2354
2355 if (!init->filelen)
2356 goto bail;
2357
2358 proc_name = kzalloc(init->filelen, GFP_KERNEL);
2359 VERIFY(err, !IS_ERR_OR_NULL(proc_name));
2360 if (err)
2361 goto bail;
2362 VERIFY(err, 0 == copy_from_user((void *)proc_name,
2363 (void __user *)init->file, init->filelen));
2364 if (err)
2365 goto bail;
2366
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302367 fl->pd = 1;
c_mtharue1a5ce12017-10-13 20:47:09 +05302368 inbuf.pgid = current->tgid;
c_mtharu81a0aa72017-11-07 16:13:21 +05302369 inbuf.namelen = init->filelen;
c_mtharue1a5ce12017-10-13 20:47:09 +05302370 inbuf.pageslen = 0;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302371
2372 if (!strcmp(proc_name, "audiopd")) {
2373 fl->spdname = AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME;
2374 VERIFY(err, !fastrpc_mmap_remove_pdr(fl));
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05302375 if (err)
2376 goto bail;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302377 }
2378
c_mtharue1a5ce12017-10-13 20:47:09 +05302379 if (!me->staticpd_flags) {
2380 inbuf.pageslen = 1;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302381 mutex_lock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302382 VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
2383 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
2384 &mem));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302385 mutex_unlock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302386 if (err)
2387 goto bail;
2388 phys = mem->phys;
2389 size = mem->size;
2390 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302391 hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
2392 me->channel[fl->cid].rhvm.vmperm,
2393 me->channel[fl->cid].rhvm.vmcount));
c_mtharue1a5ce12017-10-13 20:47:09 +05302394 if (err) {
2395 pr_err("ADSPRPC: hyp_assign_phys fail err %d",
2396 err);
2397 pr_err("map->phys %llx, map->size %d\n",
2398 phys, (int)size);
2399 goto bail;
2400 }
2401 me->staticpd_flags = 1;
2402 }
2403
2404 ra[0].buf.pv = (void *)&inbuf;
2405 ra[0].buf.len = sizeof(inbuf);
2406 fds[0] = 0;
2407
2408 ra[1].buf.pv = (void *)proc_name;
2409 ra[1].buf.len = inbuf.namelen;
2410 fds[1] = 0;
2411
2412 pages[0].addr = phys;
2413 pages[0].size = size;
2414
2415 ra[2].buf.pv = (void *)pages;
2416 ra[2].buf.len = sizeof(*pages);
2417 fds[2] = 0;
Tharun Kumar Meruguebe00202019-04-05 03:34:28 +05302418 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_PROCESS_GROUP;
c_mtharue1a5ce12017-10-13 20:47:09 +05302419
2420 ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
2421 ioctl.inv.pra = ra;
2422 ioctl.fds = NULL;
2423 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002424 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002425 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2426 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2427 if (err)
2428 goto bail;
2429 } else {
2430 err = -ENOTTY;
2431 }
2432bail:
c_mtharud91205a2017-11-07 16:01:06 +05302433 kfree(proc_name);
c_mtharue1a5ce12017-10-13 20:47:09 +05302434 if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
2435 me->staticpd_flags = 0;
2436 if (mem && err) {
2437 if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
2438 hyp_assign_phys(mem->phys, (uint64_t)mem->size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302439 me->channel[fl->cid].rhvm.vmid,
2440 me->channel[fl->cid].rhvm.vmcount,
2441 hlosvm, hlosvmperm, 1);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302442 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302443 fastrpc_mmap_free(mem, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302444 mutex_unlock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302445 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302446 if (file) {
2447 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302448 fastrpc_mmap_free(file, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302449 mutex_unlock(&fl->fl_map_mutex);
2450 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002451 return err;
2452}
2453
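/*
 * Query the DSP capability attributes for a domain through the static
 * DSP-utilities handle.  dsp_attr_buf[0] reports whether the domain is
 * up at all; the remaining attributes are filled in by the remote call.
 */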
Tharun Kumar Meruguebe00202019-04-05 03:34:28 +05302454static int fastrpc_get_info_from_dsp(struct fastrpc_file *fl,
Edgar Floresebdc05e2019-08-22 18:12:10 -07002455 uint32_t *dsp_attr_buf,
2456 uint32_t dsp_attr_buf_len,
Tharun Kumar Meruguebe00202019-04-05 03:34:28 +05302457 uint32_t domain)
2458{
Edgar Floresebdc05e2019-08-22 18:12:10 -07002459 int err = 0, dsp_support = 0;
Tharun Kumar Meruguebe00202019-04-05 03:34:28 +05302460 struct fastrpc_ioctl_invoke_crc ioctl;
2461 remote_arg_t ra[2];
2462 struct fastrpc_apps *me = &gfa;
2463
2464 // Querying device about DSP support
2465 switch (domain) {
2466 case ADSP_DOMAIN_ID:
2467 case SDSP_DOMAIN_ID:
2468 case CDSP_DOMAIN_ID:
2469 if (me->channel[domain].issubsystemup)
2470 dsp_support = 1;
2471 break;
2472 case MDSP_DOMAIN_ID:
2473		// Modem is not supported for fastRPC
2474 break;
2475 default:
2476 dsp_support = 0;
2477 break;
2478 }
Edgar Floresebdc05e2019-08-22 18:12:10 -07002479 dsp_attr_buf[0] = dsp_support;
Tharun Kumar Meruguebe00202019-04-05 03:34:28 +05302480
2481 if (dsp_support == 0) {
2482 err = -ENOTCONN;
2483 goto bail;
2484 }
2485
2486 err = fastrpc_channel_open(fl);
2487 if (err)
2488 goto bail;
2489
Edgar Floresebdc05e2019-08-22 18:12:10 -07002490 ra[0].buf.pv = (void *)&dsp_attr_buf_len;
2491 ra[0].buf.len = sizeof(dsp_attr_buf_len);
2492 ra[1].buf.pv = (void *)(&dsp_attr_buf[1]);
2493 ra[1].buf.len = dsp_attr_buf_len * sizeof(uint32_t);
Tharun Kumar Meruguebe00202019-04-05 03:34:28 +05302494 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_DSP_UTILITIES;
2495 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 1);
2496 ioctl.inv.pra = ra;
2497 ioctl.fds = NULL;
2498 ioctl.attrs = NULL;
2499 ioctl.crc = NULL;
2500 fl->pd = 1;
2501
2502 err = fastrpc_internal_invoke(fl, FASTRPC_MODE_PARALLEL, 1, &ioctl);
2503bail:
2504
2505 if (err)
2506 pr_err("adsprpc: %s: %s: could not obtain dsp information, err val 0x%x\n",
2507 current->comm, __func__, err);
2508 return err;
2509}
2510
2511static int fastrpc_get_info_from_kernel(
2512 struct fastrpc_ioctl_dsp_capabilities *dsp_cap,
2513 struct fastrpc_file *fl)
2514{
2515 int err = 0;
2516 uint32_t domain_support;
2517 uint32_t domain = dsp_cap->domain;
2518
2519 if (!gcinfo[domain].dsp_cap_kernel.is_cached) {
2520 /*
2521		 * Information is not cached in the kernel; query the DSP
2522		 * for it and cache the result in the kernel
2523 */
2524 err = fastrpc_get_info_from_dsp(fl, dsp_cap->dsp_attributes,
Edgar Floresebdc05e2019-08-22 18:12:10 -07002525 FASTRPC_MAX_DSP_ATTRIBUTES - 1,
Tharun Kumar Meruguebe00202019-04-05 03:34:28 +05302526 domain);
2527 if (err)
2528 goto bail;
2529
2530 domain_support = dsp_cap->dsp_attributes[0];
2531 switch (domain_support) {
2532 case 0:
2533 memset(dsp_cap->dsp_attributes, 0,
2534 sizeof(dsp_cap->dsp_attributes));
2535 memset(&gcinfo[domain].dsp_cap_kernel.dsp_attributes,
2536 0, sizeof(dsp_cap->dsp_attributes));
2537 break;
2538 case 1:
2539 memcpy(&gcinfo[domain].dsp_cap_kernel.dsp_attributes,
2540 dsp_cap->dsp_attributes,
2541 sizeof(dsp_cap->dsp_attributes));
2542 break;
2543 default:
2544 err = -1;
2545 /*
2546 * Reset is_cached flag to 0 so subsequent calls
2547 * can try to query dsp again
2548 */
2549 gcinfo[domain].dsp_cap_kernel.is_cached = 0;
2550 pr_warn("adsprpc: %s: %s: returned bad domain support value %d\n",
2551 current->comm,
2552 __func__,
2553 domain_support);
2554 goto bail;
2555 }
2556 gcinfo[domain].dsp_cap_kernel.is_cached = 1;
2557 } else {
2558		// Information is cached in the kernel; pass it to the user
2559 memcpy(dsp_cap->dsp_attributes,
2560 &gcinfo[domain].dsp_cap_kernel.dsp_attributes,
2561 sizeof(dsp_cap->dsp_attributes));
2562 }
2563bail:
2564 return err;
2565}
2566
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002567static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
2568{
2569 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07002570 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002571 remote_arg_t ra[1];
2572 int tgid = 0;
2573
Sathish Ambley36849af2017-02-02 09:35:55 -08002574 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
2575 if (err)
2576 goto bail;
Jeya R984a1a32021-01-18 15:38:07 +05302577 VERIFY(err, fl->sctx != NULL);
2578 if (err)
2579 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302580 VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002581 if (err)
2582 goto bail;
2583 tgid = fl->tgid;
2584 ra[0].buf.pv = (void *)&tgid;
2585 ra[0].buf.len = sizeof(tgid);
Tharun Kumar Meruguebe00202019-04-05 03:34:28 +05302586 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_PROCESS_GROUP;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002587 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
2588 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302589 ioctl.fds = NULL;
2590 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002591 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002592 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2593 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2594bail:
2595 return err;
2596}
2597
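/*
 * Ask the remote process to map the given physical range and return
 * the remote virtual address in *raddr.  For ADSP_MMAP_HEAP_ADDR the
 * memory is additionally protected through an SCM call, and for
 * ADSP_MMAP_REMOTE_HEAP_ADDR it is hyp-assigned to the remote-heap
 * VMIDs.
 */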
2598static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302599 uintptr_t va, uint64_t phys,
2600 size_t size, uintptr_t *raddr)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002601{
Sathish Ambleybae51902017-07-03 15:00:49 -07002602 struct fastrpc_ioctl_invoke_crc ioctl;
c_mtharu63ffc012017-11-16 15:26:56 +05302603 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002604 struct smq_phy_page page;
2605 int num = 1;
2606 remote_arg_t ra[3];
2607 int err = 0;
2608 struct {
2609 int pid;
2610 uint32_t flags;
2611 uintptr_t vaddrin;
2612 int num;
2613 } inargs;
2614 struct {
2615 uintptr_t vaddrout;
2616 } routargs;
2617
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302618 inargs.pid = fl->tgid;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302619 inargs.vaddrin = (uintptr_t)va;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002620 inargs.flags = flags;
2621 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
2622 ra[0].buf.pv = (void *)&inargs;
2623 ra[0].buf.len = sizeof(inargs);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302624 page.addr = phys;
2625 page.size = size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002626 ra[1].buf.pv = (void *)&page;
2627 ra[1].buf.len = num * sizeof(page);
2628
2629 ra[2].buf.pv = (void *)&routargs;
2630 ra[2].buf.len = sizeof(routargs);
2631
Tharun Kumar Meruguebe00202019-04-05 03:34:28 +05302632 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_PROCESS_GROUP;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002633 if (fl->apps->compat)
2634 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
2635 else
2636 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
2637 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302638 ioctl.fds = NULL;
2639 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002640 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002641 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2642 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302643 *raddr = (uintptr_t)routargs.vaddrout;
c_mtharue1a5ce12017-10-13 20:47:09 +05302644 if (err)
2645 goto bail;
2646 if (flags == ADSP_MMAP_HEAP_ADDR) {
2647 struct scm_desc desc = {0};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002648
c_mtharue1a5ce12017-10-13 20:47:09 +05302649 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302650 desc.args[1] = phys;
2651 desc.args[2] = size;
c_mtharue1a5ce12017-10-13 20:47:09 +05302652 desc.arginfo = SCM_ARGS(3);
2653 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
2654 TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
2655 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302656 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302657 hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
2658 me->channel[fl->cid].rhvm.vmperm,
2659 me->channel[fl->cid].rhvm.vmcount));
c_mtharue1a5ce12017-10-13 20:47:09 +05302660 if (err)
2661 goto bail;
2662 }
2663bail:
2664 return err;
2665}
2666
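/*
 * Undo the protection applied when a heap mapping was created: for
 * ADSP_MMAP_HEAP_ADDR fetch the security key from the DSP and clear
 * the protection through an SCM call; for ADSP_MMAP_REMOTE_HEAP_ADDR
 * hyp-assign the memory back to HLOS.
 */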
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302667static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, uint64_t phys,
2668 size_t size, uint32_t flags)
c_mtharue1a5ce12017-10-13 20:47:09 +05302669{
2670 int err = 0;
c_mtharu63ffc012017-11-16 15:26:56 +05302671 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugu72c90252019-08-29 18:36:08 +05302672 int tgid = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302673 int destVM[1] = {VMID_HLOS};
2674 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
2675
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302676 if (flags == ADSP_MMAP_HEAP_ADDR) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302677 struct fastrpc_ioctl_invoke_crc ioctl;
2678 struct scm_desc desc = {0};
Tharun Kumar Merugu72c90252019-08-29 18:36:08 +05302679 remote_arg_t ra[2];
2680
c_mtharue1a5ce12017-10-13 20:47:09 +05302681 struct {
2682 uint8_t skey;
2683 } routargs;
2684
Tharun Kumar Merugu72c90252019-08-29 18:36:08 +05302685 if (fl == NULL)
2686 goto bail;
2687 tgid = fl->tgid;
2688 ra[0].buf.pv = (void *)&tgid;
2689 ra[0].buf.len = sizeof(tgid);
2690 ra[1].buf.pv = (void *)&routargs;
2691 ra[1].buf.len = sizeof(routargs);
c_mtharue1a5ce12017-10-13 20:47:09 +05302692
Tharun Kumar Meruguebe00202019-04-05 03:34:28 +05302693 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_PROCESS_GROUP;
Tharun Kumar Merugu72c90252019-08-29 18:36:08 +05302694 ioctl.inv.sc = REMOTE_SCALARS_MAKE(9, 1, 1);
c_mtharue1a5ce12017-10-13 20:47:09 +05302695 ioctl.inv.pra = ra;
2696 ioctl.fds = NULL;
2697 ioctl.attrs = NULL;
2698 ioctl.crc = NULL;
Tharun Kumar Merugu72c90252019-08-29 18:36:08 +05302699
c_mtharue1a5ce12017-10-13 20:47:09 +05302700
2701 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2702 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
Mohammed Nayeem Ur Rahman80f45dc2019-09-23 19:35:19 +05302703 if (err == AEE_EUNSUPPORTED) {
2704 remote_arg_t ra[1];
2705
2706 pr_warn("ADSPRPC:Failed to get security key with updated remote call, falling back to older method");
2707 ra[0].buf.pv = (void *)&routargs;
2708 ra[0].buf.len = sizeof(routargs);
2709 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
2710 ioctl.inv.pra = ra;
2711 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2712 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2713 }
c_mtharue1a5ce12017-10-13 20:47:09 +05302714 if (err)
2715 goto bail;
Mohammed Nayeem Ur Rahman80f45dc2019-09-23 19:35:19 +05302716
c_mtharue1a5ce12017-10-13 20:47:09 +05302717 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302718 desc.args[1] = phys;
2719 desc.args[2] = size;
c_mtharue1a5ce12017-10-13 20:47:09 +05302720 desc.args[3] = routargs.skey;
2721 desc.arginfo = SCM_ARGS(4);
2722 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
2723 TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302724 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2725 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302726 me->channel[fl->cid].rhvm.vmid,
2727 me->channel[fl->cid].rhvm.vmcount,
2728 destVM, destVMperm, 1));
c_mtharue1a5ce12017-10-13 20:47:09 +05302729 if (err)
2730 goto bail;
2731 }
2732
2733bail:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002734 return err;
2735}
2736
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302737static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl, uintptr_t raddr,
2738 uint64_t phys, size_t size, uint32_t flags)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002739{
Sathish Ambleybae51902017-07-03 15:00:49 -07002740 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002741 remote_arg_t ra[1];
2742 int err = 0;
2743 struct {
2744 int pid;
2745 uintptr_t vaddrout;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302746 size_t size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002747 } inargs;
2748
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302749 inargs.pid = fl->tgid;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302750 inargs.size = size;
2751 inargs.vaddrout = raddr;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002752 ra[0].buf.pv = (void *)&inargs;
2753 ra[0].buf.len = sizeof(inargs);
2754
Tharun Kumar Meruguebe00202019-04-05 03:34:28 +05302755 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_PROCESS_GROUP;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002756 if (fl->apps->compat)
2757 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
2758 else
2759 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
2760 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302761 ioctl.fds = NULL;
2762 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002763 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002764 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2765 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
c_mtharue1a5ce12017-10-13 20:47:09 +05302766 if (err)
2767 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302768 if (flags == ADSP_MMAP_HEAP_ADDR ||
2769 flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2770 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, phys, size, flags));
c_mtharue1a5ce12017-10-13 20:47:09 +05302771 if (err)
2772 goto bail;
2773 }
2774bail:
2775 return err;
2776}
2777
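/*
 * After a subsystem restart, walk the global map list and release
 * every remote-heap mapping, capturing each region in an ELF ramdump
 * first when ramdump collection is enabled.
 */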
2778static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
2779{
2780 struct fastrpc_mmap *match = NULL, *map = NULL;
2781 struct hlist_node *n = NULL;
2782 int err = 0, ret = 0;
2783 struct fastrpc_apps *me = &gfa;
2784 struct ramdump_segment *ramdump_segments_rh = NULL;
2785
2786 do {
2787 match = NULL;
2788 spin_lock(&me->hlock);
2789 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
2790 match = map;
2791 hlist_del_init(&map->hn);
2792 break;
2793 }
2794 spin_unlock(&me->hlock);
2795
2796 if (match) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302797 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match->phys,
2798 match->size, match->flags));
c_mtharue1a5ce12017-10-13 20:47:09 +05302799 if (err)
2800 goto bail;
2801 if (me->channel[0].ramdumpenabled) {
2802 ramdump_segments_rh = kcalloc(1,
2803 sizeof(struct ramdump_segment), GFP_KERNEL);
2804 if (ramdump_segments_rh) {
2805 ramdump_segments_rh->address =
2806 match->phys;
2807 ramdump_segments_rh->size = match->size;
2808 ret = do_elf_ramdump(
2809 me->channel[0].remoteheap_ramdump_dev,
2810 ramdump_segments_rh, 1);
2811 if (ret < 0)
2812						pr_err("ADSPRPC: unable to dump heap\n");
2813 kfree(ramdump_segments_rh);
2814 }
2815 }
c_mtharu7bd6a422017-10-17 18:15:37 +05302816 fastrpc_mmap_free(match, 0);
c_mtharue1a5ce12017-10-13 20:47:09 +05302817 }
2818 } while (match);
2819bail:
2820 if (err && match)
2821 fastrpc_mmap_add(match);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002822 return err;
2823}
2824
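/*
 * fastrpc_mmap_remove_pdr() - protection-domain-restart counterpart of the
 * SSR cleanup above for the audio PD: if the PD restart count has advanced
 * since the last cleanup the remote heap is unmapped, and -ENOTCONN is
 * returned while the PD is still down.
 */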
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302825static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl)
2826{
2827 struct fastrpc_apps *me = &gfa;
2828 int session = 0, err = 0;
2829
2830 VERIFY(err, !fastrpc_get_adsp_session(
2831 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
2832 if (err)
2833 goto bail;
2834 if (me->channel[fl->cid].spd[session].pdrcount !=
2835 me->channel[fl->cid].spd[session].prevpdrcount) {
2836 if (fastrpc_mmap_remove_ssr(fl))
2837 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
2838 me->channel[fl->cid].spd[session].prevpdrcount =
2839 me->channel[fl->cid].spd[session].pdrcount;
2840 }
2841 if (!me->channel[fl->cid].spd[session].ispdup) {
2842 VERIFY(err, 0);
2843 if (err) {
2844 err = -ENOTCONN;
2845 goto bail;
2846 }
2847 }
2848bail:
2849 return err;
2850}
2851
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002852static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302853 size_t len, struct fastrpc_mmap **ppmap);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002854
2855static void fastrpc_mmap_add(struct fastrpc_mmap *map);
2856
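/*
 * Helpers that convert between the explicit 64-bit ioctl layouts
 * (fastrpc_ioctl_mmap_64 / fastrpc_ioctl_munmap_64) and the internal
 * fastrpc_ioctl_mmap / fastrpc_ioctl_munmap forms, so the *_64 ioctls can
 * reuse the same internal mmap/munmap paths.
 */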
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05302857static inline void get_fastrpc_ioctl_mmap_64(
2858 struct fastrpc_ioctl_mmap_64 *mmap64,
2859 struct fastrpc_ioctl_mmap *immap)
2860{
2861 immap->fd = mmap64->fd;
2862 immap->flags = mmap64->flags;
2863 immap->vaddrin = (uintptr_t)mmap64->vaddrin;
2864 immap->size = mmap64->size;
2865}
2866
2867static inline void put_fastrpc_ioctl_mmap_64(
2868 struct fastrpc_ioctl_mmap_64 *mmap64,
2869 struct fastrpc_ioctl_mmap *immap)
2870{
2871 mmap64->vaddrout = (uint64_t)immap->vaddrout;
2872}
2873
2874static inline void get_fastrpc_ioctl_munmap_64(
2875 struct fastrpc_ioctl_munmap_64 *munmap64,
2876 struct fastrpc_ioctl_munmap *imunmap)
2877{
2878 imunmap->vaddrout = (uintptr_t)munmap64->vaddrout;
2879 imunmap->size = munmap64->size;
2880}
2881
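/*
 * fastrpc_internal_munmap() - handler for FASTRPC_IOCTL_MUNMAP. A buffer
 * created with ADSP_MMAP_ADD_PAGES is matched on fl->remote_bufs by remote
 * address and size and released through fastrpc_buf_free(); any other
 * mapping is looked up on the per-file map list, unmapped on the DSP and
 * freed. If the remote unmap fails the map is added back to keep the
 * bookkeeping consistent.
 */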
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002882static int fastrpc_internal_munmap(struct fastrpc_file *fl,
2883 struct fastrpc_ioctl_munmap *ud)
2884{
2885 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302886 struct fastrpc_mmap *map = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302887 struct fastrpc_buf *rbuf = NULL, *free = NULL;
2888 struct hlist_node *n;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002889
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302890 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302891
2892 spin_lock(&fl->hlock);
2893 hlist_for_each_entry_safe(rbuf, n, &fl->remote_bufs, hn_rem) {
2894 if (rbuf->raddr && (rbuf->flags == ADSP_MMAP_ADD_PAGES)) {
2895 if ((rbuf->raddr == ud->vaddrout) &&
2896 (rbuf->size == ud->size)) {
2897 free = rbuf;
2898 break;
2899 }
2900 }
2901 }
2902 spin_unlock(&fl->hlock);
2903
2904 if (free) {
2905 VERIFY(err, !fastrpc_munmap_on_dsp(fl, free->raddr,
2906 free->phys, free->size, free->flags));
2907 if (err)
2908 goto bail;
2909 fastrpc_buf_free(rbuf, 0);
2910 mutex_unlock(&fl->map_mutex);
2911 return err;
2912 }
2913
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302914 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002915 VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302916 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002917 if (err)
2918 goto bail;
Vamsi krishna Gattupalli4e87ecb2020-12-14 11:06:27 +05302919 VERIFY(err, map != NULL);
2920 if (err) {
2921 err = -EINVAL;
2922 goto bail;
2923 }
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302924 VERIFY(err, !fastrpc_munmap_on_dsp(fl, map->raddr,
Vamsi krishna Gattupalli4e87ecb2020-12-14 11:06:27 +05302925 map->phys, map->size, map->flags));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002926 if (err)
2927 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302928 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302929 fastrpc_mmap_free(map, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302930 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002931bail:
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302932 if (err && map) {
2933 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002934 fastrpc_mmap_add(map);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302935 mutex_unlock(&fl->fl_map_mutex);
2936 }
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302937 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002938 return err;
2939}
2940
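/*
 * fastrpc_internal_munmap_fd() - handler for FASTRPC_IOCTL_MUNMAP_FD:
 * look up the map tracked for the given fd/va/len tuple and free the
 * kernel-side state; no unmap request is sent to the DSP here.
 */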
c_mtharu7bd6a422017-10-17 18:15:37 +05302941static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
2942 struct fastrpc_ioctl_munmap_fd *ud) {
2943 int err = 0;
2944 struct fastrpc_mmap *map = NULL;
2945
2946 VERIFY(err, (fl && ud));
2947 if (err)
2948 goto bail;
Mohammed Nayeem Ur Rahmanfc548a52020-01-07 17:07:55 +05302949 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302950 mutex_lock(&fl->fl_map_mutex);
Tharun Kumar Merugu09fc6152018-02-16 13:13:12 +05302951 if (fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
2952		pr_err("adsprpc: no mapping found to unmap fd %d, va 0x%llx, len 0x%x\n",
c_mtharu7bd6a422017-10-17 18:15:37 +05302953 ud->fd, (unsigned long long)ud->va,
2954 (unsigned int)ud->len);
2955 err = -1;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302956 mutex_unlock(&fl->fl_map_mutex);
Mohammed Nayeem Ur Rahmanfc548a52020-01-07 17:07:55 +05302957 mutex_unlock(&fl->map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302958 goto bail;
2959 }
2960 if (map)
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302961 fastrpc_mmap_free(map, 0);
2962 mutex_unlock(&fl->fl_map_mutex);
Mohammed Nayeem Ur Rahmanfc548a52020-01-07 17:07:55 +05302963 mutex_unlock(&fl->map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302964bail:
2965 return err;
2966}
2967
2968
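/*
 * fastrpc_internal_mmap() - handler for FASTRPC_IOCTL_MMAP.
 * ADSP_MMAP_ADD_PAGES allocates fresh kernel pages (a user address must
 * not be supplied) and maps them on the DSP; other flags map an existing
 * buffer described by fd/vaddrin/size, reusing a matching map if one was
 * already created. The remote address returned by the DSP is reported
 * back through ud->vaddrout.
 */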
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002969static int fastrpc_internal_mmap(struct fastrpc_file *fl,
2970 struct fastrpc_ioctl_mmap *ud)
2971{
2972
c_mtharue1a5ce12017-10-13 20:47:09 +05302973 struct fastrpc_mmap *map = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302974 struct fastrpc_buf *rbuf = NULL;
2975 unsigned long dma_attr = 0;
2976 uintptr_t raddr = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002977 int err = 0;
2978
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302979 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302980 if (ud->flags == ADSP_MMAP_ADD_PAGES) {
2981 if (ud->vaddrin) {
2982 err = -EINVAL;
2983 pr_err("adsprpc: %s: %s: ERROR: adding user allocated pages is not supported\n",
2984 current->comm, __func__);
2985 goto bail;
2986 }
2987 dma_attr = DMA_ATTR_EXEC_MAPPING |
2988 DMA_ATTR_NO_KERNEL_MAPPING |
2989 DMA_ATTR_FORCE_NON_COHERENT;
2990 err = fastrpc_buf_alloc(fl, ud->size, dma_attr, ud->flags,
2991 1, &rbuf);
2992 if (err)
2993 goto bail;
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302994 err = fastrpc_mmap_on_dsp(fl, ud->flags, 0,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302995 rbuf->phys, rbuf->size, &raddr);
2996 if (err)
2997 goto bail;
2998 rbuf->raddr = raddr;
2999 } else {
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05303000
3001 uintptr_t va_to_dsp;
3002
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303003 mutex_lock(&fl->fl_map_mutex);
3004 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
3005 ud->size, ud->flags, 1, &map)) {
Mohammed Nayeem Ur Rahmanaf5f6102019-10-09 13:36:52 +05303006 ud->vaddrout = map->raddr;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303007 mutex_unlock(&fl->fl_map_mutex);
3008 mutex_unlock(&fl->map_mutex);
3009 return 0;
3010 }
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05303011
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303012 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
3013 (uintptr_t)ud->vaddrin, ud->size,
3014 ud->flags, &map));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303015 mutex_unlock(&fl->fl_map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303016 if (err)
3017 goto bail;
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05303018
3019 if (ud->flags == ADSP_MMAP_HEAP_ADDR ||
3020 ud->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
3021 va_to_dsp = 0;
3022 else
3023 va_to_dsp = (uintptr_t)map->va;
3024 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, va_to_dsp,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303025 map->phys, map->size, &raddr));
3026 if (err)
3027 goto bail;
3028 map->raddr = raddr;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05303029 }
Mohammed Nayeem Ur Rahman3ac5d322018-09-24 13:54:08 +05303030 ud->vaddrout = raddr;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003031 bail:
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303032 if (err && map) {
3033 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05303034 fastrpc_mmap_free(map, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303035 mutex_unlock(&fl->fl_map_mutex);
3036 }
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05303037 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003038 return err;
3039}
3040
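/*
 * fastrpc_channel_close() - kref release callback for a channel. Closes
 * the glink port when the glink transport is in use and releases the
 * smd_mutex taken by the kref_put_mutex() caller.
 */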
3041static void fastrpc_channel_close(struct kref *kref)
3042{
3043 struct fastrpc_apps *me = &gfa;
3044 struct fastrpc_channel_ctx *ctx;
3045 int cid;
3046
3047 ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
3048 cid = ctx - &gcinfo[0];
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05303049 if (me->glink) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303050 fastrpc_glink_close(ctx->chan, cid);
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05303051 ctx->chan = NULL;
3052 }
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303053 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003054 pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
3055 MAJOR(me->dev_no), cid);
3056}
3057
3058static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
3059
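/*
 * fastrpc_session_alloc_locked() - pick an SMMU context-bank session for a
 * file: either a shared context bank, or the first unused session whose
 * secure attribute matches the request; without per-channel sessions the
 * single device-wide session is used instead.
 */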
3060static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303061 int secure, int sharedcb, struct fastrpc_session_ctx **session)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003062{
3063 struct fastrpc_apps *me = &gfa;
3064 int idx = 0, err = 0;
3065
3066 if (chan->sesscount) {
3067 for (idx = 0; idx < chan->sesscount; ++idx) {
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303068 if ((sharedcb && chan->session[idx].smmu.sharedcb) ||
3069 (!chan->session[idx].used &&
3070 chan->session[idx].smmu.secure
3071 == secure && !sharedcb)) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003072 chan->session[idx].used = 1;
3073 break;
3074 }
3075 }
3076 VERIFY(err, idx < chan->sesscount);
3077 if (err)
3078 goto bail;
3079 chan->session[idx].smmu.faults = 0;
3080 } else {
3081 VERIFY(err, me->dev != NULL);
3082 if (err)
3083 goto bail;
3084 chan->session[0].dev = me->dev;
c_mtharue1a5ce12017-10-13 20:47:09 +05303085 chan->session[0].smmu.dev = me->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003086 }
3087
3088 *session = &chan->session[idx];
3089 bail:
3090 return err;
3091}
3092
c_mtharue1a5ce12017-10-13 20:47:09 +05303093static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
3094 size_t size)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003095{
3096 if (glink_queue_rx_intent(h, NULL, size))
3097 return false;
3098 return true;
3099}
3100
c_mtharue1a5ce12017-10-13 20:47:09 +05303101static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003102 const void *pkt_priv, const void *ptr)
3103{
3104}
3105
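/*
 * fastrpc_glink_notify_rx() - glink receive callback. The context index is
 * recovered from the response cookie, validated against the global context
 * table under ctxlock, and the waiting invocation is completed with the
 * DSP's return value. Invalid responses are logged and their receive
 * buffer is returned to glink immediately.
 */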
c_mtharue1a5ce12017-10-13 20:47:09 +05303106static void fastrpc_glink_notify_rx(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003107 const void *pkt_priv, const void *ptr, size_t size)
3108{
3109 struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05303110 struct fastrpc_apps *me = &gfa;
3111 uint32_t index;
c_mtharufdac6892017-10-12 13:09:01 +05303112 int err = 0;
Jeya R8fa59d62020-11-04 20:42:59 +05303113 unsigned long irq_flags = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003114
c_mtharufdac6892017-10-12 13:09:01 +05303115 VERIFY(err, (rsp && size >= sizeof(*rsp)));
3116 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05303117 goto bail;
3118
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05303119 index = (uint32_t)((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
3120 VERIFY(err, index < FASTRPC_CTX_MAX);
c_mtharufdac6892017-10-12 13:09:01 +05303121 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05303122 goto bail;
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05303123
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05303124 VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
3125 if (err)
3126 goto bail;
3127
Jeya R8fa59d62020-11-04 20:42:59 +05303128 spin_lock_irqsave(&me->ctxlock, irq_flags);
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303129 VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp->ctx & ~3)) &&
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05303130 me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
Jeya R8fa59d62020-11-04 20:42:59 +05303131 if (err) {
3132 spin_unlock_irqrestore(&me->ctxlock, irq_flags);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05303133 goto bail;
Jeya R8fa59d62020-11-04 20:42:59 +05303134 }
Mohammed Nayeem Ur Rahman32ba95d2019-07-26 17:31:37 +05303135 me->ctxtable[index]->handle = handle;
3136 me->ctxtable[index]->ptr = ptr;
Jeya R8fa59d62020-11-04 20:42:59 +05303137 spin_unlock_irqrestore(&me->ctxlock, irq_flags);
Mohammed Nayeem Ur Rahman32ba95d2019-07-26 17:31:37 +05303138
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05303139 context_notify_user(me->ctxtable[index], rsp->retval);
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05303140bail:
Jeya R859f8012020-08-09 02:09:14 +05303141 if (err) {
3142 glink_rx_done(handle, ptr, true);
c_mtharufdac6892017-10-12 13:09:01 +05303143 pr_err("adsprpc: invalid response or context\n");
Jeya R859f8012020-08-09 02:09:14 +05303144 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003145}
3146
c_mtharue1a5ce12017-10-13 20:47:09 +05303147static void fastrpc_glink_notify_state(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003148 unsigned int event)
3149{
3150 struct fastrpc_apps *me = &gfa;
3151 int cid = (int)(uintptr_t)priv;
3152 struct fastrpc_glink_info *link;
3153
3154 if (cid < 0 || cid >= NUM_CHANNELS)
3155 return;
3156 link = &me->channel[cid].link;
3157 switch (event) {
3158 case GLINK_CONNECTED:
3159 link->port_state = FASTRPC_LINK_CONNECTED;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05303160 complete(&me->channel[cid].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003161 break;
3162 case GLINK_LOCAL_DISCONNECTED:
3163 link->port_state = FASTRPC_LINK_DISCONNECTED;
3164 break;
3165 case GLINK_REMOTE_DISCONNECTED:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003166 break;
3167 default:
3168 break;
3169 }
3170}
3171
3172static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
3173 struct fastrpc_session_ctx **session)
3174{
3175 int err = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303176 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003177
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303178 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003179 if (!*session)
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303180 err = fastrpc_session_alloc_locked(chan, secure, 0, session);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303181 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003182 return err;
3183}
3184
3185static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
3186 struct fastrpc_session_ctx *session)
3187{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303188 struct fastrpc_apps *me = &gfa;
3189
3190 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003191 session->used = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303192 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003193}
3194
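/*
 * fastrpc_file_free() - tear down all per-fd state on release: the remote
 * process, pending contexts, cached and remote buffers, maps, perf
 * counters, the channel reference and the SMMU sessions, before the
 * fastrpc_file itself is freed.
 */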
3195static int fastrpc_file_free(struct fastrpc_file *fl)
3196{
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303197 struct hlist_node *n = NULL;
Tharun Kumar Merugu3e966762018-04-04 10:56:44 +05303198 struct fastrpc_mmap *map = NULL, *lmap = NULL;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303199 struct fastrpc_perf *perf = NULL, *fperf = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003200 int cid;
3201
3202 if (!fl)
3203 return 0;
3204 cid = fl->cid;
3205
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05303206 (void)fastrpc_release_current_dsp_process(fl);
3207
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003208 spin_lock(&fl->apps->hlock);
3209 hlist_del_init(&fl->hn);
3210 spin_unlock(&fl->apps->hlock);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303211 kfree(fl->debug_buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003212
Sathish Ambleyd7fbcbb2017-03-08 10:55:48 -08003213 if (!fl->sctx) {
3214 kfree(fl);
3215 return 0;
3216 }
tharun kumar9f899ea2017-07-03 17:07:03 +05303217 spin_lock(&fl->hlock);
3218 fl->file_close = 1;
3219 spin_unlock(&fl->hlock);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303220 if (!IS_ERR_OR_NULL(fl->init_mem))
3221 fastrpc_buf_free(fl->init_mem, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003222 fastrpc_context_list_dtor(fl);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303223 fastrpc_cached_buf_list_free(fl);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303224 mutex_lock(&fl->fl_map_mutex);
Tharun Kumar Merugu3e966762018-04-04 10:56:44 +05303225 do {
3226 lmap = NULL;
3227 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
3228 hlist_del_init(&map->hn);
3229 lmap = map;
3230 break;
3231 }
3232 fastrpc_mmap_free(lmap, 1);
3233 } while (lmap);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303234 mutex_unlock(&fl->fl_map_mutex);
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303235 if (fl->refcount && (fl->ssrcount == fl->apps->channel[cid].ssrcount))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003236 kref_put_mutex(&fl->apps->channel[cid].kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303237 fastrpc_channel_close, &fl->apps->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003238 if (fl->sctx)
3239 fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
3240 if (fl->secsctx)
3241 fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303242
3243 mutex_lock(&fl->perf_mutex);
3244 do {
3245 struct hlist_node *pn = NULL;
3246
3247 fperf = NULL;
3248 hlist_for_each_entry_safe(perf, pn, &fl->perf, hn) {
3249 hlist_del_init(&perf->hn);
3250 fperf = perf;
3251 break;
3252 }
3253 kfree(fperf);
3254 } while (fperf);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303255 fastrpc_remote_buf_list_free(fl);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303256 mutex_unlock(&fl->perf_mutex);
3257 mutex_destroy(&fl->perf_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303258 mutex_destroy(&fl->fl_map_mutex);
Tharun Kumar Merugu8714e642018-05-17 15:21:08 +05303259 mutex_destroy(&fl->map_mutex);
Jeya Rb9090542021-06-10 13:03:44 +05303260 mutex_destroy(&fl->pm_qos_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003261 kfree(fl);
3262 return 0;
3263}
3264
3265static int fastrpc_device_release(struct inode *inode, struct file *file)
3266{
3267 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
3268
3269 if (fl) {
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303270 if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
3271 pm_qos_remove_request(&fl->pm_qos_req);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003272 if (fl->debugfs_file != NULL)
3273 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003274 fastrpc_file_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +05303275 file->private_data = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003276 }
3277 return 0;
3278}
3279
3280static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
3281 void *priv)
3282{
3283 struct fastrpc_apps *me = &gfa;
3284 int cid = (int)((uintptr_t)priv);
3285 struct fastrpc_glink_info *link;
3286
3287 if (cid < 0 || cid >= NUM_CHANNELS)
3288 return;
3289
3290 link = &me->channel[cid].link;
3291 switch (cb_info->link_state) {
3292 case GLINK_LINK_STATE_UP:
3293 link->link_state = FASTRPC_LINK_STATE_UP;
3294 complete(&me->channel[cid].work);
3295 break;
3296 case GLINK_LINK_STATE_DOWN:
3297 link->link_state = FASTRPC_LINK_STATE_DOWN;
3298 break;
3299 default:
3300 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
3301 break;
3302 }
3303}
3304
3305static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
3306{
3307 int err = 0;
3308 struct fastrpc_glink_info *link;
3309
3310 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3311 if (err)
3312 goto bail;
3313
3314 link = &me->channel[cid].link;
3315 if (link->link_notify_handle != NULL)
3316 goto bail;
3317
3318 link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
3319 link->link_notify_handle = glink_register_link_state_cb(
3320 &link->link_info,
3321 (void *)((uintptr_t)cid));
3322 VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
3323 if (err) {
3324 link->link_notify_handle = NULL;
3325 goto bail;
3326 }
3327 VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
3328 RPC_TIMEOUT));
3329bail:
3330 return err;
3331}
3332
3333static void fastrpc_glink_close(void *chan, int cid)
3334{
3335 int err = 0;
3336 struct fastrpc_glink_info *link;
3337
3338 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3339 if (err)
3340 return;
3341 link = &gfa.channel[cid].link;
3342
c_mtharu314a4202017-11-15 22:09:17 +05303343 if (link->port_state == FASTRPC_LINK_CONNECTED ||
3344 link->port_state == FASTRPC_LINK_REMOTE_DISCONNECTING) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003345 link->port_state = FASTRPC_LINK_DISCONNECTING;
3346 glink_close(chan);
3347 }
3348}
3349
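/*
 * fastrpc_glink_open() - open the glink port for a channel once the link
 * is up and the port is fully disconnected. Registers the rx/tx/state
 * callbacks and stores the returned handle in me->channel[cid].chan.
 */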
3350static int fastrpc_glink_open(int cid)
3351{
3352 int err = 0;
3353 void *handle = NULL;
3354 struct fastrpc_apps *me = &gfa;
3355 struct glink_open_config *cfg;
3356 struct fastrpc_glink_info *link;
3357
3358 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3359 if (err)
3360 goto bail;
3361 link = &me->channel[cid].link;
3362 cfg = &me->channel[cid].link.cfg;
3363 VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
3364 if (err)
3365 goto bail;
3366
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05303367 VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
3368 if (err)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003369 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003370
3371 link->port_state = FASTRPC_LINK_CONNECTING;
3372 cfg->priv = (void *)(uintptr_t)cid;
3373 cfg->edge = gcinfo[cid].link.link_info.edge;
3374 cfg->transport = gcinfo[cid].link.link_info.transport;
3375 cfg->name = FASTRPC_GLINK_GUID;
3376 cfg->notify_rx = fastrpc_glink_notify_rx;
3377 cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
3378 cfg->notify_state = fastrpc_glink_notify_state;
3379 cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
3380 handle = glink_open(cfg);
3381 VERIFY(err, !IS_ERR_OR_NULL(handle));
c_mtharu6e1d26b2017-10-09 16:05:24 +05303382 if (err) {
3383 if (link->port_state == FASTRPC_LINK_CONNECTING)
3384 link->port_state = FASTRPC_LINK_DISCONNECTED;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003385 goto bail;
c_mtharu6e1d26b2017-10-09 16:05:24 +05303386 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003387 me->channel[cid].chan = handle;
3388bail:
3389 return err;
3390}
3391
Sathish Ambley1ca68232017-01-19 10:32:55 -08003392static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
3393{
3394 filp->private_data = inode->i_private;
3395 return 0;
3396}
3397
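/*
 * fastrpc_debugfs_read() - render driver state as text for debugfs. With
 * no file context (fl == NULL) it dumps per-channel and global map/CMA
 * information; with a context it dumps that client's session attributes,
 * maps, cached buffers and pending/interrupted contexts. Output is capped
 * at DEBUGFS_SIZE bytes.
 */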
3398static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
3399 size_t count, loff_t *position)
3400{
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303401 struct fastrpc_apps *me = &gfa;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003402 struct fastrpc_file *fl = filp->private_data;
3403 struct hlist_node *n;
c_mtharue1a5ce12017-10-13 20:47:09 +05303404 struct fastrpc_buf *buf = NULL;
3405 struct fastrpc_mmap *map = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303406 struct fastrpc_mmap *gmaps = NULL;
c_mtharue1a5ce12017-10-13 20:47:09 +05303407 struct smq_invoke_ctx *ictx = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303408 struct fastrpc_channel_ctx *chan = NULL;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003409 unsigned int len = 0;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303410 int i, j, sess_used = 0, ret = 0;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003411 char *fileinfo = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303412 char single_line[UL_SIZE] = "----------------";
3413 char title[UL_SIZE] = "=========================";
Sathish Ambley1ca68232017-01-19 10:32:55 -08003414
3415 fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
3416 if (!fileinfo)
3417 goto bail;
3418 if (fl == NULL) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303419 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3420 "\n%s %s %s\n", title, " CHANNEL INFO ", title);
3421 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3422 "%-8s|%-9s|%-9s|%-14s|%-9s|%-13s\n",
3423			"subsys", "refcount", "sesscount", "issubsystemup",
3424 "ssrcount", "session_used");
3425 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3426 "-%s%s%s%s-\n", single_line, single_line,
3427 single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003428 for (i = 0; i < NUM_CHANNELS; i++) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303429 sess_used = 0;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003430 chan = &gcinfo[i];
3431 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303432 DEBUGFS_SIZE - len, "%-8s", chan->subsys);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003433 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303434 DEBUGFS_SIZE - len, "|%-9d",
3435 chan->kref.refcount.counter);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303436 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303437 DEBUGFS_SIZE - len, "|%-9d",
3438 chan->sesscount);
3439 len += scnprintf(fileinfo + len,
3440 DEBUGFS_SIZE - len, "|%-14d",
3441 chan->issubsystemup);
3442 len += scnprintf(fileinfo + len,
3443 DEBUGFS_SIZE - len, "|%-9d",
3444 chan->ssrcount);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003445 for (j = 0; j < chan->sesscount; j++) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303446 sess_used += chan->session[j].used;
3447 }
3448 len += scnprintf(fileinfo + len,
3449 DEBUGFS_SIZE - len, "|%-13d\n", sess_used);
3450
3451 }
3452 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3453 "\n%s%s%s\n", "=============",
3454 " CMA HEAP ", "==============");
3455 len += scnprintf(fileinfo + len,
3456 DEBUGFS_SIZE - len, "%-20s|%-20s\n", "addr", "size");
3457 len += scnprintf(fileinfo + len,
3458 DEBUGFS_SIZE - len, "--%s%s---\n",
3459 single_line, single_line);
3460 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3461 "0x%-18llX", me->range.addr);
3462 len += scnprintf(fileinfo + len,
3463 DEBUGFS_SIZE - len, "|0x%-18llX\n", me->range.size);
3464 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3465 "\n==========%s %s %s===========\n",
3466 title, " GMAPS ", title);
3467 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3468 "%-20s|%-20s|%-20s|%-20s\n",
3469 "fd", "phys", "size", "va");
3470 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3471 "%s%s%s%s%s\n", single_line, single_line,
3472 single_line, single_line, single_line);
3473 hlist_for_each_entry_safe(gmaps, n, &me->maps, hn) {
3474 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3475 "%-20d|0x%-18llX|0x%-18X|0x%-20lX\n\n",
3476 gmaps->fd, gmaps->phys,
3477 (uint32_t)gmaps->size,
3478 gmaps->va);
3479 }
3480 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3481 "%-20s|%-20s|%-20s|%-20s\n",
3482 "len", "refs", "raddr", "flags");
3483 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3484 "%s%s%s%s%s\n", single_line, single_line,
3485 single_line, single_line, single_line);
3486 hlist_for_each_entry_safe(gmaps, n, &me->maps, hn) {
3487 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3488 "0x%-18X|%-20d|%-20lu|%-20u\n",
3489 (uint32_t)gmaps->len, gmaps->refs,
3490 gmaps->raddr, gmaps->flags);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003491 }
3492 } else {
3493 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303494 "\n%s %13s %d\n", "cid", ":", fl->cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003495 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303496 "%s %12s %d\n", "tgid", ":", fl->tgid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003497 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303498 "%s %7s %d\n", "sessionid", ":", fl->sessionid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003499 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303500 "%s %8s %d\n", "ssrcount", ":", fl->ssrcount);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303501 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303502 "%s %8s %d\n", "refcount", ":", fl->refcount);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003503 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303504 "%s %14s %d\n", "pd", ":", fl->pd);
3505 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3506 "%s %9s %s\n", "spdname", ":", fl->spdname);
3507 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3508 "%s %6s %d\n", "file_close", ":", fl->file_close);
3509 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3510 "%s %8s %d\n", "sharedcb", ":", fl->sharedcb);
3511 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3512 "%s %9s %d\n", "profile", ":", fl->profile);
3513 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3514 "%s %3s %d\n", "smmu.coherent", ":",
3515 fl->sctx->smmu.coherent);
3516 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3517 "%s %4s %d\n", "smmu.enabled", ":",
3518 fl->sctx->smmu.enabled);
3519 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3520 "%s %9s %d\n", "smmu.cb", ":", fl->sctx->smmu.cb);
3521 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3522 "%s %5s %d\n", "smmu.secure", ":",
3523 fl->sctx->smmu.secure);
3524 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3525 "%s %5s %d\n", "smmu.faults", ":",
3526 fl->sctx->smmu.faults);
3527 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3528 "%s %s %d\n", "link.link_state",
3529			":", me->channel[fl->cid].link.link_state);
3530
3531 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3532 "\n=======%s %s %s======\n", title,
3533 " LIST OF MAPS ", title);
3534
3535 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3536 "%-20s|%-20s|%-20s\n", "va", "phys", "size");
3537 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3538 "%s%s%s%s%s\n",
3539 single_line, single_line, single_line,
3540 single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003541 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303542 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3543				"0x%-20lX|0x%-20llX|%-20zu\n\n",
3544 map->va, map->phys,
3545 map->size);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003546 }
3547 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303548 "%-20s|%-20s|%-20s|%-20s\n",
3549 "len", "refs",
3550 "raddr", "uncached");
3551 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3552 "%s%s%s%s%s\n",
3553 single_line, single_line, single_line,
3554 single_line, single_line);
3555 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
3556 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3557 "%-20zu|%-20d|0x%-20lX|%-20d\n\n",
3558 map->len, map->refs, map->raddr,
3559 map->uncached);
3560 }
3561 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3562 "%-20s|%-20s\n", "secure", "attr");
3563 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3564 "%s%s%s%s%s\n",
3565 single_line, single_line, single_line,
3566 single_line, single_line);
3567 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
3568 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3569 "%-20d|0x%-20lX\n\n",
3570 map->secure, map->attr);
3571 }
3572 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303573 "%s %d\n\n",
3574 "KERNEL MEMORY ALLOCATION:", 1);
3575 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303576 "\n======%s %s %s======\n", title,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303577 " LIST OF CACHED BUFS ", title);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303578 spin_lock(&fl->hlock);
3579 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303580 "%-19s|%-19s|%-19s|%-19s\n",
3581 "virt", "phys", "size", "dma_attr");
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303582 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3583 "%s%s%s%s%s\n", single_line, single_line,
3584 single_line, single_line, single_line);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303585 hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303586 len += scnprintf(fileinfo + len,
3587 DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303588 "0x%-17p|0x%-17llX|%-19zu|0x%-17lX\n",
3589 buf->virt, (uint64_t)buf->phys, buf->size,
3590 buf->dma_attr);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303591 }
3592 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3593 "\n%s %s %s\n", title,
3594 " LIST OF PENDING SMQCONTEXTS ", title);
3595
3596 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3597 "%-20s|%-10s|%-10s|%-10s|%-20s\n",
3598 "sc", "pid", "tgid", "used", "ctxid");
3599 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3600 "%s%s%s%s%s\n", single_line, single_line,
3601 single_line, single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003602 hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
3603 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303604 "0x%-18X|%-10d|%-10d|%-10zu|0x%-20llX\n\n",
3605 ictx->sc, ictx->pid, ictx->tgid,
3606 ictx->used, ictx->ctxid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003607 }
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303608
Sathish Ambley1ca68232017-01-19 10:32:55 -08003609 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303610 "\n%s %s %s\n", title,
3611 " LIST OF INTERRUPTED SMQCONTEXTS ", title);
3612
3613 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3614 "%-20s|%-10s|%-10s|%-10s|%-20s\n",
3615 "sc", "pid", "tgid", "used", "ctxid");
3616 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3617 "%s%s%s%s%s\n", single_line, single_line,
3618 single_line, single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003619 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303620 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3621				"0x%-18X|%-10d|%-10d|%-10zu|0x%-20llX\n\n",
3622 ictx->sc, ictx->pid, ictx->tgid,
3623 ictx->used, ictx->ctxid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003624 }
3625 spin_unlock(&fl->hlock);
3626 }
3627 if (len > DEBUGFS_SIZE)
3628 len = DEBUGFS_SIZE;
3629 ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
3630 kfree(fileinfo);
3631bail:
3632 return ret;
3633}
3634
3635static const struct file_operations debugfs_fops = {
3636 .open = fastrpc_debugfs_open,
3637 .read = fastrpc_debugfs_read,
3638};
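/*
 * fastrpc_channel_open() - take a reference on the remote channel for this
 * file, (re)opening the glink or SMD transport if it is not already up,
 * queueing receive intents on glink, and re-running the remote-heap SSR
 * cleanup on channel 0 when the subsystem has restarted since the channel
 * was last opened.
 */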
Sathish Ambley36849af2017-02-02 09:35:55 -08003639static int fastrpc_channel_open(struct fastrpc_file *fl)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003640{
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003641 struct fastrpc_apps *me = &gfa;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05303642 int cid = -1, ii, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003643
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303644 mutex_lock(&me->smd_mutex);
3645
Sathish Ambley36849af2017-02-02 09:35:55 -08003646 VERIFY(err, fl && fl->sctx);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003647 if (err)
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303648 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08003649 cid = fl->cid;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05303650 VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
3651 if (err) {
3652 err = -ECHRNG;
c_mtharu314a4202017-11-15 22:09:17 +05303653 goto bail;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05303654 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303655 if (me->channel[cid].ssrcount !=
3656 me->channel[cid].prevssrcount) {
3657 if (!me->channel[cid].issubsystemup) {
3658 VERIFY(err, 0);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303659 if (err) {
3660 err = -ENOTCONN;
c_mtharue1a5ce12017-10-13 20:47:09 +05303661 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303662 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303663 }
3664 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003665 fl->ssrcount = me->channel[cid].ssrcount;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303666 fl->refcount = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003667 if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
c_mtharue1a5ce12017-10-13 20:47:09 +05303668 (me->channel[cid].chan == NULL)) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303669 if (me->glink) {
3670 VERIFY(err, 0 == fastrpc_glink_register(cid, me));
3671 if (err)
3672 goto bail;
3673 VERIFY(err, 0 == fastrpc_glink_open(cid));
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05303674 VERIFY(err,
3675 wait_for_completion_timeout(&me->channel[cid].workport,
3676 RPC_TIMEOUT));
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303677 } else {
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05303678 if (me->channel[cid].chan == NULL) {
3679 VERIFY(err, !smd_named_open_on_edge(
3680 FASTRPC_SMD_GUID,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303681 gcinfo[cid].channel,
3682 (smd_channel_t **)&me->channel[cid].chan,
3683 (void *)(uintptr_t)cid,
3684 smd_event_handler));
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05303685 VERIFY(err,
3686 wait_for_completion_timeout(&me->channel[cid].workport,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003687 RPC_TIMEOUT));
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05303688
3689 }
3690 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003691 if (err) {
c_mtharue1a5ce12017-10-13 20:47:09 +05303692 me->channel[cid].chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003693 goto bail;
3694 }
3695 kref_init(&me->channel[cid].kref);
3696 pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
3697 MAJOR(me->dev_no), cid);
Tharun Kumar Meruguc42c6e22018-05-29 15:50:46 +05303698
3699 for (ii = 0; ii < FASTRPC_GLINK_INTENT_NUM && me->glink; ii++)
3700 glink_queue_rx_intent(me->channel[cid].chan, NULL,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303701 FASTRPC_GLINK_INTENT_LEN);
Tharun Kumar Meruguc42c6e22018-05-29 15:50:46 +05303702
Tharun Kumar Merugud86fc8c2018-01-04 16:35:31 +05303703 if (cid == 0 && me->channel[cid].ssrcount !=
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003704 me->channel[cid].prevssrcount) {
c_mtharue1a5ce12017-10-13 20:47:09 +05303705 if (fastrpc_mmap_remove_ssr(fl))
3706 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003707 me->channel[cid].prevssrcount =
3708 me->channel[cid].ssrcount;
3709 }
3710 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003711
3712bail:
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303713 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003714 return err;
3715}
3716
Sathish Ambley36849af2017-02-02 09:35:55 -08003717static int fastrpc_device_open(struct inode *inode, struct file *filp)
3718{
3719 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05303720 struct fastrpc_file *fl = NULL;
Sathish Ambley36849af2017-02-02 09:35:55 -08003721 struct fastrpc_apps *me = &gfa;
3722
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303723 /*
3724	 * Indicates which device node was opened:
3725	 * MINOR_NUM_DEV or MINOR_NUM_SECURE_DEV
3726 */
3727 int dev_minor = MINOR(inode->i_rdev);
3728
3729 VERIFY(err, ((dev_minor == MINOR_NUM_DEV) ||
3730 (dev_minor == MINOR_NUM_SECURE_DEV)));
3731 if (err) {
3732 pr_err("adsprpc: Invalid dev minor num %d\n", dev_minor);
3733 return err;
3734 }
3735
c_mtharue1a5ce12017-10-13 20:47:09 +05303736 VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
Sathish Ambley36849af2017-02-02 09:35:55 -08003737 if (err)
3738 return err;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303739
Sathish Ambley36849af2017-02-02 09:35:55 -08003740 context_list_ctor(&fl->clst);
3741 spin_lock_init(&fl->hlock);
3742 INIT_HLIST_HEAD(&fl->maps);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303743 INIT_HLIST_HEAD(&fl->perf);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303744 INIT_HLIST_HEAD(&fl->cached_bufs);
3745 INIT_HLIST_HEAD(&fl->remote_bufs);
Sathish Ambley36849af2017-02-02 09:35:55 -08003746 INIT_HLIST_NODE(&fl->hn);
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05303747 fl->sessionid = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003748 fl->apps = me;
3749 fl->mode = FASTRPC_MODE_SERIAL;
3750 fl->cid = -1;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303751 fl->dev_minor = dev_minor;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303752 fl->init_mem = NULL;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303753 fl->qos_request = 0;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303754 fl->refcount = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003755 filp->private_data = fl;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05303756 mutex_init(&fl->map_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303757 mutex_init(&fl->fl_map_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08003758 spin_lock(&me->hlock);
3759 hlist_add_head(&fl->hn, &me->drivers);
3760 spin_unlock(&me->hlock);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303761 mutex_init(&fl->perf_mutex);
Jeya Rb9090542021-06-10 13:03:44 +05303762 mutex_init(&fl->pm_qos_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08003763 return 0;
3764}
3765
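/*
 * fastrpc_set_process_info() - record the caller's tgid and create a
 * per-client debugfs file named "<comm>_<pid>" (comm truncated) backed by
 * debugfs_fops above. Failure to create the debugfs entry only produces a
 * warning.
 */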
Edgar Flores1a772fa2020-02-07 14:59:29 -08003766static int fastrpc_set_process_info(struct fastrpc_file *fl)
3767{
3768 int err = 0, buf_size = 0;
3769 char strpid[PID_SIZE];
Jeya Rc6995932021-03-18 14:04:49 +05303770 char cur_comm[TASK_COMM_LEN];
Edgar Flores1a772fa2020-02-07 14:59:29 -08003771
Jeya Rc6995932021-03-18 14:04:49 +05303772 memcpy(cur_comm, current->comm, TASK_COMM_LEN);
3773 cur_comm[TASK_COMM_LEN-1] = '\0';
Edgar Flores1a772fa2020-02-07 14:59:29 -08003774 fl->tgid = current->tgid;
3775 snprintf(strpid, PID_SIZE, "%d", current->pid);
Jeya Rc6995932021-03-18 14:04:49 +05303776 buf_size = strlen(cur_comm) + strlen("_") + strlen(strpid) + 1;
Edgar Flores1a772fa2020-02-07 14:59:29 -08003777 fl->debug_buf = kzalloc(buf_size, GFP_KERNEL);
3778 if (!fl->debug_buf) {
3779 err = -ENOMEM;
3780 return err;
3781 }
Jeya Rc6995932021-03-18 14:04:49 +05303782 snprintf(fl->debug_buf, buf_size, "%.10s%s%d",
3783 cur_comm, "_", current->pid);
Edgar Flores1a772fa2020-02-07 14:59:29 -08003784 fl->debugfs_file = debugfs_create_file(fl->debug_buf, 0644,
3785 debugfs_root, fl, &debugfs_fops);
3786 if (!fl->debugfs_file)
3787 pr_warn("Error: %s: %s: failed to create debugfs file %s\n",
Jeya Rc6995932021-03-18 14:04:49 +05303788 cur_comm, __func__, fl->debug_buf);
3789
Edgar Flores1a772fa2020-02-07 14:59:29 -08003790 return err;
3791}
3792
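/*
 * fastrpc_get_info() - handler for FASTRPC_IOCTL_GETINFO. On first use the
 * file is bound to the requested channel (offload to a secure channel
 * through the non-secure device node is rejected unless the channel
 * supports unsigned offload) and an SMMU session is allocated; the ioctl
 * then reports whether the SMMU is enabled for that session.
 */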
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003793static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
3794{
3795 int err = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003796 uint32_t cid;
Jeya Rf4b99852020-11-22 13:03:16 +05303797 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003798
c_mtharue1a5ce12017-10-13 20:47:09 +05303799 VERIFY(err, fl != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003800 if (err)
3801 goto bail;
Edgar Flores1a772fa2020-02-07 14:59:29 -08003802 err = fastrpc_set_process_info(fl);
3803 if (err)
3804 goto bail;
Jeya Rf4b99852020-11-22 13:03:16 +05303805 cid = *info;
Sathish Ambley36849af2017-02-02 09:35:55 -08003806 if (fl->cid == -1) {
Jeya Rf4b99852020-11-22 13:03:16 +05303807 struct fastrpc_channel_ctx *chan = &me->channel[cid];
3808
Sathish Ambley36849af2017-02-02 09:35:55 -08003809 VERIFY(err, cid < NUM_CHANNELS);
3810 if (err)
3811 goto bail;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303812 /* Check to see if the device node is non-secure */
zhaochenfc798572018-08-17 15:32:37 +08003813 if (fl->dev_minor == MINOR_NUM_DEV &&
3814 fl->apps->secure_flag == true) {
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303815 /*
Jeya Rf4b99852020-11-22 13:03:16 +05303816 * If an app is trying to offload to a secure remote
3817 * channel by opening the non-secure device node, allow
3818 * the access if the subsystem supports unsigned
3819 * offload. Untrusted apps will be restricted.
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303820 */
Jeya Rf4b99852020-11-22 13:03:16 +05303821 if (chan->secure == SECURE_CHANNEL &&
3822 !chan->unsigned_support) {
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303823 err = -EPERM;
3824 pr_err("adsprpc: GetInfo failed dev %d, cid %d, secure %d\n",
3825 fl->dev_minor, cid,
3826 fl->apps->channel[cid].secure);
3827 goto bail;
3828 }
3829 }
Sathish Ambley36849af2017-02-02 09:35:55 -08003830 fl->cid = cid;
3831 fl->ssrcount = fl->apps->channel[cid].ssrcount;
3832 VERIFY(err, !fastrpc_session_alloc_locked(
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303833 &fl->apps->channel[cid], 0, fl->sharedcb, &fl->sctx));
Sathish Ambley36849af2017-02-02 09:35:55 -08003834 if (err)
3835 goto bail;
3836 }
Tharun Kumar Merugu80be7d62017-08-02 11:03:22 +05303837 VERIFY(err, fl->sctx != NULL);
Jeya R984a1a32021-01-18 15:38:07 +05303838 if (err) {
3839 err = -EBADR;
Tharun Kumar Merugu80be7d62017-08-02 11:03:22 +05303840 goto bail;
Jeya R984a1a32021-01-18 15:38:07 +05303841 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003842 *info = (fl->sctx->smmu.enabled ? 1 : 0);
3843bail:
3844 return err;
3845}
3846
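/*
 * fastrpc_internal_control() - handler for FASTRPC_IOCTL_CONTROL:
 * FASTRPC_CONTROL_LATENCY adds or updates a PM QoS CPU/DMA latency vote,
 * FASTRPC_CONTROL_SMMU selects shared context-bank use on non-legacy
 * targets, and FASTRPC_CONTROL_KALLOC reports kernel-allocation support.
 */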
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303847static int fastrpc_internal_control(struct fastrpc_file *fl,
3848 struct fastrpc_ioctl_control *cp)
3849{
Mohammed Nayeem Ur Rahman18d633f2019-05-28 15:11:40 +05303850 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303851 int err = 0;
3852 int latency;
3853
3854 VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
3855 if (err)
3856 goto bail;
3857 VERIFY(err, !IS_ERR_OR_NULL(cp));
3858 if (err)
3859 goto bail;
3860
3861 switch (cp->req) {
3862 case FASTRPC_CONTROL_LATENCY:
3863 latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
3864 fl->apps->latency : PM_QOS_DEFAULT_VALUE;
3865 VERIFY(err, latency != 0);
3866 if (err)
3867 goto bail;
Jeya Rb9090542021-06-10 13:03:44 +05303868 mutex_lock(&fl->pm_qos_mutex);
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303869 if (!fl->qos_request) {
3870 pm_qos_add_request(&fl->pm_qos_req,
3871 PM_QOS_CPU_DMA_LATENCY, latency);
3872 fl->qos_request = 1;
3873 } else
3874 pm_qos_update_request(&fl->pm_qos_req, latency);
Jeya Rb9090542021-06-10 13:03:44 +05303875 mutex_unlock(&fl->pm_qos_mutex);
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303876 break;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303877 case FASTRPC_CONTROL_SMMU:
Mohammed Nayeem Ur Rahman18d633f2019-05-28 15:11:40 +05303878 if (!me->legacy)
3879 fl->sharedcb = cp->smmu.sharedcb;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303880 break;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303881 case FASTRPC_CONTROL_KALLOC:
3882 cp->kalloc.kalloc_support = 1;
3883 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303884 default:
3885 err = -ENOTTY;
3886 break;
3887 }
3888bail:
3889 return err;
3890}
3891
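/*
 * fastrpc_get_dsp_info() - handler for the DSP-capabilities ioctl: copy the
 * query from user space, validate the domain, fill it through
 * fastrpc_get_info_from_kernel() and copy the result back.
 */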
Tharun Kumar Meruguebe00202019-04-05 03:34:28 +05303892static int fastrpc_get_dsp_info(struct fastrpc_ioctl_dsp_capabilities *dsp_cap,
3893 void *param, struct fastrpc_file *fl)
3894{
3895 int err = 0;
3896
3897 K_COPY_FROM_USER(err, 0, dsp_cap, param,
3898 sizeof(struct fastrpc_ioctl_dsp_capabilities));
3899 VERIFY(err, dsp_cap->domain < NUM_CHANNELS);
3900 if (err)
3901 goto bail;
3902
3903 err = fastrpc_get_info_from_kernel(dsp_cap, fl);
3904 if (err)
3905 goto bail;
3906 K_COPY_TO_USER(err, 0, param, dsp_cap,
3907 sizeof(struct fastrpc_ioctl_dsp_capabilities));
3908bail:
3909 return err;
3910}
3911
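/*
 * fastrpc_update_cdsp_support() - query the CDSP domain's attributes once
 * and clear the cached unsigned_support flag when the DSP reports that
 * unsigned offload is not available (dsp_attributes[1] == 0).
 */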
Vamsi Krishna Gattupallid1e4dff2021-05-28 14:19:24 +05303912static int fastrpc_update_cdsp_support(struct fastrpc_file *fl)
3913{
3914 struct fastrpc_ioctl_dsp_capabilities *dsp_query;
3915 struct fastrpc_apps *me = &gfa;
3916 int err = 0;
3917
3918 VERIFY(err, NULL != (dsp_query = kzalloc(sizeof(*dsp_query),
3919 GFP_KERNEL)));
3920 if (err)
3921 goto bail;
3922 dsp_query->domain = CDSP_DOMAIN_ID;
3923 err = fastrpc_get_info_from_kernel(dsp_query, fl);
3924 if (err)
3925 goto bail;
3926 if (!(dsp_query->dsp_attributes[1]))
3927 me->channel[CDSP_DOMAIN_ID].unsigned_support = false;
3928bail:
3929 kfree(dsp_query);
3930 return err;
3931}
3932
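/*
 * fastrpc_device_ioctl() - top-level ioctl dispatcher. A rough user-space
 * usage sketch (illustrative only; the node name and argument structures
 * come from the UAPI headers, not from this file):
 *
 *	int fd = open("/dev/adsprpc-smd", O_RDONLY);	// typical node name
 *	uint32_t domain = 0;				// e.g. ADSP
 *	ioctl(fd, FASTRPC_IOCTL_GETINFO, &domain);	// bind fd to a channel
 *	// ... FASTRPC_IOCTL_INVOKE / _MMAP / _MUNMAP as required ...
 *	close(fd);			// ends up in fastrpc_device_release()
 *
 * New requests are rejected once file_close is set by the release path.
 */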
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003933static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
3934 unsigned long ioctl_param)
3935{
3936 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07003937 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003938 struct fastrpc_ioctl_mmap mmap;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303939 struct fastrpc_ioctl_mmap_64 mmap64;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003940 struct fastrpc_ioctl_munmap munmap;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303941 struct fastrpc_ioctl_munmap_64 munmap64;
c_mtharu7bd6a422017-10-17 18:15:37 +05303942 struct fastrpc_ioctl_munmap_fd munmap_fd;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08003943 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003944 struct fastrpc_ioctl_perf perf;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303945 struct fastrpc_ioctl_control cp;
Tharun Kumar Meruguebe00202019-04-05 03:34:28 +05303946 struct fastrpc_ioctl_dsp_capabilities dsp_cap;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003947 } p;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303948 union {
3949 struct fastrpc_ioctl_mmap mmap;
3950 struct fastrpc_ioctl_munmap munmap;
3951 } i;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003952 void *param = (char *)ioctl_param;
3953 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
3954 int size = 0, err = 0;
3955 uint32_t info;
Vamsi Krishna Gattupallid1e4dff2021-05-28 14:19:24 +05303956 static bool isQueryDone;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003957
Jeya Rb70b4ad2021-01-25 10:28:42 -08003958 VERIFY(err, fl != NULL);
3959 if (err) {
3960 err = -EBADR;
3961 goto bail;
3962 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303963 p.inv.fds = NULL;
3964 p.inv.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07003965 p.inv.crc = NULL;
tharun kumar9f899ea2017-07-03 17:07:03 +05303966 spin_lock(&fl->hlock);
3967 if (fl->file_close == 1) {
3968 err = EBADF;
3969		pr_warn("ADSPRPC: fastrpc_device_release is in progress, so not sending any new requests to the DSP\n");
3970 spin_unlock(&fl->hlock);
3971 goto bail;
3972 }
3973 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003974
3975 switch (ioctl_num) {
3976 case FASTRPC_IOCTL_INVOKE:
3977 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07003978 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003979 case FASTRPC_IOCTL_INVOKE_FD:
3980 if (!size)
3981 size = sizeof(struct fastrpc_ioctl_invoke_fd);
3982 /* fall through */
3983 case FASTRPC_IOCTL_INVOKE_ATTRS:
3984 if (!size)
3985 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07003986 /* fall through */
3987 case FASTRPC_IOCTL_INVOKE_CRC:
3988 if (!size)
3989 size = sizeof(struct fastrpc_ioctl_invoke_crc);
c_mtharue1a5ce12017-10-13 20:47:09 +05303990 K_COPY_FROM_USER(err, 0, &p.inv, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003991 if (err)
3992 goto bail;
3993 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
3994 0, &p.inv)));
3995 if (err)
3996 goto bail;
3997 break;
3998 case FASTRPC_IOCTL_MMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303999 K_COPY_FROM_USER(err, 0, &p.mmap, param,
4000 sizeof(p.mmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05304001 if (err)
4002 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004003 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
4004 if (err)
4005 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05304006 K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004007 if (err)
4008 goto bail;
4009 break;
4010 case FASTRPC_IOCTL_MUNMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05304011 K_COPY_FROM_USER(err, 0, &p.munmap, param,
4012 sizeof(p.munmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05304013 if (err)
4014 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004015 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
4016 &p.munmap)));
4017 if (err)
4018 goto bail;
4019 break;
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05304020 case FASTRPC_IOCTL_MMAP_64:
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05304021 K_COPY_FROM_USER(err, 0, &p.mmap64, param,
4022 sizeof(p.mmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05304023 if (err)
4024 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05304025 get_fastrpc_ioctl_mmap_64(&p.mmap64, &i.mmap);
4026 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &i.mmap)));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05304027 if (err)
4028 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05304029 put_fastrpc_ioctl_mmap_64(&p.mmap64, &i.mmap);
4030 K_COPY_TO_USER(err, 0, param, &p.mmap64, sizeof(p.mmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05304031 if (err)
4032 goto bail;
4033 break;
4034 case FASTRPC_IOCTL_MUNMAP_64:
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05304035 K_COPY_FROM_USER(err, 0, &p.munmap64, param,
4036 sizeof(p.munmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05304037 if (err)
4038 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05304039 get_fastrpc_ioctl_munmap_64(&p.munmap64, &i.munmap);
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05304040 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05304041 &i.munmap)));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05304042 if (err)
4043 goto bail;
4044 break;
c_mtharu7bd6a422017-10-17 18:15:37 +05304045 case FASTRPC_IOCTL_MUNMAP_FD:
4046 K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
4047 sizeof(p.munmap_fd));
4048 if (err)
4049 goto bail;
4050 VERIFY(err, 0 == (err = fastrpc_internal_munmap_fd(fl,
4051 &p.munmap_fd)));
4052 if (err)
4053 goto bail;
4054 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004055 case FASTRPC_IOCTL_SETMODE:
4056 switch ((uint32_t)ioctl_param) {
4057 case FASTRPC_MODE_PARALLEL:
4058 case FASTRPC_MODE_SERIAL:
4059 fl->mode = (uint32_t)ioctl_param;
4060 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08004061 case FASTRPC_MODE_PROFILE:
4062 fl->profile = (uint32_t)ioctl_param;
4063 break;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05304064 case FASTRPC_MODE_SESSION:
4065 fl->sessionid = 1;
4066 fl->tgid |= (1 << SESSION_ID_INDEX);
4067 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004068 default:
4069 err = -ENOTTY;
4070 break;
4071 }
4072 break;
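	/*
	 * GETPERF copies out the PERF_KEYS string and, when a data buffer
	 * is supplied, the per-thread fastrpc_perf record for the calling
	 * thread, looked up under fl->perf_mutex.
	 */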
Sathish Ambleya21b5b52017-01-11 16:11:01 -08004073 case FASTRPC_IOCTL_GETPERF:
c_mtharue1a5ce12017-10-13 20:47:09 +05304074 K_COPY_FROM_USER(err, 0, &p.perf,
4075 param, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08004076 if (err)
4077 goto bail;
4078 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
4079 if (p.perf.keys) {
4080 char *keys = PERF_KEYS;
4081
c_mtharue1a5ce12017-10-13 20:47:09 +05304082 K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
4083 keys, strlen(keys)+1);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08004084 if (err)
4085 goto bail;
4086 }
4087 if (p.perf.data) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05304088 struct fastrpc_perf *perf = NULL, *fperf = NULL;
4089 struct hlist_node *n = NULL;
4090
4091 mutex_lock(&fl->perf_mutex);
4092 hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
4093 if (perf->tid == current->pid) {
4094 fperf = perf;
4095 break;
4096 }
4097 }
4098
4099 mutex_unlock(&fl->perf_mutex);
4100
4101 if (fperf) {
4102 K_COPY_TO_USER(err, 0, (void *)p.perf.data,
4103 fperf, sizeof(*fperf));
4104 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08004105 }
c_mtharue1a5ce12017-10-13 20:47:09 +05304106 K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08004107 if (err)
4108 goto bail;
4109 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05304110 case FASTRPC_IOCTL_CONTROL:
c_mtharue1a5ce12017-10-13 20:47:09 +05304111 K_COPY_FROM_USER(err, 0, &p.cp, param,
4112 sizeof(p.cp));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05304113 if (err)
4114 goto bail;
4115 VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
4116 if (err)
4117 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05304118 if (p.cp.req == FASTRPC_CONTROL_KALLOC) {
4119 K_COPY_TO_USER(err, 0, param, &p.cp, sizeof(p.cp));
4120 if (err)
4121 goto bail;
4122 }
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05304123 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004124 case FASTRPC_IOCTL_GETINFO:
c_mtharue1a5ce12017-10-13 20:47:09 +05304125 K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
Sathish Ambley36849af2017-02-02 09:35:55 -08004126 if (err)
4127 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004128 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
4129 if (err)
4130 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05304131 K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004132 if (err)
4133 goto bail;
4134 break;
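	/*
	 * INIT and INIT_ATTRS share fastrpc_init_process(); file and memory
	 * lengths are range-checked first, and for the CDSP channel a
	 * one-time capability query runs after a successful init.
	 */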
4135 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08004136 p.init.attrs = 0;
4137 p.init.siglen = 0;
4138 size = sizeof(struct fastrpc_ioctl_init);
4139 /* fall through */
4140 case FASTRPC_IOCTL_INIT_ATTRS:
4141 if (!size)
4142 size = sizeof(struct fastrpc_ioctl_init_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +05304143 K_COPY_FROM_USER(err, 0, &p.init, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004144 if (err)
4145 goto bail;
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05304146 VERIFY(err, p.init.init.filelen >= 0 &&
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05304147 p.init.init.filelen < INIT_FILELEN_MAX);
4148 if (err)
4149 goto bail;
4150 VERIFY(err, p.init.init.memlen >= 0 &&
4151 p.init.init.memlen < INIT_MEMLEN_MAX);
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05304152 if (err)
4153 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304154 VERIFY(err, 0 == (err = fastrpc_init_process(fl, &p.init)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004155 if (err)
4156 goto bail;
Vamsi Krishna Gattupallid1e4dff2021-05-28 14:19:24 +05304157 if ((fl->cid == CDSP_DOMAIN_ID) && !isQueryDone) {
4158 err = fastrpc_update_cdsp_support(fl);
4159 if (!err)
4160 isQueryDone = true;
4161 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004162 break;
Tharun Kumar Meruguebe00202019-04-05 03:34:28 +05304163 case FASTRPC_IOCTL_GET_DSP_INFO:
4164 err = fastrpc_get_dsp_info(&p.dsp_cap, param, fl);
4165 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004166 default:
4167 err = -ENOTTY;
4168 pr_info("bad ioctl: %d\n", ioctl_num);
4169 break;
4170 }
4171 bail:
4172 return err;
4173}
4174
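/*
 * Subsystem-restart notifier: on SUBSYS_BEFORE_SHUTDOWN the channel
 * transport (glink or SMD) is closed and pending clients are notified;
 * on SUBSYS_RAMDUMP_NOTIFICATION the channel-0 remote-heap ramdump is
 * enabled if a ramdump device exists; SUBSYS_AFTER_POWERUP marks the
 * subsystem up again.
 */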
4175static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
4176 unsigned long code,
4177 void *data)
4178{
4179 struct fastrpc_apps *me = &gfa;
4180 struct fastrpc_channel_ctx *ctx;
c_mtharue1a5ce12017-10-13 20:47:09 +05304181 struct notif_data *notifdata = data;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004182 int cid;
4183
4184 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
4185 cid = ctx - &me->channel[0];
4186 if (code == SUBSYS_BEFORE_SHUTDOWN) {
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304187 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004188 ctx->ssrcount++;
c_mtharue1a5ce12017-10-13 20:47:09 +05304189 ctx->issubsystemup = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304190 if (ctx->chan) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304191 if (me->glink)
4192 fastrpc_glink_close(ctx->chan, cid);
4193 else
4194 smd_close(ctx->chan);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304195 ctx->chan = NULL;
4196 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
4197 gcinfo[cid].name, MAJOR(me->dev_no), cid);
4198 }
4199 mutex_unlock(&me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05304200 if (cid == 0)
4201 me->staticpd_flags = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004202 fastrpc_notify_drivers(me, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05304203 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
4204 if (me->channel[0].remoteheap_ramdump_dev &&
4205 notifdata->enable_ramdump) {
4206 me->channel[0].ramdumpenabled = 1;
4207 }
4208 } else if (code == SUBSYS_AFTER_POWERUP) {
4209 ctx->issubsystemup = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004210 }
4211
4212 return NOTIFY_DONE;
4213}
4214
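/*
 * Protection-domain restart notifier for the static PDs (audio and
 * sensors): a SERVICE_STATE_DOWN event bumps pdrcount, marks the PD
 * down and notifies its drivers, mirroring the subsystem SSR path;
 * SERVICE_STATE_UP marks it up again.
 */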
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304215static int fastrpc_pdr_notifier_cb(struct notifier_block *pdrnb,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304216 unsigned long code,
4217 void *data)
4218{
4219 struct fastrpc_apps *me = &gfa;
4220 struct fastrpc_static_pd *spd;
4221 struct notif_data *notifdata = data;
4222
4223 spd = container_of(pdrnb, struct fastrpc_static_pd, pdrnb);
4224 if (code == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01) {
4225 mutex_lock(&me->smd_mutex);
4226 spd->pdrcount++;
4227 spd->ispdup = 0;
4228 		pr_info("ADSPRPC: PDR notifier %d %s\n",
4229 MAJOR(me->dev_no), spd->spdname);
4230 mutex_unlock(&me->smd_mutex);
4231 if (!strcmp(spd->spdname,
4232 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME))
4233 me->staticpd_flags = 0;
4234 fastrpc_notify_pdr_drivers(me, spd->spdname);
4235 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
4236 if (me->channel[0].remoteheap_ramdump_dev &&
4237 notifdata->enable_ramdump) {
4238 me->channel[0].ramdumpenabled = 1;
4239 }
4240 } else if (code == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
4241 spd->ispdup = 1;
4242 }
4243
4244 return NOTIFY_DONE;
4245}
4246
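/*
 * Service-locator callback: when the domain list arrives, match the
 * audio or sensors PD by name and register fastrpc_pdr_notifier_cb for
 * that domain instance; if the service is already up, mark it so.
 */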
4247static int fastrpc_get_service_location_notify(struct notifier_block *nb,
Vamsi krishna Gattupalli4c11c602020-08-25 19:34:14 +05304248 unsigned long opcode, void *data)
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304249{
4250 struct fastrpc_static_pd *spd;
4251 struct pd_qmi_client_data *pdr = data;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304252 int curr_state = 0, i = 0;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304253
4254 spd = container_of(nb, struct fastrpc_static_pd, get_service_nb);
4255 if (opcode == LOCATOR_DOWN) {
4256 		pr_err("ADSPRPC: PD restart notifier locator down\n");
4257 return NOTIFY_DONE;
4258 }
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304259 for (i = 0; i < pdr->total_domains; i++) {
Vamsi krishna Gattupalli4c11c602020-08-25 19:34:14 +05304260 if ((!strcmp(spd->spdname, "audio_pdr_adsprpc"))
4261 && (!strcmp(pdr->domain_list[i].name,
4262 "msm/adsp/audio_pd"))) {
4263 goto pdr_register;
4264 } else if ((!strcmp(spd->spdname, "sensors_pdr_adsprpc"))
4265 && (!strcmp(pdr->domain_list[i].name,
4266 "msm/adsp/sensor_pd"))) {
4267 goto pdr_register;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304268 }
4269 }
Vamsi krishna Gattupalli4c11c602020-08-25 19:34:14 +05304270 return NOTIFY_DONE;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304271
Vamsi krishna Gattupalli4c11c602020-08-25 19:34:14 +05304272pdr_register:
4273 if (!spd->pdrhandle) {
4274 spd->pdrhandle =
4275 service_notif_register_notifier(
4276 pdr->domain_list[i].name,
4277 pdr->domain_list[i].instance_id,
4278 &spd->pdrnb, &curr_state);
4279 } else {
4280 pr_err("ADSPRPC: %s is already registered\n", spd->spdname);
4281 }
4282
4283 if (IS_ERR(spd->pdrhandle))
4284 pr_err("ADSPRPC: Unable to register notifier\n");
4285
4286 if (curr_state == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
4287 pr_info("ADSPRPC: %s is up\n", spd->spdname);
4288 spd->ispdup = 1;
4289 } else if (curr_state == SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01) {
4290 		pr_info("ADSPRPC: %s is uninitialized\n", spd->spdname);
4291 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304292 return NOTIFY_DONE;
4293}
4294
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004295static const struct file_operations fops = {
4296 .open = fastrpc_device_open,
4297 .release = fastrpc_device_release,
4298 .unlocked_ioctl = fastrpc_device_ioctl,
4299 .compat_ioctl = compat_fastrpc_device_ioctl,
4300};
4301
4302static const struct of_device_id fastrpc_match_table[] = {
4303 { .compatible = "qcom,msm-fastrpc-adsp", },
4304 { .compatible = "qcom,msm-fastrpc-compute", },
4305 { .compatible = "qcom,msm-fastrpc-compute-cb", },
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304306 { .compatible = "qcom,msm-fastrpc-legacy-compute", },
4307 { .compatible = "qcom,msm-fastrpc-legacy-compute-cb", },
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004308 { .compatible = "qcom,msm-adsprpc-mem-region", },
4309 {}
4310};
4311
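/*
 * Probe one SMMU context bank: match the "label" against a channel,
 * read the context-bank properties and attach an ARM IOMMU mapping for
 * the new session. Secure context banks use a lower IOVA start and get
 * DOMAIN_ATTR_SECURE_VMID set on the domain.
 */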
4312static int fastrpc_cb_probe(struct device *dev)
4313{
4314 struct fastrpc_channel_ctx *chan;
4315 struct fastrpc_session_ctx *sess;
4316 struct of_phandle_args iommuspec;
4317 const char *name;
4318 unsigned int start = 0x80000000;
4319 int err = 0, i;
4320 int secure_vmid = VMID_CP_PIXEL;
4321
c_mtharue1a5ce12017-10-13 20:47:09 +05304322 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
4323 "label", NULL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004324 if (err)
4325 goto bail;
4326 for (i = 0; i < NUM_CHANNELS; i++) {
4327 if (!gcinfo[i].name)
4328 continue;
4329 if (!strcmp(name, gcinfo[i].name))
4330 break;
4331 }
4332 VERIFY(err, i < NUM_CHANNELS);
4333 if (err)
4334 goto bail;
4335 chan = &gcinfo[i];
4336 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4337 if (err)
4338 goto bail;
4339
4340 VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
4341 "#iommu-cells", 0, &iommuspec));
4342 if (err)
4343 goto bail;
4344 sess = &chan->session[chan->sesscount];
4345 sess->smmu.cb = iommuspec.args[0] & 0xf;
4346 sess->used = 0;
4347 sess->smmu.coherent = of_property_read_bool(dev->of_node,
4348 "dma-coherent");
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304349 sess->smmu.sharedcb = of_property_read_bool(dev->of_node,
4350 "shared-cb");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004351 sess->smmu.secure = of_property_read_bool(dev->of_node,
4352 "qcom,secure-context-bank");
4353 if (sess->smmu.secure)
4354 start = 0x60000000;
4355 VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
4356 arm_iommu_create_mapping(&platform_bus_type,
Mohammed Nayeem Ur Rahman62f7f9c2020-04-13 11:16:19 +05304357 start, MAX_SIZE_LIMIT)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004358 if (err)
4359 goto bail;
4360
4361 if (sess->smmu.secure)
4362 iommu_domain_set_attr(sess->smmu.mapping->domain,
4363 DOMAIN_ATTR_SECURE_VMID,
4364 &secure_vmid);
4365
4366 VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
4367 if (err)
4368 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05304369 sess->smmu.dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004370 sess->smmu.enabled = 1;
4371 chan->sesscount++;
Sathish Ambley1ca68232017-01-19 10:32:55 -08004372 debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
4373 NULL, &debugfs_fops);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004374bail:
4375 return err;
4376}
4377
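/*
 * Legacy context-bank probe: the "sids" DT array lists the stream IDs;
 * one IOMMU mapping is created and attached to this device and shared
 * by a session per SID on the matched channel.
 */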
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304378static int fastrpc_cb_legacy_probe(struct device *dev)
4379{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304380 struct fastrpc_channel_ctx *chan;
4381 struct fastrpc_session_ctx *first_sess = NULL, *sess = NULL;
4382 const char *name;
4383 unsigned int *sids = NULL, sids_size = 0;
4384 int err = 0, ret = 0, i;
4385
4386 unsigned int start = 0x80000000;
4387
4388 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
4389 "label", NULL)));
4390 if (err)
4391 goto bail;
4392
4393 for (i = 0; i < NUM_CHANNELS; i++) {
4394 if (!gcinfo[i].name)
4395 continue;
4396 if (!strcmp(name, gcinfo[i].name))
4397 break;
4398 }
4399 VERIFY(err, i < NUM_CHANNELS);
4400 if (err)
4401 goto bail;
4402
4403 chan = &gcinfo[i];
4404 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4405 if (err)
4406 goto bail;
4407
4408 first_sess = &chan->session[chan->sesscount];
4409
4410 VERIFY(err, NULL != of_get_property(dev->of_node,
4411 "sids", &sids_size));
4412 if (err)
4413 goto bail;
4414
4415 VERIFY(err, NULL != (sids = kzalloc(sids_size, GFP_KERNEL)));
4416 if (err)
4417 goto bail;
4418 ret = of_property_read_u32_array(dev->of_node, "sids", sids,
4419 sids_size/sizeof(unsigned int));
4420 if (ret)
4421 goto bail;
4422
4423 VERIFY(err, !IS_ERR_OR_NULL(first_sess->smmu.mapping =
4424 arm_iommu_create_mapping(&platform_bus_type,
4425 start, 0x78000000)));
4426 if (err)
4427 goto bail;
4428
4429 VERIFY(err, !arm_iommu_attach_device(dev, first_sess->smmu.mapping));
4430 if (err)
4431 goto bail;
4432
4433
4434 for (i = 0; i < sids_size/sizeof(unsigned int); i++) {
4435 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4436 if (err)
4437 goto bail;
4438 sess = &chan->session[chan->sesscount];
4439 sess->smmu.cb = sids[i];
4440 sess->smmu.dev = dev;
4441 sess->smmu.mapping = first_sess->smmu.mapping;
4442 sess->smmu.enabled = 1;
4443 sess->used = 0;
4444 sess->smmu.coherent = false;
4445 sess->smmu.secure = false;
4446 chan->sesscount++;
4447 }
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304448bail:
4449 kfree(sids);
4450 return err;
4451}
4452
4453
4454
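/*
 * Read a DT property listing the destination VMIDs for the remote heap
 * and build a matching RWX permission array for the later hyp_assign
 * calls; if any read fails, both lists are freed and destvm stays empty.
 */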
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304455static void init_secure_vmid_list(struct device *dev, char *prop_name,
4456 struct secure_vm *destvm)
4457{
4458 int err = 0;
4459 u32 len = 0, i = 0;
4460 u32 *rhvmlist = NULL;
4461 u32 *rhvmpermlist = NULL;
4462
4463 if (!of_find_property(dev->of_node, prop_name, &len))
4464 goto bail;
4465 if (len == 0)
4466 goto bail;
4467 len /= sizeof(u32);
4468 VERIFY(err, NULL != (rhvmlist = kcalloc(len, sizeof(u32), GFP_KERNEL)));
4469 if (err)
4470 goto bail;
4471 VERIFY(err, NULL != (rhvmpermlist = kcalloc(len, sizeof(u32),
4472 GFP_KERNEL)));
4473 if (err)
4474 goto bail;
4475 for (i = 0; i < len; i++) {
4476 err = of_property_read_u32_index(dev->of_node, prop_name, i,
4477 &rhvmlist[i]);
4478 rhvmpermlist[i] = PERM_READ | PERM_WRITE | PERM_EXEC;
4479 		pr_info("ADSPRPC: Secure VMID = %d\n", rhvmlist[i]);
4480 if (err) {
4481 pr_err("ADSPRPC: Failed to read VMID\n");
4482 goto bail;
4483 }
4484 }
4485 destvm->vmid = rhvmlist;
4486 destvm->vmperm = rhvmpermlist;
4487 destvm->vmcount = len;
4488bail:
4489 if (err) {
4490 kfree(rhvmlist);
4491 kfree(rhvmpermlist);
4492 }
4493}
4494
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304495static void configure_secure_channels(uint32_t secure_domains)
4496{
4497 struct fastrpc_apps *me = &gfa;
4498 int ii = 0;
4499 /*
4500 * secure_domains contains the bitmask of the secure channels
4501 * Bit 0 - ADSP
4502 * Bit 1 - MDSP
4503 * Bit 2 - SLPI
4504 * Bit 3 - CDSP
4505 */
4506 for (ii = ADSP_DOMAIN_ID; ii <= CDSP_DOMAIN_ID; ++ii) {
4507 int secure = (secure_domains >> ii) & 0x01;
4508
4509 me->channel[ii].secure = secure;
4510 }
4511}
4512
4513
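/*
 * Platform probe shared by several compatibles: the compute node
 * supplies remote-heap VMIDs, RPC latency and secure-domain settings,
 * the *-compute-cb nodes describe SMMU context banks, and the
 * mem-region node records the ADSP CMA range and hyp-assigns it to the
 * remote VMIDs unless "restrict-access" is set. Audio/sensors PDR
 * service-location notifiers are registered when the matching DT
 * properties are present.
 */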
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004514static int fastrpc_probe(struct platform_device *pdev)
4515{
4516 int err = 0;
4517 struct fastrpc_apps *me = &gfa;
4518 struct device *dev = &pdev->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004519 struct device_node *ion_node, *node;
4520 struct platform_device *ion_pdev;
4521 struct cma *cma;
4522 uint32_t val;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304523 int ret = 0;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304524 uint32_t secure_domains;
c_mtharu63ffc012017-11-16 15:26:56 +05304525
4526 if (of_device_is_compatible(dev->of_node,
4527 "qcom,msm-fastrpc-compute")) {
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304528 init_secure_vmid_list(dev, "qcom,adsp-remoteheap-vmid",
4529 &gcinfo[0].rhvm);
c_mtharu63ffc012017-11-16 15:26:56 +05304530
c_mtharu63ffc012017-11-16 15:26:56 +05304531
4532 of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
4533 &me->latency);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304534 if (of_get_property(dev->of_node,
4535 "qcom,secure-domains", NULL) != NULL) {
4536 VERIFY(err, !of_property_read_u32(dev->of_node,
4537 "qcom,secure-domains",
4538 &secure_domains));
zhaochenfc798572018-08-17 15:32:37 +08004539 if (!err) {
4540 me->secure_flag = true;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304541 configure_secure_channels(secure_domains);
zhaochenfc798572018-08-17 15:32:37 +08004542 } else {
4543 me->secure_flag = false;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304544 pr_info("adsprpc: unable to read the domain configuration from dts\n");
zhaochenfc798572018-08-17 15:32:37 +08004545 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304546 }
c_mtharu63ffc012017-11-16 15:26:56 +05304547 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004548 if (of_device_is_compatible(dev->of_node,
4549 "qcom,msm-fastrpc-compute-cb"))
4550 return fastrpc_cb_probe(dev);
4551
4552 if (of_device_is_compatible(dev->of_node,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304553 "qcom,msm-fastrpc-legacy-compute")) {
4554 me->glink = false;
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304555 me->legacy = 1;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304556 }
4557
4558 if (of_device_is_compatible(dev->of_node,
4559 "qcom,msm-fastrpc-legacy-compute-cb")){
4560 return fastrpc_cb_legacy_probe(dev);
4561 }
4562
4563 if (of_device_is_compatible(dev->of_node,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004564 "qcom,msm-adsprpc-mem-region")) {
4565 me->dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004566 ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
4567 if (ion_node) {
4568 for_each_available_child_of_node(ion_node, node) {
4569 if (of_property_read_u32(node, "reg", &val))
4570 continue;
4571 if (val != ION_ADSP_HEAP_ID)
4572 continue;
4573 ion_pdev = of_find_device_by_node(node);
4574 if (!ion_pdev)
4575 break;
4576 cma = dev_get_cma_area(&ion_pdev->dev);
4577 if (cma) {
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304578 me->range.addr = cma_get_base(cma);
4579 me->range.size =
4580 (size_t)cma_get_size(cma);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004581 }
4582 break;
4583 }
4584 }
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304585 if (me->range.addr && !of_property_read_bool(dev->of_node,
Tharun Kumar Merugu6b7a4a22018-01-17 16:08:07 +05304586 "restrict-access")) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004587 int srcVM[1] = {VMID_HLOS};
4588 int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
4589 VMID_ADSP_Q6};
Sathish Ambley84d11862017-05-15 14:36:05 -07004590 int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004591 PERM_READ | PERM_WRITE | PERM_EXEC,
4592 PERM_READ | PERM_WRITE | PERM_EXEC,
4593 PERM_READ | PERM_WRITE | PERM_EXEC,
4594 };
4595
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304596 VERIFY(err, !hyp_assign_phys(me->range.addr,
4597 me->range.size, srcVM, 1,
4598 destVM, destVMperm, 4));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004599 if (err)
4600 goto bail;
4601 }
4602 return 0;
4603 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304604 if (of_property_read_bool(dev->of_node,
4605 "qcom,fastrpc-adsp-audio-pdr")) {
4606 int session;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004607
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304608 VERIFY(err, !fastrpc_get_adsp_session(
4609 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
4610 if (err)
4611 goto spdbail;
4612 me->channel[0].spd[session].get_service_nb.notifier_call =
4613 fastrpc_get_service_location_notify;
4614 ret = get_service_location(
4615 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
4616 AUDIO_PDR_ADSP_SERVICE_NAME,
4617 &me->channel[0].spd[session].get_service_nb);
4618 if (ret)
4619 pr_err("ADSPRPC: Get service location failed: %d\n",
4620 ret);
4621 }
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304622 if (of_property_read_bool(dev->of_node,
4623 "qcom,fastrpc-adsp-sensors-pdr")) {
4624 int session;
4625
4626 VERIFY(err, !fastrpc_get_adsp_session(
4627 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
4628 if (err)
4629 goto spdbail;
4630 me->channel[0].spd[session].get_service_nb.notifier_call =
4631 fastrpc_get_service_location_notify;
4632 ret = get_service_location(
4633 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
4634 SENSORS_PDR_ADSP_SERVICE_NAME,
4635 &me->channel[0].spd[session].get_service_nb);
4636 if (ret)
4637 pr_err("ADSPRPC: Get service location failed: %d\n",
4638 ret);
4639 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304640spdbail:
4641 err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004642 VERIFY(err, !of_platform_populate(pdev->dev.of_node,
4643 fastrpc_match_table,
4644 NULL, &pdev->dev));
4645 if (err)
4646 goto bail;
4647bail:
4648 return err;
4649}
4650
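/*
 * Teardown helper used on module exit and init failure: drop channel
 * references, detach and release each session's IOMMU mapping and free
 * the per-channel remote-heap VMID lists.
 */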
4651static void fastrpc_deinit(void)
4652{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304653 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004654 struct fastrpc_channel_ctx *chan = gcinfo;
4655 int i, j;
4656
4657 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
4658 if (chan->chan) {
4659 kref_put_mutex(&chan->kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304660 fastrpc_channel_close, &me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05304661 chan->chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004662 }
4663 for (j = 0; j < NUM_SESSIONS; j++) {
4664 struct fastrpc_session_ctx *sess = &chan->session[j];
c_mtharue1a5ce12017-10-13 20:47:09 +05304665 if (sess->smmu.dev) {
4666 arm_iommu_detach_device(sess->smmu.dev);
4667 sess->smmu.dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004668 }
4669 if (sess->smmu.mapping) {
4670 arm_iommu_release_mapping(sess->smmu.mapping);
c_mtharue1a5ce12017-10-13 20:47:09 +05304671 sess->smmu.mapping = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004672 }
4673 }
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304674 kfree(chan->rhvm.vmid);
4675 kfree(chan->rhvm.vmperm);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004676 }
4677}
4678
4679static struct platform_driver fastrpc_driver = {
4680 .probe = fastrpc_probe,
4681 .driver = {
4682 .name = "fastrpc",
4683 .owner = THIS_MODULE,
4684 .of_match_table = fastrpc_match_table,
4685 },
4686};
4687
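/*
 * Module init: create the debugfs root, register the platform driver,
 * set up the char device region, class and the default plus secure
 * device nodes, register an SSR notifier per channel and create the
 * ION client. Errors unwind through the *_bail labels below.
 */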
4688static int __init fastrpc_device_init(void)
4689{
4690 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +05304691 struct device *dev = NULL;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304692 struct device *secure_dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004693 int err = 0, i;
4694
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05304695 debugfs_root = debugfs_create_dir("adsprpc", NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004696 memset(me, 0, sizeof(*me));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004697 fastrpc_init(me);
4698 me->dev = NULL;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304699 me->glink = true;
zhaochenfc798572018-08-17 15:32:37 +08004700 me->secure_flag = false;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004701 VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
4702 if (err)
4703 goto register_bail;
4704 VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
4705 DEVICE_NAME));
4706 if (err)
4707 goto alloc_chrdev_bail;
4708 cdev_init(&me->cdev, &fops);
4709 me->cdev.owner = THIS_MODULE;
4710 VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304711 NUM_DEVICES));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004712 if (err)
4713 goto cdev_init_bail;
4714 me->class = class_create(THIS_MODULE, "fastrpc");
4715 VERIFY(err, !IS_ERR(me->class));
4716 if (err)
4717 goto class_create_bail;
4718 me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304719
4720 /*
4721 * Create devices and register with sysfs
4722 * Create first device with minor number 0
4723 */
Sathish Ambley36849af2017-02-02 09:35:55 -08004724 dev = device_create(me->class, NULL,
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304725 MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV),
4726 NULL, DEVICE_NAME);
Sathish Ambley36849af2017-02-02 09:35:55 -08004727 VERIFY(err, !IS_ERR_OR_NULL(dev));
4728 if (err)
4729 goto device_create_bail;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304730
4731 /* Create secure device with minor number for secure device */
4732 secure_dev = device_create(me->class, NULL,
4733 MKDEV(MAJOR(me->dev_no), MINOR_NUM_SECURE_DEV),
4734 NULL, DEVICE_NAME_SECURE);
4735 VERIFY(err, !IS_ERR_OR_NULL(secure_dev));
4736 if (err)
4737 goto device_create_bail;
4738
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004739 for (i = 0; i < NUM_CHANNELS; i++) {
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304740 me->channel[i].dev = secure_dev;
4741 if (i == CDSP_DOMAIN_ID)
4742 me->channel[i].dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004743 me->channel[i].ssrcount = 0;
4744 me->channel[i].prevssrcount = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05304745 me->channel[i].issubsystemup = 1;
4746 me->channel[i].ramdumpenabled = 0;
4747 me->channel[i].remoteheap_ramdump_dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004748 me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
4749 me->channel[i].handle = subsys_notif_register_notifier(
4750 gcinfo[i].subsys,
4751 &me->channel[i].nb);
4752 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004753 me->client = msm_ion_client_create(DEVICE_NAME);
4754 VERIFY(err, !IS_ERR_OR_NULL(me->client));
4755 if (err)
4756 goto device_create_bail;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05304757
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004758 return 0;
4759device_create_bail:
4760 for (i = 0; i < NUM_CHANNELS; i++) {
Sathish Ambley36849af2017-02-02 09:35:55 -08004761 if (me->channel[i].handle)
4762 subsys_notif_unregister_notifier(me->channel[i].handle,
4763 &me->channel[i].nb);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004764 }
Sathish Ambley36849af2017-02-02 09:35:55 -08004765 if (!IS_ERR_OR_NULL(dev))
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304766 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4767 MINOR_NUM_DEV));
4768 if (!IS_ERR_OR_NULL(secure_dev))
4769 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4770 MINOR_NUM_SECURE_DEV));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004771 class_destroy(me->class);
4772class_create_bail:
4773 cdev_del(&me->cdev);
4774cdev_init_bail:
4775 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
4776alloc_chrdev_bail:
4777register_bail:
4778 fastrpc_deinit();
4779 return err;
4780}
4781
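/*
 * Module exit: release open file state and channels, unregister the
 * per-channel SSR notifiers, destroy both device nodes, the class,
 * cdev and chrdev region, the ION client and the debugfs tree.
 */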
4782static void __exit fastrpc_device_exit(void)
4783{
4784 struct fastrpc_apps *me = &gfa;
4785 int i;
4786
4787 fastrpc_file_list_dtor(me);
4788 fastrpc_deinit();
4789 for (i = 0; i < NUM_CHANNELS; i++) {
4790 if (!gcinfo[i].name)
4791 continue;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004792 subsys_notif_unregister_notifier(me->channel[i].handle,
4793 &me->channel[i].nb);
4794 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304795
4796 /* Destroy the secure and non secure devices */
4797 device_destroy(me->class, MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV));
4798 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4799 MINOR_NUM_SECURE_DEV));
4800
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004801 class_destroy(me->class);
4802 cdev_del(&me->cdev);
4803 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
4804 ion_client_destroy(me->client);
Sathish Ambley1ca68232017-01-19 10:32:55 -08004805 debugfs_remove_recursive(debugfs_root);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004806}
4807
4808late_initcall(fastrpc_device_init);
4809module_exit(fastrpc_device_exit);
4810
4811MODULE_LICENSE("GPL v2");