/*
 * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/msm_ion.h>
#include <soc/qcom/secure_buffer.h>
#include <soc/qcom/glink.h>
#include <soc/qcom/smd.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/service-notifier.h>
#include <soc/qcom/service-locator.h>
#include <linux/scatterlist.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/sort.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <asm/dma-iommu.h>
#include <soc/qcom/scm.h>
#include "adsprpc_compat.h"
#include "adsprpc_shared.h"
#include <soc/qcom/ramdump.h>
#include <linux/debugfs.h>
#include <linux/pm_qos.h>
#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
#define TZ_PIL_AUTH_QDSP6_PROC 1
#define ADSP_MMAP_HEAP_ADDR 4
#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
#define ADSP_MMAP_ADD_PAGES 0x1000
#define FASTRPC_DMAHANDLE_NOMAP (16)

#define FASTRPC_ENOSUCH 39
#define VMID_SSC_Q6 5
#define VMID_ADSP_Q6 6
#define DEBUGFS_SIZE 3072
#define UL_SIZE 25
#define PID_SIZE 10

#define AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME "audio_pdr_adsprpc"
#define AUDIO_PDR_ADSP_SERVICE_NAME "avs/audio"

#define SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME "sensors_pdr_adsprpc"
#define SENSORS_PDR_ADSP_SERVICE_NAME "tms/servreg"

#define RPC_TIMEOUT (5 * HZ)
#define BALIGN 128
#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
#define M_FDLIST (16)
#define M_CRCLIST (64)
#define SESSION_ID_INDEX (30)
#define FASTRPC_CTX_MAGIC (0xbeeddeed)
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_CTXID_MASK (0xFF0)
#define NUM_DEVICES 2 /* adsprpc-smd, adsprpc-smd-secure */
#define MINOR_NUM_DEV 0
#define MINOR_NUM_SECURE_DEV 1
#define NON_SECURE_CHANNEL 0
#define SECURE_CHANNEL 1

#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)

#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)

#define FASTRPC_LINK_STATE_DOWN (0x0)
#define FASTRPC_LINK_STATE_UP (0x1)
#define FASTRPC_LINK_DISCONNECTED (0x0)
#define FASTRPC_LINK_CONNECTING (0x1)
#define FASTRPC_LINK_CONNECTED (0x3)
#define FASTRPC_LINK_DISCONNECTING (0x7)
#define FASTRPC_LINK_REMOTE_DISCONNECTING (0x8)
#define FASTRPC_GLINK_INTENT_LEN (64)
#define FASTRPC_GLINK_INTENT_NUM (16)

#define PERF_KEYS \
	"count:flush:map:copy:glink:getargs:putargs:invalidate:invoke:tid:ptr"
#define FASTRPC_STATIC_HANDLE_KERNEL (1)
#define FASTRPC_STATIC_HANDLE_LISTENER (3)
#define FASTRPC_STATIC_HANDLE_MAX (20)
#define FASTRPC_LATENCY_CTRL_ENB (1)

#define MAX_SIZE_LIMIT (0x78000000)
#define INIT_FILELEN_MAX (2*1024*1024)
#define INIT_MEMLEN_MAX (8*1024*1024)

#define PERF_END (void)0

#define PERF(enb, cnt, ff) \
	{\
		struct timespec startT = {0};\
		int64_t *counter = cnt;\
		if (enb && counter) {\
			getnstimeofday(&startT);\
		} \
		ff ;\
		if (enb && counter) {\
			*counter += getnstimediff(&startT);\
		} \
	}

#define GET_COUNTER(perf_ptr, offset) \
	(perf_ptr != NULL ?\
		(((offset >= 0) && (offset < PERF_KEY_MAX)) ?\
			(int64_t *)(perf_ptr + offset)\
				: (int64_t *)NULL) : (int64_t *)NULL)

static int fastrpc_glink_open(int cid);
static void fastrpc_glink_close(void *chan, int cid);
static int fastrpc_pdr_notifier_cb(struct notifier_block *nb,
					unsigned long code,
					void *data);
static struct dentry *debugfs_root;
static struct dentry *debugfs_global_file;

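/*
 * Page-arithmetic helpers: page-aligned start of a buffer, offset within
 * its first page, number of pages spanned by [buf, buf + len), and a size
 * rounded up to a whole page.
 */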
static inline uint64_t buf_page_start(uint64_t buf)
{
	uint64_t start = (uint64_t) buf & PAGE_MASK;
	return start;
}

static inline uint64_t buf_page_offset(uint64_t buf)
{
	uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
	return offset;
}

static inline uint64_t buf_num_pages(uint64_t buf, size_t len)
{
	uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
	uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
	uint64_t nPages = end - start + 1;
	return nPages;
}

static inline uint64_t buf_page_size(uint32_t size)
{
	uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;

	return sz > PAGE_SIZE ? sz : PAGE_SIZE;
}

static inline void *uint64_to_ptr(uint64_t addr)
{
	void *ptr = (void *)((uintptr_t)addr);

	return ptr;
}

static inline uint64_t ptr_to_uint64(void *ptr)
{
	uint64_t addr = (uint64_t)((uintptr_t)ptr);

	return addr;
}

struct secure_vm {
	int *vmid;
	int *vmperm;
	int vmcount;
};

struct fastrpc_file;

struct fastrpc_buf {
	struct hlist_node hn;
	struct hlist_node hn_rem;
	struct fastrpc_file *fl;
	void *virt;
	uint64_t phys;
	size_t size;
	unsigned long dma_attr;
	uintptr_t raddr;
	uint32_t flags;
	int remote;
};

struct fastrpc_ctx_lst;

struct overlap {
	uintptr_t start;
	uintptr_t end;
	int raix;
	uintptr_t mstart;
	uintptr_t mend;
	uintptr_t offset;
};

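/*
 * Per-invocation state: user (lpra) and remote (rpra/lrpra) argument
 * descriptors, the mappings and buffers backing them, the overlap
 * bookkeeping used while marshalling, and the ctxid/magic used to find
 * and validate the context when the response arrives.
 */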
struct smq_invoke_ctx {
	struct hlist_node hn;
	struct completion work;
	int retval;
	int pid;
	int tgid;
	remote_arg_t *lpra;
	remote_arg64_t *rpra;
	remote_arg64_t *lrpra; /* Local copy of rpra for put_args */
	int *fds;
	struct fastrpc_mmap **maps;
	struct fastrpc_buf *buf;
	struct fastrpc_buf *lbuf;
	size_t used;
	struct fastrpc_file *fl;
	uint32_t sc;
	struct overlap *overs;
	struct overlap **overps;
	struct smq_msg msg;
	unsigned int magic;
	unsigned int *attrs;
	uint32_t *crc;
	uint64_t ctxid;
	void *handle;
	const void *ptr;
};

struct fastrpc_ctx_lst {
	struct hlist_head pending;
	struct hlist_head interrupted;
};

struct fastrpc_smmu {
	struct device *dev;
	struct dma_iommu_mapping *mapping;
	int cb;
	int enabled;
	int faults;
	int secure;
	int coherent;
	int sharedcb;
};

struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};

struct fastrpc_static_pd {
	char *spdname;
	struct notifier_block pdrnb;
	struct notifier_block get_service_nb;
	void *pdrhandle;
	int pdrcount;
	int prevpdrcount;
	int ispdup;
};

struct fastrpc_glink_info {
	int link_state;
	int port_state;
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};

struct fastrpc_channel_ctx {
	char *name;
	char *subsys;
	void *chan;
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct fastrpc_static_pd spd[NUM_SESSIONS];
	struct completion work;
	struct completion workport;
	struct notifier_block nb;
	struct kref kref;
	int channel;
	int sesscount;
	int ssrcount;
	void *handle;
	int prevssrcount;
	int issubsystemup;
	int vmid;
	struct secure_vm rhvm;
	int ramdumpenabled;
	void *remoteheap_ramdump_dev;
	struct fastrpc_glink_info link;
	/* Indicates, if channel is restricted to secure node only */
	int secure;
};

struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;
	uint32_t staticpd_flags;
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;
	spinlock_t hlock;
	struct ion_client *client;
	struct device *dev;
	unsigned int latency;
	bool glink;
	bool legacy;
	bool secure_flag;
	spinlock_t ctxlock;
	struct smq_invoke_ctx *ctxtable[FASTRPC_CTX_MAX];
};

struct fastrpc_mmap {
	struct hlist_node hn;
	struct fastrpc_file *fl;
	struct fastrpc_apps *apps;
	int fd;
	uint32_t flags;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;
	size_t size;
	uintptr_t va;
	size_t len;
	int refs;
	uintptr_t raddr;
	int uncached;
	int secure;
	uintptr_t attr;
};

enum fastrpc_perfkeys {
	PERF_COUNT = 0,
	PERF_FLUSH = 1,
	PERF_MAP = 2,
	PERF_COPY = 3,
	PERF_LINK = 4,
	PERF_GETARGS = 5,
	PERF_PUTARGS = 6,
	PERF_INVARGS = 7,
	PERF_INVOKE = 8,
	PERF_KEY_MAX = 9,
};

struct fastrpc_perf {
	int64_t count;
	int64_t flush;
	int64_t map;
	int64_t copy;
	int64_t link;
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;
	int64_t invoke;
	int64_t tid;
	struct hlist_node hn;
};

struct fastrpc_file {
	struct hlist_node hn;
	spinlock_t hlock;
	struct hlist_head maps;
	struct hlist_head cached_bufs;
	struct hlist_head remote_bufs;
	struct fastrpc_ctx_lst clst;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_buf *init_mem;
	struct fastrpc_session_ctx *secsctx;
	uint32_t mode;
	uint32_t profile;
	int sessionid;
	int tgid;
	int cid;
	int ssrcount;
	int pd;
	char *spdname;
	int file_close;
	int sharedcb;
	struct fastrpc_apps *apps;
	struct hlist_head perf;
	struct dentry *debugfs_file;
	struct mutex perf_mutex;
	struct pm_qos_request pm_qos_req;
	int qos_request;
	struct mutex map_mutex;
	struct mutex fl_map_mutex;
	int refcount;
	/* Identifies the device (MINOR_NUM_DEV / MINOR_NUM_SECURE_DEV) */
	int dev_minor;
	char *debug_buf;
};

static struct fastrpc_apps gfa;

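/*
 * Static configuration for the four DSP channels (adsp, mdsp, slpi, cdsp):
 * glink edge/transport, legacy SMD channel id, and the static process
 * domains (audio/sensors PDR) tracked on the adsp channel.
 */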
static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.channel = SMD_APPS_QDSP,
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
		.spd = {
			{
				.spdname =
					AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
				.pdrnb.notifier_call =
					fastrpc_pdr_notifier_cb,
			},
			{
				.spdname =
					SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
				.pdrnb.notifier_call =
					fastrpc_pdr_notifier_cb,
			}
		},
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.channel = SMD_APPS_MODEM,
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.channel = SMD_APPS_DSPS,
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};

static int hlosvm[1] = {VMID_HLOS};
static int hlosvmperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

static inline int64_t getnstimediff(struct timespec *start)
{
	int64_t ns;
	struct timespec ts, b;

	getnstimeofday(&ts);
	b = timespec_sub(ts, *start);
	ns = timespec_to_ns(&b);
	return ns;
}

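/*
 * Return a pointer to the per-thread performance counter selected by
 * 'key', allocating a fastrpc_perf entry for the calling thread on first
 * use. Serialized by fl->perf_mutex.
 */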
static inline int64_t *getperfcounter(struct fastrpc_file *fl, int key)
{
	int err = 0;
	int64_t *val = NULL;
	struct fastrpc_perf *perf = NULL, *fperf = NULL;
	struct hlist_node *n = NULL;

	VERIFY(err, !IS_ERR_OR_NULL(fl));
	if (err)
		goto bail;

	mutex_lock(&fl->perf_mutex);
	hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
		if (perf->tid == current->pid) {
			fperf = perf;
			break;
		}
	}

	if (IS_ERR_OR_NULL(fperf)) {
		fperf = kzalloc(sizeof(*fperf), GFP_KERNEL);

		VERIFY(err, !IS_ERR_OR_NULL(fperf));
		if (err) {
			mutex_unlock(&fl->perf_mutex);
			kfree(fperf);
			goto bail;
		}

		fperf->tid = current->pid;
		hlist_add_head(&fperf->hn, &fl->perf);
	}

	val = ((int64_t *)fperf) + key;
	mutex_unlock(&fl->perf_mutex);
bail:
	return val;
}

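/*
 * Release a DMA buffer. With 'cache' set the buffer is parked on the
 * file's cached_bufs list for reuse; otherwise it is unhooked from the
 * remote list if needed, reassigned back to HLOS when the channel has a
 * VM id, and freed.
 */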
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
	int vmid, err = 0;

	if (!fl)
		return;
	if (cache) {
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->cached_bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (buf->remote) {
		spin_lock(&fl->hlock);
		hlist_del_init(&buf->hn_rem);
		spin_unlock(&fl->hlock);
		buf->remote = 0;
		buf->raddr = 0;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, fl->sctx != NULL);
		if (err)
			goto bail;
		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_attrs(fl->sctx->smmu.dev, buf->size, buf->virt,
					buf->phys, buf->dma_attr);
	}
bail:
	kfree(buf);
}

static void fastrpc_cached_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
			hlist_del_init(&buf->hn);
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}

static void fastrpc_remote_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->remote_bufs, hn_rem) {
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}

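/*
 * Add a mapping to the appropriate list: remote-heap mappings go on the
 * driver-wide list under gfa.hlock, everything else on the owning file's
 * list.
 */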
static void fastrpc_mmap_add(struct fastrpc_mmap *map)
{
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		struct fastrpc_apps *me = &gfa;

		spin_lock(&me->hlock);
		hlist_add_head(&map->hn, &me->maps);
		spin_unlock(&me->hlock);
	} else {
		struct fastrpc_file *fl = map->fl;

		hlist_add_head(&map->hn, &fl->maps);
	}
}

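/*
 * Look up an existing mapping of fd that covers [va, va + len),
 * optionally taking a reference. Returns -ENOTTY when no match is found
 * and -ETOOMANYREFS if the reference count would overflow.
 */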
static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
	uintptr_t va, size_t len, int mflags, int refs,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_mmap *match = NULL, *map = NULL;
	struct hlist_node *n;

	if ((va + len) < va)
		return -EOVERFLOW;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs) {
					if (map->refs + 1 == INT_MAX) {
						spin_unlock(&me->hlock);
						return -ETOOMANYREFS;
					}
					map->refs++;
				}
				match = map;
				break;
			}
		}
		spin_unlock(&me->hlock);
	} else {
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs) {
					if (map->refs + 1 == INT_MAX)
						return -ETOOMANYREFS;
					map->refs++;
				}
				match = map;
				break;
			}
		}
	}
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

static int dma_alloc_memory(dma_addr_t *region_phys, void **vaddr, size_t size,
					unsigned long dma_attrs)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;

	if (me->dev == NULL) {
		pr_err("device adsprpc-mem is not initialized\n");
		return -ENODEV;
	}
	VERIFY(err, size > 0 && size < MAX_SIZE_LIMIT);
	if (err) {
		err = -EFAULT;
		pr_err("adsprpc: %s: invalid allocation size 0x%zx\n",
				__func__, size);
		return err;
	}
	*vaddr = dma_alloc_attrs(me->dev, size, region_phys, GFP_KERNEL,
					dma_attrs);
	if (IS_ERR_OR_NULL(*vaddr)) {
		pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx, returned %pK\n",
				current->comm, __func__, size, (*vaddr));
		return -ENOMEM;
	}
	return 0;
}

static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       size_t len, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = NULL, *map;
	struct hlist_node *n;
	struct fastrpc_apps *me = &gfa;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

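/*
 * Drop a reference on a mapping and, once it is unused (or a forced free
 * is requested via 'flags'), undo the DMA/ION/SMMU setup, hand the memory
 * back to HLOS where it had been hyp-assigned, and free the descriptor.
 */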
static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl;
	int vmid, cid = -1, err = 0;
	struct fastrpc_session_ctx *sess;

	if (!map)
		return;
	fl = map->fl;
	if (!fl)
		return;
	if (!(map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)) {
		cid = fl->cid;
		VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
		if (err) {
			err = -ECHRNG;
			pr_err("adsprpc: ERROR:%s, Invalid channel id: %d, err:%d",
				__func__, cid, err);
			return;
		}
	}
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		spin_unlock(&me->hlock);
		if (map->refs > 0)
			return;
	} else {
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		if (map->refs > 0 && !flags)
			return;
	}
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {

		if (me->dev == NULL) {
			pr_err("failed to free remote heap allocation\n");
			return;
		}
		if (map->phys) {
			unsigned long dma_attrs = DMA_ATTR_SKIP_ZEROING |
						DMA_ATTR_NO_KERNEL_MAPPING;
			dma_free_attrs(me->dev, map->size, (void *)map->va,
					(dma_addr_t)map->phys, dma_attrs);
		}
	} else if (map->flags == FASTRPC_DMAHANDLE_NOMAP) {
		if (!IS_ERR_OR_NULL(map->handle))
			ion_free(fl->apps->client, map->handle);
	} else {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		if (!IS_ERR_OR_NULL(map->handle))
			ion_free(fl->apps->client, map->handle);
		if (sess && sess->smmu.enabled) {
			if (map->size || map->phys)
				msm_dma_unmap_sg(sess->smmu.dev,
					map->table->sgl,
					map->table->nents, DMA_BIDIRECTIONAL,
					map->buf);
		}
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid && map->phys) {
			int srcVM[2] = {VMID_HLOS, vmid};

			hyp_assign_phys(map->phys, buf_page_size(map->size),
				srcVM, 2, destVM, destVMperm, 1);
		}

		if (!IS_ERR_OR_NULL(map->table))
			dma_buf_unmap_attachment(map->attach, map->table,
					DMA_BIDIRECTIONAL);
		if (!IS_ERR_OR_NULL(map->attach))
			dma_buf_detach(map->buf, map->attach);
		if (!IS_ERR_OR_NULL(map->buf))
			dma_buf_put(map->buf);
	}
	kfree(map);
}

static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
			struct fastrpc_session_ctx **session);

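/*
 * Create (or reuse) a mapping for an fd/buffer. Remote-heap requests are
 * backed by a kernel DMA allocation, FASTRPC_DMAHANDLE_NOMAP only
 * resolves the physical address of the ION handle, and the default path
 * attaches the dma-buf, maps it through the session SMMU and hyp-assigns
 * it when the channel has a VM id.
 */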
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, size_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	struct fastrpc_mmap *map = NULL;
	struct fastrpc_channel_ctx *chan = NULL;
	unsigned long attrs;
	dma_addr_t region_phys = 0;
	void *region_vaddr = NULL;
	unsigned long flags;
	int err = 0, vmid, cid = -1;

	cid = fl->cid;
	VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
	if (err) {
		err = -ECHRNG;
		goto bail;
	}
	chan = &apps->channel[cid];
	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		unsigned long dma_attrs = DMA_ATTR_SKIP_ZEROING |
					DMA_ATTR_NO_KERNEL_MAPPING;

		map->apps = me;
		map->fl = NULL;
		VERIFY(err, !dma_alloc_memory(&region_phys, &region_vaddr,
					len, dma_attrs));
		if (err)
			goto bail;
		map->phys = (uintptr_t)region_phys;
		map->size = len;
		map->va = (uintptr_t)region_vaddr;
	} else if (mflags == FASTRPC_DMAHANDLE_NOMAP) {
		ion_phys_addr_t iphys;

		VERIFY(err, !IS_ERR_OR_NULL(map->handle =
				ion_import_dma_buf_fd(fl->apps->client, fd)));
		if (err)
			goto bail;

		map->uncached = 1;
		map->buf = NULL;
		map->attach = NULL;
		map->table = NULL;
		map->va = 0;
		map->phys = 0;

		err = ion_phys(fl->apps->client, map->handle,
			&iphys, &map->size);
		if (err)
			goto bail;
		map->phys = (uint64_t)iphys;
	} else {
		if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
			pr_info("adsprpc: buffer mapped with persist attr %x\n",
				(unsigned int)map->attr);
			map->refs = 2;
		}
		VERIFY(err, !IS_ERR_OR_NULL(map->handle =
				ion_import_dma_buf_fd(fl->apps->client, fd)));
		if (err)
			goto bail;
		VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
		if (err)
			goto bail;

		map->secure = flags & ION_FLAG_SECURE;
		if (map->secure) {
			if (!fl->secsctx)
				err = fastrpc_session_alloc(chan, 1,
						&fl->secsctx);
			if (err)
				goto bail;
		}
		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		VERIFY(err, !IS_ERR_OR_NULL(sess));
		if (err)
			goto bail;

		map->uncached = !ION_IS_CACHED(flags);
		if (map->attr & FASTRPC_ATTR_NOVA && !sess->smmu.coherent)
			map->uncached = 1;

		VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->attach =
				dma_buf_attach(map->buf, sess->smmu.dev)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
		if (err)
			goto bail;
		if (sess->smmu.enabled) {
			attrs = DMA_ATTR_EXEC_MAPPING;

			if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
				(sess->smmu.coherent && map->uncached))
				attrs |= DMA_ATTR_FORCE_NON_COHERENT;
			else if (map->attr & FASTRPC_ATTR_COHERENT)
				attrs |= DMA_ATTR_FORCE_COHERENT;

			VERIFY(err, map->table->nents ==
				msm_dma_map_sg_attrs(sess->smmu.dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
			if (err)
				goto bail;
		} else {
			VERIFY(err, map->table->nents == 1);
			if (err)
				goto bail;
		}
		map->phys = sg_dma_address(map->table->sgl);

		if (sess->smmu.cb) {
			map->phys += ((uint64_t)sess->smmu.cb << 32);
			map->size = sg_dma_len(map->table->sgl);
		} else {
			map->size = buf_page_size(len);
		}

		VERIFY(err, map->size >= len && map->size < MAX_SIZE_LIMIT);
		if (err) {
			err = -EFAULT;
			goto bail;
		}

		vmid = fl->apps->channel[fl->cid].vmid;
		if (!sess->smmu.enabled && !vmid) {
			VERIFY(err, map->phys >= me->range.addr &&
				map->phys + map->size <=
				me->range.addr + me->range.size);
			if (err) {
				pr_err("adsprpc: mmap fail out of range\n");
				goto bail;
			}
		}
		if (vmid) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[2] = {VMID_HLOS, vmid};
			int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

			VERIFY(err, !hyp_assign_phys(map->phys,
					buf_page_size(map->size),
					srcVM, 1, destVM, destVMperm, 2));
			if (err)
				goto bail;
		}
		map->va = va;
	}
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map, 0);
	return err;
}

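/*
 * Allocate a DMA buffer for this file. Non-remote requests are first
 * served from the cached_bufs free list; if the allocation fails the
 * cache is drained and the allocation retried once before giving up.
 */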
static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
			unsigned long dma_attr, uint32_t rflags,
			int remote, struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = NULL, *fr = NULL;
	struct hlist_node *n;

	VERIFY(err, size > 0 && size < MAX_SIZE_LIMIT);
	if (err) {
		err = -EFAULT;
		goto bail;
	}

	if (!remote) {
		/* find the smallest buffer that fits in the cache */
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
			if (buf->size >= size && (!fr || fr->size > buf->size))
				fr = buf;
		}
		if (fr)
			hlist_del_init(&fr->hn);
		spin_unlock(&fl->hlock);
		if (fr) {
			*obuf = fr;
			return 0;
		}
	}
	buf = NULL;
	VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->dma_attr = dma_attr;
	buf->flags = rflags;
	buf->raddr = 0;
	buf->remote = 0;
	VERIFY(err, fl && fl->sctx != NULL);
	if (err) {
		err = -EBADR;
		goto bail;
	}
	buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
						(dma_addr_t *)&buf->phys,
						GFP_KERNEL, buf->dma_attr);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_cached_buf_list_free(fl);
		buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
						(dma_addr_t *)&buf->phys,
						GFP_KERNEL, buf->dma_attr);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err) {
		err = -ENOMEM;
		pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx\n",
			current->comm, __func__, size);
		goto bail;
	}
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	if (remote) {
		INIT_HLIST_NODE(&buf->hn_rem);
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn_rem, &fl->remote_bufs);
		spin_unlock(&fl->hlock);
		buf->remote = remote;
	}
	*obuf = buf;
 bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}


static int context_restore_interrupted(struct fastrpc_file *fl,
				struct fastrpc_ioctl_invoke_crc *inv,
				struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int overlap_ptr_cmp(const void *a, const void *b)
{
	struct overlap *pa = *((struct overlap **)a);
	struct overlap *pb = *((struct overlap **)b);
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);
	return st == 0 ? ed : st;
}

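/*
 * Sort the in/out buffers by start address and record, for each one, the
 * sub-range not already covered by an earlier buffer (mstart/mend/offset)
 * so overlapping regions are copied only once during marshalling.
 */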
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}

#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst),\
			(void const __user *)(src),\
			(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((void __user *)(dst),\
			(src), (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)


static void context_free(struct smq_invoke_ctx *ctx);

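/*
 * Allocate an invoke context together with its trailing arrays (maps,
 * lpra, fds, attrs, overlaps), copy the argument descriptors from the
 * caller, and park the context in the global ctxtable so it can later be
 * looked up by its ctxid.
 */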
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			 struct fastrpc_ioctl_invoke_crc *invokefd,
			 struct smq_invoke_ctx **po)
{
	struct fastrpc_apps *me = &gfa;
	int err = 0, bufs, ii, size = 0;
	struct smq_invoke_ctx *ctx = NULL;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;
	unsigned long irq_flags = 0;

	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	if (me->legacy) {
		ctx->overs = (struct overlap *)(&ctx->fds[bufs]);
		ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
	} else {
		ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
		ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
		ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
	}

	K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}
	ctx->crc = (uint32_t *)invokefd->crc;
	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = fl->tgid;
	init_completion(&ctx->work);
	ctx->magic = FASTRPC_CTX_MAGIC;

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	spin_lock_irqsave(&me->ctxlock, irq_flags);
	for (ii = 0; ii < FASTRPC_CTX_MAX; ii++) {
		if (!me->ctxtable[ii]) {
			me->ctxtable[ii] = ctx;
			ctx->ctxid = (ptr_to_uint64(ctx) & ~0xFFF)|(ii << 4);
			break;
		}
	}
	spin_unlock_irqrestore(&me->ctxlock, irq_flags);
	VERIFY(err, ii < FASTRPC_CTX_MAX);
	if (err) {
		pr_err("adsprpc: out of context memory\n");
		goto bail;
	}

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}

static void context_save_interrupted(struct smq_invoke_ctx *ctx)
{
	struct fastrpc_ctx_lst *clst = &ctx->fl->clst;

	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	hlist_add_head(&ctx->hn, &clst->interrupted);
	spin_unlock(&ctx->fl->hlock);
}

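/*
 * Tear down an invoke context: release its mappings and buffers, clear
 * its ctxtable slot, and complete any outstanding glink receive before
 * freeing the allocation.
 */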
1318static void context_free(struct smq_invoke_ctx *ctx)
1319{
1320 int i;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301321 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001322 int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
1323 REMOTE_SCALARS_OUTBUFS(ctx->sc);
Jeya R8fa59d62020-11-04 20:42:59 +05301324 unsigned long irq_flags = 0;
1325 void *handle = NULL;
1326 const void *ptr = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001327 spin_lock(&ctx->fl->hlock);
1328 hlist_del_init(&ctx->hn);
1329 spin_unlock(&ctx->fl->hlock);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301330 mutex_lock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001331 for (i = 0; i < nbufs; ++i)
c_mtharu7bd6a422017-10-17 18:15:37 +05301332 fastrpc_mmap_free(ctx->maps[i], 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301333
1334 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001335 fastrpc_buf_free(ctx->buf, 1);
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301336 fastrpc_buf_free(ctx->lbuf, 1);
c_mtharufdac6892017-10-12 13:09:01 +05301337 ctx->magic = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301338 ctx->ctxid = 0;
1339
Jeya R8fa59d62020-11-04 20:42:59 +05301340 spin_lock_irqsave(&me->ctxlock, irq_flags);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301341 for (i = 0; i < FASTRPC_CTX_MAX; i++) {
1342 if (me->ctxtable[i] == ctx) {
Jeya R8fa59d62020-11-04 20:42:59 +05301343 handle = me->ctxtable[i]->handle;
1344 ptr = me->ctxtable[i]->ptr;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301345 me->ctxtable[i] = NULL;
1346 break;
1347 }
1348 }
Jeya R8fa59d62020-11-04 20:42:59 +05301349 spin_unlock_irqrestore(&me->ctxlock, irq_flags);
1350 if (handle) {
1351 glink_rx_done(handle, ptr, true);
1352 handle = NULL;
1353 }
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301354
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001355 kfree(ctx);
1356}
1357
1358static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
1359{
1360 ctx->retval = retval;
1361 complete(&ctx->work);
1362}
1363
1364
1365static void fastrpc_notify_users(struct fastrpc_file *me)
1366{
1367 struct smq_invoke_ctx *ictx;
1368 struct hlist_node *n;
1369
1370 spin_lock(&me->hlock);
1371 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1372 complete(&ictx->work);
1373 }
1374 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1375 complete(&ictx->work);
1376 }
1377 spin_unlock(&me->hlock);
1378
1379}
1380
Tharun Kumar Merugu77dd5872018-04-02 12:48:17 +05301381
1382static void fastrpc_notify_users_staticpd_pdr(struct fastrpc_file *me)
1383{
1384 struct smq_invoke_ctx *ictx;
1385 struct hlist_node *n;
1386
1387 spin_lock(&me->hlock);
1388 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1389 if (ictx->msg.pid)
1390 complete(&ictx->work);
1391 }
1392 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1393 if (ictx->msg.pid)
1394 complete(&ictx->work);
1395 }
1396 spin_unlock(&me->hlock);
1397}
1398
1399
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001400static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
1401{
1402 struct fastrpc_file *fl;
1403 struct hlist_node *n;
1404
1405 spin_lock(&me->hlock);
1406 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1407 if (fl->cid == cid)
1408 fastrpc_notify_users(fl);
1409 }
1410 spin_unlock(&me->hlock);
1411
1412}
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05301413
1414static void fastrpc_notify_pdr_drivers(struct fastrpc_apps *me, char *spdname)
1415{
1416 struct fastrpc_file *fl;
1417 struct hlist_node *n;
1418
1419 spin_lock(&me->hlock);
1420 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1421 if (fl->spdname && !strcmp(spdname, fl->spdname))
Tharun Kumar Merugu77dd5872018-04-02 12:48:17 +05301422 fastrpc_notify_users_staticpd_pdr(fl);
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05301423 }
1424 spin_unlock(&me->hlock);
1425
1426}
1427
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001428static void context_list_ctor(struct fastrpc_ctx_lst *me)
1429{
1430 INIT_HLIST_HEAD(&me->interrupted);
1431 INIT_HLIST_HEAD(&me->pending);
1432}
1433
1434static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
1435{
1436 struct fastrpc_ctx_lst *clst = &fl->clst;
c_mtharue1a5ce12017-10-13 20:47:09 +05301437 struct smq_invoke_ctx *ictx = NULL, *ctxfree;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001438 struct hlist_node *n;
1439
1440 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301441 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001442 spin_lock(&fl->hlock);
1443 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
1444 hlist_del_init(&ictx->hn);
1445 ctxfree = ictx;
1446 break;
1447 }
1448 spin_unlock(&fl->hlock);
1449 if (ctxfree)
1450 context_free(ctxfree);
1451 } while (ctxfree);
1452 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301453 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001454 spin_lock(&fl->hlock);
1455 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
1456 hlist_del_init(&ictx->hn);
1457 ctxfree = ictx;
1458 break;
1459 }
1460 spin_unlock(&fl->hlock);
1461 if (ctxfree)
1462 context_free(ctxfree);
1463 } while (ctxfree);
1464}
1465
1466static int fastrpc_file_free(struct fastrpc_file *fl);
1467static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
1468{
1469 struct fastrpc_file *fl, *free;
1470 struct hlist_node *n;
1471
1472 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301473 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001474 spin_lock(&me->hlock);
1475 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1476 hlist_del_init(&fl->hn);
1477 free = fl;
1478 break;
1479 }
1480 spin_unlock(&me->hlock);
1481 if (free)
1482 fastrpc_file_free(free);
1483 } while (free);
1484}
1485
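/*
 * get_args() - marshal the arguments of a remote invocation.
 * Creates SMMU mappings for ion-backed buffers and DMA handles, sizes the
 * invoke metadata, allocates a local copy of the remote argument (rpra)
 * array plus a copy buffer for non-ion arguments, copies input buffers in
 * from user space, and performs the cache maintenance needed for
 * non-coherent mappings before the message is sent.
 */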
1486static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
1487{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301488 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301489 remote_arg64_t *rpra, *lrpra;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001490 remote_arg_t *lpra = ctx->lpra;
1491 struct smq_invoke_buf *list;
1492 struct smq_phy_page *pages, *ipage;
1493 uint32_t sc = ctx->sc;
1494 int inbufs = REMOTE_SCALARS_INBUFS(sc);
1495 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001496 int handles, bufs = inbufs + outbufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001497 uintptr_t args;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301498 size_t rlen = 0, copylen = 0, metalen = 0, lrpralen = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001499 int i, oix;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001500 int err = 0;
1501 int mflags = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001502 uint64_t *fdlist;
Sathish Ambleybae51902017-07-03 15:00:49 -07001503 uint32_t *crclist;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301504 int64_t *perf_counter = getperfcounter(ctx->fl, PERF_COUNT);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001505
1506 /* calculate size of the metadata */
c_mtharue1a5ce12017-10-13 20:47:09 +05301507 rpra = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001508 list = smq_invoke_buf_start(rpra, sc);
1509 pages = smq_phy_page_start(sc, list);
1510 ipage = pages;
1511
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301512 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001513 for (i = 0; i < bufs; ++i) {
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301514 uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
1515 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001516
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301517 mutex_lock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301518 if (ctx->fds[i] && (ctx->fds[i] != -1)) {
1519 unsigned int attrs = 0;
1520
1521 if (ctx->attrs)
1522 attrs = ctx->attrs[i];
1523
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001524 fastrpc_mmap_create(ctx->fl, ctx->fds[i],
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301525 attrs, buf, len,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001526 mflags, &ctx->maps[i]);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301527 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301528 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001529 ipage += 1;
1530 }
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301531 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001532 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301533 mutex_lock(&ctx->fl->fl_map_mutex);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001534 for (i = bufs; i < bufs + handles; i++) {
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +05301535 int dmaflags = 0;
1536
1537 if (ctx->attrs && (ctx->attrs[i] & FASTRPC_ATTR_NOMAP))
1538 dmaflags = FASTRPC_DMAHANDLE_NOMAP;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001539 VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +05301540 FASTRPC_ATTR_NOVA, 0, 0, dmaflags, &ctx->maps[i]));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301541 if (err) {
1542 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001543 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301544 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001545 ipage += 1;
1546 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301547 mutex_unlock(&ctx->fl->fl_map_mutex);
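	/*
	 * Metadata precedes the copied argument data in ctx->buf: the
	 * remote_arg64_t array, the invoke-buf list and the physical page
	 * list, followed on non-legacy transports by the fd list and the
	 * CRC list.
	 */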
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301548 if (!me->legacy) {
1549 metalen = copylen = (size_t)&ipage[0] +
1550 (sizeof(uint64_t) * M_FDLIST) +
1551 (sizeof(uint32_t) * M_CRCLIST);
1552 } else {
1553 metalen = copylen = (size_t)&ipage[0];
1554 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001555
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301556 /* allocate new local rpra buffer */
1557 lrpralen = (size_t)&list[0];
1558 if (lrpralen) {
1559 err = fastrpc_buf_alloc(ctx->fl, lrpralen, 0, 0, 0, &ctx->lbuf);
1560 if (err)
1561 goto bail;
1562 }
1563 if (ctx->lbuf->virt)
1564 memset(ctx->lbuf->virt, 0, lrpralen);
1565
1566 lrpra = ctx->lbuf->virt;
1567 ctx->lrpra = lrpra;
1568
1569 /* calculate len required for copying */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001570 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1571 int i = ctx->overps[oix]->raix;
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001572 uintptr_t mstart, mend;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301573 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001574
1575 if (!len)
1576 continue;
1577 if (ctx->maps[i])
1578 continue;
1579 if (ctx->overps[oix]->offset == 0)
1580 copylen = ALIGN(copylen, BALIGN);
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001581 mstart = ctx->overps[oix]->mstart;
1582 mend = ctx->overps[oix]->mend;
1583 VERIFY(err, (mend - mstart) <= LONG_MAX);
1584 if (err)
1585 goto bail;
1586 copylen += mend - mstart;
1587 VERIFY(err, copylen >= 0);
1588 if (err)
1589 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001590 }
1591 ctx->used = copylen;
1592
1593 /* allocate new buffer */
1594 if (copylen) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05301595 err = fastrpc_buf_alloc(ctx->fl, copylen, 0, 0, 0, &ctx->buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001596 if (err)
1597 goto bail;
1598 }
Tharun Kumar Merugue3361f92017-06-22 10:45:43 +05301599 if (ctx->buf->virt && metalen <= copylen)
1600 memset(ctx->buf->virt, 0, metalen);
1601
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001602 /* copy metadata */
1603 rpra = ctx->buf->virt;
1604 ctx->rpra = rpra;
1605 list = smq_invoke_buf_start(rpra, sc);
1606 pages = smq_phy_page_start(sc, list);
1607 ipage = pages;
1608 args = (uintptr_t)ctx->buf->virt + metalen;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001609 for (i = 0; i < bufs + handles; ++i) {
1610 if (lpra[i].buf.len)
1611 list[i].num = 1;
1612 else
1613 list[i].num = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001614 list[i].pgidx = ipage - pages;
1615 ipage++;
1616 }
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301617
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001618 /* map ion buffers */
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301619 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301620 for (i = 0; rpra && lrpra && i < inbufs + outbufs; ++i) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001621 struct fastrpc_mmap *map = ctx->maps[i];
1622 uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301623 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001624
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301625 rpra[i].buf.pv = lrpra[i].buf.pv = 0;
1626 rpra[i].buf.len = lrpra[i].buf.len = len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001627 if (!len)
1628 continue;
1629 if (map) {
1630 struct vm_area_struct *vma;
1631 uintptr_t offset;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301632 uint64_t num = buf_num_pages(buf, len);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001633 int idx = list[i].pgidx;
1634
1635 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001636 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001637 } else {
1638 down_read(&current->mm->mmap_sem);
1639 VERIFY(err, NULL != (vma = find_vma(current->mm,
1640 map->va)));
1641 if (err) {
1642 up_read(&current->mm->mmap_sem);
1643 goto bail;
1644 }
1645 offset = buf_page_start(buf) - vma->vm_start;
1646 up_read(&current->mm->mmap_sem);
1647 VERIFY(err, offset < (uintptr_t)map->size);
1648 if (err)
1649 goto bail;
1650 }
1651 pages[idx].addr = map->phys + offset;
1652 pages[idx].size = num << PAGE_SHIFT;
1653 }
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301654 rpra[i].buf.pv = lrpra[i].buf.pv = buf;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001655 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001656 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001657 for (i = bufs; i < bufs + handles; ++i) {
1658 struct fastrpc_mmap *map = ctx->maps[i];
Jeya R4c7abf22020-07-23 16:00:50 +05301659 if (map) {
1660 pages[i].addr = map->phys;
1661 pages[i].size = map->size;
1662 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001663 }
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301664 if (!me->legacy) {
1665 fdlist = (uint64_t *)&pages[bufs + handles];
1666 for (i = 0; i < M_FDLIST; i++)
1667 fdlist[i] = 0;
1668 crclist = (uint32_t *)&fdlist[M_FDLIST];
1669 memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
1670 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001671
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001672 /* copy non ion buffers */
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301673 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001674 rlen = copylen - metalen;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301675 for (oix = 0; rpra && lrpra && oix < inbufs + outbufs; ++oix) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001676 int i = ctx->overps[oix]->raix;
1677 struct fastrpc_mmap *map = ctx->maps[i];
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301678 size_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001679 uint64_t buf;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301680 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001681
1682 if (!len)
1683 continue;
1684 if (map)
1685 continue;
1686 if (ctx->overps[oix]->offset == 0) {
1687 rlen -= ALIGN(args, BALIGN) - args;
1688 args = ALIGN(args, BALIGN);
1689 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001690 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001691 VERIFY(err, rlen >= mlen);
1692 if (err)
1693 goto bail;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301694 rpra[i].buf.pv = lrpra[i].buf.pv =
1695 (args - ctx->overps[oix]->offset);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001696 pages[list[i].pgidx].addr = ctx->buf->phys -
1697 ctx->overps[oix]->offset +
1698 (copylen - rlen);
1699 pages[list[i].pgidx].addr =
1700 buf_page_start(pages[list[i].pgidx].addr);
1701 buf = rpra[i].buf.pv;
1702 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1703 if (i < inbufs) {
1704 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1705 lpra[i].buf.pv, len);
1706 if (err)
1707 goto bail;
1708 }
1709 args = args + mlen;
1710 rlen -= mlen;
1711 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001712 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001713
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301714 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001715 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1716 int i = ctx->overps[oix]->raix;
1717 struct fastrpc_mmap *map = ctx->maps[i];
1718
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001719 if (map && map->uncached)
1720 continue;
Jeya R984a1a32021-01-18 15:38:07 +05301721 if (ctx->fl->sctx && ctx->fl->sctx->smmu.coherent &&
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301722 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1723 continue;
1724 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1725 continue;
1726
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301727 if (rpra && lrpra && rpra[i].buf.len &&
1728 ctx->overps[oix]->mstart) {
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301729 if (map && map->handle)
1730 msm_ion_do_cache_op(ctx->fl->apps->client,
1731 map->handle,
1732 uint64_to_ptr(rpra[i].buf.pv),
1733 rpra[i].buf.len,
1734 ION_IOC_CLEAN_INV_CACHES);
1735 else
1736 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1737 uint64_to_ptr(rpra[i].buf.pv
1738 + rpra[i].buf.len));
1739 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001740 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001741 PERF_END);
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301742 for (i = bufs; rpra && lrpra && i < bufs + handles; i++) {
Jeya R4c7abf22020-07-23 16:00:50 +05301743 if (ctx->fds)
1744 rpra[i].dma.fd = lrpra[i].dma.fd = ctx->fds[i];
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301745 rpra[i].dma.len = lrpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1746 rpra[i].dma.offset = lrpra[i].dma.offset =
1747 (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001748 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001749
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001750 bail:
1751 return err;
1752}
1753
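/*
 * put_args() - unmarshal results after the remote call completes.
 * Copies output buffers that were not ion-mapped back to user space,
 * releases the per-call mappings, frees any maps the DSP flagged for
 * release through the fd list and, if requested, copies the CRC list
 * back to the caller.
 */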
1754static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
1755 remote_arg_t *upra)
1756{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301757 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001758 uint32_t sc = ctx->sc;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001759 struct smq_invoke_buf *list;
1760 struct smq_phy_page *pages;
1761 struct fastrpc_mmap *mmap;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301762 uint64_t *fdlist = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001763 uint32_t *crclist = NULL;
1764
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301765 remote_arg64_t *rpra = ctx->lrpra;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001766 int i, inbufs, outbufs, handles;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001767 int err = 0;
1768
1769 inbufs = REMOTE_SCALARS_INBUFS(sc);
1770 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001771 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1772 list = smq_invoke_buf_start(ctx->rpra, sc);
1773 pages = smq_phy_page_start(sc, list);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301774 if (!me->legacy) {
1775 fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
1776 crclist = (uint32_t *)(fdlist + M_FDLIST);
1777 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001778
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001779 for (i = inbufs; i < inbufs + outbufs; ++i) {
1780 if (!ctx->maps[i]) {
1781 K_COPY_TO_USER(err, kernel,
1782 ctx->lpra[i].buf.pv,
1783 uint64_to_ptr(rpra[i].buf.pv),
1784 rpra[i].buf.len);
1785 if (err)
1786 goto bail;
1787 } else {
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301788 mutex_lock(&ctx->fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05301789 fastrpc_mmap_free(ctx->maps[i], 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301790 mutex_unlock(&ctx->fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05301791 ctx->maps[i] = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001792 }
1793 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301794 mutex_lock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301795 if (fdlist && (inbufs + outbufs + handles)) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001796 for (i = 0; i < M_FDLIST; i++) {
1797 if (!fdlist[i])
1798 break;
1799 if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
Sathish Ambleyae5ee542017-01-16 22:24:23 -08001800 0, 0, &mmap))
c_mtharu7bd6a422017-10-17 18:15:37 +05301801 fastrpc_mmap_free(mmap, 0);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001802 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001803 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301804 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambleybae51902017-07-03 15:00:49 -07001805 if (ctx->crc && crclist && rpra)
c_mtharue1a5ce12017-10-13 20:47:09 +05301806 K_COPY_TO_USER(err, kernel, ctx->crc,
Sathish Ambleybae51902017-07-03 15:00:49 -07001807 crclist, M_CRCLIST*sizeof(uint32_t));
1808
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001809 bail:
1810 return err;
1811}
1812
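/*
 * Before the invocation is sent, clean the cache lines that straddle the
 * unaligned start and end of each output buffer on non-coherent mappings,
 * so that a later eviction of a dirty line cannot overwrite data the DSP
 * writes into the same line.
 */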
1813static void inv_args_pre(struct smq_invoke_ctx *ctx)
1814{
1815 int i, inbufs, outbufs;
1816 uint32_t sc = ctx->sc;
1817 remote_arg64_t *rpra = ctx->rpra;
1818 uintptr_t end;
1819
1820 inbufs = REMOTE_SCALARS_INBUFS(sc);
1821 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1822 for (i = inbufs; i < inbufs + outbufs; ++i) {
1823 struct fastrpc_mmap *map = ctx->maps[i];
1824
1825 if (map && map->uncached)
1826 continue;
1827 if (!rpra[i].buf.len)
1828 continue;
Jeya R984a1a32021-01-18 15:38:07 +05301829 if (ctx->fl && ctx->fl->sctx && ctx->fl->sctx->smmu.coherent &&
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301830 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1831 continue;
1832 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1833 continue;
1834
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001835 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1836 buf_page_start(rpra[i].buf.pv))
1837 continue;
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301838 if (!IS_CACHE_ALIGNED((uintptr_t)
1839 uint64_to_ptr(rpra[i].buf.pv))) {
1840 if (map && map->handle)
1841 msm_ion_do_cache_op(ctx->fl->apps->client,
1842 map->handle,
1843 uint64_to_ptr(rpra[i].buf.pv),
1844 sizeof(uintptr_t),
1845 ION_IOC_CLEAN_INV_CACHES);
1846 else
1847 dmac_flush_range(
1848 uint64_to_ptr(rpra[i].buf.pv), (char *)
1849 uint64_to_ptr(rpra[i].buf.pv + 1));
1850 }
1851
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001852 end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
1853 rpra[i].buf.len);
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301854 if (!IS_CACHE_ALIGNED(end)) {
1855 if (map && map->handle)
1856 msm_ion_do_cache_op(ctx->fl->apps->client,
1857 map->handle,
1858 uint64_to_ptr(end),
1859 sizeof(uintptr_t),
1860 ION_IOC_CLEAN_INV_CACHES);
1861 else
1862 dmac_flush_range((char *)end,
1863 (char *)end + 1);
1864 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001865 }
1866}
1867
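/*
 * After the remote call returns, invalidate the CPU caches covering each
 * output buffer on non-coherent mappings so the caller reads the data
 * written by the DSP rather than stale cached contents.
 */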
1868static void inv_args(struct smq_invoke_ctx *ctx)
1869{
1870 int i, inbufs, outbufs;
1871 uint32_t sc = ctx->sc;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301872 remote_arg64_t *rpra = ctx->lrpra;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001873
1874 inbufs = REMOTE_SCALARS_INBUFS(sc);
1875 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1876 for (i = inbufs; i < inbufs + outbufs; ++i) {
1877 struct fastrpc_mmap *map = ctx->maps[i];
1878
1879 if (map && map->uncached)
1880 continue;
1881 if (!rpra[i].buf.len)
1882 continue;
Jeya R984a1a32021-01-18 15:38:07 +05301883 if (ctx->fl && ctx->fl->sctx && ctx->fl->sctx->smmu.coherent &&
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301884 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1885 continue;
1886 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1887 continue;
1888
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001889 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1890 buf_page_start(rpra[i].buf.pv)) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001891 continue;
1892 }
1893 if (map && map->handle)
1894 msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
1895 (char *)uint64_to_ptr(rpra[i].buf.pv),
1896 rpra[i].buf.len, ION_IOC_INV_CACHES);
1897 else
1898 dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
1899 (char *)uint64_to_ptr(rpra[i].buf.pv
1900 + rpra[i].buf.len));
1901 }
1902
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001903}
1904
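/*
 * Build the smq_msg for this context (process/thread ids, context id
 * tagged with the PD type, handle, scalars and the physical page of the
 * copy buffer) and transmit it to the DSP over glink, or over SMD on
 * legacy targets, after checking that the channel is connected and has
 * not been through a subsystem restart.
 */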
1905static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
1906 uint32_t kernel, uint32_t handle)
1907{
1908 struct smq_msg *msg = &ctx->msg;
1909 struct fastrpc_file *fl = ctx->fl;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05301910 int err = 0, len, cid = -1;
1911 struct fastrpc_channel_ctx *channel_ctx = NULL;
1912
1913 cid = fl->cid;
1914 VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
1915 if (err) {
1916 err = -ECHRNG;
1917 goto bail;
1918 }
1919 channel_ctx = &fl->apps->channel[fl->cid];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001920
c_mtharue1a5ce12017-10-13 20:47:09 +05301921 VERIFY(err, NULL != channel_ctx->chan);
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05301922 if (err) {
1923 err = -ECHRNG;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001924 goto bail;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05301925 }
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301926 msg->pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001927 msg->tid = current->pid;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301928 if (fl->sessionid)
1929 msg->tid |= (1 << SESSION_ID_INDEX);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001930 if (kernel)
1931 msg->pid = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301932 msg->invoke.header.ctx = ctx->ctxid | fl->pd;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001933 msg->invoke.header.handle = handle;
1934 msg->invoke.header.sc = ctx->sc;
1935 msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
1936 msg->invoke.page.size = buf_page_size(ctx->used);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301937 if (fl->apps->glink) {
1938 if (fl->ssrcount != channel_ctx->ssrcount) {
1939 err = -ECONNRESET;
1940 goto bail;
1941 }
1942 VERIFY(err, channel_ctx->link.port_state ==
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001943 FASTRPC_LINK_CONNECTED);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301944 if (err)
1945 goto bail;
1946 err = glink_tx(channel_ctx->chan,
1947 (void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
1948 GLINK_TX_REQ_INTENT);
1949 } else {
1950 spin_lock(&fl->apps->hlock);
1951 len = smd_write((smd_channel_t *)
1952 channel_ctx->chan,
1953 msg, sizeof(*msg));
1954 spin_unlock(&fl->apps->hlock);
1955 VERIFY(err, len == sizeof(*msg));
1956 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001957 bail:
1958 return err;
1959}
1960
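/*
 * Drain invoke responses from the SMD channel: the context index is
 * recovered from the response ctx field, validated against the global
 * context table and its magic, and the matching context is completed
 * with the returned value.
 */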
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301961static void fastrpc_smd_read_handler(int cid)
1962{
1963 struct fastrpc_apps *me = &gfa;
1964 struct smq_invoke_rsp rsp = {0};
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301965 int ret = 0, err = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301966 uint32_t index;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301967
1968 do {
1969 ret = smd_read_from_cb(me->channel[cid].chan, &rsp,
1970 sizeof(rsp));
1971 if (ret != sizeof(rsp))
1972 break;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301973
1974 index = (uint32_t)((rsp.ctx & FASTRPC_CTXID_MASK) >> 4);
1975 VERIFY(err, index < FASTRPC_CTX_MAX);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301976 if (err)
1977 goto bail;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301978
1979 VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
1980 if (err)
1981 goto bail;
1982
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05301983 VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp.ctx & ~3)) &&
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301984 me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
1985 if (err)
1986 goto bail;
1987
1988 context_notify_user(me->ctxtable[index], rsp.retval);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301989 } while (ret == sizeof(rsp));
1990bail:
1991 if (err)
1992 pr_err("adsprpc: invalid response or context\n");
1993
1994}
1995
1996static void smd_event_handler(void *priv, unsigned int event)
1997{
1998 struct fastrpc_apps *me = &gfa;
1999 int cid = (int)(uintptr_t)priv;
2000
2001 switch (event) {
2002 case SMD_EVENT_OPEN:
2003 complete(&me->channel[cid].workport);
2004 break;
2005 case SMD_EVENT_CLOSE:
2006 fastrpc_notify_drivers(me, cid);
2007 break;
2008 case SMD_EVENT_DATA:
2009 fastrpc_smd_read_handler(cid);
2010 break;
2011 }
2012}
2013
2014
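/*
 * One-time initialisation of the global driver state: list heads and
 * locks, per-channel completions, and the default security setting of
 * each channel (all secure except CDSP).
 */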
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002015static void fastrpc_init(struct fastrpc_apps *me)
2016{
2017 int i;
2018
2019 INIT_HLIST_HEAD(&me->drivers);
Tharun Kumar Merugubcd6fbf2018-01-04 17:49:34 +05302020 INIT_HLIST_HEAD(&me->maps);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002021 spin_lock_init(&me->hlock);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302022 spin_lock_init(&me->ctxlock);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302023 mutex_init(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002024 me->channel = &gcinfo[0];
2025 for (i = 0; i < NUM_CHANNELS; i++) {
2026 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302027 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002028 me->channel[i].sesscount = 0;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05302029 /* All channels are secure by default except CDSP */
2030 me->channel[i].secure = SECURE_CHANNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002031 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05302032 /* Set CDSP channel to non secure */
2033 me->channel[CDSP_DOMAIN_ID].secure = NON_SECURE_CHANNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002034}
2035
2036static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
2037
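/*
 * Core invocation path. Restores an interrupted context if one exists,
 * otherwise allocates a new one, marshals the arguments (get_args), does
 * pre-invoke cache maintenance, sends the message and waits for the
 * response; kernel-initiated calls wait uninterruptibly, while user calls
 * may be interrupted and are then saved for a later restart. On
 * completion the caches are invalidated, results are copied back
 * (put_args) and the profiling counters are updated.
 */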
2038static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
2039 uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07002040 struct fastrpc_ioctl_invoke_crc *inv)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002041{
c_mtharue1a5ce12017-10-13 20:47:09 +05302042 struct smq_invoke_ctx *ctx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002043 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05302044 int err = 0, cid = -1, interrupted = 0;
Maria Yu757199c2017-09-22 16:05:49 +08002045 struct timespec invoket = {0};
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05302046 int64_t *perf_counter = NULL;
2047
2048 cid = fl->cid;
2049 VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
2050 if (err) {
2051 err = -ECHRNG;
2052 goto bail;
2053 }
2054 VERIFY(err, fl->sctx != NULL);
2055 if (err) {
2056 err = -EBADR;
2057 goto bail;
2058 }
2059 perf_counter = getperfcounter(fl, PERF_COUNT);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002060
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002061 if (fl->profile)
2062 getnstimeofday(&invoket);
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05302063
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302064 if (!kernel) {
2065 VERIFY(err, invoke->handle != FASTRPC_STATIC_HANDLE_KERNEL);
2066 if (err) {
 2067			pr_err("adsprpc: ERROR: %s: user application %s trying to send a kernel RPC message to channel %d\n",
2068 __func__, current->comm, cid);
2069 goto bail;
2070 }
2071 }
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302072
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002073 if (!kernel) {
2074 VERIFY(err, 0 == context_restore_interrupted(fl, inv,
2075 &ctx));
2076 if (err)
2077 goto bail;
2078 if (fl->sctx->smmu.faults)
2079 err = FASTRPC_ENOSUCH;
2080 if (err)
2081 goto bail;
2082 if (ctx)
2083 goto wait;
2084 }
2085
2086 VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
2087 if (err)
2088 goto bail;
2089
2090 if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302091 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_GETARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002092 VERIFY(err, 0 == get_args(kernel, ctx));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002093 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002094 if (err)
2095 goto bail;
2096 }
2097
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302098 if (!fl->sctx->smmu.coherent) {
2099 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002100 inv_args_pre(ctx);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302101 PERF_END);
2102 }
2103
2104 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_LINK),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002105 VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002106 PERF_END);
2107
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002108 if (err)
2109 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002110 wait:
2111 if (kernel)
2112 wait_for_completion(&ctx->work);
2113 else {
2114 interrupted = wait_for_completion_interruptible(&ctx->work);
2115 VERIFY(err, 0 == (err = interrupted));
2116 if (err)
2117 goto bail;
2118 }
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302119 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
Sathish Ambleyc432b502017-06-05 12:03:42 -07002120 if (!fl->sctx->smmu.coherent)
2121 inv_args(ctx);
2122 PERF_END);
2123
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002124 VERIFY(err, 0 == (err = ctx->retval));
2125 if (err)
2126 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002127
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302128 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_PUTARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002129 VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002130 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002131 if (err)
2132 goto bail;
2133 bail:
2134 if (ctx && interrupted == -ERESTARTSYS)
2135 context_save_interrupted(ctx);
2136 else if (ctx)
2137 context_free(ctx);
2138 if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
2139 err = ECONNRESET;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002140
2141 if (fl->profile && !interrupted) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302142 if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER) {
2143 int64_t *count = GET_COUNTER(perf_counter, PERF_INVOKE);
2144
2145 if (count)
2146 *count += getnstimediff(&invoket);
2147 }
2148 if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX) {
2149 int64_t *count = GET_COUNTER(perf_counter, PERF_COUNT);
2150
2151 if (count)
2152 *count = *count+1;
2153 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002154 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002155 return err;
2156}
2157
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302158static int fastrpc_get_adsp_session(char *name, int *session)
2159{
2160 struct fastrpc_apps *me = &gfa;
2161 int err = 0, i;
2162
2163 for (i = 0; i < NUM_SESSIONS; i++) {
2164 if (!me->channel[0].spd[i].spdname)
2165 continue;
2166 if (!strcmp(name, me->channel[0].spd[i].spdname))
2167 break;
2168 }
2169 VERIFY(err, i < NUM_SESSIONS);
2170 if (err)
2171 goto bail;
2172 *session = i;
2173bail:
2174 return err;
2175}
2176
2177static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl);
Sathish Ambley36849af2017-02-02 09:35:55 -08002178static int fastrpc_channel_open(struct fastrpc_file *fl);
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302179static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl);
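/*
 * Create or attach to the remote process for this client:
 * FASTRPC_INIT_ATTACH / FASTRPC_INIT_ATTACH_SENSORS attach to an existing
 * (static) PD, FASTRPC_INIT_CREATE spawns a dynamic user PD (mapping the
 * ELF file and donating kernel-allocated init memory), and
 * FASTRPC_INIT_CREATE_STATIC brings up a named static PD such as
 * "audiopd", assigning the remote heap to the remote VM on first use.
 */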
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002180static int fastrpc_init_process(struct fastrpc_file *fl,
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002181 struct fastrpc_ioctl_init_attrs *uproc)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002182{
2183 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302184 struct fastrpc_apps *me = &gfa;
Sathish Ambleybae51902017-07-03 15:00:49 -07002185 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002186 struct fastrpc_ioctl_init *init = &uproc->init;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002187 struct smq_phy_page pages[1];
c_mtharue1a5ce12017-10-13 20:47:09 +05302188 struct fastrpc_mmap *file = NULL, *mem = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302189 struct fastrpc_buf *imem = NULL;
2190 unsigned long imem_dma_attr = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302191 char *proc_name = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002192
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302193 VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
Sathish Ambley36849af2017-02-02 09:35:55 -08002194 if (err)
2195 goto bail;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302196 if (init->flags == FASTRPC_INIT_ATTACH ||
2197 init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002198 remote_arg_t ra[1];
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302199 int tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002200
2201 ra[0].buf.pv = (void *)&tgid;
2202 ra[0].buf.len = sizeof(tgid);
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302203 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002204 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
2205 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302206 ioctl.fds = NULL;
2207 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002208 ioctl.crc = NULL;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302209 if (init->flags == FASTRPC_INIT_ATTACH)
2210 fl->pd = 0;
2211 else if (init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
2212 fl->spdname = SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME;
2213 fl->pd = 2;
2214 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002215 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2216 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2217 if (err)
2218 goto bail;
2219 } else if (init->flags == FASTRPC_INIT_CREATE) {
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002220 remote_arg_t ra[6];
2221 int fds[6];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002222 int mflags = 0;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302223 int memlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002224 struct {
2225 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302226 unsigned int namelen;
2227 unsigned int filelen;
2228 unsigned int pageslen;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002229 int attrs;
2230 int siglen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002231 } inbuf;
2232
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302233 inbuf.pgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002234 inbuf.namelen = strlen(current->comm) + 1;
2235 inbuf.filelen = init->filelen;
2236 fl->pd = 1;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302237
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05302238 VERIFY(err, access_ok(0, (void __user *)init->file,
2239 init->filelen));
2240 if (err)
2241 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002242 if (init->filelen) {
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302243 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002244 VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
2245 init->file, init->filelen, mflags, &file));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302246 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002247 if (err)
2248 goto bail;
2249 }
2250 inbuf.pageslen = 1;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302251
2252 VERIFY(err, !init->mem);
2253 if (err) {
2254 err = -EINVAL;
2255 pr_err("adsprpc: %s: %s: ERROR: donated memory allocated in userspace\n",
2256 current->comm, __func__);
2257 goto bail;
2258 }
2259 memlen = ALIGN(max(1024*1024*3, (int)init->filelen * 4),
2260 1024*1024);
2261 imem_dma_attr = DMA_ATTR_EXEC_MAPPING |
2262 DMA_ATTR_NO_KERNEL_MAPPING |
2263 DMA_ATTR_FORCE_NON_COHERENT;
2264 err = fastrpc_buf_alloc(fl, memlen, imem_dma_attr, 0, 0, &imem);
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05302265 if (err)
2266 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302267 fl->init_mem = imem;
2268
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002269 inbuf.pageslen = 1;
2270 ra[0].buf.pv = (void *)&inbuf;
2271 ra[0].buf.len = sizeof(inbuf);
2272 fds[0] = 0;
2273
2274 ra[1].buf.pv = (void *)current->comm;
2275 ra[1].buf.len = inbuf.namelen;
2276 fds[1] = 0;
2277
2278 ra[2].buf.pv = (void *)init->file;
2279 ra[2].buf.len = inbuf.filelen;
2280 fds[2] = init->filefd;
2281
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302282 pages[0].addr = imem->phys;
2283 pages[0].size = imem->size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002284 ra[3].buf.pv = (void *)pages;
2285 ra[3].buf.len = 1 * sizeof(*pages);
2286 fds[3] = 0;
2287
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002288 inbuf.attrs = uproc->attrs;
2289 ra[4].buf.pv = (void *)&(inbuf.attrs);
2290 ra[4].buf.len = sizeof(inbuf.attrs);
2291 fds[4] = 0;
2292
2293 inbuf.siglen = uproc->siglen;
2294 ra[5].buf.pv = (void *)&(inbuf.siglen);
2295 ra[5].buf.len = sizeof(inbuf.siglen);
2296 fds[5] = 0;
2297
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302298 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002299 ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002300 if (uproc->attrs)
2301 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002302 ioctl.inv.pra = ra;
2303 ioctl.fds = fds;
c_mtharue1a5ce12017-10-13 20:47:09 +05302304 ioctl.attrs = NULL;
2305 ioctl.crc = NULL;
2306 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2307 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2308 if (err)
2309 goto bail;
2310 } else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
2311 remote_arg_t ra[3];
2312 uint64_t phys = 0;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302313 size_t size = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302314 int fds[3];
2315 struct {
2316 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302317 unsigned int namelen;
2318 unsigned int pageslen;
c_mtharue1a5ce12017-10-13 20:47:09 +05302319 } inbuf;
2320
2321 if (!init->filelen)
2322 goto bail;
2323
2324 proc_name = kzalloc(init->filelen, GFP_KERNEL);
2325 VERIFY(err, !IS_ERR_OR_NULL(proc_name));
2326 if (err)
2327 goto bail;
2328 VERIFY(err, 0 == copy_from_user((void *)proc_name,
2329 (void __user *)init->file, init->filelen));
2330 if (err)
2331 goto bail;
2332
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302333 fl->pd = 1;
c_mtharue1a5ce12017-10-13 20:47:09 +05302334 inbuf.pgid = current->tgid;
c_mtharu81a0aa72017-11-07 16:13:21 +05302335 inbuf.namelen = init->filelen;
c_mtharue1a5ce12017-10-13 20:47:09 +05302336 inbuf.pageslen = 0;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302337
2338 if (!strcmp(proc_name, "audiopd")) {
2339 fl->spdname = AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME;
2340 VERIFY(err, !fastrpc_mmap_remove_pdr(fl));
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05302341 if (err)
2342 goto bail;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302343 }
2344
c_mtharue1a5ce12017-10-13 20:47:09 +05302345 if (!me->staticpd_flags) {
2346 inbuf.pageslen = 1;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302347 mutex_lock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302348 VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
2349 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
2350 &mem));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302351 mutex_unlock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302352 if (err)
2353 goto bail;
2354 phys = mem->phys;
2355 size = mem->size;
2356 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302357 hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
2358 me->channel[fl->cid].rhvm.vmperm,
2359 me->channel[fl->cid].rhvm.vmcount));
c_mtharue1a5ce12017-10-13 20:47:09 +05302360 if (err) {
 2361				pr_err("ADSPRPC: hyp_assign_phys fail err %d\n",
2362 err);
2363 pr_err("map->phys %llx, map->size %d\n",
2364 phys, (int)size);
2365 goto bail;
2366 }
2367 me->staticpd_flags = 1;
2368 }
2369
2370 ra[0].buf.pv = (void *)&inbuf;
2371 ra[0].buf.len = sizeof(inbuf);
2372 fds[0] = 0;
2373
2374 ra[1].buf.pv = (void *)proc_name;
2375 ra[1].buf.len = inbuf.namelen;
2376 fds[1] = 0;
2377
2378 pages[0].addr = phys;
2379 pages[0].size = size;
2380
2381 ra[2].buf.pv = (void *)pages;
2382 ra[2].buf.len = sizeof(*pages);
2383 fds[2] = 0;
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302384 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
c_mtharue1a5ce12017-10-13 20:47:09 +05302385
2386 ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
2387 ioctl.inv.pra = ra;
2388 ioctl.fds = NULL;
2389 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002390 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002391 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2392 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2393 if (err)
2394 goto bail;
2395 } else {
2396 err = -ENOTTY;
2397 }
2398bail:
c_mtharud91205a2017-11-07 16:01:06 +05302399 kfree(proc_name);
c_mtharue1a5ce12017-10-13 20:47:09 +05302400 if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
2401 me->staticpd_flags = 0;
2402 if (mem && err) {
2403 if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
2404 hyp_assign_phys(mem->phys, (uint64_t)mem->size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302405 me->channel[fl->cid].rhvm.vmid,
2406 me->channel[fl->cid].rhvm.vmcount,
2407 hlosvm, hlosvmperm, 1);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302408 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302409 fastrpc_mmap_free(mem, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302410 mutex_unlock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302411 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302412 if (file) {
2413 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302414 fastrpc_mmap_free(file, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302415 mutex_unlock(&fl->fl_map_mutex);
2416 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002417 return err;
2418}
2419
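/*
 * Ask the DSP, via the kernel handle, to tear down the remote process
 * associated with this client's tgid.
 */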
2420static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
2421{
2422 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07002423 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002424 remote_arg_t ra[1];
2425 int tgid = 0;
2426
Sathish Ambley36849af2017-02-02 09:35:55 -08002427 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
2428 if (err)
2429 goto bail;
Jeya R984a1a32021-01-18 15:38:07 +05302430 VERIFY(err, fl->sctx != NULL);
2431 if (err)
2432 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302433 VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002434 if (err)
2435 goto bail;
2436 tgid = fl->tgid;
2437 ra[0].buf.pv = (void *)&tgid;
2438 ra[0].buf.len = sizeof(tgid);
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302439 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002440 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
2441 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302442 ioctl.fds = NULL;
2443 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002444 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002445 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2446 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2447bail:
2448 return err;
2449}
2450
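/*
 * Ask the DSP to map a physical region into the remote address space; the
 * remote address comes back in *raddr. For ADSP_MMAP_HEAP_ADDR the region
 * is additionally protected through an SCM call, and for
 * ADSP_MMAP_REMOTE_HEAP_ADDR its ownership is hyp-assigned from HLOS to
 * the remote VM.
 */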
2451static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302452 uintptr_t va, uint64_t phys,
2453 size_t size, uintptr_t *raddr)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002454{
Sathish Ambleybae51902017-07-03 15:00:49 -07002455 struct fastrpc_ioctl_invoke_crc ioctl;
c_mtharu63ffc012017-11-16 15:26:56 +05302456 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002457 struct smq_phy_page page;
2458 int num = 1;
2459 remote_arg_t ra[3];
2460 int err = 0;
2461 struct {
2462 int pid;
2463 uint32_t flags;
2464 uintptr_t vaddrin;
2465 int num;
2466 } inargs;
2467 struct {
2468 uintptr_t vaddrout;
2469 } routargs;
2470
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302471 inargs.pid = fl->tgid;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302472 inargs.vaddrin = (uintptr_t)va;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002473 inargs.flags = flags;
2474 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
2475 ra[0].buf.pv = (void *)&inargs;
2476 ra[0].buf.len = sizeof(inargs);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302477 page.addr = phys;
2478 page.size = size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002479 ra[1].buf.pv = (void *)&page;
2480 ra[1].buf.len = num * sizeof(page);
2481
2482 ra[2].buf.pv = (void *)&routargs;
2483 ra[2].buf.len = sizeof(routargs);
2484
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302485 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002486 if (fl->apps->compat)
2487 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
2488 else
2489 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
2490 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302491 ioctl.fds = NULL;
2492 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002493 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002494 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2495 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302496 *raddr = (uintptr_t)routargs.vaddrout;
c_mtharue1a5ce12017-10-13 20:47:09 +05302497 if (err)
2498 goto bail;
2499 if (flags == ADSP_MMAP_HEAP_ADDR) {
2500 struct scm_desc desc = {0};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002501
c_mtharue1a5ce12017-10-13 20:47:09 +05302502 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302503 desc.args[1] = phys;
2504 desc.args[2] = size;
c_mtharue1a5ce12017-10-13 20:47:09 +05302505 desc.arginfo = SCM_ARGS(3);
2506 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
2507 TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
2508 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302509 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302510 hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
2511 me->channel[fl->cid].rhvm.vmperm,
2512 me->channel[fl->cid].rhvm.vmcount));
c_mtharue1a5ce12017-10-13 20:47:09 +05302513 if (err)
2514 goto bail;
2515 }
2516bail:
2517 return err;
2518}
2519
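/*
 * Undo the remote-heap side of a mapping. For ADSP_MMAP_HEAP_ADDR a
 * security key is fetched from the DSP (falling back to the older remote
 * call when the new one is unsupported) and TZ protection is cleared; for
 * ADSP_MMAP_REMOTE_HEAP_ADDR the memory is hyp-assigned back to HLOS.
 */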
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302520static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, uint64_t phys,
2521 size_t size, uint32_t flags)
c_mtharue1a5ce12017-10-13 20:47:09 +05302522{
2523 int err = 0;
c_mtharu63ffc012017-11-16 15:26:56 +05302524 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugu72c90252019-08-29 18:36:08 +05302525 int tgid = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302526 int destVM[1] = {VMID_HLOS};
2527 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
2528
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302529 if (flags == ADSP_MMAP_HEAP_ADDR) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302530 struct fastrpc_ioctl_invoke_crc ioctl;
2531 struct scm_desc desc = {0};
Tharun Kumar Merugu72c90252019-08-29 18:36:08 +05302532 remote_arg_t ra[2];
2533
c_mtharue1a5ce12017-10-13 20:47:09 +05302534 struct {
2535 uint8_t skey;
2536 } routargs;
2537
Tharun Kumar Merugu72c90252019-08-29 18:36:08 +05302538 if (fl == NULL)
2539 goto bail;
2540 tgid = fl->tgid;
2541 ra[0].buf.pv = (void *)&tgid;
2542 ra[0].buf.len = sizeof(tgid);
2543 ra[1].buf.pv = (void *)&routargs;
2544 ra[1].buf.len = sizeof(routargs);
c_mtharue1a5ce12017-10-13 20:47:09 +05302545
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302546 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Tharun Kumar Merugu72c90252019-08-29 18:36:08 +05302547 ioctl.inv.sc = REMOTE_SCALARS_MAKE(9, 1, 1);
c_mtharue1a5ce12017-10-13 20:47:09 +05302548 ioctl.inv.pra = ra;
2549 ioctl.fds = NULL;
2550 ioctl.attrs = NULL;
2551 ioctl.crc = NULL;
Tharun Kumar Merugu72c90252019-08-29 18:36:08 +05302552
c_mtharue1a5ce12017-10-13 20:47:09 +05302553
2554 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2555 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
Mohammed Nayeem Ur Rahman80f45dc2019-09-23 19:35:19 +05302556 if (err == AEE_EUNSUPPORTED) {
2557 remote_arg_t ra[1];
2558
 2559			pr_warn("ADSPRPC: Failed to get security key with updated remote call, falling back to older method\n");
2560 ra[0].buf.pv = (void *)&routargs;
2561 ra[0].buf.len = sizeof(routargs);
2562 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
2563 ioctl.inv.pra = ra;
2564 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2565 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2566 }
c_mtharue1a5ce12017-10-13 20:47:09 +05302567 if (err)
2568 goto bail;
Mohammed Nayeem Ur Rahman80f45dc2019-09-23 19:35:19 +05302569
c_mtharue1a5ce12017-10-13 20:47:09 +05302570 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302571 desc.args[1] = phys;
2572 desc.args[2] = size;
c_mtharue1a5ce12017-10-13 20:47:09 +05302573 desc.args[3] = routargs.skey;
2574 desc.arginfo = SCM_ARGS(4);
2575 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
2576 TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302577 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2578 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302579 me->channel[fl->cid].rhvm.vmid,
2580 me->channel[fl->cid].rhvm.vmcount,
2581 destVM, destVMperm, 1));
c_mtharue1a5ce12017-10-13 20:47:09 +05302582 if (err)
2583 goto bail;
2584 }
2585
2586bail:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002587 return err;
2588}
2589
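/*
 * Ask the DSP to unmap a remote address and, for heap-type mappings, also
 * clear the protection/ownership set up at map time via
 * fastrpc_munmap_on_dsp_rh().
 */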
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302590static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl, uintptr_t raddr,
2591 uint64_t phys, size_t size, uint32_t flags)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002592{
Sathish Ambleybae51902017-07-03 15:00:49 -07002593 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002594 remote_arg_t ra[1];
2595 int err = 0;
2596 struct {
2597 int pid;
2598 uintptr_t vaddrout;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302599 size_t size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002600 } inargs;
2601
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302602 inargs.pid = fl->tgid;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302603 inargs.size = size;
2604 inargs.vaddrout = raddr;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002605 ra[0].buf.pv = (void *)&inargs;
2606 ra[0].buf.len = sizeof(inargs);
2607
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302608 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002609 if (fl->apps->compat)
2610 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
2611 else
2612 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
2613 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302614 ioctl.fds = NULL;
2615 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002616 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002617 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2618 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
c_mtharue1a5ce12017-10-13 20:47:09 +05302619 if (err)
2620 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302621 if (flags == ADSP_MMAP_HEAP_ADDR ||
2622 flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2623 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, phys, size, flags));
c_mtharue1a5ce12017-10-13 20:47:09 +05302624 if (err)
2625 goto bail;
2626 }
2627bail:
2628 return err;
2629}
2630
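/*
 * On subsystem restart, walk the global remote-heap map list, unmap each
 * entry on the DSP, capture an ELF ramdump of it when ramdump is enabled,
 * and free it; a map is put back on the list if its removal fails.
 */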
2631static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
2632{
2633 struct fastrpc_mmap *match = NULL, *map = NULL;
2634 struct hlist_node *n = NULL;
2635 int err = 0, ret = 0;
2636 struct fastrpc_apps *me = &gfa;
2637 struct ramdump_segment *ramdump_segments_rh = NULL;
2638
2639 do {
2640 match = NULL;
2641 spin_lock(&me->hlock);
2642 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
2643 match = map;
2644 hlist_del_init(&map->hn);
2645 break;
2646 }
2647 spin_unlock(&me->hlock);
2648
2649 if (match) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302650 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match->phys,
2651 match->size, match->flags));
c_mtharue1a5ce12017-10-13 20:47:09 +05302652 if (err)
2653 goto bail;
2654 if (me->channel[0].ramdumpenabled) {
2655 ramdump_segments_rh = kcalloc(1,
2656 sizeof(struct ramdump_segment), GFP_KERNEL);
2657 if (ramdump_segments_rh) {
2658 ramdump_segments_rh->address =
2659 match->phys;
2660 ramdump_segments_rh->size = match->size;
2661 ret = do_elf_ramdump(
2662 me->channel[0].remoteheap_ramdump_dev,
2663 ramdump_segments_rh, 1);
2664 if (ret < 0)
 2665					pr_err("ADSPRPC: unable to dump heap\n");
2666 kfree(ramdump_segments_rh);
2667 }
2668 }
c_mtharu7bd6a422017-10-17 18:15:37 +05302669 fastrpc_mmap_free(match, 0);
c_mtharue1a5ce12017-10-13 20:47:09 +05302670 }
2671 } while (match);
2672bail:
2673 if (err && match)
2674 fastrpc_mmap_add(match);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002675 return err;
2676}
2677
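/*
 * Handle audio PD restart: if the PDR count has advanced since the last
 * check, remove stale remote-heap mappings, and fail with -ENOTCONN while
 * the protection domain is not yet up.
 */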
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302678static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl)
2679{
2680 struct fastrpc_apps *me = &gfa;
2681 int session = 0, err = 0;
2682
2683 VERIFY(err, !fastrpc_get_adsp_session(
2684 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
2685 if (err)
2686 goto bail;
2687 if (me->channel[fl->cid].spd[session].pdrcount !=
2688 me->channel[fl->cid].spd[session].prevpdrcount) {
2689 if (fastrpc_mmap_remove_ssr(fl))
2690 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
2691 me->channel[fl->cid].spd[session].prevpdrcount =
2692 me->channel[fl->cid].spd[session].pdrcount;
2693 }
2694 if (!me->channel[fl->cid].spd[session].ispdup) {
2695 VERIFY(err, 0);
2696 if (err) {
2697 err = -ENOTCONN;
2698 goto bail;
2699 }
2700 }
2701bail:
2702 return err;
2703}
2704
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002705static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302706 size_t len, struct fastrpc_mmap **ppmap);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002707
2708static void fastrpc_mmap_add(struct fastrpc_mmap *map);
2709
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05302710static inline void get_fastrpc_ioctl_mmap_64(
2711 struct fastrpc_ioctl_mmap_64 *mmap64,
2712 struct fastrpc_ioctl_mmap *immap)
2713{
2714 immap->fd = mmap64->fd;
2715 immap->flags = mmap64->flags;
2716 immap->vaddrin = (uintptr_t)mmap64->vaddrin;
2717 immap->size = mmap64->size;
2718}
2719
2720static inline void put_fastrpc_ioctl_mmap_64(
2721 struct fastrpc_ioctl_mmap_64 *mmap64,
2722 struct fastrpc_ioctl_mmap *immap)
2723{
2724 mmap64->vaddrout = (uint64_t)immap->vaddrout;
2725}
2726
2727static inline void get_fastrpc_ioctl_munmap_64(
2728 struct fastrpc_ioctl_munmap_64 *munmap64,
2729 struct fastrpc_ioctl_munmap *imunmap)
2730{
2731 imunmap->vaddrout = (uintptr_t)munmap64->vaddrout;
2732 imunmap->size = munmap64->size;
2733}
2734
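/*
 * MUNMAP ioctl handler. Kernel-allocated ADSP_MMAP_ADD_PAGES buffers are
 * matched on remote address and size and freed directly; any other
 * mapping is looked up, unmapped on the DSP and freed, and is put back on
 * the list if the remote unmap fails.
 */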
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002735static int fastrpc_internal_munmap(struct fastrpc_file *fl,
2736 struct fastrpc_ioctl_munmap *ud)
2737{
2738 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302739 struct fastrpc_mmap *map = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302740 struct fastrpc_buf *rbuf = NULL, *free = NULL;
2741 struct hlist_node *n;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002742
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302743 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302744
2745 spin_lock(&fl->hlock);
2746 hlist_for_each_entry_safe(rbuf, n, &fl->remote_bufs, hn_rem) {
2747 if (rbuf->raddr && (rbuf->flags == ADSP_MMAP_ADD_PAGES)) {
2748 if ((rbuf->raddr == ud->vaddrout) &&
2749 (rbuf->size == ud->size)) {
2750 free = rbuf;
2751 break;
2752 }
2753 }
2754 }
2755 spin_unlock(&fl->hlock);
2756
2757 if (free) {
2758 VERIFY(err, !fastrpc_munmap_on_dsp(fl, free->raddr,
2759 free->phys, free->size, free->flags));
2760 if (err)
2761 goto bail;
2762 fastrpc_buf_free(rbuf, 0);
2763 mutex_unlock(&fl->map_mutex);
2764 return err;
2765 }
2766
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302767 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002768 VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302769 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002770 if (err)
2771 goto bail;
Vamsi krishna Gattupalli4e87ecb2020-12-14 11:06:27 +05302772 VERIFY(err, map != NULL);
2773 if (err) {
2774 err = -EINVAL;
2775 goto bail;
2776 }
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302777 VERIFY(err, !fastrpc_munmap_on_dsp(fl, map->raddr,
Vamsi krishna Gattupalli4e87ecb2020-12-14 11:06:27 +05302778 map->phys, map->size, map->flags));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002779 if (err)
2780 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302781 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302782 fastrpc_mmap_free(map, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302783 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002784bail:
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302785 if (err && map) {
2786 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002787 fastrpc_mmap_add(map);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302788 mutex_unlock(&fl->fl_map_mutex);
2789 }
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302790 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002791 return err;
2792}
2793
c_mtharu7bd6a422017-10-17 18:15:37 +05302794static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
2795 struct fastrpc_ioctl_munmap_fd *ud) {
2796 int err = 0;
2797 struct fastrpc_mmap *map = NULL;
2798
2799 VERIFY(err, (fl && ud));
2800 if (err)
2801 goto bail;
Mohammed Nayeem Ur Rahmanfc548a52020-01-07 17:07:55 +05302802 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302803 mutex_lock(&fl->fl_map_mutex);
Tharun Kumar Merugu09fc6152018-02-16 13:13:12 +05302804 if (fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
2805		pr_err("adsprpc: mapping not found to unmap fd %d va 0x%llx len 0x%x\n",
c_mtharu7bd6a422017-10-17 18:15:37 +05302806 ud->fd, (unsigned long long)ud->va,
2807 (unsigned int)ud->len);
2808 err = -1;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302809 mutex_unlock(&fl->fl_map_mutex);
Mohammed Nayeem Ur Rahmanfc548a52020-01-07 17:07:55 +05302810 mutex_unlock(&fl->map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302811 goto bail;
2812 }
2813 if (map)
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302814 fastrpc_mmap_free(map, 0);
2815 mutex_unlock(&fl->fl_map_mutex);
Mohammed Nayeem Ur Rahmanfc548a52020-01-07 17:07:55 +05302816 mutex_unlock(&fl->map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302817bail:
2818 return err;
2819}
2820
2821
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002822static int fastrpc_internal_mmap(struct fastrpc_file *fl,
2823 struct fastrpc_ioctl_mmap *ud)
2824{
2825
c_mtharue1a5ce12017-10-13 20:47:09 +05302826 struct fastrpc_mmap *map = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302827 struct fastrpc_buf *rbuf = NULL;
2828 unsigned long dma_attr = 0;
2829 uintptr_t raddr = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002830 int err = 0;
2831
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302832 mutex_lock(&fl->map_mutex);
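	/*
	 * ADSP_MMAP_ADD_PAGES is backed by a fresh kernel DMA allocation;
	 * every other flag maps an existing user/ION buffer described by
	 * fd/vaddrin onto the DSP.
	 */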
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302833 if (ud->flags == ADSP_MMAP_ADD_PAGES) {
2834 if (ud->vaddrin) {
2835 err = -EINVAL;
2836 pr_err("adsprpc: %s: %s: ERROR: adding user allocated pages is not supported\n",
2837 current->comm, __func__);
2838 goto bail;
2839 }
2840 dma_attr = DMA_ATTR_EXEC_MAPPING |
2841 DMA_ATTR_NO_KERNEL_MAPPING |
2842 DMA_ATTR_FORCE_NON_COHERENT;
2843 err = fastrpc_buf_alloc(fl, ud->size, dma_attr, ud->flags,
2844 1, &rbuf);
2845 if (err)
2846 goto bail;
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302847 err = fastrpc_mmap_on_dsp(fl, ud->flags, 0,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302848 rbuf->phys, rbuf->size, &raddr);
2849 if (err)
2850 goto bail;
2851 rbuf->raddr = raddr;
2852 } else {
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302853
2854 uintptr_t va_to_dsp;
2855
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302856 mutex_lock(&fl->fl_map_mutex);
2857 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
2858 ud->size, ud->flags, 1, &map)) {
Mohammed Nayeem Ur Rahmanaf5f6102019-10-09 13:36:52 +05302859 ud->vaddrout = map->raddr;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302860 mutex_unlock(&fl->fl_map_mutex);
2861 mutex_unlock(&fl->map_mutex);
2862 return 0;
2863 }
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302864
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302865 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
2866 (uintptr_t)ud->vaddrin, ud->size,
2867 ud->flags, &map));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302868 mutex_unlock(&fl->fl_map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302869 if (err)
2870 goto bail;
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302871
2872 if (ud->flags == ADSP_MMAP_HEAP_ADDR ||
2873 ud->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
2874 va_to_dsp = 0;
2875 else
2876 va_to_dsp = (uintptr_t)map->va;
2877 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, va_to_dsp,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302878 map->phys, map->size, &raddr));
2879 if (err)
2880 goto bail;
2881 map->raddr = raddr;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302882 }
Mohammed Nayeem Ur Rahman3ac5d322018-09-24 13:54:08 +05302883 ud->vaddrout = raddr;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002884 bail:
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302885 if (err && map) {
2886 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302887 fastrpc_mmap_free(map, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302888 mutex_unlock(&fl->fl_map_mutex);
2889 }
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302890 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002891 return err;
2892}
2893
2894static void fastrpc_channel_close(struct kref *kref)
2895{
2896 struct fastrpc_apps *me = &gfa;
2897 struct fastrpc_channel_ctx *ctx;
2898 int cid;
2899
2900 ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
2901 cid = ctx - &gcinfo[0];
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05302902 if (me->glink) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05302903 fastrpc_glink_close(ctx->chan, cid);
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05302904 ctx->chan = NULL;
2905 }
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302906 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002907 pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
2908 MAJOR(me->dev_no), cid);
2909}
2910
2911static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
2912
2913static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302914 int secure, int sharedcb, struct fastrpc_session_ctx **session)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002915{
2916 struct fastrpc_apps *me = &gfa;
2917 int idx = 0, err = 0;
2918
2919 if (chan->sesscount) {
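		/*
		 * Pick either a shared context bank session or the first
		 * unused session whose secure attribute matches the request.
		 */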
2920 for (idx = 0; idx < chan->sesscount; ++idx) {
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302921 if ((sharedcb && chan->session[idx].smmu.sharedcb) ||
2922 (!chan->session[idx].used &&
2923 chan->session[idx].smmu.secure
2924 == secure && !sharedcb)) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002925 chan->session[idx].used = 1;
2926 break;
2927 }
2928 }
2929 VERIFY(err, idx < chan->sesscount);
2930 if (err)
2931 goto bail;
2932 chan->session[idx].smmu.faults = 0;
2933 } else {
2934 VERIFY(err, me->dev != NULL);
2935 if (err)
2936 goto bail;
2937 chan->session[0].dev = me->dev;
c_mtharue1a5ce12017-10-13 20:47:09 +05302938 chan->session[0].smmu.dev = me->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002939 }
2940
2941 *session = &chan->session[idx];
2942 bail:
2943 return err;
2944}
2945
c_mtharue1a5ce12017-10-13 20:47:09 +05302946static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
2947 size_t size)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002948{
2949 if (glink_queue_rx_intent(h, NULL, size))
2950 return false;
2951 return true;
2952}
2953
c_mtharue1a5ce12017-10-13 20:47:09 +05302954static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002955 const void *pkt_priv, const void *ptr)
2956{
2957}
2958
c_mtharue1a5ce12017-10-13 20:47:09 +05302959static void fastrpc_glink_notify_rx(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002960 const void *pkt_priv, const void *ptr, size_t size)
2961{
2962 struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302963 struct fastrpc_apps *me = &gfa;
2964 uint32_t index;
c_mtharufdac6892017-10-12 13:09:01 +05302965 int err = 0;
Jeya R8fa59d62020-11-04 20:42:59 +05302966 unsigned long irq_flags = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002967
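	/*
	 * Validate the response and look up the invoke context from the
	 * index encoded in rsp->ctx; stale or malformed packets are
	 * returned to glink without notifying anyone.
	 */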
c_mtharufdac6892017-10-12 13:09:01 +05302968 VERIFY(err, (rsp && size >= sizeof(*rsp)));
2969 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302970 goto bail;
2971
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302972 index = (uint32_t)((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
2973 VERIFY(err, index < FASTRPC_CTX_MAX);
c_mtharufdac6892017-10-12 13:09:01 +05302974 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302975 goto bail;
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302976
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302977 VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
2978 if (err)
2979 goto bail;
2980
Jeya R8fa59d62020-11-04 20:42:59 +05302981 spin_lock_irqsave(&me->ctxlock, irq_flags);
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302982 VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp->ctx & ~3)) &&
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302983 me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
Jeya R8fa59d62020-11-04 20:42:59 +05302984 if (err) {
2985 spin_unlock_irqrestore(&me->ctxlock, irq_flags);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302986 goto bail;
Jeya R8fa59d62020-11-04 20:42:59 +05302987 }
Mohammed Nayeem Ur Rahman32ba95d2019-07-26 17:31:37 +05302988 me->ctxtable[index]->handle = handle;
2989 me->ctxtable[index]->ptr = ptr;
Jeya R8fa59d62020-11-04 20:42:59 +05302990 spin_unlock_irqrestore(&me->ctxlock, irq_flags);
Mohammed Nayeem Ur Rahman32ba95d2019-07-26 17:31:37 +05302991
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302992 context_notify_user(me->ctxtable[index], rsp->retval);
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302993bail:
Jeya R859f8012020-08-09 02:09:14 +05302994 if (err) {
2995 glink_rx_done(handle, ptr, true);
c_mtharufdac6892017-10-12 13:09:01 +05302996 pr_err("adsprpc: invalid response or context\n");
Jeya R859f8012020-08-09 02:09:14 +05302997 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002998}
2999
c_mtharue1a5ce12017-10-13 20:47:09 +05303000static void fastrpc_glink_notify_state(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003001 unsigned int event)
3002{
3003 struct fastrpc_apps *me = &gfa;
3004 int cid = (int)(uintptr_t)priv;
3005 struct fastrpc_glink_info *link;
3006
3007 if (cid < 0 || cid >= NUM_CHANNELS)
3008 return;
3009 link = &me->channel[cid].link;
3010 switch (event) {
3011 case GLINK_CONNECTED:
3012 link->port_state = FASTRPC_LINK_CONNECTED;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05303013 complete(&me->channel[cid].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003014 break;
3015 case GLINK_LOCAL_DISCONNECTED:
3016 link->port_state = FASTRPC_LINK_DISCONNECTED;
3017 break;
3018 case GLINK_REMOTE_DISCONNECTED:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003019 break;
3020 default:
3021 break;
3022 }
3023}
3024
3025static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
3026 struct fastrpc_session_ctx **session)
3027{
3028 int err = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303029 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003030
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303031 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003032 if (!*session)
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303033 err = fastrpc_session_alloc_locked(chan, secure, 0, session);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303034 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003035 return err;
3036}
3037
3038static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
3039 struct fastrpc_session_ctx *session)
3040{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303041 struct fastrpc_apps *me = &gfa;
3042
3043 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003044 session->used = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303045 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003046}
3047
3048static int fastrpc_file_free(struct fastrpc_file *fl)
3049{
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303050 struct hlist_node *n = NULL;
Tharun Kumar Merugu3e966762018-04-04 10:56:44 +05303051 struct fastrpc_mmap *map = NULL, *lmap = NULL;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303052 struct fastrpc_perf *perf = NULL, *fperf = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003053 int cid;
3054
3055 if (!fl)
3056 return 0;
3057 cid = fl->cid;
3058
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05303059 (void)fastrpc_release_current_dsp_process(fl);
3060
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003061 spin_lock(&fl->apps->hlock);
3062 hlist_del_init(&fl->hn);
3063 spin_unlock(&fl->apps->hlock);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303064 kfree(fl->debug_buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003065
Sathish Ambleyd7fbcbb2017-03-08 10:55:48 -08003066 if (!fl->sctx) {
3067 kfree(fl);
3068 return 0;
3069 }
tharun kumar9f899ea2017-07-03 17:07:03 +05303070 spin_lock(&fl->hlock);
3071 fl->file_close = 1;
3072 spin_unlock(&fl->hlock);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303073 if (!IS_ERR_OR_NULL(fl->init_mem))
3074 fastrpc_buf_free(fl->init_mem, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003075 fastrpc_context_list_dtor(fl);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303076 fastrpc_cached_buf_list_free(fl);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303077 mutex_lock(&fl->fl_map_mutex);
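	/* Tear down any maps still owned by this file, one at a time. */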
Tharun Kumar Merugu3e966762018-04-04 10:56:44 +05303078 do {
3079 lmap = NULL;
3080 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
3081 hlist_del_init(&map->hn);
3082 lmap = map;
3083 break;
3084 }
3085 fastrpc_mmap_free(lmap, 1);
3086 } while (lmap);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303087 mutex_unlock(&fl->fl_map_mutex);
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303088 if (fl->refcount && (fl->ssrcount == fl->apps->channel[cid].ssrcount))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003089 kref_put_mutex(&fl->apps->channel[cid].kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303090 fastrpc_channel_close, &fl->apps->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003091 if (fl->sctx)
3092 fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
3093 if (fl->secsctx)
3094 fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303095
3096 mutex_lock(&fl->perf_mutex);
3097 do {
3098 struct hlist_node *pn = NULL;
3099
3100 fperf = NULL;
3101 hlist_for_each_entry_safe(perf, pn, &fl->perf, hn) {
3102 hlist_del_init(&perf->hn);
3103 fperf = perf;
3104 break;
3105 }
3106 kfree(fperf);
3107 } while (fperf);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303108 fastrpc_remote_buf_list_free(fl);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303109 mutex_unlock(&fl->perf_mutex);
3110 mutex_destroy(&fl->perf_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303111 mutex_destroy(&fl->fl_map_mutex);
Tharun Kumar Merugu8714e642018-05-17 15:21:08 +05303112 mutex_destroy(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003113 kfree(fl);
3114 return 0;
3115}
3116
3117static int fastrpc_device_release(struct inode *inode, struct file *file)
3118{
3119 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
3120
3121 if (fl) {
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303122 if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
3123 pm_qos_remove_request(&fl->pm_qos_req);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003124 if (fl->debugfs_file != NULL)
3125 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003126 fastrpc_file_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +05303127 file->private_data = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003128 }
3129 return 0;
3130}
3131
3132static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
3133 void *priv)
3134{
3135 struct fastrpc_apps *me = &gfa;
3136 int cid = (int)((uintptr_t)priv);
3137 struct fastrpc_glink_info *link;
3138
3139 if (cid < 0 || cid >= NUM_CHANNELS)
3140 return;
3141
3142 link = &me->channel[cid].link;
3143 switch (cb_info->link_state) {
3144 case GLINK_LINK_STATE_UP:
3145 link->link_state = FASTRPC_LINK_STATE_UP;
3146 complete(&me->channel[cid].work);
3147 break;
3148 case GLINK_LINK_STATE_DOWN:
3149 link->link_state = FASTRPC_LINK_STATE_DOWN;
3150 break;
3151 default:
3152 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
3153 break;
3154 }
3155}
3156
3157static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
3158{
3159 int err = 0;
3160 struct fastrpc_glink_info *link;
3161
3162 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3163 if (err)
3164 goto bail;
3165
3166 link = &me->channel[cid].link;
3167 if (link->link_notify_handle != NULL)
3168 goto bail;
3169
3170 link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
3171 link->link_notify_handle = glink_register_link_state_cb(
3172 &link->link_info,
3173 (void *)((uintptr_t)cid));
3174 VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
3175 if (err) {
3176 link->link_notify_handle = NULL;
3177 goto bail;
3178 }
3179 VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
3180 RPC_TIMEOUT));
3181bail:
3182 return err;
3183}
3184
3185static void fastrpc_glink_close(void *chan, int cid)
3186{
3187 int err = 0;
3188 struct fastrpc_glink_info *link;
3189
3190 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3191 if (err)
3192 return;
3193 link = &gfa.channel[cid].link;
3194
c_mtharu314a4202017-11-15 22:09:17 +05303195 if (link->port_state == FASTRPC_LINK_CONNECTED ||
3196 link->port_state == FASTRPC_LINK_REMOTE_DISCONNECTING) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003197 link->port_state = FASTRPC_LINK_DISCONNECTING;
3198 glink_close(chan);
3199 }
3200}
3201
3202static int fastrpc_glink_open(int cid)
3203{
3204 int err = 0;
3205 void *handle = NULL;
3206 struct fastrpc_apps *me = &gfa;
3207 struct glink_open_config *cfg;
3208 struct fastrpc_glink_info *link;
3209
3210 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3211 if (err)
3212 goto bail;
3213 link = &me->channel[cid].link;
3214 cfg = &me->channel[cid].link.cfg;
3215 VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
3216 if (err)
3217 goto bail;
3218
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05303219 VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
3220 if (err)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003221 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003222
3223 link->port_state = FASTRPC_LINK_CONNECTING;
3224 cfg->priv = (void *)(uintptr_t)cid;
3225 cfg->edge = gcinfo[cid].link.link_info.edge;
3226 cfg->transport = gcinfo[cid].link.link_info.transport;
3227 cfg->name = FASTRPC_GLINK_GUID;
3228 cfg->notify_rx = fastrpc_glink_notify_rx;
3229 cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
3230 cfg->notify_state = fastrpc_glink_notify_state;
3231 cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
3232 handle = glink_open(cfg);
3233 VERIFY(err, !IS_ERR_OR_NULL(handle));
c_mtharu6e1d26b2017-10-09 16:05:24 +05303234 if (err) {
3235 if (link->port_state == FASTRPC_LINK_CONNECTING)
3236 link->port_state = FASTRPC_LINK_DISCONNECTED;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003237 goto bail;
c_mtharu6e1d26b2017-10-09 16:05:24 +05303238 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003239 me->channel[cid].chan = handle;
3240bail:
3241 return err;
3242}
3243
Sathish Ambley1ca68232017-01-19 10:32:55 -08003244static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
3245{
3246 filp->private_data = inode->i_private;
3247 return 0;
3248}
3249
3250static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
3251 size_t count, loff_t *position)
3252{
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303253 struct fastrpc_apps *me = &gfa;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003254 struct fastrpc_file *fl = filp->private_data;
3255 struct hlist_node *n;
c_mtharue1a5ce12017-10-13 20:47:09 +05303256 struct fastrpc_buf *buf = NULL;
3257 struct fastrpc_mmap *map = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303258 struct fastrpc_mmap *gmaps = NULL;
c_mtharue1a5ce12017-10-13 20:47:09 +05303259 struct smq_invoke_ctx *ictx = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303260 struct fastrpc_channel_ctx *chan = NULL;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003261 unsigned int len = 0;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303262 int i, j, sess_used = 0, ret = 0;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003263 char *fileinfo = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303264 char single_line[UL_SIZE] = "----------------";
3265 char title[UL_SIZE] = "=========================";
Sathish Ambley1ca68232017-01-19 10:32:55 -08003266
3267 fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
3268 if (!fileinfo)
3269 goto bail;
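	/*
	 * With no client attached to the debugfs file, dump channel-level
	 * and global map state; otherwise dump this client's state.
	 */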
3270 if (fl == NULL) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303271 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3272 "\n%s %s %s\n", title, " CHANNEL INFO ", title);
3273 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3274 "%-8s|%-9s|%-9s|%-14s|%-9s|%-13s\n",
3275 "susbsys", "refcount", "sesscount", "issubsystemup",
3276 "ssrcount", "session_used");
3277 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3278 "-%s%s%s%s-\n", single_line, single_line,
3279 single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003280 for (i = 0; i < NUM_CHANNELS; i++) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303281 sess_used = 0;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003282 chan = &gcinfo[i];
3283 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303284 DEBUGFS_SIZE - len, "%-8s", chan->subsys);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003285 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303286 DEBUGFS_SIZE - len, "|%-9d",
3287 chan->kref.refcount.counter);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303288 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303289 DEBUGFS_SIZE - len, "|%-9d",
3290 chan->sesscount);
3291 len += scnprintf(fileinfo + len,
3292 DEBUGFS_SIZE - len, "|%-14d",
3293 chan->issubsystemup);
3294 len += scnprintf(fileinfo + len,
3295 DEBUGFS_SIZE - len, "|%-9d",
3296 chan->ssrcount);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003297 for (j = 0; j < chan->sesscount; j++) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303298 sess_used += chan->session[j].used;
3299 }
3300 len += scnprintf(fileinfo + len,
3301 DEBUGFS_SIZE - len, "|%-13d\n", sess_used);
3302
3303 }
3304 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3305 "\n%s%s%s\n", "=============",
3306 " CMA HEAP ", "==============");
3307 len += scnprintf(fileinfo + len,
3308 DEBUGFS_SIZE - len, "%-20s|%-20s\n", "addr", "size");
3309 len += scnprintf(fileinfo + len,
3310 DEBUGFS_SIZE - len, "--%s%s---\n",
3311 single_line, single_line);
3312 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3313 "0x%-18llX", me->range.addr);
3314 len += scnprintf(fileinfo + len,
3315 DEBUGFS_SIZE - len, "|0x%-18llX\n", me->range.size);
3316 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3317 "\n==========%s %s %s===========\n",
3318 title, " GMAPS ", title);
3319 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3320 "%-20s|%-20s|%-20s|%-20s\n",
3321 "fd", "phys", "size", "va");
3322 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3323 "%s%s%s%s%s\n", single_line, single_line,
3324 single_line, single_line, single_line);
3325 hlist_for_each_entry_safe(gmaps, n, &me->maps, hn) {
3326 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3327 "%-20d|0x%-18llX|0x%-18X|0x%-20lX\n\n",
3328 gmaps->fd, gmaps->phys,
3329 (uint32_t)gmaps->size,
3330 gmaps->va);
3331 }
3332 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3333 "%-20s|%-20s|%-20s|%-20s\n",
3334 "len", "refs", "raddr", "flags");
3335 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3336 "%s%s%s%s%s\n", single_line, single_line,
3337 single_line, single_line, single_line);
3338 hlist_for_each_entry_safe(gmaps, n, &me->maps, hn) {
3339 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3340 "0x%-18X|%-20d|%-20lu|%-20u\n",
3341 (uint32_t)gmaps->len, gmaps->refs,
3342 gmaps->raddr, gmaps->flags);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003343 }
3344 } else {
3345 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303346 "\n%s %13s %d\n", "cid", ":", fl->cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003347 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303348 "%s %12s %d\n", "tgid", ":", fl->tgid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003349 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303350 "%s %7s %d\n", "sessionid", ":", fl->sessionid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003351 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303352 "%s %8s %d\n", "ssrcount", ":", fl->ssrcount);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303353 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303354 "%s %8s %d\n", "refcount", ":", fl->refcount);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003355 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303356 "%s %14s %d\n", "pd", ":", fl->pd);
3357 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3358 "%s %9s %s\n", "spdname", ":", fl->spdname);
3359 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3360 "%s %6s %d\n", "file_close", ":", fl->file_close);
3361 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3362 "%s %8s %d\n", "sharedcb", ":", fl->sharedcb);
3363 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3364 "%s %9s %d\n", "profile", ":", fl->profile);
3365 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3366 "%s %3s %d\n", "smmu.coherent", ":",
3367 fl->sctx->smmu.coherent);
3368 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3369 "%s %4s %d\n", "smmu.enabled", ":",
3370 fl->sctx->smmu.enabled);
3371 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3372 "%s %9s %d\n", "smmu.cb", ":", fl->sctx->smmu.cb);
3373 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3374 "%s %5s %d\n", "smmu.secure", ":",
3375 fl->sctx->smmu.secure);
3376 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3377 "%s %5s %d\n", "smmu.faults", ":",
3378 fl->sctx->smmu.faults);
3379 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3380 "%s %s %d\n", "link.link_state",
3381 ":", *&me->channel[fl->cid].link.link_state);
3382
3383 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3384 "\n=======%s %s %s======\n", title,
3385 " LIST OF MAPS ", title);
3386
3387 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3388 "%-20s|%-20s|%-20s\n", "va", "phys", "size");
3389 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3390 "%s%s%s%s%s\n",
3391 single_line, single_line, single_line,
3392 single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003393 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303394 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3395 "0x%-20lX|0x%-20llX|0x%-20zu\n\n",
3396 map->va, map->phys,
3397 map->size);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003398 }
3399 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303400 "%-20s|%-20s|%-20s|%-20s\n",
3401 "len", "refs",
3402 "raddr", "uncached");
3403 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3404 "%s%s%s%s%s\n",
3405 single_line, single_line, single_line,
3406 single_line, single_line);
3407 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
3408 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3409 "%-20zu|%-20d|0x%-20lX|%-20d\n\n",
3410 map->len, map->refs, map->raddr,
3411 map->uncached);
3412 }
3413 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3414 "%-20s|%-20s\n", "secure", "attr");
3415 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3416 "%s%s%s%s%s\n",
3417 single_line, single_line, single_line,
3418 single_line, single_line);
3419 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
3420 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3421 "%-20d|0x%-20lX\n\n",
3422 map->secure, map->attr);
3423 }
3424 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303425 "%s %d\n\n",
3426 "KERNEL MEMORY ALLOCATION:", 1);
3427 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303428 "\n======%s %s %s======\n", title,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303429 " LIST OF CACHED BUFS ", title);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303430 spin_lock(&fl->hlock);
3431 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303432 "%-19s|%-19s|%-19s|%-19s\n",
3433 "virt", "phys", "size", "dma_attr");
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303434 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3435 "%s%s%s%s%s\n", single_line, single_line,
3436 single_line, single_line, single_line);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303437 hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303438 len += scnprintf(fileinfo + len,
3439 DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303440 "0x%-17p|0x%-17llX|%-19zu|0x%-17lX\n",
3441 buf->virt, (uint64_t)buf->phys, buf->size,
3442 buf->dma_attr);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303443 }
3444 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3445 "\n%s %s %s\n", title,
3446 " LIST OF PENDING SMQCONTEXTS ", title);
3447
3448 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3449 "%-20s|%-10s|%-10s|%-10s|%-20s\n",
3450 "sc", "pid", "tgid", "used", "ctxid");
3451 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3452 "%s%s%s%s%s\n", single_line, single_line,
3453 single_line, single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003454 hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
3455 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303456 "0x%-18X|%-10d|%-10d|%-10zu|0x%-20llX\n\n",
3457 ictx->sc, ictx->pid, ictx->tgid,
3458 ictx->used, ictx->ctxid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003459 }
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303460
Sathish Ambley1ca68232017-01-19 10:32:55 -08003461 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303462 "\n%s %s %s\n", title,
3463 " LIST OF INTERRUPTED SMQCONTEXTS ", title);
3464
3465 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3466 "%-20s|%-10s|%-10s|%-10s|%-20s\n",
3467 "sc", "pid", "tgid", "used", "ctxid");
3468 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3469 "%s%s%s%s%s\n", single_line, single_line,
3470 single_line, single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003471 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303472 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3473 "%-20u|%-20d|%-20d|%-20zu|0x%-20llX\n\n",
3474 ictx->sc, ictx->pid, ictx->tgid,
3475 ictx->used, ictx->ctxid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003476 }
3477 spin_unlock(&fl->hlock);
3478 }
3479 if (len > DEBUGFS_SIZE)
3480 len = DEBUGFS_SIZE;
3481 ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
3482 kfree(fileinfo);
3483bail:
3484 return ret;
3485}
3486
3487static const struct file_operations debugfs_fops = {
3488 .open = fastrpc_debugfs_open,
3489 .read = fastrpc_debugfs_read,
3490};
Sathish Ambley36849af2017-02-02 09:35:55 -08003491static int fastrpc_channel_open(struct fastrpc_file *fl)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003492{
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003493 struct fastrpc_apps *me = &gfa;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05303494 int cid = -1, ii, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003495
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303496 mutex_lock(&me->smd_mutex);
3497
Sathish Ambley36849af2017-02-02 09:35:55 -08003498 VERIFY(err, fl && fl->sctx);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003499 if (err)
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303500 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08003501 cid = fl->cid;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05303502 VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
3503 if (err) {
3504 err = -ECHRNG;
c_mtharu314a4202017-11-15 22:09:17 +05303505 goto bail;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05303506 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303507 if (me->channel[cid].ssrcount !=
3508 me->channel[cid].prevssrcount) {
3509 if (!me->channel[cid].issubsystemup) {
3510 VERIFY(err, 0);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303511 if (err) {
3512 err = -ENOTCONN;
c_mtharue1a5ce12017-10-13 20:47:09 +05303513 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303514 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303515 }
3516 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003517 fl->ssrcount = me->channel[cid].ssrcount;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303518 fl->refcount = 1;
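	/*
	 * (Re)open the transport only when this is the first reference to
	 * the channel or the channel was closed, e.g. after an SSR.
	 */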
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003519 if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
c_mtharue1a5ce12017-10-13 20:47:09 +05303520 (me->channel[cid].chan == NULL)) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303521 if (me->glink) {
3522 VERIFY(err, 0 == fastrpc_glink_register(cid, me));
3523 if (err)
3524 goto bail;
3525 VERIFY(err, 0 == fastrpc_glink_open(cid));
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05303526 VERIFY(err,
3527 wait_for_completion_timeout(&me->channel[cid].workport,
3528 RPC_TIMEOUT));
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303529 } else {
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05303530 if (me->channel[cid].chan == NULL) {
3531 VERIFY(err, !smd_named_open_on_edge(
3532 FASTRPC_SMD_GUID,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303533 gcinfo[cid].channel,
3534 (smd_channel_t **)&me->channel[cid].chan,
3535 (void *)(uintptr_t)cid,
3536 smd_event_handler));
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05303537 VERIFY(err,
3538 wait_for_completion_timeout(&me->channel[cid].workport,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003539 RPC_TIMEOUT));
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05303540
3541 }
3542 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003543 if (err) {
c_mtharue1a5ce12017-10-13 20:47:09 +05303544 me->channel[cid].chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003545 goto bail;
3546 }
3547 kref_init(&me->channel[cid].kref);
3548 pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
3549 MAJOR(me->dev_no), cid);
Tharun Kumar Meruguc42c6e22018-05-29 15:50:46 +05303550
3551 for (ii = 0; ii < FASTRPC_GLINK_INTENT_NUM && me->glink; ii++)
3552 glink_queue_rx_intent(me->channel[cid].chan, NULL,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303553 FASTRPC_GLINK_INTENT_LEN);
Tharun Kumar Meruguc42c6e22018-05-29 15:50:46 +05303554
Tharun Kumar Merugud86fc8c2018-01-04 16:35:31 +05303555 if (cid == 0 && me->channel[cid].ssrcount !=
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003556 me->channel[cid].prevssrcount) {
c_mtharue1a5ce12017-10-13 20:47:09 +05303557 if (fastrpc_mmap_remove_ssr(fl))
3558 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003559 me->channel[cid].prevssrcount =
3560 me->channel[cid].ssrcount;
3561 }
3562 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003563
3564bail:
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303565 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003566 return err;
3567}
3568
Sathish Ambley36849af2017-02-02 09:35:55 -08003569static int fastrpc_device_open(struct inode *inode, struct file *filp)
3570{
3571 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05303572 struct fastrpc_file *fl = NULL;
Sathish Ambley36849af2017-02-02 09:35:55 -08003573 struct fastrpc_apps *me = &gfa;
3574
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303575 /*
3576	 * Indicates which device node was opened:
3577	 * MINOR_NUM_DEV or MINOR_NUM_SECURE_DEV
3578 */
3579 int dev_minor = MINOR(inode->i_rdev);
3580
3581 VERIFY(err, ((dev_minor == MINOR_NUM_DEV) ||
3582 (dev_minor == MINOR_NUM_SECURE_DEV)));
3583 if (err) {
3584 pr_err("adsprpc: Invalid dev minor num %d\n", dev_minor);
3585 return err;
3586 }
3587
c_mtharue1a5ce12017-10-13 20:47:09 +05303588 VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
Sathish Ambley36849af2017-02-02 09:35:55 -08003589 if (err)
3590 return err;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303591
Sathish Ambley36849af2017-02-02 09:35:55 -08003592 context_list_ctor(&fl->clst);
3593 spin_lock_init(&fl->hlock);
3594 INIT_HLIST_HEAD(&fl->maps);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303595 INIT_HLIST_HEAD(&fl->perf);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303596 INIT_HLIST_HEAD(&fl->cached_bufs);
3597 INIT_HLIST_HEAD(&fl->remote_bufs);
Sathish Ambley36849af2017-02-02 09:35:55 -08003598 INIT_HLIST_NODE(&fl->hn);
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05303599 fl->sessionid = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003600 fl->apps = me;
3601 fl->mode = FASTRPC_MODE_SERIAL;
3602 fl->cid = -1;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303603 fl->dev_minor = dev_minor;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303604 fl->init_mem = NULL;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303605 fl->qos_request = 0;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303606 fl->refcount = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003607 filp->private_data = fl;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05303608 mutex_init(&fl->map_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303609 mutex_init(&fl->fl_map_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08003610 spin_lock(&me->hlock);
3611 hlist_add_head(&fl->hn, &me->drivers);
3612 spin_unlock(&me->hlock);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303613 mutex_init(&fl->perf_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08003614 return 0;
3615}
3616
Edgar Flores1a772fa2020-02-07 14:59:29 -08003617static int fastrpc_set_process_info(struct fastrpc_file *fl)
3618{
3619 int err = 0, buf_size = 0;
3620 char strpid[PID_SIZE];
Jeya R336ada12021-03-18 14:04:49 +05303621 char cur_comm[TASK_COMM_LEN];
Edgar Flores1a772fa2020-02-07 14:59:29 -08003622
Jeya R336ada12021-03-18 14:04:49 +05303623 memcpy(cur_comm, current->comm, TASK_COMM_LEN);
3624 cur_comm[TASK_COMM_LEN-1] = '\0';
Edgar Flores1a772fa2020-02-07 14:59:29 -08003625 fl->tgid = current->tgid;
3626 snprintf(strpid, PID_SIZE, "%d", current->pid);
Jeya R336ada12021-03-18 14:04:49 +05303627 buf_size = strlen(cur_comm) + strlen("_") + strlen(strpid) + 1;
Edgar Flores1a772fa2020-02-07 14:59:29 -08003628 fl->debug_buf = kzalloc(buf_size, GFP_KERNEL);
3629 if (!fl->debug_buf) {
3630 err = -ENOMEM;
3631 return err;
3632 }
Jeya R336ada12021-03-18 14:04:49 +05303633 snprintf(fl->debug_buf, buf_size, "%.10s%s%d",
3634 cur_comm, "_", current->pid);
Edgar Flores1a772fa2020-02-07 14:59:29 -08003635 fl->debugfs_file = debugfs_create_file(fl->debug_buf, 0644,
3636 debugfs_root, fl, &debugfs_fops);
3637 if (!fl->debugfs_file)
3638 pr_warn("Error: %s: %s: failed to create debugfs file %s\n",
Jeya R336ada12021-03-18 14:04:49 +05303639 cur_comm, __func__, fl->debug_buf);
3640
Edgar Flores1a772fa2020-02-07 14:59:29 -08003641 return err;
3642}
3643
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003644static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
3645{
3646 int err = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003647 uint32_t cid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003648
c_mtharue1a5ce12017-10-13 20:47:09 +05303649 VERIFY(err, fl != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003650 if (err)
3651 goto bail;
Edgar Flores1a772fa2020-02-07 14:59:29 -08003652 err = fastrpc_set_process_info(fl);
3653 if (err)
3654 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08003655 if (fl->cid == -1) {
3656 cid = *info;
3657 VERIFY(err, cid < NUM_CHANNELS);
3658 if (err)
3659 goto bail;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303660 /* Check to see if the device node is non-secure */
zhaochenfc798572018-08-17 15:32:37 +08003661 if (fl->dev_minor == MINOR_NUM_DEV &&
3662 fl->apps->secure_flag == true) {
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303663 /*
3664			 * For a non-secure device node, check and make sure that
3665			 * the channel allows non-secure access.
3666			 * If not, bail: the session will not start, cid will
3667			 * remain -1, and the client will not be able to
3668			 * invoke any other methods without failure.
3669 */
3670 if (fl->apps->channel[cid].secure == SECURE_CHANNEL) {
3671 err = -EPERM;
3672 pr_err("adsprpc: GetInfo failed dev %d, cid %d, secure %d\n",
3673 fl->dev_minor, cid,
3674 fl->apps->channel[cid].secure);
3675 goto bail;
3676 }
3677 }
Sathish Ambley36849af2017-02-02 09:35:55 -08003678 fl->cid = cid;
3679 fl->ssrcount = fl->apps->channel[cid].ssrcount;
3680 VERIFY(err, !fastrpc_session_alloc_locked(
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303681 &fl->apps->channel[cid], 0, fl->sharedcb, &fl->sctx));
Sathish Ambley36849af2017-02-02 09:35:55 -08003682 if (err)
3683 goto bail;
3684 }
Tharun Kumar Merugu80be7d62017-08-02 11:03:22 +05303685 VERIFY(err, fl->sctx != NULL);
Jeya R984a1a32021-01-18 15:38:07 +05303686 if (err) {
3687 err = -EBADR;
Tharun Kumar Merugu80be7d62017-08-02 11:03:22 +05303688 goto bail;
Jeya R984a1a32021-01-18 15:38:07 +05303689 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003690 *info = (fl->sctx->smmu.enabled ? 1 : 0);
3691bail:
3692 return err;
3693}
3694
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303695static int fastrpc_internal_control(struct fastrpc_file *fl,
3696 struct fastrpc_ioctl_control *cp)
3697{
Mohammed Nayeem Ur Rahman18d633f2019-05-28 15:11:40 +05303698 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303699 int err = 0;
3700 int latency;
3701
3702 VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
3703 if (err)
3704 goto bail;
3705 VERIFY(err, !IS_ERR_OR_NULL(cp));
3706 if (err)
3707 goto bail;
3708
3709 switch (cp->req) {
3710 case FASTRPC_CONTROL_LATENCY:
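		/*
		 * Add or update a PM QoS CPU DMA latency request for this
		 * client; disabling falls back to PM_QOS_DEFAULT_VALUE.
		 */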
3711 latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
3712 fl->apps->latency : PM_QOS_DEFAULT_VALUE;
3713 VERIFY(err, latency != 0);
3714 if (err)
3715 goto bail;
3716 if (!fl->qos_request) {
3717 pm_qos_add_request(&fl->pm_qos_req,
3718 PM_QOS_CPU_DMA_LATENCY, latency);
3719 fl->qos_request = 1;
3720 } else
3721 pm_qos_update_request(&fl->pm_qos_req, latency);
3722 break;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303723 case FASTRPC_CONTROL_SMMU:
Mohammed Nayeem Ur Rahman18d633f2019-05-28 15:11:40 +05303724 if (!me->legacy)
3725 fl->sharedcb = cp->smmu.sharedcb;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303726 break;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303727 case FASTRPC_CONTROL_KALLOC:
3728 cp->kalloc.kalloc_support = 1;
3729 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303730 default:
3731 err = -ENOTTY;
3732 break;
3733 }
3734bail:
3735 return err;
3736}
3737
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003738static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
3739 unsigned long ioctl_param)
3740{
3741 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07003742 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003743 struct fastrpc_ioctl_mmap mmap;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303744 struct fastrpc_ioctl_mmap_64 mmap64;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003745 struct fastrpc_ioctl_munmap munmap;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303746 struct fastrpc_ioctl_munmap_64 munmap64;
c_mtharu7bd6a422017-10-17 18:15:37 +05303747 struct fastrpc_ioctl_munmap_fd munmap_fd;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08003748 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003749 struct fastrpc_ioctl_perf perf;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303750 struct fastrpc_ioctl_control cp;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003751 } p;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303752 union {
3753 struct fastrpc_ioctl_mmap mmap;
3754 struct fastrpc_ioctl_munmap munmap;
3755 } i;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003756 void *param = (char *)ioctl_param;
3757 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
3758 int size = 0, err = 0;
3759 uint32_t info;
3760
Jeya Rb70b4ad2021-01-25 10:28:42 -08003761 VERIFY(err, fl != NULL);
3762 if (err) {
3763 err = -EBADR;
3764 goto bail;
3765 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303766 p.inv.fds = NULL;
3767 p.inv.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07003768 p.inv.crc = NULL;
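	/* Refuse new requests once the file is being released. */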
tharun kumar9f899ea2017-07-03 17:07:03 +05303769 spin_lock(&fl->hlock);
3770 if (fl->file_close == 1) {
3771		err = -EBADF;
3772 pr_warn("ADSPRPC: fastrpc_device_release is happening, So not sending any new requests to DSP");
3773 spin_unlock(&fl->hlock);
3774 goto bail;
3775 }
3776 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003777
3778 switch (ioctl_num) {
3779 case FASTRPC_IOCTL_INVOKE:
3780 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07003781 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003782 case FASTRPC_IOCTL_INVOKE_FD:
3783 if (!size)
3784 size = sizeof(struct fastrpc_ioctl_invoke_fd);
3785 /* fall through */
3786 case FASTRPC_IOCTL_INVOKE_ATTRS:
3787 if (!size)
3788 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07003789 /* fall through */
3790 case FASTRPC_IOCTL_INVOKE_CRC:
3791 if (!size)
3792 size = sizeof(struct fastrpc_ioctl_invoke_crc);
c_mtharue1a5ce12017-10-13 20:47:09 +05303793 K_COPY_FROM_USER(err, 0, &p.inv, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003794 if (err)
3795 goto bail;
3796 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
3797 0, &p.inv)));
3798 if (err)
3799 goto bail;
3800 break;
3801 case FASTRPC_IOCTL_MMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303802 K_COPY_FROM_USER(err, 0, &p.mmap, param,
3803 sizeof(p.mmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303804 if (err)
3805 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003806 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
3807 if (err)
3808 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303809 K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003810 if (err)
3811 goto bail;
3812 break;
3813 case FASTRPC_IOCTL_MUNMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303814 K_COPY_FROM_USER(err, 0, &p.munmap, param,
3815 sizeof(p.munmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303816 if (err)
3817 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003818 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
3819 &p.munmap)));
3820 if (err)
3821 goto bail;
3822 break;
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303823 case FASTRPC_IOCTL_MMAP_64:
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303824 K_COPY_FROM_USER(err, 0, &p.mmap64, param,
3825 sizeof(p.mmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303826 if (err)
3827 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303828 get_fastrpc_ioctl_mmap_64(&p.mmap64, &i.mmap);
3829 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &i.mmap)));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303830 if (err)
3831 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303832 put_fastrpc_ioctl_mmap_64(&p.mmap64, &i.mmap);
3833 K_COPY_TO_USER(err, 0, param, &p.mmap64, sizeof(p.mmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303834 if (err)
3835 goto bail;
3836 break;
3837 case FASTRPC_IOCTL_MUNMAP_64:
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303838 K_COPY_FROM_USER(err, 0, &p.munmap64, param,
3839 sizeof(p.munmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303840 if (err)
3841 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303842 get_fastrpc_ioctl_munmap_64(&p.munmap64, &i.munmap);
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303843 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303844 &i.munmap)));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303845 if (err)
3846 goto bail;
3847 break;
c_mtharu7bd6a422017-10-17 18:15:37 +05303848 case FASTRPC_IOCTL_MUNMAP_FD:
3849 K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
3850 sizeof(p.munmap_fd));
3851 if (err)
3852 goto bail;
3853 VERIFY(err, 0 == (err = fastrpc_internal_munmap_fd(fl,
3854 &p.munmap_fd)));
3855 if (err)
3856 goto bail;
3857 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003858 case FASTRPC_IOCTL_SETMODE:
3859 switch ((uint32_t)ioctl_param) {
3860 case FASTRPC_MODE_PARALLEL:
3861 case FASTRPC_MODE_SERIAL:
3862 fl->mode = (uint32_t)ioctl_param;
3863 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003864 case FASTRPC_MODE_PROFILE:
3865 fl->profile = (uint32_t)ioctl_param;
3866 break;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05303867 case FASTRPC_MODE_SESSION:
3868 fl->sessionid = 1;
3869 fl->tgid |= (1 << SESSION_ID_INDEX);
3870 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003871 default:
3872 err = -ENOTTY;
3873 break;
3874 }
3875 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003876 case FASTRPC_IOCTL_GETPERF:
c_mtharue1a5ce12017-10-13 20:47:09 +05303877 K_COPY_FROM_USER(err, 0, &p.perf,
3878 param, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003879 if (err)
3880 goto bail;
3881 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
3882 if (p.perf.keys) {
3883 char *keys = PERF_KEYS;
3884
c_mtharue1a5ce12017-10-13 20:47:09 +05303885 K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
3886 keys, strlen(keys)+1);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003887 if (err)
3888 goto bail;
3889 }
3890 if (p.perf.data) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303891 struct fastrpc_perf *perf = NULL, *fperf = NULL;
3892 struct hlist_node *n = NULL;
3893
3894 mutex_lock(&fl->perf_mutex);
3895 hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
3896 if (perf->tid == current->pid) {
3897 fperf = perf;
3898 break;
3899 }
3900 }
3901
3902 mutex_unlock(&fl->perf_mutex);
3903
3904 if (fperf) {
3905 K_COPY_TO_USER(err, 0, (void *)p.perf.data,
3906 fperf, sizeof(*fperf));
3907 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003908 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303909 K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003910 if (err)
3911 goto bail;
3912 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303913 case FASTRPC_IOCTL_CONTROL:
c_mtharue1a5ce12017-10-13 20:47:09 +05303914 K_COPY_FROM_USER(err, 0, &p.cp, param,
3915 sizeof(p.cp));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303916 if (err)
3917 goto bail;
3918 VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
3919 if (err)
3920 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303921 if (p.cp.req == FASTRPC_CONTROL_KALLOC) {
3922 K_COPY_TO_USER(err, 0, param, &p.cp, sizeof(p.cp));
3923 if (err)
3924 goto bail;
3925 }
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303926 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003927 case FASTRPC_IOCTL_GETINFO:
c_mtharue1a5ce12017-10-13 20:47:09 +05303928 K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
Sathish Ambley36849af2017-02-02 09:35:55 -08003929 if (err)
3930 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003931 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
3932 if (err)
3933 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303934 K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003935 if (err)
3936 goto bail;
3937 break;
3938 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08003939 p.init.attrs = 0;
3940 p.init.siglen = 0;
3941 size = sizeof(struct fastrpc_ioctl_init);
3942 /* fall through */
3943 case FASTRPC_IOCTL_INIT_ATTRS:
3944 if (!size)
3945 size = sizeof(struct fastrpc_ioctl_init_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +05303946 K_COPY_FROM_USER(err, 0, &p.init, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003947 if (err)
3948 goto bail;
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05303949 VERIFY(err, p.init.init.filelen >= 0 &&
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303950 p.init.init.filelen < INIT_FILELEN_MAX);
3951 if (err)
3952 goto bail;
3953 VERIFY(err, p.init.init.memlen >= 0 &&
3954 p.init.init.memlen < INIT_MEMLEN_MAX);
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05303955 if (err)
3956 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303957 VERIFY(err, 0 == (err = fastrpc_init_process(fl, &p.init)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003958 if (err)
3959 goto bail;
3960 break;
3961
3962 default:
3963 err = -ENOTTY;
3964 pr_info("bad ioctl: %d\n", ioctl_num);
3965 break;
3966 }
3967 bail:
3968 return err;
3969}
3970
3971static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
3972 unsigned long code,
3973 void *data)
3974{
3975 struct fastrpc_apps *me = &gfa;
3976 struct fastrpc_channel_ctx *ctx;
c_mtharue1a5ce12017-10-13 20:47:09 +05303977 struct notif_data *notifdata = data;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003978 int cid;
3979
3980 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
3981 cid = ctx - &me->channel[0];
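	/*
	 * On SUBSYS_BEFORE_SHUTDOWN close the channel and notify clients of
	 * this subsystem; ramdump and power-up events only update state.
	 */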
3982 if (code == SUBSYS_BEFORE_SHUTDOWN) {
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303983 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003984 ctx->ssrcount++;
c_mtharue1a5ce12017-10-13 20:47:09 +05303985 ctx->issubsystemup = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303986 if (ctx->chan) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303987 if (me->glink)
3988 fastrpc_glink_close(ctx->chan, cid);
3989 else
3990 smd_close(ctx->chan);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303991 ctx->chan = NULL;
3992 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
3993 gcinfo[cid].name, MAJOR(me->dev_no), cid);
3994 }
3995 mutex_unlock(&me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05303996 if (cid == 0)
3997 me->staticpd_flags = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003998 fastrpc_notify_drivers(me, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05303999 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
4000 if (me->channel[0].remoteheap_ramdump_dev &&
4001 notifdata->enable_ramdump) {
4002 me->channel[0].ramdumpenabled = 1;
4003 }
4004 } else if (code == SUBSYS_AFTER_POWERUP) {
4005 ctx->issubsystemup = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004006 }
4007
4008 return NOTIFY_DONE;
4009}
4010
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304011static int fastrpc_pdr_notifier_cb(struct notifier_block *pdrnb,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304012 unsigned long code,
4013 void *data)
4014{
4015 struct fastrpc_apps *me = &gfa;
4016 struct fastrpc_static_pd *spd;
4017 struct notif_data *notifdata = data;
4018
4019 spd = container_of(pdrnb, struct fastrpc_static_pd, pdrnb);
4020 if (code == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01) {
4021 mutex_lock(&me->smd_mutex);
4022 spd->pdrcount++;
4023 spd->ispdup = 0;
4024 pr_info("ADSPRPC: Audio PDR notifier %d %s\n",
4025 MAJOR(me->dev_no), spd->spdname);
4026 mutex_unlock(&me->smd_mutex);
4027 if (!strcmp(spd->spdname,
4028 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME))
4029 me->staticpd_flags = 0;
4030 fastrpc_notify_pdr_drivers(me, spd->spdname);
4031 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
4032 if (me->channel[0].remoteheap_ramdump_dev &&
4033 notifdata->enable_ramdump) {
4034 me->channel[0].ramdumpenabled = 1;
4035 }
4036 } else if (code == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
4037 spd->ispdup = 1;
4038 }
4039
4040 return NOTIFY_DONE;
4041}
4042
4043static int fastrpc_get_service_location_notify(struct notifier_block *nb,
Vamsi krishna Gattupalli4c11c602020-08-25 19:34:14 +05304044 unsigned long opcode, void *data)
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304045{
4046 struct fastrpc_static_pd *spd;
4047 struct pd_qmi_client_data *pdr = data;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304048 int curr_state = 0, i = 0;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304049
4050 spd = container_of(nb, struct fastrpc_static_pd, get_service_nb);
4051 if (opcode == LOCATOR_DOWN) {
4052		pr_err("ADSPRPC: PDR service locator is down\n");
4053 return NOTIFY_DONE;
4054 }
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304055 for (i = 0; i < pdr->total_domains; i++) {
Vamsi krishna Gattupalli4c11c602020-08-25 19:34:14 +05304056 if ((!strcmp(spd->spdname, "audio_pdr_adsprpc"))
4057 && (!strcmp(pdr->domain_list[i].name,
4058 "msm/adsp/audio_pd"))) {
4059 goto pdr_register;
4060 } else if ((!strcmp(spd->spdname, "sensors_pdr_adsprpc"))
4061 && (!strcmp(pdr->domain_list[i].name,
4062 "msm/adsp/sensor_pd"))) {
4063 goto pdr_register;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304064 }
4065 }
Vamsi krishna Gattupalli4c11c602020-08-25 19:34:14 +05304066 return NOTIFY_DONE;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304067
Vamsi krishna Gattupalli4c11c602020-08-25 19:34:14 +05304068pdr_register:
4069 if (!spd->pdrhandle) {
4070 spd->pdrhandle =
4071 service_notif_register_notifier(
4072 pdr->domain_list[i].name,
4073 pdr->domain_list[i].instance_id,
4074 &spd->pdrnb, &curr_state);
4075 } else {
4076 pr_err("ADSPRPC: %s is already registered\n", spd->spdname);
4077 }
4078
4079 if (IS_ERR(spd->pdrhandle))
4080 pr_err("ADSPRPC: Unable to register notifier\n");
4081
4082 if (curr_state == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
4083 pr_info("ADSPRPC: %s is up\n", spd->spdname);
4084 spd->ispdup = 1;
4085 } else if (curr_state == SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01) {
4086		pr_info("ADSPRPC: %s is uninitialized\n", spd->spdname);
4087 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304088 return NOTIFY_DONE;
4089}
4090
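/*
 * Character-device entry points; 32-bit ioctl callers are routed
 * through compat_fastrpc_device_ioctl().
 */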
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004091static const struct file_operations fops = {
4092 .open = fastrpc_device_open,
4093 .release = fastrpc_device_release,
4094 .unlocked_ioctl = fastrpc_device_ioctl,
4095 .compat_ioctl = compat_fastrpc_device_ioctl,
4096};
4097
4098static const struct of_device_id fastrpc_match_table[] = {
4099 { .compatible = "qcom,msm-fastrpc-adsp", },
4100 { .compatible = "qcom,msm-fastrpc-compute", },
4101 { .compatible = "qcom,msm-fastrpc-compute-cb", },
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304102 { .compatible = "qcom,msm-fastrpc-legacy-compute", },
4103 { .compatible = "qcom,msm-fastrpc-legacy-compute-cb", },
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004104 { .compatible = "qcom,msm-adsprpc-mem-region", },
4105 {}
4106};
4107
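/*
 * Probe one SMMU context bank ("qcom,msm-fastrpc-compute-cb"): match the
 * "label" property against a channel, take the context-bank number from
 * the "iommus" phandle, create and attach an ARM IOMMU mapping (VA base
 * 0x80000000, or 0x60000000 plus a CP_PIXEL secure VMID for secure
 * context banks) and record the new session on that channel.
 */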
4108static int fastrpc_cb_probe(struct device *dev)
4109{
4110 struct fastrpc_channel_ctx *chan;
4111 struct fastrpc_session_ctx *sess;
4112 struct of_phandle_args iommuspec;
4113 const char *name;
4114 unsigned int start = 0x80000000;
4115 int err = 0, i;
4116 int secure_vmid = VMID_CP_PIXEL;
4117
c_mtharue1a5ce12017-10-13 20:47:09 +05304118 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
4119 "label", NULL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004120 if (err)
4121 goto bail;
4122 for (i = 0; i < NUM_CHANNELS; i++) {
4123 if (!gcinfo[i].name)
4124 continue;
4125 if (!strcmp(name, gcinfo[i].name))
4126 break;
4127 }
4128 VERIFY(err, i < NUM_CHANNELS);
4129 if (err)
4130 goto bail;
4131 chan = &gcinfo[i];
4132 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4133 if (err)
4134 goto bail;
4135
4136 VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
4137 "#iommu-cells", 0, &iommuspec));
4138 if (err)
4139 goto bail;
4140 sess = &chan->session[chan->sesscount];
4141 sess->smmu.cb = iommuspec.args[0] & 0xf;
4142 sess->used = 0;
4143 sess->smmu.coherent = of_property_read_bool(dev->of_node,
4144 "dma-coherent");
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304145 sess->smmu.sharedcb = of_property_read_bool(dev->of_node,
4146 "shared-cb");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004147 sess->smmu.secure = of_property_read_bool(dev->of_node,
4148 "qcom,secure-context-bank");
4149 if (sess->smmu.secure)
4150 start = 0x60000000;
4151 VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
4152 arm_iommu_create_mapping(&platform_bus_type,
Mohammed Nayeem Ur Rahman62f7f9c2020-04-13 11:16:19 +05304153 start, MAX_SIZE_LIMIT)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004154 if (err)
4155 goto bail;
4156
4157 if (sess->smmu.secure)
4158 iommu_domain_set_attr(sess->smmu.mapping->domain,
4159 DOMAIN_ATTR_SECURE_VMID,
4160 &secure_vmid);
4161
4162 VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
4163 if (err)
4164 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05304165 sess->smmu.dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004166 sess->smmu.enabled = 1;
4167 chan->sesscount++;
Sathish Ambley1ca68232017-01-19 10:32:55 -08004168 debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
4169 NULL, &debugfs_fops);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004170bail:
4171 return err;
4172}
4173
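/*
 * Legacy context-bank probe: every SID listed in the "sids" DT property
 * shares one IOMMU mapping attached to this device, and one session per
 * SID is added to the matching channel.
 */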
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304174static int fastrpc_cb_legacy_probe(struct device *dev)
4175{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304176 struct fastrpc_channel_ctx *chan;
4177 struct fastrpc_session_ctx *first_sess = NULL, *sess = NULL;
4178 const char *name;
4179 unsigned int *sids = NULL, sids_size = 0;
4180 int err = 0, ret = 0, i;
4181
4182 unsigned int start = 0x80000000;
4183
4184 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
4185 "label", NULL)));
4186 if (err)
4187 goto bail;
4188
4189 for (i = 0; i < NUM_CHANNELS; i++) {
4190 if (!gcinfo[i].name)
4191 continue;
4192 if (!strcmp(name, gcinfo[i].name))
4193 break;
4194 }
4195 VERIFY(err, i < NUM_CHANNELS);
4196 if (err)
4197 goto bail;
4198
4199 chan = &gcinfo[i];
4200 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4201 if (err)
4202 goto bail;
4203
4204 first_sess = &chan->session[chan->sesscount];
4205
4206 VERIFY(err, NULL != of_get_property(dev->of_node,
4207 "sids", &sids_size));
4208 if (err)
4209 goto bail;
4210
4211 VERIFY(err, NULL != (sids = kzalloc(sids_size, GFP_KERNEL)));
4212 if (err)
4213 goto bail;
4214 ret = of_property_read_u32_array(dev->of_node, "sids", sids,
4215 sids_size/sizeof(unsigned int));
4216 if (ret)
4217 goto bail;
4218
4219 VERIFY(err, !IS_ERR_OR_NULL(first_sess->smmu.mapping =
4220 arm_iommu_create_mapping(&platform_bus_type,
4221 start, 0x78000000)));
4222 if (err)
4223 goto bail;
4224
4225 VERIFY(err, !arm_iommu_attach_device(dev, first_sess->smmu.mapping));
4226 if (err)
4227 goto bail;
4228
4230 for (i = 0; i < sids_size/sizeof(unsigned int); i++) {
4231 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4232 if (err)
4233 goto bail;
4234 sess = &chan->session[chan->sesscount];
4235 sess->smmu.cb = sids[i];
4236 sess->smmu.dev = dev;
4237 sess->smmu.mapping = first_sess->smmu.mapping;
4238 sess->smmu.enabled = 1;
4239 sess->used = 0;
4240 sess->smmu.coherent = false;
4241 sess->smmu.secure = false;
4242 chan->sesscount++;
4243 }
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304244bail:
4245 kfree(sids);
4246 return err;
4247}
4248
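/*
 * Read the secure VMID list named by prop_name from the device node,
 * give each VMID read/write/execute permission and store the arrays in
 * destvm; the arrays are freed here only on a read failure.
 */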
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304251static void init_secure_vmid_list(struct device *dev, char *prop_name,
4252 struct secure_vm *destvm)
4253{
4254 int err = 0;
4255 u32 len = 0, i = 0;
4256 u32 *rhvmlist = NULL;
4257 u32 *rhvmpermlist = NULL;
4258
4259 if (!of_find_property(dev->of_node, prop_name, &len))
4260 goto bail;
4261 if (len == 0)
4262 goto bail;
4263 len /= sizeof(u32);
4264 VERIFY(err, NULL != (rhvmlist = kcalloc(len, sizeof(u32), GFP_KERNEL)));
4265 if (err)
4266 goto bail;
4267 VERIFY(err, NULL != (rhvmpermlist = kcalloc(len, sizeof(u32),
4268 GFP_KERNEL)));
4269 if (err)
4270 goto bail;
4271 for (i = 0; i < len; i++) {
4272 err = of_property_read_u32_index(dev->of_node, prop_name, i,
4273 &rhvmlist[i]);
4274 rhvmpermlist[i] = PERM_READ | PERM_WRITE | PERM_EXEC;
4275		pr_info("ADSPRPC: Secure VMID = %d\n", rhvmlist[i]);
4276 if (err) {
4277 pr_err("ADSPRPC: Failed to read VMID\n");
4278 goto bail;
4279 }
4280 }
4281 destvm->vmid = rhvmlist;
4282 destvm->vmperm = rhvmpermlist;
4283 destvm->vmcount = len;
4284bail:
4285 if (err) {
4286 kfree(rhvmlist);
4287 kfree(rhvmpermlist);
4288 }
4289}
4290
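/*
 * Mark channels secure according to the "qcom,secure-domains" bitmask
 * read in fastrpc_probe(). Illustrative example only: a DT value of 0x9
 * (binary 1001) would flag ADSP (bit 0) and CDSP (bit 3) as secure while
 * leaving MDSP (bit 1) and SLPI (bit 2) non-secure.
 */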
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304291static void configure_secure_channels(uint32_t secure_domains)
4292{
4293 struct fastrpc_apps *me = &gfa;
4294 int ii = 0;
4295 /*
4296 * secure_domains contains the bitmask of the secure channels
4297 * Bit 0 - ADSP
4298 * Bit 1 - MDSP
4299 * Bit 2 - SLPI
4300 * Bit 3 - CDSP
4301 */
4302 for (ii = ADSP_DOMAIN_ID; ii <= CDSP_DOMAIN_ID; ++ii) {
4303 int secure = (secure_domains >> ii) & 0x01;
4304
4305 me->channel[ii].secure = secure;
4306 }
4307}
4308
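/*
 * Platform probe, dispatched on the compatible string:
 *  - "qcom,msm-fastrpc-compute": read the remote-heap VMID list, the
 *    optional RPC latency vote and the "qcom,secure-domains" mask.
 *  - "qcom,msm-fastrpc-compute-cb" and the legacy variants: SMMU context
 *    banks, handled by fastrpc_cb_probe()/fastrpc_cb_legacy_probe().
 *  - "qcom,msm-adsprpc-mem-region": locate the ADSP ION heap's CMA region
 *    and, unless "restrict-access" is set, hyp-assign it to the modem,
 *    SSC and ADSP VMIDs.
 * Nodes that fall through also register with the service locator for
 * audio/sensors PDR when the matching DT flags are present, and finally
 * populate their child devices.
 */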
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004310static int fastrpc_probe(struct platform_device *pdev)
4311{
4312 int err = 0;
4313 struct fastrpc_apps *me = &gfa;
4314 struct device *dev = &pdev->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004315 struct device_node *ion_node, *node;
4316 struct platform_device *ion_pdev;
4317 struct cma *cma;
4318 uint32_t val;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304319 int ret = 0;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304320 uint32_t secure_domains;
c_mtharu63ffc012017-11-16 15:26:56 +05304321
4322 if (of_device_is_compatible(dev->of_node,
4323 "qcom,msm-fastrpc-compute")) {
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304324 init_secure_vmid_list(dev, "qcom,adsp-remoteheap-vmid",
4325 &gcinfo[0].rhvm);
c_mtharu63ffc012017-11-16 15:26:56 +05304326
4328 of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
4329 &me->latency);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304330 if (of_get_property(dev->of_node,
4331 "qcom,secure-domains", NULL) != NULL) {
4332 VERIFY(err, !of_property_read_u32(dev->of_node,
4333 "qcom,secure-domains",
4334 &secure_domains));
zhaochenfc798572018-08-17 15:32:37 +08004335 if (!err) {
4336 me->secure_flag = true;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304337 configure_secure_channels(secure_domains);
zhaochenfc798572018-08-17 15:32:37 +08004338 } else {
4339 me->secure_flag = false;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304340 pr_info("adsprpc: unable to read the domain configuration from dts\n");
zhaochenfc798572018-08-17 15:32:37 +08004341 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304342 }
c_mtharu63ffc012017-11-16 15:26:56 +05304343 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004344 if (of_device_is_compatible(dev->of_node,
4345 "qcom,msm-fastrpc-compute-cb"))
4346 return fastrpc_cb_probe(dev);
4347
4348 if (of_device_is_compatible(dev->of_node,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304349 "qcom,msm-fastrpc-legacy-compute")) {
4350 me->glink = false;
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304351 me->legacy = 1;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304352 }
4353
4354 if (of_device_is_compatible(dev->of_node,
4355		"qcom,msm-fastrpc-legacy-compute-cb"))
4356		return fastrpc_cb_legacy_probe(dev);
4358
4359 if (of_device_is_compatible(dev->of_node,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004360 "qcom,msm-adsprpc-mem-region")) {
4361 me->dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004362 ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
4363 if (ion_node) {
4364 for_each_available_child_of_node(ion_node, node) {
4365 if (of_property_read_u32(node, "reg", &val))
4366 continue;
4367 if (val != ION_ADSP_HEAP_ID)
4368 continue;
4369 ion_pdev = of_find_device_by_node(node);
4370 if (!ion_pdev)
4371 break;
4372 cma = dev_get_cma_area(&ion_pdev->dev);
4373 if (cma) {
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304374 me->range.addr = cma_get_base(cma);
4375 me->range.size =
4376 (size_t)cma_get_size(cma);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004377 }
4378 break;
4379 }
4380 }
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304381 if (me->range.addr && !of_property_read_bool(dev->of_node,
Tharun Kumar Merugu6b7a4a22018-01-17 16:08:07 +05304382 "restrict-access")) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004383 int srcVM[1] = {VMID_HLOS};
4384 int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
4385 VMID_ADSP_Q6};
Sathish Ambley84d11862017-05-15 14:36:05 -07004386 int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004387 PERM_READ | PERM_WRITE | PERM_EXEC,
4388 PERM_READ | PERM_WRITE | PERM_EXEC,
4389 PERM_READ | PERM_WRITE | PERM_EXEC,
4390 };
4391
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304392 VERIFY(err, !hyp_assign_phys(me->range.addr,
4393 me->range.size, srcVM, 1,
4394 destVM, destVMperm, 4));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004395 if (err)
4396 goto bail;
4397 }
4398 return 0;
4399 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304400 if (of_property_read_bool(dev->of_node,
4401 "qcom,fastrpc-adsp-audio-pdr")) {
4402 int session;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004403
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304404 VERIFY(err, !fastrpc_get_adsp_session(
4405 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
4406 if (err)
4407 goto spdbail;
4408 me->channel[0].spd[session].get_service_nb.notifier_call =
4409 fastrpc_get_service_location_notify;
4410 ret = get_service_location(
4411 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
4412 AUDIO_PDR_ADSP_SERVICE_NAME,
4413 &me->channel[0].spd[session].get_service_nb);
4414 if (ret)
4415 pr_err("ADSPRPC: Get service location failed: %d\n",
4416 ret);
4417 }
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304418 if (of_property_read_bool(dev->of_node,
4419 "qcom,fastrpc-adsp-sensors-pdr")) {
4420 int session;
4421
4422 VERIFY(err, !fastrpc_get_adsp_session(
4423 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
4424 if (err)
4425 goto spdbail;
4426 me->channel[0].spd[session].get_service_nb.notifier_call =
4427 fastrpc_get_service_location_notify;
4428 ret = get_service_location(
4429 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
4430 SENSORS_PDR_ADSP_SERVICE_NAME,
4431 &me->channel[0].spd[session].get_service_nb);
4432 if (ret)
4433 pr_err("ADSPRPC: Get service location failed: %d\n",
4434 ret);
4435 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304436spdbail:
4437 err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004438 VERIFY(err, !of_platform_populate(pdev->dev.of_node,
4439 fastrpc_match_table,
4440 NULL, &pdev->dev));
4441 if (err)
4442 goto bail;
4443bail:
4444 return err;
4445}
4446
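/*
 * Undo fastrpc_device_init(): drop channel references, detach and
 * release every session's IOMMU mapping and free the secure VMID lists.
 */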
4447static void fastrpc_deinit(void)
4448{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304449 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004450 struct fastrpc_channel_ctx *chan = gcinfo;
4451 int i, j;
4452
4453 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
4454 if (chan->chan) {
4455 kref_put_mutex(&chan->kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304456 fastrpc_channel_close, &me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05304457 chan->chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004458 }
4459 for (j = 0; j < NUM_SESSIONS; j++) {
4460 struct fastrpc_session_ctx *sess = &chan->session[j];
c_mtharue1a5ce12017-10-13 20:47:09 +05304461 if (sess->smmu.dev) {
4462 arm_iommu_detach_device(sess->smmu.dev);
4463 sess->smmu.dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004464 }
4465 if (sess->smmu.mapping) {
4466 arm_iommu_release_mapping(sess->smmu.mapping);
c_mtharue1a5ce12017-10-13 20:47:09 +05304467 sess->smmu.mapping = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004468 }
4469 }
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304470 kfree(chan->rhvm.vmid);
4471 kfree(chan->rhvm.vmperm);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004472 }
4473}
4474
4475static struct platform_driver fastrpc_driver = {
4476 .probe = fastrpc_probe,
4477 .driver = {
4478 .name = "fastrpc",
4479 .owner = THIS_MODULE,
4480 .of_match_table = fastrpc_match_table,
4481 },
4482};
4483
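/*
 * Module init: register the platform driver, allocate the char-dev
 * region and create two nodes, the default device (MINOR_NUM_DEV) and a
 * secure device (MINOR_NUM_SECURE_DEV). Every channel except CDSP is
 * bound to the secure node. Per-channel SSR notifiers and the ION client
 * are set up last, and the error path unwinds in reverse order.
 */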
4484static int __init fastrpc_device_init(void)
4485{
4486 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +05304487 struct device *dev = NULL;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304488 struct device *secure_dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004489 int err = 0, i;
4490
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05304491 debugfs_root = debugfs_create_dir("adsprpc", NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004492 memset(me, 0, sizeof(*me));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004493 fastrpc_init(me);
4494 me->dev = NULL;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304495 me->glink = true;
zhaochenfc798572018-08-17 15:32:37 +08004496 me->secure_flag = false;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004497 VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
4498 if (err)
4499 goto register_bail;
4500 VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
4501 DEVICE_NAME));
4502 if (err)
4503 goto alloc_chrdev_bail;
4504 cdev_init(&me->cdev, &fops);
4505 me->cdev.owner = THIS_MODULE;
4506 VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304507 NUM_DEVICES));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004508 if (err)
4509 goto cdev_init_bail;
4510 me->class = class_create(THIS_MODULE, "fastrpc");
4511 VERIFY(err, !IS_ERR(me->class));
4512 if (err)
4513 goto class_create_bail;
4514 me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304515
4516 /*
4517 * Create devices and register with sysfs
4518 * Create first device with minor number 0
4519 */
Sathish Ambley36849af2017-02-02 09:35:55 -08004520 dev = device_create(me->class, NULL,
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304521 MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV),
4522 NULL, DEVICE_NAME);
Sathish Ambley36849af2017-02-02 09:35:55 -08004523 VERIFY(err, !IS_ERR_OR_NULL(dev));
4524 if (err)
4525 goto device_create_bail;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304526
4527 /* Create secure device with minor number for secure device */
4528 secure_dev = device_create(me->class, NULL,
4529 MKDEV(MAJOR(me->dev_no), MINOR_NUM_SECURE_DEV),
4530 NULL, DEVICE_NAME_SECURE);
4531 VERIFY(err, !IS_ERR_OR_NULL(secure_dev));
4532 if (err)
4533 goto device_create_bail;
4534
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004535 for (i = 0; i < NUM_CHANNELS; i++) {
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304536 me->channel[i].dev = secure_dev;
4537 if (i == CDSP_DOMAIN_ID)
4538 me->channel[i].dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004539 me->channel[i].ssrcount = 0;
4540 me->channel[i].prevssrcount = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05304541 me->channel[i].issubsystemup = 1;
4542 me->channel[i].ramdumpenabled = 0;
4543 me->channel[i].remoteheap_ramdump_dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004544 me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
4545 me->channel[i].handle = subsys_notif_register_notifier(
4546 gcinfo[i].subsys,
4547 &me->channel[i].nb);
4548 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004549 me->client = msm_ion_client_create(DEVICE_NAME);
4550 VERIFY(err, !IS_ERR_OR_NULL(me->client));
4551 if (err)
4552 goto device_create_bail;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05304553
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004554 return 0;
4555device_create_bail:
4556 for (i = 0; i < NUM_CHANNELS; i++) {
Sathish Ambley36849af2017-02-02 09:35:55 -08004557 if (me->channel[i].handle)
4558 subsys_notif_unregister_notifier(me->channel[i].handle,
4559 &me->channel[i].nb);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004560 }
Sathish Ambley36849af2017-02-02 09:35:55 -08004561 if (!IS_ERR_OR_NULL(dev))
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304562 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4563 MINOR_NUM_DEV));
4564 if (!IS_ERR_OR_NULL(secure_dev))
4565 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4566 MINOR_NUM_SECURE_DEV));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004567 class_destroy(me->class);
4568class_create_bail:
4569 cdev_del(&me->cdev);
4570cdev_init_bail:
4571 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
4572alloc_chrdev_bail:
4573register_bail:
4574 fastrpc_deinit();
4575 return err;
4576}
4577
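/*
 * Module exit: tear down open files and channels, unregister the SSR
 * notifiers, destroy both device nodes, the class and the cdev, release
 * the char-dev region and the ION client, and remove the debugfs tree.
 */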
4578static void __exit fastrpc_device_exit(void)
4579{
4580 struct fastrpc_apps *me = &gfa;
4581 int i;
4582
4583 fastrpc_file_list_dtor(me);
4584 fastrpc_deinit();
4585 for (i = 0; i < NUM_CHANNELS; i++) {
4586 if (!gcinfo[i].name)
4587 continue;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004588 subsys_notif_unregister_notifier(me->channel[i].handle,
4589 &me->channel[i].nb);
4590 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304591
4592 /* Destroy the secure and non secure devices */
4593 device_destroy(me->class, MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV));
4594 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4595 MINOR_NUM_SECURE_DEV));
4596
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004597 class_destroy(me->class);
4598 cdev_del(&me->cdev);
4599 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
4600 ion_client_destroy(me->client);
Sathish Ambley1ca68232017-01-19 10:32:55 -08004601 debugfs_remove_recursive(debugfs_root);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004602}
4603
4604late_initcall(fastrpc_device_init);
4605module_exit(fastrpc_device_exit);
4606
4607MODULE_LICENSE("GPL v2");