/*
 * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/msm_ion.h>
#include <soc/qcom/secure_buffer.h>
#include <soc/qcom/glink.h>
#include <soc/qcom/smd.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/service-notifier.h>
#include <soc/qcom/service-locator.h>
#include <linux/scatterlist.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/sort.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <asm/dma-iommu.h>
#include <soc/qcom/scm.h>
#include "adsprpc_compat.h"
#include "adsprpc_shared.h"
#include <soc/qcom/ramdump.h>
#include <linux/debugfs.h>
#include <linux/pm_qos.h>
#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
#define TZ_PIL_AUTH_QDSP6_PROC 1
#define ADSP_MMAP_HEAP_ADDR 4
#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
#define ADSP_MMAP_ADD_PAGES 0x1000
#define FASTRPC_DMAHANDLE_NOMAP (16)

#define FASTRPC_ENOSUCH 39
#define VMID_SSC_Q6 5
#define VMID_ADSP_Q6 6
#define DEBUGFS_SIZE 3072
#define UL_SIZE 25
#define PID_SIZE 10

#define AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME "audio_pdr_adsprpc"
#define AUDIO_PDR_ADSP_SERVICE_NAME "avs/audio"

#define SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME "sensors_pdr_adsprpc"
#define SENSORS_PDR_ADSP_SERVICE_NAME "tms/servreg"

#define RPC_TIMEOUT (5 * HZ)
#define BALIGN 128
#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp */
#define NUM_SESSIONS 9 /* 8 compute, 1 cpz */
#define M_FDLIST (16)
#define M_CRCLIST (64)
#define SESSION_ID_INDEX (30)
#define FASTRPC_CTX_MAGIC (0xbeeddeed)
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_CTXID_MASK (0xFF0)
#define NUM_DEVICES 2 /* adsprpc-smd, adsprpc-smd-secure */
#define MINOR_NUM_DEV 0
#define MINOR_NUM_SECURE_DEV 1
#define NON_SECURE_CHANNEL 0
#define SECURE_CHANNEL 1

#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)

#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)

#define FASTRPC_LINK_STATE_DOWN (0x0)
#define FASTRPC_LINK_STATE_UP (0x1)
#define FASTRPC_LINK_DISCONNECTED (0x0)
#define FASTRPC_LINK_CONNECTING (0x1)
#define FASTRPC_LINK_CONNECTED (0x3)
#define FASTRPC_LINK_DISCONNECTING (0x7)
#define FASTRPC_LINK_REMOTE_DISCONNECTING (0x8)
#define FASTRPC_GLINK_INTENT_LEN (64)
#define FASTRPC_GLINK_INTENT_NUM (16)

#define PERF_KEYS \
	"count:flush:map:copy:glink:getargs:putargs:invalidate:invoke:tid:ptr"
#define FASTRPC_STATIC_HANDLE_KERNEL (1)
#define FASTRPC_STATIC_HANDLE_LISTENER (3)
#define FASTRPC_STATIC_HANDLE_MAX (20)
#define FASTRPC_LATENCY_CTRL_ENB (1)

#define MAX_SIZE_LIMIT (0x78000000)
#define INIT_FILELEN_MAX (2*1024*1024)
#define INIT_MEMLEN_MAX (8*1024*1024)

#define PERF_END (void)0

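/*
 * PERF() brackets the statement block passed as 'ff' with timestamps
 * when profiling is enabled and accumulates the elapsed nanoseconds
 * into the counter returned by GET_COUNTER(). A call site in this
 * file looks roughly like:
 *
 *	PERF(fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
 *		<statements to be timed>
 *	PERF_END);
 */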
#define PERF(enb, cnt, ff) \
	{\
		struct timespec startT = {0};\
		int64_t *counter = cnt;\
		if (enb && counter) {\
			getnstimeofday(&startT);\
		} \
		ff ;\
		if (enb && counter) {\
			*counter += getnstimediff(&startT);\
		} \
	}

#define GET_COUNTER(perf_ptr, offset)  \
	(perf_ptr != NULL ?\
		(((offset >= 0) && (offset < PERF_KEY_MAX)) ?\
			(int64_t *)(perf_ptr + offset)\
				: (int64_t *)NULL) : (int64_t *)NULL)

static int fastrpc_glink_open(int cid);
static void fastrpc_glink_close(void *chan, int cid);
static int fastrpc_pdr_notifier_cb(struct notifier_block *nb,
					unsigned long code,
					void *data);
static struct dentry *debugfs_root;
static struct dentry *debugfs_global_file;

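/*
 * Helpers for page-granular address arithmetic used throughout the
 * driver. For example, with 4 KB pages buf_num_pages(0x1001, 0x2000)
 * spans the pages at 0x1000, 0x2000 and 0x3000 and returns 3.
 */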
static inline uint64_t buf_page_start(uint64_t buf)
{
	uint64_t start = (uint64_t) buf & PAGE_MASK;
	return start;
}

static inline uint64_t buf_page_offset(uint64_t buf)
{
	uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
	return offset;
}

static inline uint64_t buf_num_pages(uint64_t buf, size_t len)
{
	uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
	uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
	uint64_t nPages = end - start + 1;
	return nPages;
}

static inline uint64_t buf_page_size(uint32_t size)
{
	uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;

	return sz > PAGE_SIZE ? sz : PAGE_SIZE;
}

static inline void *uint64_to_ptr(uint64_t addr)
{
	void *ptr = (void *)((uintptr_t)addr);

	return ptr;
}

static inline uint64_t ptr_to_uint64(void *ptr)
{
	uint64_t addr = (uint64_t)((uintptr_t)ptr);

	return addr;
}

struct secure_vm {
	int *vmid;
	int *vmperm;
	int vmcount;
};

struct fastrpc_file;

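/*
 * A DMA buffer owned by one client file. Buffers freed with the
 * 'cache' flag set are parked on fl->cached_bufs for reuse; buffers
 * mapped on the DSP are also linked on fl->remote_bufs and record
 * their remote address in 'raddr'.
 */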
struct fastrpc_buf {
	struct hlist_node hn;
	struct hlist_node hn_rem;
	struct fastrpc_file *fl;
	void *virt;
	uint64_t phys;
	size_t size;
	unsigned long dma_attr;
	uintptr_t raddr;
	uint32_t flags;
	int remote;
};

struct fastrpc_ctx_lst;

struct overlap {
	uintptr_t start;
	uintptr_t end;
	int raix;
	uintptr_t mstart;
	uintptr_t mend;
	uintptr_t offset;
};

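/*
 * Per-invocation state: one smq_invoke_ctx tracks a single remote
 * call from submission until the response completes 'work'. Each
 * context is also published in gfa.ctxtable so the response handler
 * can find it again via the id packed into 'ctxid'.
 */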
struct smq_invoke_ctx {
	struct hlist_node hn;
	struct completion work;
	int retval;
	int pid;
	int tgid;
	remote_arg_t *lpra;
	remote_arg64_t *rpra;
	remote_arg64_t *lrpra;		/* Local copy of rpra for put_args */
	int *fds;
	struct fastrpc_mmap **maps;
	struct fastrpc_buf *buf;
	struct fastrpc_buf *lbuf;
	size_t used;
	struct fastrpc_file *fl;
	uint32_t sc;
	struct overlap *overs;
	struct overlap **overps;
	struct smq_msg msg;
	unsigned int magic;
	unsigned int *attrs;
	uint32_t *crc;
	uint64_t ctxid;
	void *handle;
	const void *ptr;
};

struct fastrpc_ctx_lst {
	struct hlist_head pending;
	struct hlist_head interrupted;
};

struct fastrpc_smmu {
	struct device *dev;
	struct dma_iommu_mapping *mapping;
	int cb;
	int enabled;
	int faults;
	int secure;
	int coherent;
	int sharedcb;
};

struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};

struct fastrpc_static_pd {
	char *spdname;
	struct notifier_block pdrnb;
	struct notifier_block get_service_nb;
	void *pdrhandle;
	int pdrcount;
	int prevpdrcount;
	int ispdup;
};

struct fastrpc_glink_info {
	int link_state;
	int port_state;
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};

struct fastrpc_channel_ctx {
	char *name;
	char *subsys;
	void *chan;
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct fastrpc_static_pd spd[NUM_SESSIONS];
	struct completion work;
	struct completion workport;
	struct notifier_block nb;
	struct kref kref;
	int channel;
	int sesscount;
	int ssrcount;
	void *handle;
	int prevssrcount;
	int issubsystemup;
	int vmid;
	struct secure_vm rhvm;
	int ramdumpenabled;
	void *remoteheap_ramdump_dev;
	struct fastrpc_glink_info link;
	/* Indicates if the channel is restricted to the secure node only */
	int secure;
};

struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;
	uint32_t staticpd_flags;
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;
	spinlock_t hlock;
	struct ion_client *client;
	struct device *dev;
	unsigned int latency;
	bool glink;
	bool legacy;
	bool secure_flag;
	spinlock_t ctxlock;
	struct smq_invoke_ctx *ctxtable[FASTRPC_CTX_MAX];
};

struct fastrpc_mmap {
	struct hlist_node hn;
	struct fastrpc_file *fl;
	struct fastrpc_apps *apps;
	int fd;
	uint32_t flags;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;
	size_t size;
	uintptr_t va;
	size_t len;
	int refs;
	uintptr_t raddr;
	int uncached;
	int secure;
	uintptr_t attr;
	bool is_filemap; /* flag to indicate map used in process init */
};

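/*
 * Keys into the per-thread profiling record. The enum order mirrors
 * the int64_t members of struct fastrpc_perf below (and the PERF_KEYS
 * string), which is what lets getperfcounter() index the struct as a
 * flat array of counters.
 */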
enum fastrpc_perfkeys {
	PERF_COUNT = 0,
	PERF_FLUSH = 1,
	PERF_MAP = 2,
	PERF_COPY = 3,
	PERF_LINK = 4,
	PERF_GETARGS = 5,
	PERF_PUTARGS = 6,
	PERF_INVARGS = 7,
	PERF_INVOKE = 8,
	PERF_KEY_MAX = 9,
};

struct fastrpc_perf {
	int64_t count;
	int64_t flush;
	int64_t map;
	int64_t copy;
	int64_t link;
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;
	int64_t invoke;
	int64_t tid;
	struct hlist_node hn;
};

struct fastrpc_file {
	struct hlist_node hn;
	spinlock_t hlock;
	struct hlist_head maps;
	struct hlist_head cached_bufs;
	struct hlist_head remote_bufs;
	struct fastrpc_ctx_lst clst;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_buf *init_mem;
	struct fastrpc_session_ctx *secsctx;
	uint32_t mode;
	uint32_t profile;
	int sessionid;
	int tgid;
	int cid;
	int ssrcount;
	int pd;
	char *spdname;
	int file_close;
	int sharedcb;
	struct fastrpc_apps *apps;
	struct hlist_head perf;
	struct dentry *debugfs_file;
	struct mutex perf_mutex;
	struct pm_qos_request pm_qos_req;
	int qos_request;
	struct mutex pm_qos_mutex;
	struct mutex map_mutex;
	struct mutex fl_map_mutex;
	int refcount;
	/* Identifies the device (MINOR_NUM_DEV / MINOR_NUM_SECURE_DEV) */
	int dev_minor;
	char *debug_buf;
};

static struct fastrpc_apps gfa;

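/*
 * Static per-channel configuration, one entry per remote subsystem
 * (ADSP, MDSP, SDSP, CDSP): device name, subsystem name, glink
 * edge/transport and, for the ADSP, the static PD service descriptors
 * used for protection-domain restart notifications.
 */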
static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.channel = SMD_APPS_QDSP,
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
		.spd = {
			{
				.spdname =
					AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
				.pdrnb.notifier_call =
						fastrpc_pdr_notifier_cb,
			},
			{
				.spdname =
					SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
				.pdrnb.notifier_call =
						fastrpc_pdr_notifier_cb,
			}
		},
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.channel = SMD_APPS_MODEM,
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.channel = SMD_APPS_DSPS,
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};

static int hlosvm[1] = {VMID_HLOS};
static int hlosvmperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

static inline int64_t getnstimediff(struct timespec *start)
{
	int64_t ns;
	struct timespec ts, b;

	getnstimeofday(&ts);
	b = timespec_sub(ts, *start);
	ns = timespec_to_ns(&b);
	return ns;
}

static inline int64_t *getperfcounter(struct fastrpc_file *fl, int key)
{
	int err = 0;
	int64_t *val = NULL;
	struct fastrpc_perf *perf = NULL, *fperf = NULL;
	struct hlist_node *n = NULL;

	VERIFY(err, !IS_ERR_OR_NULL(fl));
	if (err)
		goto bail;

	mutex_lock(&fl->perf_mutex);
	hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
		if (perf->tid == current->pid) {
			fperf = perf;
			break;
		}
	}

	if (IS_ERR_OR_NULL(fperf)) {
		fperf = kzalloc(sizeof(*fperf), GFP_KERNEL);

		VERIFY(err, !IS_ERR_OR_NULL(fperf));
		if (err) {
			mutex_unlock(&fl->perf_mutex);
			kfree(fperf);
			goto bail;
		}

		fperf->tid = current->pid;
		hlist_add_head(&fperf->hn, &fl->perf);
	}

	val = ((int64_t *)fperf) + key;
	mutex_unlock(&fl->perf_mutex);
bail:
	return val;
}

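/*
 * Release a buffer: either park it on the cached list for later
 * reuse, or return ownership to HLOS (when it had been hyp-assigned
 * to a remote VM) and hand it back to the DMA allocator.
 */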
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
	int vmid, err = 0;

	if (!fl)
		return;
	if (cache) {
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->cached_bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (buf->remote) {
		spin_lock(&fl->hlock);
		hlist_del_init(&buf->hn_rem);
		spin_unlock(&fl->hlock);
		buf->remote = 0;
		buf->raddr = 0;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, fl->sctx != NULL);
		if (err)
			goto bail;
		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_attrs(fl->sctx->smmu.dev, buf->size, buf->virt,
					buf->phys, buf->dma_attr);
	}
bail:
	kfree(buf);
}

static void fastrpc_cached_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
			hlist_del_init(&buf->hn);
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}

static void fastrpc_remote_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->remote_bufs, hn_rem) {
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}

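/*
 * Remote-heap mappings are tracked on the global gfa.maps list since
 * they can outlive any single client; all other mappings live on the
 * owning file's fl->maps list.
 */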
static void fastrpc_mmap_add(struct fastrpc_mmap *map)
{
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		struct fastrpc_apps *me = &gfa;

		spin_lock(&me->hlock);
		hlist_add_head(&map->hn, &me->maps);
		spin_unlock(&me->hlock);
	} else {
		struct fastrpc_file *fl = map->fl;

		hlist_add_head(&map->hn, &fl->maps);
	}
}

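/*
 * Look up an existing mapping that covers [va, va + len) for the given
 * fd, taking an extra reference when 'refs' is set. Returns -ENOTTY
 * when nothing matches so that callers fall back to creating a map.
 */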
static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
		uintptr_t va, size_t len, int mflags, int refs,
		struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_mmap *match = NULL, *map = NULL;
	struct hlist_node *n;

	if ((va + len) < va)
		return -EOVERFLOW;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs) {
					if (map->refs + 1 == INT_MAX) {
						spin_unlock(&me->hlock);
						return -ETOOMANYREFS;
					}
					map->refs++;
				}
				match = map;
				break;
			}
		}
		spin_unlock(&me->hlock);
	} else {
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs) {
					if (map->refs + 1 == INT_MAX)
						return -ETOOMANYREFS;
					map->refs++;
				}
				match = map;
				break;
			}
		}
	}
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

static int dma_alloc_memory(dma_addr_t *region_phys, void **vaddr, size_t size,
			unsigned long dma_attrs)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;

	if (me->dev == NULL) {
		pr_err("device adsprpc-mem is not initialized\n");
		return -ENODEV;
	}
	VERIFY(err, size > 0 && size < MAX_SIZE_LIMIT);
	if (err) {
		err = -EFAULT;
		pr_err("adsprpc: %s: invalid allocation size 0x%zx\n",
			__func__, size);
		return err;
	}
	*vaddr = dma_alloc_attrs(me->dev, size, region_phys, GFP_KERNEL,
				dma_attrs);
	if (IS_ERR_OR_NULL(*vaddr)) {
		pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx, returned %pK\n",
				current->comm, __func__, size, (*vaddr));
		return -ENOMEM;
	}
	return 0;
}

static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       size_t len, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = NULL, *map;
	struct hlist_node *n;
	struct fastrpc_apps *me = &gfa;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
		if (map->refs == 1 && map->raddr == va &&
			map->raddr + map->len == va + len &&
			/* Remove map if not used in process initialization */
			!map->is_filemap) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (map->refs == 1 && map->raddr == va &&
			map->raddr + map->len == va + len &&
			/* Remove map if not used in process initialization */
			!map->is_filemap) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

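/*
 * Drop a reference on a mapping and tear it down once unused:
 * remote-heap allocations go back to the DMA pool, DMA-handle maps
 * only release their ion handle, and regular maps are unmapped from
 * the SMMU, reassigned back to HLOS if needed, and detached from the
 * dma-buf.
 */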
static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl;
	int vmid, cid = -1, err = 0;
	struct fastrpc_session_ctx *sess;

	if (!map)
		return;
	fl = map->fl;
	/* Remote heap and dynamic loading memory maps are
	 * expected to be initialized with a NULL fl.
	 */
	if (!fl && !(map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR))
		return;
	if (fl && !(map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)) {
		cid = fl->cid;
		VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
		if (err) {
			err = -ECHRNG;
			pr_err("adsprpc: ERROR:%s, Invalid channel id: %d, err:%d",
				__func__, cid, err);
			return;
		}
	}
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		spin_unlock(&me->hlock);
		if (map->refs > 0)
			return;
	} else {
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		if (map->refs > 0 && !flags)
			return;
	}
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {

		if (me->dev == NULL) {
			pr_err("failed to free remote heap allocation\n");
			return;
		}
		if (map->phys) {
			unsigned long dma_attrs = DMA_ATTR_SKIP_ZEROING |
						DMA_ATTR_NO_KERNEL_MAPPING;
			dma_free_attrs(me->dev, map->size, (void *)map->va,
					(dma_addr_t)map->phys, dma_attrs);
		}
	} else if (map->flags == FASTRPC_DMAHANDLE_NOMAP) {
		if (!IS_ERR_OR_NULL(map->handle))
			ion_free(fl->apps->client, map->handle);
	} else {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		if (!IS_ERR_OR_NULL(map->handle))
			ion_free(fl->apps->client, map->handle);
		if (sess && sess->smmu.enabled) {
			if (map->size || map->phys)
				msm_dma_unmap_sg(sess->smmu.dev,
					map->table->sgl,
					map->table->nents, DMA_BIDIRECTIONAL,
					map->buf);
		}
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid && map->phys) {
			int srcVM[2] = {VMID_HLOS, vmid};

			hyp_assign_phys(map->phys, buf_page_size(map->size),
				srcVM, 2, destVM, destVMperm, 1);
		}

		if (!IS_ERR_OR_NULL(map->table))
			dma_buf_unmap_attachment(map->attach, map->table,
					DMA_BIDIRECTIONAL);
		if (!IS_ERR_OR_NULL(map->attach))
			dma_buf_detach(map->buf, map->attach);
		if (!IS_ERR_OR_NULL(map->buf))
			dma_buf_put(map->buf);
	}
	kfree(map);
}

static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
			struct fastrpc_session_ctx **session);

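/*
 * Create (or reuse) a mapping for a buffer handed in by the client.
 * Three flavours are handled: remote-heap allocations served from the
 * adsprpc-mem device, FASTRPC_DMAHANDLE_NOMAP maps that only resolve
 * the physical address of an ion buffer, and regular dma-buf imports
 * that are attached and mapped through the session SMMU.
 */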
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, size_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	struct fastrpc_mmap *map = NULL;
	struct fastrpc_channel_ctx *chan = NULL;
	unsigned long attrs;
	dma_addr_t region_phys = 0;
	void *region_vaddr = NULL;
	unsigned long flags;
	int err = 0, vmid, cid = -1;

	cid = fl->cid;
	VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
	if (err) {
		err = -ECHRNG;
		goto bail;
	}
	chan = &apps->channel[cid];
	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	map->is_filemap = false;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		unsigned long dma_attrs = DMA_ATTR_SKIP_ZEROING |
					DMA_ATTR_NO_KERNEL_MAPPING;

		map->apps = me;
		map->fl = NULL;
		VERIFY(err, !dma_alloc_memory(&region_phys, &region_vaddr,
					len, dma_attrs));
		if (err)
			goto bail;
		map->phys = (uintptr_t)region_phys;
		map->size = len;
		map->va = (uintptr_t)region_vaddr;
	} else if (mflags == FASTRPC_DMAHANDLE_NOMAP) {
		ion_phys_addr_t iphys;

		VERIFY(err, !IS_ERR_OR_NULL(map->handle =
				ion_import_dma_buf_fd(fl->apps->client, fd)));
		if (err)
			goto bail;

		map->uncached = 1;
		map->buf = NULL;
		map->attach = NULL;
		map->table = NULL;
		map->va = 0;
		map->phys = 0;

		err = ion_phys(fl->apps->client, map->handle,
			&iphys, &map->size);
		if (err)
			goto bail;
		map->phys = (uint64_t)iphys;
	} else {
		if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
			pr_info("adsprpc: buffer mapped with persist attr %x\n",
				(unsigned int)map->attr);
			map->refs = 2;
		}
		VERIFY(err, !IS_ERR_OR_NULL(map->handle =
				ion_import_dma_buf_fd(fl->apps->client, fd)));
		if (err)
			goto bail;
		VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
		if (err)
			goto bail;

		map->secure = flags & ION_FLAG_SECURE;
		if (map->secure) {
			if (!fl->secsctx)
				err = fastrpc_session_alloc(chan, 1,
							&fl->secsctx);
			if (err)
				goto bail;
		}
		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		VERIFY(err, !IS_ERR_OR_NULL(sess));
		if (err)
			goto bail;

		map->uncached = !ION_IS_CACHED(flags);
		if (map->attr & FASTRPC_ATTR_NOVA && !sess->smmu.coherent)
			map->uncached = 1;

		VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->attach =
				dma_buf_attach(map->buf, sess->smmu.dev)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
		if (err)
			goto bail;
		if (sess->smmu.enabled) {
			attrs = DMA_ATTR_EXEC_MAPPING;

			if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
				(sess->smmu.coherent && map->uncached))
				attrs |= DMA_ATTR_FORCE_NON_COHERENT;
			else if (map->attr & FASTRPC_ATTR_COHERENT)
				attrs |= DMA_ATTR_FORCE_COHERENT;

			VERIFY(err, map->table->nents ==
				msm_dma_map_sg_attrs(sess->smmu.dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
			if (err)
				goto bail;
		} else {
			VERIFY(err, map->table->nents == 1);
			if (err)
				goto bail;
		}
		map->phys = sg_dma_address(map->table->sgl);

		if (sess->smmu.cb) {
			map->phys += ((uint64_t)sess->smmu.cb << 32);
			map->size = sg_dma_len(map->table->sgl);
		} else {
			map->size = buf_page_size(len);
		}

		VERIFY(err, map->size >= len && map->size < MAX_SIZE_LIMIT);
		if (err) {
			err = -EFAULT;
			goto bail;
		}

		vmid = fl->apps->channel[fl->cid].vmid;
		if (!sess->smmu.enabled && !vmid) {
			VERIFY(err, map->phys >= me->range.addr &&
				map->phys + map->size <=
				me->range.addr + me->range.size);
			if (err) {
				pr_err("adsprpc: mmap fail out of range\n");
				goto bail;
			}
		}
		if (vmid) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[2] = {VMID_HLOS, vmid};
			int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

			VERIFY(err, !hyp_assign_phys(map->phys,
					buf_page_size(map->size),
					srcVM, 1, destVM, destVMperm, 2));
			if (err)
				goto bail;
		}
		map->va = va;
	}
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map, 0);
	return err;
}

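/*
 * Allocate a DMA buffer for argument marshalling or remote mappings.
 * Non-remote requests first try to recycle the smallest cached buffer
 * that fits; otherwise the buffer is allocated with dma_alloc_attrs(),
 * hyp-assigned to the channel VM when one is configured, and, for
 * remote requests, tracked on fl->remote_bufs.
 */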
static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
			unsigned long dma_attr, uint32_t rflags,
			int remote, struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = NULL, *fr = NULL;
	struct hlist_node *n;

	VERIFY(err, size > 0 && size < MAX_SIZE_LIMIT);
	if (err) {
		err = -EFAULT;
		goto bail;
	}

	if (!remote) {
		/* find the smallest buffer that fits in the cache */
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
			if (buf->size >= size && (!fr || fr->size > buf->size))
				fr = buf;
		}
		if (fr)
			hlist_del_init(&fr->hn);
		spin_unlock(&fl->hlock);
		if (fr) {
			*obuf = fr;
			return 0;
		}
	}
	buf = NULL;
	VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->dma_attr = dma_attr;
	buf->flags = rflags;
	buf->raddr = 0;
	buf->remote = 0;
	VERIFY(err, fl && fl->sctx != NULL);
	if (err) {
		err = -EBADR;
		goto bail;
	}
	buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
						(dma_addr_t *)&buf->phys,
						GFP_KERNEL, buf->dma_attr);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_cached_buf_list_free(fl);
		buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
						(dma_addr_t *)&buf->phys,
						GFP_KERNEL, buf->dma_attr);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err) {
		err = -ENOMEM;
		pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx\n",
			current->comm, __func__, size);
		goto bail;
	}
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	if (remote) {
		INIT_HLIST_NODE(&buf->hn_rem);
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn_rem, &fl->remote_bufs);
		spin_unlock(&fl->hlock);
		buf->remote = remote;
	}
	*obuf = buf;
bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}


static int context_restore_interrupted(struct fastrpc_file *fl,
				struct fastrpc_ioctl_invoke_crc *inv,
				struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}

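/*
 * Input and output buffers of one call may overlap in the caller's
 * address space. The overlap records built below are sorted by start
 * address (widest range first) so that argument marshalling can
 * detect overlapping regions and avoid copying the same bytes twice.
 */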
#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int overlap_ptr_cmp(const void *a, const void *b)
{
	struct overlap *pa = *((struct overlap **)a);
	struct overlap *pb = *((struct overlap **)b);
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);
	return st == 0 ? ed : st;
}

static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}

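/*
 * Copy helpers that choose between copy_from_user()/copy_to_user()
 * and a plain memmove() depending on whether the invocation
 * originated in user space or inside the kernel.
 */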
1202#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
1203 do {\
1204 if (!(kernel))\
c_mtharue1a5ce12017-10-13 20:47:09 +05301205 VERIFY(err, 0 == copy_from_user((dst),\
1206 (void const __user *)(src),\
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001207 (size)));\
1208 else\
1209 memmove((dst), (src), (size));\
1210 } while (0)
1211
1212#define K_COPY_TO_USER(err, kernel, dst, src, size) \
1213 do {\
1214 if (!(kernel))\
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301215 VERIFY(err, 0 == copy_to_user((void __user *)(dst),\
c_mtharue1a5ce12017-10-13 20:47:09 +05301216 (src), (size)));\
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001217 else\
1218 memmove((dst), (src), (size));\
1219 } while (0)
1220
1221
1222static void context_free(struct smq_invoke_ctx *ctx);
1223
1224static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07001225 struct fastrpc_ioctl_invoke_crc *invokefd,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001226 struct smq_invoke_ctx **po)
1227{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301228 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301229 int err = 0, bufs, ii, size = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05301230 struct smq_invoke_ctx *ctx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001231 struct fastrpc_ctx_lst *clst = &fl->clst;
1232 struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;
Jeya R8fa59d62020-11-04 20:42:59 +05301233 unsigned long irq_flags = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001234
1235 bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
1236 size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
1237 sizeof(*ctx->fds) * (bufs) +
1238 sizeof(*ctx->attrs) * (bufs) +
1239 sizeof(*ctx->overs) * (bufs) +
1240 sizeof(*ctx->overps) * (bufs);
1241
c_mtharue1a5ce12017-10-13 20:47:09 +05301242 VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001243 if (err)
1244 goto bail;
1245
1246 INIT_HLIST_NODE(&ctx->hn);
1247 hlist_add_fake(&ctx->hn);
1248 ctx->fl = fl;
1249 ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
1250 ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
1251 ctx->fds = (int *)(&ctx->lpra[bufs]);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301252 if (me->legacy) {
1253 ctx->overs = (struct overlap *)(&ctx->fds[bufs]);
1254 ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
1255 } else {
1256 ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
1257 ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
1258 ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
1259 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001260
c_mtharue1a5ce12017-10-13 20:47:09 +05301261 K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001262 bufs * sizeof(*ctx->lpra));
1263 if (err)
1264 goto bail;
1265
1266 if (invokefd->fds) {
1267 K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
1268 bufs * sizeof(*ctx->fds));
1269 if (err)
1270 goto bail;
1271 }
1272 if (invokefd->attrs) {
1273 K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
1274 bufs * sizeof(*ctx->attrs));
1275 if (err)
1276 goto bail;
1277 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001278 ctx->crc = (uint32_t *)invokefd->crc;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001279 ctx->sc = invoke->sc;
Sathish Ambley9466d672017-01-25 10:51:55 -08001280 if (bufs) {
1281 VERIFY(err, 0 == context_build_overlap(ctx));
1282 if (err)
1283 goto bail;
1284 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001285 ctx->retval = -1;
1286 ctx->pid = current->pid;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301287 ctx->tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001288 init_completion(&ctx->work);
c_mtharufdac6892017-10-12 13:09:01 +05301289 ctx->magic = FASTRPC_CTX_MAGIC;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001290
1291 spin_lock(&fl->hlock);
1292 hlist_add_head(&ctx->hn, &clst->pending);
1293 spin_unlock(&fl->hlock);
1294
Jeya R8fa59d62020-11-04 20:42:59 +05301295 spin_lock_irqsave(&me->ctxlock, irq_flags);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301296 for (ii = 0; ii < FASTRPC_CTX_MAX; ii++) {
1297 if (!me->ctxtable[ii]) {
1298 me->ctxtable[ii] = ctx;
1299 ctx->ctxid = (ptr_to_uint64(ctx) & ~0xFFF)|(ii << 4);
1300 break;
1301 }
1302 }
Jeya R8fa59d62020-11-04 20:42:59 +05301303 spin_unlock_irqrestore(&me->ctxlock, irq_flags);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301304 VERIFY(err, ii < FASTRPC_CTX_MAX);
1305 if (err) {
1306 pr_err("adsprpc: out of context memory\n");
1307 goto bail;
1308 }
1309
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001310 *po = ctx;
1311bail:
1312 if (ctx && err)
1313 context_free(ctx);
1314 return err;
1315}
1316
1317static void context_save_interrupted(struct smq_invoke_ctx *ctx)
1318{
1319 struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
1320
1321 spin_lock(&ctx->fl->hlock);
1322 hlist_del_init(&ctx->hn);
1323 hlist_add_head(&ctx->hn, &clst->interrupted);
1324 spin_unlock(&ctx->fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001325}
1326
1327static void context_free(struct smq_invoke_ctx *ctx)
1328{
1329 int i;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301330 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001331 int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
1332 REMOTE_SCALARS_OUTBUFS(ctx->sc);
Jeya R8fa59d62020-11-04 20:42:59 +05301333 unsigned long irq_flags = 0;
1334 void *handle = NULL;
1335 const void *ptr = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001336 spin_lock(&ctx->fl->hlock);
1337 hlist_del_init(&ctx->hn);
1338 spin_unlock(&ctx->fl->hlock);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301339 mutex_lock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001340 for (i = 0; i < nbufs; ++i)
c_mtharu7bd6a422017-10-17 18:15:37 +05301341 fastrpc_mmap_free(ctx->maps[i], 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301342
1343 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001344 fastrpc_buf_free(ctx->buf, 1);
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301345 fastrpc_buf_free(ctx->lbuf, 1);
c_mtharufdac6892017-10-12 13:09:01 +05301346 ctx->magic = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301347 ctx->ctxid = 0;
1348
Jeya R8fa59d62020-11-04 20:42:59 +05301349 spin_lock_irqsave(&me->ctxlock, irq_flags);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301350 for (i = 0; i < FASTRPC_CTX_MAX; i++) {
1351 if (me->ctxtable[i] == ctx) {
Jeya R8fa59d62020-11-04 20:42:59 +05301352 handle = me->ctxtable[i]->handle;
1353 ptr = me->ctxtable[i]->ptr;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301354 me->ctxtable[i] = NULL;
1355 break;
1356 }
1357 }
Jeya R8fa59d62020-11-04 20:42:59 +05301358 spin_unlock_irqrestore(&me->ctxlock, irq_flags);
1359 if (handle) {
1360 glink_rx_done(handle, ptr, true);
1361 handle = NULL;
1362 }
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301363
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001364 kfree(ctx);
1365}
1366
1367static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
1368{
1369 ctx->retval = retval;
1370 complete(&ctx->work);
1371}
1372
1373
1374static void fastrpc_notify_users(struct fastrpc_file *me)
1375{
1376 struct smq_invoke_ctx *ictx;
1377 struct hlist_node *n;
1378
1379 spin_lock(&me->hlock);
1380 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1381 complete(&ictx->work);
1382 }
1383 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1384 complete(&ictx->work);
1385 }
1386 spin_unlock(&me->hlock);
1387
1388}
1389
Tharun Kumar Merugu77dd5872018-04-02 12:48:17 +05301390
1391static void fastrpc_notify_users_staticpd_pdr(struct fastrpc_file *me)
1392{
1393 struct smq_invoke_ctx *ictx;
1394 struct hlist_node *n;
1395
1396 spin_lock(&me->hlock);
1397 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1398 if (ictx->msg.pid)
1399 complete(&ictx->work);
1400 }
1401 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1402 if (ictx->msg.pid)
1403 complete(&ictx->work);
1404 }
1405 spin_unlock(&me->hlock);
1406}
1407
1408
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001409static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
1410{
1411 struct fastrpc_file *fl;
1412 struct hlist_node *n;
1413
1414 spin_lock(&me->hlock);
1415 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1416 if (fl->cid == cid)
1417 fastrpc_notify_users(fl);
1418 }
1419 spin_unlock(&me->hlock);
1420
1421}
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05301422
1423static void fastrpc_notify_pdr_drivers(struct fastrpc_apps *me, char *spdname)
1424{
1425 struct fastrpc_file *fl;
1426 struct hlist_node *n;
1427
1428 spin_lock(&me->hlock);
1429 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1430 if (fl->spdname && !strcmp(spdname, fl->spdname))
Tharun Kumar Merugu77dd5872018-04-02 12:48:17 +05301431 fastrpc_notify_users_staticpd_pdr(fl);
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05301432 }
1433 spin_unlock(&me->hlock);
1434
1435}
1436
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001437static void context_list_ctor(struct fastrpc_ctx_lst *me)
1438{
1439 INIT_HLIST_HEAD(&me->interrupted);
1440 INIT_HLIST_HEAD(&me->pending);
1441}
1442
1443static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
1444{
1445 struct fastrpc_ctx_lst *clst = &fl->clst;
c_mtharue1a5ce12017-10-13 20:47:09 +05301446 struct smq_invoke_ctx *ictx = NULL, *ctxfree;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001447 struct hlist_node *n;
1448
1449 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301450 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001451 spin_lock(&fl->hlock);
1452 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
1453 hlist_del_init(&ictx->hn);
1454 ctxfree = ictx;
1455 break;
1456 }
1457 spin_unlock(&fl->hlock);
1458 if (ctxfree)
1459 context_free(ctxfree);
1460 } while (ctxfree);
1461 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301462 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001463 spin_lock(&fl->hlock);
1464 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
1465 hlist_del_init(&ictx->hn);
1466 ctxfree = ictx;
1467 break;
1468 }
1469 spin_unlock(&fl->hlock);
1470 if (ctxfree)
1471 context_free(ctxfree);
1472 } while (ctxfree);
1473}
1474
1475static int fastrpc_file_free(struct fastrpc_file *fl);
1476static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
1477{
1478 struct fastrpc_file *fl, *free;
1479 struct hlist_node *n;
1480
1481 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301482 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001483 spin_lock(&me->hlock);
1484 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1485 hlist_del_init(&fl->hn);
1486 free = fl;
1487 break;
1488 }
1489 spin_unlock(&me->hlock);
1490 if (free)
1491 fastrpc_file_free(free);
1492 } while (free);
1493}
1494
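/*
 * Marshal the arguments of an invocation for the DSP: map ion buffers and
 * DMA handles, allocate the metadata/copy buffer (ctx->buf) and a local
 * copy of the remote argument list (ctx->lbuf), copy non-ion input buffers
 * into the copy buffer, and flush caches for non-coherent buffers.
 */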
1495static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
1496{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301497 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301498 remote_arg64_t *rpra, *lrpra;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001499 remote_arg_t *lpra = ctx->lpra;
1500 struct smq_invoke_buf *list;
1501 struct smq_phy_page *pages, *ipage;
1502 uint32_t sc = ctx->sc;
1503 int inbufs = REMOTE_SCALARS_INBUFS(sc);
1504 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001505 int handles, bufs = inbufs + outbufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001506 uintptr_t args;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301507 size_t rlen = 0, copylen = 0, metalen = 0, lrpralen = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001508 int i, oix;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001509 int err = 0;
1510 int mflags = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001511 uint64_t *fdlist;
Sathish Ambleybae51902017-07-03 15:00:49 -07001512 uint32_t *crclist;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301513 int64_t *perf_counter = getperfcounter(ctx->fl, PERF_COUNT);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001514
1515 /* calculate size of the metadata */
c_mtharue1a5ce12017-10-13 20:47:09 +05301516 rpra = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001517 list = smq_invoke_buf_start(rpra, sc);
1518 pages = smq_phy_page_start(sc, list);
1519 ipage = pages;
1520
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301521 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001522 for (i = 0; i < bufs; ++i) {
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301523 uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
1524 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001525
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301526 mutex_lock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301527 if (ctx->fds[i] && (ctx->fds[i] != -1)) {
1528 unsigned int attrs = 0;
1529
1530 if (ctx->attrs)
1531 attrs = ctx->attrs[i];
1532
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001533 fastrpc_mmap_create(ctx->fl, ctx->fds[i],
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301534 attrs, buf, len,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001535 mflags, &ctx->maps[i]);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301536 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301537 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001538 ipage += 1;
1539 }
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301540 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001541 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301542 mutex_lock(&ctx->fl->fl_map_mutex);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001543 for (i = bufs; i < bufs + handles; i++) {
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +05301544 int dmaflags = 0;
1545
1546 if (ctx->attrs && (ctx->attrs[i] & FASTRPC_ATTR_NOMAP))
1547 dmaflags = FASTRPC_DMAHANDLE_NOMAP;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001548 VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +05301549 FASTRPC_ATTR_NOVA, 0, 0, dmaflags, &ctx->maps[i]));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301550 if (err) {
1551 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001552 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301553 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001554 ipage += 1;
1555 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301556 mutex_unlock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301557 if (!me->legacy) {
1558 metalen = copylen = (size_t)&ipage[0] +
1559 (sizeof(uint64_t) * M_FDLIST) +
1560 (sizeof(uint32_t) * M_CRCLIST);
1561 } else {
1562 metalen = copylen = (size_t)&ipage[0];
1563 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001564
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301565 /* allocate new local rpra buffer */
1566 lrpralen = (size_t)&list[0];
1567 if (lrpralen) {
1568 err = fastrpc_buf_alloc(ctx->fl, lrpralen, 0, 0, 0, &ctx->lbuf);
1569 if (err)
1570 goto bail;
1571 }
1572 if (ctx->lbuf->virt)
1573 memset(ctx->lbuf->virt, 0, lrpralen);
1574
1575 lrpra = ctx->lbuf->virt;
1576 ctx->lrpra = lrpra;
1577
1578 /* calculate len required for copying */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001579 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1580 int i = ctx->overps[oix]->raix;
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001581 uintptr_t mstart, mend;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301582 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001583
1584 if (!len)
1585 continue;
1586 if (ctx->maps[i])
1587 continue;
1588 if (ctx->overps[oix]->offset == 0)
1589 copylen = ALIGN(copylen, BALIGN);
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001590 mstart = ctx->overps[oix]->mstart;
1591 mend = ctx->overps[oix]->mend;
1592 VERIFY(err, (mend - mstart) <= LONG_MAX);
1593 if (err)
1594 goto bail;
1595 copylen += mend - mstart;
1596 VERIFY(err, copylen >= 0);
1597 if (err)
1598 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001599 }
1600 ctx->used = copylen;
1601
1602 /* allocate new buffer */
1603 if (copylen) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05301604 err = fastrpc_buf_alloc(ctx->fl, copylen, 0, 0, 0, &ctx->buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001605 if (err)
1606 goto bail;
1607 }
Tharun Kumar Merugue3361f92017-06-22 10:45:43 +05301608 if (ctx->buf->virt && metalen <= copylen)
1609 memset(ctx->buf->virt, 0, metalen);
1610
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001611 /* copy metadata */
1612 rpra = ctx->buf->virt;
1613 ctx->rpra = rpra;
1614 list = smq_invoke_buf_start(rpra, sc);
1615 pages = smq_phy_page_start(sc, list);
1616 ipage = pages;
1617 args = (uintptr_t)ctx->buf->virt + metalen;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001618 for (i = 0; i < bufs + handles; ++i) {
1619 if (lpra[i].buf.len)
1620 list[i].num = 1;
1621 else
1622 list[i].num = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001623 list[i].pgidx = ipage - pages;
1624 ipage++;
1625 }
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301626
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001627 /* map ion buffers */
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301628 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301629 for (i = 0; rpra && lrpra && i < inbufs + outbufs; ++i) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001630 struct fastrpc_mmap *map = ctx->maps[i];
1631 uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301632 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001633
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301634 rpra[i].buf.pv = lrpra[i].buf.pv = 0;
1635 rpra[i].buf.len = lrpra[i].buf.len = len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001636 if (!len)
1637 continue;
1638 if (map) {
1639 struct vm_area_struct *vma;
1640 uintptr_t offset;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301641 uint64_t num = buf_num_pages(buf, len);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001642 int idx = list[i].pgidx;
1643
1644 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001645 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001646 } else {
1647 down_read(&current->mm->mmap_sem);
1648 VERIFY(err, NULL != (vma = find_vma(current->mm,
1649 map->va)));
1650 if (err) {
1651 up_read(&current->mm->mmap_sem);
1652 goto bail;
1653 }
1654 offset = buf_page_start(buf) - vma->vm_start;
1655 up_read(&current->mm->mmap_sem);
1656 VERIFY(err, offset < (uintptr_t)map->size);
1657 if (err)
1658 goto bail;
1659 }
1660 pages[idx].addr = map->phys + offset;
1661 pages[idx].size = num << PAGE_SHIFT;
1662 }
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301663 rpra[i].buf.pv = lrpra[i].buf.pv = buf;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001664 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001665 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001666 for (i = bufs; i < bufs + handles; ++i) {
1667 struct fastrpc_mmap *map = ctx->maps[i];
Jeya R4c7abf22020-07-23 16:00:50 +05301668 if (map) {
1669 pages[i].addr = map->phys;
1670 pages[i].size = map->size;
1671 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001672 }
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301673 if (!me->legacy) {
1674 fdlist = (uint64_t *)&pages[bufs + handles];
1675 for (i = 0; i < M_FDLIST; i++)
1676 fdlist[i] = 0;
1677 crclist = (uint32_t *)&fdlist[M_FDLIST];
1678 memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
1679 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001680
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001681 /* copy non ion buffers */
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301682 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001683 rlen = copylen - metalen;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301684 for (oix = 0; rpra && lrpra && oix < inbufs + outbufs; ++oix) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001685 int i = ctx->overps[oix]->raix;
1686 struct fastrpc_mmap *map = ctx->maps[i];
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301687 size_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001688 uint64_t buf;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301689 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001690
1691 if (!len)
1692 continue;
1693 if (map)
1694 continue;
1695 if (ctx->overps[oix]->offset == 0) {
1696 rlen -= ALIGN(args, BALIGN) - args;
1697 args = ALIGN(args, BALIGN);
1698 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001699 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001700 VERIFY(err, rlen >= mlen);
1701 if (err)
1702 goto bail;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301703 rpra[i].buf.pv = lrpra[i].buf.pv =
1704 (args - ctx->overps[oix]->offset);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001705 pages[list[i].pgidx].addr = ctx->buf->phys -
1706 ctx->overps[oix]->offset +
1707 (copylen - rlen);
1708 pages[list[i].pgidx].addr =
1709 buf_page_start(pages[list[i].pgidx].addr);
1710 buf = rpra[i].buf.pv;
1711 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1712 if (i < inbufs) {
1713 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1714 lpra[i].buf.pv, len);
1715 if (err)
1716 goto bail;
1717 }
1718 args = args + mlen;
1719 rlen -= mlen;
1720 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001721 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001722
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301723 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001724 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1725 int i = ctx->overps[oix]->raix;
1726 struct fastrpc_mmap *map = ctx->maps[i];
1727
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001728 if (map && map->uncached)
1729 continue;
Jeya R984a1a32021-01-18 15:38:07 +05301730 if (ctx->fl->sctx && ctx->fl->sctx->smmu.coherent &&
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301731 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1732 continue;
1733 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1734 continue;
1735
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301736 if (rpra && lrpra && rpra[i].buf.len &&
1737 ctx->overps[oix]->mstart) {
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301738 if (map && map->handle)
1739 msm_ion_do_cache_op(ctx->fl->apps->client,
1740 map->handle,
1741 uint64_to_ptr(rpra[i].buf.pv),
1742 rpra[i].buf.len,
1743 ION_IOC_CLEAN_INV_CACHES);
1744 else
1745 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1746 uint64_to_ptr(rpra[i].buf.pv
1747 + rpra[i].buf.len));
1748 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001749 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001750 PERF_END);
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301751 for (i = bufs; rpra && lrpra && i < bufs + handles; i++) {
Jeya R4c7abf22020-07-23 16:00:50 +05301752 if (ctx->fds)
1753 rpra[i].dma.fd = lrpra[i].dma.fd = ctx->fds[i];
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301754 rpra[i].dma.len = lrpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1755 rpra[i].dma.offset = lrpra[i].dma.offset =
1756 (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001757 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001758
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001759 bail:
1760 return err;
1761}
1762
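/*
 * After the remote call returns: copy output buffers back to the caller,
 * release per-argument mappings and any maps the DSP listed in fdlist,
 * and hand the CRC list back to user space when requested.
 */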
1763static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
1764 remote_arg_t *upra)
1765{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301766 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001767 uint32_t sc = ctx->sc;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001768 struct smq_invoke_buf *list;
1769 struct smq_phy_page *pages;
1770 struct fastrpc_mmap *mmap;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301771 uint64_t *fdlist = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001772 uint32_t *crclist = NULL;
1773
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301774 remote_arg64_t *rpra = ctx->lrpra;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001775 int i, inbufs, outbufs, handles;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001776 int err = 0;
1777
1778 inbufs = REMOTE_SCALARS_INBUFS(sc);
1779 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001780 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1781 list = smq_invoke_buf_start(ctx->rpra, sc);
1782 pages = smq_phy_page_start(sc, list);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301783 if (!me->legacy) {
1784 fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
1785 crclist = (uint32_t *)(fdlist + M_FDLIST);
1786 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001787
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001788 for (i = inbufs; i < inbufs + outbufs; ++i) {
1789 if (!ctx->maps[i]) {
1790 K_COPY_TO_USER(err, kernel,
1791 ctx->lpra[i].buf.pv,
1792 uint64_to_ptr(rpra[i].buf.pv),
1793 rpra[i].buf.len);
1794 if (err)
1795 goto bail;
1796 } else {
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301797 mutex_lock(&ctx->fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05301798 fastrpc_mmap_free(ctx->maps[i], 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301799 mutex_unlock(&ctx->fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05301800 ctx->maps[i] = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001801 }
1802 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301803 mutex_lock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301804 if (fdlist && (inbufs + outbufs + handles)) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001805 for (i = 0; i < M_FDLIST; i++) {
1806 if (!fdlist[i])
1807 break;
1808 if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
Sathish Ambleyae5ee542017-01-16 22:24:23 -08001809 0, 0, &mmap))
c_mtharu7bd6a422017-10-17 18:15:37 +05301810 fastrpc_mmap_free(mmap, 0);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001811 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001812 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301813 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambleybae51902017-07-03 15:00:49 -07001814 if (ctx->crc && crclist && rpra)
c_mtharue1a5ce12017-10-13 20:47:09 +05301815 K_COPY_TO_USER(err, kernel, ctx->crc,
Sathish Ambleybae51902017-07-03 15:00:49 -07001816 crclist, M_CRCLIST*sizeof(uint32_t));
1817
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001818 bail:
1819 return err;
1820}
1821
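/*
 * Before sending the message, clean/invalidate the cache lines at the
 * unaligned start and end of each output buffer so that the invalidate
 * done after the call cannot discard adjacent valid data.
 */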
1822static void inv_args_pre(struct smq_invoke_ctx *ctx)
1823{
1824 int i, inbufs, outbufs;
1825 uint32_t sc = ctx->sc;
1826 remote_arg64_t *rpra = ctx->rpra;
1827 uintptr_t end;
1828
1829 inbufs = REMOTE_SCALARS_INBUFS(sc);
1830 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1831 for (i = inbufs; i < inbufs + outbufs; ++i) {
1832 struct fastrpc_mmap *map = ctx->maps[i];
1833
1834 if (map && map->uncached)
1835 continue;
1836 if (!rpra[i].buf.len)
1837 continue;
Jeya R984a1a32021-01-18 15:38:07 +05301838 if (ctx->fl && ctx->fl->sctx && ctx->fl->sctx->smmu.coherent &&
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301839 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1840 continue;
1841 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1842 continue;
1843
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001844 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1845 buf_page_start(rpra[i].buf.pv))
1846 continue;
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301847 if (!IS_CACHE_ALIGNED((uintptr_t)
1848 uint64_to_ptr(rpra[i].buf.pv))) {
1849 if (map && map->handle)
1850 msm_ion_do_cache_op(ctx->fl->apps->client,
1851 map->handle,
1852 uint64_to_ptr(rpra[i].buf.pv),
1853 sizeof(uintptr_t),
1854 ION_IOC_CLEAN_INV_CACHES);
1855 else
1856 dmac_flush_range(
1857 uint64_to_ptr(rpra[i].buf.pv), (char *)
1858 uint64_to_ptr(rpra[i].buf.pv + 1));
1859 }
1860
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001861 end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
1862 rpra[i].buf.len);
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301863 if (!IS_CACHE_ALIGNED(end)) {
1864 if (map && map->handle)
1865 msm_ion_do_cache_op(ctx->fl->apps->client,
1866 map->handle,
1867 uint64_to_ptr(end),
1868 sizeof(uintptr_t),
1869 ION_IOC_CLEAN_INV_CACHES);
1870 else
1871 dmac_flush_range((char *)end,
1872 (char *)end + 1);
1873 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001874 }
1875}
1876
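/*
 * After the remote call, invalidate cached copies of the output buffers so
 * the CPU observes the data written by the DSP.
 */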
1877static void inv_args(struct smq_invoke_ctx *ctx)
1878{
1879 int i, inbufs, outbufs;
1880 uint32_t sc = ctx->sc;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301881 remote_arg64_t *rpra = ctx->lrpra;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001882
1883 inbufs = REMOTE_SCALARS_INBUFS(sc);
1884 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1885 for (i = inbufs; i < inbufs + outbufs; ++i) {
1886 struct fastrpc_mmap *map = ctx->maps[i];
1887
1888 if (map && map->uncached)
1889 continue;
1890 if (!rpra[i].buf.len)
1891 continue;
Jeya R984a1a32021-01-18 15:38:07 +05301892 if (ctx->fl && ctx->fl->sctx && ctx->fl->sctx->smmu.coherent &&
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301893 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1894 continue;
1895 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1896 continue;
1897
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001898 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1899 buf_page_start(rpra[i].buf.pv)) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001900 continue;
1901 }
1902 if (map && map->handle)
1903 msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
1904 (char *)uint64_to_ptr(rpra[i].buf.pv),
1905 rpra[i].buf.len, ION_IOC_INV_CACHES);
1906 else
1907 dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
1908 (char *)uint64_to_ptr(rpra[i].buf.pv
1909 + rpra[i].buf.len));
1910 }
1911
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001912}
1913
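/*
 * Fill in the smq_msg for this context and transmit it to the remote
 * processor over G-Link, or over SMD on targets that still use it.
 */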
1914static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
1915 uint32_t kernel, uint32_t handle)
1916{
1917 struct smq_msg *msg = &ctx->msg;
1918 struct fastrpc_file *fl = ctx->fl;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05301919 int err = 0, len, cid = -1;
1920 struct fastrpc_channel_ctx *channel_ctx = NULL;
1921
1922 cid = fl->cid;
1923 VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
1924 if (err) {
1925 err = -ECHRNG;
1926 goto bail;
1927 }
1928 channel_ctx = &fl->apps->channel[fl->cid];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001929
c_mtharue1a5ce12017-10-13 20:47:09 +05301930 VERIFY(err, NULL != channel_ctx->chan);
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05301931 if (err) {
1932 err = -ECHRNG;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001933 goto bail;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05301934 }
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301935 msg->pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001936 msg->tid = current->pid;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301937 if (fl->sessionid)
1938 msg->tid |= (1 << SESSION_ID_INDEX);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001939 if (kernel)
1940 msg->pid = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301941 msg->invoke.header.ctx = ctx->ctxid | fl->pd;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001942 msg->invoke.header.handle = handle;
1943 msg->invoke.header.sc = ctx->sc;
1944 msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
1945 msg->invoke.page.size = buf_page_size(ctx->used);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301946 if (fl->apps->glink) {
1947 if (fl->ssrcount != channel_ctx->ssrcount) {
1948 err = -ECONNRESET;
1949 goto bail;
1950 }
1951 VERIFY(err, channel_ctx->link.port_state ==
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001952 FASTRPC_LINK_CONNECTED);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301953 if (err)
1954 goto bail;
1955 err = glink_tx(channel_ctx->chan,
1956 (void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
1957 GLINK_TX_REQ_INTENT);
1958 } else {
1959 spin_lock(&fl->apps->hlock);
1960 len = smd_write((smd_channel_t *)
1961 channel_ctx->chan,
1962 msg, sizeof(*msg));
1963 spin_unlock(&fl->apps->hlock);
1964 VERIFY(err, len == sizeof(*msg));
1965 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001966 bail:
1967 return err;
1968}
1969
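/*
 * SMD receive path: read responses from the channel, validate the context
 * index and magic against the global context table, and wake the matching
 * invocation.
 */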
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301970static void fastrpc_smd_read_handler(int cid)
1971{
1972 struct fastrpc_apps *me = &gfa;
1973 struct smq_invoke_rsp rsp = {0};
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301974 int ret = 0, err = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301975 uint32_t index;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301976
1977 do {
1978 ret = smd_read_from_cb(me->channel[cid].chan, &rsp,
1979 sizeof(rsp));
1980 if (ret != sizeof(rsp))
1981 break;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301982
1983 index = (uint32_t)((rsp.ctx & FASTRPC_CTXID_MASK) >> 4);
1984 VERIFY(err, index < FASTRPC_CTX_MAX);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301985 if (err)
1986 goto bail;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301987
1988 VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
1989 if (err)
1990 goto bail;
1991
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05301992 VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp.ctx & ~3)) &&
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301993 me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
1994 if (err)
1995 goto bail;
1996
1997 context_notify_user(me->ctxtable[index], rsp.retval);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301998 } while (ret == sizeof(rsp));
1999bail:
2000 if (err)
2001 pr_err("adsprpc: invalid response or context\n");
2002
2003}
2004
2005static void smd_event_handler(void *priv, unsigned int event)
2006{
2007 struct fastrpc_apps *me = &gfa;
2008 int cid = (int)(uintptr_t)priv;
2009
2010 switch (event) {
2011 case SMD_EVENT_OPEN:
2012 complete(&me->channel[cid].workport);
2013 break;
2014 case SMD_EVENT_CLOSE:
2015 fastrpc_notify_drivers(me, cid);
2016 break;
2017 case SMD_EVENT_DATA:
2018 fastrpc_smd_read_handler(cid);
2019 break;
2020 }
2021}
2022
2023
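/* Initialize the global driver state: lists, locks and per-channel defaults. */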
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002024static void fastrpc_init(struct fastrpc_apps *me)
2025{
2026 int i;
2027
2028 INIT_HLIST_HEAD(&me->drivers);
Tharun Kumar Merugubcd6fbf2018-01-04 17:49:34 +05302029 INIT_HLIST_HEAD(&me->maps);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002030 spin_lock_init(&me->hlock);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302031 spin_lock_init(&me->ctxlock);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302032 mutex_init(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002033 me->channel = &gcinfo[0];
2034 for (i = 0; i < NUM_CHANNELS; i++) {
2035 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302036 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002037 me->channel[i].sesscount = 0;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05302038 /* All channels are secure by default except CDSP */
2039 me->channel[i].secure = SECURE_CHANNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002040 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05302041 /* Set CDSP channel to non secure */
2042 me->channel[CDSP_DOMAIN_ID].secure = NON_SECURE_CHANNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002043}
2044
2045static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
2046
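/*
 * Core invocation path used by both the ioctl handler and in-kernel callers:
 * restore or allocate a context, marshal the arguments, send the message,
 * wait for the DSP response, then unmarshal the results and update the
 * optional perf counters.
 */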
2047static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
2048 uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07002049 struct fastrpc_ioctl_invoke_crc *inv)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002050{
c_mtharue1a5ce12017-10-13 20:47:09 +05302051 struct smq_invoke_ctx *ctx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002052 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05302053 int err = 0, cid = -1, interrupted = 0;
Maria Yu757199c2017-09-22 16:05:49 +08002054 struct timespec invoket = {0};
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05302055 int64_t *perf_counter = NULL;
2056
2057 cid = fl->cid;
2058 VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
2059 if (err) {
2060 err = -ECHRNG;
2061 goto bail;
2062 }
2063 VERIFY(err, fl->sctx != NULL);
2064 if (err) {
2065 err = -EBADR;
2066 goto bail;
2067 }
2068 perf_counter = getperfcounter(fl, PERF_COUNT);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002069
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002070 if (fl->profile)
2071 getnstimeofday(&invoket);
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05302072
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302073 if (!kernel) {
2074 VERIFY(err, invoke->handle != FASTRPC_STATIC_HANDLE_KERNEL);
2075 if (err) {
2076			pr_err("adsprpc: ERROR: %s: user application %s trying to send a kernel RPC message to channel %d\n",
2077 __func__, current->comm, cid);
2078 goto bail;
2079 }
2080 }
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302081
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002082 if (!kernel) {
2083 VERIFY(err, 0 == context_restore_interrupted(fl, inv,
2084 &ctx));
2085 if (err)
2086 goto bail;
2087 if (fl->sctx->smmu.faults)
2088 err = FASTRPC_ENOSUCH;
2089 if (err)
2090 goto bail;
2091 if (ctx)
2092 goto wait;
2093 }
2094
2095 VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
2096 if (err)
2097 goto bail;
2098
2099 if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302100 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_GETARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002101 VERIFY(err, 0 == get_args(kernel, ctx));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002102 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002103 if (err)
2104 goto bail;
2105 }
2106
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302107 if (!fl->sctx->smmu.coherent) {
2108 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002109 inv_args_pre(ctx);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302110 PERF_END);
2111 }
2112
2113 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_LINK),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002114 VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002115 PERF_END);
2116
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002117 if (err)
2118 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002119 wait:
2120 if (kernel)
2121 wait_for_completion(&ctx->work);
2122 else {
2123 interrupted = wait_for_completion_interruptible(&ctx->work);
2124 VERIFY(err, 0 == (err = interrupted));
2125 if (err)
2126 goto bail;
2127 }
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302128 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
Sathish Ambleyc432b502017-06-05 12:03:42 -07002129 if (!fl->sctx->smmu.coherent)
2130 inv_args(ctx);
2131 PERF_END);
2132
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002133 VERIFY(err, 0 == (err = ctx->retval));
2134 if (err)
2135 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002136
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302137 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_PUTARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002138 VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002139 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002140 if (err)
2141 goto bail;
2142 bail:
2143 if (ctx && interrupted == -ERESTARTSYS)
2144 context_save_interrupted(ctx);
2145 else if (ctx)
2146 context_free(ctx);
2147 if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
2148 err = ECONNRESET;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002149
2150 if (fl->profile && !interrupted) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302151 if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER) {
2152 int64_t *count = GET_COUNTER(perf_counter, PERF_INVOKE);
2153
2154 if (count)
2155 *count += getnstimediff(&invoket);
2156 }
2157 if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX) {
2158 int64_t *count = GET_COUNTER(perf_counter, PERF_COUNT);
2159
2160 if (count)
2161 *count = *count+1;
2162 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002163 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002164 return err;
2165}
2166
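/* Find the static PD session on channel 0 whose spdname matches "name". */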
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302167static int fastrpc_get_adsp_session(char *name, int *session)
2168{
2169 struct fastrpc_apps *me = &gfa;
2170 int err = 0, i;
2171
2172 for (i = 0; i < NUM_SESSIONS; i++) {
2173 if (!me->channel[0].spd[i].spdname)
2174 continue;
2175 if (!strcmp(name, me->channel[0].spd[i].spdname))
2176 break;
2177 }
2178 VERIFY(err, i < NUM_SESSIONS);
2179 if (err)
2180 goto bail;
2181 *session = i;
2182bail:
2183 return err;
2184}
2185
2186static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl);
Sathish Ambley36849af2017-02-02 09:35:55 -08002187static int fastrpc_channel_open(struct fastrpc_file *fl);
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302188static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl);
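/*
 * Create or attach the remote process for this file, depending on
 * init->flags: attach to the guest OS or sensors PD, spawn a dynamic user
 * PD (with kernel-allocated init memory), or bring up a static PD such as
 * audiopd (hyp-assigning its remote heap once).
 */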
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002189static int fastrpc_init_process(struct fastrpc_file *fl,
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002190 struct fastrpc_ioctl_init_attrs *uproc)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002191{
2192 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302193 struct fastrpc_apps *me = &gfa;
Sathish Ambleybae51902017-07-03 15:00:49 -07002194 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002195 struct fastrpc_ioctl_init *init = &uproc->init;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002196 struct smq_phy_page pages[1];
c_mtharue1a5ce12017-10-13 20:47:09 +05302197 struct fastrpc_mmap *file = NULL, *mem = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302198 struct fastrpc_buf *imem = NULL;
2199 unsigned long imem_dma_attr = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302200 char *proc_name = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002201
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302202 VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
Sathish Ambley36849af2017-02-02 09:35:55 -08002203 if (err)
2204 goto bail;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302205 if (init->flags == FASTRPC_INIT_ATTACH ||
2206 init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002207 remote_arg_t ra[1];
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302208 int tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002209
2210 ra[0].buf.pv = (void *)&tgid;
2211 ra[0].buf.len = sizeof(tgid);
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302212 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002213 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
2214 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302215 ioctl.fds = NULL;
2216 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002217 ioctl.crc = NULL;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302218 if (init->flags == FASTRPC_INIT_ATTACH)
2219 fl->pd = 0;
2220 else if (init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
2221 fl->spdname = SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME;
2222 fl->pd = 2;
2223 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002224 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2225 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2226 if (err)
2227 goto bail;
2228 } else if (init->flags == FASTRPC_INIT_CREATE) {
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002229 remote_arg_t ra[6];
2230 int fds[6];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002231 int mflags = 0;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302232 int memlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002233 struct {
2234 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302235 unsigned int namelen;
2236 unsigned int filelen;
2237 unsigned int pageslen;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002238 int attrs;
2239 int siglen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002240 } inbuf;
2241
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302242 inbuf.pgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002243 inbuf.namelen = strlen(current->comm) + 1;
2244 inbuf.filelen = init->filelen;
2245 fl->pd = 1;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302246
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05302247 VERIFY(err, access_ok(0, (void __user *)init->file,
2248 init->filelen));
2249 if (err)
2250 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002251 if (init->filelen) {
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302252 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002253 VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
2254 init->file, init->filelen, mflags, &file));
Swathi K0e257332021-07-14 17:51:10 +05302255 if (file)
2256 file->is_filemap = true;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302257 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002258 if (err)
2259 goto bail;
2260 }
2261 inbuf.pageslen = 1;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302262
2263 VERIFY(err, !init->mem);
2264 if (err) {
2265 err = -EINVAL;
2266 pr_err("adsprpc: %s: %s: ERROR: donated memory allocated in userspace\n",
2267 current->comm, __func__);
2268 goto bail;
2269 }
2270 memlen = ALIGN(max(1024*1024*3, (int)init->filelen * 4),
2271 1024*1024);
2272 imem_dma_attr = DMA_ATTR_EXEC_MAPPING |
2273 DMA_ATTR_NO_KERNEL_MAPPING |
2274 DMA_ATTR_FORCE_NON_COHERENT;
2275 err = fastrpc_buf_alloc(fl, memlen, imem_dma_attr, 0, 0, &imem);
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05302276 if (err)
2277 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302278 fl->init_mem = imem;
2279
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002280 inbuf.pageslen = 1;
2281 ra[0].buf.pv = (void *)&inbuf;
2282 ra[0].buf.len = sizeof(inbuf);
2283 fds[0] = 0;
2284
2285 ra[1].buf.pv = (void *)current->comm;
2286 ra[1].buf.len = inbuf.namelen;
2287 fds[1] = 0;
2288
2289 ra[2].buf.pv = (void *)init->file;
2290 ra[2].buf.len = inbuf.filelen;
2291 fds[2] = init->filefd;
2292
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302293 pages[0].addr = imem->phys;
2294 pages[0].size = imem->size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002295 ra[3].buf.pv = (void *)pages;
2296 ra[3].buf.len = 1 * sizeof(*pages);
2297 fds[3] = 0;
2298
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002299 inbuf.attrs = uproc->attrs;
2300 ra[4].buf.pv = (void *)&(inbuf.attrs);
2301 ra[4].buf.len = sizeof(inbuf.attrs);
2302 fds[4] = 0;
2303
2304 inbuf.siglen = uproc->siglen;
2305 ra[5].buf.pv = (void *)&(inbuf.siglen);
2306 ra[5].buf.len = sizeof(inbuf.siglen);
2307 fds[5] = 0;
2308
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302309 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002310 ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002311 if (uproc->attrs)
2312 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002313 ioctl.inv.pra = ra;
2314 ioctl.fds = fds;
c_mtharue1a5ce12017-10-13 20:47:09 +05302315 ioctl.attrs = NULL;
2316 ioctl.crc = NULL;
2317 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2318 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2319 if (err)
2320 goto bail;
2321 } else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
2322 remote_arg_t ra[3];
2323 uint64_t phys = 0;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302324 size_t size = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302325 int fds[3];
2326 struct {
2327 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302328 unsigned int namelen;
2329 unsigned int pageslen;
c_mtharue1a5ce12017-10-13 20:47:09 +05302330 } inbuf;
2331
2332 if (!init->filelen)
2333 goto bail;
2334
Vamsi Krishna Gattupallia38c9cb2021-11-11 17:19:03 +05302335 proc_name = kzalloc(init->filelen + 1, GFP_KERNEL);
c_mtharue1a5ce12017-10-13 20:47:09 +05302336 VERIFY(err, !IS_ERR_OR_NULL(proc_name));
2337 if (err)
2338 goto bail;
2339 VERIFY(err, 0 == copy_from_user((void *)proc_name,
2340 (void __user *)init->file, init->filelen));
2341 if (err)
2342 goto bail;
2343
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302344 fl->pd = 1;
c_mtharue1a5ce12017-10-13 20:47:09 +05302345 inbuf.pgid = current->tgid;
c_mtharu81a0aa72017-11-07 16:13:21 +05302346 inbuf.namelen = init->filelen;
c_mtharue1a5ce12017-10-13 20:47:09 +05302347 inbuf.pageslen = 0;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302348
2349 if (!strcmp(proc_name, "audiopd")) {
2350 fl->spdname = AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME;
2351 VERIFY(err, !fastrpc_mmap_remove_pdr(fl));
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05302352 if (err)
2353 goto bail;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302354 }
2355
c_mtharue1a5ce12017-10-13 20:47:09 +05302356 if (!me->staticpd_flags) {
2357 inbuf.pageslen = 1;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302358 mutex_lock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302359 VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
2360 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
2361 &mem));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302362 mutex_unlock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302363 if (err)
2364 goto bail;
2365 phys = mem->phys;
2366 size = mem->size;
2367 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302368 hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
2369 me->channel[fl->cid].rhvm.vmperm,
2370 me->channel[fl->cid].rhvm.vmcount));
c_mtharue1a5ce12017-10-13 20:47:09 +05302371 if (err) {
2372				pr_err("ADSPRPC: hyp_assign_phys failed with err %d\n",
2373					err);
2374 pr_err("map->phys %llx, map->size %d\n",
2375 phys, (int)size);
2376 goto bail;
2377 }
2378 me->staticpd_flags = 1;
2379 }
2380
2381 ra[0].buf.pv = (void *)&inbuf;
2382 ra[0].buf.len = sizeof(inbuf);
2383 fds[0] = 0;
2384
2385 ra[1].buf.pv = (void *)proc_name;
2386 ra[1].buf.len = inbuf.namelen;
2387 fds[1] = 0;
2388
2389 pages[0].addr = phys;
2390 pages[0].size = size;
2391
2392 ra[2].buf.pv = (void *)pages;
2393 ra[2].buf.len = sizeof(*pages);
2394 fds[2] = 0;
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302395 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
c_mtharue1a5ce12017-10-13 20:47:09 +05302396
2397 ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
2398 ioctl.inv.pra = ra;
2399 ioctl.fds = NULL;
2400 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002401 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002402 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2403 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2404 if (err)
2405 goto bail;
2406 } else {
2407 err = -ENOTTY;
2408 }
2409bail:
c_mtharud91205a2017-11-07 16:01:06 +05302410 kfree(proc_name);
c_mtharue1a5ce12017-10-13 20:47:09 +05302411 if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
2412 me->staticpd_flags = 0;
2413 if (mem && err) {
2414 if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
2415 hyp_assign_phys(mem->phys, (uint64_t)mem->size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302416 me->channel[fl->cid].rhvm.vmid,
2417 me->channel[fl->cid].rhvm.vmcount,
2418 hlosvm, hlosvmperm, 1);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302419 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302420 fastrpc_mmap_free(mem, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302421 mutex_unlock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302422 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302423 if (file) {
2424 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302425 fastrpc_mmap_free(file, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302426 mutex_unlock(&fl->fl_map_mutex);
2427 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002428 return err;
2429}
2430
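/* Ask the DSP to tear down the remote process associated with this file. */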
2431static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
2432{
2433 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07002434 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002435 remote_arg_t ra[1];
2436 int tgid = 0;
2437
Sathish Ambley36849af2017-02-02 09:35:55 -08002438 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
2439 if (err)
2440 goto bail;
Jeya R984a1a32021-01-18 15:38:07 +05302441 VERIFY(err, fl->sctx != NULL);
2442 if (err)
2443 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302444 VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002445 if (err)
2446 goto bail;
2447 tgid = fl->tgid;
2448 ra[0].buf.pv = (void *)&tgid;
2449 ra[0].buf.len = sizeof(tgid);
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302450 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002451 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
2452 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302453 ioctl.fds = NULL;
2454 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002455 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002456 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2457 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2458bail:
2459 return err;
2460}
2461
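/*
 * Register a mapping with the DSP and return the remote address in *raddr.
 * Heap mappings are additionally protected via an SCM call or by
 * hyp-assigning the pages to the remote VM.
 */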
2462static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302463 uintptr_t va, uint64_t phys,
2464 size_t size, uintptr_t *raddr)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002465{
Sathish Ambleybae51902017-07-03 15:00:49 -07002466 struct fastrpc_ioctl_invoke_crc ioctl;
c_mtharu63ffc012017-11-16 15:26:56 +05302467 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002468 struct smq_phy_page page;
2469 int num = 1;
2470 remote_arg_t ra[3];
2471 int err = 0;
2472 struct {
2473 int pid;
2474 uint32_t flags;
2475 uintptr_t vaddrin;
2476 int num;
2477 } inargs;
2478 struct {
2479 uintptr_t vaddrout;
2480 } routargs;
2481
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302482 inargs.pid = fl->tgid;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302483 inargs.vaddrin = (uintptr_t)va;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002484 inargs.flags = flags;
2485 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
2486 ra[0].buf.pv = (void *)&inargs;
2487 ra[0].buf.len = sizeof(inargs);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302488 page.addr = phys;
2489 page.size = size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002490 ra[1].buf.pv = (void *)&page;
2491 ra[1].buf.len = num * sizeof(page);
2492
2493 ra[2].buf.pv = (void *)&routargs;
2494 ra[2].buf.len = sizeof(routargs);
2495
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302496 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002497 if (fl->apps->compat)
2498 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
2499 else
2500 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
2501 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302502 ioctl.fds = NULL;
2503 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002504 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002505 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2506 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302507 *raddr = (uintptr_t)routargs.vaddrout;
c_mtharue1a5ce12017-10-13 20:47:09 +05302508 if (err)
2509 goto bail;
2510 if (flags == ADSP_MMAP_HEAP_ADDR) {
2511 struct scm_desc desc = {0};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002512
c_mtharue1a5ce12017-10-13 20:47:09 +05302513 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302514 desc.args[1] = phys;
2515 desc.args[2] = size;
c_mtharue1a5ce12017-10-13 20:47:09 +05302516 desc.arginfo = SCM_ARGS(3);
2517 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
2518 TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
2519 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302520 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302521 hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
2522 me->channel[fl->cid].rhvm.vmperm,
2523 me->channel[fl->cid].rhvm.vmcount));
c_mtharue1a5ce12017-10-13 20:47:09 +05302524 if (err)
2525 goto bail;
2526 }
2527bail:
2528 return err;
2529}
2530
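/*
 * Undo the protection applied for remote-heap mappings: fetch the security
 * key from the DSP and clear the TZ protection for ADSP_MMAP_HEAP_ADDR, or
 * reassign the pages back to HLOS for ADSP_MMAP_REMOTE_HEAP_ADDR.
 */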
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302531static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, uint64_t phys,
2532 size_t size, uint32_t flags)
c_mtharue1a5ce12017-10-13 20:47:09 +05302533{
2534 int err = 0;
c_mtharu63ffc012017-11-16 15:26:56 +05302535 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugu72c90252019-08-29 18:36:08 +05302536 int tgid = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302537 int destVM[1] = {VMID_HLOS};
2538 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
2539
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302540 if (flags == ADSP_MMAP_HEAP_ADDR) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302541 struct fastrpc_ioctl_invoke_crc ioctl;
2542 struct scm_desc desc = {0};
Tharun Kumar Merugu72c90252019-08-29 18:36:08 +05302543 remote_arg_t ra[2];
2544
c_mtharue1a5ce12017-10-13 20:47:09 +05302545 struct {
2546 uint8_t skey;
2547 } routargs;
2548
Tharun Kumar Merugu72c90252019-08-29 18:36:08 +05302549 if (fl == NULL)
2550 goto bail;
2551 tgid = fl->tgid;
2552 ra[0].buf.pv = (void *)&tgid;
2553 ra[0].buf.len = sizeof(tgid);
2554 ra[1].buf.pv = (void *)&routargs;
2555 ra[1].buf.len = sizeof(routargs);
c_mtharue1a5ce12017-10-13 20:47:09 +05302556
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302557 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Tharun Kumar Merugu72c90252019-08-29 18:36:08 +05302558 ioctl.inv.sc = REMOTE_SCALARS_MAKE(9, 1, 1);
c_mtharue1a5ce12017-10-13 20:47:09 +05302559 ioctl.inv.pra = ra;
2560 ioctl.fds = NULL;
2561 ioctl.attrs = NULL;
2562 ioctl.crc = NULL;
Tharun Kumar Merugu72c90252019-08-29 18:36:08 +05302563
c_mtharue1a5ce12017-10-13 20:47:09 +05302564
2565 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2566 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
Mohammed Nayeem Ur Rahman80f45dc2019-09-23 19:35:19 +05302567 if (err == AEE_EUNSUPPORTED) {
2568 remote_arg_t ra[1];
2569
2570			pr_warn("ADSPRPC: Failed to get security key with updated remote call, falling back to older method\n");
2571 ra[0].buf.pv = (void *)&routargs;
2572 ra[0].buf.len = sizeof(routargs);
2573 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
2574 ioctl.inv.pra = ra;
2575 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2576 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2577 }
c_mtharue1a5ce12017-10-13 20:47:09 +05302578 if (err)
2579 goto bail;
Mohammed Nayeem Ur Rahman80f45dc2019-09-23 19:35:19 +05302580
c_mtharue1a5ce12017-10-13 20:47:09 +05302581 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302582 desc.args[1] = phys;
2583 desc.args[2] = size;
c_mtharue1a5ce12017-10-13 20:47:09 +05302584 desc.args[3] = routargs.skey;
2585 desc.arginfo = SCM_ARGS(4);
2586 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
2587 TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302588 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2589 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302590 me->channel[fl->cid].rhvm.vmid,
2591 me->channel[fl->cid].rhvm.vmcount,
2592 destVM, destVMperm, 1));
c_mtharue1a5ce12017-10-13 20:47:09 +05302593 if (err)
2594 goto bail;
2595 }
2596
2597bail:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002598 return err;
2599}
2600
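/*
 * Ask the DSP to unmap a remote address range, then release any heap
 * protection that was applied when it was mapped.
 */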
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302601static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl, uintptr_t raddr,
2602 uint64_t phys, size_t size, uint32_t flags)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002603{
Sathish Ambleybae51902017-07-03 15:00:49 -07002604 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002605 remote_arg_t ra[1];
2606 int err = 0;
2607 struct {
2608 int pid;
2609 uintptr_t vaddrout;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302610 size_t size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002611 } inargs;
2612
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302613 inargs.pid = fl->tgid;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302614 inargs.size = size;
2615 inargs.vaddrout = raddr;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002616 ra[0].buf.pv = (void *)&inargs;
2617 ra[0].buf.len = sizeof(inargs);
2618
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302619 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002620 if (fl->apps->compat)
2621 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
2622 else
2623 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
2624 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302625 ioctl.fds = NULL;
2626 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002627 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002628 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2629 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
c_mtharue1a5ce12017-10-13 20:47:09 +05302630 if (err)
2631 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302632 if (flags == ADSP_MMAP_HEAP_ADDR ||
2633 flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2634 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, phys, size, flags));
c_mtharue1a5ce12017-10-13 20:47:09 +05302635 if (err)
2636 goto bail;
2637 }
2638bail:
2639 return err;
2640}
2641
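/*
 * On subsystem restart, drain the global map list: release each remote-heap
 * mapping, capture a ramdump of it when ramdumps are enabled, and free it.
 */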
2642static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
2643{
2644 struct fastrpc_mmap *match = NULL, *map = NULL;
2645 struct hlist_node *n = NULL;
2646 int err = 0, ret = 0;
2647 struct fastrpc_apps *me = &gfa;
2648 struct ramdump_segment *ramdump_segments_rh = NULL;
2649
2650 do {
2651 match = NULL;
2652 spin_lock(&me->hlock);
2653 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
2654 match = map;
2655 hlist_del_init(&map->hn);
2656 break;
2657 }
2658 spin_unlock(&me->hlock);
2659
2660 if (match) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302661 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match->phys,
2662 match->size, match->flags));
c_mtharue1a5ce12017-10-13 20:47:09 +05302663 if (err)
2664 goto bail;
2665 if (me->channel[0].ramdumpenabled) {
2666 ramdump_segments_rh = kcalloc(1,
2667 sizeof(struct ramdump_segment), GFP_KERNEL);
2668 if (ramdump_segments_rh) {
2669 ramdump_segments_rh->address =
2670 match->phys;
2671 ramdump_segments_rh->size = match->size;
2672 ret = do_elf_ramdump(
2673 me->channel[0].remoteheap_ramdump_dev,
2674 ramdump_segments_rh, 1);
2675 if (ret < 0)
2676					pr_err("ADSPRPC: unable to dump remote heap\n");
2677 kfree(ramdump_segments_rh);
2678 }
2679 }
c_mtharu7bd6a422017-10-17 18:15:37 +05302680 fastrpc_mmap_free(match, 0);
c_mtharue1a5ce12017-10-13 20:47:09 +05302681 }
2682 } while (match);
2683bail:
2684 if (err && match)
2685 fastrpc_mmap_add(match);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002686 return err;
2687}
2688
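/*
 * On an audio PD restart, unmap the remote heap once per PDR cycle and fail
 * with -ENOTCONN while the PD is not up.
 */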
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302689static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl)
2690{
2691 struct fastrpc_apps *me = &gfa;
2692 int session = 0, err = 0;
2693
2694 VERIFY(err, !fastrpc_get_adsp_session(
2695 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
2696 if (err)
2697 goto bail;
2698 if (me->channel[fl->cid].spd[session].pdrcount !=
2699 me->channel[fl->cid].spd[session].prevpdrcount) {
2700 if (fastrpc_mmap_remove_ssr(fl))
2701 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
2702 me->channel[fl->cid].spd[session].prevpdrcount =
2703 me->channel[fl->cid].spd[session].pdrcount;
2704 }
2705 if (!me->channel[fl->cid].spd[session].ispdup) {
2706 VERIFY(err, 0);
2707 if (err) {
2708 err = -ENOTCONN;
2709 goto bail;
2710 }
2711 }
2712bail:
2713 return err;
2714}
2715
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002716static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302717 size_t len, struct fastrpc_mmap **ppmap);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002718
2719static void fastrpc_mmap_add(struct fastrpc_mmap *map);
2720
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05302721static inline void get_fastrpc_ioctl_mmap_64(
2722 struct fastrpc_ioctl_mmap_64 *mmap64,
2723 struct fastrpc_ioctl_mmap *immap)
2724{
2725 immap->fd = mmap64->fd;
2726 immap->flags = mmap64->flags;
2727 immap->vaddrin = (uintptr_t)mmap64->vaddrin;
2728 immap->size = mmap64->size;
2729}
2730
2731static inline void put_fastrpc_ioctl_mmap_64(
2732 struct fastrpc_ioctl_mmap_64 *mmap64,
2733 struct fastrpc_ioctl_mmap *immap)
2734{
2735 mmap64->vaddrout = (uint64_t)immap->vaddrout;
2736}
2737
2738static inline void get_fastrpc_ioctl_munmap_64(
2739 struct fastrpc_ioctl_munmap_64 *munmap64,
2740 struct fastrpc_ioctl_munmap *imunmap)
2741{
2742 imunmap->vaddrout = (uintptr_t)munmap64->vaddrout;
2743 imunmap->size = munmap64->size;
2744}
2745
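/*
 * Handle the munmap ioctl: free a kernel-allocated ADSP_MMAP_ADD_PAGES
 * buffer if the address and size match one, otherwise remove the matching
 * fastrpc_mmap and unmap it on the DSP.
 */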
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002746static int fastrpc_internal_munmap(struct fastrpc_file *fl,
2747 struct fastrpc_ioctl_munmap *ud)
2748{
2749 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302750 struct fastrpc_mmap *map = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302751 struct fastrpc_buf *rbuf = NULL, *free = NULL;
2752 struct hlist_node *n;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002753
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302754 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302755
2756 spin_lock(&fl->hlock);
2757 hlist_for_each_entry_safe(rbuf, n, &fl->remote_bufs, hn_rem) {
2758 if (rbuf->raddr && (rbuf->flags == ADSP_MMAP_ADD_PAGES)) {
2759 if ((rbuf->raddr == ud->vaddrout) &&
2760 (rbuf->size == ud->size)) {
2761 free = rbuf;
2762 break;
2763 }
2764 }
2765 }
2766 spin_unlock(&fl->hlock);
2767
2768 if (free) {
2769 VERIFY(err, !fastrpc_munmap_on_dsp(fl, free->raddr,
2770 free->phys, free->size, free->flags));
2771 if (err)
2772 goto bail;
2773 fastrpc_buf_free(rbuf, 0);
2774 mutex_unlock(&fl->map_mutex);
2775 return err;
2776 }
2777
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302778 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002779 VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302780 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002781 if (err)
2782 goto bail;
Vamsi krishna Gattupalli4e87ecb2020-12-14 11:06:27 +05302783 VERIFY(err, map != NULL);
2784 if (err) {
2785 err = -EINVAL;
2786 goto bail;
2787 }
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302788 VERIFY(err, !fastrpc_munmap_on_dsp(fl, map->raddr,
Vamsi krishna Gattupalli4e87ecb2020-12-14 11:06:27 +05302789 map->phys, map->size, map->flags));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002790 if (err)
2791 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302792 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302793 fastrpc_mmap_free(map, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302794 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002795bail:
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302796 if (err && map) {
2797 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002798 fastrpc_mmap_add(map);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302799 mutex_unlock(&fl->fl_map_mutex);
2800 }
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302801 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002802 return err;
2803}
2804
c_mtharu7bd6a422017-10-17 18:15:37 +05302805static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
2806 struct fastrpc_ioctl_munmap_fd *ud) {
2807 int err = 0;
2808 struct fastrpc_mmap *map = NULL;
2809
2810 VERIFY(err, (fl && ud));
2811 if (err)
2812 goto bail;
Mohammed Nayeem Ur Rahmanfc548a52020-01-07 17:07:55 +05302813 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302814 mutex_lock(&fl->fl_map_mutex);
Tharun Kumar Merugu09fc6152018-02-16 13:13:12 +05302815 if (fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
2816 pr_err("adsprpc: mapping not found to unmap %d va %llx %x\n",
c_mtharu7bd6a422017-10-17 18:15:37 +05302817 ud->fd, (unsigned long long)ud->va,
2818 (unsigned int)ud->len);
2819 err = -1;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302820 mutex_unlock(&fl->fl_map_mutex);
Mohammed Nayeem Ur Rahmanfc548a52020-01-07 17:07:55 +05302821 mutex_unlock(&fl->map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302822 goto bail;
2823 }
2824 if (map)
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302825 fastrpc_mmap_free(map, 0);
2826 mutex_unlock(&fl->fl_map_mutex);
Mohammed Nayeem Ur Rahmanfc548a52020-01-07 17:07:55 +05302827 mutex_unlock(&fl->map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302828bail:
2829 return err;
2830}
2831
2832
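/*
 * Map memory to the remote process. ADSP_MMAP_ADD_PAGES allocates kernel
 * memory (a user vaddrin is rejected); otherwise an existing mapping for the
 * same fd/address/size is reused, or a new one is created and registered on
 * the DSP. For the heap flags (ADSP_MMAP_HEAP_ADDR/REMOTE_HEAP_ADDR) the
 * virtual address sent to the DSP is 0. The remote address comes back in
 * ud->vaddrout.
 *
 * Rough user-space sketch, illustrative only (field types and ioctl
 * encodings per adsprpc_shared.h; buf_fd/va/len are placeholders):
 *
 *	struct fastrpc_ioctl_mmap m = {
 *		.fd = buf_fd, .flags = 0,
 *		.vaddrin = (uintptr_t)va, .size = len,
 *	};
 *	if (!ioctl(dev_fd, FASTRPC_IOCTL_MMAP, &m))
 *		remote_addr = m.vaddrout;
 */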
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002833static int fastrpc_internal_mmap(struct fastrpc_file *fl,
2834 struct fastrpc_ioctl_mmap *ud)
2835{
2836
c_mtharue1a5ce12017-10-13 20:47:09 +05302837 struct fastrpc_mmap *map = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302838 struct fastrpc_buf *rbuf = NULL;
2839 unsigned long dma_attr = 0;
2840 uintptr_t raddr = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002841 int err = 0;
2842
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302843 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302844 if (ud->flags == ADSP_MMAP_ADD_PAGES) {
2845 if (ud->vaddrin) {
2846 err = -EINVAL;
2847 pr_err("adsprpc: %s: %s: ERROR: adding user allocated pages is not supported\n",
2848 current->comm, __func__);
2849 goto bail;
2850 }
2851 dma_attr = DMA_ATTR_EXEC_MAPPING |
2852 DMA_ATTR_NO_KERNEL_MAPPING |
2853 DMA_ATTR_FORCE_NON_COHERENT;
2854 err = fastrpc_buf_alloc(fl, ud->size, dma_attr, ud->flags,
2855 1, &rbuf);
2856 if (err)
2857 goto bail;
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302858 err = fastrpc_mmap_on_dsp(fl, ud->flags, 0,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302859 rbuf->phys, rbuf->size, &raddr);
2860 if (err)
2861 goto bail;
2862 rbuf->raddr = raddr;
2863 } else {
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302864
2865 uintptr_t va_to_dsp;
2866
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302867 mutex_lock(&fl->fl_map_mutex);
2868 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
2869 ud->size, ud->flags, 1, &map)) {
Mohammed Nayeem Ur Rahmanaf5f6102019-10-09 13:36:52 +05302870 ud->vaddrout = map->raddr;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302871 mutex_unlock(&fl->fl_map_mutex);
2872 mutex_unlock(&fl->map_mutex);
2873 return 0;
2874 }
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302875
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302876 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
2877 (uintptr_t)ud->vaddrin, ud->size,
2878 ud->flags, &map));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302879 mutex_unlock(&fl->fl_map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302880 if (err)
2881 goto bail;
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302882
2883 if (ud->flags == ADSP_MMAP_HEAP_ADDR ||
2884 ud->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
2885 va_to_dsp = 0;
2886 else
2887 va_to_dsp = (uintptr_t)map->va;
2888 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, va_to_dsp,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302889 map->phys, map->size, &raddr));
2890 if (err)
2891 goto bail;
2892 map->raddr = raddr;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302893 }
Mohammed Nayeem Ur Rahman3ac5d322018-09-24 13:54:08 +05302894 ud->vaddrout = raddr;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002895 bail:
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302896 if (err && map) {
2897 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302898 fastrpc_mmap_free(map, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302899 mutex_unlock(&fl->fl_map_mutex);
2900 }
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302901 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002902 return err;
2903}
2904
2905static void fastrpc_channel_close(struct kref *kref)
2906{
2907 struct fastrpc_apps *me = &gfa;
2908 struct fastrpc_channel_ctx *ctx;
2909 int cid;
2910
2911 ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
2912 cid = ctx - &gcinfo[0];
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05302913 if (me->glink) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05302914 fastrpc_glink_close(ctx->chan, cid);
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05302915 ctx->chan = NULL;
2916 }
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302917 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002918 pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
2919 MAJOR(me->dev_no), cid);
2920}
2921
2922static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
2923
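/*
 * Pick an SMMU context-bank session on the channel: a shared context bank
 * when sharedcb is requested, otherwise the first unused session whose
 * secure attribute matches. Channels with no sesscount (no SMMU) fall back
 * to session 0 backed by me->dev.
 */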
2924static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302925 int secure, int sharedcb, struct fastrpc_session_ctx **session)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002926{
2927 struct fastrpc_apps *me = &gfa;
2928 int idx = 0, err = 0;
2929
2930 if (chan->sesscount) {
2931 for (idx = 0; idx < chan->sesscount; ++idx) {
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302932 if ((sharedcb && chan->session[idx].smmu.sharedcb) ||
2933 (!chan->session[idx].used &&
2934 chan->session[idx].smmu.secure
2935 == secure && !sharedcb)) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002936 chan->session[idx].used = 1;
2937 break;
2938 }
2939 }
2940 VERIFY(err, idx < chan->sesscount);
2941 if (err)
2942 goto bail;
2943 chan->session[idx].smmu.faults = 0;
2944 } else {
2945 VERIFY(err, me->dev != NULL);
2946 if (err)
2947 goto bail;
2948 chan->session[0].dev = me->dev;
c_mtharue1a5ce12017-10-13 20:47:09 +05302949 chan->session[0].smmu.dev = me->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002950 }
2951
2952 *session = &chan->session[idx];
2953 bail:
2954 return err;
2955}
2956
c_mtharue1a5ce12017-10-13 20:47:09 +05302957static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
2958 size_t size)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002959{
2960 if (glink_queue_rx_intent(h, NULL, size))
2961 return false;
2962 return true;
2963}
2964
c_mtharue1a5ce12017-10-13 20:47:09 +05302965static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002966 const void *pkt_priv, const void *ptr)
2967{
2968}
2969
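/*
 * glink receive callback: rsp->ctx carries the context-table index in its
 * FASTRPC_CTXID_MASK bits. The table entry is validated against the stored
 * ctxid and FASTRPC_CTX_MAGIC under ctxlock before the waiting invoke is
 * completed through context_notify_user(); on any validation failure the rx
 * buffer is returned immediately with glink_rx_done().
 */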
c_mtharue1a5ce12017-10-13 20:47:09 +05302970static void fastrpc_glink_notify_rx(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002971 const void *pkt_priv, const void *ptr, size_t size)
2972{
2973 struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302974 struct fastrpc_apps *me = &gfa;
2975 uint32_t index;
c_mtharufdac6892017-10-12 13:09:01 +05302976 int err = 0;
Jeya R8fa59d62020-11-04 20:42:59 +05302977 unsigned long irq_flags = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002978
c_mtharufdac6892017-10-12 13:09:01 +05302979 VERIFY(err, (rsp && size >= sizeof(*rsp)));
2980 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302981 goto bail;
2982
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302983 index = (uint32_t)((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
2984 VERIFY(err, index < FASTRPC_CTX_MAX);
c_mtharufdac6892017-10-12 13:09:01 +05302985 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302986 goto bail;
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302987
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302988 VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
2989 if (err)
2990 goto bail;
2991
Jeya R8fa59d62020-11-04 20:42:59 +05302992 spin_lock_irqsave(&me->ctxlock, irq_flags);
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302993 VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp->ctx & ~3)) &&
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302994 me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
Jeya R8fa59d62020-11-04 20:42:59 +05302995 if (err) {
2996 spin_unlock_irqrestore(&me->ctxlock, irq_flags);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302997 goto bail;
Jeya R8fa59d62020-11-04 20:42:59 +05302998 }
Mohammed Nayeem Ur Rahman32ba95d2019-07-26 17:31:37 +05302999 me->ctxtable[index]->handle = handle;
3000 me->ctxtable[index]->ptr = ptr;
Jeya R8fa59d62020-11-04 20:42:59 +05303001 spin_unlock_irqrestore(&me->ctxlock, irq_flags);
Mohammed Nayeem Ur Rahman32ba95d2019-07-26 17:31:37 +05303002
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05303003 context_notify_user(me->ctxtable[index], rsp->retval);
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05303004bail:
Jeya R859f8012020-08-09 02:09:14 +05303005 if (err) {
3006 glink_rx_done(handle, ptr, true);
c_mtharufdac6892017-10-12 13:09:01 +05303007 pr_err("adsprpc: invalid response or context\n");
Jeya R859f8012020-08-09 02:09:14 +05303008 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003009}
3010
c_mtharue1a5ce12017-10-13 20:47:09 +05303011static void fastrpc_glink_notify_state(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003012 unsigned int event)
3013{
3014 struct fastrpc_apps *me = &gfa;
3015 int cid = (int)(uintptr_t)priv;
3016 struct fastrpc_glink_info *link;
3017
3018 if (cid < 0 || cid >= NUM_CHANNELS)
3019 return;
3020 link = &me->channel[cid].link;
3021 switch (event) {
3022 case GLINK_CONNECTED:
3023 link->port_state = FASTRPC_LINK_CONNECTED;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05303024 complete(&me->channel[cid].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003025 break;
3026 case GLINK_LOCAL_DISCONNECTED:
3027 link->port_state = FASTRPC_LINK_DISCONNECTED;
3028 break;
3029 case GLINK_REMOTE_DISCONNECTED:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003030 break;
3031 default:
3032 break;
3033 }
3034}
3035
3036static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
3037 struct fastrpc_session_ctx **session)
3038{
3039 int err = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303040 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003041
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303042 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003043 if (!*session)
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303044 err = fastrpc_session_alloc_locked(chan, secure, 0, session);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303045 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003046 return err;
3047}
3048
3049static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
3050 struct fastrpc_session_ctx *session)
3051{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303052 struct fastrpc_apps *me = &gfa;
3053
3054 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003055 session->used = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303056 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003057}
3058
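/*
 * Release all per-client state: detach from the drivers list, mark the file
 * as closing, free the init memory, pending contexts, cached and remote
 * buffers, mappings and perf entries, drop the channel reference taken at
 * open (which may close the transport), free the SMMU session(s) and
 * destroy the per-file mutexes.
 */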
3059static int fastrpc_file_free(struct fastrpc_file *fl)
3060{
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303061 struct hlist_node *n = NULL;
Tharun Kumar Merugu3e966762018-04-04 10:56:44 +05303062 struct fastrpc_mmap *map = NULL, *lmap = NULL;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303063 struct fastrpc_perf *perf = NULL, *fperf = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003064 int cid;
3065
3066 if (!fl)
3067 return 0;
3068 cid = fl->cid;
3069
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05303070 (void)fastrpc_release_current_dsp_process(fl);
3071
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003072 spin_lock(&fl->apps->hlock);
3073 hlist_del_init(&fl->hn);
3074 spin_unlock(&fl->apps->hlock);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303075 kfree(fl->debug_buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003076
Sathish Ambleyd7fbcbb2017-03-08 10:55:48 -08003077 if (!fl->sctx) {
3078 kfree(fl);
3079 return 0;
3080 }
tharun kumar9f899ea2017-07-03 17:07:03 +05303081 spin_lock(&fl->hlock);
3082 fl->file_close = 1;
3083 spin_unlock(&fl->hlock);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303084 if (!IS_ERR_OR_NULL(fl->init_mem))
3085 fastrpc_buf_free(fl->init_mem, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003086 fastrpc_context_list_dtor(fl);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303087 fastrpc_cached_buf_list_free(fl);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303088 mutex_lock(&fl->fl_map_mutex);
Tharun Kumar Merugu3e966762018-04-04 10:56:44 +05303089 do {
3090 lmap = NULL;
3091 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
3092 hlist_del_init(&map->hn);
3093 lmap = map;
3094 break;
3095 }
3096 fastrpc_mmap_free(lmap, 1);
3097 } while (lmap);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303098 mutex_unlock(&fl->fl_map_mutex);
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303099 if (fl->refcount && (fl->ssrcount == fl->apps->channel[cid].ssrcount))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003100 kref_put_mutex(&fl->apps->channel[cid].kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303101 fastrpc_channel_close, &fl->apps->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003102 if (fl->sctx)
3103 fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
3104 if (fl->secsctx)
3105 fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303106
3107 mutex_lock(&fl->perf_mutex);
3108 do {
3109 struct hlist_node *pn = NULL;
3110
3111 fperf = NULL;
3112 hlist_for_each_entry_safe(perf, pn, &fl->perf, hn) {
3113 hlist_del_init(&perf->hn);
3114 fperf = perf;
3115 break;
3116 }
3117 kfree(fperf);
3118 } while (fperf);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303119 fastrpc_remote_buf_list_free(fl);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303120 mutex_unlock(&fl->perf_mutex);
3121 mutex_destroy(&fl->perf_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303122 mutex_destroy(&fl->fl_map_mutex);
Tharun Kumar Merugu8714e642018-05-17 15:21:08 +05303123 mutex_destroy(&fl->map_mutex);
Jeya R2bcad4f2021-06-10 13:03:44 +05303124 mutex_destroy(&fl->pm_qos_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003125 kfree(fl);
3126 return 0;
3127}
3128
3129static int fastrpc_device_release(struct inode *inode, struct file *file)
3130{
3131 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
3132
3133 if (fl) {
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303134 if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
3135 pm_qos_remove_request(&fl->pm_qos_req);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003136 if (fl->debugfs_file != NULL)
3137 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003138 fastrpc_file_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +05303139 file->private_data = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003140 }
3141 return 0;
3142}
3143
3144static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
3145 void *priv)
3146{
3147 struct fastrpc_apps *me = &gfa;
3148 int cid = (int)((uintptr_t)priv);
3149 struct fastrpc_glink_info *link;
3150
3151 if (cid < 0 || cid >= NUM_CHANNELS)
3152 return;
3153
3154 link = &me->channel[cid].link;
3155 switch (cb_info->link_state) {
3156 case GLINK_LINK_STATE_UP:
3157 link->link_state = FASTRPC_LINK_STATE_UP;
3158 complete(&me->channel[cid].work);
3159 break;
3160 case GLINK_LINK_STATE_DOWN:
3161 link->link_state = FASTRPC_LINK_STATE_DOWN;
3162 break;
3163 default:
3164 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
3165 break;
3166 }
3167}
3168
3169static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
3170{
3171 int err = 0;
3172 struct fastrpc_glink_info *link;
3173
3174 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3175 if (err)
3176 goto bail;
3177
3178 link = &me->channel[cid].link;
3179 if (link->link_notify_handle != NULL)
3180 goto bail;
3181
3182 link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
3183 link->link_notify_handle = glink_register_link_state_cb(
3184 &link->link_info,
3185 (void *)((uintptr_t)cid));
3186 VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
3187 if (err) {
3188 link->link_notify_handle = NULL;
3189 goto bail;
3190 }
3191 VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
3192 RPC_TIMEOUT));
3193bail:
3194 return err;
3195}
3196
3197static void fastrpc_glink_close(void *chan, int cid)
3198{
3199 int err = 0;
3200 struct fastrpc_glink_info *link;
3201
3202 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3203 if (err)
3204 return;
3205 link = &gfa.channel[cid].link;
3206
c_mtharu314a4202017-11-15 22:09:17 +05303207 if (link->port_state == FASTRPC_LINK_CONNECTED ||
3208 link->port_state == FASTRPC_LINK_REMOTE_DISCONNECTING) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003209 link->port_state = FASTRPC_LINK_DISCONNECTING;
3210 glink_close(chan);
3211 }
3212}
3213
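/*
 * Open the glink port for a channel. The link must already be up and the
 * port disconnected; the open config takes the edge/transport from gcinfo
 * and the notify callbacks above, and the caller then waits for
 * GLINK_CONNECTED on the channel's workport completion.
 */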
3214static int fastrpc_glink_open(int cid)
3215{
3216 int err = 0;
3217 void *handle = NULL;
3218 struct fastrpc_apps *me = &gfa;
3219 struct glink_open_config *cfg;
3220 struct fastrpc_glink_info *link;
3221
3222 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3223 if (err)
3224 goto bail;
3225 link = &me->channel[cid].link;
3226 cfg = &me->channel[cid].link.cfg;
3227 VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
3228 if (err)
3229 goto bail;
3230
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05303231 VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
3232 if (err)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003233 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003234
3235 link->port_state = FASTRPC_LINK_CONNECTING;
3236 cfg->priv = (void *)(uintptr_t)cid;
3237 cfg->edge = gcinfo[cid].link.link_info.edge;
3238 cfg->transport = gcinfo[cid].link.link_info.transport;
3239 cfg->name = FASTRPC_GLINK_GUID;
3240 cfg->notify_rx = fastrpc_glink_notify_rx;
3241 cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
3242 cfg->notify_state = fastrpc_glink_notify_state;
3243 cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
3244 handle = glink_open(cfg);
3245 VERIFY(err, !IS_ERR_OR_NULL(handle));
c_mtharu6e1d26b2017-10-09 16:05:24 +05303246 if (err) {
3247 if (link->port_state == FASTRPC_LINK_CONNECTING)
3248 link->port_state = FASTRPC_LINK_DISCONNECTED;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003249 goto bail;
c_mtharu6e1d26b2017-10-09 16:05:24 +05303250 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003251 me->channel[cid].chan = handle;
3252bail:
3253 return err;
3254}
3255
Sathish Ambley1ca68232017-01-19 10:32:55 -08003256static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
3257{
3258 filp->private_data = inode->i_private;
3259 return 0;
3260}
3261
3262static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
3263 size_t count, loff_t *position)
3264{
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303265 struct fastrpc_apps *me = &gfa;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003266 struct fastrpc_file *fl = filp->private_data;
3267 struct hlist_node *n;
c_mtharue1a5ce12017-10-13 20:47:09 +05303268 struct fastrpc_buf *buf = NULL;
3269 struct fastrpc_mmap *map = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303270 struct fastrpc_mmap *gmaps = NULL;
c_mtharue1a5ce12017-10-13 20:47:09 +05303271 struct smq_invoke_ctx *ictx = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303272 struct fastrpc_channel_ctx *chan = NULL;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003273 unsigned int len = 0;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303274 int i, j, sess_used = 0, ret = 0;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003275 char *fileinfo = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303276 char single_line[UL_SIZE] = "----------------";
3277 char title[UL_SIZE] = "=========================";
Sathish Ambley1ca68232017-01-19 10:32:55 -08003278
3279 fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
3280 if (!fileinfo)
3281 goto bail;
3282 if (fl == NULL) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303283 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3284 "\n%s %s %s\n", title, " CHANNEL INFO ", title);
3285 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3286 "%-8s|%-9s|%-9s|%-14s|%-9s|%-13s\n",
3287 "susbsys", "refcount", "sesscount", "issubsystemup",
3288 "ssrcount", "session_used");
3289 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3290 "-%s%s%s%s-\n", single_line, single_line,
3291 single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003292 for (i = 0; i < NUM_CHANNELS; i++) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303293 sess_used = 0;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003294 chan = &gcinfo[i];
3295 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303296 DEBUGFS_SIZE - len, "%-8s", chan->subsys);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003297 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303298 DEBUGFS_SIZE - len, "|%-9d",
3299 chan->kref.refcount.counter);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303300 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303301 DEBUGFS_SIZE - len, "|%-9d",
3302 chan->sesscount);
3303 len += scnprintf(fileinfo + len,
3304 DEBUGFS_SIZE - len, "|%-14d",
3305 chan->issubsystemup);
3306 len += scnprintf(fileinfo + len,
3307 DEBUGFS_SIZE - len, "|%-9d",
3308 chan->ssrcount);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003309 for (j = 0; j < chan->sesscount; j++) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303310 sess_used += chan->session[j].used;
3311 }
3312 len += scnprintf(fileinfo + len,
3313 DEBUGFS_SIZE - len, "|%-13d\n", sess_used);
3314
3315 }
3316 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3317 "\n%s%s%s\n", "=============",
3318 " CMA HEAP ", "==============");
3319 len += scnprintf(fileinfo + len,
3320 DEBUGFS_SIZE - len, "%-20s|%-20s\n", "addr", "size");
3321 len += scnprintf(fileinfo + len,
3322 DEBUGFS_SIZE - len, "--%s%s---\n",
3323 single_line, single_line);
3324 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3325 "0x%-18llX", me->range.addr);
3326 len += scnprintf(fileinfo + len,
3327 DEBUGFS_SIZE - len, "|0x%-18llX\n", me->range.size);
3328 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3329 "\n==========%s %s %s===========\n",
3330 title, " GMAPS ", title);
3331 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3332 "%-20s|%-20s|%-20s|%-20s\n",
3333 "fd", "phys", "size", "va");
3334 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3335 "%s%s%s%s%s\n", single_line, single_line,
3336 single_line, single_line, single_line);
3337 hlist_for_each_entry_safe(gmaps, n, &me->maps, hn) {
3338 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3339 "%-20d|0x%-18llX|0x%-18X|0x%-20lX\n\n",
3340 gmaps->fd, gmaps->phys,
3341 (uint32_t)gmaps->size,
3342 gmaps->va);
3343 }
3344 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3345 "%-20s|%-20s|%-20s|%-20s\n",
3346 "len", "refs", "raddr", "flags");
3347 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3348 "%s%s%s%s%s\n", single_line, single_line,
3349 single_line, single_line, single_line);
3350 hlist_for_each_entry_safe(gmaps, n, &me->maps, hn) {
3351 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3352 "0x%-18X|%-20d|%-20lu|%-20u\n",
3353 (uint32_t)gmaps->len, gmaps->refs,
3354 gmaps->raddr, gmaps->flags);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003355 }
3356 } else {
3357 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303358 "\n%s %13s %d\n", "cid", ":", fl->cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003359 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303360 "%s %12s %d\n", "tgid", ":", fl->tgid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003361 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303362 "%s %7s %d\n", "sessionid", ":", fl->sessionid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003363 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303364 "%s %8s %d\n", "ssrcount", ":", fl->ssrcount);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303365 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303366 "%s %8s %d\n", "refcount", ":", fl->refcount);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003367 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303368 "%s %14s %d\n", "pd", ":", fl->pd);
3369 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3370 "%s %9s %s\n", "spdname", ":", fl->spdname);
3371 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3372 "%s %6s %d\n", "file_close", ":", fl->file_close);
3373 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3374 "%s %8s %d\n", "sharedcb", ":", fl->sharedcb);
3375 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3376 "%s %9s %d\n", "profile", ":", fl->profile);
3377 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3378 "%s %3s %d\n", "smmu.coherent", ":",
3379 fl->sctx->smmu.coherent);
3380 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3381 "%s %4s %d\n", "smmu.enabled", ":",
3382 fl->sctx->smmu.enabled);
3383 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3384 "%s %9s %d\n", "smmu.cb", ":", fl->sctx->smmu.cb);
3385 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3386 "%s %5s %d\n", "smmu.secure", ":",
3387 fl->sctx->smmu.secure);
3388 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3389 "%s %5s %d\n", "smmu.faults", ":",
3390 fl->sctx->smmu.faults);
3391 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3392 "%s %s %d\n", "link.link_state",
3393 ":", *&me->channel[fl->cid].link.link_state);
3394
3395 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3396 "\n=======%s %s %s======\n", title,
3397 " LIST OF MAPS ", title);
3398
3399 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3400 "%-20s|%-20s|%-20s\n", "va", "phys", "size");
3401 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3402 "%s%s%s%s%s\n",
3403 single_line, single_line, single_line,
3404 single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003405 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303406 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3407 "0x%-20lX|0x%-20llX|0x%-20zu\n\n",
3408 map->va, map->phys,
3409 map->size);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003410 }
3411 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303412 "%-20s|%-20s|%-20s|%-20s\n",
3413 "len", "refs",
3414 "raddr", "uncached");
3415 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3416 "%s%s%s%s%s\n",
3417 single_line, single_line, single_line,
3418 single_line, single_line);
3419 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
3420 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3421 "%-20zu|%-20d|0x%-20lX|%-20d\n\n",
3422 map->len, map->refs, map->raddr,
3423 map->uncached);
3424 }
3425 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3426 "%-20s|%-20s\n", "secure", "attr");
3427 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3428 "%s%s%s%s%s\n",
3429 single_line, single_line, single_line,
3430 single_line, single_line);
3431 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
3432 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3433 "%-20d|0x%-20lX\n\n",
3434 map->secure, map->attr);
3435 }
3436 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303437 "%s %d\n\n",
3438 "KERNEL MEMORY ALLOCATION:", 1);
3439 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303440 "\n======%s %s %s======\n", title,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303441 " LIST OF CACHED BUFS ", title);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303442 spin_lock(&fl->hlock);
3443 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303444 "%-19s|%-19s|%-19s|%-19s\n",
3445 "virt", "phys", "size", "dma_attr");
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303446 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3447 "%s%s%s%s%s\n", single_line, single_line,
3448 single_line, single_line, single_line);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303449 hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303450 len += scnprintf(fileinfo + len,
3451 DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303452 "0x%-17p|0x%-17llX|%-19zu|0x%-17lX\n",
3453 buf->virt, (uint64_t)buf->phys, buf->size,
3454 buf->dma_attr);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303455 }
3456 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3457 "\n%s %s %s\n", title,
3458 " LIST OF PENDING SMQCONTEXTS ", title);
3459
3460 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3461 "%-20s|%-10s|%-10s|%-10s|%-20s\n",
3462 "sc", "pid", "tgid", "used", "ctxid");
3463 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3464 "%s%s%s%s%s\n", single_line, single_line,
3465 single_line, single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003466 hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
3467 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303468 "0x%-18X|%-10d|%-10d|%-10zu|0x%-20llX\n\n",
3469 ictx->sc, ictx->pid, ictx->tgid,
3470 ictx->used, ictx->ctxid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003471 }
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303472
Sathish Ambley1ca68232017-01-19 10:32:55 -08003473 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303474 "\n%s %s %s\n", title,
3475 " LIST OF INTERRUPTED SMQCONTEXTS ", title);
3476
3477 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3478 "%-20s|%-10s|%-10s|%-10s|%-20s\n",
3479 "sc", "pid", "tgid", "used", "ctxid");
3480 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3481 "%s%s%s%s%s\n", single_line, single_line,
3482 single_line, single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003483 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303484 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3485 "%-20u|%-20d|%-20d|%-20zu|0x%-20llX\n\n",
3486 ictx->sc, ictx->pid, ictx->tgid,
3487 ictx->used, ictx->ctxid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003488 }
3489 spin_unlock(&fl->hlock);
3490 }
3491 if (len > DEBUGFS_SIZE)
3492 len = DEBUGFS_SIZE;
3493 ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
3494 kfree(fileinfo);
3495bail:
3496 return ret;
3497}
3498
3499static const struct file_operations debugfs_fops = {
3500 .open = fastrpc_debugfs_open,
3501 .read = fastrpc_debugfs_read,
3502};
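/*
 * Bring up the transport for fl->cid on first use. With glink enabled the
 * link-state callback is registered, the port is opened and the code waits
 * for it to connect; otherwise the named SMD edge channel is opened. Rx
 * intents are then queued for glink and, if channel 0 comes back after an
 * SSR, the stale remote heap is unmapped before use.
 */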
Sathish Ambley36849af2017-02-02 09:35:55 -08003503static int fastrpc_channel_open(struct fastrpc_file *fl)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003504{
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003505 struct fastrpc_apps *me = &gfa;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05303506 int cid = -1, ii, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003507
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303508 mutex_lock(&me->smd_mutex);
3509
Sathish Ambley36849af2017-02-02 09:35:55 -08003510 VERIFY(err, fl && fl->sctx);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003511 if (err)
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303512 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08003513 cid = fl->cid;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05303514 VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
3515 if (err) {
3516 err = -ECHRNG;
c_mtharu314a4202017-11-15 22:09:17 +05303517 goto bail;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05303518 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303519 if (me->channel[cid].ssrcount !=
3520 me->channel[cid].prevssrcount) {
3521 if (!me->channel[cid].issubsystemup) {
3522 VERIFY(err, 0);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303523 if (err) {
3524 err = -ENOTCONN;
c_mtharue1a5ce12017-10-13 20:47:09 +05303525 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303526 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303527 }
3528 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003529 fl->ssrcount = me->channel[cid].ssrcount;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303530 fl->refcount = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003531 if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
c_mtharue1a5ce12017-10-13 20:47:09 +05303532 (me->channel[cid].chan == NULL)) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303533 if (me->glink) {
3534 VERIFY(err, 0 == fastrpc_glink_register(cid, me));
3535 if (err)
3536 goto bail;
3537 VERIFY(err, 0 == fastrpc_glink_open(cid));
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05303538 VERIFY(err,
3539 wait_for_completion_timeout(&me->channel[cid].workport,
3540 RPC_TIMEOUT));
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303541 } else {
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05303542 if (me->channel[cid].chan == NULL) {
3543 VERIFY(err, !smd_named_open_on_edge(
3544 FASTRPC_SMD_GUID,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303545 gcinfo[cid].channel,
3546 (smd_channel_t **)&me->channel[cid].chan,
3547 (void *)(uintptr_t)cid,
3548 smd_event_handler));
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05303549 VERIFY(err,
3550 wait_for_completion_timeout(&me->channel[cid].workport,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003551 RPC_TIMEOUT));
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05303552
3553 }
3554 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003555 if (err) {
c_mtharue1a5ce12017-10-13 20:47:09 +05303556 me->channel[cid].chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003557 goto bail;
3558 }
3559 kref_init(&me->channel[cid].kref);
3560 pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
3561 MAJOR(me->dev_no), cid);
Tharun Kumar Meruguc42c6e22018-05-29 15:50:46 +05303562
3563 for (ii = 0; ii < FASTRPC_GLINK_INTENT_NUM && me->glink; ii++)
3564 glink_queue_rx_intent(me->channel[cid].chan, NULL,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303565 FASTRPC_GLINK_INTENT_LEN);
Tharun Kumar Meruguc42c6e22018-05-29 15:50:46 +05303566
Tharun Kumar Merugud86fc8c2018-01-04 16:35:31 +05303567 if (cid == 0 && me->channel[cid].ssrcount !=
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003568 me->channel[cid].prevssrcount) {
c_mtharue1a5ce12017-10-13 20:47:09 +05303569 if (fastrpc_mmap_remove_ssr(fl))
3570 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003571 me->channel[cid].prevssrcount =
3572 me->channel[cid].ssrcount;
3573 }
3574 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003575
3576bail:
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303577 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003578 return err;
3579}
3580
Sathish Ambley36849af2017-02-02 09:35:55 -08003581static int fastrpc_device_open(struct inode *inode, struct file *filp)
3582{
3583 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05303584 struct fastrpc_file *fl = NULL;
Sathish Ambley36849af2017-02-02 09:35:55 -08003585 struct fastrpc_apps *me = &gfa;
3586
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303587 /*
3588 * Indicates the device node opened
3589 * MINOR_NUM_DEV or MINOR_NUM_SECURE_DEV
3590 */
3591 int dev_minor = MINOR(inode->i_rdev);
3592
3593 VERIFY(err, ((dev_minor == MINOR_NUM_DEV) ||
3594 (dev_minor == MINOR_NUM_SECURE_DEV)));
3595 if (err) {
3596 pr_err("adsprpc: Invalid dev minor num %d\n", dev_minor);
3597 return err;
3598 }
3599
c_mtharue1a5ce12017-10-13 20:47:09 +05303600 VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
Sathish Ambley36849af2017-02-02 09:35:55 -08003601 if (err)
3602 return err;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303603
Sathish Ambley36849af2017-02-02 09:35:55 -08003604 context_list_ctor(&fl->clst);
3605 spin_lock_init(&fl->hlock);
3606 INIT_HLIST_HEAD(&fl->maps);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303607 INIT_HLIST_HEAD(&fl->perf);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303608 INIT_HLIST_HEAD(&fl->cached_bufs);
3609 INIT_HLIST_HEAD(&fl->remote_bufs);
Sathish Ambley36849af2017-02-02 09:35:55 -08003610 INIT_HLIST_NODE(&fl->hn);
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05303611 fl->sessionid = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003612 fl->apps = me;
3613 fl->mode = FASTRPC_MODE_SERIAL;
3614 fl->cid = -1;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303615 fl->dev_minor = dev_minor;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303616 fl->init_mem = NULL;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303617 fl->qos_request = 0;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303618 fl->refcount = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003619 filp->private_data = fl;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05303620 mutex_init(&fl->map_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303621 mutex_init(&fl->fl_map_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08003622 spin_lock(&me->hlock);
3623 hlist_add_head(&fl->hn, &me->drivers);
3624 spin_unlock(&me->hlock);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303625 mutex_init(&fl->perf_mutex);
Jeya R2bcad4f2021-06-10 13:03:44 +05303626 mutex_init(&fl->pm_qos_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08003627 return 0;
3628}
3629
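/*
 * Build the per-client debugfs name "<comm>_<pid>" (comm truncated by the
 * "%.10s" format) and create the debugfs file; failure to create it is only
 * a warning and does not fail the GETINFO path.
 */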
Edgar Flores1a772fa2020-02-07 14:59:29 -08003630static int fastrpc_set_process_info(struct fastrpc_file *fl)
3631{
3632 int err = 0, buf_size = 0;
3633 char strpid[PID_SIZE];
Jeya R336ada12021-03-18 14:04:49 +05303634 char cur_comm[TASK_COMM_LEN];
Edgar Flores1a772fa2020-02-07 14:59:29 -08003635
Jeya R336ada12021-03-18 14:04:49 +05303636 memcpy(cur_comm, current->comm, TASK_COMM_LEN);
3637 cur_comm[TASK_COMM_LEN-1] = '\0';
Edgar Flores1a772fa2020-02-07 14:59:29 -08003638 fl->tgid = current->tgid;
3639 snprintf(strpid, PID_SIZE, "%d", current->pid);
Jeya R336ada12021-03-18 14:04:49 +05303640 buf_size = strlen(cur_comm) + strlen("_") + strlen(strpid) + 1;
Edgar Flores1a772fa2020-02-07 14:59:29 -08003641 fl->debug_buf = kzalloc(buf_size, GFP_KERNEL);
3642 if (!fl->debug_buf) {
3643 err = -ENOMEM;
3644 return err;
3645 }
Jeya R336ada12021-03-18 14:04:49 +05303646 snprintf(fl->debug_buf, buf_size, "%.10s%s%d",
3647 cur_comm, "_", current->pid);
Edgar Flores1a772fa2020-02-07 14:59:29 -08003648 fl->debugfs_file = debugfs_create_file(fl->debug_buf, 0644,
3649 debugfs_root, fl, &debugfs_fops);
3650 if (!fl->debugfs_file)
3651 pr_warn("Error: %s: %s: failed to create debugfs file %s\n",
Jeya R336ada12021-03-18 14:04:49 +05303652 cur_comm, __func__, fl->debug_buf);
3653
Edgar Flores1a772fa2020-02-07 14:59:29 -08003654 return err;
3655}
3656
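/*
 * The first GETINFO on a file selects the channel id. On the non-secure
 * device node (MINOR_NUM_DEV) access to a SECURE_CHANNEL is refused with
 * -EPERM; otherwise a session is allocated and *info reports whether the
 * SMMU is enabled for it.
 *
 * Rough user-space sketch, illustrative only (assumes channel 0 and the
 * ioctl encoding from adsprpc_shared.h):
 *
 *	uint32_t info = 0;
 *	if (!ioctl(dev_fd, FASTRPC_IOCTL_GETINFO, &info))
 *		smmu_enabled = info;
 */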
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003657static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
3658{
3659 int err = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003660 uint32_t cid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003661
c_mtharue1a5ce12017-10-13 20:47:09 +05303662 VERIFY(err, fl != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003663 if (err)
3664 goto bail;
Edgar Flores1a772fa2020-02-07 14:59:29 -08003665 err = fastrpc_set_process_info(fl);
3666 if (err)
3667 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08003668 if (fl->cid == -1) {
3669 cid = *info;
3670 VERIFY(err, cid < NUM_CHANNELS);
3671 if (err)
3672 goto bail;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303673 /* Check to see if the device node is non-secure */
zhaochenfc798572018-08-17 15:32:37 +08003674 if (fl->dev_minor == MINOR_NUM_DEV &&
3675 fl->apps->secure_flag == true) {
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303676 /*
3677 * For non secure device node check and make sure that
3678 * the channel allows non-secure access
3679 * If not, bail. Session will not start.
3680 * cid will remain -1 and client will not be able to
3681 * invoke any other methods without failure
3682 */
3683 if (fl->apps->channel[cid].secure == SECURE_CHANNEL) {
3684 err = -EPERM;
3685 pr_err("adsprpc: GetInfo failed dev %d, cid %d, secure %d\n",
3686 fl->dev_minor, cid,
3687 fl->apps->channel[cid].secure);
3688 goto bail;
3689 }
3690 }
Sathish Ambley36849af2017-02-02 09:35:55 -08003691 fl->cid = cid;
3692 fl->ssrcount = fl->apps->channel[cid].ssrcount;
3693 VERIFY(err, !fastrpc_session_alloc_locked(
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303694 &fl->apps->channel[cid], 0, fl->sharedcb, &fl->sctx));
Sathish Ambley36849af2017-02-02 09:35:55 -08003695 if (err)
3696 goto bail;
3697 }
Tharun Kumar Merugu80be7d62017-08-02 11:03:22 +05303698 VERIFY(err, fl->sctx != NULL);
Jeya R984a1a32021-01-18 15:38:07 +05303699 if (err) {
3700 err = -EBADR;
Tharun Kumar Merugu80be7d62017-08-02 11:03:22 +05303701 goto bail;
Jeya R984a1a32021-01-18 15:38:07 +05303702 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003703 *info = (fl->sctx->smmu.enabled ? 1 : 0);
3704bail:
3705 return err;
3706}
3707
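/*
 * Per-file control requests: FASTRPC_CONTROL_LATENCY adds or updates a
 * PM QoS CPU/DMA latency request under pm_qos_mutex, FASTRPC_CONTROL_SMMU
 * records the shared context-bank hint (ignored on legacy targets) and
 * FASTRPC_CONTROL_KALLOC only reports kernel-allocation support back to
 * user space.
 *
 * Rough user-space sketch for the latency vote, illustrative only (struct
 * layout per adsprpc_shared.h):
 *
 *	struct fastrpc_ioctl_control c = { .req = FASTRPC_CONTROL_LATENCY };
 *	c.lp.enable = FASTRPC_LATENCY_CTRL_ENB;
 *	ioctl(dev_fd, FASTRPC_IOCTL_CONTROL, &c);
 */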
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303708static int fastrpc_internal_control(struct fastrpc_file *fl,
3709 struct fastrpc_ioctl_control *cp)
3710{
Mohammed Nayeem Ur Rahman18d633f2019-05-28 15:11:40 +05303711 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303712 int err = 0;
3713 int latency;
3714
3715 VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
3716 if (err)
3717 goto bail;
3718 VERIFY(err, !IS_ERR_OR_NULL(cp));
3719 if (err)
3720 goto bail;
3721
3722 switch (cp->req) {
3723 case FASTRPC_CONTROL_LATENCY:
3724 latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
3725 fl->apps->latency : PM_QOS_DEFAULT_VALUE;
3726 VERIFY(err, latency != 0);
3727 if (err)
3728 goto bail;
Jeya R2bcad4f2021-06-10 13:03:44 +05303729 mutex_lock(&fl->pm_qos_mutex);
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303730 if (!fl->qos_request) {
3731 pm_qos_add_request(&fl->pm_qos_req,
3732 PM_QOS_CPU_DMA_LATENCY, latency);
3733 fl->qos_request = 1;
3734 } else
3735 pm_qos_update_request(&fl->pm_qos_req, latency);
Jeya R2bcad4f2021-06-10 13:03:44 +05303736 mutex_unlock(&fl->pm_qos_mutex);
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303737 break;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303738 case FASTRPC_CONTROL_SMMU:
Mohammed Nayeem Ur Rahman18d633f2019-05-28 15:11:40 +05303739 if (!me->legacy)
3740 fl->sharedcb = cp->smmu.sharedcb;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303741 break;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303742 case FASTRPC_CONTROL_KALLOC:
3743 cp->kalloc.kalloc_support = 1;
3744 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303745 default:
3746 err = -ENOTTY;
3747 break;
3748 }
3749bail:
3750 return err;
3751}
3752
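/*
 * Main ioctl dispatcher. New requests are rejected once file_close is set,
 * each command copies its argument struct from user space with
 * K_COPY_FROM_USER, and the INVOKE/INIT cases fall through so the size of
 * the largest matching variant is used for the copy.
 */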
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003753static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
3754 unsigned long ioctl_param)
3755{
3756 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07003757 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003758 struct fastrpc_ioctl_mmap mmap;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303759 struct fastrpc_ioctl_mmap_64 mmap64;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003760 struct fastrpc_ioctl_munmap munmap;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303761 struct fastrpc_ioctl_munmap_64 munmap64;
c_mtharu7bd6a422017-10-17 18:15:37 +05303762 struct fastrpc_ioctl_munmap_fd munmap_fd;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08003763 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003764 struct fastrpc_ioctl_perf perf;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303765 struct fastrpc_ioctl_control cp;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003766 } p;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303767 union {
3768 struct fastrpc_ioctl_mmap mmap;
3769 struct fastrpc_ioctl_munmap munmap;
3770 } i;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003771 void *param = (char *)ioctl_param;
3772 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
3773 int size = 0, err = 0;
3774 uint32_t info;
3775
Jeya Rb70b4ad2021-01-25 10:28:42 -08003776 VERIFY(err, fl != NULL);
3777 if (err) {
3778 err = -EBADR;
3779 goto bail;
3780 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303781 p.inv.fds = NULL;
3782 p.inv.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07003783 p.inv.crc = NULL;
tharun kumar9f899ea2017-07-03 17:07:03 +05303784 spin_lock(&fl->hlock);
3785 if (fl->file_close == 1) {
3786 err = EBADF;
3787		pr_warn("ADSPRPC: fastrpc_device_release is happening, so not sending any new requests to DSP\n");
3788 spin_unlock(&fl->hlock);
3789 goto bail;
3790 }
3791 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003792
3793 switch (ioctl_num) {
3794 case FASTRPC_IOCTL_INVOKE:
3795 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07003796 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003797 case FASTRPC_IOCTL_INVOKE_FD:
3798 if (!size)
3799 size = sizeof(struct fastrpc_ioctl_invoke_fd);
3800 /* fall through */
3801 case FASTRPC_IOCTL_INVOKE_ATTRS:
3802 if (!size)
3803 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07003804 /* fall through */
3805 case FASTRPC_IOCTL_INVOKE_CRC:
3806 if (!size)
3807 size = sizeof(struct fastrpc_ioctl_invoke_crc);
c_mtharue1a5ce12017-10-13 20:47:09 +05303808 K_COPY_FROM_USER(err, 0, &p.inv, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003809 if (err)
3810 goto bail;
3811 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
3812 0, &p.inv)));
3813 if (err)
3814 goto bail;
3815 break;
3816 case FASTRPC_IOCTL_MMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303817 K_COPY_FROM_USER(err, 0, &p.mmap, param,
3818 sizeof(p.mmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303819 if (err)
3820 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003821 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
3822 if (err)
3823 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303824 K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003825 if (err)
3826 goto bail;
3827 break;
3828 case FASTRPC_IOCTL_MUNMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303829 K_COPY_FROM_USER(err, 0, &p.munmap, param,
3830 sizeof(p.munmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303831 if (err)
3832 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003833 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
3834 &p.munmap)));
3835 if (err)
3836 goto bail;
3837 break;
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303838 case FASTRPC_IOCTL_MMAP_64:
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303839 K_COPY_FROM_USER(err, 0, &p.mmap64, param,
3840 sizeof(p.mmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303841 if (err)
3842 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303843 get_fastrpc_ioctl_mmap_64(&p.mmap64, &i.mmap);
3844 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &i.mmap)));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303845 if (err)
3846 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303847 put_fastrpc_ioctl_mmap_64(&p.mmap64, &i.mmap);
3848 K_COPY_TO_USER(err, 0, param, &p.mmap64, sizeof(p.mmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303849 if (err)
3850 goto bail;
3851 break;
3852 case FASTRPC_IOCTL_MUNMAP_64:
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303853 K_COPY_FROM_USER(err, 0, &p.munmap64, param,
3854 sizeof(p.munmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303855 if (err)
3856 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303857 get_fastrpc_ioctl_munmap_64(&p.munmap64, &i.munmap);
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303858 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303859 &i.munmap)));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303860 if (err)
3861 goto bail;
3862 break;
c_mtharu7bd6a422017-10-17 18:15:37 +05303863 case FASTRPC_IOCTL_MUNMAP_FD:
3864 K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
3865 sizeof(p.munmap_fd));
3866 if (err)
3867 goto bail;
3868 VERIFY(err, 0 == (err = fastrpc_internal_munmap_fd(fl,
3869 &p.munmap_fd)));
3870 if (err)
3871 goto bail;
3872 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003873 case FASTRPC_IOCTL_SETMODE:
3874 switch ((uint32_t)ioctl_param) {
3875 case FASTRPC_MODE_PARALLEL:
3876 case FASTRPC_MODE_SERIAL:
3877 fl->mode = (uint32_t)ioctl_param;
3878 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003879 case FASTRPC_MODE_PROFILE:
3880 fl->profile = (uint32_t)ioctl_param;
3881 break;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05303882 case FASTRPC_MODE_SESSION:
3883 fl->sessionid = 1;
3884 fl->tgid |= (1 << SESSION_ID_INDEX);
3885 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003886 default:
3887 err = -ENOTTY;
3888 break;
3889 }
3890 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003891 case FASTRPC_IOCTL_GETPERF:
c_mtharue1a5ce12017-10-13 20:47:09 +05303892 K_COPY_FROM_USER(err, 0, &p.perf,
3893 param, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003894 if (err)
3895 goto bail;
3896 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
3897 if (p.perf.keys) {
3898 char *keys = PERF_KEYS;
3899
c_mtharue1a5ce12017-10-13 20:47:09 +05303900 K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
3901 keys, strlen(keys)+1);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003902 if (err)
3903 goto bail;
3904 }
3905 if (p.perf.data) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303906 struct fastrpc_perf *perf = NULL, *fperf = NULL;
3907 struct hlist_node *n = NULL;
3908
3909 mutex_lock(&fl->perf_mutex);
3910 hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
3911 if (perf->tid == current->pid) {
3912 fperf = perf;
3913 break;
3914 }
3915 }
3916
3917 mutex_unlock(&fl->perf_mutex);
3918
3919 if (fperf) {
3920 K_COPY_TO_USER(err, 0, (void *)p.perf.data,
3921 fperf, sizeof(*fperf));
3922 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003923 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303924 K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003925 if (err)
3926 goto bail;
3927 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303928 case FASTRPC_IOCTL_CONTROL:
c_mtharue1a5ce12017-10-13 20:47:09 +05303929 K_COPY_FROM_USER(err, 0, &p.cp, param,
3930 sizeof(p.cp));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303931 if (err)
3932 goto bail;
3933 VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
3934 if (err)
3935 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303936 if (p.cp.req == FASTRPC_CONTROL_KALLOC) {
3937 K_COPY_TO_USER(err, 0, param, &p.cp, sizeof(p.cp));
3938 if (err)
3939 goto bail;
3940 }
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303941 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003942 case FASTRPC_IOCTL_GETINFO:
c_mtharue1a5ce12017-10-13 20:47:09 +05303943 K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
Sathish Ambley36849af2017-02-02 09:35:55 -08003944 if (err)
3945 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003946 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
3947 if (err)
3948 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303949 K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003950 if (err)
3951 goto bail;
3952 break;
3953 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08003954 p.init.attrs = 0;
3955 p.init.siglen = 0;
3956 size = sizeof(struct fastrpc_ioctl_init);
3957 /* fall through */
3958 case FASTRPC_IOCTL_INIT_ATTRS:
3959 if (!size)
3960 size = sizeof(struct fastrpc_ioctl_init_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +05303961 K_COPY_FROM_USER(err, 0, &p.init, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003962 if (err)
3963 goto bail;
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05303964 VERIFY(err, p.init.init.filelen >= 0 &&
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303965 p.init.init.filelen < INIT_FILELEN_MAX);
3966 if (err)
3967 goto bail;
3968 VERIFY(err, p.init.init.memlen >= 0 &&
3969 p.init.init.memlen < INIT_MEMLEN_MAX);
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05303970 if (err)
3971 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303972 VERIFY(err, 0 == (err = fastrpc_init_process(fl, &p.init)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003973 if (err)
3974 goto bail;
3975 break;
3976
3977 default:
3978 err = -ENOTTY;
3979		pr_info("bad ioctl: %u\n", ioctl_num);
3980 break;
3981 }
3982 bail:
3983 return err;
3984}
3985
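/*
 * Subsystem-restart notifier: SUBSYS_BEFORE_SHUTDOWN bumps ssrcount, closes
 * the transport and wakes waiting clients; SUBSYS_RAMDUMP_NOTIFICATION arms
 * the remote-heap ramdump for channel 0; SUBSYS_AFTER_POWERUP marks the
 * subsystem as up again.
 */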
3986static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
3987 unsigned long code,
3988 void *data)
3989{
3990 struct fastrpc_apps *me = &gfa;
3991 struct fastrpc_channel_ctx *ctx;
c_mtharue1a5ce12017-10-13 20:47:09 +05303992 struct notif_data *notifdata = data;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003993 int cid;
3994
3995 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
3996 cid = ctx - &me->channel[0];
3997 if (code == SUBSYS_BEFORE_SHUTDOWN) {
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303998 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003999 ctx->ssrcount++;
c_mtharue1a5ce12017-10-13 20:47:09 +05304000 ctx->issubsystemup = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304001 if (ctx->chan) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304002 if (me->glink)
4003 fastrpc_glink_close(ctx->chan, cid);
4004 else
4005 smd_close(ctx->chan);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304006 ctx->chan = NULL;
4007 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
4008 gcinfo[cid].name, MAJOR(me->dev_no), cid);
4009 }
4010 mutex_unlock(&me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05304011 if (cid == 0)
4012 me->staticpd_flags = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004013 fastrpc_notify_drivers(me, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05304014 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
4015 if (me->channel[0].remoteheap_ramdump_dev &&
4016 notifdata->enable_ramdump) {
4017 me->channel[0].ramdumpenabled = 1;
4018 }
4019 } else if (code == SUBSYS_AFTER_POWERUP) {
4020 ctx->issubsystemup = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004021 }
4022
4023 return NOTIFY_DONE;
4024}
4025
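/*
 * Protection domain restart (PDR) notifier for static PDs. A service
 * DOWN event marks the PD as down and notifies its clients (clearing the
 * static PD flags for the audio PD), a RAMDUMP notification enables the
 * remote-heap ramdump on channel 0, and a service UP event marks the PD
 * as up again.
 */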
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304026static int fastrpc_pdr_notifier_cb(struct notifier_block *pdrnb,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304027 unsigned long code,
4028 void *data)
4029{
4030 struct fastrpc_apps *me = &gfa;
4031 struct fastrpc_static_pd *spd;
4032 struct notif_data *notifdata = data;
4033
4034 spd = container_of(pdrnb, struct fastrpc_static_pd, pdrnb);
4035 if (code == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01) {
4036 mutex_lock(&me->smd_mutex);
4037 spd->pdrcount++;
4038 spd->ispdup = 0;
4039 		pr_info("ADSPRPC: PDR notifier for major %d, PD %s\n",
4040 MAJOR(me->dev_no), spd->spdname);
4041 mutex_unlock(&me->smd_mutex);
4042 if (!strcmp(spd->spdname,
4043 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME))
4044 me->staticpd_flags = 0;
4045 fastrpc_notify_pdr_drivers(me, spd->spdname);
4046 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
4047 if (me->channel[0].remoteheap_ramdump_dev &&
4048 notifdata->enable_ramdump) {
4049 me->channel[0].ramdumpenabled = 1;
4050 }
4051 } else if (code == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
4052 spd->ispdup = 1;
4053 }
4054
4055 return NOTIFY_DONE;
4056}
4057
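/*
 * Service locator callback: when the audio or sensors protection domain
 * is located, register the PDR state notifier for that domain (once) and
 * record whether the service is already up.
 */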
4058static int fastrpc_get_service_location_notify(struct notifier_block *nb,
Vamsi krishna Gattupalli4c11c602020-08-25 19:34:14 +05304059 unsigned long opcode, void *data)
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304060{
4061 struct fastrpc_static_pd *spd;
4062 struct pd_qmi_client_data *pdr = data;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304063 int curr_state = 0, i = 0;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304064
4065 spd = container_of(nb, struct fastrpc_static_pd, get_service_nb);
4066 if (opcode == LOCATOR_DOWN) {
4067 		pr_err("ADSPRPC: PD restart notifier: service locator down\n");
4068 return NOTIFY_DONE;
4069 }
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304070 for (i = 0; i < pdr->total_domains; i++) {
Vamsi krishna Gattupalli4c11c602020-08-25 19:34:14 +05304071 if ((!strcmp(spd->spdname, "audio_pdr_adsprpc"))
4072 && (!strcmp(pdr->domain_list[i].name,
4073 "msm/adsp/audio_pd"))) {
4074 goto pdr_register;
4075 } else if ((!strcmp(spd->spdname, "sensors_pdr_adsprpc"))
4076 && (!strcmp(pdr->domain_list[i].name,
4077 "msm/adsp/sensor_pd"))) {
4078 goto pdr_register;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304079 }
4080 }
Vamsi krishna Gattupalli4c11c602020-08-25 19:34:14 +05304081 return NOTIFY_DONE;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304082
Vamsi krishna Gattupalli4c11c602020-08-25 19:34:14 +05304083pdr_register:
4084 if (!spd->pdrhandle) {
4085 spd->pdrhandle =
4086 service_notif_register_notifier(
4087 pdr->domain_list[i].name,
4088 pdr->domain_list[i].instance_id,
4089 &spd->pdrnb, &curr_state);
4090 } else {
4091 pr_err("ADSPRPC: %s is already registered\n", spd->spdname);
4092 }
4093
4094 if (IS_ERR(spd->pdrhandle))
4095 pr_err("ADSPRPC: Unable to register notifier\n");
4096
4097 if (curr_state == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
4098 pr_info("ADSPRPC: %s is up\n", spd->spdname);
4099 spd->ispdup = 1;
4100 } else if (curr_state == SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01) {
4101 		pr_info("ADSPRPC: %s is uninitialized\n", spd->spdname);
4102 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304103 return NOTIFY_DONE;
4104}
4105
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004106static const struct file_operations fops = {
4107 .open = fastrpc_device_open,
4108 .release = fastrpc_device_release,
4109 .unlocked_ioctl = fastrpc_device_ioctl,
4110 .compat_ioctl = compat_fastrpc_device_ioctl,
4111};
4112
4113static const struct of_device_id fastrpc_match_table[] = {
4114 { .compatible = "qcom,msm-fastrpc-adsp", },
4115 { .compatible = "qcom,msm-fastrpc-compute", },
4116 { .compatible = "qcom,msm-fastrpc-compute-cb", },
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304117 { .compatible = "qcom,msm-fastrpc-legacy-compute", },
4118 { .compatible = "qcom,msm-fastrpc-legacy-compute-cb", },
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004119 { .compatible = "qcom,msm-adsprpc-mem-region", },
4120 {}
4121};
4122
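/*
 * Probe a compute context bank: match the "label" property against a
 * channel, create an ARM IOMMU mapping (with a secure VMID for secure
 * context banks), attach the device and add it as a new session on that
 * channel.
 */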
4123static int fastrpc_cb_probe(struct device *dev)
4124{
4125 struct fastrpc_channel_ctx *chan;
4126 struct fastrpc_session_ctx *sess;
4127 struct of_phandle_args iommuspec;
4128 const char *name;
4129 unsigned int start = 0x80000000;
4130 int err = 0, i;
4131 int secure_vmid = VMID_CP_PIXEL;
4132
c_mtharue1a5ce12017-10-13 20:47:09 +05304133 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
4134 "label", NULL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004135 if (err)
4136 goto bail;
4137 for (i = 0; i < NUM_CHANNELS; i++) {
4138 if (!gcinfo[i].name)
4139 continue;
4140 if (!strcmp(name, gcinfo[i].name))
4141 break;
4142 }
4143 VERIFY(err, i < NUM_CHANNELS);
4144 if (err)
4145 goto bail;
4146 chan = &gcinfo[i];
4147 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4148 if (err)
4149 goto bail;
4150
4151 VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
4152 "#iommu-cells", 0, &iommuspec));
4153 if (err)
4154 goto bail;
4155 sess = &chan->session[chan->sesscount];
4156 sess->smmu.cb = iommuspec.args[0] & 0xf;
4157 sess->used = 0;
4158 sess->smmu.coherent = of_property_read_bool(dev->of_node,
4159 "dma-coherent");
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304160 sess->smmu.sharedcb = of_property_read_bool(dev->of_node,
4161 "shared-cb");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004162 sess->smmu.secure = of_property_read_bool(dev->of_node,
4163 "qcom,secure-context-bank");
4164 if (sess->smmu.secure)
4165 start = 0x60000000;
4166 VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
4167 arm_iommu_create_mapping(&platform_bus_type,
Mohammed Nayeem Ur Rahman62f7f9c2020-04-13 11:16:19 +05304168 start, MAX_SIZE_LIMIT)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004169 if (err)
4170 goto bail;
4171
4172 if (sess->smmu.secure)
4173 iommu_domain_set_attr(sess->smmu.mapping->domain,
4174 DOMAIN_ATTR_SECURE_VMID,
4175 &secure_vmid);
4176
4177 VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
4178 if (err)
4179 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05304180 sess->smmu.dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004181 sess->smmu.enabled = 1;
4182 chan->sesscount++;
Sathish Ambley1ca68232017-01-19 10:32:55 -08004183 debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
4184 NULL, &debugfs_fops);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004185bail:
4186 return err;
4187}
4188
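/*
 * Legacy context bank probe: every SID listed in the "sids" property
 * shares one IOMMU mapping attached to this device, and one session per
 * SID is added to the matching channel.
 */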
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304189static int fastrpc_cb_legacy_probe(struct device *dev)
4190{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304191 struct fastrpc_channel_ctx *chan;
4192 struct fastrpc_session_ctx *first_sess = NULL, *sess = NULL;
4193 const char *name;
4194 unsigned int *sids = NULL, sids_size = 0;
4195 int err = 0, ret = 0, i;
4196
4197 unsigned int start = 0x80000000;
4198
4199 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
4200 "label", NULL)));
4201 if (err)
4202 goto bail;
4203
4204 for (i = 0; i < NUM_CHANNELS; i++) {
4205 if (!gcinfo[i].name)
4206 continue;
4207 if (!strcmp(name, gcinfo[i].name))
4208 break;
4209 }
4210 VERIFY(err, i < NUM_CHANNELS);
4211 if (err)
4212 goto bail;
4213
4214 chan = &gcinfo[i];
4215 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4216 if (err)
4217 goto bail;
4218
4219 first_sess = &chan->session[chan->sesscount];
4220
4221 VERIFY(err, NULL != of_get_property(dev->of_node,
4222 "sids", &sids_size));
4223 if (err)
4224 goto bail;
4225
4226 VERIFY(err, NULL != (sids = kzalloc(sids_size, GFP_KERNEL)));
4227 if (err)
4228 goto bail;
4229 ret = of_property_read_u32_array(dev->of_node, "sids", sids,
4230 sids_size/sizeof(unsigned int));
4231 if (ret)
4232 goto bail;
4233
4234 VERIFY(err, !IS_ERR_OR_NULL(first_sess->smmu.mapping =
4235 arm_iommu_create_mapping(&platform_bus_type,
4236 start, 0x78000000)));
4237 if (err)
4238 goto bail;
4239
4240 VERIFY(err, !arm_iommu_attach_device(dev, first_sess->smmu.mapping));
4241 if (err)
4242 goto bail;
4243
4244
4245 for (i = 0; i < sids_size/sizeof(unsigned int); i++) {
4246 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4247 if (err)
4248 goto bail;
4249 sess = &chan->session[chan->sesscount];
4250 sess->smmu.cb = sids[i];
4251 sess->smmu.dev = dev;
4252 sess->smmu.mapping = first_sess->smmu.mapping;
4253 sess->smmu.enabled = 1;
4254 sess->used = 0;
4255 sess->smmu.coherent = false;
4256 sess->smmu.secure = false;
4257 chan->sesscount++;
4258 }
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304259bail:
4260 kfree(sids);
4261 return err;
4262}
4263
4264
4265
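/*
 * Read the secure VMID list from the given DT property and populate the
 * destination VMID/permission arrays (read/write/exec for each VMID).
 */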
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304266static void init_secure_vmid_list(struct device *dev, char *prop_name,
4267 struct secure_vm *destvm)
4268{
4269 int err = 0;
4270 u32 len = 0, i = 0;
4271 u32 *rhvmlist = NULL;
4272 u32 *rhvmpermlist = NULL;
4273
4274 if (!of_find_property(dev->of_node, prop_name, &len))
4275 goto bail;
4276 if (len == 0)
4277 goto bail;
4278 len /= sizeof(u32);
4279 VERIFY(err, NULL != (rhvmlist = kcalloc(len, sizeof(u32), GFP_KERNEL)));
4280 if (err)
4281 goto bail;
4282 VERIFY(err, NULL != (rhvmpermlist = kcalloc(len, sizeof(u32),
4283 GFP_KERNEL)));
4284 if (err)
4285 goto bail;
4286 for (i = 0; i < len; i++) {
4287 err = of_property_read_u32_index(dev->of_node, prop_name, i,
4288 &rhvmlist[i]);
4289 rhvmpermlist[i] = PERM_READ | PERM_WRITE | PERM_EXEC;
4290 		pr_info("ADSPRPC: Secure VMID = %d\n", rhvmlist[i]);
4291 if (err) {
4292 pr_err("ADSPRPC: Failed to read VMID\n");
4293 goto bail;
4294 }
4295 }
4296 destvm->vmid = rhvmlist;
4297 destvm->vmperm = rhvmpermlist;
4298 destvm->vmcount = len;
4299bail:
4300 if (err) {
4301 kfree(rhvmlist);
4302 kfree(rhvmpermlist);
4303 }
4304}
4305
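/* Mark each channel as secure or non-secure from the DT bitmask. */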
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304306static void configure_secure_channels(uint32_t secure_domains)
4307{
4308 struct fastrpc_apps *me = &gfa;
4309 int ii = 0;
4310 /*
4311 * secure_domains contains the bitmask of the secure channels
4312 * Bit 0 - ADSP
4313 * Bit 1 - MDSP
4314 * Bit 2 - SLPI
4315 * Bit 3 - CDSP
4316 */
4317 for (ii = ADSP_DOMAIN_ID; ii <= CDSP_DOMAIN_ID; ++ii) {
4318 int secure = (secure_domains >> ii) & 0x01;
4319
4320 me->channel[ii].secure = secure;
4321 }
4322}
4323
4324
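/*
 * Platform probe, dispatched on the compatible string: set up secure
 * VMIDs, RPC latency and secure-channel flags for the compute node,
 * probe context banks, hyp-assign the ADSP ION/CMA region unless access
 * is restricted, and register the audio and sensors PDR service
 * locators before populating child devices.
 */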
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004325static int fastrpc_probe(struct platform_device *pdev)
4326{
4327 int err = 0;
4328 struct fastrpc_apps *me = &gfa;
4329 struct device *dev = &pdev->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004330 struct device_node *ion_node, *node;
4331 struct platform_device *ion_pdev;
4332 struct cma *cma;
4333 uint32_t val;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304334 int ret = 0;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304335 uint32_t secure_domains;
c_mtharu63ffc012017-11-16 15:26:56 +05304336
4337 if (of_device_is_compatible(dev->of_node,
4338 "qcom,msm-fastrpc-compute")) {
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304339 init_secure_vmid_list(dev, "qcom,adsp-remoteheap-vmid",
4340 &gcinfo[0].rhvm);
c_mtharu63ffc012017-11-16 15:26:56 +05304341
c_mtharu63ffc012017-11-16 15:26:56 +05304342
4343 of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
4344 &me->latency);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304345 if (of_get_property(dev->of_node,
4346 "qcom,secure-domains", NULL) != NULL) {
4347 VERIFY(err, !of_property_read_u32(dev->of_node,
4348 "qcom,secure-domains",
4349 &secure_domains));
zhaochenfc798572018-08-17 15:32:37 +08004350 if (!err) {
4351 me->secure_flag = true;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304352 configure_secure_channels(secure_domains);
zhaochenfc798572018-08-17 15:32:37 +08004353 } else {
4354 me->secure_flag = false;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304355 pr_info("adsprpc: unable to read the domain configuration from dts\n");
zhaochenfc798572018-08-17 15:32:37 +08004356 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304357 }
c_mtharu63ffc012017-11-16 15:26:56 +05304358 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004359 if (of_device_is_compatible(dev->of_node,
4360 "qcom,msm-fastrpc-compute-cb"))
4361 return fastrpc_cb_probe(dev);
4362
4363 if (of_device_is_compatible(dev->of_node,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304364 "qcom,msm-fastrpc-legacy-compute")) {
4365 me->glink = false;
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304366 me->legacy = 1;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304367 }
4368
4369 if (of_device_is_compatible(dev->of_node,
4370 		"qcom,msm-fastrpc-legacy-compute-cb")) {
4371 return fastrpc_cb_legacy_probe(dev);
4372 }
4373
4374 if (of_device_is_compatible(dev->of_node,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004375 "qcom,msm-adsprpc-mem-region")) {
4376 me->dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004377 ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
4378 if (ion_node) {
4379 for_each_available_child_of_node(ion_node, node) {
4380 if (of_property_read_u32(node, "reg", &val))
4381 continue;
4382 if (val != ION_ADSP_HEAP_ID)
4383 continue;
4384 ion_pdev = of_find_device_by_node(node);
4385 if (!ion_pdev)
4386 break;
4387 cma = dev_get_cma_area(&ion_pdev->dev);
4388 if (cma) {
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304389 me->range.addr = cma_get_base(cma);
4390 me->range.size =
4391 (size_t)cma_get_size(cma);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004392 }
4393 break;
4394 }
4395 }
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304396 if (me->range.addr && !of_property_read_bool(dev->of_node,
Tharun Kumar Merugu6b7a4a22018-01-17 16:08:07 +05304397 "restrict-access")) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004398 int srcVM[1] = {VMID_HLOS};
4399 int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
4400 VMID_ADSP_Q6};
Sathish Ambley84d11862017-05-15 14:36:05 -07004401 int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004402 PERM_READ | PERM_WRITE | PERM_EXEC,
4403 PERM_READ | PERM_WRITE | PERM_EXEC,
4404 PERM_READ | PERM_WRITE | PERM_EXEC,
4405 };
4406
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304407 VERIFY(err, !hyp_assign_phys(me->range.addr,
4408 me->range.size, srcVM, 1,
4409 destVM, destVMperm, 4));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004410 if (err)
4411 goto bail;
4412 }
4413 return 0;
4414 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304415 if (of_property_read_bool(dev->of_node,
4416 "qcom,fastrpc-adsp-audio-pdr")) {
4417 int session;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004418
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304419 VERIFY(err, !fastrpc_get_adsp_session(
4420 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
4421 if (err)
4422 goto spdbail;
4423 me->channel[0].spd[session].get_service_nb.notifier_call =
4424 fastrpc_get_service_location_notify;
4425 ret = get_service_location(
4426 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
4427 AUDIO_PDR_ADSP_SERVICE_NAME,
4428 &me->channel[0].spd[session].get_service_nb);
4429 if (ret)
4430 pr_err("ADSPRPC: Get service location failed: %d\n",
4431 ret);
4432 }
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304433 if (of_property_read_bool(dev->of_node,
4434 "qcom,fastrpc-adsp-sensors-pdr")) {
4435 int session;
4436
4437 VERIFY(err, !fastrpc_get_adsp_session(
4438 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
4439 if (err)
4440 goto spdbail;
4441 me->channel[0].spd[session].get_service_nb.notifier_call =
4442 fastrpc_get_service_location_notify;
4443 ret = get_service_location(
4444 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
4445 SENSORS_PDR_ADSP_SERVICE_NAME,
4446 &me->channel[0].spd[session].get_service_nb);
4447 if (ret)
4448 pr_err("ADSPRPC: Get service location failed: %d\n",
4449 ret);
4450 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304451spdbail:
4452 err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004453 VERIFY(err, !of_platform_populate(pdev->dev.of_node,
4454 fastrpc_match_table,
4455 NULL, &pdev->dev));
4456 if (err)
4457 goto bail;
4458bail:
4459 return err;
4460}
4461
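/*
 * Drop every open channel, detach and release all SMMU mappings and free
 * the per-channel secure VMID lists.
 */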
4462static void fastrpc_deinit(void)
4463{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304464 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004465 struct fastrpc_channel_ctx *chan = gcinfo;
4466 int i, j;
4467
4468 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
4469 if (chan->chan) {
4470 kref_put_mutex(&chan->kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304471 fastrpc_channel_close, &me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05304472 chan->chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004473 }
4474 for (j = 0; j < NUM_SESSIONS; j++) {
4475 struct fastrpc_session_ctx *sess = &chan->session[j];
c_mtharue1a5ce12017-10-13 20:47:09 +05304476 if (sess->smmu.dev) {
4477 arm_iommu_detach_device(sess->smmu.dev);
4478 sess->smmu.dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004479 }
4480 if (sess->smmu.mapping) {
4481 arm_iommu_release_mapping(sess->smmu.mapping);
c_mtharue1a5ce12017-10-13 20:47:09 +05304482 sess->smmu.mapping = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004483 }
4484 }
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304485 kfree(chan->rhvm.vmid);
4486 kfree(chan->rhvm.vmperm);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004487 }
4488}
4489
4490static struct platform_driver fastrpc_driver = {
4491 .probe = fastrpc_probe,
4492 .driver = {
4493 .name = "fastrpc",
4494 .owner = THIS_MODULE,
4495 .of_match_table = fastrpc_match_table,
4496 },
4497};
4498
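/*
 * Module init: register the platform driver, allocate the char device
 * region, create the non-secure and secure device nodes, register a
 * subsystem restart notifier for every channel, and create the ION
 * client and debugfs root.
 */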
4499static int __init fastrpc_device_init(void)
4500{
4501 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +05304502 struct device *dev = NULL;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304503 struct device *secure_dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004504 int err = 0, i;
4505
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05304506 debugfs_root = debugfs_create_dir("adsprpc", NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004507 memset(me, 0, sizeof(*me));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004508 fastrpc_init(me);
4509 me->dev = NULL;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304510 me->glink = true;
zhaochenfc798572018-08-17 15:32:37 +08004511 me->secure_flag = false;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004512 VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
4513 if (err)
4514 goto register_bail;
4515 VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
4516 DEVICE_NAME));
4517 if (err)
4518 goto alloc_chrdev_bail;
4519 cdev_init(&me->cdev, &fops);
4520 me->cdev.owner = THIS_MODULE;
4521 VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304522 NUM_DEVICES));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004523 if (err)
4524 goto cdev_init_bail;
4525 me->class = class_create(THIS_MODULE, "fastrpc");
4526 VERIFY(err, !IS_ERR(me->class));
4527 if (err)
4528 goto class_create_bail;
4529 me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304530
4531 /*
4532 * Create devices and register with sysfs
4533 * Create first device with minor number 0
4534 */
Sathish Ambley36849af2017-02-02 09:35:55 -08004535 dev = device_create(me->class, NULL,
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304536 MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV),
4537 NULL, DEVICE_NAME);
Sathish Ambley36849af2017-02-02 09:35:55 -08004538 VERIFY(err, !IS_ERR_OR_NULL(dev));
4539 if (err)
4540 goto device_create_bail;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304541
4542 /* Create secure device with minor number for secure device */
4543 secure_dev = device_create(me->class, NULL,
4544 MKDEV(MAJOR(me->dev_no), MINOR_NUM_SECURE_DEV),
4545 NULL, DEVICE_NAME_SECURE);
4546 VERIFY(err, !IS_ERR_OR_NULL(secure_dev));
4547 if (err)
4548 goto device_create_bail;
4549
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004550 for (i = 0; i < NUM_CHANNELS; i++) {
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304551 me->channel[i].dev = secure_dev;
4552 if (i == CDSP_DOMAIN_ID)
4553 me->channel[i].dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004554 me->channel[i].ssrcount = 0;
4555 me->channel[i].prevssrcount = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05304556 me->channel[i].issubsystemup = 1;
4557 me->channel[i].ramdumpenabled = 0;
4558 me->channel[i].remoteheap_ramdump_dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004559 me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
4560 me->channel[i].handle = subsys_notif_register_notifier(
4561 gcinfo[i].subsys,
4562 &me->channel[i].nb);
4563 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004564 me->client = msm_ion_client_create(DEVICE_NAME);
4565 VERIFY(err, !IS_ERR_OR_NULL(me->client));
4566 if (err)
4567 goto device_create_bail;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05304568
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004569 return 0;
4570device_create_bail:
4571 for (i = 0; i < NUM_CHANNELS; i++) {
Sathish Ambley36849af2017-02-02 09:35:55 -08004572 if (me->channel[i].handle)
4573 subsys_notif_unregister_notifier(me->channel[i].handle,
4574 &me->channel[i].nb);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004575 }
Sathish Ambley36849af2017-02-02 09:35:55 -08004576 if (!IS_ERR_OR_NULL(dev))
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304577 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4578 MINOR_NUM_DEV));
4579 if (!IS_ERR_OR_NULL(secure_dev))
4580 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4581 MINOR_NUM_SECURE_DEV));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004582 class_destroy(me->class);
4583class_create_bail:
4584 cdev_del(&me->cdev);
4585cdev_init_bail:
4586 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
4587alloc_chrdev_bail:
4588register_bail:
4589 fastrpc_deinit();
4590 return err;
4591}
4592
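/*
 * Module exit: tear down open files and channels, destroy both device
 * nodes, the class, the cdev and the ION client, and remove the debugfs
 * entries.
 */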
4593static void __exit fastrpc_device_exit(void)
4594{
4595 struct fastrpc_apps *me = &gfa;
4596 int i;
4597
4598 fastrpc_file_list_dtor(me);
4599 fastrpc_deinit();
4600 for (i = 0; i < NUM_CHANNELS; i++) {
4601 if (!gcinfo[i].name)
4602 continue;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004603 subsys_notif_unregister_notifier(me->channel[i].handle,
4604 &me->channel[i].nb);
4605 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304606
4607 /* Destroy the secure and non secure devices */
4608 device_destroy(me->class, MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV));
4609 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4610 MINOR_NUM_SECURE_DEV));
4611
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004612 class_destroy(me->class);
4613 cdev_del(&me->cdev);
4614 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
4615 ion_client_destroy(me->client);
Sathish Ambley1ca68232017-01-19 10:32:55 -08004616 debugfs_remove_recursive(debugfs_root);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004617}
4618
4619late_initcall(fastrpc_device_init);
4620module_exit(fastrpc_device_exit);
4621
4622MODULE_LICENSE("GPL v2");