/*
 * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/msm_ion.h>
#include <soc/qcom/secure_buffer.h>
#include <soc/qcom/glink.h>
#include <soc/qcom/smd.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/service-notifier.h>
#include <soc/qcom/service-locator.h>
#include <linux/scatterlist.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/sort.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <asm/dma-iommu.h>
#include <soc/qcom/scm.h>
#include "adsprpc_compat.h"
#include "adsprpc_shared.h"
#include <soc/qcom/ramdump.h>
#include <linux/debugfs.h>
#include <linux/pm_qos.h>

#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
#define TZ_PIL_AUTH_QDSP6_PROC 1
#define ADSP_MMAP_HEAP_ADDR 4
#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
#define ADSP_MMAP_ADD_PAGES 0x1000
#define FASTRPC_DMAHANDLE_NOMAP (16)

#define FASTRPC_ENOSUCH 39
#define VMID_SSC_Q6 5
#define VMID_ADSP_Q6 6
#define DEBUGFS_SIZE 3072
#define UL_SIZE 25
#define PID_SIZE 10

#define AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME "audio_pdr_adsprpc"
#define AUDIO_PDR_ADSP_SERVICE_NAME "avs/audio"

#define SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME "sensors_pdr_adsprpc"
#define SENSORS_PDR_ADSP_SERVICE_NAME "tms/servreg"

#define RPC_TIMEOUT (5 * HZ)
#define BALIGN 128
#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
#define M_FDLIST (16)
#define M_CRCLIST (64)
#define SESSION_ID_INDEX (30)
#define FASTRPC_CTX_MAGIC (0xbeeddeed)
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_CTXID_MASK (0xFF0)
#define NUM_DEVICES 2 /* adsprpc-smd, adsprpc-smd-secure */
#define MINOR_NUM_DEV 0
#define MINOR_NUM_SECURE_DEV 1
#define NON_SECURE_CHANNEL 0
#define SECURE_CHANNEL 1

#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)

#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)

#define FASTRPC_LINK_STATE_DOWN (0x0)
#define FASTRPC_LINK_STATE_UP (0x1)
#define FASTRPC_LINK_DISCONNECTED (0x0)
#define FASTRPC_LINK_CONNECTING (0x1)
#define FASTRPC_LINK_CONNECTED (0x3)
#define FASTRPC_LINK_DISCONNECTING (0x7)
#define FASTRPC_LINK_REMOTE_DISCONNECTING (0x8)
#define FASTRPC_GLINK_INTENT_LEN (64)
#define FASTRPC_GLINK_INTENT_NUM (16)

#define PERF_KEYS \
	"count:flush:map:copy:glink:getargs:putargs:invalidate:invoke:tid:ptr"
#define FASTRPC_STATIC_HANDLE_KERNEL (1)
#define FASTRPC_STATIC_HANDLE_LISTENER (3)
#define FASTRPC_STATIC_HANDLE_MAX (20)
#define FASTRPC_LATENCY_CTRL_ENB (1)

#define MAX_SIZE_LIMIT (0x78000000)
#define INIT_FILELEN_MAX (2*1024*1024)
#define INIT_MEMLEN_MAX (8*1024*1024)

#define PERF_END (void)0

#define PERF(enb, cnt, ff) \
	{\
		struct timespec startT = {0};\
		int64_t *counter = cnt;\
		if (enb && counter) {\
			getnstimeofday(&startT);\
		} \
		ff ;\
		if (enb && counter) {\
			*counter += getnstimediff(&startT);\
		} \
	}

#define GET_COUNTER(perf_ptr, offset)  \
	(perf_ptr != NULL ?\
		(((offset >= 0) && (offset < PERF_KEY_MAX)) ?\
			(int64_t *)(perf_ptr + offset)\
				: (int64_t *)NULL) : (int64_t *)NULL)

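/*
 * Profiling helpers: PERF() samples getnstimeofday() around the statement
 * block "ff" and, when profiling is enabled and a counter slot is available,
 * adds the elapsed nanoseconds to *counter.  GET_COUNTER() turns an int64_t
 * pointer to the start of a struct fastrpc_perf plus a fastrpc_perfkeys
 * offset into the matching counter slot, returning NULL for an out-of-range
 * key; get_args() below uses the pair to time its mapping phase.
 */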
static int fastrpc_glink_open(int cid);
static void fastrpc_glink_close(void *chan, int cid);
static int fastrpc_pdr_notifier_cb(struct notifier_block *nb,
					unsigned long code,
					void *data);
static struct dentry *debugfs_root;
static struct dentry *debugfs_global_file;

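/*
 * Page-arithmetic helpers.  For example, with 4K pages a buffer at address
 * 0x1010 with length 0x2000 starts on page 0x1000 (offset 0x10) and spans
 * buf_num_pages() == 3 pages; buf_page_size() rounds a size up to whole
 * pages and never returns less than PAGE_SIZE.
 */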
static inline uint64_t buf_page_start(uint64_t buf)
{
	uint64_t start = (uint64_t) buf & PAGE_MASK;
	return start;
}

static inline uint64_t buf_page_offset(uint64_t buf)
{
	uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
	return offset;
}

static inline uint64_t buf_num_pages(uint64_t buf, size_t len)
{
	uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
	uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
	uint64_t nPages = end - start + 1;
	return nPages;
}

static inline uint64_t buf_page_size(uint32_t size)
{
	uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;

	return sz > PAGE_SIZE ? sz : PAGE_SIZE;
}

static inline void *uint64_to_ptr(uint64_t addr)
{
	void *ptr = (void *)((uintptr_t)addr);

	return ptr;
}

static inline uint64_t ptr_to_uint64(void *ptr)
{
	uint64_t addr = (uint64_t)((uintptr_t)ptr);

	return addr;
}

struct secure_vm {
	int *vmid;
	int *vmperm;
	int vmcount;
};

struct fastrpc_file;

struct fastrpc_buf {
	struct hlist_node hn;
	struct hlist_node hn_rem;
	struct fastrpc_file *fl;
	void *virt;
	uint64_t phys;
	size_t size;
	unsigned long dma_attr;
	uintptr_t raddr;
	uint32_t flags;
	int remote;
};

struct fastrpc_ctx_lst;

struct overlap {
	uintptr_t start;
	uintptr_t end;
	int raix;
	uintptr_t mstart;
	uintptr_t mend;
	uintptr_t offset;
};

struct smq_invoke_ctx {
	struct hlist_node hn;
	struct completion work;
	int retval;
	int pid;
	int tgid;
	remote_arg_t *lpra;
	remote_arg64_t *rpra;
	remote_arg64_t *lrpra; /* Local copy of rpra for put_args */
	int *fds;
	struct fastrpc_mmap **maps;
	struct fastrpc_buf *buf;
	struct fastrpc_buf *lbuf;
	size_t used;
	struct fastrpc_file *fl;
	uint32_t sc;
	struct overlap *overs;
	struct overlap **overps;
	struct smq_msg msg;
	unsigned int magic;
	unsigned int *attrs;
	uint32_t *crc;
	uint64_t ctxid;
	void *handle;
	const void *ptr;
};

struct fastrpc_ctx_lst {
	struct hlist_head pending;
	struct hlist_head interrupted;
};

struct fastrpc_smmu {
	struct device *dev;
	struct dma_iommu_mapping *mapping;
	int cb;
	int enabled;
	int faults;
	int secure;
	int coherent;
	int sharedcb;
};

struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};

struct fastrpc_static_pd {
	char *spdname;
	struct notifier_block pdrnb;
	struct notifier_block get_service_nb;
	void *pdrhandle;
	int pdrcount;
	int prevpdrcount;
	int ispdup;
};

struct fastrpc_glink_info {
	int link_state;
	int port_state;
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};

struct fastrpc_channel_ctx {
	char *name;
	char *subsys;
	void *chan;
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct fastrpc_static_pd spd[NUM_SESSIONS];
	struct completion work;
	struct completion workport;
	struct notifier_block nb;
	struct kref kref;
	int channel;
	int sesscount;
	int ssrcount;
	void *handle;
	int prevssrcount;
	int issubsystemup;
	int vmid;
	struct secure_vm rhvm;
	int ramdumpenabled;
	void *remoteheap_ramdump_dev;
	struct fastrpc_glink_info link;
	/* Indicates, if channel is restricted to secure node only */
	int secure;
};

struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;
	uint32_t staticpd_flags;
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;
	spinlock_t hlock;
	struct ion_client *client;
	struct device *dev;
	unsigned int latency;
	bool glink;
	bool legacy;
	bool secure_flag;
	spinlock_t ctxlock;
	struct smq_invoke_ctx *ctxtable[FASTRPC_CTX_MAX];
};

struct fastrpc_mmap {
	struct hlist_node hn;
	struct fastrpc_file *fl;
	struct fastrpc_apps *apps;
	int fd;
	uint32_t flags;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;
	size_t size;
	uintptr_t va;
	size_t len;
	int refs;
	uintptr_t raddr;
	int uncached;
	int secure;
	uintptr_t attr;
	bool is_filemap; /* flag to indicate map used in process init */
};

enum fastrpc_perfkeys {
	PERF_COUNT = 0,
	PERF_FLUSH = 1,
	PERF_MAP = 2,
	PERF_COPY = 3,
	PERF_LINK = 4,
	PERF_GETARGS = 5,
	PERF_PUTARGS = 6,
	PERF_INVARGS = 7,
	PERF_INVOKE = 8,
	PERF_KEY_MAX = 9,
};

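/*
 * These keys index the int64_t counters at the start of struct fastrpc_perf
 * below (and the matching names in the PERF_KEYS string): getperfcounter()
 * and GET_COUNTER() simply add the key to an int64_t pointer aimed at the
 * start of the per-thread perf record.
 */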
struct fastrpc_perf {
	int64_t count;
	int64_t flush;
	int64_t map;
	int64_t copy;
	int64_t link;
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;
	int64_t invoke;
	int64_t tid;
	struct hlist_node hn;
};

struct fastrpc_file {
	struct hlist_node hn;
	spinlock_t hlock;
	struct hlist_head maps;
	struct hlist_head cached_bufs;
	struct hlist_head remote_bufs;
	struct fastrpc_ctx_lst clst;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_buf *init_mem;
	struct fastrpc_session_ctx *secsctx;
	uint32_t mode;
	uint32_t profile;
	int sessionid;
	int tgid;
	int cid;
	int ssrcount;
	int pd;
	char *spdname;
	int file_close;
	int sharedcb;
	struct fastrpc_apps *apps;
	struct hlist_head perf;
	struct dentry *debugfs_file;
	struct mutex perf_mutex;
	struct pm_qos_request pm_qos_req;
	int qos_request;
	struct mutex pm_qos_mutex;
	struct mutex map_mutex;
	struct mutex fl_map_mutex;
	int refcount;
	/* Identifies the device (MINOR_NUM_DEV / MINOR_NUM_SECURE_DEV) */
	int dev_minor;
	char *debug_buf;
};

static struct fastrpc_apps gfa;

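/*
 * Per-channel bookkeeping.  The array order follows the domain IDs above
 * (ADSP_DOMAIN_ID..CDSP_DOMAIN_ID), so channel[fl->cid] picks the remote
 * subsystem a file is bound to.
 */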
static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.channel = SMD_APPS_QDSP,
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
		.spd = {
			{
				.spdname =
					AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
				.pdrnb.notifier_call =
					fastrpc_pdr_notifier_cb,
			},
			{
				.spdname =
					SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
				.pdrnb.notifier_call =
					fastrpc_pdr_notifier_cb,
			}
		},
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.channel = SMD_APPS_MODEM,
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.channel = SMD_APPS_DSPS,
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};

static int hlosvm[1] = {VMID_HLOS};
static int hlosvmperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

static inline int64_t getnstimediff(struct timespec *start)
{
	int64_t ns;
	struct timespec ts, b;

	getnstimeofday(&ts);
	b = timespec_sub(ts, *start);
	ns = timespec_to_ns(&b);
	return ns;
}

static inline int64_t *getperfcounter(struct fastrpc_file *fl, int key)
{
	int err = 0;
	int64_t *val = NULL;
	struct fastrpc_perf *perf = NULL, *fperf = NULL;
	struct hlist_node *n = NULL;

	VERIFY(err, !IS_ERR_OR_NULL(fl));
	if (err)
		goto bail;

	mutex_lock(&fl->perf_mutex);
	hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
		if (perf->tid == current->pid) {
			fperf = perf;
			break;
		}
	}

	if (IS_ERR_OR_NULL(fperf)) {
		fperf = kzalloc(sizeof(*fperf), GFP_KERNEL);

		VERIFY(err, !IS_ERR_OR_NULL(fperf));
		if (err) {
			mutex_unlock(&fl->perf_mutex);
			kfree(fperf);
			goto bail;
		}

		fperf->tid = current->pid;
		hlist_add_head(&fperf->hn, &fl->perf);
	}

	val = ((int64_t *)fperf) + key;
	mutex_unlock(&fl->perf_mutex);
bail:
	return val;
}

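/*
 * Release a DMA buffer.  With @cache set the buffer is parked on the file's
 * cached_bufs list for later reuse; otherwise it is unhooked from the
 * remote list if needed, reassigned back to HLOS when the channel uses a
 * separate VMID, and freed with dma_free_attrs().
 */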
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
	int vmid, err = 0;

	if (!fl)
		return;
	if (cache) {
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->cached_bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (buf->remote) {
		spin_lock(&fl->hlock);
		hlist_del_init(&buf->hn_rem);
		spin_unlock(&fl->hlock);
		buf->remote = 0;
		buf->raddr = 0;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, fl->sctx != NULL);
		if (err)
			goto bail;
		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_attrs(fl->sctx->smmu.dev, buf->size, buf->virt,
					buf->phys, buf->dma_attr);
	}
bail:
	kfree(buf);
}

static void fastrpc_cached_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
			hlist_del_init(&buf->hn);
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}

static void fastrpc_remote_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->remote_bufs, hn_rem) {
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}

static void fastrpc_mmap_add(struct fastrpc_mmap *map)
{
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		struct fastrpc_apps *me = &gfa;

		spin_lock(&me->hlock);
		hlist_add_head(&map->hn, &me->maps);
		spin_unlock(&me->hlock);
	} else {
		struct fastrpc_file *fl = map->fl;

		hlist_add_head(&map->hn, &fl->maps);
	}
}

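/*
 * Look up an existing mapping that covers [va, va + len) for the given fd.
 * Heap mappings live on the global list, everything else on the file's own
 * list; when @refs is set the reference count is bumped (bounded against
 * overflow) before the match is returned.
 */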
static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
			uintptr_t va, size_t len, int mflags, int refs,
			struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_mmap *match = NULL, *map = NULL;
	struct hlist_node *n;

	if ((va + len) < va)
		return -EOVERFLOW;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs) {
					if (map->refs + 1 == INT_MAX) {
						spin_unlock(&me->hlock);
						return -ETOOMANYREFS;
					}
					map->refs++;
				}
				match = map;
				break;
			}
		}
		spin_unlock(&me->hlock);
	} else {
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs) {
					if (map->refs + 1 == INT_MAX)
						return -ETOOMANYREFS;
					map->refs++;
				}
				match = map;
				break;
			}
		}
	}
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

static int dma_alloc_memory(dma_addr_t *region_phys, void **vaddr, size_t size,
			unsigned long dma_attrs)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;

	if (me->dev == NULL) {
		pr_err("device adsprpc-mem is not initialized\n");
		return -ENODEV;
	}
	VERIFY(err, size > 0 && size < MAX_SIZE_LIMIT);
	if (err) {
		err = -EFAULT;
		pr_err("adsprpc: %s: invalid allocation size 0x%zx\n",
			__func__, size);
		return err;
	}
	*vaddr = dma_alloc_attrs(me->dev, size, region_phys, GFP_KERNEL,
				dma_attrs);
	if (IS_ERR_OR_NULL(*vaddr)) {
		pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx, returned %pK\n",
			current->comm, __func__, size, (*vaddr));
		return -ENOMEM;
	}
	return 0;
}

static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			size_t len, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = NULL, *map;
	struct hlist_node *n;
	struct fastrpc_apps *me = &gfa;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
		if (map->refs == 1 && map->raddr == va &&
			map->raddr + map->len == va + len &&
			/* Remove map if not used in process initialization */
			!map->is_filemap) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (map->refs == 1 && map->raddr == va &&
			map->raddr + map->len == va + len &&
			/* Remove map if not used in process initialization */
			!map->is_filemap) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}

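/*
 * Drop a reference on a mapping and tear it down once unused.  Remote-heap
 * maps are returned to the adsprpc-mem device with dma_free_attrs(),
 * FASTRPC_DMAHANDLE_NOMAP maps only release their ION handle, and regular
 * maps are unmapped from the SMMU, reassigned to HLOS if they had been
 * shared with a remote VMID, and detached from their dma_buf.
 */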
static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl;
	int vmid, cid = -1, err = 0;
	struct fastrpc_session_ctx *sess;

	if (!map)
		return;
	fl = map->fl;
	if (!fl)
		return;
	if (!(map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)) {
		cid = fl->cid;
		VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
		if (err) {
			err = -ECHRNG;
			pr_err("adsprpc: ERROR:%s, Invalid channel id: %d, err:%d",
				__func__, cid, err);
			return;
		}
	}
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		spin_unlock(&me->hlock);
		if (map->refs > 0)
			return;
	} else {
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		if (map->refs > 0 && !flags)
			return;
	}
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {

		if (me->dev == NULL) {
			pr_err("failed to free remote heap allocation\n");
			return;
		}
		if (map->phys) {
			unsigned long dma_attrs = DMA_ATTR_SKIP_ZEROING |
						DMA_ATTR_NO_KERNEL_MAPPING;
			dma_free_attrs(me->dev, map->size, (void *)map->va,
					(dma_addr_t)map->phys, dma_attrs);
		}
	} else if (map->flags == FASTRPC_DMAHANDLE_NOMAP) {
		if (!IS_ERR_OR_NULL(map->handle))
			ion_free(fl->apps->client, map->handle);
	} else {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		if (!IS_ERR_OR_NULL(map->handle))
			ion_free(fl->apps->client, map->handle);
		if (sess && sess->smmu.enabled) {
			if (map->size || map->phys)
				msm_dma_unmap_sg(sess->smmu.dev,
					map->table->sgl,
					map->table->nents, DMA_BIDIRECTIONAL,
					map->buf);
		}
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid && map->phys) {
			int srcVM[2] = {VMID_HLOS, vmid};

			hyp_assign_phys(map->phys, buf_page_size(map->size),
				srcVM, 2, destVM, destVMperm, 1);
		}

		if (!IS_ERR_OR_NULL(map->table))
			dma_buf_unmap_attachment(map->attach, map->table,
					DMA_BIDIRECTIONAL);
		if (!IS_ERR_OR_NULL(map->attach))
			dma_buf_detach(map->buf, map->attach);
		if (!IS_ERR_OR_NULL(map->buf))
			dma_buf_put(map->buf);
	}
	kfree(map);
}

static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
			struct fastrpc_session_ctx **session);

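/*
 * Create (or reuse) a mapping for a user buffer.  Three paths are handled:
 * remote-heap requests get a fresh allocation from the adsprpc-mem device,
 * FASTRPC_DMAHANDLE_NOMAP fds only have their physical address looked up
 * through ION, and ordinary fds are attached and mapped through the
 * session's SMMU, with a hyp_assign_phys() donation when the channel owns a
 * separate VMID.
 */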
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, size_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	struct fastrpc_mmap *map = NULL;
	struct fastrpc_channel_ctx *chan = NULL;
	unsigned long attrs;
	dma_addr_t region_phys = 0;
	void *region_vaddr = NULL;
	unsigned long flags;
	int err = 0, vmid, cid = -1;

	cid = fl->cid;
	VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
	if (err) {
		err = -ECHRNG;
		goto bail;
	}
	chan = &apps->channel[cid];
	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	map->is_filemap = false;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		unsigned long dma_attrs = DMA_ATTR_SKIP_ZEROING |
					DMA_ATTR_NO_KERNEL_MAPPING;

		map->apps = me;
		map->fl = NULL;
		VERIFY(err, !dma_alloc_memory(&region_phys, &region_vaddr,
					len, dma_attrs));
		if (err)
			goto bail;
		map->phys = (uintptr_t)region_phys;
		map->size = len;
		map->va = (uintptr_t)region_vaddr;
	} else if (mflags == FASTRPC_DMAHANDLE_NOMAP) {
		ion_phys_addr_t iphys;

		VERIFY(err, !IS_ERR_OR_NULL(map->handle =
				ion_import_dma_buf_fd(fl->apps->client, fd)));
		if (err)
			goto bail;

		map->uncached = 1;
		map->buf = NULL;
		map->attach = NULL;
		map->table = NULL;
		map->va = 0;
		map->phys = 0;

		err = ion_phys(fl->apps->client, map->handle,
			&iphys, &map->size);
		if (err)
			goto bail;
		map->phys = (uint64_t)iphys;
	} else {
		if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
			pr_info("adsprpc: buffer mapped with persist attr %x\n",
				(unsigned int)map->attr);
			map->refs = 2;
		}
		VERIFY(err, !IS_ERR_OR_NULL(map->handle =
				ion_import_dma_buf_fd(fl->apps->client, fd)));
		if (err)
			goto bail;
		VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
		if (err)
			goto bail;

		map->secure = flags & ION_FLAG_SECURE;
		if (map->secure) {
			if (!fl->secsctx)
				err = fastrpc_session_alloc(chan, 1,
						&fl->secsctx);
			if (err)
				goto bail;
		}
		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		VERIFY(err, !IS_ERR_OR_NULL(sess));
		if (err)
			goto bail;

		map->uncached = !ION_IS_CACHED(flags);
		if (map->attr & FASTRPC_ATTR_NOVA && !sess->smmu.coherent)
			map->uncached = 1;

		VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->attach =
				dma_buf_attach(map->buf, sess->smmu.dev)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
		if (err)
			goto bail;
		if (sess->smmu.enabled) {
			attrs = DMA_ATTR_EXEC_MAPPING;

			if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
				(sess->smmu.coherent && map->uncached))
				attrs |= DMA_ATTR_FORCE_NON_COHERENT;
			else if (map->attr & FASTRPC_ATTR_COHERENT)
				attrs |= DMA_ATTR_FORCE_COHERENT;

			VERIFY(err, map->table->nents ==
				msm_dma_map_sg_attrs(sess->smmu.dev,
					map->table->sgl, map->table->nents,
					DMA_BIDIRECTIONAL, map->buf, attrs));
			if (err)
				goto bail;
		} else {
			VERIFY(err, map->table->nents == 1);
			if (err)
				goto bail;
		}
		map->phys = sg_dma_address(map->table->sgl);

		if (sess->smmu.cb) {
			map->phys += ((uint64_t)sess->smmu.cb << 32);
			map->size = sg_dma_len(map->table->sgl);
		} else {
			map->size = buf_page_size(len);
		}

		VERIFY(err, map->size >= len && map->size < MAX_SIZE_LIMIT);
		if (err) {
			err = -EFAULT;
			goto bail;
		}

		vmid = fl->apps->channel[fl->cid].vmid;
		if (!sess->smmu.enabled && !vmid) {
			VERIFY(err, map->phys >= me->range.addr &&
				map->phys + map->size <=
				me->range.addr + me->range.size);
			if (err) {
				pr_err("adsprpc: mmap fail out of range\n");
				goto bail;
			}
		}
		if (vmid) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[2] = {VMID_HLOS, vmid};
			int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

			VERIFY(err, !hyp_assign_phys(map->phys,
					buf_page_size(map->size),
					srcVM, 1, destVM, destVMperm, 2));
			if (err)
				goto bail;
		}
		map->va = va;
	}
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map, 0);
	return err;
}

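/*
 * Allocate (or recycle) a DMA buffer for a file.  Non-remote requests first
 * try the smallest suitable entry on the cached_bufs list; on allocation
 * failure the cache is drained and the allocation retried once.  Remote
 * buffers are additionally tracked on remote_bufs so they can be reclaimed
 * when the file closes.
 */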
static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
			unsigned long dma_attr, uint32_t rflags,
			int remote, struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = NULL, *fr = NULL;
	struct hlist_node *n;

	VERIFY(err, size > 0 && size < MAX_SIZE_LIMIT);
	if (err) {
		err = -EFAULT;
		goto bail;
	}

	if (!remote) {
		/* find the smallest buffer that fits in the cache */
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
			if (buf->size >= size && (!fr || fr->size > buf->size))
				fr = buf;
		}
		if (fr)
			hlist_del_init(&fr->hn);
		spin_unlock(&fl->hlock);
		if (fr) {
			*obuf = fr;
			return 0;
		}
	}
	buf = NULL;
	VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->dma_attr = dma_attr;
	buf->flags = rflags;
	buf->raddr = 0;
	buf->remote = 0;
	VERIFY(err, fl && fl->sctx != NULL);
	if (err) {
		err = -EBADR;
		goto bail;
	}
	buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
						(dma_addr_t *)&buf->phys,
						GFP_KERNEL, buf->dma_attr);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_cached_buf_list_free(fl);
		buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
						(dma_addr_t *)&buf->phys,
						GFP_KERNEL, buf->dma_attr);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err) {
		err = -ENOMEM;
		pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx\n",
			current->comm, __func__, size);
		goto bail;
	}
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	if (remote) {
		INIT_HLIST_NODE(&buf->hn_rem);
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn_rem, &fl->remote_bufs);
		spin_unlock(&fl->hlock);
		buf->remote = remote;
	}
	*obuf = buf;
 bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}


static int context_restore_interrupted(struct fastrpc_file *fl,
				struct fastrpc_ioctl_invoke_crc *inv,
				struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int overlap_ptr_cmp(const void *a, const void *b)
{
	struct overlap *pa = *((struct overlap **)a);
	struct overlap *pb = *((struct overlap **)b);
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);
	return st == 0 ? ed : st;
}

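/*
 * Work out how the invocation's input/output buffers overlap.  The records
 * are sorted by ascending start address (descending end on a tie) and each
 * buffer's mstart/mend window is trimmed against the running maximum, so
 * overlapping regions are not marshalled twice when arguments are copied;
 * "offset" remembers how far the trimmed window sits from the original
 * start.
 */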
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}

#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst),\
			(void const __user *)(src),\
			(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((void __user *)(dst),\
			(src), (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)


static void context_free(struct smq_invoke_ctx *ctx);

static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			 struct fastrpc_ioctl_invoke_crc *invokefd,
			 struct smq_invoke_ctx **po)
{
	struct fastrpc_apps *me = &gfa;
	int err = 0, bufs, ii, size = 0;
	struct smq_invoke_ctx *ctx = NULL;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;
	unsigned long irq_flags = 0;

	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	if (me->legacy) {
		ctx->overs = (struct overlap *)(&ctx->fds[bufs]);
		ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
	} else {
		ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
		ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
		ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
	}

	K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}
	ctx->crc = (uint32_t *)invokefd->crc;
	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = fl->tgid;
	init_completion(&ctx->work);
	ctx->magic = FASTRPC_CTX_MAGIC;

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

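	/*
	 * Publish the context in the global table.  The id sent to the DSP is
	 * the context pointer with its low 12 bits cleared and the table index
	 * packed into bits 4..11 (see FASTRPC_CTXID_MASK), letting the
	 * response path recover the table entry from the id alone.
	 */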
Jeya R8fa59d62020-11-04 20:42:59 +05301291 spin_lock_irqsave(&me->ctxlock, irq_flags);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301292 for (ii = 0; ii < FASTRPC_CTX_MAX; ii++) {
1293 if (!me->ctxtable[ii]) {
1294 me->ctxtable[ii] = ctx;
1295 ctx->ctxid = (ptr_to_uint64(ctx) & ~0xFFF)|(ii << 4);
1296 break;
1297 }
1298 }
Jeya R8fa59d62020-11-04 20:42:59 +05301299 spin_unlock_irqrestore(&me->ctxlock, irq_flags);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301300 VERIFY(err, ii < FASTRPC_CTX_MAX);
1301 if (err) {
1302 pr_err("adsprpc: out of context memory\n");
1303 goto bail;
1304 }
1305
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001306 *po = ctx;
1307bail:
1308 if (ctx && err)
1309 context_free(ctx);
1310 return err;
1311}
1312
1313static void context_save_interrupted(struct smq_invoke_ctx *ctx)
1314{
1315 struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
1316
1317 spin_lock(&ctx->fl->hlock);
1318 hlist_del_init(&ctx->hn);
1319 hlist_add_head(&ctx->hn, &clst->interrupted);
1320 spin_unlock(&ctx->fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001321}
1322
1323static void context_free(struct smq_invoke_ctx *ctx)
1324{
1325 int i;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301326 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001327 int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
1328 REMOTE_SCALARS_OUTBUFS(ctx->sc);
Jeya R8fa59d62020-11-04 20:42:59 +05301329 unsigned long irq_flags = 0;
1330 void *handle = NULL;
1331 const void *ptr = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001332 spin_lock(&ctx->fl->hlock);
1333 hlist_del_init(&ctx->hn);
1334 spin_unlock(&ctx->fl->hlock);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301335 mutex_lock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001336 for (i = 0; i < nbufs; ++i)
c_mtharu7bd6a422017-10-17 18:15:37 +05301337 fastrpc_mmap_free(ctx->maps[i], 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301338
1339 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001340 fastrpc_buf_free(ctx->buf, 1);
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301341 fastrpc_buf_free(ctx->lbuf, 1);
c_mtharufdac6892017-10-12 13:09:01 +05301342 ctx->magic = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301343 ctx->ctxid = 0;
1344
Jeya R8fa59d62020-11-04 20:42:59 +05301345 spin_lock_irqsave(&me->ctxlock, irq_flags);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301346 for (i = 0; i < FASTRPC_CTX_MAX; i++) {
1347 if (me->ctxtable[i] == ctx) {
Jeya R8fa59d62020-11-04 20:42:59 +05301348 handle = me->ctxtable[i]->handle;
1349 ptr = me->ctxtable[i]->ptr;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301350 me->ctxtable[i] = NULL;
1351 break;
1352 }
1353 }
Jeya R8fa59d62020-11-04 20:42:59 +05301354 spin_unlock_irqrestore(&me->ctxlock, irq_flags);
1355 if (handle) {
1356 glink_rx_done(handle, ptr, true);
1357 handle = NULL;
1358 }
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301359
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001360 kfree(ctx);
1361}
1362
1363static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
1364{
1365 ctx->retval = retval;
1366 complete(&ctx->work);
1367}
1368
1369
1370static void fastrpc_notify_users(struct fastrpc_file *me)
1371{
1372 struct smq_invoke_ctx *ictx;
1373 struct hlist_node *n;
1374
1375 spin_lock(&me->hlock);
1376 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1377 complete(&ictx->work);
1378 }
1379 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1380 complete(&ictx->work);
1381 }
1382 spin_unlock(&me->hlock);
1383
1384}
1385
Tharun Kumar Merugu77dd5872018-04-02 12:48:17 +05301386
1387static void fastrpc_notify_users_staticpd_pdr(struct fastrpc_file *me)
1388{
1389 struct smq_invoke_ctx *ictx;
1390 struct hlist_node *n;
1391
1392 spin_lock(&me->hlock);
1393 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1394 if (ictx->msg.pid)
1395 complete(&ictx->work);
1396 }
1397 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1398 if (ictx->msg.pid)
1399 complete(&ictx->work);
1400 }
1401 spin_unlock(&me->hlock);
1402}
1403
1404
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001405static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
1406{
1407 struct fastrpc_file *fl;
1408 struct hlist_node *n;
1409
1410 spin_lock(&me->hlock);
1411 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1412 if (fl->cid == cid)
1413 fastrpc_notify_users(fl);
1414 }
1415 spin_unlock(&me->hlock);
1416
1417}
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05301418
1419static void fastrpc_notify_pdr_drivers(struct fastrpc_apps *me, char *spdname)
1420{
1421 struct fastrpc_file *fl;
1422 struct hlist_node *n;
1423
1424 spin_lock(&me->hlock);
1425 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1426 if (fl->spdname && !strcmp(spdname, fl->spdname))
Tharun Kumar Merugu77dd5872018-04-02 12:48:17 +05301427 fastrpc_notify_users_staticpd_pdr(fl);
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05301428 }
1429 spin_unlock(&me->hlock);
1430
1431}
1432
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001433static void context_list_ctor(struct fastrpc_ctx_lst *me)
1434{
1435 INIT_HLIST_HEAD(&me->interrupted);
1436 INIT_HLIST_HEAD(&me->pending);
1437}
1438
1439static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
1440{
1441 struct fastrpc_ctx_lst *clst = &fl->clst;
c_mtharue1a5ce12017-10-13 20:47:09 +05301442 struct smq_invoke_ctx *ictx = NULL, *ctxfree;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001443 struct hlist_node *n;
1444
1445 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301446 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001447 spin_lock(&fl->hlock);
1448 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
1449 hlist_del_init(&ictx->hn);
1450 ctxfree = ictx;
1451 break;
1452 }
1453 spin_unlock(&fl->hlock);
1454 if (ctxfree)
1455 context_free(ctxfree);
1456 } while (ctxfree);
1457 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301458 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001459 spin_lock(&fl->hlock);
1460 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
1461 hlist_del_init(&ictx->hn);
1462 ctxfree = ictx;
1463 break;
1464 }
1465 spin_unlock(&fl->hlock);
1466 if (ctxfree)
1467 context_free(ctxfree);
1468 } while (ctxfree);
1469}
1470
1471static int fastrpc_file_free(struct fastrpc_file *fl);
1472static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
1473{
1474 struct fastrpc_file *fl, *free;
1475 struct hlist_node *n;
1476
1477 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301478 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001479 spin_lock(&me->hlock);
1480 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1481 hlist_del_init(&fl->hn);
1482 free = fl;
1483 break;
1484 }
1485 spin_unlock(&me->hlock);
1486 if (free)
1487 fastrpc_file_free(free);
1488 } while (free);
1489}
1490
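/*
 * get_args() - marshal the arguments of an invocation: create mappings
 * for ion-backed buffers and DMA handles, allocate the local rpra buffer
 * and the shared copy buffer, build the page list metadata (plus fd/crc
 * lists on non-legacy transports), copy non-ion input buffers into the
 * shared buffer, and flush caches for non-coherent mappings.
 */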
1491static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
1492{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301493 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301494 remote_arg64_t *rpra, *lrpra;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001495 remote_arg_t *lpra = ctx->lpra;
1496 struct smq_invoke_buf *list;
1497 struct smq_phy_page *pages, *ipage;
1498 uint32_t sc = ctx->sc;
1499 int inbufs = REMOTE_SCALARS_INBUFS(sc);
1500 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001501 int handles, bufs = inbufs + outbufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001502 uintptr_t args;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301503 size_t rlen = 0, copylen = 0, metalen = 0, lrpralen = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001504 int i, oix;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001505 int err = 0;
1506 int mflags = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001507 uint64_t *fdlist;
Sathish Ambleybae51902017-07-03 15:00:49 -07001508 uint32_t *crclist;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301509 int64_t *perf_counter = getperfcounter(ctx->fl, PERF_COUNT);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001510
1511 /* calculate size of the metadata */
c_mtharue1a5ce12017-10-13 20:47:09 +05301512 rpra = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001513 list = smq_invoke_buf_start(rpra, sc);
1514 pages = smq_phy_page_start(sc, list);
1515 ipage = pages;
1516
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301517 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001518 for (i = 0; i < bufs; ++i) {
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301519 uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
1520 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001521
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301522 mutex_lock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301523 if (ctx->fds[i] && (ctx->fds[i] != -1)) {
1524 unsigned int attrs = 0;
1525
1526 if (ctx->attrs)
1527 attrs = ctx->attrs[i];
1528
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001529 fastrpc_mmap_create(ctx->fl, ctx->fds[i],
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301530 attrs, buf, len,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001531 mflags, &ctx->maps[i]);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301532 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301533 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001534 ipage += 1;
1535 }
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301536 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001537 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301538 mutex_lock(&ctx->fl->fl_map_mutex);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001539 for (i = bufs; i < bufs + handles; i++) {
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +05301540 int dmaflags = 0;
1541
1542 if (ctx->attrs && (ctx->attrs[i] & FASTRPC_ATTR_NOMAP))
1543 dmaflags = FASTRPC_DMAHANDLE_NOMAP;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001544 VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +05301545 FASTRPC_ATTR_NOVA, 0, 0, dmaflags, &ctx->maps[i]));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301546 if (err) {
1547 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001548 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301549 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001550 ipage += 1;
1551 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301552 mutex_unlock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301553 if (!me->legacy) {
1554 metalen = copylen = (size_t)&ipage[0] +
1555 (sizeof(uint64_t) * M_FDLIST) +
1556 (sizeof(uint32_t) * M_CRCLIST);
1557 } else {
1558 metalen = copylen = (size_t)&ipage[0];
1559 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001560
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301561 /* allocate new local rpra buffer */
1562 lrpralen = (size_t)&list[0];
1563 if (lrpralen) {
1564 err = fastrpc_buf_alloc(ctx->fl, lrpralen, 0, 0, 0, &ctx->lbuf);
1565 if (err)
1566 goto bail;
1567 }
1568 if (ctx->lbuf->virt)
1569 memset(ctx->lbuf->virt, 0, lrpralen);
1570
1571 lrpra = ctx->lbuf->virt;
1572 ctx->lrpra = lrpra;
1573
1574 /* calculate len required for copying */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001575 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1576 int i = ctx->overps[oix]->raix;
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001577 uintptr_t mstart, mend;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301578 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001579
1580 if (!len)
1581 continue;
1582 if (ctx->maps[i])
1583 continue;
1584 if (ctx->overps[oix]->offset == 0)
1585 copylen = ALIGN(copylen, BALIGN);
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001586 mstart = ctx->overps[oix]->mstart;
1587 mend = ctx->overps[oix]->mend;
1588 VERIFY(err, (mend - mstart) <= LONG_MAX);
1589 if (err)
1590 goto bail;
1591 copylen += mend - mstart;
1592 VERIFY(err, copylen >= 0);
1593 if (err)
1594 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001595 }
1596 ctx->used = copylen;
1597
1598 /* allocate new buffer */
1599 if (copylen) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05301600 err = fastrpc_buf_alloc(ctx->fl, copylen, 0, 0, 0, &ctx->buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001601 if (err)
1602 goto bail;
1603 }
Tharun Kumar Merugue3361f92017-06-22 10:45:43 +05301604 if (ctx->buf->virt && metalen <= copylen)
1605 memset(ctx->buf->virt, 0, metalen);
1606
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001607 /* copy metadata */
1608 rpra = ctx->buf->virt;
1609 ctx->rpra = rpra;
1610 list = smq_invoke_buf_start(rpra, sc);
1611 pages = smq_phy_page_start(sc, list);
1612 ipage = pages;
1613 args = (uintptr_t)ctx->buf->virt + metalen;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001614 for (i = 0; i < bufs + handles; ++i) {
1615 if (lpra[i].buf.len)
1616 list[i].num = 1;
1617 else
1618 list[i].num = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001619 list[i].pgidx = ipage - pages;
1620 ipage++;
1621 }
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301622
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001623 /* map ion buffers */
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301624 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301625 for (i = 0; rpra && lrpra && i < inbufs + outbufs; ++i) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001626 struct fastrpc_mmap *map = ctx->maps[i];
1627 uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301628 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001629
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301630 rpra[i].buf.pv = lrpra[i].buf.pv = 0;
1631 rpra[i].buf.len = lrpra[i].buf.len = len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001632 if (!len)
1633 continue;
1634 if (map) {
1635 struct vm_area_struct *vma;
1636 uintptr_t offset;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301637 uint64_t num = buf_num_pages(buf, len);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001638 int idx = list[i].pgidx;
1639
1640 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001641 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001642 } else {
1643 down_read(&current->mm->mmap_sem);
1644 VERIFY(err, NULL != (vma = find_vma(current->mm,
1645 map->va)));
1646 if (err) {
1647 up_read(&current->mm->mmap_sem);
1648 goto bail;
1649 }
1650 offset = buf_page_start(buf) - vma->vm_start;
1651 up_read(&current->mm->mmap_sem);
1652 VERIFY(err, offset < (uintptr_t)map->size);
1653 if (err)
1654 goto bail;
1655 }
1656 pages[idx].addr = map->phys + offset;
1657 pages[idx].size = num << PAGE_SHIFT;
1658 }
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301659 rpra[i].buf.pv = lrpra[i].buf.pv = buf;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001660 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001661 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001662 for (i = bufs; i < bufs + handles; ++i) {
1663 struct fastrpc_mmap *map = ctx->maps[i];
Jeya R4c7abf22020-07-23 16:00:50 +05301664 if (map) {
1665 pages[i].addr = map->phys;
1666 pages[i].size = map->size;
1667 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001668 }
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301669 if (!me->legacy) {
1670 fdlist = (uint64_t *)&pages[bufs + handles];
1671 for (i = 0; i < M_FDLIST; i++)
1672 fdlist[i] = 0;
1673 crclist = (uint32_t *)&fdlist[M_FDLIST];
1674 memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
1675 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001676
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001677	/* copy non-ion buffers */
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301678 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001679 rlen = copylen - metalen;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301680 for (oix = 0; rpra && lrpra && oix < inbufs + outbufs; ++oix) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001681 int i = ctx->overps[oix]->raix;
1682 struct fastrpc_mmap *map = ctx->maps[i];
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301683 size_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001684 uint64_t buf;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301685 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001686
1687 if (!len)
1688 continue;
1689 if (map)
1690 continue;
1691 if (ctx->overps[oix]->offset == 0) {
1692 rlen -= ALIGN(args, BALIGN) - args;
1693 args = ALIGN(args, BALIGN);
1694 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001695 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001696 VERIFY(err, rlen >= mlen);
1697 if (err)
1698 goto bail;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301699 rpra[i].buf.pv = lrpra[i].buf.pv =
1700 (args - ctx->overps[oix]->offset);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001701 pages[list[i].pgidx].addr = ctx->buf->phys -
1702 ctx->overps[oix]->offset +
1703 (copylen - rlen);
1704 pages[list[i].pgidx].addr =
1705 buf_page_start(pages[list[i].pgidx].addr);
1706 buf = rpra[i].buf.pv;
1707 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1708 if (i < inbufs) {
1709 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1710 lpra[i].buf.pv, len);
1711 if (err)
1712 goto bail;
1713 }
1714 args = args + mlen;
1715 rlen -= mlen;
1716 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001717 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001718
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301719 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001720 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1721 int i = ctx->overps[oix]->raix;
1722 struct fastrpc_mmap *map = ctx->maps[i];
1723
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001724 if (map && map->uncached)
1725 continue;
Jeya R984a1a32021-01-18 15:38:07 +05301726 if (ctx->fl->sctx && ctx->fl->sctx->smmu.coherent &&
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301727 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1728 continue;
1729 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1730 continue;
1731
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301732 if (rpra && lrpra && rpra[i].buf.len &&
1733 ctx->overps[oix]->mstart) {
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301734 if (map && map->handle)
1735 msm_ion_do_cache_op(ctx->fl->apps->client,
1736 map->handle,
1737 uint64_to_ptr(rpra[i].buf.pv),
1738 rpra[i].buf.len,
1739 ION_IOC_CLEAN_INV_CACHES);
1740 else
1741 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1742 uint64_to_ptr(rpra[i].buf.pv
1743 + rpra[i].buf.len));
1744 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001745 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001746 PERF_END);
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301747 for (i = bufs; rpra && lrpra && i < bufs + handles; i++) {
Jeya R4c7abf22020-07-23 16:00:50 +05301748 if (ctx->fds)
1749 rpra[i].dma.fd = lrpra[i].dma.fd = ctx->fds[i];
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301750 rpra[i].dma.len = lrpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1751 rpra[i].dma.offset = lrpra[i].dma.offset =
1752 (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001753 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001754
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001755 bail:
1756 return err;
1757}
1758
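/*
 * put_args() - unmarshal results after the remote call: copy unmapped
 * output buffers back to the caller, release the mappings of mapped
 * output buffers, free any maps the DSP flagged in the fd list, and
 * copy the CRC list back if the caller asked for it.
 */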
1759static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
1760 remote_arg_t *upra)
1761{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301762 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001763 uint32_t sc = ctx->sc;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001764 struct smq_invoke_buf *list;
1765 struct smq_phy_page *pages;
1766 struct fastrpc_mmap *mmap;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301767 uint64_t *fdlist = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001768 uint32_t *crclist = NULL;
1769
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301770 remote_arg64_t *rpra = ctx->lrpra;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001771 int i, inbufs, outbufs, handles;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001772 int err = 0;
1773
1774 inbufs = REMOTE_SCALARS_INBUFS(sc);
1775 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001776 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1777 list = smq_invoke_buf_start(ctx->rpra, sc);
1778 pages = smq_phy_page_start(sc, list);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301779 if (!me->legacy) {
1780 fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
1781 crclist = (uint32_t *)(fdlist + M_FDLIST);
1782 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001783
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001784 for (i = inbufs; i < inbufs + outbufs; ++i) {
1785 if (!ctx->maps[i]) {
1786 K_COPY_TO_USER(err, kernel,
1787 ctx->lpra[i].buf.pv,
1788 uint64_to_ptr(rpra[i].buf.pv),
1789 rpra[i].buf.len);
1790 if (err)
1791 goto bail;
1792 } else {
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301793 mutex_lock(&ctx->fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05301794 fastrpc_mmap_free(ctx->maps[i], 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301795 mutex_unlock(&ctx->fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05301796 ctx->maps[i] = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001797 }
1798 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301799 mutex_lock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301800 if (fdlist && (inbufs + outbufs + handles)) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001801 for (i = 0; i < M_FDLIST; i++) {
1802 if (!fdlist[i])
1803 break;
1804 if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
Sathish Ambleyae5ee542017-01-16 22:24:23 -08001805 0, 0, &mmap))
c_mtharu7bd6a422017-10-17 18:15:37 +05301806 fastrpc_mmap_free(mmap, 0);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001807 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001808 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301809 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambleybae51902017-07-03 15:00:49 -07001810 if (ctx->crc && crclist && rpra)
c_mtharue1a5ce12017-10-13 20:47:09 +05301811 K_COPY_TO_USER(err, kernel, ctx->crc,
Sathish Ambleybae51902017-07-03 15:00:49 -07001812 crclist, M_CRCLIST*sizeof(uint32_t));
1813
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001814 bail:
1815 return err;
1816}
1817
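/*
 * inv_args_pre() - before sending the message, clean the cache lines at
 * the unaligned start and end of non-coherent output buffers so dirty
 * lines on the CPU side cannot overwrite data written by the DSP.
 */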
1818static void inv_args_pre(struct smq_invoke_ctx *ctx)
1819{
1820 int i, inbufs, outbufs;
1821 uint32_t sc = ctx->sc;
1822 remote_arg64_t *rpra = ctx->rpra;
1823 uintptr_t end;
1824
1825 inbufs = REMOTE_SCALARS_INBUFS(sc);
1826 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1827 for (i = inbufs; i < inbufs + outbufs; ++i) {
1828 struct fastrpc_mmap *map = ctx->maps[i];
1829
1830 if (map && map->uncached)
1831 continue;
1832 if (!rpra[i].buf.len)
1833 continue;
Jeya R984a1a32021-01-18 15:38:07 +05301834 if (ctx->fl && ctx->fl->sctx && ctx->fl->sctx->smmu.coherent &&
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301835 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1836 continue;
1837 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1838 continue;
1839
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001840 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1841 buf_page_start(rpra[i].buf.pv))
1842 continue;
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301843 if (!IS_CACHE_ALIGNED((uintptr_t)
1844 uint64_to_ptr(rpra[i].buf.pv))) {
1845 if (map && map->handle)
1846 msm_ion_do_cache_op(ctx->fl->apps->client,
1847 map->handle,
1848 uint64_to_ptr(rpra[i].buf.pv),
1849 sizeof(uintptr_t),
1850 ION_IOC_CLEAN_INV_CACHES);
1851 else
1852 dmac_flush_range(
1853 uint64_to_ptr(rpra[i].buf.pv), (char *)
1854 uint64_to_ptr(rpra[i].buf.pv + 1));
1855 }
1856
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001857 end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
1858 rpra[i].buf.len);
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301859 if (!IS_CACHE_ALIGNED(end)) {
1860 if (map && map->handle)
1861 msm_ion_do_cache_op(ctx->fl->apps->client,
1862 map->handle,
1863 uint64_to_ptr(end),
1864 sizeof(uintptr_t),
1865 ION_IOC_CLEAN_INV_CACHES);
1866 else
1867 dmac_flush_range((char *)end,
1868 (char *)end + 1);
1869 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001870 }
1871}
1872
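/*
 * inv_args() - after the remote call completes, invalidate the cached
 * copies of non-coherent output buffers so the CPU observes the data
 * written by the DSP.
 */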
1873static void inv_args(struct smq_invoke_ctx *ctx)
1874{
1875 int i, inbufs, outbufs;
1876 uint32_t sc = ctx->sc;
Tharun Kumar Merugub31cc732019-05-07 00:39:43 +05301877 remote_arg64_t *rpra = ctx->lrpra;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001878
1879 inbufs = REMOTE_SCALARS_INBUFS(sc);
1880 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1881 for (i = inbufs; i < inbufs + outbufs; ++i) {
1882 struct fastrpc_mmap *map = ctx->maps[i];
1883
1884 if (map && map->uncached)
1885 continue;
1886 if (!rpra[i].buf.len)
1887 continue;
Jeya R984a1a32021-01-18 15:38:07 +05301888 if (ctx->fl && ctx->fl->sctx && ctx->fl->sctx->smmu.coherent &&
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301889 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1890 continue;
1891 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1892 continue;
1893
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001894 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1895 buf_page_start(rpra[i].buf.pv)) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001896 continue;
1897 }
1898 if (map && map->handle)
1899 msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
1900 (char *)uint64_to_ptr(rpra[i].buf.pv),
1901 rpra[i].buf.len, ION_IOC_INV_CACHES);
1902 else
1903 dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
1904 (char *)uint64_to_ptr(rpra[i].buf.pv
1905 + rpra[i].buf.len));
1906 }
1907
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001908}
1909
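/*
 * fastrpc_invoke_send() - build the smq_msg for this context and push it
 * to the remote processor over glink or SMD, depending on how the
 * channel was opened.
 */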
1910static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
1911 uint32_t kernel, uint32_t handle)
1912{
1913 struct smq_msg *msg = &ctx->msg;
1914 struct fastrpc_file *fl = ctx->fl;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05301915 int err = 0, len, cid = -1;
1916 struct fastrpc_channel_ctx *channel_ctx = NULL;
1917
1918 cid = fl->cid;
1919 VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
1920 if (err) {
1921 err = -ECHRNG;
1922 goto bail;
1923 }
1924 channel_ctx = &fl->apps->channel[fl->cid];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001925
c_mtharue1a5ce12017-10-13 20:47:09 +05301926 VERIFY(err, NULL != channel_ctx->chan);
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05301927 if (err) {
1928 err = -ECHRNG;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001929 goto bail;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05301930 }
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301931 msg->pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001932 msg->tid = current->pid;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301933 if (fl->sessionid)
1934 msg->tid |= (1 << SESSION_ID_INDEX);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001935 if (kernel)
1936 msg->pid = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301937 msg->invoke.header.ctx = ctx->ctxid | fl->pd;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001938 msg->invoke.header.handle = handle;
1939 msg->invoke.header.sc = ctx->sc;
1940 msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
1941 msg->invoke.page.size = buf_page_size(ctx->used);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301942 if (fl->apps->glink) {
1943 if (fl->ssrcount != channel_ctx->ssrcount) {
1944 err = -ECONNRESET;
1945 goto bail;
1946 }
1947 VERIFY(err, channel_ctx->link.port_state ==
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001948 FASTRPC_LINK_CONNECTED);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301949 if (err)
1950 goto bail;
1951 err = glink_tx(channel_ctx->chan,
1952 (void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
1953 GLINK_TX_REQ_INTENT);
1954 } else {
1955 spin_lock(&fl->apps->hlock);
1956 len = smd_write((smd_channel_t *)
1957 channel_ctx->chan,
1958 msg, sizeof(*msg));
1959 spin_unlock(&fl->apps->hlock);
1960 VERIFY(err, len == sizeof(*msg));
1961 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001962 bail:
1963 return err;
1964}
1965
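/*
 * fastrpc_smd_read_handler() - drain responses from the SMD channel,
 * validate the context id carried in each response against the global
 * context table, and wake the matching waiter.
 */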
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301966static void fastrpc_smd_read_handler(int cid)
1967{
1968 struct fastrpc_apps *me = &gfa;
1969 struct smq_invoke_rsp rsp = {0};
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301970 int ret = 0, err = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301971 uint32_t index;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301972
1973 do {
1974 ret = smd_read_from_cb(me->channel[cid].chan, &rsp,
1975 sizeof(rsp));
1976 if (ret != sizeof(rsp))
1977 break;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301978
1979 index = (uint32_t)((rsp.ctx & FASTRPC_CTXID_MASK) >> 4);
1980 VERIFY(err, index < FASTRPC_CTX_MAX);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301981 if (err)
1982 goto bail;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301983
1984 VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
1985 if (err)
1986 goto bail;
1987
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05301988 VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp.ctx & ~3)) &&
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301989 me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
1990 if (err)
1991 goto bail;
1992
1993 context_notify_user(me->ctxtable[index], rsp.retval);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301994 } while (ret == sizeof(rsp));
1995bail:
1996 if (err)
1997 pr_err("adsprpc: invalid response or context\n");
1998
1999}
2000
2001static void smd_event_handler(void *priv, unsigned int event)
2002{
2003 struct fastrpc_apps *me = &gfa;
2004 int cid = (int)(uintptr_t)priv;
2005
2006 switch (event) {
2007 case SMD_EVENT_OPEN:
2008 complete(&me->channel[cid].workport);
2009 break;
2010 case SMD_EVENT_CLOSE:
2011 fastrpc_notify_drivers(me, cid);
2012 break;
2013 case SMD_EVENT_DATA:
2014 fastrpc_smd_read_handler(cid);
2015 break;
2016 }
2017}
2018
2019
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002020static void fastrpc_init(struct fastrpc_apps *me)
2021{
2022 int i;
2023
2024 INIT_HLIST_HEAD(&me->drivers);
Tharun Kumar Merugubcd6fbf2018-01-04 17:49:34 +05302025 INIT_HLIST_HEAD(&me->maps);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002026 spin_lock_init(&me->hlock);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302027 spin_lock_init(&me->ctxlock);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302028 mutex_init(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002029 me->channel = &gcinfo[0];
2030 for (i = 0; i < NUM_CHANNELS; i++) {
2031 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302032 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002033 me->channel[i].sesscount = 0;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05302034 /* All channels are secure by default except CDSP */
2035 me->channel[i].secure = SECURE_CHANNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002036 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05302037	/* Set CDSP channel to non-secure */
2038 me->channel[CDSP_DOMAIN_ID].secure = NON_SECURE_CHANNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002039}
2040
2041static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
2042
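/*
 * fastrpc_internal_invoke() - core invocation path shared by ioctl and
 * in-kernel callers: restore or allocate a context, marshal arguments,
 * send the message, wait for completion, invalidate caches, unmarshal
 * results, and update the optional performance counters.
 */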
2043static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
2044 uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07002045 struct fastrpc_ioctl_invoke_crc *inv)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002046{
c_mtharue1a5ce12017-10-13 20:47:09 +05302047 struct smq_invoke_ctx *ctx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002048 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05302049 int err = 0, cid = -1, interrupted = 0;
Maria Yu757199c2017-09-22 16:05:49 +08002050 struct timespec invoket = {0};
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05302051 int64_t *perf_counter = NULL;
2052
2053 cid = fl->cid;
2054 VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
2055 if (err) {
2056 err = -ECHRNG;
2057 goto bail;
2058 }
2059 VERIFY(err, fl->sctx != NULL);
2060 if (err) {
2061 err = -EBADR;
2062 goto bail;
2063 }
2064 perf_counter = getperfcounter(fl, PERF_COUNT);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002065
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002066 if (fl->profile)
2067 getnstimeofday(&invoket);
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05302068
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302069 if (!kernel) {
2070 VERIFY(err, invoke->handle != FASTRPC_STATIC_HANDLE_KERNEL);
2071 if (err) {
2072			pr_err("adsprpc: ERROR: %s: user application %s trying to send a kernel RPC message to channel %d\n",
2073 __func__, current->comm, cid);
2074 goto bail;
2075 }
2076 }
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302077
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002078 if (!kernel) {
2079 VERIFY(err, 0 == context_restore_interrupted(fl, inv,
2080 &ctx));
2081 if (err)
2082 goto bail;
2083 if (fl->sctx->smmu.faults)
2084 err = FASTRPC_ENOSUCH;
2085 if (err)
2086 goto bail;
2087 if (ctx)
2088 goto wait;
2089 }
2090
2091 VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
2092 if (err)
2093 goto bail;
2094
2095 if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302096 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_GETARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002097 VERIFY(err, 0 == get_args(kernel, ctx));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002098 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002099 if (err)
2100 goto bail;
2101 }
2102
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302103 if (!fl->sctx->smmu.coherent) {
2104 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002105 inv_args_pre(ctx);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302106 PERF_END);
2107 }
2108
2109 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_LINK),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002110 VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002111 PERF_END);
2112
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002113 if (err)
2114 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002115 wait:
2116 if (kernel)
2117 wait_for_completion(&ctx->work);
2118 else {
2119 interrupted = wait_for_completion_interruptible(&ctx->work);
2120 VERIFY(err, 0 == (err = interrupted));
2121 if (err)
2122 goto bail;
2123 }
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302124 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
Sathish Ambleyc432b502017-06-05 12:03:42 -07002125 if (!fl->sctx->smmu.coherent)
2126 inv_args(ctx);
2127 PERF_END);
2128
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002129 VERIFY(err, 0 == (err = ctx->retval));
2130 if (err)
2131 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002132
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302133 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_PUTARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002134 VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002135 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002136 if (err)
2137 goto bail;
2138 bail:
2139 if (ctx && interrupted == -ERESTARTSYS)
2140 context_save_interrupted(ctx);
2141 else if (ctx)
2142 context_free(ctx);
2143 if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
2144 err = ECONNRESET;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002145
2146 if (fl->profile && !interrupted) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302147 if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER) {
2148 int64_t *count = GET_COUNTER(perf_counter, PERF_INVOKE);
2149
2150 if (count)
2151 *count += getnstimediff(&invoket);
2152 }
2153 if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX) {
2154 int64_t *count = GET_COUNTER(perf_counter, PERF_COUNT);
2155
2156 if (count)
2157 *count = *count+1;
2158 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08002159 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002160 return err;
2161}
2162
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302163static int fastrpc_get_adsp_session(char *name, int *session)
2164{
2165 struct fastrpc_apps *me = &gfa;
2166 int err = 0, i;
2167
2168 for (i = 0; i < NUM_SESSIONS; i++) {
2169 if (!me->channel[0].spd[i].spdname)
2170 continue;
2171 if (!strcmp(name, me->channel[0].spd[i].spdname))
2172 break;
2173 }
2174 VERIFY(err, i < NUM_SESSIONS);
2175 if (err)
2176 goto bail;
2177 *session = i;
2178bail:
2179 return err;
2180}
2181
2182static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl);
Sathish Ambley36849af2017-02-02 09:35:55 -08002183static int fastrpc_channel_open(struct fastrpc_file *fl);
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302184static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl);
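/*
 * fastrpc_init_process() - create or attach the remote process for this
 * file: FASTRPC_INIT_ATTACH[_SENSORS] attaches to an existing PD,
 * FASTRPC_INIT_CREATE spawns a dynamic user PD with driver-allocated
 * init memory, and FASTRPC_INIT_CREATE_STATIC brings up a named static
 * PD such as "audiopd".
 */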
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002185static int fastrpc_init_process(struct fastrpc_file *fl,
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002186 struct fastrpc_ioctl_init_attrs *uproc)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002187{
2188 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302189 struct fastrpc_apps *me = &gfa;
Sathish Ambleybae51902017-07-03 15:00:49 -07002190 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002191 struct fastrpc_ioctl_init *init = &uproc->init;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002192 struct smq_phy_page pages[1];
c_mtharue1a5ce12017-10-13 20:47:09 +05302193 struct fastrpc_mmap *file = NULL, *mem = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302194 struct fastrpc_buf *imem = NULL;
2195 unsigned long imem_dma_attr = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302196 char *proc_name = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002197
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302198 VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
Sathish Ambley36849af2017-02-02 09:35:55 -08002199 if (err)
2200 goto bail;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302201 if (init->flags == FASTRPC_INIT_ATTACH ||
2202 init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002203 remote_arg_t ra[1];
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302204 int tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002205
2206 ra[0].buf.pv = (void *)&tgid;
2207 ra[0].buf.len = sizeof(tgid);
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302208 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002209 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
2210 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302211 ioctl.fds = NULL;
2212 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002213 ioctl.crc = NULL;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302214 if (init->flags == FASTRPC_INIT_ATTACH)
2215 fl->pd = 0;
2216 else if (init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
2217 fl->spdname = SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME;
2218 fl->pd = 2;
2219 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002220 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2221 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2222 if (err)
2223 goto bail;
2224 } else if (init->flags == FASTRPC_INIT_CREATE) {
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002225 remote_arg_t ra[6];
2226 int fds[6];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002227 int mflags = 0;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302228 int memlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002229 struct {
2230 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302231 unsigned int namelen;
2232 unsigned int filelen;
2233 unsigned int pageslen;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002234 int attrs;
2235 int siglen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002236 } inbuf;
2237
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302238 inbuf.pgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002239 inbuf.namelen = strlen(current->comm) + 1;
2240 inbuf.filelen = init->filelen;
2241 fl->pd = 1;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302242
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05302243 VERIFY(err, access_ok(0, (void __user *)init->file,
2244 init->filelen));
2245 if (err)
2246 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002247 if (init->filelen) {
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302248 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002249 VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
2250 init->file, init->filelen, mflags, &file));
Swathi K0e257332021-07-14 17:51:10 +05302251 if (file)
2252 file->is_filemap = true;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302253 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002254 if (err)
2255 goto bail;
2256 }
2257 inbuf.pageslen = 1;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302258
2259 VERIFY(err, !init->mem);
2260 if (err) {
2261 err = -EINVAL;
2262 pr_err("adsprpc: %s: %s: ERROR: donated memory allocated in userspace\n",
2263 current->comm, __func__);
2264 goto bail;
2265 }
2266 memlen = ALIGN(max(1024*1024*3, (int)init->filelen * 4),
2267 1024*1024);
2268 imem_dma_attr = DMA_ATTR_EXEC_MAPPING |
2269 DMA_ATTR_NO_KERNEL_MAPPING |
2270 DMA_ATTR_FORCE_NON_COHERENT;
2271 err = fastrpc_buf_alloc(fl, memlen, imem_dma_attr, 0, 0, &imem);
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05302272 if (err)
2273 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302274 fl->init_mem = imem;
2275
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002276 inbuf.pageslen = 1;
2277 ra[0].buf.pv = (void *)&inbuf;
2278 ra[0].buf.len = sizeof(inbuf);
2279 fds[0] = 0;
2280
2281 ra[1].buf.pv = (void *)current->comm;
2282 ra[1].buf.len = inbuf.namelen;
2283 fds[1] = 0;
2284
2285 ra[2].buf.pv = (void *)init->file;
2286 ra[2].buf.len = inbuf.filelen;
2287 fds[2] = init->filefd;
2288
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302289 pages[0].addr = imem->phys;
2290 pages[0].size = imem->size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002291 ra[3].buf.pv = (void *)pages;
2292 ra[3].buf.len = 1 * sizeof(*pages);
2293 fds[3] = 0;
2294
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002295 inbuf.attrs = uproc->attrs;
2296 ra[4].buf.pv = (void *)&(inbuf.attrs);
2297 ra[4].buf.len = sizeof(inbuf.attrs);
2298 fds[4] = 0;
2299
2300 inbuf.siglen = uproc->siglen;
2301 ra[5].buf.pv = (void *)&(inbuf.siglen);
2302 ra[5].buf.len = sizeof(inbuf.siglen);
2303 fds[5] = 0;
2304
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302305 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002306 ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002307 if (uproc->attrs)
2308 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002309 ioctl.inv.pra = ra;
2310 ioctl.fds = fds;
c_mtharue1a5ce12017-10-13 20:47:09 +05302311 ioctl.attrs = NULL;
2312 ioctl.crc = NULL;
2313 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2314 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2315 if (err)
2316 goto bail;
2317 } else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
2318 remote_arg_t ra[3];
2319 uint64_t phys = 0;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302320 size_t size = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302321 int fds[3];
2322 struct {
2323 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302324 unsigned int namelen;
2325 unsigned int pageslen;
c_mtharue1a5ce12017-10-13 20:47:09 +05302326 } inbuf;
2327
2328 if (!init->filelen)
2329 goto bail;
2330
2331 proc_name = kzalloc(init->filelen, GFP_KERNEL);
2332 VERIFY(err, !IS_ERR_OR_NULL(proc_name));
2333 if (err)
2334 goto bail;
2335 VERIFY(err, 0 == copy_from_user((void *)proc_name,
2336 (void __user *)init->file, init->filelen));
2337 if (err)
2338 goto bail;
2339
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302340 fl->pd = 1;
c_mtharue1a5ce12017-10-13 20:47:09 +05302341 inbuf.pgid = current->tgid;
c_mtharu81a0aa72017-11-07 16:13:21 +05302342 inbuf.namelen = init->filelen;
c_mtharue1a5ce12017-10-13 20:47:09 +05302343 inbuf.pageslen = 0;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302344
2345 if (!strcmp(proc_name, "audiopd")) {
2346 fl->spdname = AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME;
2347 VERIFY(err, !fastrpc_mmap_remove_pdr(fl));
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05302348 if (err)
2349 goto bail;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302350 }
2351
c_mtharue1a5ce12017-10-13 20:47:09 +05302352 if (!me->staticpd_flags) {
2353 inbuf.pageslen = 1;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302354 mutex_lock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302355 VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
2356 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
2357 &mem));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302358 mutex_unlock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302359 if (err)
2360 goto bail;
2361 phys = mem->phys;
2362 size = mem->size;
2363 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302364 hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
2365 me->channel[fl->cid].rhvm.vmperm,
2366 me->channel[fl->cid].rhvm.vmcount));
c_mtharue1a5ce12017-10-13 20:47:09 +05302367 if (err) {
2368 pr_err("ADSPRPC: hyp_assign_phys fail err %d",
2369 err);
2370 pr_err("map->phys %llx, map->size %d\n",
2371 phys, (int)size);
2372 goto bail;
2373 }
2374 me->staticpd_flags = 1;
2375 }
2376
2377 ra[0].buf.pv = (void *)&inbuf;
2378 ra[0].buf.len = sizeof(inbuf);
2379 fds[0] = 0;
2380
2381 ra[1].buf.pv = (void *)proc_name;
2382 ra[1].buf.len = inbuf.namelen;
2383 fds[1] = 0;
2384
2385 pages[0].addr = phys;
2386 pages[0].size = size;
2387
2388 ra[2].buf.pv = (void *)pages;
2389 ra[2].buf.len = sizeof(*pages);
2390 fds[2] = 0;
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302391 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
c_mtharue1a5ce12017-10-13 20:47:09 +05302392
2393 ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
2394 ioctl.inv.pra = ra;
2395 ioctl.fds = NULL;
2396 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002397 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002398 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2399 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2400 if (err)
2401 goto bail;
2402 } else {
2403 err = -ENOTTY;
2404 }
2405bail:
c_mtharud91205a2017-11-07 16:01:06 +05302406 kfree(proc_name);
c_mtharue1a5ce12017-10-13 20:47:09 +05302407 if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
2408 me->staticpd_flags = 0;
2409 if (mem && err) {
2410 if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
2411 hyp_assign_phys(mem->phys, (uint64_t)mem->size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302412 me->channel[fl->cid].rhvm.vmid,
2413 me->channel[fl->cid].rhvm.vmcount,
2414 hlosvm, hlosvmperm, 1);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302415 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302416 fastrpc_mmap_free(mem, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302417 mutex_unlock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302418 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302419 if (file) {
2420 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302421 fastrpc_mmap_free(file, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302422 mutex_unlock(&fl->fl_map_mutex);
2423 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002424 return err;
2425}
2426
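/*
 * fastrpc_release_current_dsp_process() - ask the DSP to tear down the
 * remote process associated with this file's tgid.
 */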
2427static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
2428{
2429 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07002430 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002431 remote_arg_t ra[1];
2432 int tgid = 0;
2433
Sathish Ambley36849af2017-02-02 09:35:55 -08002434 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
2435 if (err)
2436 goto bail;
Jeya R984a1a32021-01-18 15:38:07 +05302437 VERIFY(err, fl->sctx != NULL);
2438 if (err)
2439 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302440 VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002441 if (err)
2442 goto bail;
2443 tgid = fl->tgid;
2444 ra[0].buf.pv = (void *)&tgid;
2445 ra[0].buf.len = sizeof(tgid);
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302446 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002447 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
2448 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302449 ioctl.fds = NULL;
2450 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002451 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002452 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2453 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2454bail:
2455 return err;
2456}
2457
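/*
 * fastrpc_mmap_on_dsp() - request the DSP to map the given physical
 * range into the remote process and return the remote address; heap
 * mappings are additionally protected via SCM or hyp_assign_phys().
 */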
2458static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302459 uintptr_t va, uint64_t phys,
2460 size_t size, uintptr_t *raddr)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002461{
Sathish Ambleybae51902017-07-03 15:00:49 -07002462 struct fastrpc_ioctl_invoke_crc ioctl;
c_mtharu63ffc012017-11-16 15:26:56 +05302463 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002464 struct smq_phy_page page;
2465 int num = 1;
2466 remote_arg_t ra[3];
2467 int err = 0;
2468 struct {
2469 int pid;
2470 uint32_t flags;
2471 uintptr_t vaddrin;
2472 int num;
2473 } inargs;
2474 struct {
2475 uintptr_t vaddrout;
2476 } routargs;
2477
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302478 inargs.pid = fl->tgid;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302479 inargs.vaddrin = (uintptr_t)va;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002480 inargs.flags = flags;
2481 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
2482 ra[0].buf.pv = (void *)&inargs;
2483 ra[0].buf.len = sizeof(inargs);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302484 page.addr = phys;
2485 page.size = size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002486 ra[1].buf.pv = (void *)&page;
2487 ra[1].buf.len = num * sizeof(page);
2488
2489 ra[2].buf.pv = (void *)&routargs;
2490 ra[2].buf.len = sizeof(routargs);
2491
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302492 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002493 if (fl->apps->compat)
2494 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
2495 else
2496 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
2497 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302498 ioctl.fds = NULL;
2499 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002500 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002501 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2502 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302503 *raddr = (uintptr_t)routargs.vaddrout;
c_mtharue1a5ce12017-10-13 20:47:09 +05302504 if (err)
2505 goto bail;
2506 if (flags == ADSP_MMAP_HEAP_ADDR) {
2507 struct scm_desc desc = {0};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002508
c_mtharue1a5ce12017-10-13 20:47:09 +05302509 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302510 desc.args[1] = phys;
2511 desc.args[2] = size;
c_mtharue1a5ce12017-10-13 20:47:09 +05302512 desc.arginfo = SCM_ARGS(3);
2513 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
2514 TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
2515 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302516 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302517 hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
2518 me->channel[fl->cid].rhvm.vmperm,
2519 me->channel[fl->cid].rhvm.vmcount));
c_mtharue1a5ce12017-10-13 20:47:09 +05302520 if (err)
2521 goto bail;
2522 }
2523bail:
2524 return err;
2525}
2526
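/*
 * fastrpc_munmap_on_dsp_rh() - undo the protection of heap mappings:
 * for ADSP_MMAP_HEAP_ADDR fetch the security key from the DSP (with a
 * fallback to the older remote call) and clear the TZ protection, and
 * for ADSP_MMAP_REMOTE_HEAP_ADDR reassign the memory back to HLOS.
 */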
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302527static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, uint64_t phys,
2528 size_t size, uint32_t flags)
c_mtharue1a5ce12017-10-13 20:47:09 +05302529{
2530 int err = 0;
c_mtharu63ffc012017-11-16 15:26:56 +05302531 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugu72c90252019-08-29 18:36:08 +05302532 int tgid = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302533 int destVM[1] = {VMID_HLOS};
2534 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
2535
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302536 if (flags == ADSP_MMAP_HEAP_ADDR) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302537 struct fastrpc_ioctl_invoke_crc ioctl;
2538 struct scm_desc desc = {0};
Tharun Kumar Merugu72c90252019-08-29 18:36:08 +05302539 remote_arg_t ra[2];
2540
c_mtharue1a5ce12017-10-13 20:47:09 +05302541 struct {
2542 uint8_t skey;
2543 } routargs;
2544
Tharun Kumar Merugu72c90252019-08-29 18:36:08 +05302545 if (fl == NULL)
2546 goto bail;
2547 tgid = fl->tgid;
2548 ra[0].buf.pv = (void *)&tgid;
2549 ra[0].buf.len = sizeof(tgid);
2550 ra[1].buf.pv = (void *)&routargs;
2551 ra[1].buf.len = sizeof(routargs);
c_mtharue1a5ce12017-10-13 20:47:09 +05302552
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302553 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Tharun Kumar Merugu72c90252019-08-29 18:36:08 +05302554 ioctl.inv.sc = REMOTE_SCALARS_MAKE(9, 1, 1);
c_mtharue1a5ce12017-10-13 20:47:09 +05302555 ioctl.inv.pra = ra;
2556 ioctl.fds = NULL;
2557 ioctl.attrs = NULL;
2558 ioctl.crc = NULL;
Tharun Kumar Merugu72c90252019-08-29 18:36:08 +05302559
c_mtharue1a5ce12017-10-13 20:47:09 +05302560
2561 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2562 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
Mohammed Nayeem Ur Rahman80f45dc2019-09-23 19:35:19 +05302563 if (err == AEE_EUNSUPPORTED) {
2564 remote_arg_t ra[1];
2565
2566			pr_warn("ADSPRPC: Failed to get security key with updated remote call, falling back to older method\n");
2567 ra[0].buf.pv = (void *)&routargs;
2568 ra[0].buf.len = sizeof(routargs);
2569 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
2570 ioctl.inv.pra = ra;
2571 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2572 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2573 }
c_mtharue1a5ce12017-10-13 20:47:09 +05302574 if (err)
2575 goto bail;
Mohammed Nayeem Ur Rahman80f45dc2019-09-23 19:35:19 +05302576
c_mtharue1a5ce12017-10-13 20:47:09 +05302577 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302578 desc.args[1] = phys;
2579 desc.args[2] = size;
c_mtharue1a5ce12017-10-13 20:47:09 +05302580 desc.args[3] = routargs.skey;
2581 desc.arginfo = SCM_ARGS(4);
2582 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
2583 TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302584 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2585 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302586 me->channel[fl->cid].rhvm.vmid,
2587 me->channel[fl->cid].rhvm.vmcount,
2588 destVM, destVMperm, 1));
c_mtharue1a5ce12017-10-13 20:47:09 +05302589 if (err)
2590 goto bail;
2591 }
2592
2593bail:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002594 return err;
2595}
2596
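/*
 * fastrpc_munmap_on_dsp() - request the DSP to unmap a previously mapped
 * region and, for heap mappings, drop the associated protection via
 * fastrpc_munmap_on_dsp_rh().
 */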
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302597static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl, uintptr_t raddr,
2598 uint64_t phys, size_t size, uint32_t flags)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002599{
Sathish Ambleybae51902017-07-03 15:00:49 -07002600 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002601 remote_arg_t ra[1];
2602 int err = 0;
2603 struct {
2604 int pid;
2605 uintptr_t vaddrout;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302606 size_t size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002607 } inargs;
2608
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302609 inargs.pid = fl->tgid;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302610 inargs.size = size;
2611 inargs.vaddrout = raddr;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002612 ra[0].buf.pv = (void *)&inargs;
2613 ra[0].buf.len = sizeof(inargs);
2614
Tharun Kumar Merugucc2e11e2019-02-02 01:22:47 +05302615 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002616 if (fl->apps->compat)
2617 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
2618 else
2619 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
2620 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302621 ioctl.fds = NULL;
2622 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002623 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002624 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2625 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
c_mtharue1a5ce12017-10-13 20:47:09 +05302626 if (err)
2627 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302628 if (flags == ADSP_MMAP_HEAP_ADDR ||
2629 flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2630 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, phys, size, flags));
c_mtharue1a5ce12017-10-13 20:47:09 +05302631 if (err)
2632 goto bail;
2633 }
2634bail:
2635 return err;
2636}
2637
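/*
 * fastrpc_mmap_remove_ssr() - on subsystem restart, unwind every global
 * remote-heap mapping: unprotect it, capture it in a ramdump when
 * ramdump collection is enabled, and free the mapping.
 */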
2638static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
2639{
2640 struct fastrpc_mmap *match = NULL, *map = NULL;
2641 struct hlist_node *n = NULL;
2642 int err = 0, ret = 0;
2643 struct fastrpc_apps *me = &gfa;
2644 struct ramdump_segment *ramdump_segments_rh = NULL;
2645
2646 do {
2647 match = NULL;
2648 spin_lock(&me->hlock);
2649 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
2650 match = map;
2651 hlist_del_init(&map->hn);
2652 break;
2653 }
2654 spin_unlock(&me->hlock);
2655
2656 if (match) {
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302657 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match->phys,
2658 match->size, match->flags));
c_mtharue1a5ce12017-10-13 20:47:09 +05302659 if (err)
2660 goto bail;
2661 if (me->channel[0].ramdumpenabled) {
2662 ramdump_segments_rh = kcalloc(1,
2663 sizeof(struct ramdump_segment), GFP_KERNEL);
2664 if (ramdump_segments_rh) {
2665 ramdump_segments_rh->address =
2666 match->phys;
2667 ramdump_segments_rh->size = match->size;
2668 ret = do_elf_ramdump(
2669 me->channel[0].remoteheap_ramdump_dev,
2670 ramdump_segments_rh, 1);
2671 if (ret < 0)
2672						pr_err("ADSPRPC: unable to dump heap\n");
2673 kfree(ramdump_segments_rh);
2674 }
2675 }
c_mtharu7bd6a422017-10-17 18:15:37 +05302676 fastrpc_mmap_free(match, 0);
c_mtharue1a5ce12017-10-13 20:47:09 +05302677 }
2678 } while (match);
2679bail:
2680 if (err && match)
2681 fastrpc_mmap_add(match);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002682 return err;
2683}
2684
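/*
 * fastrpc_mmap_remove_pdr() - if the audio PD restarted since the last
 * check, clean up stale remote-heap mappings; fail with -ENOTCONN while
 * the PD is not up.
 */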
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302685static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl)
2686{
2687 struct fastrpc_apps *me = &gfa;
2688 int session = 0, err = 0;
2689
2690 VERIFY(err, !fastrpc_get_adsp_session(
2691 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
2692 if (err)
2693 goto bail;
2694 if (me->channel[fl->cid].spd[session].pdrcount !=
2695 me->channel[fl->cid].spd[session].prevpdrcount) {
2696 if (fastrpc_mmap_remove_ssr(fl))
2697 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
2698 me->channel[fl->cid].spd[session].prevpdrcount =
2699 me->channel[fl->cid].spd[session].pdrcount;
2700 }
2701 if (!me->channel[fl->cid].spd[session].ispdup) {
2702 VERIFY(err, 0);
2703 if (err) {
2704 err = -ENOTCONN;
2705 goto bail;
2706 }
2707 }
2708bail:
2709 return err;
2710}
2711
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002712static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302713 size_t len, struct fastrpc_mmap **ppmap);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002714
2715static void fastrpc_mmap_add(struct fastrpc_mmap *map);
2716
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05302717static inline void get_fastrpc_ioctl_mmap_64(
2718 struct fastrpc_ioctl_mmap_64 *mmap64,
2719 struct fastrpc_ioctl_mmap *immap)
2720{
2721 immap->fd = mmap64->fd;
2722 immap->flags = mmap64->flags;
2723 immap->vaddrin = (uintptr_t)mmap64->vaddrin;
2724 immap->size = mmap64->size;
2725}
2726
2727static inline void put_fastrpc_ioctl_mmap_64(
2728 struct fastrpc_ioctl_mmap_64 *mmap64,
2729 struct fastrpc_ioctl_mmap *immap)
2730{
2731 mmap64->vaddrout = (uint64_t)immap->vaddrout;
2732}
2733
2734static inline void get_fastrpc_ioctl_munmap_64(
2735 struct fastrpc_ioctl_munmap_64 *munmap64,
2736 struct fastrpc_ioctl_munmap *imunmap)
2737{
2738 imunmap->vaddrout = (uintptr_t)munmap64->vaddrout;
2739 imunmap->size = munmap64->size;
2740}
2741
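/*
 * fastrpc_internal_munmap() - handle the MUNMAP ioctl: free a matching
 * driver-allocated ADSP_MMAP_ADD_PAGES buffer if one exists, otherwise
 * look up the user mapping, unmap it on the DSP and free it.
 */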
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002742static int fastrpc_internal_munmap(struct fastrpc_file *fl,
2743 struct fastrpc_ioctl_munmap *ud)
2744{
2745 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302746 struct fastrpc_mmap *map = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302747 struct fastrpc_buf *rbuf = NULL, *free = NULL;
2748 struct hlist_node *n;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002749
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302750 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302751
2752 spin_lock(&fl->hlock);
2753 hlist_for_each_entry_safe(rbuf, n, &fl->remote_bufs, hn_rem) {
2754 if (rbuf->raddr && (rbuf->flags == ADSP_MMAP_ADD_PAGES)) {
2755 if ((rbuf->raddr == ud->vaddrout) &&
2756 (rbuf->size == ud->size)) {
2757 free = rbuf;
2758 break;
2759 }
2760 }
2761 }
2762 spin_unlock(&fl->hlock);
2763
2764 if (free) {
2765 VERIFY(err, !fastrpc_munmap_on_dsp(fl, free->raddr,
2766 free->phys, free->size, free->flags));
2767 if (err)
2768 goto bail;
2769 fastrpc_buf_free(rbuf, 0);
2770 mutex_unlock(&fl->map_mutex);
2771 return err;
2772 }
2773
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302774 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002775 VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302776 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002777 if (err)
2778 goto bail;
Vamsi krishna Gattupalli4e87ecb2020-12-14 11:06:27 +05302779 VERIFY(err, map != NULL);
2780 if (err) {
2781 err = -EINVAL;
2782 goto bail;
2783 }
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302784 VERIFY(err, !fastrpc_munmap_on_dsp(fl, map->raddr,
Vamsi krishna Gattupalli4e87ecb2020-12-14 11:06:27 +05302785 map->phys, map->size, map->flags));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002786 if (err)
2787 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302788 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302789 fastrpc_mmap_free(map, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302790 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002791bail:
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302792 if (err && map) {
2793 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002794 fastrpc_mmap_add(map);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302795 mutex_unlock(&fl->fl_map_mutex);
2796 }
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302797 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002798 return err;
2799}
2800
c_mtharu7bd6a422017-10-17 18:15:37 +05302801static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
2802 struct fastrpc_ioctl_munmap_fd *ud) {
2803 int err = 0;
2804 struct fastrpc_mmap *map = NULL;
2805
2806 VERIFY(err, (fl && ud));
2807 if (err)
2808 goto bail;
Mohammed Nayeem Ur Rahmanfc548a52020-01-07 17:07:55 +05302809 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302810 mutex_lock(&fl->fl_map_mutex);
Tharun Kumar Merugu09fc6152018-02-16 13:13:12 +05302811 if (fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
2812 pr_err("adsprpc: mapping not found to unmap %d va %llx %x\n",
c_mtharu7bd6a422017-10-17 18:15:37 +05302813 ud->fd, (unsigned long long)ud->va,
2814 (unsigned int)ud->len);
2815 err = -1;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302816 mutex_unlock(&fl->fl_map_mutex);
Mohammed Nayeem Ur Rahmanfc548a52020-01-07 17:07:55 +05302817 mutex_unlock(&fl->map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302818 goto bail;
2819 }
2820 if (map)
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302821 fastrpc_mmap_free(map, 0);
2822 mutex_unlock(&fl->fl_map_mutex);
Mohammed Nayeem Ur Rahmanfc548a52020-01-07 17:07:55 +05302823 mutex_unlock(&fl->map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302824bail:
2825 return err;
2826}
2827
2828
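/*
 * fastrpc_internal_mmap() - handler for FASTRPC_IOCTL_MMAP.
 * ADSP_MMAP_ADD_PAGES allocates a new kernel DMA buffer (user-supplied
 * pages are rejected); other flags map the caller's buffer through
 * fastrpc_mmap_create(). Either way the region is registered with the
 * DSP and the remote address is returned in ud->vaddrout.
 */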
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002829static int fastrpc_internal_mmap(struct fastrpc_file *fl,
2830 struct fastrpc_ioctl_mmap *ud)
2831{
2832
c_mtharue1a5ce12017-10-13 20:47:09 +05302833 struct fastrpc_mmap *map = NULL;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302834 struct fastrpc_buf *rbuf = NULL;
2835 unsigned long dma_attr = 0;
2836 uintptr_t raddr = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002837 int err = 0;
2838
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302839 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302840 if (ud->flags == ADSP_MMAP_ADD_PAGES) {
2841 if (ud->vaddrin) {
2842 err = -EINVAL;
2843 pr_err("adsprpc: %s: %s: ERROR: adding user allocated pages is not supported\n",
2844 current->comm, __func__);
2845 goto bail;
2846 }
2847 dma_attr = DMA_ATTR_EXEC_MAPPING |
2848 DMA_ATTR_NO_KERNEL_MAPPING |
2849 DMA_ATTR_FORCE_NON_COHERENT;
2850 err = fastrpc_buf_alloc(fl, ud->size, dma_attr, ud->flags,
2851 1, &rbuf);
2852 if (err)
2853 goto bail;
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302854 err = fastrpc_mmap_on_dsp(fl, ud->flags, 0,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302855 rbuf->phys, rbuf->size, &raddr);
2856 if (err)
2857 goto bail;
2858 rbuf->raddr = raddr;
2859 } else {
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302860
2861 uintptr_t va_to_dsp;
2862
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302863 mutex_lock(&fl->fl_map_mutex);
2864 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
2865 ud->size, ud->flags, 1, &map)) {
Mohammed Nayeem Ur Rahmanaf5f6102019-10-09 13:36:52 +05302866 ud->vaddrout = map->raddr;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302867 mutex_unlock(&fl->fl_map_mutex);
2868 mutex_unlock(&fl->map_mutex);
2869 return 0;
2870 }
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302871
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302872 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
2873 (uintptr_t)ud->vaddrin, ud->size,
2874 ud->flags, &map));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302875 mutex_unlock(&fl->fl_map_mutex);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302876 if (err)
2877 goto bail;
Tharun Kumar Merugu0d0b69e2018-09-14 22:30:58 +05302878
2879 if (ud->flags == ADSP_MMAP_HEAP_ADDR ||
2880 ud->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
2881 va_to_dsp = 0;
2882 else
2883 va_to_dsp = (uintptr_t)map->va;
2884 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, va_to_dsp,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05302885 map->phys, map->size, &raddr));
2886 if (err)
2887 goto bail;
2888 map->raddr = raddr;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302889 }
Mohammed Nayeem Ur Rahman3ac5d322018-09-24 13:54:08 +05302890 ud->vaddrout = raddr;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002891 bail:
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302892 if (err && map) {
2893 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302894 fastrpc_mmap_free(map, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302895 mutex_unlock(&fl->fl_map_mutex);
2896 }
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302897 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002898 return err;
2899}
2900
2901static void fastrpc_channel_close(struct kref *kref)
2902{
2903 struct fastrpc_apps *me = &gfa;
2904 struct fastrpc_channel_ctx *ctx;
2905 int cid;
2906
2907 ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
2908 cid = ctx - &gcinfo[0];
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05302909 if (me->glink) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05302910 fastrpc_glink_close(ctx->chan, cid);
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05302911 ctx->chan = NULL;
2912 }
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302913 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002914 pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
2915 MAJOR(me->dev_no), cid);
2916}
2917
2918static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
2919
2920static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302921 int secure, int sharedcb, struct fastrpc_session_ctx **session)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002922{
2923 struct fastrpc_apps *me = &gfa;
2924 int idx = 0, err = 0;
2925
2926 if (chan->sesscount) {
2927 for (idx = 0; idx < chan->sesscount; ++idx) {
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302928 if ((sharedcb && chan->session[idx].smmu.sharedcb) ||
2929 (!chan->session[idx].used &&
2930 chan->session[idx].smmu.secure
2931 == secure && !sharedcb)) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002932 chan->session[idx].used = 1;
2933 break;
2934 }
2935 }
2936 VERIFY(err, idx < chan->sesscount);
2937 if (err)
2938 goto bail;
2939 chan->session[idx].smmu.faults = 0;
2940 } else {
2941 VERIFY(err, me->dev != NULL);
2942 if (err)
2943 goto bail;
2944 chan->session[0].dev = me->dev;
c_mtharue1a5ce12017-10-13 20:47:09 +05302945 chan->session[0].smmu.dev = me->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002946 }
2947
2948 *session = &chan->session[idx];
2949 bail:
2950 return err;
2951}
2952
c_mtharue1a5ce12017-10-13 20:47:09 +05302953static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
2954 size_t size)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002955{
2956 if (glink_queue_rx_intent(h, NULL, size))
2957 return false;
2958 return true;
2959}
2960
c_mtharue1a5ce12017-10-13 20:47:09 +05302961static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002962 const void *pkt_priv, const void *ptr)
2963{
2964}
2965
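/*
 * fastrpc_glink_notify_rx() - glink receive callback for RPC responses.
 * The context id carried in the response indexes ctxtable; the entry is
 * validated under ctxlock before the waiting invocation is completed.
 * Invalid or stale responses are dropped and the rx buffer is returned
 * to glink immediately.
 */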
c_mtharue1a5ce12017-10-13 20:47:09 +05302966static void fastrpc_glink_notify_rx(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002967 const void *pkt_priv, const void *ptr, size_t size)
2968{
2969 struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302970 struct fastrpc_apps *me = &gfa;
2971 uint32_t index;
c_mtharufdac6892017-10-12 13:09:01 +05302972 int err = 0;
Jeya R8fa59d62020-11-04 20:42:59 +05302973 unsigned long irq_flags = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002974
c_mtharufdac6892017-10-12 13:09:01 +05302975 VERIFY(err, (rsp && size >= sizeof(*rsp)));
2976 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302977 goto bail;
2978
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302979 index = (uint32_t)((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
2980 VERIFY(err, index < FASTRPC_CTX_MAX);
c_mtharufdac6892017-10-12 13:09:01 +05302981 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302982 goto bail;
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302983
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302984 VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
2985 if (err)
2986 goto bail;
2987
Jeya R8fa59d62020-11-04 20:42:59 +05302988 spin_lock_irqsave(&me->ctxlock, irq_flags);
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302989 VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp->ctx & ~3)) &&
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302990 me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
Jeya R8fa59d62020-11-04 20:42:59 +05302991 if (err) {
2992 spin_unlock_irqrestore(&me->ctxlock, irq_flags);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302993 goto bail;
Jeya R8fa59d62020-11-04 20:42:59 +05302994 }
Mohammed Nayeem Ur Rahman32ba95d2019-07-26 17:31:37 +05302995 me->ctxtable[index]->handle = handle;
2996 me->ctxtable[index]->ptr = ptr;
Jeya R8fa59d62020-11-04 20:42:59 +05302997 spin_unlock_irqrestore(&me->ctxlock, irq_flags);
Mohammed Nayeem Ur Rahman32ba95d2019-07-26 17:31:37 +05302998
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302999 context_notify_user(me->ctxtable[index], rsp->retval);
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05303000bail:
Jeya R859f8012020-08-09 02:09:14 +05303001 if (err) {
3002 glink_rx_done(handle, ptr, true);
c_mtharufdac6892017-10-12 13:09:01 +05303003 pr_err("adsprpc: invalid response or context\n");
Jeya R859f8012020-08-09 02:09:14 +05303004 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003005}
3006
c_mtharue1a5ce12017-10-13 20:47:09 +05303007static void fastrpc_glink_notify_state(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003008 unsigned int event)
3009{
3010 struct fastrpc_apps *me = &gfa;
3011 int cid = (int)(uintptr_t)priv;
3012 struct fastrpc_glink_info *link;
3013
3014 if (cid < 0 || cid >= NUM_CHANNELS)
3015 return;
3016 link = &me->channel[cid].link;
3017 switch (event) {
3018 case GLINK_CONNECTED:
3019 link->port_state = FASTRPC_LINK_CONNECTED;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05303020 complete(&me->channel[cid].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003021 break;
3022 case GLINK_LOCAL_DISCONNECTED:
3023 link->port_state = FASTRPC_LINK_DISCONNECTED;
3024 break;
3025 case GLINK_REMOTE_DISCONNECTED:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003026 break;
3027 default:
3028 break;
3029 }
3030}
3031
3032static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
3033 struct fastrpc_session_ctx **session)
3034{
3035 int err = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303036 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003037
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303038 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003039 if (!*session)
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303040 err = fastrpc_session_alloc_locked(chan, secure, 0, session);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303041 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003042 return err;
3043}
3044
3045static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
3046 struct fastrpc_session_ctx *session)
3047{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303048 struct fastrpc_apps *me = &gfa;
3049
3050 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003051 session->used = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303052 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003053}
3054
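/*
 * fastrpc_file_free() - release all per-fd state on final close: pending
 * contexts, cached and remote buffers, mappings, perf counters and SMMU
 * sessions, plus the channel reference taken in fastrpc_channel_open()
 * when this fd still holds one.
 */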
3055static int fastrpc_file_free(struct fastrpc_file *fl)
3056{
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303057 struct hlist_node *n = NULL;
Tharun Kumar Merugu3e966762018-04-04 10:56:44 +05303058 struct fastrpc_mmap *map = NULL, *lmap = NULL;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303059 struct fastrpc_perf *perf = NULL, *fperf = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003060 int cid;
3061
3062 if (!fl)
3063 return 0;
3064 cid = fl->cid;
3065
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05303066 (void)fastrpc_release_current_dsp_process(fl);
3067
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003068 spin_lock(&fl->apps->hlock);
3069 hlist_del_init(&fl->hn);
3070 spin_unlock(&fl->apps->hlock);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303071 kfree(fl->debug_buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003072
Sathish Ambleyd7fbcbb2017-03-08 10:55:48 -08003073 if (!fl->sctx) {
3074 kfree(fl);
3075 return 0;
3076 }
tharun kumar9f899ea2017-07-03 17:07:03 +05303077 spin_lock(&fl->hlock);
3078 fl->file_close = 1;
3079 spin_unlock(&fl->hlock);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303080 if (!IS_ERR_OR_NULL(fl->init_mem))
3081 fastrpc_buf_free(fl->init_mem, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003082 fastrpc_context_list_dtor(fl);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303083 fastrpc_cached_buf_list_free(fl);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303084 mutex_lock(&fl->fl_map_mutex);
Tharun Kumar Merugu3e966762018-04-04 10:56:44 +05303085 do {
3086 lmap = NULL;
3087 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
3088 hlist_del_init(&map->hn);
3089 lmap = map;
3090 break;
3091 }
3092 fastrpc_mmap_free(lmap, 1);
3093 } while (lmap);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303094 mutex_unlock(&fl->fl_map_mutex);
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303095 if (fl->refcount && (fl->ssrcount == fl->apps->channel[cid].ssrcount))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003096 kref_put_mutex(&fl->apps->channel[cid].kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303097 fastrpc_channel_close, &fl->apps->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003098 if (fl->sctx)
3099 fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
3100 if (fl->secsctx)
3101 fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303102
3103 mutex_lock(&fl->perf_mutex);
3104 do {
3105 struct hlist_node *pn = NULL;
3106
3107 fperf = NULL;
3108 hlist_for_each_entry_safe(perf, pn, &fl->perf, hn) {
3109 hlist_del_init(&perf->hn);
3110 fperf = perf;
3111 break;
3112 }
3113 kfree(fperf);
3114 } while (fperf);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303115 fastrpc_remote_buf_list_free(fl);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303116 mutex_unlock(&fl->perf_mutex);
3117 mutex_destroy(&fl->perf_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303118 mutex_destroy(&fl->fl_map_mutex);
Tharun Kumar Merugu8714e642018-05-17 15:21:08 +05303119 mutex_destroy(&fl->map_mutex);
Jeya R2bcad4f2021-06-10 13:03:44 +05303120 mutex_destroy(&fl->pm_qos_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003121 kfree(fl);
3122 return 0;
3123}
3124
3125static int fastrpc_device_release(struct inode *inode, struct file *file)
3126{
3127 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
3128
3129 if (fl) {
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303130 if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
3131 pm_qos_remove_request(&fl->pm_qos_req);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003132 if (fl->debugfs_file != NULL)
3133 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003134 fastrpc_file_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +05303135 file->private_data = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003136 }
3137 return 0;
3138}
3139
3140static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
3141 void *priv)
3142{
3143 struct fastrpc_apps *me = &gfa;
3144 int cid = (int)((uintptr_t)priv);
3145 struct fastrpc_glink_info *link;
3146
3147 if (cid < 0 || cid >= NUM_CHANNELS)
3148 return;
3149
3150 link = &me->channel[cid].link;
3151 switch (cb_info->link_state) {
3152 case GLINK_LINK_STATE_UP:
3153 link->link_state = FASTRPC_LINK_STATE_UP;
3154 complete(&me->channel[cid].work);
3155 break;
3156 case GLINK_LINK_STATE_DOWN:
3157 link->link_state = FASTRPC_LINK_STATE_DOWN;
3158 break;
3159 default:
3160 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
3161 break;
3162 }
3163}
3164
3165static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
3166{
3167 int err = 0;
3168 struct fastrpc_glink_info *link;
3169
3170 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3171 if (err)
3172 goto bail;
3173
3174 link = &me->channel[cid].link;
3175 if (link->link_notify_handle != NULL)
3176 goto bail;
3177
3178 link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
3179 link->link_notify_handle = glink_register_link_state_cb(
3180 &link->link_info,
3181 (void *)((uintptr_t)cid));
3182 VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
3183 if (err) {
3184 link->link_notify_handle = NULL;
3185 goto bail;
3186 }
3187 VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
3188 RPC_TIMEOUT));
3189bail:
3190 return err;
3191}
3192
3193static void fastrpc_glink_close(void *chan, int cid)
3194{
3195 int err = 0;
3196 struct fastrpc_glink_info *link;
3197
3198 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3199 if (err)
3200 return;
3201 link = &gfa.channel[cid].link;
3202
c_mtharu314a4202017-11-15 22:09:17 +05303203 if (link->port_state == FASTRPC_LINK_CONNECTED ||
3204 link->port_state == FASTRPC_LINK_REMOTE_DISCONNECTING) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003205 link->port_state = FASTRPC_LINK_DISCONNECTING;
3206 glink_close(chan);
3207 }
3208}
3209
3210static int fastrpc_glink_open(int cid)
3211{
3212 int err = 0;
3213 void *handle = NULL;
3214 struct fastrpc_apps *me = &gfa;
3215 struct glink_open_config *cfg;
3216 struct fastrpc_glink_info *link;
3217
3218 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
3219 if (err)
3220 goto bail;
3221 link = &me->channel[cid].link;
3222 cfg = &me->channel[cid].link.cfg;
3223 VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
3224 if (err)
3225 goto bail;
3226
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05303227 VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
3228 if (err)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003229 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003230
3231 link->port_state = FASTRPC_LINK_CONNECTING;
3232 cfg->priv = (void *)(uintptr_t)cid;
3233 cfg->edge = gcinfo[cid].link.link_info.edge;
3234 cfg->transport = gcinfo[cid].link.link_info.transport;
3235 cfg->name = FASTRPC_GLINK_GUID;
3236 cfg->notify_rx = fastrpc_glink_notify_rx;
3237 cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
3238 cfg->notify_state = fastrpc_glink_notify_state;
3239 cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
3240 handle = glink_open(cfg);
3241 VERIFY(err, !IS_ERR_OR_NULL(handle));
c_mtharu6e1d26b2017-10-09 16:05:24 +05303242 if (err) {
3243 if (link->port_state == FASTRPC_LINK_CONNECTING)
3244 link->port_state = FASTRPC_LINK_DISCONNECTED;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003245 goto bail;
c_mtharu6e1d26b2017-10-09 16:05:24 +05303246 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003247 me->channel[cid].chan = handle;
3248bail:
3249 return err;
3250}
3251
Sathish Ambley1ca68232017-01-19 10:32:55 -08003252static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
3253{
3254 filp->private_data = inode->i_private;
3255 return 0;
3256}
3257
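/*
 * fastrpc_debugfs_read() - dump driver state into a DEBUGFS_SIZE buffer.
 * Without a per-fd private pointer it prints the global channel, CMA
 * heap and global map tables; for a specific fastrpc_file it prints the
 * session attributes followed by its maps, cached buffers and
 * pending/interrupted smq contexts.
 */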
3258static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
3259 size_t count, loff_t *position)
3260{
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303261 struct fastrpc_apps *me = &gfa;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003262 struct fastrpc_file *fl = filp->private_data;
3263 struct hlist_node *n;
c_mtharue1a5ce12017-10-13 20:47:09 +05303264 struct fastrpc_buf *buf = NULL;
3265 struct fastrpc_mmap *map = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303266 struct fastrpc_mmap *gmaps = NULL;
c_mtharue1a5ce12017-10-13 20:47:09 +05303267 struct smq_invoke_ctx *ictx = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303268 struct fastrpc_channel_ctx *chan = NULL;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003269 unsigned int len = 0;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303270 int i, j, sess_used = 0, ret = 0;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003271 char *fileinfo = NULL;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303272 char single_line[UL_SIZE] = "----------------";
3273 char title[UL_SIZE] = "=========================";
Sathish Ambley1ca68232017-01-19 10:32:55 -08003274
3275 fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
3276 if (!fileinfo)
3277 goto bail;
3278 if (fl == NULL) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303279 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3280 "\n%s %s %s\n", title, " CHANNEL INFO ", title);
3281 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3282 "%-8s|%-9s|%-9s|%-14s|%-9s|%-13s\n",
3283 "susbsys", "refcount", "sesscount", "issubsystemup",
3284 "ssrcount", "session_used");
3285 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3286 "-%s%s%s%s-\n", single_line, single_line,
3287 single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003288 for (i = 0; i < NUM_CHANNELS; i++) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303289 sess_used = 0;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003290 chan = &gcinfo[i];
3291 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303292 DEBUGFS_SIZE - len, "%-8s", chan->subsys);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003293 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303294 DEBUGFS_SIZE - len, "|%-9d",
3295 chan->kref.refcount.counter);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303296 len += scnprintf(fileinfo + len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303297 DEBUGFS_SIZE - len, "|%-9d",
3298 chan->sesscount);
3299 len += scnprintf(fileinfo + len,
3300 DEBUGFS_SIZE - len, "|%-14d",
3301 chan->issubsystemup);
3302 len += scnprintf(fileinfo + len,
3303 DEBUGFS_SIZE - len, "|%-9d",
3304 chan->ssrcount);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003305 for (j = 0; j < chan->sesscount; j++) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303306 sess_used += chan->session[j].used;
3307 }
3308 len += scnprintf(fileinfo + len,
3309 DEBUGFS_SIZE - len, "|%-13d\n", sess_used);
3310
3311 }
3312 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3313 "\n%s%s%s\n", "=============",
3314 " CMA HEAP ", "==============");
3315 len += scnprintf(fileinfo + len,
3316 DEBUGFS_SIZE - len, "%-20s|%-20s\n", "addr", "size");
3317 len += scnprintf(fileinfo + len,
3318 DEBUGFS_SIZE - len, "--%s%s---\n",
3319 single_line, single_line);
3320 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3321 "0x%-18llX", me->range.addr);
3322 len += scnprintf(fileinfo + len,
3323 DEBUGFS_SIZE - len, "|0x%-18llX\n", me->range.size);
3324 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3325 "\n==========%s %s %s===========\n",
3326 title, " GMAPS ", title);
3327 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3328 "%-20s|%-20s|%-20s|%-20s\n",
3329 "fd", "phys", "size", "va");
3330 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3331 "%s%s%s%s%s\n", single_line, single_line,
3332 single_line, single_line, single_line);
3333 hlist_for_each_entry_safe(gmaps, n, &me->maps, hn) {
3334 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3335 "%-20d|0x%-18llX|0x%-18X|0x%-20lX\n\n",
3336 gmaps->fd, gmaps->phys,
3337 (uint32_t)gmaps->size,
3338 gmaps->va);
3339 }
3340 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3341 "%-20s|%-20s|%-20s|%-20s\n",
3342 "len", "refs", "raddr", "flags");
3343 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3344 "%s%s%s%s%s\n", single_line, single_line,
3345 single_line, single_line, single_line);
3346 hlist_for_each_entry_safe(gmaps, n, &me->maps, hn) {
3347 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3348 "0x%-18X|%-20d|%-20lu|%-20u\n",
3349 (uint32_t)gmaps->len, gmaps->refs,
3350 gmaps->raddr, gmaps->flags);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003351 }
3352 } else {
3353 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303354 "\n%s %13s %d\n", "cid", ":", fl->cid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003355 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303356 "%s %12s %d\n", "tgid", ":", fl->tgid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003357 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303358 "%s %7s %d\n", "sessionid", ":", fl->sessionid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003359 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303360 "%s %8s %d\n", "ssrcount", ":", fl->ssrcount);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303361 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303362 "%s %8s %d\n", "refcount", ":", fl->refcount);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003363 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303364 "%s %14s %d\n", "pd", ":", fl->pd);
3365 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3366 "%s %9s %s\n", "spdname", ":", fl->spdname);
3367 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3368 "%s %6s %d\n", "file_close", ":", fl->file_close);
3369 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3370 "%s %8s %d\n", "sharedcb", ":", fl->sharedcb);
3371 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3372 "%s %9s %d\n", "profile", ":", fl->profile);
3373 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3374 "%s %3s %d\n", "smmu.coherent", ":",
3375 fl->sctx->smmu.coherent);
3376 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3377 "%s %4s %d\n", "smmu.enabled", ":",
3378 fl->sctx->smmu.enabled);
3379 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3380 "%s %9s %d\n", "smmu.cb", ":", fl->sctx->smmu.cb);
3381 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3382 "%s %5s %d\n", "smmu.secure", ":",
3383 fl->sctx->smmu.secure);
3384 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3385 "%s %5s %d\n", "smmu.faults", ":",
3386 fl->sctx->smmu.faults);
3387 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3388 "%s %s %d\n", "link.link_state",
3389 ":", *&me->channel[fl->cid].link.link_state);
3390
3391 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3392 "\n=======%s %s %s======\n", title,
3393 " LIST OF MAPS ", title);
3394
3395 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3396 "%-20s|%-20s|%-20s\n", "va", "phys", "size");
3397 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3398 "%s%s%s%s%s\n",
3399 single_line, single_line, single_line,
3400 single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003401 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303402 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3403 "0x%-20lX|0x%-20llX|0x%-20zu\n\n",
3404 map->va, map->phys,
3405 map->size);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003406 }
3407 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303408 "%-20s|%-20s|%-20s|%-20s\n",
3409 "len", "refs",
3410 "raddr", "uncached");
3411 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3412 "%s%s%s%s%s\n",
3413 single_line, single_line, single_line,
3414 single_line, single_line);
3415 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
3416 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3417 "%-20zu|%-20d|0x%-20lX|%-20d\n\n",
3418 map->len, map->refs, map->raddr,
3419 map->uncached);
3420 }
3421 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3422 "%-20s|%-20s\n", "secure", "attr");
3423 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3424 "%s%s%s%s%s\n",
3425 single_line, single_line, single_line,
3426 single_line, single_line);
3427 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
3428 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3429 "%-20d|0x%-20lX\n\n",
3430 map->secure, map->attr);
3431 }
3432 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303433 "%s %d\n\n",
3434 "KERNEL MEMORY ALLOCATION:", 1);
3435 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303436 "\n======%s %s %s======\n", title,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303437 " LIST OF CACHED BUFS ", title);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303438 spin_lock(&fl->hlock);
3439 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303440 "%-19s|%-19s|%-19s|%-19s\n",
3441 "virt", "phys", "size", "dma_attr");
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303442 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3443 "%s%s%s%s%s\n", single_line, single_line,
3444 single_line, single_line, single_line);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303445 hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303446 len += scnprintf(fileinfo + len,
3447 DEBUGFS_SIZE - len,
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303448 "0x%-17p|0x%-17llX|%-19zu|0x%-17lX\n",
3449 buf->virt, (uint64_t)buf->phys, buf->size,
3450 buf->dma_attr);
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303451 }
3452 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3453 "\n%s %s %s\n", title,
3454 " LIST OF PENDING SMQCONTEXTS ", title);
3455
3456 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3457 "%-20s|%-10s|%-10s|%-10s|%-20s\n",
3458 "sc", "pid", "tgid", "used", "ctxid");
3459 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3460 "%s%s%s%s%s\n", single_line, single_line,
3461 single_line, single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003462 hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
3463 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303464 "0x%-18X|%-10d|%-10d|%-10zu|0x%-20llX\n\n",
3465 ictx->sc, ictx->pid, ictx->tgid,
3466 ictx->used, ictx->ctxid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003467 }
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303468
Sathish Ambley1ca68232017-01-19 10:32:55 -08003469 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303470 "\n%s %s %s\n", title,
3471 " LIST OF INTERRUPTED SMQCONTEXTS ", title);
3472
3473 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3474 "%-20s|%-10s|%-10s|%-10s|%-20s\n",
3475 "sc", "pid", "tgid", "used", "ctxid");
3476 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3477 "%s%s%s%s%s\n", single_line, single_line,
3478 single_line, single_line, single_line);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003479 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303480 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3481 "%-20u|%-20d|%-20d|%-20zu|0x%-20llX\n\n",
3482 ictx->sc, ictx->pid, ictx->tgid,
3483 ictx->used, ictx->ctxid);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003484 }
3485 spin_unlock(&fl->hlock);
3486 }
3487 if (len > DEBUGFS_SIZE)
3488 len = DEBUGFS_SIZE;
3489 ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
3490 kfree(fileinfo);
3491bail:
3492 return ret;
3493}
3494
3495static const struct file_operations debugfs_fops = {
3496 .open = fastrpc_debugfs_open,
3497 .read = fastrpc_debugfs_read,
3498};
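
/*
 * fastrpc_channel_open() - bring up the transport for fl->cid under
 * smd_mutex. The first opener registers and opens the glink (or SMD)
 * channel, waits for the port to connect and queues rx intents; after an
 * SSR on channel 0 the stale remote heap is unmapped before reuse.
 */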
Sathish Ambley36849af2017-02-02 09:35:55 -08003499static int fastrpc_channel_open(struct fastrpc_file *fl)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003500{
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003501 struct fastrpc_apps *me = &gfa;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05303502 int cid = -1, ii, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003503
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303504 mutex_lock(&me->smd_mutex);
3505
Sathish Ambley36849af2017-02-02 09:35:55 -08003506 VERIFY(err, fl && fl->sctx);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003507 if (err)
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303508 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08003509 cid = fl->cid;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05303510 VERIFY(err, cid >= ADSP_DOMAIN_ID && cid < NUM_CHANNELS);
3511 if (err) {
3512 err = -ECHRNG;
c_mtharu314a4202017-11-15 22:09:17 +05303513 goto bail;
Mohammed Nayeem Ur Rahmancd836462020-04-01 14:30:33 +05303514 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303515 if (me->channel[cid].ssrcount !=
3516 me->channel[cid].prevssrcount) {
3517 if (!me->channel[cid].issubsystemup) {
3518 VERIFY(err, 0);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303519 if (err) {
3520 err = -ENOTCONN;
c_mtharue1a5ce12017-10-13 20:47:09 +05303521 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303522 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303523 }
3524 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003525 fl->ssrcount = me->channel[cid].ssrcount;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303526 fl->refcount = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003527 if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
c_mtharue1a5ce12017-10-13 20:47:09 +05303528 (me->channel[cid].chan == NULL)) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303529 if (me->glink) {
3530 VERIFY(err, 0 == fastrpc_glink_register(cid, me));
3531 if (err)
3532 goto bail;
3533 VERIFY(err, 0 == fastrpc_glink_open(cid));
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05303534 VERIFY(err,
3535 wait_for_completion_timeout(&me->channel[cid].workport,
3536 RPC_TIMEOUT));
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303537 } else {
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05303538 if (me->channel[cid].chan == NULL) {
3539 VERIFY(err, !smd_named_open_on_edge(
3540 FASTRPC_SMD_GUID,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303541 gcinfo[cid].channel,
3542 (smd_channel_t **)&me->channel[cid].chan,
3543 (void *)(uintptr_t)cid,
3544 smd_event_handler));
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05303545 VERIFY(err,
3546 wait_for_completion_timeout(&me->channel[cid].workport,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003547 RPC_TIMEOUT));
Mohammed Nayeem Ur Rahmana967be62019-09-23 20:56:15 +05303548
3549 }
3550 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003551 if (err) {
c_mtharue1a5ce12017-10-13 20:47:09 +05303552 me->channel[cid].chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003553 goto bail;
3554 }
3555 kref_init(&me->channel[cid].kref);
3556 pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
3557 MAJOR(me->dev_no), cid);
Tharun Kumar Meruguc42c6e22018-05-29 15:50:46 +05303558
3559 for (ii = 0; ii < FASTRPC_GLINK_INTENT_NUM && me->glink; ii++)
3560 glink_queue_rx_intent(me->channel[cid].chan, NULL,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303561 FASTRPC_GLINK_INTENT_LEN);
Tharun Kumar Meruguc42c6e22018-05-29 15:50:46 +05303562
Tharun Kumar Merugud86fc8c2018-01-04 16:35:31 +05303563 if (cid == 0 && me->channel[cid].ssrcount !=
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003564 me->channel[cid].prevssrcount) {
c_mtharue1a5ce12017-10-13 20:47:09 +05303565 if (fastrpc_mmap_remove_ssr(fl))
3566 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003567 me->channel[cid].prevssrcount =
3568 me->channel[cid].ssrcount;
3569 }
3570 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003571
3572bail:
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303573 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003574 return err;
3575}
3576
Sathish Ambley36849af2017-02-02 09:35:55 -08003577static int fastrpc_device_open(struct inode *inode, struct file *filp)
3578{
3579 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05303580 struct fastrpc_file *fl = NULL;
Sathish Ambley36849af2017-02-02 09:35:55 -08003581 struct fastrpc_apps *me = &gfa;
3582
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303583 /*
3584 * Indicates which device node was opened,
3585 * MINOR_NUM_DEV or MINOR_NUM_SECURE_DEV
3586 */
3587 int dev_minor = MINOR(inode->i_rdev);
3588
3589 VERIFY(err, ((dev_minor == MINOR_NUM_DEV) ||
3590 (dev_minor == MINOR_NUM_SECURE_DEV)));
3591 if (err) {
3592 pr_err("adsprpc: Invalid dev minor num %d\n", dev_minor);
3593 return err;
3594 }
3595
c_mtharue1a5ce12017-10-13 20:47:09 +05303596 VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
Sathish Ambley36849af2017-02-02 09:35:55 -08003597 if (err)
3598 return err;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05303599
Sathish Ambley36849af2017-02-02 09:35:55 -08003600 context_list_ctor(&fl->clst);
3601 spin_lock_init(&fl->hlock);
3602 INIT_HLIST_HEAD(&fl->maps);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303603 INIT_HLIST_HEAD(&fl->perf);
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303604 INIT_HLIST_HEAD(&fl->cached_bufs);
3605 INIT_HLIST_HEAD(&fl->remote_bufs);
Sathish Ambley36849af2017-02-02 09:35:55 -08003606 INIT_HLIST_NODE(&fl->hn);
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05303607 fl->sessionid = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003608 fl->apps = me;
3609 fl->mode = FASTRPC_MODE_SERIAL;
3610 fl->cid = -1;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303611 fl->dev_minor = dev_minor;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303612 fl->init_mem = NULL;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303613 fl->qos_request = 0;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303614 fl->refcount = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003615 filp->private_data = fl;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05303616 mutex_init(&fl->map_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303617 mutex_init(&fl->fl_map_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08003618 spin_lock(&me->hlock);
3619 hlist_add_head(&fl->hn, &me->drivers);
3620 spin_unlock(&me->hlock);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303621 mutex_init(&fl->perf_mutex);
Jeya R2bcad4f2021-06-10 13:03:44 +05303622 mutex_init(&fl->pm_qos_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08003623 return 0;
3624}
3625
Edgar Flores1a772fa2020-02-07 14:59:29 -08003626static int fastrpc_set_process_info(struct fastrpc_file *fl)
3627{
3628 int err = 0, buf_size = 0;
3629 char strpid[PID_SIZE];
Jeya R336ada12021-03-18 14:04:49 +05303630 char cur_comm[TASK_COMM_LEN];
Edgar Flores1a772fa2020-02-07 14:59:29 -08003631
Jeya R336ada12021-03-18 14:04:49 +05303632 memcpy(cur_comm, current->comm, TASK_COMM_LEN);
3633 cur_comm[TASK_COMM_LEN-1] = '\0';
Edgar Flores1a772fa2020-02-07 14:59:29 -08003634 fl->tgid = current->tgid;
3635 snprintf(strpid, PID_SIZE, "%d", current->pid);
Jeya R336ada12021-03-18 14:04:49 +05303636 buf_size = strlen(cur_comm) + strlen("_") + strlen(strpid) + 1;
Edgar Flores1a772fa2020-02-07 14:59:29 -08003637 fl->debug_buf = kzalloc(buf_size, GFP_KERNEL);
3638 if (!fl->debug_buf) {
3639 err = -ENOMEM;
3640 return err;
3641 }
Jeya R336ada12021-03-18 14:04:49 +05303642 snprintf(fl->debug_buf, buf_size, "%.10s%s%d",
3643 cur_comm, "_", current->pid);
Edgar Flores1a772fa2020-02-07 14:59:29 -08003644 fl->debugfs_file = debugfs_create_file(fl->debug_buf, 0644,
3645 debugfs_root, fl, &debugfs_fops);
3646 if (!fl->debugfs_file)
3647 pr_warn("Error: %s: %s: failed to create debugfs file %s\n",
Jeya R336ada12021-03-18 14:04:49 +05303648 cur_comm, __func__, fl->debug_buf);
3649
Edgar Flores1a772fa2020-02-07 14:59:29 -08003650 return err;
3651}
3652
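/*
 * fastrpc_get_info() - handler for FASTRPC_IOCTL_GETINFO. On first use
 * it binds the fd to the requested channel id, rejects the non-secure
 * device node on secure-only channels and allocates an SMMU session;
 * *info then reports whether the session has the SMMU enabled.
 */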
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003653static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
3654{
3655 int err = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003656 uint32_t cid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003657
c_mtharue1a5ce12017-10-13 20:47:09 +05303658 VERIFY(err, fl != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003659 if (err)
3660 goto bail;
Edgar Flores1a772fa2020-02-07 14:59:29 -08003661 err = fastrpc_set_process_info(fl);
3662 if (err)
3663 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08003664 if (fl->cid == -1) {
3665 cid = *info;
3666 VERIFY(err, cid < NUM_CHANNELS);
3667 if (err)
3668 goto bail;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303669 /* Check to see if the device node is non-secure */
zhaochenfc798572018-08-17 15:32:37 +08003670 if (fl->dev_minor == MINOR_NUM_DEV &&
3671 fl->apps->secure_flag == true) {
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05303672 /*
3673 * For a non-secure device node, check and make sure that
3674 * the channel allows non-secure access.
3675 * If not, bail. The session will not start:
3676 * cid will remain -1 and the client will not be able to
3677 * invoke any other methods without failure.
3678 */
3679 if (fl->apps->channel[cid].secure == SECURE_CHANNEL) {
3680 err = -EPERM;
3681 pr_err("adsprpc: GetInfo failed dev %d, cid %d, secure %d\n",
3682 fl->dev_minor, cid,
3683 fl->apps->channel[cid].secure);
3684 goto bail;
3685 }
3686 }
Sathish Ambley36849af2017-02-02 09:35:55 -08003687 fl->cid = cid;
3688 fl->ssrcount = fl->apps->channel[cid].ssrcount;
3689 VERIFY(err, !fastrpc_session_alloc_locked(
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303690 &fl->apps->channel[cid], 0, fl->sharedcb, &fl->sctx));
Sathish Ambley36849af2017-02-02 09:35:55 -08003691 if (err)
3692 goto bail;
3693 }
Tharun Kumar Merugu80be7d62017-08-02 11:03:22 +05303694 VERIFY(err, fl->sctx != NULL);
Jeya R984a1a32021-01-18 15:38:07 +05303695 if (err) {
3696 err = -EBADR;
Tharun Kumar Merugu80be7d62017-08-02 11:03:22 +05303697 goto bail;
Jeya R984a1a32021-01-18 15:38:07 +05303698 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003699 *info = (fl->sctx->smmu.enabled ? 1 : 0);
3700bail:
3701 return err;
3702}
3703
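/*
 * fastrpc_internal_control() - handler for FASTRPC_IOCTL_CONTROL.
 * Supports a per-fd PM QoS CPU latency vote (added on first use, updated
 * afterwards), selection of a shared SMMU context bank, and a query that
 * reports kernel-allocation support back to user space.
 */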
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303704static int fastrpc_internal_control(struct fastrpc_file *fl,
3705 struct fastrpc_ioctl_control *cp)
3706{
Mohammed Nayeem Ur Rahman18d633f2019-05-28 15:11:40 +05303707 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303708 int err = 0;
3709 int latency;
3710
3711 VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
3712 if (err)
3713 goto bail;
3714 VERIFY(err, !IS_ERR_OR_NULL(cp));
3715 if (err)
3716 goto bail;
3717
3718 switch (cp->req) {
3719 case FASTRPC_CONTROL_LATENCY:
3720 latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
3721 fl->apps->latency : PM_QOS_DEFAULT_VALUE;
3722 VERIFY(err, latency != 0);
3723 if (err)
3724 goto bail;
Jeya R2bcad4f2021-06-10 13:03:44 +05303725 mutex_lock(&fl->pm_qos_mutex);
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303726 if (!fl->qos_request) {
3727 pm_qos_add_request(&fl->pm_qos_req,
3728 PM_QOS_CPU_DMA_LATENCY, latency);
3729 fl->qos_request = 1;
3730 } else
3731 pm_qos_update_request(&fl->pm_qos_req, latency);
Jeya R2bcad4f2021-06-10 13:03:44 +05303732 mutex_unlock(&fl->pm_qos_mutex);
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303733 break;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303734 case FASTRPC_CONTROL_SMMU:
Mohammed Nayeem Ur Rahman18d633f2019-05-28 15:11:40 +05303735 if (!me->legacy)
3736 fl->sharedcb = cp->smmu.sharedcb;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303737 break;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303738 case FASTRPC_CONTROL_KALLOC:
3739 cp->kalloc.kalloc_support = 1;
3740 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303741 default:
3742 err = -ENOTTY;
3743 break;
3744 }
3745bail:
3746 return err;
3747}
3748
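/*
 * fastrpc_device_ioctl() - top-level ioctl dispatcher. An illustrative
 * user-space sequence (device node name, open flags and the invoke
 * argument shown here only as an example) would be:
 *
 *	int fd = open("/dev/adsprpc-smd", O_RDONLY);
 *	uint32_t domain = 0;                          // e.g. ADSP
 *	ioctl(fd, FASTRPC_IOCTL_GETINFO, &domain);    // bind channel
 *	ioctl(fd, FASTRPC_IOCTL_INVOKE, &invoke);     // remote call
 *
 * Requests arriving after fastrpc_device_release() has begun are
 * rejected (EBADF).
 */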
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003749static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
3750 unsigned long ioctl_param)
3751{
3752 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07003753 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003754 struct fastrpc_ioctl_mmap mmap;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303755 struct fastrpc_ioctl_mmap_64 mmap64;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003756 struct fastrpc_ioctl_munmap munmap;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303757 struct fastrpc_ioctl_munmap_64 munmap64;
c_mtharu7bd6a422017-10-17 18:15:37 +05303758 struct fastrpc_ioctl_munmap_fd munmap_fd;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08003759 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003760 struct fastrpc_ioctl_perf perf;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303761 struct fastrpc_ioctl_control cp;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003762 } p;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303763 union {
3764 struct fastrpc_ioctl_mmap mmap;
3765 struct fastrpc_ioctl_munmap munmap;
3766 } i;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003767 void *param = (char *)ioctl_param;
3768 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
3769 int size = 0, err = 0;
3770 uint32_t info;
3771
Jeya Rb70b4ad2021-01-25 10:28:42 -08003772 VERIFY(err, fl != NULL);
3773 if (err) {
3774 err = -EBADR;
3775 goto bail;
3776 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303777 p.inv.fds = NULL;
3778 p.inv.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07003779 p.inv.crc = NULL;
tharun kumar9f899ea2017-07-03 17:07:03 +05303780 spin_lock(&fl->hlock);
3781 if (fl->file_close == 1) {
3782 err = -EBADF;
3783 pr_warn("ADSPRPC: fastrpc_device_release is happening, so not sending any new requests to DSP\n");
3784 spin_unlock(&fl->hlock);
3785 goto bail;
3786 }
3787 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003788
3789 switch (ioctl_num) {
3790 case FASTRPC_IOCTL_INVOKE:
3791 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07003792 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003793 case FASTRPC_IOCTL_INVOKE_FD:
3794 if (!size)
3795 size = sizeof(struct fastrpc_ioctl_invoke_fd);
3796 /* fall through */
3797 case FASTRPC_IOCTL_INVOKE_ATTRS:
3798 if (!size)
3799 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07003800 /* fall through */
3801 case FASTRPC_IOCTL_INVOKE_CRC:
3802 if (!size)
3803 size = sizeof(struct fastrpc_ioctl_invoke_crc);
c_mtharue1a5ce12017-10-13 20:47:09 +05303804 K_COPY_FROM_USER(err, 0, &p.inv, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003805 if (err)
3806 goto bail;
3807 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
3808 0, &p.inv)));
3809 if (err)
3810 goto bail;
3811 break;
3812 case FASTRPC_IOCTL_MMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303813 K_COPY_FROM_USER(err, 0, &p.mmap, param,
3814 sizeof(p.mmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303815 if (err)
3816 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003817 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
3818 if (err)
3819 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303820 K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003821 if (err)
3822 goto bail;
3823 break;
3824 case FASTRPC_IOCTL_MUNMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303825 K_COPY_FROM_USER(err, 0, &p.munmap, param,
3826 sizeof(p.munmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303827 if (err)
3828 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003829 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
3830 &p.munmap)));
3831 if (err)
3832 goto bail;
3833 break;
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303834 case FASTRPC_IOCTL_MMAP_64:
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303835 K_COPY_FROM_USER(err, 0, &p.mmap64, param,
3836 sizeof(p.mmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303837 if (err)
3838 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303839 get_fastrpc_ioctl_mmap_64(&p.mmap64, &i.mmap);
3840 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &i.mmap)));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303841 if (err)
3842 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303843 put_fastrpc_ioctl_mmap_64(&p.mmap64, &i.mmap);
3844 K_COPY_TO_USER(err, 0, param, &p.mmap64, sizeof(p.mmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303845 if (err)
3846 goto bail;
3847 break;
3848 case FASTRPC_IOCTL_MUNMAP_64:
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303849 K_COPY_FROM_USER(err, 0, &p.munmap64, param,
3850 sizeof(p.munmap64));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303851 if (err)
3852 goto bail;
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303853 get_fastrpc_ioctl_munmap_64(&p.munmap64, &i.munmap);
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303854 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
Tharun Kumar Merugu92b5e132018-07-18 15:03:35 +05303855 &i.munmap)));
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303856 if (err)
3857 goto bail;
3858 break;
c_mtharu7bd6a422017-10-17 18:15:37 +05303859 case FASTRPC_IOCTL_MUNMAP_FD:
3860 K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
3861 sizeof(p.munmap_fd));
3862 if (err)
3863 goto bail;
3864 VERIFY(err, 0 == (err = fastrpc_internal_munmap_fd(fl,
3865 &p.munmap_fd)));
3866 if (err)
3867 goto bail;
3868 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003869 case FASTRPC_IOCTL_SETMODE:
3870 switch ((uint32_t)ioctl_param) {
3871 case FASTRPC_MODE_PARALLEL:
3872 case FASTRPC_MODE_SERIAL:
3873 fl->mode = (uint32_t)ioctl_param;
3874 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003875 case FASTRPC_MODE_PROFILE:
3876 fl->profile = (uint32_t)ioctl_param;
3877 break;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05303878 case FASTRPC_MODE_SESSION:
3879 fl->sessionid = 1;
3880 fl->tgid |= (1 << SESSION_ID_INDEX);
3881 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003882 default:
3883 err = -ENOTTY;
3884 break;
3885 }
3886 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003887 case FASTRPC_IOCTL_GETPERF:
c_mtharue1a5ce12017-10-13 20:47:09 +05303888 K_COPY_FROM_USER(err, 0, &p.perf,
3889 param, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003890 if (err)
3891 goto bail;
3892 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
3893 if (p.perf.keys) {
3894 char *keys = PERF_KEYS;
3895
c_mtharue1a5ce12017-10-13 20:47:09 +05303896 K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
3897 keys, strlen(keys)+1);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003898 if (err)
3899 goto bail;
3900 }
3901 if (p.perf.data) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303902 struct fastrpc_perf *perf = NULL, *fperf = NULL;
3903 struct hlist_node *n = NULL;
3904
3905 mutex_lock(&fl->perf_mutex);
3906 hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
3907 if (perf->tid == current->pid) {
3908 fperf = perf;
3909 break;
3910 }
3911 }
3912
3913 mutex_unlock(&fl->perf_mutex);
3914
3915 if (fperf) {
3916 K_COPY_TO_USER(err, 0, (void *)p.perf.data,
3917 fperf, sizeof(*fperf));
3918 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003919 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303920 K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003921 if (err)
3922 goto bail;
3923 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303924 case FASTRPC_IOCTL_CONTROL:
c_mtharue1a5ce12017-10-13 20:47:09 +05303925 K_COPY_FROM_USER(err, 0, &p.cp, param,
3926 sizeof(p.cp));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303927 if (err)
3928 goto bail;
3929 VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
3930 if (err)
3931 goto bail;
Tharun Kumar Merugue073de72018-07-30 23:57:47 +05303932 if (p.cp.req == FASTRPC_CONTROL_KALLOC) {
3933 K_COPY_TO_USER(err, 0, param, &p.cp, sizeof(p.cp));
3934 if (err)
3935 goto bail;
3936 }
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303937 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003938 case FASTRPC_IOCTL_GETINFO:
c_mtharue1a5ce12017-10-13 20:47:09 +05303939 K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
Sathish Ambley36849af2017-02-02 09:35:55 -08003940 if (err)
3941 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003942 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
3943 if (err)
3944 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303945 K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003946 if (err)
3947 goto bail;
3948 break;
3949 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08003950 p.init.attrs = 0;
3951 p.init.siglen = 0;
3952 size = sizeof(struct fastrpc_ioctl_init);
3953 /* fall through */
3954 case FASTRPC_IOCTL_INIT_ATTRS:
3955 if (!size)
3956 size = sizeof(struct fastrpc_ioctl_init_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +05303957 K_COPY_FROM_USER(err, 0, &p.init, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003958 if (err)
3959 goto bail;
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05303960 VERIFY(err, p.init.init.filelen >= 0 &&
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303961 p.init.init.filelen < INIT_FILELEN_MAX);
3962 if (err)
3963 goto bail;
3964 VERIFY(err, p.init.init.memlen >= 0 &&
3965 p.init.init.memlen < INIT_MEMLEN_MAX);
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05303966 if (err)
3967 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303968 VERIFY(err, 0 == (err = fastrpc_init_process(fl, &p.init)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003969 if (err)
3970 goto bail;
3971 break;
3972
3973 default:
3974 err = -ENOTTY;
3975 pr_info("bad ioctl: %d\n", ioctl_num);
3976 break;
3977 }
3978 bail:
3979 return err;
3980}
3981
3982static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
3983 unsigned long code,
3984 void *data)
3985{
3986 struct fastrpc_apps *me = &gfa;
3987 struct fastrpc_channel_ctx *ctx;
c_mtharue1a5ce12017-10-13 20:47:09 +05303988 struct notif_data *notifdata = data;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003989 int cid;
3990
3991 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
3992 cid = ctx - &me->channel[0];
3993 if (code == SUBSYS_BEFORE_SHUTDOWN) {
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303994 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003995 ctx->ssrcount++;
c_mtharue1a5ce12017-10-13 20:47:09 +05303996 ctx->issubsystemup = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303997 if (ctx->chan) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303998 if (me->glink)
3999 fastrpc_glink_close(ctx->chan, cid);
4000 else
4001 smd_close(ctx->chan);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304002 ctx->chan = NULL;
4003 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
4004 gcinfo[cid].name, MAJOR(me->dev_no), cid);
4005 }
4006 mutex_unlock(&me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05304007 if (cid == 0)
4008 me->staticpd_flags = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004009 fastrpc_notify_drivers(me, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05304010 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
4011 if (me->channel[0].remoteheap_ramdump_dev &&
4012 notifdata->enable_ramdump) {
4013 me->channel[0].ramdumpenabled = 1;
4014 }
4015 } else if (code == SUBSYS_AFTER_POWERUP) {
4016 ctx->issubsystemup = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004017 }
4018
4019 return NOTIFY_DONE;
4020}
4021
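/*
 * fastrpc_pdr_notifier_cb() - service-notifier callback for the static
 * protection domains (audio/sensors). A STATE_DOWN event bumps the PDR
 * count, marks the PD down and notifies waiting clients; STATE_UP marks
 * the PD usable again.
 */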
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304022static int fastrpc_pdr_notifier_cb(struct notifier_block *pdrnb,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304023 unsigned long code,
4024 void *data)
4025{
4026 struct fastrpc_apps *me = &gfa;
4027 struct fastrpc_static_pd *spd;
4028 struct notif_data *notifdata = data;
4029
4030 spd = container_of(pdrnb, struct fastrpc_static_pd, pdrnb);
4031 if (code == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01) {
4032 mutex_lock(&me->smd_mutex);
4033 spd->pdrcount++;
4034 spd->ispdup = 0;
4035		pr_info("ADSPRPC: PDR notifier %d %s\n",
4036 MAJOR(me->dev_no), spd->spdname);
4037 mutex_unlock(&me->smd_mutex);
4038 if (!strcmp(spd->spdname,
4039 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME))
4040 me->staticpd_flags = 0;
4041 fastrpc_notify_pdr_drivers(me, spd->spdname);
4042 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
4043 if (me->channel[0].remoteheap_ramdump_dev &&
4044 notifdata->enable_ramdump) {
4045 me->channel[0].ramdumpenabled = 1;
4046 }
4047 } else if (code == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
4048 spd->ispdup = 1;
4049 }
4050
4051 return NOTIFY_DONE;
4052}
4053
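/*
 * fastrpc_get_service_location_notify() - service locator callback.  Scans
 * the returned domain list for the audio or sensors PD, registers the PDR
 * notifier for the matching domain (only once) and records whether the
 * service is already up.
 */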
4054static int fastrpc_get_service_location_notify(struct notifier_block *nb,
Vamsi krishna Gattupalli4c11c602020-08-25 19:34:14 +05304055 unsigned long opcode, void *data)
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304056{
4057 struct fastrpc_static_pd *spd;
4058 struct pd_qmi_client_data *pdr = data;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304059 int curr_state = 0, i = 0;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304060
4061 spd = container_of(nb, struct fastrpc_static_pd, get_service_nb);
4062 if (opcode == LOCATOR_DOWN) {
4063		pr_err("ADSPRPC: PDR service locator down\n");
4064 return NOTIFY_DONE;
4065 }
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304066 for (i = 0; i < pdr->total_domains; i++) {
Vamsi krishna Gattupalli4c11c602020-08-25 19:34:14 +05304067 if ((!strcmp(spd->spdname, "audio_pdr_adsprpc"))
4068 && (!strcmp(pdr->domain_list[i].name,
4069 "msm/adsp/audio_pd"))) {
4070 goto pdr_register;
4071 } else if ((!strcmp(spd->spdname, "sensors_pdr_adsprpc"))
4072 && (!strcmp(pdr->domain_list[i].name,
4073 "msm/adsp/sensor_pd"))) {
4074 goto pdr_register;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304075 }
4076 }
Vamsi krishna Gattupalli4c11c602020-08-25 19:34:14 +05304077 return NOTIFY_DONE;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304078
Vamsi krishna Gattupalli4c11c602020-08-25 19:34:14 +05304079pdr_register:
4080 if (!spd->pdrhandle) {
4081 spd->pdrhandle =
4082 service_notif_register_notifier(
4083 pdr->domain_list[i].name,
4084 pdr->domain_list[i].instance_id,
4085 &spd->pdrnb, &curr_state);
4086 } else {
4087 pr_err("ADSPRPC: %s is already registered\n", spd->spdname);
4088 }
4089
4090 if (IS_ERR(spd->pdrhandle))
4091 pr_err("ADSPRPC: Unable to register notifier\n");
4092
4093 if (curr_state == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
4094 pr_info("ADSPRPC: %s is up\n", spd->spdname);
4095 spd->ispdup = 1;
4096 } else if (curr_state == SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01) {
4097		pr_info("ADSPRPC: %s is uninitialized\n", spd->spdname);
4098 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304099 return NOTIFY_DONE;
4100}
4101
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004102static const struct file_operations fops = {
4103 .open = fastrpc_device_open,
4104 .release = fastrpc_device_release,
4105 .unlocked_ioctl = fastrpc_device_ioctl,
4106 .compat_ioctl = compat_fastrpc_device_ioctl,
4107};
4108
4109static const struct of_device_id fastrpc_match_table[] = {
4110 { .compatible = "qcom,msm-fastrpc-adsp", },
4111 { .compatible = "qcom,msm-fastrpc-compute", },
4112 { .compatible = "qcom,msm-fastrpc-compute-cb", },
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304113 { .compatible = "qcom,msm-fastrpc-legacy-compute", },
4114 { .compatible = "qcom,msm-fastrpc-legacy-compute-cb", },
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004115 { .compatible = "qcom,msm-adsprpc-mem-region", },
4116 {}
4117};
4118
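/*
 * fastrpc_cb_probe() - probe one SMMU context bank
 * ("qcom,msm-fastrpc-compute-cb").  The "label" property selects the
 * channel; the next free session is filled from the iommus specifier, an
 * ARM IOMMU mapping is created and attached (secure banks get a lower VA
 * base and a secure VMID), and the global debugfs entry is created.
 */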
4119static int fastrpc_cb_probe(struct device *dev)
4120{
4121 struct fastrpc_channel_ctx *chan;
4122 struct fastrpc_session_ctx *sess;
4123 struct of_phandle_args iommuspec;
4124 const char *name;
4125 unsigned int start = 0x80000000;
4126 int err = 0, i;
4127 int secure_vmid = VMID_CP_PIXEL;
4128
c_mtharue1a5ce12017-10-13 20:47:09 +05304129 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
4130 "label", NULL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004131 if (err)
4132 goto bail;
4133 for (i = 0; i < NUM_CHANNELS; i++) {
4134 if (!gcinfo[i].name)
4135 continue;
4136 if (!strcmp(name, gcinfo[i].name))
4137 break;
4138 }
4139 VERIFY(err, i < NUM_CHANNELS);
4140 if (err)
4141 goto bail;
4142 chan = &gcinfo[i];
4143 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4144 if (err)
4145 goto bail;
4146
4147 VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
4148 "#iommu-cells", 0, &iommuspec));
4149 if (err)
4150 goto bail;
4151 sess = &chan->session[chan->sesscount];
4152 sess->smmu.cb = iommuspec.args[0] & 0xf;
4153 sess->used = 0;
4154 sess->smmu.coherent = of_property_read_bool(dev->of_node,
4155 "dma-coherent");
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304156 sess->smmu.sharedcb = of_property_read_bool(dev->of_node,
4157 "shared-cb");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004158 sess->smmu.secure = of_property_read_bool(dev->of_node,
4159 "qcom,secure-context-bank");
4160 if (sess->smmu.secure)
4161 start = 0x60000000;
4162 VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
4163 arm_iommu_create_mapping(&platform_bus_type,
Mohammed Nayeem Ur Rahman62f7f9c2020-04-13 11:16:19 +05304164 start, MAX_SIZE_LIMIT)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004165 if (err)
4166 goto bail;
4167
4168 if (sess->smmu.secure)
4169 iommu_domain_set_attr(sess->smmu.mapping->domain,
4170 DOMAIN_ATTR_SECURE_VMID,
4171 &secure_vmid);
4172
4173 VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
4174 if (err)
4175 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05304176 sess->smmu.dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004177 sess->smmu.enabled = 1;
4178 chan->sesscount++;
Sathish Ambley1ca68232017-01-19 10:32:55 -08004179 debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
4180 NULL, &debugfs_fops);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004181bail:
4182 return err;
4183}
4184
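/*
 * fastrpc_cb_legacy_probe() - probe a legacy compute context bank.  Reads
 * the "sids" property, creates one IOMMU mapping that is shared by a
 * session per SID, attaches the device and marks each session enabled,
 * non-secure and non-coherent.
 */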
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304185static int fastrpc_cb_legacy_probe(struct device *dev)
4186{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304187 struct fastrpc_channel_ctx *chan;
4188 struct fastrpc_session_ctx *first_sess = NULL, *sess = NULL;
4189 const char *name;
4190 unsigned int *sids = NULL, sids_size = 0;
4191 int err = 0, ret = 0, i;
4192
4193 unsigned int start = 0x80000000;
4194
4195 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
4196 "label", NULL)));
4197 if (err)
4198 goto bail;
4199
4200 for (i = 0; i < NUM_CHANNELS; i++) {
4201 if (!gcinfo[i].name)
4202 continue;
4203 if (!strcmp(name, gcinfo[i].name))
4204 break;
4205 }
4206 VERIFY(err, i < NUM_CHANNELS);
4207 if (err)
4208 goto bail;
4209
4210 chan = &gcinfo[i];
4211 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4212 if (err)
4213 goto bail;
4214
4215 first_sess = &chan->session[chan->sesscount];
4216
4217 VERIFY(err, NULL != of_get_property(dev->of_node,
4218 "sids", &sids_size));
4219 if (err)
4220 goto bail;
4221
4222 VERIFY(err, NULL != (sids = kzalloc(sids_size, GFP_KERNEL)));
4223 if (err)
4224 goto bail;
4225 ret = of_property_read_u32_array(dev->of_node, "sids", sids,
4226 sids_size/sizeof(unsigned int));
4227	if (ret) {
4228		err = ret;
		goto bail;
	}
4229
4230 VERIFY(err, !IS_ERR_OR_NULL(first_sess->smmu.mapping =
4231 arm_iommu_create_mapping(&platform_bus_type,
4232 start, 0x78000000)));
4233 if (err)
4234 goto bail;
4235
4236 VERIFY(err, !arm_iommu_attach_device(dev, first_sess->smmu.mapping));
4237 if (err)
4238 goto bail;
4239
4240
4241 for (i = 0; i < sids_size/sizeof(unsigned int); i++) {
4242 VERIFY(err, chan->sesscount < NUM_SESSIONS);
4243 if (err)
4244 goto bail;
4245 sess = &chan->session[chan->sesscount];
4246 sess->smmu.cb = sids[i];
4247 sess->smmu.dev = dev;
4248 sess->smmu.mapping = first_sess->smmu.mapping;
4249 sess->smmu.enabled = 1;
4250 sess->used = 0;
4251 sess->smmu.coherent = false;
4252 sess->smmu.secure = false;
4253 chan->sesscount++;
4254 }
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304255bail:
4256 kfree(sids);
4257 return err;
4258}
4259
4260
4261
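/*
 * init_secure_vmid_list() - read a list of remote-heap VMIDs from the given
 * device-tree property and build a matching R/W/X permission list in
 * destvm.  On failure both arrays are freed and destvm is left untouched.
 */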
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304262static void init_secure_vmid_list(struct device *dev, char *prop_name,
4263 struct secure_vm *destvm)
4264{
4265 int err = 0;
4266 u32 len = 0, i = 0;
4267 u32 *rhvmlist = NULL;
4268 u32 *rhvmpermlist = NULL;
4269
4270 if (!of_find_property(dev->of_node, prop_name, &len))
4271 goto bail;
4272 if (len == 0)
4273 goto bail;
4274 len /= sizeof(u32);
4275 VERIFY(err, NULL != (rhvmlist = kcalloc(len, sizeof(u32), GFP_KERNEL)));
4276 if (err)
4277 goto bail;
4278 VERIFY(err, NULL != (rhvmpermlist = kcalloc(len, sizeof(u32),
4279 GFP_KERNEL)));
4280 if (err)
4281 goto bail;
4282 for (i = 0; i < len; i++) {
4283 err = of_property_read_u32_index(dev->of_node, prop_name, i,
4284 &rhvmlist[i]);
4285 rhvmpermlist[i] = PERM_READ | PERM_WRITE | PERM_EXEC;
4286		pr_info("ADSPRPC: Secure VMID = %d\n", rhvmlist[i]);
4287 if (err) {
4288 pr_err("ADSPRPC: Failed to read VMID\n");
4289 goto bail;
4290 }
4291 }
4292 destvm->vmid = rhvmlist;
4293 destvm->vmperm = rhvmpermlist;
4294 destvm->vmcount = len;
4295bail:
4296 if (err) {
4297 kfree(rhvmlist);
4298 kfree(rhvmpermlist);
4299 }
4300}
4301
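/* Mark each remote channel secure or non-secure from the DT bitmask. */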
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304302static void configure_secure_channels(uint32_t secure_domains)
4303{
4304 struct fastrpc_apps *me = &gfa;
4305 int ii = 0;
4306 /*
4307 * secure_domains contains the bitmask of the secure channels
4308 * Bit 0 - ADSP
4309 * Bit 1 - MDSP
4310 * Bit 2 - SLPI
4311 * Bit 3 - CDSP
4312 */
4313 for (ii = ADSP_DOMAIN_ID; ii <= CDSP_DOMAIN_ID; ++ii) {
4314 int secure = (secure_domains >> ii) & 0x01;
4315
4316 me->channel[ii].secure = secure;
4317 }
4318}
4319
4320
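/*
 * fastrpc_probe() - platform probe, dispatched on the compatible string.
 * The compute node supplies the remote-heap VMIDs, RPC latency and secure
 * domain mask; context-bank nodes are handed to fastrpc_cb_probe() or
 * fastrpc_cb_legacy_probe(); the mem-region node records the ADSP CMA range
 * and, unless "restrict-access" is set, hyp-assigns it to the remote VMs.
 * Audio/sensors PDR locators are then registered and child nodes populated.
 */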
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004321static int fastrpc_probe(struct platform_device *pdev)
4322{
4323 int err = 0;
4324 struct fastrpc_apps *me = &gfa;
4325 struct device *dev = &pdev->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004326 struct device_node *ion_node, *node;
4327 struct platform_device *ion_pdev;
4328 struct cma *cma;
4329 uint32_t val;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304330 int ret = 0;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304331 uint32_t secure_domains;
c_mtharu63ffc012017-11-16 15:26:56 +05304332
4333 if (of_device_is_compatible(dev->of_node,
4334 "qcom,msm-fastrpc-compute")) {
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304335 init_secure_vmid_list(dev, "qcom,adsp-remoteheap-vmid",
4336 &gcinfo[0].rhvm);
c_mtharu63ffc012017-11-16 15:26:56 +05304337
c_mtharu63ffc012017-11-16 15:26:56 +05304338
4339 of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
4340 &me->latency);
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304341 if (of_get_property(dev->of_node,
4342 "qcom,secure-domains", NULL) != NULL) {
4343 VERIFY(err, !of_property_read_u32(dev->of_node,
4344 "qcom,secure-domains",
4345 &secure_domains));
zhaochenfc798572018-08-17 15:32:37 +08004346 if (!err) {
4347 me->secure_flag = true;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304348 configure_secure_channels(secure_domains);
zhaochenfc798572018-08-17 15:32:37 +08004349 } else {
4350 me->secure_flag = false;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304351 pr_info("adsprpc: unable to read the domain configuration from dts\n");
zhaochenfc798572018-08-17 15:32:37 +08004352 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304353 }
c_mtharu63ffc012017-11-16 15:26:56 +05304354 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004355 if (of_device_is_compatible(dev->of_node,
4356 "qcom,msm-fastrpc-compute-cb"))
4357 return fastrpc_cb_probe(dev);
4358
4359 if (of_device_is_compatible(dev->of_node,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304360 "qcom,msm-fastrpc-legacy-compute")) {
4361 me->glink = false;
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304362 me->legacy = 1;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304363 }
4364
4365 if (of_device_is_compatible(dev->of_node,
4366 "qcom,msm-fastrpc-legacy-compute-cb")){
4367 return fastrpc_cb_legacy_probe(dev);
4368 }
4369
4370 if (of_device_is_compatible(dev->of_node,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004371 "qcom,msm-adsprpc-mem-region")) {
4372 me->dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004373 ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
4374 if (ion_node) {
4375 for_each_available_child_of_node(ion_node, node) {
4376 if (of_property_read_u32(node, "reg", &val))
4377 continue;
4378 if (val != ION_ADSP_HEAP_ID)
4379 continue;
4380 ion_pdev = of_find_device_by_node(node);
4381 if (!ion_pdev)
4382 break;
4383 cma = dev_get_cma_area(&ion_pdev->dev);
4384 if (cma) {
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304385 me->range.addr = cma_get_base(cma);
4386 me->range.size =
4387 (size_t)cma_get_size(cma);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004388 }
4389 break;
4390 }
4391 }
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304392 if (me->range.addr && !of_property_read_bool(dev->of_node,
Tharun Kumar Merugu6b7a4a22018-01-17 16:08:07 +05304393 "restrict-access")) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004394 int srcVM[1] = {VMID_HLOS};
4395 int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
4396 VMID_ADSP_Q6};
Sathish Ambley84d11862017-05-15 14:36:05 -07004397 int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004398 PERM_READ | PERM_WRITE | PERM_EXEC,
4399 PERM_READ | PERM_WRITE | PERM_EXEC,
4400 PERM_READ | PERM_WRITE | PERM_EXEC,
4401 };
4402
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05304403 VERIFY(err, !hyp_assign_phys(me->range.addr,
4404 me->range.size, srcVM, 1,
4405 destVM, destVMperm, 4));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004406 if (err)
4407 goto bail;
4408 }
4409 return 0;
4410 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304411 if (of_property_read_bool(dev->of_node,
4412 "qcom,fastrpc-adsp-audio-pdr")) {
4413 int session;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004414
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304415 VERIFY(err, !fastrpc_get_adsp_session(
4416 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
4417 if (err)
4418 goto spdbail;
4419 me->channel[0].spd[session].get_service_nb.notifier_call =
4420 fastrpc_get_service_location_notify;
4421 ret = get_service_location(
4422 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
4423 AUDIO_PDR_ADSP_SERVICE_NAME,
4424 &me->channel[0].spd[session].get_service_nb);
4425 if (ret)
4426 pr_err("ADSPRPC: Get service location failed: %d\n",
4427 ret);
4428 }
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05304429 if (of_property_read_bool(dev->of_node,
4430 "qcom,fastrpc-adsp-sensors-pdr")) {
4431 int session;
4432
4433 VERIFY(err, !fastrpc_get_adsp_session(
4434 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
4435 if (err)
4436 goto spdbail;
4437 me->channel[0].spd[session].get_service_nb.notifier_call =
4438 fastrpc_get_service_location_notify;
4439 ret = get_service_location(
4440 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
4441 SENSORS_PDR_ADSP_SERVICE_NAME,
4442 &me->channel[0].spd[session].get_service_nb);
4443 if (ret)
4444 pr_err("ADSPRPC: Get service location failed: %d\n",
4445 ret);
4446 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05304447spdbail:
4448 err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004449 VERIFY(err, !of_platform_populate(pdev->dev.of_node,
4450 fastrpc_match_table,
4451 NULL, &pdev->dev));
4452 if (err)
4453 goto bail;
4454bail:
4455 return err;
4456}
4457
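/*
 * fastrpc_deinit() - drop the channel transports, detach and release every
 * session's IOMMU mapping and free the remote-heap VMID lists.
 */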
4458static void fastrpc_deinit(void)
4459{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304460 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004461 struct fastrpc_channel_ctx *chan = gcinfo;
4462 int i, j;
4463
4464 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
4465 if (chan->chan) {
4466 kref_put_mutex(&chan->kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05304467 fastrpc_channel_close, &me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05304468 chan->chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004469 }
4470 for (j = 0; j < NUM_SESSIONS; j++) {
4471 struct fastrpc_session_ctx *sess = &chan->session[j];
c_mtharue1a5ce12017-10-13 20:47:09 +05304472 if (sess->smmu.dev) {
4473 arm_iommu_detach_device(sess->smmu.dev);
4474 sess->smmu.dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004475 }
4476 if (sess->smmu.mapping) {
4477 arm_iommu_release_mapping(sess->smmu.mapping);
c_mtharue1a5ce12017-10-13 20:47:09 +05304478 sess->smmu.mapping = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004479 }
4480 }
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05304481 kfree(chan->rhvm.vmid);
4482 kfree(chan->rhvm.vmperm);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004483 }
4484}
4485
4486static struct platform_driver fastrpc_driver = {
4487 .probe = fastrpc_probe,
4488 .driver = {
4489 .name = "fastrpc",
4490 .owner = THIS_MODULE,
4491 .of_match_table = fastrpc_match_table,
4492 },
4493};
4494
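/*
 * fastrpc_device_init() - module init: create the debugfs root, register
 * the platform driver, allocate the char device region, create the class
 * and the non-secure and secure device nodes, register an SSR notifier per
 * channel and create the ION client.  Errors unwind in reverse order.
 */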
4495static int __init fastrpc_device_init(void)
4496{
4497 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +05304498 struct device *dev = NULL;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304499 struct device *secure_dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004500 int err = 0, i;
4501
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05304502 debugfs_root = debugfs_create_dir("adsprpc", NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004503 memset(me, 0, sizeof(*me));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004504 fastrpc_init(me);
4505 me->dev = NULL;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05304506 me->glink = true;
zhaochenfc798572018-08-17 15:32:37 +08004507 me->secure_flag = false;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004508 VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
4509 if (err)
4510 goto register_bail;
4511 VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
4512 DEVICE_NAME));
4513 if (err)
4514 goto alloc_chrdev_bail;
4515 cdev_init(&me->cdev, &fops);
4516 me->cdev.owner = THIS_MODULE;
4517 VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304518 NUM_DEVICES));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004519 if (err)
4520 goto cdev_init_bail;
4521 me->class = class_create(THIS_MODULE, "fastrpc");
4522 VERIFY(err, !IS_ERR(me->class));
4523 if (err)
4524 goto class_create_bail;
4525 me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304526
4527 /*
4528 * Create devices and register with sysfs
4529 * Create first device with minor number 0
4530 */
Sathish Ambley36849af2017-02-02 09:35:55 -08004531 dev = device_create(me->class, NULL,
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304532 MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV),
4533 NULL, DEVICE_NAME);
Sathish Ambley36849af2017-02-02 09:35:55 -08004534 VERIFY(err, !IS_ERR_OR_NULL(dev));
4535 if (err)
4536 goto device_create_bail;
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304537
4538	/* Create the secure device node with its own minor number */
4539 secure_dev = device_create(me->class, NULL,
4540 MKDEV(MAJOR(me->dev_no), MINOR_NUM_SECURE_DEV),
4541 NULL, DEVICE_NAME_SECURE);
4542 VERIFY(err, !IS_ERR_OR_NULL(secure_dev));
4543 if (err)
4544 goto device_create_bail;
4545
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004546 for (i = 0; i < NUM_CHANNELS; i++) {
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304547 me->channel[i].dev = secure_dev;
4548 if (i == CDSP_DOMAIN_ID)
4549 me->channel[i].dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004550 me->channel[i].ssrcount = 0;
4551 me->channel[i].prevssrcount = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05304552 me->channel[i].issubsystemup = 1;
4553 me->channel[i].ramdumpenabled = 0;
4554 me->channel[i].remoteheap_ramdump_dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004555 me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
4556 me->channel[i].handle = subsys_notif_register_notifier(
4557 gcinfo[i].subsys,
4558 &me->channel[i].nb);
4559 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004560 me->client = msm_ion_client_create(DEVICE_NAME);
4561 VERIFY(err, !IS_ERR_OR_NULL(me->client));
4562 if (err)
4563 goto device_create_bail;
Mohammed Nayeem Ur Rahman492e3862018-08-17 12:53:43 +05304564
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004565 return 0;
4566device_create_bail:
4567 for (i = 0; i < NUM_CHANNELS; i++) {
Sathish Ambley36849af2017-02-02 09:35:55 -08004568 if (me->channel[i].handle)
4569 subsys_notif_unregister_notifier(me->channel[i].handle,
4570 &me->channel[i].nb);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004571 }
Sathish Ambley36849af2017-02-02 09:35:55 -08004572 if (!IS_ERR_OR_NULL(dev))
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304573 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4574 MINOR_NUM_DEV));
4575 if (!IS_ERR_OR_NULL(secure_dev))
4576 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4577 MINOR_NUM_SECURE_DEV));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004578 class_destroy(me->class);
4579class_create_bail:
4580 cdev_del(&me->cdev);
4581cdev_init_bail:
4582 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
4583alloc_chrdev_bail:
4584register_bail:
4585 fastrpc_deinit();
4586 return err;
4587}
4588
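/*
 * fastrpc_device_exit() - module exit: tear down open files, channels and
 * SSR notifiers, destroy both device nodes, the class, the cdev and the
 * chrdev region, release the ION client and remove the debugfs tree.
 */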
4589static void __exit fastrpc_device_exit(void)
4590{
4591 struct fastrpc_apps *me = &gfa;
4592 int i;
4593
4594 fastrpc_file_list_dtor(me);
4595 fastrpc_deinit();
4596 for (i = 0; i < NUM_CHANNELS; i++) {
4597 if (!gcinfo[i].name)
4598 continue;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004599 subsys_notif_unregister_notifier(me->channel[i].handle,
4600 &me->channel[i].nb);
4601 }
Tharun Kumar Merugud996b262018-07-18 22:28:53 +05304602
4603	/* Destroy the secure and non-secure devices */
4604 device_destroy(me->class, MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV));
4605 device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
4606 MINOR_NUM_SECURE_DEV));
4607
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004608 class_destroy(me->class);
4609 cdev_del(&me->cdev);
4610 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
4611 ion_client_destroy(me->client);
Sathish Ambley1ca68232017-01-19 10:32:55 -08004612 debugfs_remove_recursive(debugfs_root);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07004613}
4614
4615late_initcall(fastrpc_device_init);
4616module_exit(fastrpc_device_exit);
4617
4618MODULE_LICENSE("GPL v2");