Sathish Ambley69e1ab02016-10-18 10:28:15 -07001/*
Tharun Kumar Merugubcd6fbf2018-01-04 17:49:34 +05302 * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/dma-buf.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <soc/qcom/secure_buffer.h>
28#include <soc/qcom/glink.h>
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +053029#include <soc/qcom/smd.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070030#include <soc/qcom/subsystem_notif.h>
31#include <soc/qcom/subsystem_restart.h>
Tharun Kumar Merugudf860662018-01-17 19:59:50 +053032#include <soc/qcom/service-notifier.h>
33#include <soc/qcom/service-locator.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070034#include <linux/scatterlist.h>
35#include <linux/fs.h>
36#include <linux/uaccess.h>
37#include <linux/device.h>
38#include <linux/of.h>
39#include <linux/of_address.h>
40#include <linux/of_platform.h>
41#include <linux/dma-contiguous.h>
42#include <linux/cma.h>
43#include <linux/iommu.h>
44#include <linux/kref.h>
45#include <linux/sort.h>
46#include <linux/msm_dma_iommu_mapping.h>
47#include <asm/dma-iommu.h>
c_mtharue1a5ce12017-10-13 20:47:09 +053048#include <soc/qcom/scm.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070049#include "adsprpc_compat.h"
50#include "adsprpc_shared.h"
c_mtharue1a5ce12017-10-13 20:47:09 +053051#include <soc/qcom/ramdump.h>
Sathish Ambley1ca68232017-01-19 10:32:55 -080052#include <linux/debugfs.h>
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053053#include <linux/pm_qos.h>
Sathish Ambley69e1ab02016-10-18 10:28:15 -070054#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
55#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
56#define TZ_PIL_AUTH_QDSP6_PROC 1
c_mtharue1a5ce12017-10-13 20:47:09 +053057#define ADSP_MMAP_HEAP_ADDR 4
58#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +053059#define FASTRPC_DMAHANDLE_NOMAP (16)
60
Sathish Ambley69e1ab02016-10-18 10:28:15 -070061#define FASTRPC_ENOSUCH 39
62#define VMID_SSC_Q6 5
63#define VMID_ADSP_Q6 6
Sathish Ambley1ca68232017-01-19 10:32:55 -080064#define DEBUGFS_SIZE 1024
Sathish Ambley69e1ab02016-10-18 10:28:15 -070065
Tharun Kumar Merugudf860662018-01-17 19:59:50 +053066#define AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME "audio_pdr_adsprpc"
67#define AUDIO_PDR_ADSP_SERVICE_NAME "avs/audio"
68
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +053069#define SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME "sensors_pdr_adsprpc"
70#define SENSORS_PDR_ADSP_SERVICE_NAME "tms/servreg"
71
Sathish Ambley69e1ab02016-10-18 10:28:15 -070072#define RPC_TIMEOUT (5 * HZ)
73#define BALIGN 128
 74#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp */
 75#define NUM_SESSIONS 9 /* 8 compute, 1 cpz */
Sathish Ambleybae51902017-07-03 15:00:49 -070076#define M_FDLIST (16)
77#define M_CRCLIST (64)
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +053078#define SESSION_ID_INDEX (30)
c_mtharufdac6892017-10-12 13:09:01 +053079#define FASTRPC_CTX_MAGIC (0xbeeddeed)
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +053080#define FASTRPC_CTX_MAX (256)
81#define FASTRPC_CTXID_MASK (0xFF0)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070082
83#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
84
85#define FASTRPC_LINK_STATE_DOWN (0x0)
86#define FASTRPC_LINK_STATE_UP (0x1)
87#define FASTRPC_LINK_DISCONNECTED (0x0)
88#define FASTRPC_LINK_CONNECTING (0x1)
89#define FASTRPC_LINK_CONNECTED (0x3)
90#define FASTRPC_LINK_DISCONNECTING (0x7)
c_mtharu314a4202017-11-15 22:09:17 +053091#define FASTRPC_LINK_REMOTE_DISCONNECTING (0x8)
92#define FASTRPC_GLINK_INTENT_LEN (64)
Sathish Ambley69e1ab02016-10-18 10:28:15 -070093
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +053094#define PERF_KEYS \
95 "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke:tid:ptr"
Sathish Ambleya21b5b52017-01-11 16:11:01 -080096#define FASTRPC_STATIC_HANDLE_LISTENER (3)
97#define FASTRPC_STATIC_HANDLE_MAX (20)
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +053098#define FASTRPC_LATENCY_CTRL_ENB (1)
Sathish Ambleya21b5b52017-01-11 16:11:01 -080099
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +0530100#define INIT_FILELEN_MAX (2*1024*1024)
101#define INIT_MEMLEN_MAX (8*1024*1024)
102
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800103#define PERF_END (void)0
104
105#define PERF(enb, cnt, ff) \
106 {\
107 struct timespec startT = {0};\
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530108 int64_t *counter = cnt;\
109 if (enb && counter) {\
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800110 getnstimeofday(&startT);\
111 } \
112 ff ;\
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530113 if (enb && counter) {\
114 *counter += getnstimediff(&startT);\
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800115 } \
116 }
117
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530118#define GET_COUNTER(perf_ptr, offset) \
119 (perf_ptr != NULL ?\
120 (((offset >= 0) && (offset < PERF_KEY_MAX)) ?\
121 (int64_t *)(perf_ptr + offset)\
122 : (int64_t *)NULL) : (int64_t *)NULL)
123
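/*
 * Illustrative use of the profiling helpers above (a sketch of the pattern
 * used by get_args()/put_args() later in this file):
 *
 *	int64_t *perf_counter = getperfcounter(ctx->fl, PERF_COUNT);
 *
 *	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
 *	... statements being timed ...
 *	PERF_END);
 *
 * GET_COUNTER() indexes into the per-thread struct fastrpc_perf (the enum
 * fastrpc_perfkeys offsets line up with its leading int64_t fields), and
 * PERF() adds the elapsed nanoseconds to that slot whenever profiling is
 * enabled on the file descriptor.
 */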
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700124static int fastrpc_glink_open(int cid);
125static void fastrpc_glink_close(void *chan, int cid);
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +0530126static int fastrpc_pdr_notifier_cb(struct notifier_block *nb,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530127 unsigned long code,
128 void *data);
Sathish Ambley1ca68232017-01-19 10:32:55 -0800129static struct dentry *debugfs_root;
130static struct dentry *debugfs_global_file;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700131
132static inline uint64_t buf_page_start(uint64_t buf)
133{
134 uint64_t start = (uint64_t) buf & PAGE_MASK;
135 return start;
136}
137
138static inline uint64_t buf_page_offset(uint64_t buf)
139{
140 uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
141 return offset;
142}
143
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530144static inline uint64_t buf_num_pages(uint64_t buf, size_t len)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700145{
146 uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
147 uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530148 uint64_t nPages = end - start + 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700149 return nPages;
150}
151
152static inline uint64_t buf_page_size(uint32_t size)
153{
154 uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
155
156 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
157}
158
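/*
 * Worked example for the page helpers above (assuming 4K pages): a buffer
 * at 0x10000ff0 with len 0x20 straddles a page boundary, so
 * buf_num_pages() returns 2, while buf_page_size(0x20) rounds the size up
 * to a single PAGE_SIZE.
 */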
159static inline void *uint64_to_ptr(uint64_t addr)
160{
161 void *ptr = (void *)((uintptr_t)addr);
162
163 return ptr;
164}
165
166static inline uint64_t ptr_to_uint64(void *ptr)
167{
168 uint64_t addr = (uint64_t)((uintptr_t)ptr);
169
170 return addr;
171}
172
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +0530173struct secure_vm {
174 int *vmid;
175 int *vmperm;
176 int vmcount;
177};
178
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700179struct fastrpc_file;
180
181struct fastrpc_buf {
182 struct hlist_node hn;
183 struct fastrpc_file *fl;
184 void *virt;
185 uint64_t phys;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530186 size_t size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700187};
188
189struct fastrpc_ctx_lst;
190
191struct overlap {
192 uintptr_t start;
193 uintptr_t end;
194 int raix;
195 uintptr_t mstart;
196 uintptr_t mend;
197 uintptr_t offset;
198};
199
200struct smq_invoke_ctx {
201 struct hlist_node hn;
202 struct completion work;
203 int retval;
204 int pid;
205 int tgid;
206 remote_arg_t *lpra;
207 remote_arg64_t *rpra;
208 int *fds;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700209 struct fastrpc_mmap **maps;
210 struct fastrpc_buf *buf;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530211 size_t used;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700212 struct fastrpc_file *fl;
213 uint32_t sc;
214 struct overlap *overs;
215 struct overlap **overps;
216 struct smq_msg msg;
c_mtharufdac6892017-10-12 13:09:01 +0530217 unsigned int magic;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530218 unsigned int *attrs;
219 uint32_t *crc;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +0530220 uint64_t ctxid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700221};
222
223struct fastrpc_ctx_lst {
224 struct hlist_head pending;
225 struct hlist_head interrupted;
226};
227
228struct fastrpc_smmu {
c_mtharue1a5ce12017-10-13 20:47:09 +0530229 struct device *dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700230 struct dma_iommu_mapping *mapping;
231 int cb;
232 int enabled;
233 int faults;
234 int secure;
235 int coherent;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +0530236 int sharedcb;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700237};
238
239struct fastrpc_session_ctx {
240 struct device *dev;
241 struct fastrpc_smmu smmu;
242 int used;
243};
244
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530245struct fastrpc_static_pd {
246 char *spdname;
247 struct notifier_block pdrnb;
248 struct notifier_block get_service_nb;
249 void *pdrhandle;
250 int pdrcount;
251 int prevpdrcount;
252 int ispdup;
253};
254
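/*
 * Per-channel state for a static process domain (audio or sensors PD).
 * Inferred from the field names and the PDR callbacks wired up in gcinfo[]
 * below: pdrcount counts PD restarts reported through the notifier,
 * prevpdrcount remembers the last restart already handled, and ispdup
 * tracks whether the PD is currently up.
 */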
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700255struct fastrpc_glink_info {
256 int link_state;
257 int port_state;
258 struct glink_open_config cfg;
259 struct glink_link_info link_info;
260 void *link_notify_handle;
261};
262
263struct fastrpc_channel_ctx {
264 char *name;
265 char *subsys;
266 void *chan;
267 struct device *dev;
268 struct fastrpc_session_ctx session[NUM_SESSIONS];
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530269 struct fastrpc_static_pd spd[NUM_SESSIONS];
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700270 struct completion work;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +0530271 struct completion workport;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700272 struct notifier_block nb;
273 struct kref kref;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530274 int channel;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700275 int sesscount;
276 int ssrcount;
277 void *handle;
278 int prevssrcount;
c_mtharue1a5ce12017-10-13 20:47:09 +0530279 int issubsystemup;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700280 int vmid;
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +0530281 struct secure_vm rhvm;
c_mtharue1a5ce12017-10-13 20:47:09 +0530282 int ramdumpenabled;
283 void *remoteheap_ramdump_dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700284 struct fastrpc_glink_info link;
285};
286
287struct fastrpc_apps {
288 struct fastrpc_channel_ctx *channel;
289 struct cdev cdev;
290 struct class *class;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +0530291 struct mutex smd_mutex;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700292 struct smq_phy_page range;
293 struct hlist_head maps;
c_mtharue1a5ce12017-10-13 20:47:09 +0530294 uint32_t staticpd_flags;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700295 dev_t dev_no;
296 int compat;
297 struct hlist_head drivers;
298 spinlock_t hlock;
299 struct ion_client *client;
300 struct device *dev;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +0530301 unsigned int latency;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530302 bool glink;
303 bool legacy;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +0530304 spinlock_t ctxlock;
305 struct smq_invoke_ctx *ctxtable[FASTRPC_CTX_MAX];
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700306};
307
308struct fastrpc_mmap {
309 struct hlist_node hn;
310 struct fastrpc_file *fl;
311 struct fastrpc_apps *apps;
312 int fd;
313 uint32_t flags;
314 struct dma_buf *buf;
315 struct sg_table *table;
316 struct dma_buf_attachment *attach;
317 struct ion_handle *handle;
318 uint64_t phys;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530319 size_t size;
320 uintptr_t va;
321 size_t len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700322 int refs;
323 uintptr_t raddr;
324 int uncached;
325 int secure;
326 uintptr_t attr;
327};
328
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530329enum fastrpc_perfkeys {
330 PERF_COUNT = 0,
331 PERF_FLUSH = 1,
332 PERF_MAP = 2,
333 PERF_COPY = 3,
334 PERF_LINK = 4,
335 PERF_GETARGS = 5,
336 PERF_PUTARGS = 6,
337 PERF_INVARGS = 7,
338 PERF_INVOKE = 8,
339 PERF_KEY_MAX = 9,
340};
341
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800342struct fastrpc_perf {
343 int64_t count;
344 int64_t flush;
345 int64_t map;
346 int64_t copy;
347 int64_t link;
348 int64_t getargs;
349 int64_t putargs;
350 int64_t invargs;
351 int64_t invoke;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530352 int64_t tid;
353 struct hlist_node hn;
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800354};
355
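/*
 * Note: the leading int64_t members of struct fastrpc_perf are addressed
 * by the enum fastrpc_perfkeys offsets (getperfcounter() below returns
 * ((int64_t *)fperf) + key), so their order must stay in sync with that
 * enum.
 */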
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700356struct fastrpc_file {
357 struct hlist_node hn;
358 spinlock_t hlock;
359 struct hlist_head maps;
360 struct hlist_head bufs;
361 struct fastrpc_ctx_lst clst;
362 struct fastrpc_session_ctx *sctx;
363 struct fastrpc_session_ctx *secsctx;
364 uint32_t mode;
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800365 uint32_t profile;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +0530366 int sessionid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700367 int tgid;
368 int cid;
369 int ssrcount;
370 int pd;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530371 char *spdname;
tharun kumar9f899ea2017-07-03 17:07:03 +0530372 int file_close;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +0530373 int sharedcb;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700374 struct fastrpc_apps *apps;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530375 struct hlist_head perf;
Sathish Ambley1ca68232017-01-19 10:32:55 -0800376 struct dentry *debugfs_file;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530377 struct mutex perf_mutex;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +0530378 struct pm_qos_request pm_qos_req;
379 int qos_request;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +0530380 struct mutex map_mutex;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +0530381 struct mutex fl_map_mutex;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +0530382 int refcount;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700383};
384
385static struct fastrpc_apps gfa;
386
387static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
388 {
389 .name = "adsprpc-smd",
390 .subsys = "adsp",
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530391 .channel = SMD_APPS_QDSP,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700392 .link.link_info.edge = "lpass",
393 .link.link_info.transport = "smem",
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530394 .spd = {
395 {
396 .spdname =
397 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
398 .pdrnb.notifier_call =
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +0530399 fastrpc_pdr_notifier_cb,
400 },
401 {
402 .spdname =
403 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
404 .pdrnb.notifier_call =
405 fastrpc_pdr_notifier_cb,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +0530406 }
407 },
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700408 },
409 {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700410 .name = "mdsprpc-smd",
411 .subsys = "modem",
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530412 .channel = SMD_APPS_MODEM,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700413 .link.link_info.edge = "mpss",
414 .link.link_info.transport = "smem",
415 },
416 {
Sathish Ambley36849af2017-02-02 09:35:55 -0800417 .name = "sdsprpc-smd",
418 .subsys = "slpi",
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +0530419 .channel = SMD_APPS_DSPS,
Sathish Ambley36849af2017-02-02 09:35:55 -0800420 .link.link_info.edge = "dsps",
421 .link.link_info.transport = "smem",
Sathish Ambley36849af2017-02-02 09:35:55 -0800422 },
423 {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700424 .name = "cdsprpc-smd",
425 .subsys = "cdsp",
426 .link.link_info.edge = "cdsp",
427 .link.link_info.transport = "smem",
428 },
429};
430
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +0530431static int hlosvm[1] = {VMID_HLOS};
432static int hlosvmperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
433
Sathish Ambleya21b5b52017-01-11 16:11:01 -0800434static inline int64_t getnstimediff(struct timespec *start)
435{
436 int64_t ns;
437 struct timespec ts, b;
438
439 getnstimeofday(&ts);
440 b = timespec_sub(ts, *start);
441 ns = timespec_to_ns(&b);
442 return ns;
443}
444
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +0530445static inline int64_t *getperfcounter(struct fastrpc_file *fl, int key)
446{
447 int err = 0;
448 int64_t *val = NULL;
449 struct fastrpc_perf *perf = NULL, *fperf = NULL;
450 struct hlist_node *n = NULL;
451
452 VERIFY(err, !IS_ERR_OR_NULL(fl));
453 if (err)
454 goto bail;
455
456 mutex_lock(&fl->perf_mutex);
457 hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
458 if (perf->tid == current->pid) {
459 fperf = perf;
460 break;
461 }
462 }
463
464 if (IS_ERR_OR_NULL(fperf)) {
465 fperf = kzalloc(sizeof(*fperf), GFP_KERNEL);
466
467 VERIFY(err, !IS_ERR_OR_NULL(fperf));
468 if (err) {
469 mutex_unlock(&fl->perf_mutex);
470 kfree(fperf);
471 goto bail;
472 }
473
474 fperf->tid = current->pid;
475 hlist_add_head(&fperf->hn, &fl->perf);
476 }
477
478 val = ((int64_t *)fperf) + key;
479 mutex_unlock(&fl->perf_mutex);
480bail:
481 return val;
482}
483
484
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700485static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
486{
c_mtharue1a5ce12017-10-13 20:47:09 +0530487 struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700488 int vmid;
489
490 if (!fl)
491 return;
492 if (cache) {
493 spin_lock(&fl->hlock);
494 hlist_add_head(&buf->hn, &fl->bufs);
495 spin_unlock(&fl->hlock);
496 return;
497 }
498 if (!IS_ERR_OR_NULL(buf->virt)) {
499 int destVM[1] = {VMID_HLOS};
500 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
501
502 if (fl->sctx->smmu.cb)
503 buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
504 vmid = fl->apps->channel[fl->cid].vmid;
505 if (vmid) {
506 int srcVM[2] = {VMID_HLOS, vmid};
507
508 hyp_assign_phys(buf->phys, buf_page_size(buf->size),
509 srcVM, 2, destVM, destVMperm, 1);
510 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530511 dma_free_coherent(fl->sctx->smmu.dev, buf->size, buf->virt,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700512 buf->phys);
513 }
514 kfree(buf);
515}
516
517static void fastrpc_buf_list_free(struct fastrpc_file *fl)
518{
519 struct fastrpc_buf *buf, *free;
520
521 do {
522 struct hlist_node *n;
523
c_mtharue1a5ce12017-10-13 20:47:09 +0530524 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700525 spin_lock(&fl->hlock);
526 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
527 hlist_del_init(&buf->hn);
528 free = buf;
529 break;
530 }
531 spin_unlock(&fl->hlock);
532 if (free)
533 fastrpc_buf_free(free, 0);
534 } while (free);
535}
536
537static void fastrpc_mmap_add(struct fastrpc_mmap *map)
538{
c_mtharue1a5ce12017-10-13 20:47:09 +0530539 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
540 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
541 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700542
c_mtharue1a5ce12017-10-13 20:47:09 +0530543 spin_lock(&me->hlock);
544 hlist_add_head(&map->hn, &me->maps);
545 spin_unlock(&me->hlock);
546 } else {
547 struct fastrpc_file *fl = map->fl;
548
c_mtharue1a5ce12017-10-13 20:47:09 +0530549 hlist_add_head(&map->hn, &fl->maps);
c_mtharue1a5ce12017-10-13 20:47:09 +0530550 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700551}
552
c_mtharue1a5ce12017-10-13 20:47:09 +0530553static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530554 uintptr_t va, size_t len, int mflags, int refs,
c_mtharue1a5ce12017-10-13 20:47:09 +0530555 struct fastrpc_mmap **ppmap)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700556{
c_mtharue1a5ce12017-10-13 20:47:09 +0530557 struct fastrpc_apps *me = &gfa;
558 struct fastrpc_mmap *match = NULL, *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700559 struct hlist_node *n;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530560
561 if ((va + len) < va)
562 return -EOVERFLOW;
c_mtharue1a5ce12017-10-13 20:47:09 +0530563 if (mflags == ADSP_MMAP_HEAP_ADDR ||
564 mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
565 spin_lock(&me->hlock);
566 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
567 if (va >= map->va &&
568 va + len <= map->va + map->len &&
569 map->fd == fd) {
570 if (refs)
571 map->refs++;
572 match = map;
573 break;
574 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700575 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530576 spin_unlock(&me->hlock);
577 } else {
c_mtharue1a5ce12017-10-13 20:47:09 +0530578 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
579 if (va >= map->va &&
580 va + len <= map->va + map->len &&
581 map->fd == fd) {
582 if (refs)
583 map->refs++;
584 match = map;
585 break;
586 }
587 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700588 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700589 if (match) {
590 *ppmap = match;
591 return 0;
592 }
593 return -ENOTTY;
594}
595
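/*
 * Usage sketch: fastrpc_mmap_create() below calls
 * fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap) first, so an
 * already imported buffer is reused (with its refcount bumped) instead of
 * being re-imported and re-mapped. Remote-heap mappings live on the global
 * gfa.maps list under me->hlock; everything else is per fastrpc_file.
 */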
Tharun Kumar Merugu48d5ff32018-04-16 19:24:16 +0530596static int dma_alloc_memory(dma_addr_t *region_phys, size_t size)
c_mtharue1a5ce12017-10-13 20:47:09 +0530597{
598 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugu48d5ff32018-04-16 19:24:16 +0530599 void *vaddr = NULL;
600 unsigned long dma_attrs = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +0530601
602 if (me->dev == NULL) {
603 pr_err("device adsprpc-mem is not initialized\n");
604 return -ENODEV;
605 }
Tharun Kumar Merugu48d5ff32018-04-16 19:24:16 +0530606 dma_attrs |= DMA_ATTR_SKIP_ZEROING | DMA_ATTR_NO_KERNEL_MAPPING;
607 vaddr = dma_alloc_attrs(me->dev, size, region_phys, GFP_KERNEL,
608 dma_attrs);
609 if (!vaddr) {
c_mtharue1a5ce12017-10-13 20:47:09 +0530610 pr_err("ADSPRPC: Failed to allocate %x remote heap memory\n",
611 (unsigned int)size);
612 return -ENOMEM;
613 }
614 return 0;
615}
616
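/*
 * Helper for ADSP_MMAP_HEAP_ADDR / ADSP_MMAP_REMOTE_HEAP_ADDR requests:
 * the region is allocated from the adsprpc-mem device with no kernel
 * mapping and no zeroing, and only the physical address is reported back
 * through region_phys.
 */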
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700617static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530618 size_t len, struct fastrpc_mmap **ppmap)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700619{
c_mtharue1a5ce12017-10-13 20:47:09 +0530620 struct fastrpc_mmap *match = NULL, *map;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700621 struct hlist_node *n;
622 struct fastrpc_apps *me = &gfa;
623
624 spin_lock(&me->hlock);
625 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
626 if (map->raddr == va &&
627 map->raddr + map->len == va + len &&
628 map->refs == 1) {
629 match = map;
630 hlist_del_init(&map->hn);
631 break;
632 }
633 }
634 spin_unlock(&me->hlock);
635 if (match) {
636 *ppmap = match;
637 return 0;
638 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700639 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
640 if (map->raddr == va &&
641 map->raddr + map->len == va + len &&
642 map->refs == 1) {
643 match = map;
644 hlist_del_init(&map->hn);
645 break;
646 }
647 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700648 if (match) {
649 *ppmap = match;
650 return 0;
651 }
652 return -ENOTTY;
653}
654
c_mtharu7bd6a422017-10-17 18:15:37 +0530655static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700656{
c_mtharue1a5ce12017-10-13 20:47:09 +0530657 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700658 struct fastrpc_file *fl;
659 int vmid;
660 struct fastrpc_session_ctx *sess;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700661
662 if (!map)
663 return;
664 fl = map->fl;
c_mtharue1a5ce12017-10-13 20:47:09 +0530665 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
666 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
667 spin_lock(&me->hlock);
668 map->refs--;
669 if (!map->refs)
670 hlist_del_init(&map->hn);
671 spin_unlock(&me->hlock);
c_mtharu7bd6a422017-10-17 18:15:37 +0530672 if (map->refs > 0)
673 return;
c_mtharue1a5ce12017-10-13 20:47:09 +0530674 } else {
c_mtharue1a5ce12017-10-13 20:47:09 +0530675 map->refs--;
676 if (!map->refs)
677 hlist_del_init(&map->hn);
c_mtharu7bd6a422017-10-17 18:15:37 +0530678 if (map->refs > 0 && !flags)
679 return;
c_mtharue1a5ce12017-10-13 20:47:09 +0530680 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530681 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
682 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
Tharun Kumar Merugu48d5ff32018-04-16 19:24:16 +0530683 unsigned long dma_attrs = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700684
c_mtharue1a5ce12017-10-13 20:47:09 +0530685 if (me->dev == NULL) {
686 pr_err("failed to free remote heap allocation\n");
687 return;
688 }
689 if (map->phys) {
Tharun Kumar Merugu48d5ff32018-04-16 19:24:16 +0530690 dma_attrs |=
691 DMA_ATTR_SKIP_ZEROING | DMA_ATTR_NO_KERNEL_MAPPING;
692 dma_free_attrs(me->dev, map->size, (void *)map->va,
693 (dma_addr_t)map->phys, dma_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +0530694 }
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +0530695 } else if (map->flags == FASTRPC_DMAHANDLE_NOMAP) {
696 if (!IS_ERR_OR_NULL(map->handle))
697 ion_free(fl->apps->client, map->handle);
c_mtharue1a5ce12017-10-13 20:47:09 +0530698 } else {
699 int destVM[1] = {VMID_HLOS};
700 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
701
702 if (map->secure)
703 sess = fl->secsctx;
704 else
705 sess = fl->sctx;
706
707 if (!IS_ERR_OR_NULL(map->handle))
708 ion_free(fl->apps->client, map->handle);
709 if (sess && sess->smmu.enabled) {
710 if (map->size || map->phys)
711 msm_dma_unmap_sg(sess->smmu.dev,
712 map->table->sgl,
713 map->table->nents, DMA_BIDIRECTIONAL,
714 map->buf);
715 }
716 vmid = fl->apps->channel[fl->cid].vmid;
717 if (vmid && map->phys) {
718 int srcVM[2] = {VMID_HLOS, vmid};
719
720 hyp_assign_phys(map->phys, buf_page_size(map->size),
721 srcVM, 2, destVM, destVMperm, 1);
722 }
723
724 if (!IS_ERR_OR_NULL(map->table))
725 dma_buf_unmap_attachment(map->attach, map->table,
726 DMA_BIDIRECTIONAL);
727 if (!IS_ERR_OR_NULL(map->attach))
728 dma_buf_detach(map->buf, map->attach);
729 if (!IS_ERR_OR_NULL(map->buf))
730 dma_buf_put(map->buf);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700731 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700732 kfree(map);
733}
734
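/*
 * Note on the flags argument above: buffers imported with
 * FASTRPC_ATTR_KEEP_MAP start with refs == 2 (see fastrpc_mmap_create()
 * below), so a normal put leaves them mapped; callers pass a non-zero
 * flags value to force teardown of such a persistent (non remote-heap)
 * mapping even while refs is still positive.
 */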
735static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
736 struct fastrpc_session_ctx **session);
737
738static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530739 unsigned int attr, uintptr_t va, size_t len, int mflags,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700740 struct fastrpc_mmap **ppmap)
741{
c_mtharue1a5ce12017-10-13 20:47:09 +0530742 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700743 struct fastrpc_session_ctx *sess;
744 struct fastrpc_apps *apps = fl->apps;
745 int cid = fl->cid;
746 struct fastrpc_channel_ctx *chan = &apps->channel[cid];
c_mtharue1a5ce12017-10-13 20:47:09 +0530747 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700748 unsigned long attrs;
c_mtharuf931ff92017-11-30 19:35:30 +0530749 dma_addr_t region_phys = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700750 unsigned long flags;
751 int err = 0, vmid;
752
Sathish Ambleyae5ee542017-01-16 22:24:23 -0800753 if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700754 return 0;
755 map = kzalloc(sizeof(*map), GFP_KERNEL);
756 VERIFY(err, !IS_ERR_OR_NULL(map));
757 if (err)
758 goto bail;
759 INIT_HLIST_NODE(&map->hn);
760 map->flags = mflags;
761 map->refs = 1;
762 map->fl = fl;
763 map->fd = fd;
764 map->attr = attr;
c_mtharue1a5ce12017-10-13 20:47:09 +0530765 if (mflags == ADSP_MMAP_HEAP_ADDR ||
766 mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
767 map->apps = me;
768 map->fl = NULL;
Tharun Kumar Merugu48d5ff32018-04-16 19:24:16 +0530769 VERIFY(err, !dma_alloc_memory(&region_phys, len));
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700770 if (err)
771 goto bail;
c_mtharuf931ff92017-11-30 19:35:30 +0530772 map->phys = (uintptr_t)region_phys;
c_mtharue1a5ce12017-10-13 20:47:09 +0530773 map->size = len;
Tharun Kumar Merugu48d5ff32018-04-16 19:24:16 +0530774 map->va = (uintptr_t)map->phys;
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +0530775 } else if (mflags == FASTRPC_DMAHANDLE_NOMAP) {
776 ion_phys_addr_t iphys;
777
778 VERIFY(err, !IS_ERR_OR_NULL(map->handle =
779 ion_import_dma_buf_fd(fl->apps->client, fd)));
780 if (err)
781 goto bail;
782
783 map->uncached = 1;
784 map->buf = NULL;
785 map->attach = NULL;
786 map->table = NULL;
787 map->va = 0;
788 map->phys = 0;
789
790 err = ion_phys(fl->apps->client, map->handle,
791 &iphys, &map->size);
792 if (err)
793 goto bail;
794 map->phys = (uint64_t)iphys;
c_mtharue1a5ce12017-10-13 20:47:09 +0530795 } else {
c_mtharu7bd6a422017-10-17 18:15:37 +0530796 if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
797 pr_info("adsprpc: buffer mapped with persist attr %x\n",
798 (unsigned int)map->attr);
799 map->refs = 2;
800 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530801 VERIFY(err, !IS_ERR_OR_NULL(map->handle =
802 ion_import_dma_buf_fd(fl->apps->client, fd)));
803 if (err)
804 goto bail;
805 VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
806 &flags));
807 if (err)
808 goto bail;
809
c_mtharue1a5ce12017-10-13 20:47:09 +0530810 map->secure = flags & ION_FLAG_SECURE;
811 if (map->secure) {
812 if (!fl->secsctx)
813 err = fastrpc_session_alloc(chan, 1,
814 &fl->secsctx);
815 if (err)
816 goto bail;
817 }
818 if (map->secure)
819 sess = fl->secsctx;
820 else
821 sess = fl->sctx;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +0530822
c_mtharue1a5ce12017-10-13 20:47:09 +0530823 VERIFY(err, !IS_ERR_OR_NULL(sess));
824 if (err)
825 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +0530826
827 map->uncached = !ION_IS_CACHED(flags);
828 if (map->attr & FASTRPC_ATTR_NOVA && !sess->smmu.coherent)
829 map->uncached = 1;
830
c_mtharue1a5ce12017-10-13 20:47:09 +0530831 VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
832 if (err)
833 goto bail;
834 VERIFY(err, !IS_ERR_OR_NULL(map->attach =
835 dma_buf_attach(map->buf, sess->smmu.dev)));
836 if (err)
837 goto bail;
838 VERIFY(err, !IS_ERR_OR_NULL(map->table =
839 dma_buf_map_attachment(map->attach,
840 DMA_BIDIRECTIONAL)));
841 if (err)
842 goto bail;
843 if (sess->smmu.enabled) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700844 attrs = DMA_ATTR_EXEC_MAPPING;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +0530845
846 if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
847 (sess->smmu.coherent && map->uncached))
848 attrs |= DMA_ATTR_FORCE_NON_COHERENT;
849 else if (map->attr & FASTRPC_ATTR_COHERENT)
850 attrs |= DMA_ATTR_FORCE_COHERENT;
851
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700852 VERIFY(err, map->table->nents ==
c_mtharue1a5ce12017-10-13 20:47:09 +0530853 msm_dma_map_sg_attrs(sess->smmu.dev,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700854 map->table->sgl, map->table->nents,
855 DMA_BIDIRECTIONAL, map->buf, attrs));
c_mtharue1a5ce12017-10-13 20:47:09 +0530856 if (err)
857 goto bail;
858 } else {
859 VERIFY(err, map->table->nents == 1);
860 if (err)
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700861 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +0530862 }
863 map->phys = sg_dma_address(map->table->sgl);
Tharun Kumar Merugu93f319a2018-02-01 17:35:42 +0530864
c_mtharue1a5ce12017-10-13 20:47:09 +0530865 if (sess->smmu.cb) {
866 map->phys += ((uint64_t)sess->smmu.cb << 32);
867 map->size = sg_dma_len(map->table->sgl);
868 } else {
869 map->size = buf_page_size(len);
870 }
Tharun Kumar Merugu93f319a2018-02-01 17:35:42 +0530871
c_mtharue1a5ce12017-10-13 20:47:09 +0530872 vmid = fl->apps->channel[fl->cid].vmid;
Tharun Kumar Merugu93f319a2018-02-01 17:35:42 +0530873 if (!sess->smmu.enabled && !vmid) {
874 VERIFY(err, map->phys >= me->range.addr &&
875 map->phys + map->size <=
876 me->range.addr + me->range.size);
877 if (err) {
878 pr_err("adsprpc: mmap fail out of range\n");
879 goto bail;
880 }
881 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530882 if (vmid) {
883 int srcVM[1] = {VMID_HLOS};
884 int destVM[2] = {VMID_HLOS, vmid};
885 int destVMperm[2] = {PERM_READ | PERM_WRITE,
886 PERM_READ | PERM_WRITE | PERM_EXEC};
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700887
c_mtharue1a5ce12017-10-13 20:47:09 +0530888 VERIFY(err, !hyp_assign_phys(map->phys,
889 buf_page_size(map->size),
890 srcVM, 1, destVM, destVMperm, 2));
891 if (err)
892 goto bail;
893 }
894 map->va = va;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700895 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700896 map->len = len;
897
898 fastrpc_mmap_add(map);
899 *ppmap = map;
900
901bail:
902 if (err && map)
c_mtharu7bd6a422017-10-17 18:15:37 +0530903 fastrpc_mmap_free(map, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700904 return err;
905}
906
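/*
 * fastrpc_mmap_create() above takes one of three paths: remote-heap
 * requests (ADSP_MMAP_HEAP_ADDR / ADSP_MMAP_REMOTE_HEAP_ADDR) are carved
 * out of the adsprpc-mem device via dma_alloc_memory(),
 * FASTRPC_DMAHANDLE_NOMAP handles are only resolved to a physical address
 * with ion_phys() and get no SMMU mapping, and every other fd is imported
 * as a dma_buf and mapped through the session's SMMU context bank, with
 * hyp_assign_phys() applied when the channel carries a VM id.
 */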
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +0530907static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700908 struct fastrpc_buf **obuf)
909{
910 int err = 0, vmid;
c_mtharue1a5ce12017-10-13 20:47:09 +0530911 struct fastrpc_buf *buf = NULL, *fr = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700912 struct hlist_node *n;
913
914 VERIFY(err, size > 0);
915 if (err)
916 goto bail;
917
918 /* find the smallest buffer that fits in the cache */
919 spin_lock(&fl->hlock);
920 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
921 if (buf->size >= size && (!fr || fr->size > buf->size))
922 fr = buf;
923 }
924 if (fr)
925 hlist_del_init(&fr->hn);
926 spin_unlock(&fl->hlock);
927 if (fr) {
928 *obuf = fr;
929 return 0;
930 }
c_mtharue1a5ce12017-10-13 20:47:09 +0530931 buf = NULL;
932 VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700933 if (err)
934 goto bail;
935 INIT_HLIST_NODE(&buf->hn);
936 buf->fl = fl;
c_mtharue1a5ce12017-10-13 20:47:09 +0530937 buf->virt = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700938 buf->phys = 0;
939 buf->size = size;
c_mtharue1a5ce12017-10-13 20:47:09 +0530940 buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700941 (void *)&buf->phys, GFP_KERNEL);
942 if (IS_ERR_OR_NULL(buf->virt)) {
943 /* free cache and retry */
944 fastrpc_buf_list_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +0530945 buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700946 (void *)&buf->phys, GFP_KERNEL);
947 VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
948 }
949 if (err)
950 goto bail;
951 if (fl->sctx->smmu.cb)
952 buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
953 vmid = fl->apps->channel[fl->cid].vmid;
954 if (vmid) {
955 int srcVM[1] = {VMID_HLOS};
956 int destVM[2] = {VMID_HLOS, vmid};
957 int destVMperm[2] = {PERM_READ | PERM_WRITE,
958 PERM_READ | PERM_WRITE | PERM_EXEC};
959
960 VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
961 srcVM, 1, destVM, destVMperm, 2));
962 if (err)
963 goto bail;
964 }
965
966 *obuf = buf;
967 bail:
968 if (err && buf)
969 fastrpc_buf_free(buf, 0);
970 return err;
971}
972
973
974static int context_restore_interrupted(struct fastrpc_file *fl,
Sathish Ambleybae51902017-07-03 15:00:49 -0700975 struct fastrpc_ioctl_invoke_crc *inv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700976 struct smq_invoke_ctx **po)
977{
978 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +0530979 struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -0700980 struct hlist_node *n;
981 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
982
983 spin_lock(&fl->hlock);
984 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
985 if (ictx->pid == current->pid) {
986 if (invoke->sc != ictx->sc || ictx->fl != fl)
987 err = -1;
988 else {
989 ctx = ictx;
990 hlist_del_init(&ctx->hn);
991 hlist_add_head(&ctx->hn, &fl->clst.pending);
992 }
993 break;
994 }
995 }
996 spin_unlock(&fl->hlock);
997 if (ctx)
998 *po = ctx;
999 return err;
1000}
1001
1002#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
1003static int overlap_ptr_cmp(const void *a, const void *b)
1004{
1005 struct overlap *pa = *((struct overlap **)a);
1006 struct overlap *pb = *((struct overlap **)b);
1007 /* sort with lowest starting buffer first */
1008 int st = CMP(pa->start, pb->start);
1009 /* sort with highest ending buffer first */
1010 int ed = CMP(pb->end, pa->end);
1011 return st == 0 ? ed : st;
1012}
1013
Sathish Ambley9466d672017-01-25 10:51:55 -08001014static int context_build_overlap(struct smq_invoke_ctx *ctx)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001015{
Sathish Ambley9466d672017-01-25 10:51:55 -08001016 int i, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001017 remote_arg_t *lpra = ctx->lpra;
1018 int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
1019 int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
1020 int nbufs = inbufs + outbufs;
1021 struct overlap max;
1022
1023 for (i = 0; i < nbufs; ++i) {
1024 ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
1025 ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
Sathish Ambley9466d672017-01-25 10:51:55 -08001026 if (lpra[i].buf.len) {
1027 VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
1028 if (err)
1029 goto bail;
1030 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001031 ctx->overs[i].raix = i;
1032 ctx->overps[i] = &ctx->overs[i];
1033 }
c_mtharue1a5ce12017-10-13 20:47:09 +05301034 sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001035 max.start = 0;
1036 max.end = 0;
1037 for (i = 0; i < nbufs; ++i) {
1038 if (ctx->overps[i]->start < max.end) {
1039 ctx->overps[i]->mstart = max.end;
1040 ctx->overps[i]->mend = ctx->overps[i]->end;
1041 ctx->overps[i]->offset = max.end -
1042 ctx->overps[i]->start;
1043 if (ctx->overps[i]->end > max.end) {
1044 max.end = ctx->overps[i]->end;
1045 } else {
1046 ctx->overps[i]->mend = 0;
1047 ctx->overps[i]->mstart = 0;
1048 }
1049 } else {
1050 ctx->overps[i]->mend = ctx->overps[i]->end;
1051 ctx->overps[i]->mstart = ctx->overps[i]->start;
1052 ctx->overps[i]->offset = 0;
1053 max = *ctx->overps[i];
1054 }
1055 }
Sathish Ambley9466d672017-01-25 10:51:55 -08001056bail:
1057 return err;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001058}
1059
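/*
 * Example of the overlap pass above (a sketch): with input buffers
 * A = [0x1000, 0x1800) and B = [0x1400, 0x1600), the pointers are sorted
 * by start address, A keeps mstart/mend = [0x1000, 0x1800), and B, being
 * fully contained in A, ends up with mstart = mend = 0 so that get_args()
 * neither counts nor copies the overlapping bytes twice.
 */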
1060#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
1061 do {\
1062 if (!(kernel))\
c_mtharue1a5ce12017-10-13 20:47:09 +05301063 VERIFY(err, 0 == copy_from_user((dst),\
1064 (void const __user *)(src),\
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001065 (size)));\
1066 else\
1067 memmove((dst), (src), (size));\
1068 } while (0)
1069
1070#define K_COPY_TO_USER(err, kernel, dst, src, size) \
1071 do {\
1072 if (!(kernel))\
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301073 VERIFY(err, 0 == copy_to_user((void __user *)(dst),\
c_mtharue1a5ce12017-10-13 20:47:09 +05301074 (src), (size)));\
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001075 else\
1076 memmove((dst), (src), (size));\
1077 } while (0)
1078
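/*
 * Usage sketch for the copy helpers above: "kernel" selects between a
 * user-space copy and a plain memmove, e.g.
 *
 *	K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
 *			 bufs * sizeof(*ctx->lpra));
 *	if (err)
 *		goto bail;
 *
 * so the same marshalling code serves both ioctl callers and in-kernel
 * callers elsewhere in the driver.
 */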
1079
1080static void context_free(struct smq_invoke_ctx *ctx);
1081
1082static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07001083 struct fastrpc_ioctl_invoke_crc *invokefd,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001084 struct smq_invoke_ctx **po)
1085{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301086 struct fastrpc_apps *me = &gfa;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301087 int err = 0, bufs, ii, size = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05301088 struct smq_invoke_ctx *ctx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001089 struct fastrpc_ctx_lst *clst = &fl->clst;
1090 struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;
1091
1092 bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
1093 size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
1094 sizeof(*ctx->fds) * (bufs) +
1095 sizeof(*ctx->attrs) * (bufs) +
1096 sizeof(*ctx->overs) * (bufs) +
1097 sizeof(*ctx->overps) * (bufs);
1098
c_mtharue1a5ce12017-10-13 20:47:09 +05301099 VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001100 if (err)
1101 goto bail;
1102
1103 INIT_HLIST_NODE(&ctx->hn);
1104 hlist_add_fake(&ctx->hn);
1105 ctx->fl = fl;
1106 ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
1107 ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
1108 ctx->fds = (int *)(&ctx->lpra[bufs]);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301109 if (me->legacy) {
1110 ctx->overs = (struct overlap *)(&ctx->fds[bufs]);
1111 ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
1112 } else {
1113 ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
1114 ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
1115 ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
1116 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001117
c_mtharue1a5ce12017-10-13 20:47:09 +05301118 K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001119 bufs * sizeof(*ctx->lpra));
1120 if (err)
1121 goto bail;
1122
1123 if (invokefd->fds) {
1124 K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
1125 bufs * sizeof(*ctx->fds));
1126 if (err)
1127 goto bail;
1128 }
1129 if (invokefd->attrs) {
1130 K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
1131 bufs * sizeof(*ctx->attrs));
1132 if (err)
1133 goto bail;
1134 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001135 ctx->crc = (uint32_t *)invokefd->crc;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001136 ctx->sc = invoke->sc;
Sathish Ambley9466d672017-01-25 10:51:55 -08001137 if (bufs) {
1138 VERIFY(err, 0 == context_build_overlap(ctx));
1139 if (err)
1140 goto bail;
1141 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001142 ctx->retval = -1;
1143 ctx->pid = current->pid;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301144 ctx->tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001145 init_completion(&ctx->work);
c_mtharufdac6892017-10-12 13:09:01 +05301146 ctx->magic = FASTRPC_CTX_MAGIC;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001147
1148 spin_lock(&fl->hlock);
1149 hlist_add_head(&ctx->hn, &clst->pending);
1150 spin_unlock(&fl->hlock);
1151
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301152 spin_lock(&me->ctxlock);
1153 for (ii = 0; ii < FASTRPC_CTX_MAX; ii++) {
1154 if (!me->ctxtable[ii]) {
1155 me->ctxtable[ii] = ctx;
1156 ctx->ctxid = (ptr_to_uint64(ctx) & ~0xFFF)|(ii << 4);
1157 break;
1158 }
1159 }
1160 spin_unlock(&me->ctxlock);
1161 VERIFY(err, ii < FASTRPC_CTX_MAX);
1162 if (err) {
1163 pr_err("adsprpc: out of context memory\n");
1164 goto bail;
1165 }
1166
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001167 *po = ctx;
1168bail:
1169 if (ctx && err)
1170 context_free(ctx);
1171 return err;
1172}
1173
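/*
 * Context-id encoding used in context_alloc() above: the low 12 bits of
 * the ctx pointer are cleared and bits 4..11 are replaced with the
 * ctxtable slot index, so a response can later be matched back to its slot
 * via (ctxid & FASTRPC_CTXID_MASK) >> 4 (the response path itself is
 * outside this excerpt) while the upper pointer bits keep the id unique.
 */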
1174static void context_save_interrupted(struct smq_invoke_ctx *ctx)
1175{
1176 struct fastrpc_ctx_lst *clst = &ctx->fl->clst;
1177
1178 spin_lock(&ctx->fl->hlock);
1179 hlist_del_init(&ctx->hn);
1180 hlist_add_head(&ctx->hn, &clst->interrupted);
1181 spin_unlock(&ctx->fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001182}
1183
1184static void context_free(struct smq_invoke_ctx *ctx)
1185{
1186 int i;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301187 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001188 int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
1189 REMOTE_SCALARS_OUTBUFS(ctx->sc);
1190 spin_lock(&ctx->fl->hlock);
1191 hlist_del_init(&ctx->hn);
1192 spin_unlock(&ctx->fl->hlock);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301193 mutex_lock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001194 for (i = 0; i < nbufs; ++i)
c_mtharu7bd6a422017-10-17 18:15:37 +05301195 fastrpc_mmap_free(ctx->maps[i], 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301196
1197 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001198 fastrpc_buf_free(ctx->buf, 1);
c_mtharufdac6892017-10-12 13:09:01 +05301199 ctx->magic = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301200 ctx->ctxid = 0;
1201
1202 spin_lock(&me->ctxlock);
1203 for (i = 0; i < FASTRPC_CTX_MAX; i++) {
1204 if (me->ctxtable[i] == ctx) {
1205 me->ctxtable[i] = NULL;
1206 break;
1207 }
1208 }
1209 spin_unlock(&me->ctxlock);
1210
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001211 kfree(ctx);
1212}
1213
1214static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
1215{
1216 ctx->retval = retval;
1217 complete(&ctx->work);
1218}
1219
1220
1221static void fastrpc_notify_users(struct fastrpc_file *me)
1222{
1223 struct smq_invoke_ctx *ictx;
1224 struct hlist_node *n;
1225
1226 spin_lock(&me->hlock);
1227 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1228 complete(&ictx->work);
1229 }
1230 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1231 complete(&ictx->work);
1232 }
1233 spin_unlock(&me->hlock);
1234
1235}
1236
Tharun Kumar Merugu77dd5872018-04-02 12:48:17 +05301237
1238static void fastrpc_notify_users_staticpd_pdr(struct fastrpc_file *me)
1239{
1240 struct smq_invoke_ctx *ictx;
1241 struct hlist_node *n;
1242
1243 spin_lock(&me->hlock);
1244 hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
1245 if (ictx->msg.pid)
1246 complete(&ictx->work);
1247 }
1248 hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
1249 if (ictx->msg.pid)
1250 complete(&ictx->work);
1251 }
1252 spin_unlock(&me->hlock);
1253}
1254
1255
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001256static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
1257{
1258 struct fastrpc_file *fl;
1259 struct hlist_node *n;
1260
1261 spin_lock(&me->hlock);
1262 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1263 if (fl->cid == cid)
1264 fastrpc_notify_users(fl);
1265 }
1266 spin_unlock(&me->hlock);
1267
1268}
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05301269
1270static void fastrpc_notify_pdr_drivers(struct fastrpc_apps *me, char *spdname)
1271{
1272 struct fastrpc_file *fl;
1273 struct hlist_node *n;
1274
1275 spin_lock(&me->hlock);
1276 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1277 if (fl->spdname && !strcmp(spdname, fl->spdname))
Tharun Kumar Merugu77dd5872018-04-02 12:48:17 +05301278 fastrpc_notify_users_staticpd_pdr(fl);
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05301279 }
1280 spin_unlock(&me->hlock);
1281
1282}
1283
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001284static void context_list_ctor(struct fastrpc_ctx_lst *me)
1285{
1286 INIT_HLIST_HEAD(&me->interrupted);
1287 INIT_HLIST_HEAD(&me->pending);
1288}
1289
1290static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
1291{
1292 struct fastrpc_ctx_lst *clst = &fl->clst;
c_mtharue1a5ce12017-10-13 20:47:09 +05301293 struct smq_invoke_ctx *ictx = NULL, *ctxfree;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001294 struct hlist_node *n;
1295
1296 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301297 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001298 spin_lock(&fl->hlock);
1299 hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
1300 hlist_del_init(&ictx->hn);
1301 ctxfree = ictx;
1302 break;
1303 }
1304 spin_unlock(&fl->hlock);
1305 if (ctxfree)
1306 context_free(ctxfree);
1307 } while (ctxfree);
1308 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301309 ctxfree = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001310 spin_lock(&fl->hlock);
1311 hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
1312 hlist_del_init(&ictx->hn);
1313 ctxfree = ictx;
1314 break;
1315 }
1316 spin_unlock(&fl->hlock);
1317 if (ctxfree)
1318 context_free(ctxfree);
1319 } while (ctxfree);
1320}
1321
1322static int fastrpc_file_free(struct fastrpc_file *fl);
1323static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
1324{
1325 struct fastrpc_file *fl, *free;
1326 struct hlist_node *n;
1327
1328 do {
c_mtharue1a5ce12017-10-13 20:47:09 +05301329 free = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001330 spin_lock(&me->hlock);
1331 hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
1332 hlist_del_init(&fl->hn);
1333 free = fl;
1334 break;
1335 }
1336 spin_unlock(&me->hlock);
1337 if (free)
1338 fastrpc_file_free(free);
1339 } while (free);
1340}
1341
1342static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
1343{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301344 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001345 remote_arg64_t *rpra;
1346 remote_arg_t *lpra = ctx->lpra;
1347 struct smq_invoke_buf *list;
1348 struct smq_phy_page *pages, *ipage;
1349 uint32_t sc = ctx->sc;
1350 int inbufs = REMOTE_SCALARS_INBUFS(sc);
1351 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001352 int handles, bufs = inbufs + outbufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001353 uintptr_t args;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301354 size_t rlen = 0, copylen = 0, metalen = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001355 int i, oix;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001356 int err = 0;
1357 int mflags = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001358 uint64_t *fdlist;
Sathish Ambleybae51902017-07-03 15:00:49 -07001359 uint32_t *crclist;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301360 int64_t *perf_counter = getperfcounter(ctx->fl, PERF_COUNT);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001361
1362 /* calculate size of the metadata */
c_mtharue1a5ce12017-10-13 20:47:09 +05301363 rpra = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001364 list = smq_invoke_buf_start(rpra, sc);
1365 pages = smq_phy_page_start(sc, list);
1366 ipage = pages;
1367
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301368 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001369 for (i = 0; i < bufs; ++i) {
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301370 uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
1371 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001372
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301373 mutex_lock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301374 if (ctx->fds[i] && (ctx->fds[i] != -1)) {
1375 unsigned int attrs = 0;
1376
1377 if (ctx->attrs)
1378 attrs = ctx->attrs[i];
1379
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001380 fastrpc_mmap_create(ctx->fl, ctx->fds[i],
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301381 attrs, buf, len,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001382 mflags, &ctx->maps[i]);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301383 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301384 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001385 ipage += 1;
1386 }
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301387 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001388 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301389 mutex_lock(&ctx->fl->fl_map_mutex);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001390 for (i = bufs; i < bufs + handles; i++) {
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +05301391 int dmaflags = 0;
1392
1393 if (ctx->attrs && (ctx->attrs[i] & FASTRPC_ATTR_NOMAP))
1394 dmaflags = FASTRPC_DMAHANDLE_NOMAP;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001395 VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +05301396 FASTRPC_ATTR_NOVA, 0, 0, dmaflags, &ctx->maps[i]));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301397 if (err) {
1398 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001399 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301400 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001401 ipage += 1;
1402 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301403 mutex_unlock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301404 if (!me->legacy) {
1405 metalen = copylen = (size_t)&ipage[0] +
1406 (sizeof(uint64_t) * M_FDLIST) +
1407 (sizeof(uint32_t) * M_CRCLIST);
1408 } else {
1409 metalen = copylen = (size_t)&ipage[0];
1410 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001411
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001412 /* calculate len required for copying */
1413 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1414 int i = ctx->overps[oix]->raix;
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001415 uintptr_t mstart, mend;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301416 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001417
1418 if (!len)
1419 continue;
1420 if (ctx->maps[i])
1421 continue;
1422 if (ctx->overps[oix]->offset == 0)
1423 copylen = ALIGN(copylen, BALIGN);
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001424 mstart = ctx->overps[oix]->mstart;
1425 mend = ctx->overps[oix]->mend;
1426 VERIFY(err, (mend - mstart) <= LONG_MAX);
1427 if (err)
1428 goto bail;
1429 copylen += mend - mstart;
1430 VERIFY(err, copylen >= 0);
1431 if (err)
1432 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001433 }
1434 ctx->used = copylen;
1435
1436 /* allocate new buffer */
1437 if (copylen) {
1438 VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
1439 if (err)
1440 goto bail;
1441 }
Tharun Kumar Merugue3361f92017-06-22 10:45:43 +05301442 if (ctx->buf->virt && metalen <= copylen)
1443 memset(ctx->buf->virt, 0, metalen);
1444
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001445 /* copy metadata */
1446 rpra = ctx->buf->virt;
1447 ctx->rpra = rpra;
1448 list = smq_invoke_buf_start(rpra, sc);
1449 pages = smq_phy_page_start(sc, list);
1450 ipage = pages;
1451 args = (uintptr_t)ctx->buf->virt + metalen;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001452 for (i = 0; i < bufs + handles; ++i) {
1453 if (lpra[i].buf.len)
1454 list[i].num = 1;
1455 else
1456 list[i].num = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001457 list[i].pgidx = ipage - pages;
1458 ipage++;
1459 }
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301460
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001461 /* map ion buffers */
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301462 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301463 for (i = 0; rpra && i < inbufs + outbufs; ++i) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001464 struct fastrpc_mmap *map = ctx->maps[i];
1465 uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301466 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001467
1468 rpra[i].buf.pv = 0;
1469 rpra[i].buf.len = len;
1470 if (!len)
1471 continue;
1472 if (map) {
1473 struct vm_area_struct *vma;
1474 uintptr_t offset;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301475 uint64_t num = buf_num_pages(buf, len);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001476 int idx = list[i].pgidx;
1477
1478 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001479 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001480 } else {
1481 down_read(&current->mm->mmap_sem);
1482 VERIFY(err, NULL != (vma = find_vma(current->mm,
1483 map->va)));
1484 if (err) {
1485 up_read(&current->mm->mmap_sem);
1486 goto bail;
1487 }
1488 offset = buf_page_start(buf) - vma->vm_start;
1489 up_read(&current->mm->mmap_sem);
1490 VERIFY(err, offset < (uintptr_t)map->size);
1491 if (err)
1492 goto bail;
1493 }
1494 pages[idx].addr = map->phys + offset;
1495 pages[idx].size = num << PAGE_SHIFT;
1496 }
1497 rpra[i].buf.pv = buf;
1498 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001499 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001500 for (i = bufs; i < bufs + handles; ++i) {
1501 struct fastrpc_mmap *map = ctx->maps[i];
1502
1503 pages[i].addr = map->phys;
1504 pages[i].size = map->size;
1505 }
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301506 if (!me->legacy) {
1507 fdlist = (uint64_t *)&pages[bufs + handles];
1508 for (i = 0; i < M_FDLIST; i++)
1509 fdlist[i] = 0;
1510 crclist = (uint32_t *)&fdlist[M_FDLIST];
1511 memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
1512 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001513
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001514 /* copy non-ion buffers */
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301515 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001516 rlen = copylen - metalen;
Tharun Kumar Meruguc230bd72018-01-29 18:02:42 +05301517 for (oix = 0; rpra && oix < inbufs + outbufs; ++oix) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001518 int i = ctx->overps[oix]->raix;
1519 struct fastrpc_mmap *map = ctx->maps[i];
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301520 size_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001521 uint64_t buf;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301522 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001523
1524 if (!len)
1525 continue;
1526 if (map)
1527 continue;
1528 if (ctx->overps[oix]->offset == 0) {
1529 rlen -= ALIGN(args, BALIGN) - args;
1530 args = ALIGN(args, BALIGN);
1531 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001532 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001533 VERIFY(err, rlen >= mlen);
1534 if (err)
1535 goto bail;
1536 rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
1537 pages[list[i].pgidx].addr = ctx->buf->phys -
1538 ctx->overps[oix]->offset +
1539 (copylen - rlen);
1540 pages[list[i].pgidx].addr =
1541 buf_page_start(pages[list[i].pgidx].addr);
1542 buf = rpra[i].buf.pv;
1543 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1544 if (i < inbufs) {
1545 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1546 lpra[i].buf.pv, len);
1547 if (err)
1548 goto bail;
1549 }
1550 args = args + mlen;
1551 rlen -= mlen;
1552 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001553 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001554
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301555 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001556 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1557 int i = ctx->overps[oix]->raix;
1558 struct fastrpc_mmap *map = ctx->maps[i];
1559
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001560 if (map && map->uncached)
1561 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301562 if (ctx->fl->sctx->smmu.coherent &&
1563 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1564 continue;
1565 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1566 continue;
1567
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301568 if (rpra && rpra[i].buf.len && ctx->overps[oix]->mstart) {
1569 if (map && map->handle)
1570 msm_ion_do_cache_op(ctx->fl->apps->client,
1571 map->handle,
1572 uint64_to_ptr(rpra[i].buf.pv),
1573 rpra[i].buf.len,
1574 ION_IOC_CLEAN_INV_CACHES);
1575 else
1576 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1577 uint64_to_ptr(rpra[i].buf.pv
1578 + rpra[i].buf.len));
1579 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001580 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001581 PERF_END);
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301582 for (i = bufs; rpra && i < bufs + handles; i++) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001583 rpra[i].dma.fd = ctx->fds[i];
1584 rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1585 rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001586 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001587
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001588 bail:
1589 return err;
1590}
1591
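/*
 * put_args() - post-invoke unmarshalling. Copies the contents of non-ion
 * output buffers back into the user-supplied remote_arg list, releases the
 * ion mappings held for output buffers, frees any per-fd mappings that the
 * remote side flagged in the fd list, and, when the caller asked for CRCs,
 * copies the CRC list back to user space.
 */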
1592static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
1593 remote_arg_t *upra)
1594{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301595 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001596 uint32_t sc = ctx->sc;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001597 struct smq_invoke_buf *list;
1598 struct smq_phy_page *pages;
1599 struct fastrpc_mmap *mmap;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301600 uint64_t *fdlist = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001601 uint32_t *crclist = NULL;
1602
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001603 remote_arg64_t *rpra = ctx->rpra;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001604 int i, inbufs, outbufs, handles;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001605 int err = 0;
1606
1607 inbufs = REMOTE_SCALARS_INBUFS(sc);
1608 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001609 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1610 list = smq_invoke_buf_start(ctx->rpra, sc);
1611 pages = smq_phy_page_start(sc, list);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301612 if (!me->legacy) {
1613 fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
1614 crclist = (uint32_t *)(fdlist + M_FDLIST);
1615 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001616
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001617 for (i = inbufs; i < inbufs + outbufs; ++i) {
1618 if (!ctx->maps[i]) {
1619 K_COPY_TO_USER(err, kernel,
1620 ctx->lpra[i].buf.pv,
1621 uint64_to_ptr(rpra[i].buf.pv),
1622 rpra[i].buf.len);
1623 if (err)
1624 goto bail;
1625 } else {
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301626 mutex_lock(&ctx->fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05301627 fastrpc_mmap_free(ctx->maps[i], 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301628 mutex_unlock(&ctx->fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05301629 ctx->maps[i] = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001630 }
1631 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301632 mutex_lock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301633 if (fdlist && (inbufs + outbufs + handles)) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001634 for (i = 0; i < M_FDLIST; i++) {
1635 if (!fdlist[i])
1636 break;
1637 if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
Sathish Ambleyae5ee542017-01-16 22:24:23 -08001638 0, 0, &mmap))
c_mtharu7bd6a422017-10-17 18:15:37 +05301639 fastrpc_mmap_free(mmap, 0);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001640 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001641 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301642 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambleybae51902017-07-03 15:00:49 -07001643 if (ctx->crc && crclist && rpra)
c_mtharue1a5ce12017-10-13 20:47:09 +05301644 K_COPY_TO_USER(err, kernel, ctx->crc,
Sathish Ambleybae51902017-07-03 15:00:49 -07001645 crclist, M_CRCLIST*sizeof(uint32_t));
1646
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001647 bail:
1648 return err;
1649}
1650
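/*
 * inv_args_pre() - pre-invoke cache maintenance. For cached, non-coherent
 * output buffers whose start or end address is not cache-line aligned,
 * clean and invalidate the partial lines so that the full-buffer invalidate
 * performed later in inv_args() does not discard unrelated data sharing
 * those cache lines. Buffers on the same page as the metadata are skipped.
 */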
1651static void inv_args_pre(struct smq_invoke_ctx *ctx)
1652{
1653 int i, inbufs, outbufs;
1654 uint32_t sc = ctx->sc;
1655 remote_arg64_t *rpra = ctx->rpra;
1656 uintptr_t end;
1657
1658 inbufs = REMOTE_SCALARS_INBUFS(sc);
1659 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1660 for (i = inbufs; i < inbufs + outbufs; ++i) {
1661 struct fastrpc_mmap *map = ctx->maps[i];
1662
1663 if (map && map->uncached)
1664 continue;
1665 if (!rpra[i].buf.len)
1666 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301667 if (ctx->fl->sctx->smmu.coherent &&
1668 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1669 continue;
1670 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1671 continue;
1672
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001673 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1674 buf_page_start(rpra[i].buf.pv))
1675 continue;
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301676 if (!IS_CACHE_ALIGNED((uintptr_t)
1677 uint64_to_ptr(rpra[i].buf.pv))) {
1678 if (map && map->handle)
1679 msm_ion_do_cache_op(ctx->fl->apps->client,
1680 map->handle,
1681 uint64_to_ptr(rpra[i].buf.pv),
1682 sizeof(uintptr_t),
1683 ION_IOC_CLEAN_INV_CACHES);
1684 else
1685 dmac_flush_range(
1686 uint64_to_ptr(rpra[i].buf.pv), (char *)
1687 uint64_to_ptr(rpra[i].buf.pv + 1));
1688 }
1689
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001690 end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
1691 rpra[i].buf.len);
Tharun Kumar Merugub67336e2017-08-08 18:56:03 +05301692 if (!IS_CACHE_ALIGNED(end)) {
1693 if (map && map->handle)
1694 msm_ion_do_cache_op(ctx->fl->apps->client,
1695 map->handle,
1696 uint64_to_ptr(end),
1697 sizeof(uintptr_t),
1698 ION_IOC_CLEAN_INV_CACHES);
1699 else
1700 dmac_flush_range((char *)end,
1701 (char *)end + 1);
1702 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001703 }
1704}
1705
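/*
 * inv_args() - post-invoke cache maintenance. Invalidates the CPU caches
 * covering cached, non-coherent output buffers after the remote call
 * completes, so the CPU observes the data written by the remote processor.
 * Uncached buffers, io-coherent sessions and buffers marked coherent are
 * skipped.
 */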
1706static void inv_args(struct smq_invoke_ctx *ctx)
1707{
1708 int i, inbufs, outbufs;
1709 uint32_t sc = ctx->sc;
1710 remote_arg64_t *rpra = ctx->rpra;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001711
1712 inbufs = REMOTE_SCALARS_INBUFS(sc);
1713 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1714 for (i = inbufs; i < inbufs + outbufs; ++i) {
1715 struct fastrpc_mmap *map = ctx->maps[i];
1716
1717 if (map && map->uncached)
1718 continue;
1719 if (!rpra[i].buf.len)
1720 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301721 if (ctx->fl->sctx->smmu.coherent &&
1722 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1723 continue;
1724 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1725 continue;
1726
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001727 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1728 buf_page_start(rpra[i].buf.pv)) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001729 continue;
1730 }
1731 if (map && map->handle)
1732 msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
1733 (char *)uint64_to_ptr(rpra[i].buf.pv),
1734 rpra[i].buf.len, ION_IOC_INV_CACHES);
1735 else
1736 dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
1737 (char *)uint64_to_ptr(rpra[i].buf.pv
1738 + rpra[i].buf.len));
1739 }
1740
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001741}
1742
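/*
 * fastrpc_invoke_send() - builds the smq_msg (HLOS pid/tid, context id
 * tagged with the PD type, scalars, and the physical page of the argument
 * buffer) and transmits it to the remote processor, over G-Link when the
 * channel uses it or over legacy SMD otherwise.
 */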
1743static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
1744 uint32_t kernel, uint32_t handle)
1745{
1746 struct smq_msg *msg = &ctx->msg;
1747 struct fastrpc_file *fl = ctx->fl;
1748 struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301749 int err = 0, len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001750
c_mtharue1a5ce12017-10-13 20:47:09 +05301751 VERIFY(err, NULL != channel_ctx->chan);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001752 if (err)
1753 goto bail;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301754 msg->pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001755 msg->tid = current->pid;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301756 if (fl->sessionid)
1757 msg->tid |= (1 << SESSION_ID_INDEX);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001758 if (kernel)
1759 msg->pid = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301760 msg->invoke.header.ctx = ctx->ctxid | fl->pd;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001761 msg->invoke.header.handle = handle;
1762 msg->invoke.header.sc = ctx->sc;
1763 msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
1764 msg->invoke.page.size = buf_page_size(ctx->used);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301765 if (fl->apps->glink) {
1766 if (fl->ssrcount != channel_ctx->ssrcount) {
1767 err = -ECONNRESET;
1768 goto bail;
1769 }
1770 VERIFY(err, channel_ctx->link.port_state ==
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001771 FASTRPC_LINK_CONNECTED);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301772 if (err)
1773 goto bail;
1774 err = glink_tx(channel_ctx->chan,
1775 (void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
1776 GLINK_TX_REQ_INTENT);
1777 } else {
1778 spin_lock(&fl->apps->hlock);
1779 len = smd_write((smd_channel_t *)
1780 channel_ctx->chan,
1781 msg, sizeof(*msg));
1782 spin_unlock(&fl->apps->hlock);
1783 VERIFY(err, len == sizeof(*msg));
1784 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001785 bail:
1786 return err;
1787}
1788
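/*
 * SMD receive path: fastrpc_smd_read_handler() drains queued responses,
 * translates each response's context id back to a context-table index,
 * validates the entry against its magic value and wakes the waiting
 * invoke; smd_event_handler() completes the port on OPEN, notifies
 * drivers on CLOSE and drains responses on DATA events.
 */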
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301789static void fastrpc_smd_read_handler(int cid)
1790{
1791 struct fastrpc_apps *me = &gfa;
1792 struct smq_invoke_rsp rsp = {0};
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301793 int ret = 0, err = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301794 uint32_t index;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301795
1796 do {
1797 ret = smd_read_from_cb(me->channel[cid].chan, &rsp,
1798 sizeof(rsp));
1799 if (ret != sizeof(rsp))
1800 break;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301801
1802 index = (uint32_t)((rsp.ctx & FASTRPC_CTXID_MASK) >> 4);
1803 VERIFY(err, index < FASTRPC_CTX_MAX);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301804 if (err)
1805 goto bail;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301806
1807 VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
1808 if (err)
1809 goto bail;
1810
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05301811 VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp.ctx & ~3)) &&
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301812 me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
1813 if (err)
1814 goto bail;
1815
1816 context_notify_user(me->ctxtable[index], rsp.retval);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301817 } while (ret == sizeof(rsp));
1818bail:
1819 if (err)
1820 pr_err("adsprpc: invalid response or context\n");
1821
1822}
1823
1824static void smd_event_handler(void *priv, unsigned int event)
1825{
1826 struct fastrpc_apps *me = &gfa;
1827 int cid = (int)(uintptr_t)priv;
1828
1829 switch (event) {
1830 case SMD_EVENT_OPEN:
1831 complete(&me->channel[cid].workport);
1832 break;
1833 case SMD_EVENT_CLOSE:
1834 fastrpc_notify_drivers(me, cid);
1835 break;
1836 case SMD_EVENT_DATA:
1837 fastrpc_smd_read_handler(cid);
1838 break;
1839 }
1840}
1841
1842
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001843static void fastrpc_init(struct fastrpc_apps *me)
1844{
1845 int i;
1846
1847 INIT_HLIST_HEAD(&me->drivers);
Tharun Kumar Merugubcd6fbf2018-01-04 17:49:34 +05301848 INIT_HLIST_HEAD(&me->maps);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001849 spin_lock_init(&me->hlock);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301850 spin_lock_init(&me->ctxlock);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05301851 mutex_init(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001852 me->channel = &gcinfo[0];
1853 for (i = 0; i < NUM_CHANNELS; i++) {
1854 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301855 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001856 me->channel[i].sesscount = 0;
1857 }
1858}
1859
1860static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1861
1862static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
1863 uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07001864 struct fastrpc_ioctl_invoke_crc *inv)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001865{
c_mtharue1a5ce12017-10-13 20:47:09 +05301866 struct smq_invoke_ctx *ctx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001867 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
1868 int cid = fl->cid;
1869 int interrupted = 0;
1870 int err = 0;
Maria Yu757199c2017-09-22 16:05:49 +08001871 struct timespec invoket = {0};
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301872 int64_t *perf_counter = getperfcounter(fl, PERF_COUNT);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001873
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001874 if (fl->profile)
1875 getnstimeofday(&invoket);
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301876
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301877
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301878 VERIFY(err, fl->sctx != NULL);
1879 if (err)
1880 goto bail;
1881 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1882 if (err)
1883 goto bail;
1884
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001885 if (!kernel) {
1886 VERIFY(err, 0 == context_restore_interrupted(fl, inv,
1887 &ctx));
1888 if (err)
1889 goto bail;
1890 if (fl->sctx->smmu.faults)
1891 err = FASTRPC_ENOSUCH;
1892 if (err)
1893 goto bail;
1894 if (ctx)
1895 goto wait;
1896 }
1897
1898 VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
1899 if (err)
1900 goto bail;
1901
1902 if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301903 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_GETARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001904 VERIFY(err, 0 == get_args(kernel, ctx));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001905 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001906 if (err)
1907 goto bail;
1908 }
1909
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301910 if (!fl->sctx->smmu.coherent) {
1911 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001912 inv_args_pre(ctx);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301913 PERF_END);
1914 }
1915
1916 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_LINK),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001917 VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001918 PERF_END);
1919
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001920 if (err)
1921 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001922 wait:
1923 if (kernel)
1924 wait_for_completion(&ctx->work);
1925 else {
1926 interrupted = wait_for_completion_interruptible(&ctx->work);
1927 VERIFY(err, 0 == (err = interrupted));
1928 if (err)
1929 goto bail;
1930 }
Sathish Ambleyc432b502017-06-05 12:03:42 -07001931
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301932 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
Sathish Ambleyc432b502017-06-05 12:03:42 -07001933 if (!fl->sctx->smmu.coherent)
1934 inv_args(ctx);
1935 PERF_END);
1936
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001937 VERIFY(err, 0 == (err = ctx->retval));
1938 if (err)
1939 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001940
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301941 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_PUTARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001942 VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001943 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001944 if (err)
1945 goto bail;
1946 bail:
1947 if (ctx && interrupted == -ERESTARTSYS)
1948 context_save_interrupted(ctx);
1949 else if (ctx)
1950 context_free(ctx);
1951 if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
1952 err = ECONNRESET;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001953
1954 if (fl->profile && !interrupted) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301955 if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER) {
1956 int64_t *count = GET_COUNTER(perf_counter, PERF_INVOKE);
1957
1958 if (count)
1959 *count += getnstimediff(&invoket);
1960 }
1961 if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX) {
1962 int64_t *count = GET_COUNTER(perf_counter, PERF_COUNT);
1963
1964 if (count)
1965 *count = *count+1;
1966 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001967 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001968 return err;
1969}
1970
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05301971static int fastrpc_get_adsp_session(char *name, int *session)
1972{
1973 struct fastrpc_apps *me = &gfa;
1974 int err = 0, i;
1975
1976 for (i = 0; i < NUM_SESSIONS; i++) {
1977 if (!me->channel[0].spd[i].spdname)
1978 continue;
1979 if (!strcmp(name, me->channel[0].spd[i].spdname))
1980 break;
1981 }
1982 VERIFY(err, i < NUM_SESSIONS);
1983 if (err)
1984 goto bail;
1985 *session = i;
1986bail:
1987 return err;
1988}
1989
1990static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl);
Sathish Ambley36849af2017-02-02 09:35:55 -08001991static int fastrpc_channel_open(struct fastrpc_file *fl);
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05301992static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl);
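/*
 * fastrpc_init_process() - creates or attaches to a remote process.
 * FASTRPC_INIT_ATTACH / FASTRPC_INIT_ATTACH_SENSORS attach to the guest-OS
 * or sensors PD; FASTRPC_INIT_CREATE spawns a dynamic user PD, mapping the
 * process image and its initial memory region and handing them to the DSP;
 * FASTRPC_INIT_CREATE_STATIC brings up a named static PD such as audiopd,
 * assigning a remote heap region to it on first use.
 */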
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001993static int fastrpc_init_process(struct fastrpc_file *fl,
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001994 struct fastrpc_ioctl_init_attrs *uproc)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001995{
1996 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05301997 struct fastrpc_apps *me = &gfa;
Sathish Ambleybae51902017-07-03 15:00:49 -07001998 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001999 struct fastrpc_ioctl_init *init = &uproc->init;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002000 struct smq_phy_page pages[1];
c_mtharue1a5ce12017-10-13 20:47:09 +05302001 struct fastrpc_mmap *file = NULL, *mem = NULL;
2002 char *proc_name = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002003
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302004 VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
Sathish Ambley36849af2017-02-02 09:35:55 -08002005 if (err)
2006 goto bail;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302007 if (init->flags == FASTRPC_INIT_ATTACH ||
2008 init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002009 remote_arg_t ra[1];
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302010 int tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002011
2012 ra[0].buf.pv = (void *)&tgid;
2013 ra[0].buf.len = sizeof(tgid);
2014 ioctl.inv.handle = 1;
2015 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
2016 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302017 ioctl.fds = NULL;
2018 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002019 ioctl.crc = NULL;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302020 if (init->flags == FASTRPC_INIT_ATTACH)
2021 fl->pd = 0;
2022 else if (init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
2023 fl->spdname = SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME;
2024 fl->pd = 2;
2025 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002026 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2027 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2028 if (err)
2029 goto bail;
2030 } else if (init->flags == FASTRPC_INIT_CREATE) {
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002031 remote_arg_t ra[6];
2032 int fds[6];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002033 int mflags = 0;
2034 struct {
2035 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302036 unsigned int namelen;
2037 unsigned int filelen;
2038 unsigned int pageslen;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002039 int attrs;
2040 int siglen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002041 } inbuf;
2042
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302043 inbuf.pgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002044 inbuf.namelen = strlen(current->comm) + 1;
2045 inbuf.filelen = init->filelen;
2046 fl->pd = 1;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302047
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05302048 VERIFY(err, access_ok(0, (void __user *)init->file,
2049 init->filelen));
2050 if (err)
2051 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002052 if (init->filelen) {
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302053 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002054 VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
2055 init->file, init->filelen, mflags, &file));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302056 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002057 if (err)
2058 goto bail;
2059 }
2060 inbuf.pageslen = 1;
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05302061 VERIFY(err, access_ok(1, (void __user *)init->mem,
2062 init->memlen));
2063 if (err)
2064 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302065 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002066 VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
2067 init->mem, init->memlen, mflags, &mem));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302068 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002069 if (err)
2070 goto bail;
2071 inbuf.pageslen = 1;
2072 ra[0].buf.pv = (void *)&inbuf;
2073 ra[0].buf.len = sizeof(inbuf);
2074 fds[0] = 0;
2075
2076 ra[1].buf.pv = (void *)current->comm;
2077 ra[1].buf.len = inbuf.namelen;
2078 fds[1] = 0;
2079
2080 ra[2].buf.pv = (void *)init->file;
2081 ra[2].buf.len = inbuf.filelen;
2082 fds[2] = init->filefd;
2083
2084 pages[0].addr = mem->phys;
2085 pages[0].size = mem->size;
2086 ra[3].buf.pv = (void *)pages;
2087 ra[3].buf.len = 1 * sizeof(*pages);
2088 fds[3] = 0;
2089
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002090 inbuf.attrs = uproc->attrs;
2091 ra[4].buf.pv = (void *)&(inbuf.attrs);
2092 ra[4].buf.len = sizeof(inbuf.attrs);
2093 fds[4] = 0;
2094
2095 inbuf.siglen = uproc->siglen;
2096 ra[5].buf.pv = (void *)&(inbuf.siglen);
2097 ra[5].buf.len = sizeof(inbuf.siglen);
2098 fds[5] = 0;
2099
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002100 ioctl.inv.handle = 1;
2101 ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002102 if (uproc->attrs)
2103 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002104 ioctl.inv.pra = ra;
2105 ioctl.fds = fds;
c_mtharue1a5ce12017-10-13 20:47:09 +05302106 ioctl.attrs = NULL;
2107 ioctl.crc = NULL;
2108 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2109 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2110 if (err)
2111 goto bail;
2112 } else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
2113 remote_arg_t ra[3];
2114 uint64_t phys = 0;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302115 size_t size = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302116 int fds[3];
2117 struct {
2118 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302119 unsigned int namelen;
2120 unsigned int pageslen;
c_mtharue1a5ce12017-10-13 20:47:09 +05302121 } inbuf;
2122
2123 if (!init->filelen)
2124 goto bail;
2125
2126 proc_name = kzalloc(init->filelen, GFP_KERNEL);
2127 VERIFY(err, !IS_ERR_OR_NULL(proc_name));
2128 if (err)
2129 goto bail;
2130 VERIFY(err, 0 == copy_from_user((void *)proc_name,
2131 (void __user *)init->file, init->filelen));
2132 if (err)
2133 goto bail;
2134
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302135 fl->pd = 1;
c_mtharue1a5ce12017-10-13 20:47:09 +05302136 inbuf.pgid = current->tgid;
c_mtharu81a0aa72017-11-07 16:13:21 +05302137 inbuf.namelen = init->filelen;
c_mtharue1a5ce12017-10-13 20:47:09 +05302138 inbuf.pageslen = 0;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302139
2140 if (!strcmp(proc_name, "audiopd")) {
2141 fl->spdname = AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME;
2142 VERIFY(err, !fastrpc_mmap_remove_pdr(fl));
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05302143 if (err)
2144 goto bail;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302145 }
2146
c_mtharue1a5ce12017-10-13 20:47:09 +05302147 if (!me->staticpd_flags) {
2148 inbuf.pageslen = 1;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302149 mutex_lock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302150 VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
2151 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
2152 &mem));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302153 mutex_unlock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302154 if (err)
2155 goto bail;
2156 phys = mem->phys;
2157 size = mem->size;
2158 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302159 hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
2160 me->channel[fl->cid].rhvm.vmperm,
2161 me->channel[fl->cid].rhvm.vmcount));
c_mtharue1a5ce12017-10-13 20:47:09 +05302162 if (err) {
2164				pr_err("ADSPRPC: hyp_assign_phys fail err %d\n",
2164 err);
2165 pr_err("map->phys %llx, map->size %d\n",
2166 phys, (int)size);
2167 goto bail;
2168 }
2169 me->staticpd_flags = 1;
2170 }
2171
2172 ra[0].buf.pv = (void *)&inbuf;
2173 ra[0].buf.len = sizeof(inbuf);
2174 fds[0] = 0;
2175
2176 ra[1].buf.pv = (void *)proc_name;
2177 ra[1].buf.len = inbuf.namelen;
2178 fds[1] = 0;
2179
2180 pages[0].addr = phys;
2181 pages[0].size = size;
2182
2183 ra[2].buf.pv = (void *)pages;
2184 ra[2].buf.len = sizeof(*pages);
2185 fds[2] = 0;
2186 ioctl.inv.handle = 1;
2187
2188 ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
2189 ioctl.inv.pra = ra;
2190 ioctl.fds = NULL;
2191 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002192 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002193 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2194 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2195 if (err)
2196 goto bail;
2197 } else {
2198 err = -ENOTTY;
2199 }
2200bail:
c_mtharud91205a2017-11-07 16:01:06 +05302201 kfree(proc_name);
c_mtharue1a5ce12017-10-13 20:47:09 +05302202 if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
2203 me->staticpd_flags = 0;
2204 if (mem && err) {
2205 if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
2206 hyp_assign_phys(mem->phys, (uint64_t)mem->size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302207 me->channel[fl->cid].rhvm.vmid,
2208 me->channel[fl->cid].rhvm.vmcount,
2209 hlosvm, hlosvmperm, 1);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302210 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302211 fastrpc_mmap_free(mem, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302212 mutex_unlock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302213 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302214 if (file) {
2215 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302216 fastrpc_mmap_free(file, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302217 mutex_unlock(&fl->fl_map_mutex);
2218 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002219 return err;
2220}
2221
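/*
 * fastrpc_release_current_dsp_process() - tells the remote side that this
 * process is exiting (method 1 on the static handle, passing the HLOS
 * tgid) so the corresponding remote process can be torn down.
 */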
2222static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
2223{
2224 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07002225 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002226 remote_arg_t ra[1];
2227 int tgid = 0;
2228
Sathish Ambley36849af2017-02-02 09:35:55 -08002229 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
2230 if (err)
2231 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302232 VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002233 if (err)
2234 goto bail;
2235 tgid = fl->tgid;
2236 ra[0].buf.pv = (void *)&tgid;
2237 ra[0].buf.len = sizeof(tgid);
2238 ioctl.inv.handle = 1;
2239 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
2240 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302241 ioctl.fds = NULL;
2242 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002243 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002244 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2245 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2246bail:
2247 return err;
2248}
2249
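/*
 * fastrpc_mmap_on_dsp() - registers a mapping with the remote process and
 * records the remote-side address in map->raddr. Heap mappings are
 * additionally protected through an SCM call, and remote-heap mappings are
 * hyp-assigned from HLOS to the remote VM.
 */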
2250static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
2251 struct fastrpc_mmap *map)
2252{
Sathish Ambleybae51902017-07-03 15:00:49 -07002253 struct fastrpc_ioctl_invoke_crc ioctl;
c_mtharu63ffc012017-11-16 15:26:56 +05302254 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002255 struct smq_phy_page page;
2256 int num = 1;
2257 remote_arg_t ra[3];
2258 int err = 0;
2259 struct {
2260 int pid;
2261 uint32_t flags;
2262 uintptr_t vaddrin;
2263 int num;
2264 } inargs;
2265 struct {
2266 uintptr_t vaddrout;
2267 } routargs;
2268
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302269 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002270 inargs.vaddrin = (uintptr_t)map->va;
2271 inargs.flags = flags;
2272 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
2273 ra[0].buf.pv = (void *)&inargs;
2274 ra[0].buf.len = sizeof(inargs);
2275 page.addr = map->phys;
2276 page.size = map->size;
2277 ra[1].buf.pv = (void *)&page;
2278 ra[1].buf.len = num * sizeof(page);
2279
2280 ra[2].buf.pv = (void *)&routargs;
2281 ra[2].buf.len = sizeof(routargs);
2282
2283 ioctl.inv.handle = 1;
2284 if (fl->apps->compat)
2285 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
2286 else
2287 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
2288 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302289 ioctl.fds = NULL;
2290 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002291 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002292 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2293 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2294 map->raddr = (uintptr_t)routargs.vaddrout;
c_mtharue1a5ce12017-10-13 20:47:09 +05302295 if (err)
2296 goto bail;
2297 if (flags == ADSP_MMAP_HEAP_ADDR) {
2298 struct scm_desc desc = {0};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002299
c_mtharue1a5ce12017-10-13 20:47:09 +05302300 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
2301 desc.args[1] = map->phys;
2302 desc.args[2] = map->size;
2303 desc.arginfo = SCM_ARGS(3);
2304 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
2305 TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
2306 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302307 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302308 hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
2309 me->channel[fl->cid].rhvm.vmperm,
2310 me->channel[fl->cid].rhvm.vmcount));
c_mtharue1a5ce12017-10-13 20:47:09 +05302311 if (err)
2312 goto bail;
2313 }
2314bail:
2315 return err;
2316}
2317
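/*
 * fastrpc_munmap_on_dsp_rh() - undoes the protection applied in
 * fastrpc_mmap_on_dsp(): for heap mappings it fetches the secure key from
 * the DSP and issues the SCM clear-protect call, and for remote-heap
 * mappings it hyp-assigns the memory back to HLOS.
 */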
2318static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
2319 struct fastrpc_mmap *map)
2320{
2321 int err = 0;
c_mtharu63ffc012017-11-16 15:26:56 +05302322 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +05302323 int destVM[1] = {VMID_HLOS};
2324 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
2325
2326 if (map->flags == ADSP_MMAP_HEAP_ADDR) {
2327 struct fastrpc_ioctl_invoke_crc ioctl;
2328 struct scm_desc desc = {0};
2329 remote_arg_t ra[1];
2330 int err = 0;
2331 struct {
2332 uint8_t skey;
2333 } routargs;
2334
2335 ra[0].buf.pv = (void *)&routargs;
2336 ra[0].buf.len = sizeof(routargs);
2337
2338 ioctl.inv.handle = 1;
2339 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
2340 ioctl.inv.pra = ra;
2341 ioctl.fds = NULL;
2342 ioctl.attrs = NULL;
2343 ioctl.crc = NULL;
2344 if (fl == NULL)
2345 goto bail;
2346
2347 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2348 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2349 if (err)
2350 goto bail;
2351 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
2352 desc.args[1] = map->phys;
2353 desc.args[2] = map->size;
2354 desc.args[3] = routargs.skey;
2355 desc.arginfo = SCM_ARGS(4);
2356 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
2357 TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
2358 } else if (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2359 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302360 me->channel[fl->cid].rhvm.vmid,
2361 me->channel[fl->cid].rhvm.vmcount,
2362 destVM, destVMperm, 1));
c_mtharue1a5ce12017-10-13 20:47:09 +05302363 if (err)
2364 goto bail;
2365 }
2366
2367bail:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002368 return err;
2369}
2370
2371static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
2372 struct fastrpc_mmap *map)
2373{
Sathish Ambleybae51902017-07-03 15:00:49 -07002374 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002375 remote_arg_t ra[1];
2376 int err = 0;
2377 struct {
2378 int pid;
2379 uintptr_t vaddrout;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302380 size_t size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002381 } inargs;
2382
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302383 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002384 inargs.size = map->size;
2385 inargs.vaddrout = map->raddr;
2386 ra[0].buf.pv = (void *)&inargs;
2387 ra[0].buf.len = sizeof(inargs);
2388
2389 ioctl.inv.handle = 1;
2390 if (fl->apps->compat)
2391 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
2392 else
2393 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
2394 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302395 ioctl.fds = NULL;
2396 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002397 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002398 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2399 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
c_mtharue1a5ce12017-10-13 20:47:09 +05302400 if (err)
2401 goto bail;
2402 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
2403 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2404 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, map));
2405 if (err)
2406 goto bail;
2407 }
2408bail:
2409 return err;
2410}
2411
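/*
 * fastrpc_mmap_remove_ssr() - subsystem-restart cleanup. Walks the global
 * list of heap/remote-heap mappings, undoes the TZ/hypervisor protection
 * on each region, collects an ELF ramdump of it when ramdump collection is
 * enabled, and frees the map; on failure the map is put back on the list.
 */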
2412static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
2413{
2414 struct fastrpc_mmap *match = NULL, *map = NULL;
2415 struct hlist_node *n = NULL;
2416 int err = 0, ret = 0;
2417 struct fastrpc_apps *me = &gfa;
2418 struct ramdump_segment *ramdump_segments_rh = NULL;
2419
2420 do {
2421 match = NULL;
2422 spin_lock(&me->hlock);
2423 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
2424 match = map;
2425 hlist_del_init(&map->hn);
2426 break;
2427 }
2428 spin_unlock(&me->hlock);
2429
2430 if (match) {
2431 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match));
2432 if (err)
2433 goto bail;
2434 if (me->channel[0].ramdumpenabled) {
2435 ramdump_segments_rh = kcalloc(1,
2436 sizeof(struct ramdump_segment), GFP_KERNEL);
2437 if (ramdump_segments_rh) {
2438 ramdump_segments_rh->address =
2439 match->phys;
2440 ramdump_segments_rh->size = match->size;
2441 ret = do_elf_ramdump(
2442 me->channel[0].remoteheap_ramdump_dev,
2443 ramdump_segments_rh, 1);
2444 if (ret < 0)
2445					pr_err("ADSPRPC: unable to dump heap\n");
2446 kfree(ramdump_segments_rh);
2447 }
2448 }
c_mtharu7bd6a422017-10-17 18:15:37 +05302449 fastrpc_mmap_free(match, 0);
c_mtharue1a5ce12017-10-13 20:47:09 +05302450 }
2451 } while (match);
2452bail:
2453 if (err && match)
2454 fastrpc_mmap_add(match);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002455 return err;
2456}
2457
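/*
 * fastrpc_mmap_remove_pdr() - protection-domain-restart counterpart of the
 * SSR cleanup: if the audio PD has restarted since it was last seen
 * (pdrcount changed), unmap the remote heap, and fail with -ENOTCONN while
 * the PD is not up.
 */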
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302458static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl)
2459{
2460 struct fastrpc_apps *me = &gfa;
2461 int session = 0, err = 0;
2462
2463 VERIFY(err, !fastrpc_get_adsp_session(
2464 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
2465 if (err)
2466 goto bail;
2467 if (me->channel[fl->cid].spd[session].pdrcount !=
2468 me->channel[fl->cid].spd[session].prevpdrcount) {
2469 if (fastrpc_mmap_remove_ssr(fl))
2470 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
2471 me->channel[fl->cid].spd[session].prevpdrcount =
2472 me->channel[fl->cid].spd[session].pdrcount;
2473 }
2474 if (!me->channel[fl->cid].spd[session].ispdup) {
2475 VERIFY(err, 0);
2476 if (err) {
2477 err = -ENOTCONN;
2478 goto bail;
2479 }
2480 }
2481bail:
2482 return err;
2483}
2484
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002485static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302486 size_t len, struct fastrpc_mmap **ppmap);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002487
2488static void fastrpc_mmap_add(struct fastrpc_mmap *map);
2489
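/*
 * fastrpc_internal_munmap() - MUNMAP ioctl backend: removes the mapping
 * from the per-file list, asks the DSP to unmap it, then frees it; if the
 * remote unmap fails the map is re-added so local state stays consistent.
 */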
2490static int fastrpc_internal_munmap(struct fastrpc_file *fl,
2491 struct fastrpc_ioctl_munmap *ud)
2492{
2493 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302494 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002495
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302496 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302497 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002498 VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302499 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002500 if (err)
2501 goto bail;
2502 VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
2503 if (err)
2504 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302505 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302506 fastrpc_mmap_free(map, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302507 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002508bail:
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302509 if (err && map) {
2510 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002511 fastrpc_mmap_add(map);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302512 mutex_unlock(&fl->fl_map_mutex);
2513 }
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302514 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002515 return err;
2516}
2517
c_mtharu7bd6a422017-10-17 18:15:37 +05302518static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
2519 struct fastrpc_ioctl_munmap_fd *ud) {
2520 int err = 0;
2521 struct fastrpc_mmap *map = NULL;
2522
2523 VERIFY(err, (fl && ud));
2524 if (err)
2525 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302526 mutex_lock(&fl->fl_map_mutex);
Tharun Kumar Merugu09fc6152018-02-16 13:13:12 +05302527 if (fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
2528 pr_err("adsprpc: mapping not found to unmap %d va %llx %x\n",
c_mtharu7bd6a422017-10-17 18:15:37 +05302529 ud->fd, (unsigned long long)ud->va,
2530 (unsigned int)ud->len);
2531 err = -1;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302532 mutex_unlock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302533 goto bail;
2534 }
2535 if (map)
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302536 fastrpc_mmap_free(map, 0);
2537 mutex_unlock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302538bail:
2539 return err;
2540}
2541
2542
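/*
 * fastrpc_internal_mmap() - MMAP ioctl backend: reuses an existing mapping
 * for the same fd/address/size when one is found, otherwise creates a new
 * one, registers it with the DSP and returns the remote address in
 * ud->vaddrout; the new map is freed if registration fails.
 */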
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002543static int fastrpc_internal_mmap(struct fastrpc_file *fl,
2544 struct fastrpc_ioctl_mmap *ud)
2545{
2546
c_mtharue1a5ce12017-10-13 20:47:09 +05302547 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002548 int err = 0;
2549
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302550 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302551 mutex_lock(&fl->fl_map_mutex);
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302552 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302553 ud->size, ud->flags, 1, &map)) {
2554 mutex_unlock(&fl->fl_map_mutex);
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302555 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002556 return 0;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302557 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002558 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302559 (uintptr_t)ud->vaddrin, ud->size,
c_mtharue1a5ce12017-10-13 20:47:09 +05302560 ud->flags, &map));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302561 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002562 if (err)
2563 goto bail;
2564 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
2565 if (err)
2566 goto bail;
2567 ud->vaddrout = map->raddr;
2568 bail:
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302569 if (err && map) {
2570 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302571 fastrpc_mmap_free(map, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302572 mutex_unlock(&fl->fl_map_mutex);
2573 }
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302574 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002575 return err;
2576}
2577
2578static void fastrpc_channel_close(struct kref *kref)
2579{
2580 struct fastrpc_apps *me = &gfa;
2581 struct fastrpc_channel_ctx *ctx;
2582 int cid;
2583
2584 ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
2585 cid = ctx - &gcinfo[0];
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05302586 if (!me->glink)
2587 smd_close(ctx->chan);
2588 else
2589 fastrpc_glink_close(ctx->chan, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05302590 ctx->chan = NULL;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302591 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002592 pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
2593 MAJOR(me->dev_no), cid);
2594}
2595
2596static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
2597
2598static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302599 int secure, int sharedcb, struct fastrpc_session_ctx **session)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002600{
2601 struct fastrpc_apps *me = &gfa;
2602 int idx = 0, err = 0;
2603
2604 if (chan->sesscount) {
2605 for (idx = 0; idx < chan->sesscount; ++idx) {
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302606 if ((sharedcb && chan->session[idx].smmu.sharedcb) ||
2607 (!chan->session[idx].used &&
2608 chan->session[idx].smmu.secure
2609 == secure && !sharedcb)) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002610 chan->session[idx].used = 1;
2611 break;
2612 }
2613 }
2614 VERIFY(err, idx < chan->sesscount);
2615 if (err)
2616 goto bail;
2617 chan->session[idx].smmu.faults = 0;
2618 } else {
2619 VERIFY(err, me->dev != NULL);
2620 if (err)
2621 goto bail;
2622 chan->session[0].dev = me->dev;
c_mtharue1a5ce12017-10-13 20:47:09 +05302623 chan->session[0].smmu.dev = me->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002624 }
2625
2626 *session = &chan->session[idx];
2627 bail:
2628 return err;
2629}
2630
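/*
 * G-Link transport callbacks: queue an rx intent when the remote side
 * requests one, complete the waiting invoke when a response arrives (after
 * validating the context-table index and magic), and track port state
 * transitions for the channel.
 */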
c_mtharue1a5ce12017-10-13 20:47:09 +05302631static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
2632 size_t size)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002633{
2634 if (glink_queue_rx_intent(h, NULL, size))
2635 return false;
2636 return true;
2637}
2638
c_mtharue1a5ce12017-10-13 20:47:09 +05302639static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002640 const void *pkt_priv, const void *ptr)
2641{
2642}
2643
c_mtharue1a5ce12017-10-13 20:47:09 +05302644static void fastrpc_glink_notify_rx(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002645 const void *pkt_priv, const void *ptr, size_t size)
2646{
2647 struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302648 struct fastrpc_apps *me = &gfa;
2649 uint32_t index;
c_mtharufdac6892017-10-12 13:09:01 +05302650 int err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002651
c_mtharufdac6892017-10-12 13:09:01 +05302652 VERIFY(err, (rsp && size >= sizeof(*rsp)));
2653 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302654 goto bail;
2655
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302656 index = (uint32_t)((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
2657 VERIFY(err, index < FASTRPC_CTX_MAX);
c_mtharufdac6892017-10-12 13:09:01 +05302658 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302659 goto bail;
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302660
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302661 VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
2662 if (err)
2663 goto bail;
2664
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302665 VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp->ctx & ~3)) &&
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302666 me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
2667 if (err)
2668 goto bail;
2669
2670 context_notify_user(me->ctxtable[index], rsp->retval);
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302671bail:
c_mtharufdac6892017-10-12 13:09:01 +05302672 if (err)
2673 pr_err("adsprpc: invalid response or context\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002674 glink_rx_done(handle, ptr, true);
2675}
2676
c_mtharue1a5ce12017-10-13 20:47:09 +05302677static void fastrpc_glink_notify_state(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002678 unsigned int event)
2679{
2680 struct fastrpc_apps *me = &gfa;
2681 int cid = (int)(uintptr_t)priv;
2682 struct fastrpc_glink_info *link;
2683
2684 if (cid < 0 || cid >= NUM_CHANNELS)
2685 return;
2686 link = &me->channel[cid].link;
2687 switch (event) {
2688 case GLINK_CONNECTED:
2689 link->port_state = FASTRPC_LINK_CONNECTED;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302690 complete(&me->channel[cid].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002691 break;
2692 case GLINK_LOCAL_DISCONNECTED:
2693 link->port_state = FASTRPC_LINK_DISCONNECTED;
2694 break;
2695 case GLINK_REMOTE_DISCONNECTED:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002696 break;
2697 default:
2698 break;
2699 }
2700}
2701
2702static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
2703 struct fastrpc_session_ctx **session)
2704{
2705 int err = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302706 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002707
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302708 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002709 if (!*session)
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302710 err = fastrpc_session_alloc_locked(chan, secure, 0, session);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302711 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002712 return err;
2713}
2714
2715static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
2716 struct fastrpc_session_ctx *session)
2717{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302718 struct fastrpc_apps *me = &gfa;
2719
2720 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002721 session->used = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302722 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002723}
2724
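/*
 * fastrpc_file_free() - tears down all per-fd state on release: notifies
 * the DSP that the process is exiting, frees pending/interrupted contexts,
 * buffers, mappings and profiling records, drops the channel reference and
 * releases the SMMU session(s).
 */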
2725static int fastrpc_file_free(struct fastrpc_file *fl)
2726{
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302727 struct hlist_node *n = NULL;
Tharun Kumar Merugu3e966762018-04-04 10:56:44 +05302728 struct fastrpc_mmap *map = NULL, *lmap = NULL;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302729 struct fastrpc_perf *perf = NULL, *fperf = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002730 int cid;
2731
2732 if (!fl)
2733 return 0;
2734 cid = fl->cid;
2735
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302736 (void)fastrpc_release_current_dsp_process(fl);
2737
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002738 spin_lock(&fl->apps->hlock);
2739 hlist_del_init(&fl->hn);
2740 spin_unlock(&fl->apps->hlock);
2741
Sathish Ambleyd7fbcbb2017-03-08 10:55:48 -08002742 if (!fl->sctx) {
2743 kfree(fl);
2744 return 0;
2745 }
tharun kumar9f899ea2017-07-03 17:07:03 +05302746 spin_lock(&fl->hlock);
2747 fl->file_close = 1;
2748 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002749 fastrpc_context_list_dtor(fl);
2750 fastrpc_buf_list_free(fl);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302751 mutex_lock(&fl->fl_map_mutex);
Tharun Kumar Merugu3e966762018-04-04 10:56:44 +05302752 do {
2753 lmap = NULL;
2754 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
2755 hlist_del_init(&map->hn);
2756 lmap = map;
2757 break;
2758 }
2759 fastrpc_mmap_free(lmap, 1);
2760 } while (lmap);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302761 mutex_unlock(&fl->fl_map_mutex);
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05302762 if (fl->refcount && (fl->ssrcount == fl->apps->channel[cid].ssrcount))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002763 kref_put_mutex(&fl->apps->channel[cid].kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302764 fastrpc_channel_close, &fl->apps->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002765 if (fl->sctx)
2766 fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
2767 if (fl->secsctx)
2768 fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302769
2770 mutex_lock(&fl->perf_mutex);
2771 do {
2772 struct hlist_node *pn = NULL;
2773
2774 fperf = NULL;
2775 hlist_for_each_entry_safe(perf, pn, &fl->perf, hn) {
2776 hlist_del_init(&perf->hn);
2777 fperf = perf;
2778 break;
2779 }
2780 kfree(fperf);
2781 } while (fperf);
2782 mutex_unlock(&fl->perf_mutex);
2783 mutex_destroy(&fl->perf_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302784 mutex_destroy(&fl->fl_map_mutex);
Tharun Kumar Merugu8714e642018-05-17 15:21:08 +05302785 mutex_destroy(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002786 kfree(fl);
2787 return 0;
2788}
2789
2790static int fastrpc_device_release(struct inode *inode, struct file *file)
2791{
2792 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2793
2794 if (fl) {
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302795 if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
2796 pm_qos_remove_request(&fl->pm_qos_req);
Sathish Ambley1ca68232017-01-19 10:32:55 -08002797 if (fl->debugfs_file != NULL)
2798 debugfs_remove(fl->debugfs_file);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002799 fastrpc_file_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +05302800 file->private_data = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002801 }
2802 return 0;
2803}
2804
2805static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
2806 void *priv)
2807{
2808 struct fastrpc_apps *me = &gfa;
2809 int cid = (int)((uintptr_t)priv);
2810 struct fastrpc_glink_info *link;
2811
2812 if (cid < 0 || cid >= NUM_CHANNELS)
2813 return;
2814
2815 link = &me->channel[cid].link;
2816 switch (cb_info->link_state) {
2817 case GLINK_LINK_STATE_UP:
2818 link->link_state = FASTRPC_LINK_STATE_UP;
2819 complete(&me->channel[cid].work);
2820 break;
2821 case GLINK_LINK_STATE_DOWN:
2822 link->link_state = FASTRPC_LINK_STATE_DOWN;
2823 break;
2824 default:
2825 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
2826 break;
2827 }
2828}
2829
2830static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
2831{
2832 int err = 0;
2833 struct fastrpc_glink_info *link;
2834
2835 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2836 if (err)
2837 goto bail;
2838
2839 link = &me->channel[cid].link;
2840 if (link->link_notify_handle != NULL)
2841 goto bail;
2842
2843 link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
2844 link->link_notify_handle = glink_register_link_state_cb(
2845 &link->link_info,
2846 (void *)((uintptr_t)cid));
2847 VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
2848 if (err) {
2849 link->link_notify_handle = NULL;
2850 goto bail;
2851 }
2852 VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
2853 RPC_TIMEOUT));
2854bail:
2855 return err;
2856}
2857
2858static void fastrpc_glink_close(void *chan, int cid)
2859{
2860 int err = 0;
2861 struct fastrpc_glink_info *link;
2862
2863 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2864 if (err)
2865 return;
2866 link = &gfa.channel[cid].link;
2867
c_mtharu314a4202017-11-15 22:09:17 +05302868 if (link->port_state == FASTRPC_LINK_CONNECTED ||
2869 link->port_state == FASTRPC_LINK_REMOTE_DISCONNECTING) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002870 link->port_state = FASTRPC_LINK_DISCONNECTING;
2871 glink_close(chan);
2872 }
2873}
2874
2875static int fastrpc_glink_open(int cid)
2876{
2877 int err = 0;
2878 void *handle = NULL;
2879 struct fastrpc_apps *me = &gfa;
2880 struct glink_open_config *cfg;
2881 struct fastrpc_glink_info *link;
2882
2883 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2884 if (err)
2885 goto bail;
2886 link = &me->channel[cid].link;
2887 cfg = &me->channel[cid].link.cfg;
2888 VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
2889 if (err)
2890 goto bail;
2891
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05302892 VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
2893 if (err)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002894 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002895
2896 link->port_state = FASTRPC_LINK_CONNECTING;
2897 cfg->priv = (void *)(uintptr_t)cid;
2898 cfg->edge = gcinfo[cid].link.link_info.edge;
2899 cfg->transport = gcinfo[cid].link.link_info.transport;
2900 cfg->name = FASTRPC_GLINK_GUID;
2901 cfg->notify_rx = fastrpc_glink_notify_rx;
2902 cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
2903 cfg->notify_state = fastrpc_glink_notify_state;
2904 cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
2905 handle = glink_open(cfg);
2906 VERIFY(err, !IS_ERR_OR_NULL(handle));
c_mtharu6e1d26b2017-10-09 16:05:24 +05302907 if (err) {
2908 if (link->port_state == FASTRPC_LINK_CONNECTING)
2909 link->port_state = FASTRPC_LINK_DISCONNECTED;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002910 goto bail;
c_mtharu6e1d26b2017-10-09 16:05:24 +05302911 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002912 me->channel[cid].chan = handle;
2913bail:
2914 return err;
2915}
2916
Sathish Ambley1ca68232017-01-19 10:32:55 -08002917static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
2918{
2919 filp->private_data = inode->i_private;
2920 return 0;
2921}
2922
2923static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
2924 size_t count, loff_t *position)
2925{
2926 struct fastrpc_file *fl = filp->private_data;
2927 struct hlist_node *n;
c_mtharue1a5ce12017-10-13 20:47:09 +05302928 struct fastrpc_buf *buf = NULL;
2929 struct fastrpc_mmap *map = NULL;
2930 struct smq_invoke_ctx *ictx = NULL;
Sathish Ambley1ca68232017-01-19 10:32:55 -08002931 struct fastrpc_channel_ctx *chan;
2932 struct fastrpc_session_ctx *sess;
2933 unsigned int len = 0;
2934 int i, j, ret = 0;
2935 char *fileinfo = NULL;
2936
2937 fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
2938 if (!fileinfo)
2939 goto bail;
2940 if (fl == NULL) {
2941 for (i = 0; i < NUM_CHANNELS; i++) {
2942 chan = &gcinfo[i];
2943 len += scnprintf(fileinfo + len,
2944 DEBUGFS_SIZE - len, "%s\n\n",
2945 chan->name);
2946 len += scnprintf(fileinfo + len,
2947 DEBUGFS_SIZE - len, "%s %d\n",
2948 "sesscount:", chan->sesscount);
2949 for (j = 0; j < chan->sesscount; j++) {
2950 sess = &chan->session[j];
2951 len += scnprintf(fileinfo + len,
2952 DEBUGFS_SIZE - len,
2953 "%s%d\n\n", "SESSION", j);
2954 len += scnprintf(fileinfo + len,
2955 DEBUGFS_SIZE - len,
2956 "%s %d\n", "sid:",
2957 sess->smmu.cb);
2958 len += scnprintf(fileinfo + len,
2959 DEBUGFS_SIZE - len,
2960 "%s %d\n", "SECURE:",
2961 sess->smmu.secure);
2962 }
2963 }
2964 } else {
2965 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2966 "%s %d\n\n",
2967 "PROCESS_ID:", fl->tgid);
2968 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2969 "%s %d\n\n",
2970 "CHANNEL_ID:", fl->cid);
2971 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2972 "%s %d\n\n",
2973 "SSRCOUNT:", fl->ssrcount);
2974 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2975 "%s\n",
2976 "LIST OF BUFS:");
2977 spin_lock(&fl->hlock);
2978 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
2979 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Meruguce566452017-08-17 15:29:59 +05302980 "%s %pK %s %pK %s %llx\n", "buf:",
2981 buf, "buf->virt:", buf->virt,
2982 "buf->phys:", buf->phys);
Sathish Ambley1ca68232017-01-19 10:32:55 -08002983 }
2984 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2985 "\n%s\n",
2986 "LIST OF MAPS:");
2987 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
2988 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Meruguce566452017-08-17 15:29:59 +05302989 "%s %pK %s %lx %s %llx\n",
Sathish Ambley1ca68232017-01-19 10:32:55 -08002990 "map:", map,
2991 "map->va:", map->va,
2992 "map->phys:", map->phys);
2993 }
2994 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2995 "\n%s\n",
2996 "LIST OF PENDING SMQCONTEXTS:");
2997 hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
2998 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Meruguce566452017-08-17 15:29:59 +05302999 "%s %pK %s %u %s %u %s %u\n",
Sathish Ambley1ca68232017-01-19 10:32:55 -08003000 "smqcontext:", ictx,
3001 "sc:", ictx->sc,
3002 "tid:", ictx->pid,
3003 "handle", ictx->rpra->h);
3004 }
3005 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
3006 "\n%s\n",
3007 "LIST OF INTERRUPTED SMQCONTEXTS:");
3008 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
3009 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Meruguce566452017-08-17 15:29:59 +05303010 "%s %pK %s %u %s %u %s %u\n",
Sathish Ambley1ca68232017-01-19 10:32:55 -08003011 "smqcontext:", ictx,
3012 "sc:", ictx->sc,
3013 "tid:", ictx->pid,
3014 "handle", ictx->rpra->h);
3015 }
3016 spin_unlock(&fl->hlock);
3017 }
3018 if (len > DEBUGFS_SIZE)
3019 len = DEBUGFS_SIZE;
3020 ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
3021 kfree(fileinfo);
3022bail:
3023 return ret;
3024}
3025
3026static const struct file_operations debugfs_fops = {
3027 .open = fastrpc_debugfs_open,
3028 .read = fastrpc_debugfs_read,
3029};
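
/*
 * Bring up the transport for the file's channel if it is not already open:
 * take a reference on the channel, open it over glink or legacy SMD
 * depending on me->glink, wait for the port to connect, queue the initial
 * rx intents (glink only) and, if channel 0 went through an SSR since the
 * last open, unmap the remote heap.  Serialized by me->smd_mutex.
 */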
Sathish Ambley36849af2017-02-02 09:35:55 -08003030static int fastrpc_channel_open(struct fastrpc_file *fl)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003031{
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003032 struct fastrpc_apps *me = &gfa;
Sathish Ambley36849af2017-02-02 09:35:55 -08003033 int cid, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003034
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303035 mutex_lock(&me->smd_mutex);
3036
Sathish Ambley36849af2017-02-02 09:35:55 -08003037 VERIFY(err, fl && fl->sctx);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003038 if (err)
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303039 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08003040 cid = fl->cid;
c_mtharu314a4202017-11-15 22:09:17 +05303041 VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
3042 if (err)
3043 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303044 if (me->channel[cid].ssrcount !=
3045 me->channel[cid].prevssrcount) {
3046 if (!me->channel[cid].issubsystemup) {
3047 VERIFY(err, 0);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303048 if (err) {
3049 err = -ENOTCONN;
c_mtharue1a5ce12017-10-13 20:47:09 +05303050 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303051 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303052 }
3053 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003054 fl->ssrcount = me->channel[cid].ssrcount;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303055 fl->refcount = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003056 if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
c_mtharue1a5ce12017-10-13 20:47:09 +05303057 (me->channel[cid].chan == NULL)) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303058 if (me->glink) {
3059 VERIFY(err, 0 == fastrpc_glink_register(cid, me));
3060 if (err)
3061 goto bail;
3062 VERIFY(err, 0 == fastrpc_glink_open(cid));
3063 } else {
3064 VERIFY(err, !smd_named_open_on_edge(FASTRPC_SMD_GUID,
3065 gcinfo[cid].channel,
3066 (smd_channel_t **)&me->channel[cid].chan,
3067 (void *)(uintptr_t)cid,
3068 smd_event_handler));
3069 }
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05303070 VERIFY(err,
3071 wait_for_completion_timeout(&me->channel[cid].workport,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003072 RPC_TIMEOUT));
3073 if (err) {
c_mtharue1a5ce12017-10-13 20:47:09 +05303074 me->channel[cid].chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003075 goto bail;
3076 }
3077 kref_init(&me->channel[cid].kref);
3078 pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
3079 MAJOR(me->dev_no), cid);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303080 if (me->glink) {
3081 err = glink_queue_rx_intent(me->channel[cid].chan, NULL,
3082 FASTRPC_GLINK_INTENT_LEN);
3083 err |= glink_queue_rx_intent(me->channel[cid].chan,
3084 NULL, FASTRPC_GLINK_INTENT_LEN);
3085 if (err)
3086 pr_warn("adsprpc: initial intent fail for %d err %d\n",
3087 cid, err);
3088 }
Tharun Kumar Merugud86fc8c2018-01-04 16:35:31 +05303089 if (cid == 0 && me->channel[cid].ssrcount !=
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003090 me->channel[cid].prevssrcount) {
c_mtharue1a5ce12017-10-13 20:47:09 +05303091 if (fastrpc_mmap_remove_ssr(fl))
3092 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003093 me->channel[cid].prevssrcount =
3094 me->channel[cid].ssrcount;
3095 }
3096 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003097
3098bail:
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303099 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003100 return err;
3101}
3102
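/*
 * open() handler: allocate the per-fd fastrpc_file, initialize its
 * context/map/buffer lists and mutexes, create a debugfs node named after
 * the opening task, and add the file to the global drivers list.  The
 * channel id stays -1 until FASTRPC_IOCTL_GETINFO selects one.
 */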
Sathish Ambley36849af2017-02-02 09:35:55 -08003103static int fastrpc_device_open(struct inode *inode, struct file *filp)
3104{
3105 int err = 0;
Sathish Ambley567012b2017-03-06 11:55:04 -08003106 struct dentry *debugfs_file;
c_mtharue1a5ce12017-10-13 20:47:09 +05303107 struct fastrpc_file *fl = NULL;
Sathish Ambley36849af2017-02-02 09:35:55 -08003108 struct fastrpc_apps *me = &gfa;
3109
c_mtharue1a5ce12017-10-13 20:47:09 +05303110 VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
Sathish Ambley36849af2017-02-02 09:35:55 -08003111 if (err)
3112 return err;
Sathish Ambley567012b2017-03-06 11:55:04 -08003113 debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
3114 fl, &debugfs_fops);
Sathish Ambley36849af2017-02-02 09:35:55 -08003115 context_list_ctor(&fl->clst);
3116 spin_lock_init(&fl->hlock);
3117 INIT_HLIST_HEAD(&fl->maps);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303118 INIT_HLIST_HEAD(&fl->perf);
Sathish Ambley36849af2017-02-02 09:35:55 -08003119 INIT_HLIST_HEAD(&fl->bufs);
3120 INIT_HLIST_NODE(&fl->hn);
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05303121 fl->sessionid = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003122 fl->tgid = current->tgid;
3123 fl->apps = me;
3124 fl->mode = FASTRPC_MODE_SERIAL;
3125 fl->cid = -1;
Sathish Ambley567012b2017-03-06 11:55:04 -08003126 if (debugfs_file != NULL)
3127 fl->debugfs_file = debugfs_file;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303128 fl->qos_request = 0;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303129 fl->refcount = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003130 filp->private_data = fl;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05303131 mutex_init(&fl->map_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303132 mutex_init(&fl->fl_map_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08003133 spin_lock(&me->hlock);
3134 hlist_add_head(&fl->hn, &me->drivers);
3135 spin_unlock(&me->hlock);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303136 mutex_init(&fl->perf_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08003137 return 0;
3138}
3139
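/*
 * FASTRPC_IOCTL_GETINFO backend: on first use *info selects the channel
 * id, a session is allocated on that channel, and *info is rewritten to
 * report whether the session has SMMU translation enabled.
 *
 * Minimal user-space sketch (the device node name below is illustrative,
 * not taken from this file):
 *
 *	uint32_t info = 0;		// channel id in, smmu-enabled out
 *	int fd = open("/dev/adsprpc-smd", O_RDWR);
 *	if (fd >= 0 && !ioctl(fd, FASTRPC_IOCTL_GETINFO, &info))
 *		printf("SMMU enabled: %u\n", info);
 */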
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003140static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
3141{
3142 int err = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003143 uint32_t cid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003144
c_mtharue1a5ce12017-10-13 20:47:09 +05303145 VERIFY(err, fl != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003146 if (err)
3147 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08003148 if (fl->cid == -1) {
3149 cid = *info;
3150 VERIFY(err, cid < NUM_CHANNELS);
3151 if (err)
3152 goto bail;
3153 fl->cid = cid;
3154 fl->ssrcount = fl->apps->channel[cid].ssrcount;
3155 VERIFY(err, !fastrpc_session_alloc_locked(
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303156 &fl->apps->channel[cid], 0, fl->sharedcb, &fl->sctx));
Sathish Ambley36849af2017-02-02 09:35:55 -08003157 if (err)
3158 goto bail;
3159 }
Tharun Kumar Merugu80be7d62017-08-02 11:03:22 +05303160 VERIFY(err, fl->sctx != NULL);
3161 if (err)
3162 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003163 *info = (fl->sctx->smmu.enabled ? 1 : 0);
3164bail:
3165 return err;
3166}
3167
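/*
 * FASTRPC_IOCTL_CONTROL backend.  FASTRPC_CONTROL_LATENCY adds or updates
 * a PM QoS CPU-DMA-latency request using the DT-provided latency value;
 * FASTRPC_CONTROL_SMMU records whether the process wants a shared context
 * bank.  Unknown requests return -ENOTTY.
 *
 * A caller would typically issue (sketch, field names as used below):
 *
 *	struct fastrpc_ioctl_control cp = {
 *		.req = FASTRPC_CONTROL_LATENCY,
 *		.lp.enable = FASTRPC_LATENCY_CTRL_ENB,
 *	};
 *	ioctl(fd, FASTRPC_IOCTL_CONTROL, &cp);
 */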
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303168static int fastrpc_internal_control(struct fastrpc_file *fl,
3169 struct fastrpc_ioctl_control *cp)
3170{
3171 int err = 0;
3172 int latency;
3173
3174 VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
3175 if (err)
3176 goto bail;
3177 VERIFY(err, !IS_ERR_OR_NULL(cp));
3178 if (err)
3179 goto bail;
3180
3181 switch (cp->req) {
3182 case FASTRPC_CONTROL_LATENCY:
3183 latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
3184 fl->apps->latency : PM_QOS_DEFAULT_VALUE;
3185 VERIFY(err, latency != 0);
3186 if (err)
3187 goto bail;
3188 if (!fl->qos_request) {
3189 pm_qos_add_request(&fl->pm_qos_req,
3190 PM_QOS_CPU_DMA_LATENCY, latency);
3191 fl->qos_request = 1;
3192 } else
3193 pm_qos_update_request(&fl->pm_qos_req, latency);
3194 break;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303195 case FASTRPC_CONTROL_SMMU:
3196 fl->sharedcb = cp->smmu.sharedcb;
3197 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303198 default:
3199 err = -ENOTTY;
3200 break;
3201 }
3202bail:
3203 return err;
3204}
3205
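/*
 * Main ioctl dispatcher.  The INVOKE/INVOKE_FD/INVOKE_ATTRS/INVOKE_CRC
 * cases deliberately fall through so that "size" reflects the layout the
 * caller could have passed; the same trick is used for INIT/INIT_ATTRS.
 * New requests are rejected once file_close has been set by the release
 * path.
 */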
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003206static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
3207 unsigned long ioctl_param)
3208{
3209 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07003210 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003211 struct fastrpc_ioctl_mmap mmap;
3212 struct fastrpc_ioctl_munmap munmap;
c_mtharu7bd6a422017-10-17 18:15:37 +05303213 struct fastrpc_ioctl_munmap_fd munmap_fd;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08003214 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003215 struct fastrpc_ioctl_perf perf;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303216 struct fastrpc_ioctl_control cp;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003217 } p;
3218 void *param = (char *)ioctl_param;
3219 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
3220 int size = 0, err = 0;
3221 uint32_t info;
3222
c_mtharue1a5ce12017-10-13 20:47:09 +05303223 p.inv.fds = NULL;
3224 p.inv.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07003225 p.inv.crc = NULL;
tharun kumar9f899ea2017-07-03 17:07:03 +05303226 spin_lock(&fl->hlock);
3227 if (fl->file_close == 1) {
3228 err = -EBADF;
3229 pr_warn("ADSPRPC: fastrpc_device_release is in progress, not sending any new requests to DSP\n");
3230 spin_unlock(&fl->hlock);
3231 goto bail;
3232 }
3233 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003234
3235 switch (ioctl_num) {
3236 case FASTRPC_IOCTL_INVOKE:
3237 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07003238 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003239 case FASTRPC_IOCTL_INVOKE_FD:
3240 if (!size)
3241 size = sizeof(struct fastrpc_ioctl_invoke_fd);
3242 /* fall through */
3243 case FASTRPC_IOCTL_INVOKE_ATTRS:
3244 if (!size)
3245 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07003246 /* fall through */
3247 case FASTRPC_IOCTL_INVOKE_CRC:
3248 if (!size)
3249 size = sizeof(struct fastrpc_ioctl_invoke_crc);
c_mtharue1a5ce12017-10-13 20:47:09 +05303250 K_COPY_FROM_USER(err, 0, &p.inv, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003251 if (err)
3252 goto bail;
3253 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
3254 0, &p.inv)));
3255 if (err)
3256 goto bail;
3257 break;
3258 case FASTRPC_IOCTL_MMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303259 K_COPY_FROM_USER(err, 0, &p.mmap, param,
3260 sizeof(p.mmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303261 if (err)
3262 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003263 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
3264 if (err)
3265 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303266 K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003267 if (err)
3268 goto bail;
3269 break;
3270 case FASTRPC_IOCTL_MUNMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303271 K_COPY_FROM_USER(err, 0, &p.munmap, param,
3272 sizeof(p.munmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303273 if (err)
3274 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003275 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
3276 &p.munmap)));
3277 if (err)
3278 goto bail;
3279 break;
Tharun Kumar Merugu55be90d2018-05-31 11:41:03 +05303280 case FASTRPC_IOCTL_MMAP_64:
3281 K_COPY_FROM_USER(err, 0, &p.mmap, param,
3282 sizeof(p.mmap));
3283 if (err)
3284 goto bail;
3285 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
3286 if (err)
3287 goto bail;
3288 K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
3289 if (err)
3290 goto bail;
3291 break;
3292 case FASTRPC_IOCTL_MUNMAP_64:
3293 K_COPY_FROM_USER(err, 0, &p.munmap, param,
3294 sizeof(p.munmap));
3295 if (err)
3296 goto bail;
3297 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
3298 &p.munmap)));
3299 if (err)
3300 goto bail;
3301 break;
c_mtharu7bd6a422017-10-17 18:15:37 +05303302 case FASTRPC_IOCTL_MUNMAP_FD:
3303 K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
3304 sizeof(p.munmap_fd));
3305 if (err)
3306 goto bail;
3307 VERIFY(err, 0 == (err = fastrpc_internal_munmap_fd(fl,
3308 &p.munmap_fd)));
3309 if (err)
3310 goto bail;
3311 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003312 case FASTRPC_IOCTL_SETMODE:
3313 switch ((uint32_t)ioctl_param) {
3314 case FASTRPC_MODE_PARALLEL:
3315 case FASTRPC_MODE_SERIAL:
3316 fl->mode = (uint32_t)ioctl_param;
3317 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003318 case FASTRPC_MODE_PROFILE:
3319 fl->profile = (uint32_t)ioctl_param;
3320 break;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05303321 case FASTRPC_MODE_SESSION:
3322 fl->sessionid = 1;
3323 fl->tgid |= (1 << SESSION_ID_INDEX);
3324 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003325 default:
3326 err = -ENOTTY;
3327 break;
3328 }
3329 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003330 case FASTRPC_IOCTL_GETPERF:
c_mtharue1a5ce12017-10-13 20:47:09 +05303331 K_COPY_FROM_USER(err, 0, &p.perf,
3332 param, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003333 if (err)
3334 goto bail;
3335 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
3336 if (p.perf.keys) {
3337 char *keys = PERF_KEYS;
3338
c_mtharue1a5ce12017-10-13 20:47:09 +05303339 K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
3340 keys, strlen(keys)+1);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003341 if (err)
3342 goto bail;
3343 }
3344 if (p.perf.data) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303345 struct fastrpc_perf *perf = NULL, *fperf = NULL;
3346 struct hlist_node *n = NULL;
3347
3348 mutex_lock(&fl->perf_mutex);
3349 hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
3350 if (perf->tid == current->pid) {
3351 fperf = perf;
3352 break;
3353 }
3354 }
3355
3356 mutex_unlock(&fl->perf_mutex);
3357
3358 if (fperf) {
3359 K_COPY_TO_USER(err, 0, (void *)p.perf.data,
3360 fperf, sizeof(*fperf));
3361 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003362 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303363 K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003364 if (err)
3365 goto bail;
3366 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303367 case FASTRPC_IOCTL_CONTROL:
c_mtharue1a5ce12017-10-13 20:47:09 +05303368 K_COPY_FROM_USER(err, 0, &p.cp, param,
3369 sizeof(p.cp));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303370 if (err)
3371 goto bail;
3372 VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
3373 if (err)
3374 goto bail;
3375 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003376 case FASTRPC_IOCTL_GETINFO:
c_mtharue1a5ce12017-10-13 20:47:09 +05303377 K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
Sathish Ambley36849af2017-02-02 09:35:55 -08003378 if (err)
3379 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003380 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
3381 if (err)
3382 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303383 K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003384 if (err)
3385 goto bail;
3386 break;
3387 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08003388 p.init.attrs = 0;
3389 p.init.siglen = 0;
3390 size = sizeof(struct fastrpc_ioctl_init);
3391 /* fall through */
3392 case FASTRPC_IOCTL_INIT_ATTRS:
3393 if (!size)
3394 size = sizeof(struct fastrpc_ioctl_init_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +05303395 K_COPY_FROM_USER(err, 0, &p.init, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003396 if (err)
3397 goto bail;
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05303398 VERIFY(err, p.init.init.filelen >= 0 &&
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303399 p.init.init.filelen < INIT_FILELEN_MAX);
3400 if (err)
3401 goto bail;
3402 VERIFY(err, p.init.init.memlen >= 0 &&
3403 p.init.init.memlen < INIT_MEMLEN_MAX);
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05303404 if (err)
3405 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303406 VERIFY(err, 0 == (err = fastrpc_init_process(fl, &p.init)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003407 if (err)
3408 goto bail;
3409 break;
3410
3411 default:
3412 err = -ENOTTY;
3413 pr_info("bad ioctl: %u\n", ioctl_num);
3414 break;
3415 }
3416 bail:
3417 return err;
3418}
3419
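/*
 * Subsystem-restart notifier.  On SUBSYS_BEFORE_SHUTDOWN the channel is
 * closed (glink or SMD), its ssrcount is bumped and waiting drivers are
 * notified; remote-heap ramdump collection is armed on
 * SUBSYS_RAMDUMP_NOTIFICATION, and the channel is marked up again on
 * SUBSYS_AFTER_POWERUP.
 */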
3420static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
3421 unsigned long code,
3422 void *data)
3423{
3424 struct fastrpc_apps *me = &gfa;
3425 struct fastrpc_channel_ctx *ctx;
c_mtharue1a5ce12017-10-13 20:47:09 +05303426 struct notif_data *notifdata = data;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003427 int cid;
3428
3429 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
3430 cid = ctx - &me->channel[0];
3431 if (code == SUBSYS_BEFORE_SHUTDOWN) {
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303432 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003433 ctx->ssrcount++;
c_mtharue1a5ce12017-10-13 20:47:09 +05303434 ctx->issubsystemup = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303435 if (ctx->chan) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303436 if (me->glink)
3437 fastrpc_glink_close(ctx->chan, cid);
3438 else
3439 smd_close(ctx->chan);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303440 ctx->chan = NULL;
3441 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
3442 gcinfo[cid].name, MAJOR(me->dev_no), cid);
3443 }
3444 mutex_unlock(&me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05303445 if (cid == 0)
3446 me->staticpd_flags = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003447 fastrpc_notify_drivers(me, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05303448 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
3449 if (me->channel[0].remoteheap_ramdump_dev &&
3450 notifdata->enable_ramdump) {
3451 me->channel[0].ramdumpenabled = 1;
3452 }
3453 } else if (code == SUBSYS_AFTER_POWERUP) {
3454 ctx->issubsystemup = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003455 }
3456
3457 return NOTIFY_DONE;
3458}
3459
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303460static int fastrpc_pdr_notifier_cb(struct notifier_block *pdrnb,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303461 unsigned long code,
3462 void *data)
3463{
3464 struct fastrpc_apps *me = &gfa;
3465 struct fastrpc_static_pd *spd;
3466 struct notif_data *notifdata = data;
3467
3468 spd = container_of(pdrnb, struct fastrpc_static_pd, pdrnb);
3469 if (code == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01) {
3470 mutex_lock(&me->smd_mutex);
3471 spd->pdrcount++;
3472 spd->ispdup = 0;
3473 pr_info("ADSPRPC: PDR notifier %d %s\n",
3474 MAJOR(me->dev_no), spd->spdname);
3475 mutex_unlock(&me->smd_mutex);
3476 if (!strcmp(spd->spdname,
3477 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME))
3478 me->staticpd_flags = 0;
3479 fastrpc_notify_pdr_drivers(me, spd->spdname);
3480 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
3481 if (me->channel[0].remoteheap_ramdump_dev &&
3482 notifdata->enable_ramdump) {
3483 me->channel[0].ramdumpenabled = 1;
3484 }
3485 } else if (code == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
3486 spd->ispdup = 1;
3487 }
3488
3489 return NOTIFY_DONE;
3490}
3491
3492static int fastrpc_get_service_location_notify(struct notifier_block *nb,
3493 unsigned long opcode, void *data)
3494{
3495 struct fastrpc_static_pd *spd;
3496 struct pd_qmi_client_data *pdr = data;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303497 int curr_state = 0, i = 0;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303498
3499 spd = container_of(nb, struct fastrpc_static_pd, get_service_nb);
3500 if (opcode == LOCATOR_DOWN) {
3501 pr_err("ADSPRPC: PD restart notifier: service locator down\n");
3502 return NOTIFY_DONE;
3503 }
3504
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303505 for (i = 0; i < pdr->total_domains; i++) {
3506 if ((!strcmp(pdr->domain_list[i].name,
3507 "msm/adsp/audio_pd")) ||
3508 (!strcmp(pdr->domain_list[i].name,
3509 "msm/adsp/sensor_pd"))) {
3510 spd->pdrhandle =
3511 service_notif_register_notifier(
3512 pdr->domain_list[i].name,
3513 pdr->domain_list[i].instance_id,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303514 &spd->pdrnb, &curr_state);
Tharun Kumar Meruguad4beb82018-05-10 19:51:48 +05303515 if (IS_ERR(spd->pdrhandle)) {
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303516 pr_err("ADSPRPC: Unable to register notifier\n");
Tharun Kumar Meruguad4beb82018-05-10 19:51:48 +05303517 } else if (curr_state ==
3518 SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
3519 pr_info("ADSPRPC: STATE_UP_V01 received\n");
3520 spd->ispdup = 1;
3521 } else if (curr_state ==
3522 SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01) {
3523 pr_info("ADSPRPC: STATE_UNINIT_V01 received\n");
3524 }
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303525 break;
3526 }
3527 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303528
3529 return NOTIFY_DONE;
3530}
3531
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003532static const struct file_operations fops = {
3533 .open = fastrpc_device_open,
3534 .release = fastrpc_device_release,
3535 .unlocked_ioctl = fastrpc_device_ioctl,
3536 .compat_ioctl = compat_fastrpc_device_ioctl,
3537};
3538
3539static const struct of_device_id fastrpc_match_table[] = {
3540 { .compatible = "qcom,msm-fastrpc-adsp", },
3541 { .compatible = "qcom,msm-fastrpc-compute", },
3542 { .compatible = "qcom,msm-fastrpc-compute-cb", },
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303543 { .compatible = "qcom,msm-fastrpc-legacy-compute", },
3544 { .compatible = "qcom,msm-fastrpc-legacy-compute-cb", },
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003545 { .compatible = "qcom,msm-adsprpc-mem-region", },
3546 {}
3547};
3548
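/*
 * Probe one "qcom,msm-fastrpc-compute-cb" context bank: match the "label"
 * property against a channel, read the SMMU stream id from the "iommus"
 * phandle, create and attach an ARM IOMMU mapping (with a lower VA base
 * for secure context banks) and record the session in the channel table.
 */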
3549static int fastrpc_cb_probe(struct device *dev)
3550{
3551 struct fastrpc_channel_ctx *chan;
3552 struct fastrpc_session_ctx *sess;
3553 struct of_phandle_args iommuspec;
3554 const char *name;
3555 unsigned int start = 0x80000000;
3556 int err = 0, i;
3557 int secure_vmid = VMID_CP_PIXEL;
3558
c_mtharue1a5ce12017-10-13 20:47:09 +05303559 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
3560 "label", NULL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003561 if (err)
3562 goto bail;
3563 for (i = 0; i < NUM_CHANNELS; i++) {
3564 if (!gcinfo[i].name)
3565 continue;
3566 if (!strcmp(name, gcinfo[i].name))
3567 break;
3568 }
3569 VERIFY(err, i < NUM_CHANNELS);
3570 if (err)
3571 goto bail;
3572 chan = &gcinfo[i];
3573 VERIFY(err, chan->sesscount < NUM_SESSIONS);
3574 if (err)
3575 goto bail;
3576
3577 VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
3578 "#iommu-cells", 0, &iommuspec));
3579 if (err)
3580 goto bail;
3581 sess = &chan->session[chan->sesscount];
3582 sess->smmu.cb = iommuspec.args[0] & 0xf;
3583 sess->used = 0;
3584 sess->smmu.coherent = of_property_read_bool(dev->of_node,
3585 "dma-coherent");
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303586 sess->smmu.sharedcb = of_property_read_bool(dev->of_node,
3587 "shared-cb");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003588 sess->smmu.secure = of_property_read_bool(dev->of_node,
3589 "qcom,secure-context-bank");
3590 if (sess->smmu.secure)
3591 start = 0x60000000;
3592 VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
3593 arm_iommu_create_mapping(&platform_bus_type,
Tharun Kumar Meruguca183f92017-04-27 17:43:27 +05303594 start, 0x78000000)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003595 if (err)
3596 goto bail;
3597
3598 if (sess->smmu.secure)
3599 iommu_domain_set_attr(sess->smmu.mapping->domain,
3600 DOMAIN_ATTR_SECURE_VMID,
3601 &secure_vmid);
3602
3603 VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
3604 if (err)
3605 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303606 sess->smmu.dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003607 sess->smmu.enabled = 1;
3608 chan->sesscount++;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003609 debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
3610 NULL, &debugfs_fops);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003611bail:
3612 return err;
3613}
3614
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303615static int fastrpc_cb_legacy_probe(struct device *dev)
3616{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303617 struct fastrpc_channel_ctx *chan;
3618 struct fastrpc_session_ctx *first_sess = NULL, *sess = NULL;
3619 const char *name;
3620 unsigned int *sids = NULL, sids_size = 0;
3621 int err = 0, i;
3622
3623 unsigned int start = 0x80000000;
3624
3625 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
3626 "label", NULL)));
3627 if (err)
3628 goto bail;
3629
3630 for (i = 0; i < NUM_CHANNELS; i++) {
3631 if (!gcinfo[i].name)
3632 continue;
3633 if (!strcmp(name, gcinfo[i].name))
3634 break;
3635 }
3636 VERIFY(err, i < NUM_CHANNELS);
3637 if (err)
3638 goto bail;
3639
3640 chan = &gcinfo[i];
3641 VERIFY(err, chan->sesscount < NUM_SESSIONS);
3642 if (err)
3643 goto bail;
3644
3645 first_sess = &chan->session[chan->sesscount];
3646
3647 VERIFY(err, NULL != of_get_property(dev->of_node,
3648 "sids", &sids_size));
3649 if (err)
3650 goto bail;
3651
3652 VERIFY(err, NULL != (sids = kzalloc(sids_size, GFP_KERNEL)));
3653 if (err)
3654 goto bail;
3655 err = of_property_read_u32_array(dev->of_node, "sids", sids,
3656 sids_size/sizeof(unsigned int));
3657 if (err)
3658 goto bail;
3659
3660 VERIFY(err, !IS_ERR_OR_NULL(first_sess->smmu.mapping =
3661 arm_iommu_create_mapping(&platform_bus_type,
3662 start, 0x78000000)));
3663 if (err)
3664 goto bail;
3665
3666 VERIFY(err, !arm_iommu_attach_device(dev, first_sess->smmu.mapping));
3667 if (err)
3668 goto bail;
3669
3670
3671 for (i = 0; i < sids_size/sizeof(unsigned int); i++) {
3672 VERIFY(err, chan->sesscount < NUM_SESSIONS);
3673 if (err)
3674 goto bail;
3675 sess = &chan->session[chan->sesscount];
3676 sess->smmu.cb = sids[i];
3677 sess->smmu.dev = dev;
3678 sess->smmu.mapping = first_sess->smmu.mapping;
3679 sess->smmu.enabled = 1;
3680 sess->used = 0;
3681 sess->smmu.coherent = false;
3682 sess->smmu.secure = false;
3683 chan->sesscount++;
3684 }
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303685bail:
3686 kfree(sids);
3687 return err;
3688}
3689
3690
3691
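/*
 * Read a list of remote-heap VMIDs from the given DT property and build
 * matching VMID/permission arrays in *destvm.  Every VMID is granted
 * read/write/exec permission.  On any failure the partially built lists
 * are freed and *destvm is left untouched.
 */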
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05303692static void init_secure_vmid_list(struct device *dev, char *prop_name,
3693 struct secure_vm *destvm)
3694{
3695 int err = 0;
3696 u32 len = 0, i = 0;
3697 u32 *rhvmlist = NULL;
3698 u32 *rhvmpermlist = NULL;
3699
3700 if (!of_find_property(dev->of_node, prop_name, &len))
3701 goto bail;
3702 if (len == 0)
3703 goto bail;
3704 len /= sizeof(u32);
3705 VERIFY(err, NULL != (rhvmlist = kcalloc(len, sizeof(u32), GFP_KERNEL)));
3706 if (err)
3707 goto bail;
3708 VERIFY(err, NULL != (rhvmpermlist = kcalloc(len, sizeof(u32),
3709 GFP_KERNEL)));
3710 if (err)
3711 goto bail;
3712 for (i = 0; i < len; i++) {
3713 err = of_property_read_u32_index(dev->of_node, prop_name, i,
3714 &rhvmlist[i]);
3715 rhvmpermlist[i] = PERM_READ | PERM_WRITE | PERM_EXEC;
3716 pr_info("ADSPRPC: Secure VMID = %u\n", rhvmlist[i]);
3717 if (err) {
3718 pr_err("ADSPRPC: Failed to read VMID\n");
3719 goto bail;
3720 }
3721 }
3722 destvm->vmid = rhvmlist;
3723 destvm->vmperm = rhvmpermlist;
3724 destvm->vmcount = len;
3725bail:
3726 if (err) {
3727 kfree(rhvmlist);
3728 kfree(rhvmpermlist);
3729 }
3730}
3731
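/*
 * Platform probe, dispatched on the compatible string: the top-level
 * compute node supplies the secure VMID list and RPC latency, compute-cb
 * nodes configure SMMU context banks, the legacy-compute nodes switch the
 * driver to SMD transport, and the mem-region node shares its CMA range
 * with the remote subsystems via hyp_assign_phys() unless
 * "restrict-access" is set.  Audio/sensors PDR service locators are
 * registered when the matching properties are present.
 */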
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003732static int fastrpc_probe(struct platform_device *pdev)
3733{
3734 int err = 0;
3735 struct fastrpc_apps *me = &gfa;
3736 struct device *dev = &pdev->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003737 struct device_node *ion_node, *node;
3738 struct platform_device *ion_pdev;
3739 struct cma *cma;
3740 uint32_t val;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303741 int ret = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003742
c_mtharu63ffc012017-11-16 15:26:56 +05303743
3744 if (of_device_is_compatible(dev->of_node,
3745 "qcom,msm-fastrpc-compute")) {
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05303746 init_secure_vmid_list(dev, "qcom,adsp-remoteheap-vmid",
3747 &gcinfo[0].rhvm);
c_mtharu63ffc012017-11-16 15:26:56 +05303748
c_mtharu63ffc012017-11-16 15:26:56 +05303749
3750 of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
3751 &me->latency);
3752 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003753 if (of_device_is_compatible(dev->of_node,
3754 "qcom,msm-fastrpc-compute-cb"))
3755 return fastrpc_cb_probe(dev);
3756
3757 if (of_device_is_compatible(dev->of_node,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303758 "qcom,msm-fastrpc-legacy-compute")) {
3759 me->glink = false;
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05303760 me->legacy = 1;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303761 }
3762
3763 if (of_device_is_compatible(dev->of_node,
3764 "qcom,msm-fastrpc-legacy-compute-cb")){
3765 return fastrpc_cb_legacy_probe(dev);
3766 }
3767
3768 if (of_device_is_compatible(dev->of_node,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003769 "qcom,msm-adsprpc-mem-region")) {
3770 me->dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003771 ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
3772 if (ion_node) {
3773 for_each_available_child_of_node(ion_node, node) {
3774 if (of_property_read_u32(node, "reg", &val))
3775 continue;
3776 if (val != ION_ADSP_HEAP_ID)
3777 continue;
3778 ion_pdev = of_find_device_by_node(node);
3779 if (!ion_pdev)
3780 break;
3781 cma = dev_get_cma_area(&ion_pdev->dev);
3782 if (cma) {
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05303783 me->range.addr = cma_get_base(cma);
3784 me->range.size =
3785 (size_t)cma_get_size(cma);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003786 }
3787 break;
3788 }
3789 }
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05303790 if (me->range.addr && !of_property_read_bool(dev->of_node,
Tharun Kumar Merugu6b7a4a22018-01-17 16:08:07 +05303791 "restrict-access")) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003792 int srcVM[1] = {VMID_HLOS};
3793 int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
3794 VMID_ADSP_Q6};
Sathish Ambley84d11862017-05-15 14:36:05 -07003795 int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003796 PERM_READ | PERM_WRITE | PERM_EXEC,
3797 PERM_READ | PERM_WRITE | PERM_EXEC,
3798 PERM_READ | PERM_WRITE | PERM_EXEC,
3799 };
3800
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05303801 VERIFY(err, !hyp_assign_phys(me->range.addr,
3802 me->range.size, srcVM, 1,
3803 destVM, destVMperm, 4));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003804 if (err)
3805 goto bail;
3806 }
3807 return 0;
3808 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303809 if (of_property_read_bool(dev->of_node,
3810 "qcom,fastrpc-adsp-audio-pdr")) {
3811 int session;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003812
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303813 VERIFY(err, !fastrpc_get_adsp_session(
3814 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
3815 if (err)
3816 goto spdbail;
3817 me->channel[0].spd[session].get_service_nb.notifier_call =
3818 fastrpc_get_service_location_notify;
3819 ret = get_service_location(
3820 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
3821 AUDIO_PDR_ADSP_SERVICE_NAME,
3822 &me->channel[0].spd[session].get_service_nb);
3823 if (ret)
3824 pr_err("ADSPRPC: Get service location failed: %d\n",
3825 ret);
3826 }
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303827 if (of_property_read_bool(dev->of_node,
3828 "qcom,fastrpc-adsp-sensors-pdr")) {
3829 int session;
3830
3831 VERIFY(err, !fastrpc_get_adsp_session(
3832 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
3833 if (err)
3834 goto spdbail;
3835 me->channel[0].spd[session].get_service_nb.notifier_call =
3836 fastrpc_get_service_location_notify;
3837 ret = get_service_location(
3838 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
3839 SENSORS_PDR_ADSP_SERVICE_NAME,
3840 &me->channel[0].spd[session].get_service_nb);
3841 if (ret)
3842 pr_err("ADSPRPC: Get service location failed: %d\n",
3843 ret);
3844 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303845spdbail:
3846 err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003847 VERIFY(err, !of_platform_populate(pdev->dev.of_node,
3848 fastrpc_match_table,
3849 NULL, &pdev->dev));
3850 if (err)
3851 goto bail;
3852bail:
3853 return err;
3854}
3855
3856static void fastrpc_deinit(void)
3857{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303858 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003859 struct fastrpc_channel_ctx *chan = gcinfo;
3860 int i, j;
3861
3862 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
3863 if (chan->chan) {
3864 kref_put_mutex(&chan->kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303865 fastrpc_channel_close, &me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05303866 chan->chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003867 }
3868 for (j = 0; j < NUM_SESSIONS; j++) {
3869 struct fastrpc_session_ctx *sess = &chan->session[j];
c_mtharue1a5ce12017-10-13 20:47:09 +05303870 if (sess->smmu.dev) {
3871 arm_iommu_detach_device(sess->smmu.dev);
3872 sess->smmu.dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003873 }
3874 if (sess->smmu.mapping) {
3875 arm_iommu_release_mapping(sess->smmu.mapping);
c_mtharue1a5ce12017-10-13 20:47:09 +05303876 sess->smmu.mapping = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003877 }
3878 }
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05303879 kfree(chan->rhvm.vmid);
3880 kfree(chan->rhvm.vmperm);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003881 }
3882}
3883
3884static struct platform_driver fastrpc_driver = {
3885 .probe = fastrpc_probe,
3886 .driver = {
3887 .name = "fastrpc",
3888 .owner = THIS_MODULE,
3889 .of_match_table = fastrpc_match_table,
3890 },
3891};
3892
3893static int __init fastrpc_device_init(void)
3894{
3895 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +05303896 struct device *dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003897 int err = 0, i;
3898
3899 memset(me, 0, sizeof(*me));
3900
3901 fastrpc_init(me);
3902 me->dev = NULL;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303903 me->glink = true;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003904 VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
3905 if (err)
3906 goto register_bail;
3907 VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
3908 DEVICE_NAME));
3909 if (err)
3910 goto alloc_chrdev_bail;
3911 cdev_init(&me->cdev, &fops);
3912 me->cdev.owner = THIS_MODULE;
3913 VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
Sathish Ambley36849af2017-02-02 09:35:55 -08003914 1));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003915 if (err)
3916 goto cdev_init_bail;
3917 me->class = class_create(THIS_MODULE, "fastrpc");
3918 VERIFY(err, !IS_ERR(me->class));
3919 if (err)
3920 goto class_create_bail;
3921 me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
Sathish Ambley36849af2017-02-02 09:35:55 -08003922 dev = device_create(me->class, NULL,
3923 MKDEV(MAJOR(me->dev_no), 0),
3924 NULL, gcinfo[0].name);
3925 VERIFY(err, !IS_ERR_OR_NULL(dev));
3926 if (err)
3927 goto device_create_bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003928 for (i = 0; i < NUM_CHANNELS; i++) {
Sathish Ambley36849af2017-02-02 09:35:55 -08003929 me->channel[i].dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003930 me->channel[i].ssrcount = 0;
3931 me->channel[i].prevssrcount = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05303932 me->channel[i].issubsystemup = 1;
3933 me->channel[i].ramdumpenabled = 0;
3934 me->channel[i].remoteheap_ramdump_dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003935 me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
3936 me->channel[i].handle = subsys_notif_register_notifier(
3937 gcinfo[i].subsys,
3938 &me->channel[i].nb);
3939 }
3940
3941 me->client = msm_ion_client_create(DEVICE_NAME);
3942 VERIFY(err, !IS_ERR_OR_NULL(me->client));
3943 if (err)
3944 goto device_create_bail;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003945 debugfs_root = debugfs_create_dir("adsprpc", NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003946 return 0;
3947device_create_bail:
3948 for (i = 0; i < NUM_CHANNELS; i++) {
Sathish Ambley36849af2017-02-02 09:35:55 -08003949 if (me->channel[i].handle)
3950 subsys_notif_unregister_notifier(me->channel[i].handle,
3951 &me->channel[i].nb);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003952 }
Sathish Ambley36849af2017-02-02 09:35:55 -08003953 if (!IS_ERR_OR_NULL(dev))
3954 device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003955 class_destroy(me->class);
3956class_create_bail:
3957 cdev_del(&me->cdev);
3958cdev_init_bail:
3959 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
3960alloc_chrdev_bail:
3961register_bail:
3962 fastrpc_deinit();
3963 return err;
3964}
3965
3966static void __exit fastrpc_device_exit(void)
3967{
3968 struct fastrpc_apps *me = &gfa;
3969 int i;
3970
3971 fastrpc_file_list_dtor(me);
3972 fastrpc_deinit();
3973 for (i = 0; i < NUM_CHANNELS; i++) {
3974 if (!gcinfo[i].name)
3975 continue;
3976 device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
3977 subsys_notif_unregister_notifier(me->channel[i].handle,
3978 &me->channel[i].nb);
3979 }
3980 class_destroy(me->class);
3981 cdev_del(&me->cdev);
3982 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
3983 ion_client_destroy(me->client);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003984 debugfs_remove_recursive(debugfs_root);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003985}
3986
3987late_initcall(fastrpc_device_init);
3988module_exit(fastrpc_device_exit);
3989
3990MODULE_LICENSE("GPL v2");