/*
 * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/msm_ion.h>
#include <soc/qcom/secure_buffer.h>
#include <soc/qcom/glink.h>
#include <soc/qcom/smd.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/service-notifier.h>
#include <soc/qcom/service-locator.h>
#include <linux/scatterlist.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/sort.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <asm/dma-iommu.h>
#include <soc/qcom/scm.h>
#include "adsprpc_compat.h"
#include "adsprpc_shared.h"
#include <soc/qcom/ramdump.h>
#include <linux/debugfs.h>
#include <linux/pm_qos.h>
#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
#define TZ_PIL_AUTH_QDSP6_PROC 1
#define ADSP_MMAP_HEAP_ADDR 4
#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
#define FASTRPC_DMAHANDLE_NOMAP (16)

#define FASTRPC_ENOSUCH 39
#define VMID_SSC_Q6 5
#define VMID_ADSP_Q6 6
#define DEBUGFS_SIZE 1024

#define AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME "audio_pdr_adsprpc"
#define AUDIO_PDR_ADSP_SERVICE_NAME "avs/audio"

#define SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME "sensors_pdr_adsprpc"
#define SENSORS_PDR_ADSP_SERVICE_NAME "tms/servreg"

#define RPC_TIMEOUT (5 * HZ)
#define BALIGN 128
#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
#define M_FDLIST (16)
#define M_CRCLIST (64)
#define SESSION_ID_INDEX (30)
#define FASTRPC_CTX_MAGIC (0xbeeddeed)
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_CTXID_MASK (0xFF0)

#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)

#define FASTRPC_LINK_STATE_DOWN (0x0)
#define FASTRPC_LINK_STATE_UP (0x1)
#define FASTRPC_LINK_DISCONNECTED (0x0)
#define FASTRPC_LINK_CONNECTING (0x1)
#define FASTRPC_LINK_CONNECTED (0x3)
#define FASTRPC_LINK_DISCONNECTING (0x7)
#define FASTRPC_LINK_REMOTE_DISCONNECTING (0x8)
#define FASTRPC_GLINK_INTENT_LEN (64)
#define FASTRPC_GLINK_INTENT_NUM (16)

#define PERF_KEYS \
	"count:flush:map:copy:glink:getargs:putargs:invalidate:invoke:tid:ptr"
#define FASTRPC_STATIC_HANDLE_LISTENER (3)
#define FASTRPC_STATIC_HANDLE_MAX (20)
#define FASTRPC_LATENCY_CTRL_ENB (1)

#define INIT_FILELEN_MAX (2*1024*1024)
#define INIT_MEMLEN_MAX (8*1024*1024)

#define PERF_END (void)0
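
/*
 * Profiling helpers: PERF() wraps a statement and, when profiling is
 * enabled and a counter pointer is supplied, accumulates the elapsed
 * nanoseconds into that counter; GET_COUNTER() bounds-checks an
 * enum fastrpc_perfkeys offset into the per-thread fastrpc_perf block.
 */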
#define PERF(enb, cnt, ff) \
	{\
		struct timespec startT = {0};\
		int64_t *counter = cnt;\
		if (enb && counter) {\
			getnstimeofday(&startT);\
		} \
		ff ;\
		if (enb && counter) {\
			*counter += getnstimediff(&startT);\
		} \
	}

#define GET_COUNTER(perf_ptr, offset)  \
	(perf_ptr != NULL ?\
		(((offset >= 0) && (offset < PERF_KEY_MAX)) ?\
			(int64_t *)(perf_ptr + offset)\
				: (int64_t *)NULL) : (int64_t *)NULL)

static int fastrpc_glink_open(int cid);
static void fastrpc_glink_close(void *chan, int cid);
static int fastrpc_pdr_notifier_cb(struct notifier_block *nb,
					unsigned long code,
					void *data);
static struct dentry *debugfs_root;
static struct dentry *debugfs_global_file;
static inline uint64_t buf_page_start(uint64_t buf)
{
	uint64_t start = (uint64_t) buf & PAGE_MASK;
	return start;
}

static inline uint64_t buf_page_offset(uint64_t buf)
{
	uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);
	return offset;
}

static inline uint64_t buf_num_pages(uint64_t buf, size_t len)
{
	uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
	uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
	uint64_t nPages = end - start + 1;
	return nPages;
}

static inline uint64_t buf_page_size(uint32_t size)
{
	uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;

	return sz > PAGE_SIZE ? sz : PAGE_SIZE;
}

static inline void *uint64_to_ptr(uint64_t addr)
{
	void *ptr = (void *)((uintptr_t)addr);

	return ptr;
}

static inline uint64_t ptr_to_uint64(void *ptr)
{
	uint64_t addr = (uint64_t)((uintptr_t)ptr);

	return addr;
}
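
/*
 * Driver bookkeeping types: buffers, memory maps, invoke contexts,
 * SMMU sessions, static process domains and per-channel state.
 */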
struct secure_vm {
	int *vmid;
	int *vmperm;
	int vmcount;
};

struct fastrpc_file;

struct fastrpc_buf {
	struct hlist_node hn;
	struct fastrpc_file *fl;
	void *virt;
	uint64_t phys;
	size_t size;
};

struct fastrpc_ctx_lst;
struct overlap {
	uintptr_t start;
	uintptr_t end;
	int raix;
	uintptr_t mstart;
	uintptr_t mend;
	uintptr_t offset;
};

struct smq_invoke_ctx {
	struct hlist_node hn;
	struct completion work;
	int retval;
	int pid;
	int tgid;
	remote_arg_t *lpra;
	remote_arg64_t *rpra;
	int *fds;
	struct fastrpc_mmap **maps;
	struct fastrpc_buf *buf;
	size_t used;
	struct fastrpc_file *fl;
	uint32_t sc;
	struct overlap *overs;
	struct overlap **overps;
	struct smq_msg msg;
	unsigned int magic;
	unsigned int *attrs;
	uint32_t *crc;
	uint64_t ctxid;
};

struct fastrpc_ctx_lst {
	struct hlist_head pending;
	struct hlist_head interrupted;
};
struct fastrpc_smmu {
	struct device *dev;
	struct dma_iommu_mapping *mapping;
	int cb;
	int enabled;
	int faults;
	int secure;
	int coherent;
	int sharedcb;
};

struct fastrpc_session_ctx {
	struct device *dev;
	struct fastrpc_smmu smmu;
	int used;
};

struct fastrpc_static_pd {
	char *spdname;
	struct notifier_block pdrnb;
	struct notifier_block get_service_nb;
	void *pdrhandle;
	int pdrcount;
	int prevpdrcount;
	int ispdup;
};
struct fastrpc_glink_info {
	int link_state;
	int port_state;
	struct glink_open_config cfg;
	struct glink_link_info link_info;
	void *link_notify_handle;
};

struct fastrpc_channel_ctx {
	char *name;
	char *subsys;
	void *chan;
	struct device *dev;
	struct fastrpc_session_ctx session[NUM_SESSIONS];
	struct fastrpc_static_pd spd[NUM_SESSIONS];
	struct completion work;
	struct completion workport;
	struct notifier_block nb;
	struct kref kref;
	int channel;
	int sesscount;
	int ssrcount;
	void *handle;
	int prevssrcount;
	int issubsystemup;
	int vmid;
	struct secure_vm rhvm;
	int ramdumpenabled;
	void *remoteheap_ramdump_dev;
	struct fastrpc_glink_info link;
};
struct fastrpc_apps {
	struct fastrpc_channel_ctx *channel;
	struct cdev cdev;
	struct class *class;
	struct mutex smd_mutex;
	struct smq_phy_page range;
	struct hlist_head maps;
	uint32_t staticpd_flags;
	dev_t dev_no;
	int compat;
	struct hlist_head drivers;
	spinlock_t hlock;
	struct ion_client *client;
	struct device *dev;
	unsigned int latency;
	bool glink;
	bool legacy;
	spinlock_t ctxlock;
	struct smq_invoke_ctx *ctxtable[FASTRPC_CTX_MAX];
};
struct fastrpc_mmap {
	struct hlist_node hn;
	struct fastrpc_file *fl;
	struct fastrpc_apps *apps;
	int fd;
	uint32_t flags;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct ion_handle *handle;
	uint64_t phys;
	size_t size;
	uintptr_t va;
	size_t len;
	int refs;
	uintptr_t raddr;
	int uncached;
	int secure;
	uintptr_t attr;
};

enum fastrpc_perfkeys {
	PERF_COUNT = 0,
	PERF_FLUSH = 1,
	PERF_MAP = 2,
	PERF_COPY = 3,
	PERF_LINK = 4,
	PERF_GETARGS = 5,
	PERF_PUTARGS = 6,
	PERF_INVARGS = 7,
	PERF_INVOKE = 8,
	PERF_KEY_MAX = 9,
};
struct fastrpc_perf {
	int64_t count;
	int64_t flush;
	int64_t map;
	int64_t copy;
	int64_t link;
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;
	int64_t invoke;
	int64_t tid;
	struct hlist_node hn;
};

struct fastrpc_file {
	struct hlist_node hn;
	spinlock_t hlock;
	struct hlist_head maps;
	struct hlist_head bufs;
	struct fastrpc_ctx_lst clst;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_session_ctx *secsctx;
	uint32_t mode;
	uint32_t profile;
	int sessionid;
	int tgid;
	int cid;
	int ssrcount;
	int pd;
	char *spdname;
	int file_close;
	int sharedcb;
	struct fastrpc_apps *apps;
	struct hlist_head perf;
	struct dentry *debugfs_file;
	struct mutex perf_mutex;
	struct pm_qos_request pm_qos_req;
	int qos_request;
	struct mutex map_mutex;
	struct mutex fl_map_mutex;
	int refcount;
};
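
/*
 * Global driver state and the static table describing the four DSP
 * channels (adsp, mdsp, slpi, cdsp) with their link transport info and,
 * for the ADSP, the audio and sensors static PD notifier callbacks.
 */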
static struct fastrpc_apps gfa;

static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
	{
		.name = "adsprpc-smd",
		.subsys = "adsp",
		.channel = SMD_APPS_QDSP,
		.link.link_info.edge = "lpass",
		.link.link_info.transport = "smem",
		.spd = {
			{
				.spdname =
					AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
				.pdrnb.notifier_call =
						fastrpc_pdr_notifier_cb,
			},
			{
				.spdname =
				SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
				.pdrnb.notifier_call =
						fastrpc_pdr_notifier_cb,
			}
		},
	},
	{
		.name = "mdsprpc-smd",
		.subsys = "modem",
		.channel = SMD_APPS_MODEM,
		.link.link_info.edge = "mpss",
		.link.link_info.transport = "smem",
	},
	{
		.name = "sdsprpc-smd",
		.subsys = "slpi",
		.channel = SMD_APPS_DSPS,
		.link.link_info.edge = "dsps",
		.link.link_info.transport = "smem",
	},
	{
		.name = "cdsprpc-smd",
		.subsys = "cdsp",
		.link.link_info.edge = "cdsp",
		.link.link_info.transport = "smem",
	},
};

static int hlosvm[1] = {VMID_HLOS};
static int hlosvmperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
static inline int64_t getnstimediff(struct timespec *start)
{
	int64_t ns;
	struct timespec ts, b;

	getnstimeofday(&ts);
	b = timespec_sub(ts, *start);
	ns = timespec_to_ns(&b);
	return ns;
}
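
/*
 * Look up (or lazily allocate) the calling thread's fastrpc_perf block
 * under fl->perf_mutex and return a pointer to the counter selected by
 * key, or NULL on failure.
 */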
static inline int64_t *getperfcounter(struct fastrpc_file *fl, int key)
{
	int err = 0;
	int64_t *val = NULL;
	struct fastrpc_perf *perf = NULL, *fperf = NULL;
	struct hlist_node *n = NULL;

	VERIFY(err, !IS_ERR_OR_NULL(fl));
	if (err)
		goto bail;

	mutex_lock(&fl->perf_mutex);
	hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
		if (perf->tid == current->pid) {
			fperf = perf;
			break;
		}
	}

	if (IS_ERR_OR_NULL(fperf)) {
		fperf = kzalloc(sizeof(*fperf), GFP_KERNEL);

		VERIFY(err, !IS_ERR_OR_NULL(fperf));
		if (err) {
			mutex_unlock(&fl->perf_mutex);
			kfree(fperf);
			goto bail;
		}

		fperf->tid = current->pid;
		hlist_add_head(&fperf->hn, &fl->perf);
	}

	val = ((int64_t *)fperf) + key;
	mutex_unlock(&fl->perf_mutex);
bail:
	return val;
}
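
/*
 * Release a DMA buffer: either park it on the file's cache list or
 * reassign it back to HLOS (when the channel uses a secure VMID) and
 * free the coherent allocation.
 */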
static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
{
	struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
	int vmid;

	if (!fl)
		return;
	if (cache) {
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (fl->sctx->smmu.cb)
			buf->phys &= ~((uint64_t)fl->sctx->smmu.cb << 32);
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid) {
			int srcVM[2] = {VMID_HLOS, vmid};

			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->smmu.dev, buf->size, buf->virt,
					buf->phys);
	}
	kfree(buf);
}

static void fastrpc_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

	do {
		struct hlist_node *n;

		free = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
			hlist_del_init(&buf->hn);
			free = buf;
			break;
		}
		spin_unlock(&fl->hlock);
		if (free)
			fastrpc_buf_free(free, 0);
	} while (free);
}
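
/*
 * Remote-heap mappings are tracked on the global list in gfa under its
 * spinlock; all other mappings live on the owning file's list.
 */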
static void fastrpc_mmap_add(struct fastrpc_mmap *map)
{
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		struct fastrpc_apps *me = &gfa;

		spin_lock(&me->hlock);
		hlist_add_head(&map->hn, &me->maps);
		spin_unlock(&me->hlock);
	} else {
		struct fastrpc_file *fl = map->fl;

		hlist_add_head(&map->hn, &fl->maps);
	}
}

static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
		uintptr_t va, size_t len, int mflags, int refs,
		struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_mmap *match = NULL, *map = NULL;
	struct hlist_node *n;

	if ((va + len) < va)
		return -EOVERFLOW;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs)
					map->refs++;
				match = map;
				break;
			}
		}
		spin_unlock(&me->hlock);
	} else {
		hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
			if (va >= map->va &&
				va + len <= map->va + map->len &&
				map->fd == fd) {
				if (refs)
					map->refs++;
				match = map;
				break;
			}
		}
	}
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}
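
/*
 * Allocate DMA memory with no kernel mapping from the adsprpc-mem
 * device for remote heap requests; only the physical address is
 * returned to the caller.
 */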
static int dma_alloc_memory(dma_addr_t *region_phys, size_t size)
{
	struct fastrpc_apps *me = &gfa;
	void *vaddr = NULL;
	unsigned long dma_attrs = 0;

	if (me->dev == NULL) {
		pr_err("device adsprpc-mem is not initialized\n");
		return -ENODEV;
	}
	dma_attrs |= DMA_ATTR_SKIP_ZEROING | DMA_ATTR_NO_KERNEL_MAPPING;
	vaddr = dma_alloc_attrs(me->dev, size, region_phys, GFP_KERNEL,
							dma_attrs);
	if (!vaddr) {
		pr_err("ADSPRPC: Failed to allocate %x remote heap memory\n",
						(unsigned int)size);
		return -ENOMEM;
	}
	return 0;
}
static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
			       size_t len, struct fastrpc_mmap **ppmap)
{
	struct fastrpc_mmap *match = NULL, *map;
	struct hlist_node *n;
	struct fastrpc_apps *me = &gfa;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (match) {
		*ppmap = match;
		return 0;
	}
	hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
		if (map->raddr == va &&
			map->raddr + map->len == va + len &&
			map->refs == 1) {
			match = map;
			hlist_del_init(&map->hn);
			break;
		}
	}
	if (match) {
		*ppmap = match;
		return 0;
	}
	return -ENOTTY;
}
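
/*
 * Drop a reference on a mapping and, once it is unused, undo whatever
 * fastrpc_mmap_create() set up: free remote-heap DMA memory, release
 * ION handles, unmap from the SMMU and reassign secure pages to HLOS.
 */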
static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_file *fl;
	int vmid;
	struct fastrpc_session_ctx *sess;

	if (!map)
		return;
	fl = map->fl;
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		spin_lock(&me->hlock);
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		spin_unlock(&me->hlock);
		if (map->refs > 0)
			return;
	} else {
		map->refs--;
		if (!map->refs)
			hlist_del_init(&map->hn);
		if (map->refs > 0 && !flags)
			return;
	}
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		unsigned long dma_attrs = 0;

		if (me->dev == NULL) {
			pr_err("failed to free remote heap allocation\n");
			return;
		}
		if (map->phys) {
			dma_attrs |=
			DMA_ATTR_SKIP_ZEROING | DMA_ATTR_NO_KERNEL_MAPPING;
			dma_free_attrs(me->dev, map->size, (void *)map->va,
			(dma_addr_t)map->phys, dma_attrs);
		}
	} else if (map->flags == FASTRPC_DMAHANDLE_NOMAP) {
		if (!IS_ERR_OR_NULL(map->handle))
			ion_free(fl->apps->client, map->handle);
	} else {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		if (!IS_ERR_OR_NULL(map->handle))
			ion_free(fl->apps->client, map->handle);
		if (sess && sess->smmu.enabled) {
			if (map->size || map->phys)
				msm_dma_unmap_sg(sess->smmu.dev,
					map->table->sgl,
					map->table->nents, DMA_BIDIRECTIONAL,
					map->buf);
		}
		vmid = fl->apps->channel[fl->cid].vmid;
		if (vmid && map->phys) {
			int srcVM[2] = {VMID_HLOS, vmid};

			hyp_assign_phys(map->phys, buf_page_size(map->size),
				srcVM, 2, destVM, destVMperm, 1);
		}

		if (!IS_ERR_OR_NULL(map->table))
			dma_buf_unmap_attachment(map->attach, map->table,
					DMA_BIDIRECTIONAL);
		if (!IS_ERR_OR_NULL(map->attach))
			dma_buf_detach(map->buf, map->attach);
		if (!IS_ERR_OR_NULL(map->buf))
			dma_buf_put(map->buf);
	}
	kfree(map);
}
static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
			struct fastrpc_session_ctx **session);
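
/*
 * Create (or reuse) a mapping for a buffer described by an ION fd:
 * remote-heap flags get a fresh DMA allocation, FASTRPC_DMAHANDLE_NOMAP
 * only resolves the physical address, and everything else is attached
 * and mapped through the session's SMMU, with hyp_assign_phys() used
 * when the channel owns a VMID.
 */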
static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	unsigned int attr, uintptr_t va, size_t len, int mflags,
	struct fastrpc_mmap **ppmap)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_session_ctx *sess;
	struct fastrpc_apps *apps = fl->apps;
	int cid = fl->cid;
	struct fastrpc_channel_ctx *chan = &apps->channel[cid];
	struct fastrpc_mmap *map = NULL;
	unsigned long attrs;
	dma_addr_t region_phys = 0;
	unsigned long flags;
	int err = 0, vmid;

	if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
		return 0;
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	VERIFY(err, !IS_ERR_OR_NULL(map));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&map->hn);
	map->flags = mflags;
	map->refs = 1;
	map->fl = fl;
	map->fd = fd;
	map->attr = attr;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		map->apps = me;
		map->fl = NULL;
		VERIFY(err, !dma_alloc_memory(&region_phys, len));
		if (err)
			goto bail;
		map->phys = (uintptr_t)region_phys;
		map->size = len;
		map->va = (uintptr_t)map->phys;
	} else if (mflags == FASTRPC_DMAHANDLE_NOMAP) {
		ion_phys_addr_t iphys;

		VERIFY(err, !IS_ERR_OR_NULL(map->handle =
				ion_import_dma_buf_fd(fl->apps->client, fd)));
		if (err)
			goto bail;

		map->uncached = 1;
		map->buf = NULL;
		map->attach = NULL;
		map->table = NULL;
		map->va = 0;
		map->phys = 0;

		err = ion_phys(fl->apps->client, map->handle,
			&iphys, &map->size);
		if (err)
			goto bail;
		map->phys = (uint64_t)iphys;
	} else {
		if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
			pr_info("adsprpc: buffer mapped with persist attr %x\n",
				(unsigned int)map->attr);
			map->refs = 2;
		}
		VERIFY(err, !IS_ERR_OR_NULL(map->handle =
				ion_import_dma_buf_fd(fl->apps->client, fd)));
		if (err)
			goto bail;
		VERIFY(err, !ion_handle_get_flags(fl->apps->client, map->handle,
						&flags));
		if (err)
			goto bail;

		map->secure = flags & ION_FLAG_SECURE;
		if (map->secure) {
			if (!fl->secsctx)
				err = fastrpc_session_alloc(chan, 1,
							&fl->secsctx);
			if (err)
				goto bail;
		}
		if (map->secure)
			sess = fl->secsctx;
		else
			sess = fl->sctx;

		VERIFY(err, !IS_ERR_OR_NULL(sess));
		if (err)
			goto bail;

		map->uncached = !ION_IS_CACHED(flags);
		if (map->attr & FASTRPC_ATTR_NOVA && !sess->smmu.coherent)
			map->uncached = 1;

		VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->attach =
				dma_buf_attach(map->buf, sess->smmu.dev)));
		if (err)
			goto bail;
		VERIFY(err, !IS_ERR_OR_NULL(map->table =
			dma_buf_map_attachment(map->attach,
				DMA_BIDIRECTIONAL)));
		if (err)
			goto bail;
		if (sess->smmu.enabled) {
			attrs = DMA_ATTR_EXEC_MAPPING;

			if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
				(sess->smmu.coherent && map->uncached))
				attrs |= DMA_ATTR_FORCE_NON_COHERENT;
			else if (map->attr & FASTRPC_ATTR_COHERENT)
				attrs |= DMA_ATTR_FORCE_COHERENT;

			VERIFY(err, map->table->nents ==
				msm_dma_map_sg_attrs(sess->smmu.dev,
				map->table->sgl, map->table->nents,
				DMA_BIDIRECTIONAL, map->buf, attrs));
			if (err)
				goto bail;
		} else {
			VERIFY(err, map->table->nents == 1);
			if (err)
				goto bail;
		}
		map->phys = sg_dma_address(map->table->sgl);

		if (sess->smmu.cb) {
			map->phys += ((uint64_t)sess->smmu.cb << 32);
			map->size = sg_dma_len(map->table->sgl);
		} else {
			map->size = buf_page_size(len);
		}

		vmid = fl->apps->channel[fl->cid].vmid;
		if (!sess->smmu.enabled && !vmid) {
			VERIFY(err, map->phys >= me->range.addr &&
			map->phys + map->size <=
			me->range.addr + me->range.size);
			if (err) {
				pr_err("adsprpc: mmap fail out of range\n");
				goto bail;
			}
		}
		if (vmid) {
			int srcVM[1] = {VMID_HLOS};
			int destVM[2] = {VMID_HLOS, vmid};
			int destVMperm[2] = {PERM_READ | PERM_WRITE,
					PERM_READ | PERM_WRITE | PERM_EXEC};

			VERIFY(err, !hyp_assign_phys(map->phys,
					buf_page_size(map->size),
					srcVM, 1, destVM, destVMperm, 2));
			if (err)
				goto bail;
		}
		map->va = va;
	}
	map->len = len;

	fastrpc_mmap_add(map);
	*ppmap = map;

bail:
	if (err && map)
		fastrpc_mmap_free(map, 0);
	return err;
}
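
/*
 * Satisfy a buffer request from the per-file cache when possible;
 * otherwise allocate coherent memory on the session device, retrying
 * once after flushing the cache, and share it with the channel VMID
 * when one is configured.
 */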
static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
			     struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = NULL, *fr = NULL;
	struct hlist_node *n;

	VERIFY(err, size > 0);
	if (err)
		goto bail;

	/* find the smallest buffer that fits in the cache */
	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		if (buf->size >= size && (!fr || fr->size > buf->size))
			fr = buf;
	}
	if (fr)
		hlist_del_init(&fr->hn);
	spin_unlock(&fl->hlock);
	if (fr) {
		*obuf = fr;
		return 0;
	}
	buf = NULL;
	VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
	if (err)
		goto bail;
	INIT_HLIST_NODE(&buf->hn);
	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
				       (void *)&buf->phys, GFP_KERNEL);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
					       (void *)&buf->phys, GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
		goto bail;
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
	if (vmid) {
		int srcVM[1] = {VMID_HLOS};
		int destVM[2] = {VMID_HLOS, vmid};
		int destVMperm[2] = {PERM_READ | PERM_WRITE,
				     PERM_READ | PERM_WRITE | PERM_EXEC};

		VERIFY(err, !hyp_assign_phys(buf->phys, buf_page_size(size),
			srcVM, 1, destVM, destVMperm, 2));
		if (err)
			goto bail;
	}

	*obuf = buf;
 bail:
	if (err && buf)
		fastrpc_buf_free(buf, 0);
	return err;
}
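
/*
 * Invoke-context lifecycle: contexts sit on the file's pending list
 * while an RPC is outstanding and move to the interrupted list when a
 * call is restarted, so a retry by the same thread can pick them up.
 */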
static int context_restore_interrupted(struct fastrpc_file *fl,
				       struct fastrpc_ioctl_invoke_crc *inv,
				       struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
	struct hlist_node *n;
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			if (invoke->sc != ictx->sc || ictx->fl != fl)
				err = -1;
			else {
				ctx = ictx;
				hlist_del_init(&ctx->hn);
				hlist_add_head(&ctx->hn, &fl->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&fl->hlock);
	if (ctx)
		*po = ctx;
	return err;
}
#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int overlap_ptr_cmp(const void *a, const void *b)
{
	struct overlap *pa = *((struct overlap **)a);
	struct overlap *pb = *((struct overlap **)b);
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);
	return st == 0 ? ed : st;
}
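
/*
 * Compute, per buffer, the region that does not overlap an earlier
 * buffer (mstart/mend/offset) so get_args() copies each byte at most
 * once into the shared argument buffer.
 */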
static int context_build_overlap(struct smq_invoke_ctx *ctx)
{
	int i, err = 0;
	remote_arg_t *lpra = ctx->lpra;
	int inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	int nbufs = inbufs + outbufs;
	struct overlap max;

	for (i = 0; i < nbufs; ++i) {
		ctx->overs[i].start = (uintptr_t)lpra[i].buf.pv;
		ctx->overs[i].end = ctx->overs[i].start + lpra[i].buf.len;
		if (lpra[i].buf.len) {
			VERIFY(err, ctx->overs[i].end > ctx->overs[i].start);
			if (err)
				goto bail;
		}
		ctx->overs[i].raix = i;
		ctx->overps[i] = &ctx->overs[i];
	}
	sort(ctx->overps, nbufs, sizeof(*ctx->overps), overlap_ptr_cmp, NULL);
	max.start = 0;
	max.end = 0;
	for (i = 0; i < nbufs; ++i) {
		if (ctx->overps[i]->start < max.end) {
			ctx->overps[i]->mstart = max.end;
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->offset = max.end -
				ctx->overps[i]->start;
			if (ctx->overps[i]->end > max.end) {
				max.end = ctx->overps[i]->end;
			} else {
				ctx->overps[i]->mend = 0;
				ctx->overps[i]->mstart = 0;
			}
		} else {
			ctx->overps[i]->mend = ctx->overps[i]->end;
			ctx->overps[i]->mstart = ctx->overps[i]->start;
			ctx->overps[i]->offset = 0;
			max = *ctx->overps[i];
		}
	}
bail:
	return err;
}
#define K_COPY_FROM_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_from_user((dst),\
			(void const __user *)(src),\
							(size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define K_COPY_TO_USER(err, kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			VERIFY(err, 0 == copy_to_user((void __user *)(dst),\
						(src), (size)));\
		else\
			memmove((dst), (src), (size));\
	} while (0)


static void context_free(struct smq_invoke_ctx *ctx);
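
/*
 * Allocate an invoke context together with its trailing arrays (maps,
 * lpra, fds, attrs, overlaps), copy the argument descriptors in from
 * the caller and reserve a slot in the global ctxtable from which the
 * context's ctxid is derived.
 */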
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
			 struct fastrpc_ioctl_invoke_crc *invokefd,
			 struct smq_invoke_ctx **po)
{
	struct fastrpc_apps *me = &gfa;
	int err = 0, bufs, ii, size = 0;
	struct smq_invoke_ctx *ctx = NULL;
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
		sizeof(*ctx->fds) * (bufs) +
		sizeof(*ctx->attrs) * (bufs) +
		sizeof(*ctx->overs) * (bufs) +
		sizeof(*ctx->overps) * (bufs);

	VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	hlist_add_fake(&ctx->hn);
	ctx->fl = fl;
	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
	ctx->fds = (int *)(&ctx->lpra[bufs]);
	if (me->legacy) {
		ctx->overs = (struct overlap *)(&ctx->fds[bufs]);
		ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
	} else {
		ctx->attrs = (unsigned int *)(&ctx->fds[bufs]);
		ctx->overs = (struct overlap *)(&ctx->attrs[bufs]);
		ctx->overps = (struct overlap **)(&ctx->overs[bufs]);
	}

	K_COPY_FROM_USER(err, kernel, (void *)ctx->lpra, invoke->pra,
					bufs * sizeof(*ctx->lpra));
	if (err)
		goto bail;

	if (invokefd->fds) {
		K_COPY_FROM_USER(err, kernel, ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds));
		if (err)
			goto bail;
	}
	if (invokefd->attrs) {
		K_COPY_FROM_USER(err, kernel, ctx->attrs, invokefd->attrs,
						bufs * sizeof(*ctx->attrs));
		if (err)
			goto bail;
	}
	ctx->crc = (uint32_t *)invokefd->crc;
	ctx->sc = invoke->sc;
	if (bufs) {
		VERIFY(err, 0 == context_build_overlap(ctx));
		if (err)
			goto bail;
	}
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = fl->tgid;
	init_completion(&ctx->work);
	ctx->magic = FASTRPC_CTX_MAGIC;

	spin_lock(&fl->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&fl->hlock);

	spin_lock(&me->ctxlock);
	for (ii = 0; ii < FASTRPC_CTX_MAX; ii++) {
		if (!me->ctxtable[ii]) {
			me->ctxtable[ii] = ctx;
			ctx->ctxid = (ptr_to_uint64(ctx) & ~0xFFF)|(ii << 4);
			break;
		}
	}
	spin_unlock(&me->ctxlock);
	VERIFY(err, ii < FASTRPC_CTX_MAX);
	if (err) {
		pr_err("adsprpc: out of context memory\n");
		goto bail;
	}

	*po = ctx;
bail:
	if (ctx && err)
		context_free(ctx);
	return err;
}
static void context_save_interrupted(struct smq_invoke_ctx *ctx)
{
	struct fastrpc_ctx_lst *clst = &ctx->fl->clst;

	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	hlist_add_head(&ctx->hn, &clst->interrupted);
	spin_unlock(&ctx->fl->hlock);
	/* free the cache on power collapse */
	fastrpc_buf_list_free(ctx->fl);
}
static void context_free(struct smq_invoke_ctx *ctx)
{
	int i;
	struct fastrpc_apps *me = &gfa;
	int nbufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
		    REMOTE_SCALARS_OUTBUFS(ctx->sc);
	spin_lock(&ctx->fl->hlock);
	hlist_del_init(&ctx->hn);
	spin_unlock(&ctx->fl->hlock);
	mutex_lock(&ctx->fl->fl_map_mutex);
	for (i = 0; i < nbufs; ++i)
		fastrpc_mmap_free(ctx->maps[i], 0);

	mutex_unlock(&ctx->fl->fl_map_mutex);
	fastrpc_buf_free(ctx->buf, 1);
	ctx->magic = 0;
	ctx->ctxid = 0;

	spin_lock(&me->ctxlock);
	for (i = 0; i < FASTRPC_CTX_MAX; i++) {
		if (me->ctxtable[i] == ctx) {
			me->ctxtable[i] = NULL;
			break;
		}
	}
	spin_unlock(&me->ctxlock);

	kfree(ctx);
}
static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}

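
/*
 * Completion fan-out helpers: wake every pending and interrupted
 * context for files on a given channel (by cid) or for files bound to
 * a given static PD name (by spdname).
 */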
static void fastrpc_notify_users(struct fastrpc_file *me)
{
	struct smq_invoke_ctx *ictx;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
		complete(&ictx->work);
	}
	hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
		complete(&ictx->work);
	}
	spin_unlock(&me->hlock);

}


static void fastrpc_notify_users_staticpd_pdr(struct fastrpc_file *me)
{
	struct smq_invoke_ctx *ictx;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(ictx, n, &me->clst.pending, hn) {
		if (ictx->msg.pid)
			complete(&ictx->work);
	}
	hlist_for_each_entry_safe(ictx, n, &me->clst.interrupted, hn) {
		if (ictx->msg.pid)
			complete(&ictx->work);
	}
	spin_unlock(&me->hlock);
}


static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid)
{
	struct fastrpc_file *fl;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		if (fl->cid == cid)
			fastrpc_notify_users(fl);
	}
	spin_unlock(&me->hlock);

}

static void fastrpc_notify_pdr_drivers(struct fastrpc_apps *me, char *spdname)
{
	struct fastrpc_file *fl;
	struct hlist_node *n;

	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
		if (fl->spdname && !strcmp(spdname, fl->spdname))
			fastrpc_notify_users_staticpd_pdr(fl);
	}
	spin_unlock(&me->hlock);

}
static void context_list_ctor(struct fastrpc_ctx_lst *me)
{
	INIT_HLIST_HEAD(&me->interrupted);
	INIT_HLIST_HEAD(&me->pending);
}
static void fastrpc_context_list_dtor(struct fastrpc_file *fl)
{
	struct fastrpc_ctx_lst *clst = &fl->clst;
	struct smq_invoke_ctx *ictx = NULL, *ctxfree;
	struct hlist_node *n;

	do {
		ctxfree = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->interrupted, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
	do {
		ctxfree = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(ictx, n, &clst->pending, hn) {
			hlist_del_init(&ictx->hn);
			ctxfree = ictx;
			break;
		}
		spin_unlock(&fl->hlock);
		if (ctxfree)
			context_free(ctxfree);
	} while (ctxfree);
}

static int fastrpc_file_free(struct fastrpc_file *fl);
static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
{
	struct fastrpc_file *fl, *free;
	struct hlist_node *n;

	do {
		free = NULL;
		spin_lock(&me->hlock);
		hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
			hlist_del_init(&fl->hn);
			free = fl;
			break;
		}
		spin_unlock(&me->hlock);
		if (free)
			fastrpc_file_free(free);
	} while (free);
}
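
/*
 * get_args() marshals an invocation: map ION-backed arguments and DMA
 * handles, size and fill the metadata (buffer list, page lists, fd and
 * CRC lists), copy non-ION input buffers into the shared message buffer
 * and flush caches where the SMMU session is not IO-coherent.
 */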
1345static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
1346{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301347 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001348 remote_arg64_t *rpra;
1349 remote_arg_t *lpra = ctx->lpra;
1350 struct smq_invoke_buf *list;
1351 struct smq_phy_page *pages, *ipage;
1352 uint32_t sc = ctx->sc;
1353 int inbufs = REMOTE_SCALARS_INBUFS(sc);
1354 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001355 int handles, bufs = inbufs + outbufs;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001356 uintptr_t args;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301357 size_t rlen = 0, copylen = 0, metalen = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001358 int i, oix;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001359 int err = 0;
1360 int mflags = 0;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001361 uint64_t *fdlist;
Sathish Ambleybae51902017-07-03 15:00:49 -07001362 uint32_t *crclist;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301363 int64_t *perf_counter = getperfcounter(ctx->fl, PERF_COUNT);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001364
1365 /* calculate size of the metadata */
c_mtharue1a5ce12017-10-13 20:47:09 +05301366 rpra = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001367 list = smq_invoke_buf_start(rpra, sc);
1368 pages = smq_phy_page_start(sc, list);
1369 ipage = pages;
1370
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301371 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001372 for (i = 0; i < bufs; ++i) {
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301373 uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
1374 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001375
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301376 mutex_lock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301377 if (ctx->fds[i] && (ctx->fds[i] != -1)) {
1378 unsigned int attrs = 0;
1379
1380 if (ctx->attrs)
1381 attrs = ctx->attrs[i];
1382
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001383 fastrpc_mmap_create(ctx->fl, ctx->fds[i],
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301384 attrs, buf, len,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001385 mflags, &ctx->maps[i]);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301386 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301387 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001388 ipage += 1;
1389 }
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301390 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001391 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301392 mutex_lock(&ctx->fl->fl_map_mutex);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001393 for (i = bufs; i < bufs + handles; i++) {
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +05301394 int dmaflags = 0;
1395
1396 if (ctx->attrs && (ctx->attrs[i] & FASTRPC_ATTR_NOMAP))
1397 dmaflags = FASTRPC_DMAHANDLE_NOMAP;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001398 VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
Tharun Kumar Merugu35a94a52018-02-01 21:09:04 +05301399 FASTRPC_ATTR_NOVA, 0, 0, dmaflags, &ctx->maps[i]));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301400 if (err) {
1401 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001402 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301403 }
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001404 ipage += 1;
1405 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301406 mutex_unlock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301407 if (!me->legacy) {
1408 metalen = copylen = (size_t)&ipage[0] +
1409 (sizeof(uint64_t) * M_FDLIST) +
1410 (sizeof(uint32_t) * M_CRCLIST);
1411 } else {
1412 metalen = copylen = (size_t)&ipage[0];
1413 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001414
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001415 /* calculate len requreed for copying */
1416 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1417 int i = ctx->overps[oix]->raix;
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001418 uintptr_t mstart, mend;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301419 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001420
1421 if (!len)
1422 continue;
1423 if (ctx->maps[i])
1424 continue;
1425 if (ctx->overps[oix]->offset == 0)
1426 copylen = ALIGN(copylen, BALIGN);
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001427 mstart = ctx->overps[oix]->mstart;
1428 mend = ctx->overps[oix]->mend;
1429 VERIFY(err, (mend - mstart) <= LONG_MAX);
1430 if (err)
1431 goto bail;
1432 copylen += mend - mstart;
1433 VERIFY(err, copylen >= 0);
1434 if (err)
1435 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001436 }
1437 ctx->used = copylen;
1438
1439 /* allocate new buffer */
1440 if (copylen) {
1441 VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
1442 if (err)
1443 goto bail;
1444 }
Tharun Kumar Merugue3361f92017-06-22 10:45:43 +05301445 if (ctx->buf->virt && metalen <= copylen)
1446 memset(ctx->buf->virt, 0, metalen);
1447
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001448 /* copy metadata */
1449 rpra = ctx->buf->virt;
1450 ctx->rpra = rpra;
1451 list = smq_invoke_buf_start(rpra, sc);
1452 pages = smq_phy_page_start(sc, list);
1453 ipage = pages;
1454 args = (uintptr_t)ctx->buf->virt + metalen;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001455 for (i = 0; i < bufs + handles; ++i) {
1456 if (lpra[i].buf.len)
1457 list[i].num = 1;
1458 else
1459 list[i].num = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001460 list[i].pgidx = ipage - pages;
1461 ipage++;
1462 }
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301463
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001464 /* map ion buffers */
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301465 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301466 for (i = 0; rpra && i < inbufs + outbufs; ++i) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001467 struct fastrpc_mmap *map = ctx->maps[i];
1468 uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301469 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001470
1471 rpra[i].buf.pv = 0;
1472 rpra[i].buf.len = len;
1473 if (!len)
1474 continue;
1475 if (map) {
1476 struct vm_area_struct *vma;
1477 uintptr_t offset;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301478 uint64_t num = buf_num_pages(buf, len);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001479 int idx = list[i].pgidx;
1480
1481 if (map->attr & FASTRPC_ATTR_NOVA) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001482 offset = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001483 } else {
1484 down_read(&current->mm->mmap_sem);
1485 VERIFY(err, NULL != (vma = find_vma(current->mm,
1486 map->va)));
1487 if (err) {
1488 up_read(&current->mm->mmap_sem);
1489 goto bail;
1490 }
1491 offset = buf_page_start(buf) - vma->vm_start;
1492 up_read(&current->mm->mmap_sem);
1493 VERIFY(err, offset < (uintptr_t)map->size);
1494 if (err)
1495 goto bail;
1496 }
1497 pages[idx].addr = map->phys + offset;
1498 pages[idx].size = num << PAGE_SHIFT;
1499 }
1500 rpra[i].buf.pv = buf;
1501 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001502 PERF_END);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001503 for (i = bufs; i < bufs + handles; ++i) {
1504 struct fastrpc_mmap *map = ctx->maps[i];
1505
1506 pages[i].addr = map->phys;
1507 pages[i].size = map->size;
1508 }
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301509 if (!me->legacy) {
1510 fdlist = (uint64_t *)&pages[bufs + handles];
1511 for (i = 0; i < M_FDLIST; i++)
1512 fdlist[i] = 0;
1513 crclist = (uint32_t *)&fdlist[M_FDLIST];
1514 memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
1515 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001516
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001517 /* copy non ion buffers */
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301518 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001519 rlen = copylen - metalen;
Tharun Kumar Meruguc230bd72018-01-29 18:02:42 +05301520 for (oix = 0; rpra && oix < inbufs + outbufs; ++oix) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001521 int i = ctx->overps[oix]->raix;
1522 struct fastrpc_mmap *map = ctx->maps[i];
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301523 size_t mlen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001524 uint64_t buf;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05301525 size_t len = lpra[i].buf.len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001526
1527 if (!len)
1528 continue;
1529 if (map)
1530 continue;
1531 if (ctx->overps[oix]->offset == 0) {
1532 rlen -= ALIGN(args, BALIGN) - args;
1533 args = ALIGN(args, BALIGN);
1534 }
Sathish Ambleyd209c1e2016-12-13 15:27:30 -08001535 mlen = ctx->overps[oix]->mend - ctx->overps[oix]->mstart;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001536 VERIFY(err, rlen >= mlen);
1537 if (err)
1538 goto bail;
1539 rpra[i].buf.pv = (args - ctx->overps[oix]->offset);
1540 pages[list[i].pgidx].addr = ctx->buf->phys -
1541 ctx->overps[oix]->offset +
1542 (copylen - rlen);
1543 pages[list[i].pgidx].addr =
1544 buf_page_start(pages[list[i].pgidx].addr);
1545 buf = rpra[i].buf.pv;
1546 pages[list[i].pgidx].size = buf_num_pages(buf, len) * PAGE_SIZE;
1547 if (i < inbufs) {
1548 K_COPY_FROM_USER(err, kernel, uint64_to_ptr(buf),
1549 lpra[i].buf.pv, len);
1550 if (err)
1551 goto bail;
1552 }
1553 args = args + mlen;
1554 rlen -= mlen;
1555 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001556 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001557
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301558 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001559 for (oix = 0; oix < inbufs + outbufs; ++oix) {
1560 int i = ctx->overps[oix]->raix;
1561 struct fastrpc_mmap *map = ctx->maps[i];
1562
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001563 if (map && map->uncached)
1564 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301565 if (ctx->fl->sctx->smmu.coherent &&
1566 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1567 continue;
1568 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1569 continue;
1570
Tharun Kumar Meruguc230bd72018-01-29 18:02:42 +05301571 if (rpra && rpra[i].buf.len && ctx->overps[oix]->mstart)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001572 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1573 uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
1574 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001575 PERF_END);
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05301576 for (i = bufs; rpra && i < bufs + handles; i++) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001577 rpra[i].dma.fd = ctx->fds[i];
1578 rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
1579 rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001580 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001581
1582 if (!ctx->fl->sctx->smmu.coherent) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301583 PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001584 dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001585 PERF_END);
1586 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001587 bail:
1588 return err;
1589}
1590
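/*
 * Layout of the metadata buffer built above (non-legacy transport), as
 * implied by the pointer arithmetic in get_args()/put_args(), with
 * N = number of buffer plus handle arguments:
 *   remote_arg64_t rpra[N] | struct smq_invoke_buf list[N] |
 *   struct smq_phy_page pages[N] | uint64_t fdlist[M_FDLIST] |
 *   uint32_t crclist[M_CRCLIST] | copied non-ion payload (from metalen on)
 *
 * put_args() runs after the remote call returns: it copies unmapped output
 * buffers back to user space, drops the per-argument mappings, frees any
 * maps the DSP listed in fdlist, and returns the CRC list if requested.
 */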
1591static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
1592 remote_arg_t *upra)
1593{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301594 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001595 uint32_t sc = ctx->sc;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001596 struct smq_invoke_buf *list;
1597 struct smq_phy_page *pages;
1598 struct fastrpc_mmap *mmap;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301599 uint64_t *fdlist = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07001600 uint32_t *crclist = NULL;
1601
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001602 remote_arg64_t *rpra = ctx->rpra;
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001603 int i, inbufs, outbufs, handles;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001604 int err = 0;
1605
1606 inbufs = REMOTE_SCALARS_INBUFS(sc);
1607 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001608 handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
1609 list = smq_invoke_buf_start(ctx->rpra, sc);
1610 pages = smq_phy_page_start(sc, list);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301611 if (!me->legacy) {
1612 fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
1613 crclist = (uint32_t *)(fdlist + M_FDLIST);
1614 }
Sathish Ambleybae51902017-07-03 15:00:49 -07001615
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001616 for (i = inbufs; i < inbufs + outbufs; ++i) {
1617 if (!ctx->maps[i]) {
1618 K_COPY_TO_USER(err, kernel,
1619 ctx->lpra[i].buf.pv,
1620 uint64_to_ptr(rpra[i].buf.pv),
1621 rpra[i].buf.len);
1622 if (err)
1623 goto bail;
1624 } else {
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301625 mutex_lock(&ctx->fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05301626 fastrpc_mmap_free(ctx->maps[i], 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301627 mutex_unlock(&ctx->fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05301628 ctx->maps[i] = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001629 }
1630 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301631 mutex_lock(&ctx->fl->fl_map_mutex);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301632 if (fdlist && (inbufs + outbufs + handles)) {
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001633 for (i = 0; i < M_FDLIST; i++) {
1634 if (!fdlist[i])
1635 break;
1636 if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
Sathish Ambleyae5ee542017-01-16 22:24:23 -08001637 0, 0, &mmap))
c_mtharu7bd6a422017-10-17 18:15:37 +05301638 fastrpc_mmap_free(mmap, 0);
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001639 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001640 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05301641 mutex_unlock(&ctx->fl->fl_map_mutex);
Sathish Ambleybae51902017-07-03 15:00:49 -07001642 if (ctx->crc && crclist && rpra)
c_mtharue1a5ce12017-10-13 20:47:09 +05301643 K_COPY_TO_USER(err, kernel, ctx->crc,
Sathish Ambleybae51902017-07-03 15:00:49 -07001644 crclist, M_CRCLIST*sizeof(uint32_t));
1645
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001646 bail:
1647 return err;
1648}
1649
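/*
 * inv_args_pre() - pre-invoke cache maintenance. For each cached output
 * buffer that is not covered by an io-coherent SMMU session, flush the
 * partial cache lines at its unaligned start and end so that dirty CPU
 * lines cannot later overwrite data written by the DSP. Buffers that
 * share a page with the metadata buffer are skipped.
 */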
1650static void inv_args_pre(struct smq_invoke_ctx *ctx)
1651{
1652 int i, inbufs, outbufs;
1653 uint32_t sc = ctx->sc;
1654 remote_arg64_t *rpra = ctx->rpra;
1655 uintptr_t end;
1656
1657 inbufs = REMOTE_SCALARS_INBUFS(sc);
1658 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1659 for (i = inbufs; i < inbufs + outbufs; ++i) {
1660 struct fastrpc_mmap *map = ctx->maps[i];
1661
1662 if (map && map->uncached)
1663 continue;
1664 if (!rpra[i].buf.len)
1665 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301666 if (ctx->fl->sctx->smmu.coherent &&
1667 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1668 continue;
1669 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1670 continue;
1671
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001672 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1673 buf_page_start(rpra[i].buf.pv))
1674 continue;
1675 if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv)))
1676 dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
1677 (char *)(uint64_to_ptr(rpra[i].buf.pv + 1)));
1678 end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
1679 rpra[i].buf.len);
1680 if (!IS_CACHE_ALIGNED(end))
1681 dmac_flush_range((char *)end,
1682 (char *)end + 1);
1683 }
1684}
1685
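/*
 * inv_args() - post-invoke cache maintenance. Invalidate the CPU view of
 * every cached, non-coherent output buffer so the results written by the
 * DSP become visible: ION-backed maps go through msm_ion_do_cache_op(),
 * everything else through dmac_inv_range(). The metadata region itself
 * is invalidated at the end.
 */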
1686static void inv_args(struct smq_invoke_ctx *ctx)
1687{
1688 int i, inbufs, outbufs;
1689 uint32_t sc = ctx->sc;
1690 remote_arg64_t *rpra = ctx->rpra;
1691 int used = ctx->used;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001692
1693 inbufs = REMOTE_SCALARS_INBUFS(sc);
1694 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
1695 for (i = inbufs; i < inbufs + outbufs; ++i) {
1696 struct fastrpc_mmap *map = ctx->maps[i];
1697
1698 if (map && map->uncached)
1699 continue;
1700 if (!rpra[i].buf.len)
1701 continue;
Tharun Kumar Merugu2e5f12e2017-07-06 12:04:40 +05301702 if (ctx->fl->sctx->smmu.coherent &&
1703 !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
1704 continue;
1705 if (map && (map->attr & FASTRPC_ATTR_COHERENT))
1706 continue;
1707
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001708 if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
1709 buf_page_start(rpra[i].buf.pv)) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001710 continue;
1711 }
1712 if (map && map->handle)
1713 msm_ion_do_cache_op(ctx->fl->apps->client, map->handle,
1714 (char *)uint64_to_ptr(rpra[i].buf.pv),
1715 rpra[i].buf.len, ION_IOC_INV_CACHES);
1716 else
1717 dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv),
1718 (char *)uint64_to_ptr(rpra[i].buf.pv
1719 + rpra[i].buf.len));
1720 }
1721
Sathish Ambley58dc64d2016-11-29 17:11:53 -08001722 if (rpra)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001723 dmac_inv_range(rpra, (char *)rpra + used);
1724}
1725
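/*
 * fastrpc_invoke_send() - build the smq_msg for this context (tgid, tid
 * tagged with the session id, ctxid | pd, remote handle, scalars, and the
 * physical page of the metadata buffer) and hand it to the transport:
 * glink_tx() when the channel uses G-Link, otherwise smd_write() under
 * the apps hlock. For G-Link the channel must still be in the same SSR
 * generation and in FASTRPC_LINK_CONNECTED state.
 */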
1726static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
1727 uint32_t kernel, uint32_t handle)
1728{
1729 struct smq_msg *msg = &ctx->msg;
1730 struct fastrpc_file *fl = ctx->fl;
1731 struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid];
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301732 int err = 0, len;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001733
c_mtharue1a5ce12017-10-13 20:47:09 +05301734 VERIFY(err, NULL != channel_ctx->chan);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001735 if (err)
1736 goto bail;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301737 msg->pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001738 msg->tid = current->pid;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301739 if (fl->sessionid)
1740 msg->tid |= (1 << SESSION_ID_INDEX);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001741 if (kernel)
1742 msg->pid = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301743 msg->invoke.header.ctx = ctx->ctxid | fl->pd;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001744 msg->invoke.header.handle = handle;
1745 msg->invoke.header.sc = ctx->sc;
1746 msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0;
1747 msg->invoke.page.size = buf_page_size(ctx->used);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301748 if (fl->apps->glink) {
1749 if (fl->ssrcount != channel_ctx->ssrcount) {
1750 err = -ECONNRESET;
1751 goto bail;
1752 }
1753 VERIFY(err, channel_ctx->link.port_state ==
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001754 FASTRPC_LINK_CONNECTED);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301755 if (err)
1756 goto bail;
1757 err = glink_tx(channel_ctx->chan,
1758 (void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg),
1759 GLINK_TX_REQ_INTENT);
1760 } else {
1761 spin_lock(&fl->apps->hlock);
1762 len = smd_write((smd_channel_t *)
1763 channel_ctx->chan,
1764 msg, sizeof(*msg));
1765 spin_unlock(&fl->apps->hlock);
1766 VERIFY(err, len == sizeof(*msg));
1767 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001768 bail:
1769 return err;
1770}
1771
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301772static void fastrpc_smd_read_handler(int cid)
1773{
1774 struct fastrpc_apps *me = &gfa;
1775 struct smq_invoke_rsp rsp = {0};
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301776 int ret = 0, err = 0;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301777 uint32_t index;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301778
1779 do {
1780 ret = smd_read_from_cb(me->channel[cid].chan, &rsp,
1781 sizeof(rsp));
1782 if (ret != sizeof(rsp))
1783 break;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301784
1785 index = (uint32_t)((rsp.ctx & FASTRPC_CTXID_MASK) >> 4);
1786 VERIFY(err, index < FASTRPC_CTX_MAX);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301787 if (err)
1788 goto bail;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301789
1790 VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
1791 if (err)
1792 goto bail;
1793
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05301794 VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp.ctx & ~3)) &&
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301795 me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
1796 if (err)
1797 goto bail;
1798
1799 context_notify_user(me->ctxtable[index], rsp.retval);
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05301800 } while (ret == sizeof(rsp));
1801bail:
1802 if (err)
1803 pr_err("adsprpc: invalid response or context\n");
1804
1805}
1806
1807static void smd_event_handler(void *priv, unsigned int event)
1808{
1809 struct fastrpc_apps *me = &gfa;
1810 int cid = (int)(uintptr_t)priv;
1811
1812 switch (event) {
1813 case SMD_EVENT_OPEN:
1814 complete(&me->channel[cid].workport);
1815 break;
1816 case SMD_EVENT_CLOSE:
1817 fastrpc_notify_drivers(me, cid);
1818 break;
1819 case SMD_EVENT_DATA:
1820 fastrpc_smd_read_handler(cid);
1821 break;
1822 }
1823}
1824
1825
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001826static void fastrpc_init(struct fastrpc_apps *me)
1827{
1828 int i;
1829
1830 INIT_HLIST_HEAD(&me->drivers);
Tharun Kumar Merugubcd6fbf2018-01-04 17:49:34 +05301831 INIT_HLIST_HEAD(&me->maps);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001832 spin_lock_init(&me->hlock);
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05301833 spin_lock_init(&me->ctxlock);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05301834 mutex_init(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001835 me->channel = &gcinfo[0];
1836 for (i = 0; i < NUM_CHANNELS; i++) {
1837 init_completion(&me->channel[i].work);
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05301838 init_completion(&me->channel[i].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001839 me->channel[i].sesscount = 0;
1840 }
1841}
1842
1843static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
1844
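/*
 * fastrpc_internal_invoke() - core invocation path. Restores a previously
 * interrupted context for user calls (or allocates a new one), marshals
 * arguments with get_args(), does pre-invoke cache maintenance, sends the
 * message, waits for the DSP completion, invalidates caches, copies
 * results back with put_args(), and frees or parks the context. Perf
 * counters are updated when profiling is enabled, and ECONNRESET is
 * reported if an SSR happened while the call was in flight.
 */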
1845static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
1846 uint32_t kernel,
Sathish Ambleybae51902017-07-03 15:00:49 -07001847 struct fastrpc_ioctl_invoke_crc *inv)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001848{
c_mtharue1a5ce12017-10-13 20:47:09 +05301849 struct smq_invoke_ctx *ctx = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001850 struct fastrpc_ioctl_invoke *invoke = &inv->inv;
1851 int cid = fl->cid;
1852 int interrupted = 0;
1853 int err = 0;
Maria Yu757199c2017-09-22 16:05:49 +08001854 struct timespec invoket = {0};
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301855 int64_t *perf_counter = getperfcounter(fl, PERF_COUNT);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001856
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001857 if (fl->profile)
1858 getnstimeofday(&invoket);
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301859
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301860
Tharun Kumar Merugue3edf3e2017-07-27 12:34:07 +05301861 VERIFY(err, fl->sctx != NULL);
1862 if (err)
1863 goto bail;
1864 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
1865 if (err)
1866 goto bail;
1867
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001868 if (!kernel) {
1869 VERIFY(err, 0 == context_restore_interrupted(fl, inv,
1870 &ctx));
1871 if (err)
1872 goto bail;
1873 if (fl->sctx->smmu.faults)
1874 err = FASTRPC_ENOSUCH;
1875 if (err)
1876 goto bail;
1877 if (ctx)
1878 goto wait;
1879 }
1880
1881 VERIFY(err, 0 == context_alloc(fl, kernel, inv, &ctx));
1882 if (err)
1883 goto bail;
1884
1885 if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301886 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_GETARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001887 VERIFY(err, 0 == get_args(kernel, ctx));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001888 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001889 if (err)
1890 goto bail;
1891 }
1892
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301893 if (!fl->sctx->smmu.coherent) {
1894 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001895 inv_args_pre(ctx);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301896 PERF_END);
1897 }
1898
1899 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_LINK),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001900 VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001901 PERF_END);
1902
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001903 if (err)
1904 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001905 wait:
1906 if (kernel)
1907 wait_for_completion(&ctx->work);
1908 else {
1909 interrupted = wait_for_completion_interruptible(&ctx->work);
1910 VERIFY(err, 0 == (err = interrupted));
1911 if (err)
1912 goto bail;
1913 }
Sathish Ambleyc432b502017-06-05 12:03:42 -07001914
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301915 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
Sathish Ambleyc432b502017-06-05 12:03:42 -07001916 if (!fl->sctx->smmu.coherent)
1917 inv_args(ctx);
1918 PERF_END);
1919
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001920 VERIFY(err, 0 == (err = ctx->retval));
1921 if (err)
1922 goto bail;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001923
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301924 PERF(fl->profile, GET_COUNTER(perf_counter, PERF_PUTARGS),
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001925 VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001926 PERF_END);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001927 if (err)
1928 goto bail;
1929 bail:
1930 if (ctx && interrupted == -ERESTARTSYS)
1931 context_save_interrupted(ctx);
1932 else if (ctx)
1933 context_free(ctx);
1934 if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
1935 err = ECONNRESET;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001936
1937 if (fl->profile && !interrupted) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05301938 if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER) {
1939 int64_t *count = GET_COUNTER(perf_counter, PERF_INVOKE);
1940
1941 if (count)
1942 *count += getnstimediff(&invoket);
1943 }
1944 if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX) {
1945 int64_t *count = GET_COUNTER(perf_counter, PERF_COUNT);
1946
1947 if (count)
1948 *count = *count+1;
1949 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08001950 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001951 return err;
1952}
1953
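/*
 * Map a protection-domain service name (e.g. the audio or sensors PD)
 * to its static-PD session slot on the ADSP channel.
 */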
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05301954static int fastrpc_get_adsp_session(char *name, int *session)
1955{
1956 struct fastrpc_apps *me = &gfa;
1957 int err = 0, i;
1958
1959 for (i = 0; i < NUM_SESSIONS; i++) {
1960 if (!me->channel[0].spd[i].spdname)
1961 continue;
1962 if (!strcmp(name, me->channel[0].spd[i].spdname))
1963 break;
1964 }
1965 VERIFY(err, i < NUM_SESSIONS);
1966 if (err)
1967 goto bail;
1968 *session = i;
1969bail:
1970 return err;
1971}
1972
1973static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl);
Sathish Ambley36849af2017-02-02 09:35:55 -08001974static int fastrpc_channel_open(struct fastrpc_file *fl);
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05301975static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl);
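/*
 * fastrpc_init_process() - handle FASTRPC_IOCTL_INIT. Three flavours:
 * FASTRPC_INIT_ATTACH / _ATTACH_SENSORS attach to an existing guest or
 * sensors static PD; FASTRPC_INIT_CREATE spawns a dynamic user PD,
 * mapping the shell file and the preallocated memory region and passing
 * them to the DSP; FASTRPC_INIT_CREATE_STATIC brings up a named static PD
 * (for "audiopd" after a PDR cleanup), donating the remote heap via
 * hyp_assign_phys() the first time. Errors undo the hyp assignment and
 * free any mappings created here.
 */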
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001976static int fastrpc_init_process(struct fastrpc_file *fl,
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001977 struct fastrpc_ioctl_init_attrs *uproc)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001978{
1979 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05301980 struct fastrpc_apps *me = &gfa;
Sathish Ambleybae51902017-07-03 15:00:49 -07001981 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08001982 struct fastrpc_ioctl_init *init = &uproc->init;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001983 struct smq_phy_page pages[1];
c_mtharue1a5ce12017-10-13 20:47:09 +05301984 struct fastrpc_mmap *file = NULL, *mem = NULL;
1985 char *proc_name = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001986
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05301987 VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
Sathish Ambley36849af2017-02-02 09:35:55 -08001988 if (err)
1989 goto bail;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05301990 if (init->flags == FASTRPC_INIT_ATTACH ||
1991 init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001992 remote_arg_t ra[1];
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05301993 int tgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07001994
1995 ra[0].buf.pv = (void *)&tgid;
1996 ra[0].buf.len = sizeof(tgid);
1997 ioctl.inv.handle = 1;
1998 ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
1999 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302000 ioctl.fds = NULL;
2001 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002002 ioctl.crc = NULL;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302003 if (init->flags == FASTRPC_INIT_ATTACH)
2004 fl->pd = 0;
2005 else if (init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
2006 fl->spdname = SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME;
2007 fl->pd = 2;
2008 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002009 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2010 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2011 if (err)
2012 goto bail;
2013 } else if (init->flags == FASTRPC_INIT_CREATE) {
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002014 remote_arg_t ra[6];
2015 int fds[6];
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002016 int mflags = 0;
2017 struct {
2018 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302019 unsigned int namelen;
2020 unsigned int filelen;
2021 unsigned int pageslen;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002022 int attrs;
2023 int siglen;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002024 } inbuf;
2025
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302026 inbuf.pgid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002027 inbuf.namelen = strlen(current->comm) + 1;
2028 inbuf.filelen = init->filelen;
2029 fl->pd = 1;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302030
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05302031 VERIFY(err, access_ok(0, (void __user *)init->file,
2032 init->filelen));
2033 if (err)
2034 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002035 if (init->filelen) {
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302036 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002037 VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0,
2038 init->file, init->filelen, mflags, &file));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302039 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002040 if (err)
2041 goto bail;
2042 }
2043 inbuf.pageslen = 1;
Tharun Kumar Merugudf852892017-12-07 16:27:37 +05302044 VERIFY(err, access_ok(1, (void __user *)init->mem,
2045 init->memlen));
2046 if (err)
2047 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302048 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002049 VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
2050 init->mem, init->memlen, mflags, &mem));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302051 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002052 if (err)
2053 goto bail;
2055 ra[0].buf.pv = (void *)&inbuf;
2056 ra[0].buf.len = sizeof(inbuf);
2057 fds[0] = 0;
2058
2059 ra[1].buf.pv = (void *)current->comm;
2060 ra[1].buf.len = inbuf.namelen;
2061 fds[1] = 0;
2062
2063 ra[2].buf.pv = (void *)init->file;
2064 ra[2].buf.len = inbuf.filelen;
2065 fds[2] = init->filefd;
2066
2067 pages[0].addr = mem->phys;
2068 pages[0].size = mem->size;
2069 ra[3].buf.pv = (void *)pages;
2070 ra[3].buf.len = 1 * sizeof(*pages);
2071 fds[3] = 0;
2072
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002073 inbuf.attrs = uproc->attrs;
2074 ra[4].buf.pv = (void *)&(inbuf.attrs);
2075 ra[4].buf.len = sizeof(inbuf.attrs);
2076 fds[4] = 0;
2077
2078 inbuf.siglen = uproc->siglen;
2079 ra[5].buf.pv = (void *)&(inbuf.siglen);
2080 ra[5].buf.len = sizeof(inbuf.siglen);
2081 fds[5] = 0;
2082
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002083 ioctl.inv.handle = 1;
2084 ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
Sathish Ambleyd6300c32017-01-18 09:50:43 -08002085 if (uproc->attrs)
2086 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002087 ioctl.inv.pra = ra;
2088 ioctl.fds = fds;
c_mtharue1a5ce12017-10-13 20:47:09 +05302089 ioctl.attrs = NULL;
2090 ioctl.crc = NULL;
2091 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2092 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2093 if (err)
2094 goto bail;
2095 } else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
2096 remote_arg_t ra[3];
2097 uint64_t phys = 0;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302098 size_t size = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302099 int fds[3];
2100 struct {
2101 int pgid;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302102 unsigned int namelen;
2103 unsigned int pageslen;
c_mtharue1a5ce12017-10-13 20:47:09 +05302104 } inbuf;
2105
2106 if (!init->filelen)
2107 goto bail;
2108
2109 proc_name = kzalloc(init->filelen, GFP_KERNEL);
2110 VERIFY(err, !IS_ERR_OR_NULL(proc_name));
2111 if (err)
2112 goto bail;
2113 VERIFY(err, 0 == copy_from_user((void *)proc_name,
2114 (void __user *)init->file, init->filelen));
2115 if (err)
2116 goto bail;
2117
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302118 fl->pd = 1;
c_mtharue1a5ce12017-10-13 20:47:09 +05302119 inbuf.pgid = current->tgid;
c_mtharu81a0aa72017-11-07 16:13:21 +05302120 inbuf.namelen = init->filelen;
c_mtharue1a5ce12017-10-13 20:47:09 +05302121 inbuf.pageslen = 0;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302122
2123 if (!strcmp(proc_name, "audiopd")) {
2124 fl->spdname = AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME;
2125 VERIFY(err, !fastrpc_mmap_remove_pdr(fl));
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05302126 if (err)
2127 goto bail;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302128 }
2129
c_mtharue1a5ce12017-10-13 20:47:09 +05302130 if (!me->staticpd_flags) {
2131 inbuf.pageslen = 1;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302132 mutex_lock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302133 VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
2134 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
2135 &mem));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302136 mutex_unlock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302137 if (err)
2138 goto bail;
2139 phys = mem->phys;
2140 size = mem->size;
2141 VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302142 hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
2143 me->channel[fl->cid].rhvm.vmperm,
2144 me->channel[fl->cid].rhvm.vmcount));
c_mtharue1a5ce12017-10-13 20:47:09 +05302145 if (err) {
2146 pr_err("ADSPRPC: hyp_assign_phys fail err %d\n",
2147 err);
2148 pr_err("map->phys %llx, map->size %d\n",
2149 phys, (int)size);
2150 goto bail;
2151 }
2152 me->staticpd_flags = 1;
2153 }
2154
2155 ra[0].buf.pv = (void *)&inbuf;
2156 ra[0].buf.len = sizeof(inbuf);
2157 fds[0] = 0;
2158
2159 ra[1].buf.pv = (void *)proc_name;
2160 ra[1].buf.len = inbuf.namelen;
2161 fds[1] = 0;
2162
2163 pages[0].addr = phys;
2164 pages[0].size = size;
2165
2166 ra[2].buf.pv = (void *)pages;
2167 ra[2].buf.len = sizeof(*pages);
2168 fds[2] = 0;
2169 ioctl.inv.handle = 1;
2170
2171 ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
2172 ioctl.inv.pra = ra;
2173 ioctl.fds = NULL;
2174 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002175 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002176 VERIFY(err, !(err = fastrpc_internal_invoke(fl,
2177 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2178 if (err)
2179 goto bail;
2180 } else {
2181 err = -ENOTTY;
2182 }
2183bail:
c_mtharud91205a2017-11-07 16:01:06 +05302184 kfree(proc_name);
c_mtharue1a5ce12017-10-13 20:47:09 +05302185 if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
2186 me->staticpd_flags = 0;
2187 if (mem && err) {
2188 if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
2189 hyp_assign_phys(mem->phys, (uint64_t)mem->size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302190 me->channel[fl->cid].rhvm.vmid,
2191 me->channel[fl->cid].rhvm.vmcount,
2192 hlosvm, hlosvmperm, 1);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302193 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302194 fastrpc_mmap_free(mem, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302195 mutex_unlock(&fl->fl_map_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05302196 }
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302197 if (file) {
2198 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302199 fastrpc_mmap_free(file, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302200 mutex_unlock(&fl->fl_map_mutex);
2201 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002202 return err;
2203}
2204
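/*
 * Ask the DSP to tear down the remote process that belongs to this
 * file's tgid (remote handle 1, one input buffer carrying the group id).
 */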
2205static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
2206{
2207 int err = 0;
Sathish Ambleybae51902017-07-03 15:00:49 -07002208 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002209 remote_arg_t ra[1];
2210 int tgid = 0;
2211
Sathish Ambley36849af2017-02-02 09:35:55 -08002212 VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
2213 if (err)
2214 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05302215 VERIFY(err, fl->apps->channel[fl->cid].chan != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002216 if (err)
2217 goto bail;
2218 tgid = fl->tgid;
2219 ra[0].buf.pv = (void *)&tgid;
2220 ra[0].buf.len = sizeof(tgid);
2221 ioctl.inv.handle = 1;
2222 ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
2223 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302224 ioctl.fds = NULL;
2225 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002226 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002227 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2228 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2229bail:
2230 return err;
2231}
2232
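/*
 * fastrpc_mmap_on_dsp() - register a mapping with the DSP and record the
 * remote address in map->raddr. ADSP_MMAP_HEAP_ADDR regions are then
 * protected through the TZ_PIL_PROTECT_MEM_SUBSYS_ID SCM call, and
 * ADSP_MMAP_REMOTE_HEAP_ADDR regions are hyp-assigned from HLOS to the
 * channel's remote heap VMs.
 */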
2233static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
2234 struct fastrpc_mmap *map)
2235{
Sathish Ambleybae51902017-07-03 15:00:49 -07002236 struct fastrpc_ioctl_invoke_crc ioctl;
c_mtharu63ffc012017-11-16 15:26:56 +05302237 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002238 struct smq_phy_page page;
2239 int num = 1;
2240 remote_arg_t ra[3];
2241 int err = 0;
2242 struct {
2243 int pid;
2244 uint32_t flags;
2245 uintptr_t vaddrin;
2246 int num;
2247 } inargs;
2248 struct {
2249 uintptr_t vaddrout;
2250 } routargs;
2251
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302252 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002253 inargs.vaddrin = (uintptr_t)map->va;
2254 inargs.flags = flags;
2255 inargs.num = fl->apps->compat ? num * sizeof(page) : num;
2256 ra[0].buf.pv = (void *)&inargs;
2257 ra[0].buf.len = sizeof(inargs);
2258 page.addr = map->phys;
2259 page.size = map->size;
2260 ra[1].buf.pv = (void *)&page;
2261 ra[1].buf.len = num * sizeof(page);
2262
2263 ra[2].buf.pv = (void *)&routargs;
2264 ra[2].buf.len = sizeof(routargs);
2265
2266 ioctl.inv.handle = 1;
2267 if (fl->apps->compat)
2268 ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
2269 else
2270 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
2271 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302272 ioctl.fds = NULL;
2273 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002274 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002275 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2276 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2277 map->raddr = (uintptr_t)routargs.vaddrout;
c_mtharue1a5ce12017-10-13 20:47:09 +05302278 if (err)
2279 goto bail;
2280 if (flags == ADSP_MMAP_HEAP_ADDR) {
2281 struct scm_desc desc = {0};
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002282
c_mtharue1a5ce12017-10-13 20:47:09 +05302283 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
2284 desc.args[1] = map->phys;
2285 desc.args[2] = map->size;
2286 desc.arginfo = SCM_ARGS(3);
2287 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
2288 TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
2289 } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
c_mtharue1a5ce12017-10-13 20:47:09 +05302290 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302291 hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
2292 me->channel[fl->cid].rhvm.vmperm,
2293 me->channel[fl->cid].rhvm.vmcount));
c_mtharue1a5ce12017-10-13 20:47:09 +05302294 if (err)
2295 goto bail;
2296 }
2297bail:
2298 return err;
2299}
2300
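/*
 * Undo the protection applied when a heap mapping was created: for
 * ADSP_MMAP_HEAP_ADDR fetch the secure key from the DSP and clear the
 * protection via TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID; for
 * ADSP_MMAP_REMOTE_HEAP_ADDR reassign the memory back to HLOS.
 */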
2301static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
2302 struct fastrpc_mmap *map)
2303{
2304 int err = 0;
c_mtharu63ffc012017-11-16 15:26:56 +05302305 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +05302306 int destVM[1] = {VMID_HLOS};
2307 int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
2308
2309 if (map->flags == ADSP_MMAP_HEAP_ADDR) {
2310 struct fastrpc_ioctl_invoke_crc ioctl;
2311 struct scm_desc desc = {0};
2312 remote_arg_t ra[1];
2314 struct {
2315 uint8_t skey;
2316 } routargs;
2317
2318 ra[0].buf.pv = (void *)&routargs;
2319 ra[0].buf.len = sizeof(routargs);
2320
2321 ioctl.inv.handle = 1;
2322 ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
2323 ioctl.inv.pra = ra;
2324 ioctl.fds = NULL;
2325 ioctl.attrs = NULL;
2326 ioctl.crc = NULL;
2327 if (fl == NULL)
2328 goto bail;
2329
2330 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2331 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
2332 if (err)
2333 goto bail;
2334 desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
2335 desc.args[1] = map->phys;
2336 desc.args[2] = map->size;
2337 desc.args[3] = routargs.skey;
2338 desc.arginfo = SCM_ARGS(4);
2339 err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
2340 TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
2341 } else if (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2342 VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05302343 me->channel[fl->cid].rhvm.vmid,
2344 me->channel[fl->cid].rhvm.vmcount,
2345 destVM, destVMperm, 1));
c_mtharue1a5ce12017-10-13 20:47:09 +05302346 if (err)
2347 goto bail;
2348 }
2349
2350bail:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002351 return err;
2352}
2353
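/*
 * Remove a mapping on the DSP side (pid, remote address and size), then
 * release heap protections through fastrpc_munmap_on_dsp_rh() when the
 * map was a heap or remote-heap mapping.
 */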
2354static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
2355 struct fastrpc_mmap *map)
2356{
Sathish Ambleybae51902017-07-03 15:00:49 -07002357 struct fastrpc_ioctl_invoke_crc ioctl;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002358 remote_arg_t ra[1];
2359 int err = 0;
2360 struct {
2361 int pid;
2362 uintptr_t vaddrout;
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302363 size_t size;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002364 } inargs;
2365
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05302366 inargs.pid = fl->tgid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002367 inargs.size = map->size;
2368 inargs.vaddrout = map->raddr;
2369 ra[0].buf.pv = (void *)&inargs;
2370 ra[0].buf.len = sizeof(inargs);
2371
2372 ioctl.inv.handle = 1;
2373 if (fl->apps->compat)
2374 ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
2375 else
2376 ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
2377 ioctl.inv.pra = ra;
c_mtharue1a5ce12017-10-13 20:47:09 +05302378 ioctl.fds = NULL;
2379 ioctl.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07002380 ioctl.crc = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002381 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
2382 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
c_mtharue1a5ce12017-10-13 20:47:09 +05302383 if (err)
2384 goto bail;
2385 if (map->flags == ADSP_MMAP_HEAP_ADDR ||
2386 map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
2387 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, map));
2388 if (err)
2389 goto bail;
2390 }
2391bail:
2392 return err;
2393}
2394
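/*
 * Subsystem-restart cleanup: pop remote-heap maps off the global list one
 * at a time, unmap/unprotect each on the DSP side, capture the region
 * through the remote-heap ramdump device when ramdumps are enabled, and
 * free it. On failure the current map is put back on the list.
 */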
2395static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
2396{
2397 struct fastrpc_mmap *match = NULL, *map = NULL;
2398 struct hlist_node *n = NULL;
2399 int err = 0, ret = 0;
2400 struct fastrpc_apps *me = &gfa;
2401 struct ramdump_segment *ramdump_segments_rh = NULL;
2402
2403 do {
2404 match = NULL;
2405 spin_lock(&me->hlock);
2406 hlist_for_each_entry_safe(map, n, &me->maps, hn) {
2407 match = map;
2408 hlist_del_init(&map->hn);
2409 break;
2410 }
2411 spin_unlock(&me->hlock);
2412
2413 if (match) {
2414 VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match));
2415 if (err)
2416 goto bail;
2417 if (me->channel[0].ramdumpenabled) {
2418 ramdump_segments_rh = kcalloc(1,
2419 sizeof(struct ramdump_segment), GFP_KERNEL);
2420 if (ramdump_segments_rh) {
2421 ramdump_segments_rh->address =
2422 match->phys;
2423 ramdump_segments_rh->size = match->size;
2424 ret = do_elf_ramdump(
2425 me->channel[0].remoteheap_ramdump_dev,
2426 ramdump_segments_rh, 1);
2427 if (ret < 0)
2428 pr_err("ADSPRPC: unable to dump heap\n");
2429 kfree(ramdump_segments_rh);
2430 }
2431 }
c_mtharu7bd6a422017-10-17 18:15:37 +05302432 fastrpc_mmap_free(match, 0);
c_mtharue1a5ce12017-10-13 20:47:09 +05302433 }
2434 } while (match);
2435bail:
2436 if (err && match)
2437 fastrpc_mmap_add(match);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002438 return err;
2439}
2440
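/*
 * Protection-domain-restart cleanup for the audio PD: if the PDR count
 * advanced since we last looked, drop stale remote-heap mappings, and
 * fail with -ENOTCONN while the PD is still down.
 */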
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05302441static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl)
2442{
2443 struct fastrpc_apps *me = &gfa;
2444 int session = 0, err = 0;
2445
2446 VERIFY(err, !fastrpc_get_adsp_session(
2447 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
2448 if (err)
2449 goto bail;
2450 if (me->channel[fl->cid].spd[session].pdrcount !=
2451 me->channel[fl->cid].spd[session].prevpdrcount) {
2452 if (fastrpc_mmap_remove_ssr(fl))
2453 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
2454 me->channel[fl->cid].spd[session].prevpdrcount =
2455 me->channel[fl->cid].spd[session].pdrcount;
2456 }
2457 if (!me->channel[fl->cid].spd[session].ispdup) {
2458 VERIFY(err, 0);
2459 if (err) {
2460 err = -ENOTCONN;
2461 goto bail;
2462 }
2463 }
2464bail:
2465 return err;
2466}
2467
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002468static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302469 size_t len, struct fastrpc_mmap **ppmap);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002470
2471static void fastrpc_mmap_add(struct fastrpc_mmap *map);
2472
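/*
 * FASTRPC_IOCTL_MUNMAP: look up the mapping by remote address and size,
 * unmap it on the DSP, then free it locally; if the remote unmap fails
 * the map is put back so the file state stays consistent.
 */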
2473static int fastrpc_internal_munmap(struct fastrpc_file *fl,
2474 struct fastrpc_ioctl_munmap *ud)
2475{
2476 int err = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05302477 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002478
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302479 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302480 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002481 VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302482 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002483 if (err)
2484 goto bail;
2485 VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
2486 if (err)
2487 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302488 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302489 fastrpc_mmap_free(map, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302490 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002491bail:
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302492 if (err && map) {
2493 mutex_lock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002494 fastrpc_mmap_add(map);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302495 mutex_unlock(&fl->fl_map_mutex);
2496 }
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302497 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002498 return err;
2499}
2500
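/*
 * FASTRPC_IOCTL_MUNMAP_FD: drop a local mapping identified by fd,
 * virtual address and length without issuing a remote call.
 */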
c_mtharu7bd6a422017-10-17 18:15:37 +05302501static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
2502 struct fastrpc_ioctl_munmap_fd *ud) {
2503 int err = 0;
2504 struct fastrpc_mmap *map = NULL;
2505
2506 VERIFY(err, (fl && ud));
2507 if (err)
2508 goto bail;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302509 mutex_lock(&fl->fl_map_mutex);
Tharun Kumar Merugu09fc6152018-02-16 13:13:12 +05302510 if (fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
2511 pr_err("adsprpc: mapping not found to unmap fd %d, va 0x%llx, len 0x%x\n",
c_mtharu7bd6a422017-10-17 18:15:37 +05302512 ud->fd, (unsigned long long)ud->va,
2513 (unsigned int)ud->len);
2514 err = -1;
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302515 mutex_unlock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302516 goto bail;
2517 }
2518 if (map)
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302519 fastrpc_mmap_free(map, 0);
2520 mutex_unlock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302521bail:
2522 return err;
2523}
2524
2525
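/*
 * FASTRPC_IOCTL_MMAP: reuse an existing mapping when one already covers
 * this fd/address/size, otherwise create it, register it with the DSP via
 * fastrpc_mmap_on_dsp(), and return the remote address in vaddrout.
 */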
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002526static int fastrpc_internal_mmap(struct fastrpc_file *fl,
2527 struct fastrpc_ioctl_mmap *ud)
2528{
2529
c_mtharue1a5ce12017-10-13 20:47:09 +05302530 struct fastrpc_mmap *map = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002531 int err = 0;
2532
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302533 mutex_lock(&fl->map_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302534 mutex_lock(&fl->fl_map_mutex);
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302535 if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302536 ud->size, ud->flags, 1, &map)) {
2537 mutex_unlock(&fl->fl_map_mutex);
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302538 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002539 return 0;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302540 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002541 VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
Tharun Kumar Merugufb51f942017-12-07 10:32:15 +05302542 (uintptr_t)ud->vaddrin, ud->size,
c_mtharue1a5ce12017-10-13 20:47:09 +05302543 ud->flags, &map));
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302544 mutex_unlock(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002545 if (err)
2546 goto bail;
2547 VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
2548 if (err)
2549 goto bail;
2550 ud->vaddrout = map->raddr;
2551 bail:
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302552 if (err && map) {
2553 mutex_lock(&fl->fl_map_mutex);
c_mtharu7bd6a422017-10-17 18:15:37 +05302554 fastrpc_mmap_free(map, 0);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302555 mutex_unlock(&fl->fl_map_mutex);
2556 }
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302557 mutex_unlock(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002558 return err;
2559}
2560
2561static void fastrpc_channel_close(struct kref *kref)
2562{
2563 struct fastrpc_apps *me = &gfa;
2564 struct fastrpc_channel_ctx *ctx;
2565 int cid;
2566
2567 ctx = container_of(kref, struct fastrpc_channel_ctx, kref);
2568 cid = ctx - &gcinfo[0];
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05302569 if (!me->glink)
2570 smd_close(ctx->chan);
2571 else
2572 fastrpc_glink_close(ctx->chan, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05302573 ctx->chan = NULL;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302574 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002575 pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
2576 MAJOR(me->dev_no), cid);
2577}
2578
2579static void fastrpc_context_list_dtor(struct fastrpc_file *fl);
2580
2581static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302582 int secure, int sharedcb, struct fastrpc_session_ctx **session)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002583{
2584 struct fastrpc_apps *me = &gfa;
2585 int idx = 0, err = 0;
2586
2587 if (chan->sesscount) {
2588 for (idx = 0; idx < chan->sesscount; ++idx) {
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302589 if ((sharedcb && chan->session[idx].smmu.sharedcb) ||
2590 (!chan->session[idx].used &&
2591 chan->session[idx].smmu.secure
2592 == secure && !sharedcb)) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002593 chan->session[idx].used = 1;
2594 break;
2595 }
2596 }
2597 VERIFY(err, idx < chan->sesscount);
2598 if (err)
2599 goto bail;
2600 chan->session[idx].smmu.faults = 0;
2601 } else {
2602 VERIFY(err, me->dev != NULL);
2603 if (err)
2604 goto bail;
2605 chan->session[0].dev = me->dev;
c_mtharue1a5ce12017-10-13 20:47:09 +05302606 chan->session[0].smmu.dev = me->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002607 }
2608
2609 *session = &chan->session[idx];
2610 bail:
2611 return err;
2612}
2613
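/*
 * G-Link callbacks: the rx-intent request handler queues an intent of the
 * requested size; the rx handler validates the response against the
 * global context table (index bound, live entry, ctxid and magic) before
 * completing the waiting invocation and acking with glink_rx_done(); the
 * state handler tracks port state and completes the channel bring-up on
 * GLINK_CONNECTED.
 */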
c_mtharue1a5ce12017-10-13 20:47:09 +05302614static bool fastrpc_glink_notify_rx_intent_req(void *h, const void *priv,
2615 size_t size)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002616{
2617 if (glink_queue_rx_intent(h, NULL, size))
2618 return false;
2619 return true;
2620}
2621
c_mtharue1a5ce12017-10-13 20:47:09 +05302622static void fastrpc_glink_notify_tx_done(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002623 const void *pkt_priv, const void *ptr)
2624{
2625}
2626
c_mtharue1a5ce12017-10-13 20:47:09 +05302627static void fastrpc_glink_notify_rx(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002628 const void *pkt_priv, const void *ptr, size_t size)
2629{
2630 struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302631 struct fastrpc_apps *me = &gfa;
2632 uint32_t index;
c_mtharufdac6892017-10-12 13:09:01 +05302633 int err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002634
c_mtharufdac6892017-10-12 13:09:01 +05302635 VERIFY(err, (rsp && size >= sizeof(*rsp)));
2636 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302637 goto bail;
2638
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302639 index = (uint32_t)((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
2640 VERIFY(err, index < FASTRPC_CTX_MAX);
c_mtharufdac6892017-10-12 13:09:01 +05302641 if (err)
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302642 goto bail;
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302643
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302644 VERIFY(err, !IS_ERR_OR_NULL(me->ctxtable[index]));
2645 if (err)
2646 goto bail;
2647
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302648 VERIFY(err, ((me->ctxtable[index]->ctxid == (rsp->ctx & ~3)) &&
Tharun Kumar Merugu9c908aa2018-02-06 12:03:48 +05302649 me->ctxtable[index]->magic == FASTRPC_CTX_MAGIC));
2650 if (err)
2651 goto bail;
2652
2653 context_notify_user(me->ctxtable[index], rsp->retval);
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302654bail:
c_mtharufdac6892017-10-12 13:09:01 +05302655 if (err)
2656 pr_err("adsprpc: invalid response or context\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002657 glink_rx_done(handle, ptr, true);
2658}
2659
c_mtharue1a5ce12017-10-13 20:47:09 +05302660static void fastrpc_glink_notify_state(void *handle, const void *priv,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002661 unsigned int event)
2662{
2663 struct fastrpc_apps *me = &gfa;
2664 int cid = (int)(uintptr_t)priv;
2665 struct fastrpc_glink_info *link;
2666
2667 if (cid < 0 || cid >= NUM_CHANNELS)
2668 return;
2669 link = &me->channel[cid].link;
2670 switch (event) {
2671 case GLINK_CONNECTED:
2672 link->port_state = FASTRPC_LINK_CONNECTED;
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05302673 complete(&me->channel[cid].workport);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002674 break;
2675 case GLINK_LOCAL_DISCONNECTED:
2676 link->port_state = FASTRPC_LINK_DISCONNECTED;
2677 break;
2678 case GLINK_REMOTE_DISCONNECTED:
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002679 break;
2680 default:
2681 break;
2682 }
2683}
2684
2685static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
2686 struct fastrpc_session_ctx **session)
2687{
2688 int err = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302689 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002690
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302691 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002692 if (!*session)
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05302693 err = fastrpc_session_alloc_locked(chan, secure, 0, session);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302694 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002695 return err;
2696}
2697
2698static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
2699 struct fastrpc_session_ctx *session)
2700{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302701 struct fastrpc_apps *me = &gfa;
2702
2703 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002704 session->used = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302705 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002706}
2707
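/*
 * fastrpc_file_free() - release everything owned by a client fd: tell the
 * DSP to drop the remote process, tear down pending/interrupted contexts,
 * cached buffers, mappings and perf entries, release the SMMU session(s),
 * and drop the channel reference when this file took one.
 */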
2708static int fastrpc_file_free(struct fastrpc_file *fl)
2709{
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302710 struct hlist_node *n = NULL;
Tharun Kumar Merugu3e966762018-04-04 10:56:44 +05302711 struct fastrpc_mmap *map = NULL, *lmap = NULL;
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302712 struct fastrpc_perf *perf = NULL, *fperf = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002713 int cid;
2714
2715 if (!fl)
2716 return 0;
2717 cid = fl->cid;
2718
Tharun Kumar Merugu622d8712017-09-15 15:30:06 +05302719 (void)fastrpc_release_current_dsp_process(fl);
2720
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002721 spin_lock(&fl->apps->hlock);
2722 hlist_del_init(&fl->hn);
2723 spin_unlock(&fl->apps->hlock);
2724
Sathish Ambleyd7fbcbb2017-03-08 10:55:48 -08002725 if (!fl->sctx) {
2726 kfree(fl);
2727 return 0;
2728 }
tharun kumar9f899ea2017-07-03 17:07:03 +05302729 spin_lock(&fl->hlock);
2730 fl->file_close = 1;
2731 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002732 fastrpc_context_list_dtor(fl);
2733 fastrpc_buf_list_free(fl);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302734 mutex_lock(&fl->fl_map_mutex);
Tharun Kumar Merugu3e966762018-04-04 10:56:44 +05302735 do {
2736 lmap = NULL;
2737 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
2738 hlist_del_init(&map->hn);
2739 lmap = map;
2740 break;
2741 }
2742 fastrpc_mmap_free(lmap, 1);
2743 } while (lmap);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302744 mutex_unlock(&fl->fl_map_mutex);
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05302745 if (fl->refcount && (fl->ssrcount == fl->apps->channel[cid].ssrcount))
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002746 kref_put_mutex(&fl->apps->channel[cid].kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05302747 fastrpc_channel_close, &fl->apps->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002748 if (fl->sctx)
2749 fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
2750 if (fl->secsctx)
2751 fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05302752
2753 mutex_lock(&fl->perf_mutex);
2754 do {
2755 struct hlist_node *pn = NULL;
2756
2757 fperf = NULL;
2758 hlist_for_each_entry_safe(perf, pn, &fl->perf, hn) {
2759 hlist_del_init(&perf->hn);
2760 fperf = perf;
2761 break;
2762 }
2763 kfree(fperf);
2764 } while (fperf);
2765 mutex_unlock(&fl->perf_mutex);
2766 mutex_destroy(&fl->perf_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05302767 mutex_destroy(&fl->fl_map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002768 kfree(fl);
2769 return 0;
2770}
2771
2772static int fastrpc_device_release(struct inode *inode, struct file *file)
2773{
2774 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
2775
2776 if (fl) {
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05302777 if (fl->qos_request && pm_qos_request_active(&fl->pm_qos_req))
2778 pm_qos_remove_request(&fl->pm_qos_req);
Sathish Ambley1ca68232017-01-19 10:32:55 -08002779 if (fl->debugfs_file != NULL)
2780 debugfs_remove(fl->debugfs_file);
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05302781 mutex_destroy(&fl->map_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002782 fastrpc_file_free(fl);
c_mtharue1a5ce12017-10-13 20:47:09 +05302783 file->private_data = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002784 }
2785 return 0;
2786}
2787
2788static void fastrpc_link_state_handler(struct glink_link_state_cb_info *cb_info,
2789 void *priv)
2790{
2791 struct fastrpc_apps *me = &gfa;
2792 int cid = (int)((uintptr_t)priv);
2793 struct fastrpc_glink_info *link;
2794
2795 if (cid < 0 || cid >= NUM_CHANNELS)
2796 return;
2797
2798 link = &me->channel[cid].link;
2799 switch (cb_info->link_state) {
2800 case GLINK_LINK_STATE_UP:
2801 link->link_state = FASTRPC_LINK_STATE_UP;
2802 complete(&me->channel[cid].work);
2803 break;
2804 case GLINK_LINK_STATE_DOWN:
2805 link->link_state = FASTRPC_LINK_STATE_DOWN;
2806 break;
2807 default:
2808 pr_err("adsprpc: unknown link state %d\n", cb_info->link_state);
2809 break;
2810 }
2811}
2812
2813static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
2814{
2815 int err = 0;
2816 struct fastrpc_glink_info *link;
2817
2818 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2819 if (err)
2820 goto bail;
2821
2822 link = &me->channel[cid].link;
2823 if (link->link_notify_handle != NULL)
2824 goto bail;
2825
2826 link->link_info.glink_link_state_notif_cb = fastrpc_link_state_handler;
2827 link->link_notify_handle = glink_register_link_state_cb(
2828 &link->link_info,
2829 (void *)((uintptr_t)cid));
2830 VERIFY(err, !IS_ERR_OR_NULL(me->channel[cid].link.link_notify_handle));
2831 if (err) {
2832 link->link_notify_handle = NULL;
2833 goto bail;
2834 }
2835 VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
2836 RPC_TIMEOUT));
2837bail:
2838 return err;
2839}
2840
2841static void fastrpc_glink_close(void *chan, int cid)
2842{
2843 int err = 0;
2844 struct fastrpc_glink_info *link;
2845
2846 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2847 if (err)
2848 return;
2849 link = &gfa.channel[cid].link;
2850
c_mtharu314a4202017-11-15 22:09:17 +05302851 if (link->port_state == FASTRPC_LINK_CONNECTED ||
2852 link->port_state == FASTRPC_LINK_REMOTE_DISCONNECTING) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002853 link->port_state = FASTRPC_LINK_DISCONNECTING;
2854 glink_close(chan);
2855 }
2856}
2857
2858static int fastrpc_glink_open(int cid)
2859{
2860 int err = 0;
2861 void *handle = NULL;
2862 struct fastrpc_apps *me = &gfa;
2863 struct glink_open_config *cfg;
2864 struct fastrpc_glink_info *link;
2865
2866 VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
2867 if (err)
2868 goto bail;
2869 link = &me->channel[cid].link;
2870 cfg = &me->channel[cid].link.cfg;
2871 VERIFY(err, (link->link_state == FASTRPC_LINK_STATE_UP));
2872 if (err)
2873 goto bail;
2874
Tharun Kumar Meruguca0db5262017-05-10 12:53:12 +05302875 VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
2876 if (err)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002877 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002878
2879 link->port_state = FASTRPC_LINK_CONNECTING;
2880 cfg->priv = (void *)(uintptr_t)cid;
2881 cfg->edge = gcinfo[cid].link.link_info.edge;
2882 cfg->transport = gcinfo[cid].link.link_info.transport;
2883 cfg->name = FASTRPC_GLINK_GUID;
2884 cfg->notify_rx = fastrpc_glink_notify_rx;
2885 cfg->notify_tx_done = fastrpc_glink_notify_tx_done;
2886 cfg->notify_state = fastrpc_glink_notify_state;
2887 cfg->notify_rx_intent_req = fastrpc_glink_notify_rx_intent_req;
2888 handle = glink_open(cfg);
2889 VERIFY(err, !IS_ERR_OR_NULL(handle));
c_mtharu6e1d26b2017-10-09 16:05:24 +05302890 if (err) {
2891 if (link->port_state == FASTRPC_LINK_CONNECTING)
2892 link->port_state = FASTRPC_LINK_DISCONNECTED;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002893 goto bail;
c_mtharu6e1d26b2017-10-09 16:05:24 +05302894 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07002895 me->channel[cid].chan = handle;
2896bail:
2897 return err;
2898}
2899
Sathish Ambley1ca68232017-01-19 10:32:55 -08002900static int fastrpc_debugfs_open(struct inode *inode, struct file *filp)
2901{
2902 filp->private_data = inode->i_private;
2903 return 0;
2904}
2905
2906static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
2907 size_t count, loff_t *position)
2908{
2909 struct fastrpc_file *fl = filp->private_data;
2910 struct hlist_node *n;
c_mtharue1a5ce12017-10-13 20:47:09 +05302911 struct fastrpc_buf *buf = NULL;
2912 struct fastrpc_mmap *map = NULL;
2913 struct smq_invoke_ctx *ictx = NULL;
Sathish Ambley1ca68232017-01-19 10:32:55 -08002914 struct fastrpc_channel_ctx *chan;
2915 struct fastrpc_session_ctx *sess;
2916 unsigned int len = 0;
2917 int i, j, ret = 0;
2918 char *fileinfo = NULL;
2919
2920 fileinfo = kzalloc(DEBUGFS_SIZE, GFP_KERNEL);
2921 	if (!fileinfo) {
 		ret = -ENOMEM;
2922 		goto bail;
 	}
2923 if (fl == NULL) {
2924 for (i = 0; i < NUM_CHANNELS; i++) {
2925 chan = &gcinfo[i];
2926 len += scnprintf(fileinfo + len,
2927 DEBUGFS_SIZE - len, "%s\n\n",
2928 chan->name);
2929 len += scnprintf(fileinfo + len,
2930 DEBUGFS_SIZE - len, "%s %d\n",
2931 "sesscount:", chan->sesscount);
2932 for (j = 0; j < chan->sesscount; j++) {
2933 sess = &chan->session[j];
2934 len += scnprintf(fileinfo + len,
2935 DEBUGFS_SIZE - len,
2936 "%s%d\n\n", "SESSION", j);
2937 len += scnprintf(fileinfo + len,
2938 DEBUGFS_SIZE - len,
2939 "%s %d\n", "sid:",
2940 sess->smmu.cb);
2941 len += scnprintf(fileinfo + len,
2942 DEBUGFS_SIZE - len,
2943 "%s %d\n", "SECURE:",
2944 sess->smmu.secure);
2945 }
2946 }
2947 } else {
2948 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2949 "%s %d\n\n",
2950 "PROCESS_ID:", fl->tgid);
2951 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2952 "%s %d\n\n",
2953 "CHANNEL_ID:", fl->cid);
2954 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2955 "%s %d\n\n",
2956 "SSRCOUNT:", fl->ssrcount);
2957 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2958 "%s\n",
2959 "LIST OF BUFS:");
2960 spin_lock(&fl->hlock);
2961 hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
2962 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Meruguce566452017-08-17 15:29:59 +05302963 "%s %pK %s %pK %s %llx\n", "buf:",
2964 buf, "buf->virt:", buf->virt,
2965 "buf->phys:", buf->phys);
Sathish Ambley1ca68232017-01-19 10:32:55 -08002966 }
2967 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2968 "\n%s\n",
2969 "LIST OF MAPS:");
2970 hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
2971 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Meruguce566452017-08-17 15:29:59 +05302972 "%s %pK %s %lx %s %llx\n",
Sathish Ambley1ca68232017-01-19 10:32:55 -08002973 "map:", map,
2974 "map->va:", map->va,
2975 "map->phys:", map->phys);
2976 }
2977 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2978 "\n%s\n",
2979 "LIST OF PENDING SMQCONTEXTS:");
2980 hlist_for_each_entry_safe(ictx, n, &fl->clst.pending, hn) {
2981 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Meruguce566452017-08-17 15:29:59 +05302982 "%s %pK %s %u %s %u %s %u\n",
Sathish Ambley1ca68232017-01-19 10:32:55 -08002983 "smqcontext:", ictx,
2984 "sc:", ictx->sc,
2985 "tid:", ictx->pid,
2986 "handle", ictx->rpra->h);
2987 }
2988 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
2989 "\n%s\n",
2990 "LIST OF INTERRUPTED SMQCONTEXTS:");
2991 hlist_for_each_entry_safe(ictx, n, &fl->clst.interrupted, hn) {
2992 len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
Tharun Kumar Meruguce566452017-08-17 15:29:59 +05302993 "%s %pK %s %u %s %u %s %u\n",
Sathish Ambley1ca68232017-01-19 10:32:55 -08002994 "smqcontext:", ictx,
2995 "sc:", ictx->sc,
2996 "tid:", ictx->pid,
2997 "handle", ictx->rpra->h);
2998 }
2999 spin_unlock(&fl->hlock);
3000 }
3001 if (len > DEBUGFS_SIZE)
3002 len = DEBUGFS_SIZE;
3003 ret = simple_read_from_buffer(buffer, count, position, fileinfo, len);
3004 kfree(fileinfo);
3005bail:
3006 return ret;
3007}
3008
3009static const struct file_operations debugfs_fops = {
3010 .open = fastrpc_debugfs_open,
3011 .read = fastrpc_debugfs_read,
3012};
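/*
 * The debugfs hooks above serve two kinds of nodes: a "global" file created
 * in fastrpc_cb_probe() (fl == NULL, dumps per-channel session info) and a
 * per-process file created in fastrpc_device_open() named after the opening
 * task. A minimal userspace sketch of dumping the global node follows; it is
 * illustrative only and assumes debugfs is mounted at /sys/kernel/debug.
 */
#if 0	/* standalone userspace example, not part of this driver */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[1024];	/* fastrpc_debugfs_read() caps output at DEBUGFS_SIZE (1024) */
	ssize_t n;
	int fd = open("/sys/kernel/debug/adsprpc/global", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}
#endif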
Sathish Ambley36849af2017-02-02 09:35:55 -08003013static int fastrpc_channel_open(struct fastrpc_file *fl)
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003014{
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003015 struct fastrpc_apps *me = &gfa;
Tharun Kumar Meruguc42c6e22018-05-29 15:50:46 +05303016 int cid, ii, err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003017
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303018 mutex_lock(&me->smd_mutex);
3019
Sathish Ambley36849af2017-02-02 09:35:55 -08003020 VERIFY(err, fl && fl->sctx);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003021 if (err)
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303022 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08003023 cid = fl->cid;
c_mtharu314a4202017-11-15 22:09:17 +05303024 VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
3025 if (err)
3026 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303027 if (me->channel[cid].ssrcount !=
3028 me->channel[cid].prevssrcount) {
3029 if (!me->channel[cid].issubsystemup) {
3030 VERIFY(err, 0);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303031 if (err) {
3032 err = -ENOTCONN;
c_mtharue1a5ce12017-10-13 20:47:09 +05303033 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303034 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303035 }
3036 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003037 fl->ssrcount = me->channel[cid].ssrcount;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303038 fl->refcount = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003039 if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
c_mtharue1a5ce12017-10-13 20:47:09 +05303040 (me->channel[cid].chan == NULL)) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303041 if (me->glink) {
3042 VERIFY(err, 0 == fastrpc_glink_register(cid, me));
3043 if (err)
3044 goto bail;
3045 VERIFY(err, 0 == fastrpc_glink_open(cid));
3046 } else {
3047 VERIFY(err, !smd_named_open_on_edge(FASTRPC_SMD_GUID,
3048 gcinfo[cid].channel,
3049 (smd_channel_t **)&me->channel[cid].chan,
3050 (void *)(uintptr_t)cid,
3051 smd_event_handler));
3052 }
Tharun Kumar Merugu53a8ec92017-07-14 15:52:49 +05303053 VERIFY(err,
3054 wait_for_completion_timeout(&me->channel[cid].workport,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003055 RPC_TIMEOUT));
3056 if (err) {
c_mtharue1a5ce12017-10-13 20:47:09 +05303057 me->channel[cid].chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003058 goto bail;
3059 }
3060 kref_init(&me->channel[cid].kref);
3061 pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
3062 MAJOR(me->dev_no), cid);
Tharun Kumar Meruguc42c6e22018-05-29 15:50:46 +05303063
3064 for (ii = 0; ii < FASTRPC_GLINK_INTENT_NUM && me->glink; ii++)
3065 glink_queue_rx_intent(me->channel[cid].chan, NULL,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303066 FASTRPC_GLINK_INTENT_LEN);
Tharun Kumar Meruguc42c6e22018-05-29 15:50:46 +05303067
Tharun Kumar Merugud86fc8c2018-01-04 16:35:31 +05303068 if (cid == 0 && me->channel[cid].ssrcount !=
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003069 me->channel[cid].prevssrcount) {
c_mtharue1a5ce12017-10-13 20:47:09 +05303070 if (fastrpc_mmap_remove_ssr(fl))
3071 pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003072 me->channel[cid].prevssrcount =
3073 me->channel[cid].ssrcount;
3074 }
3075 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003076
3077bail:
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303078 mutex_unlock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003079 return err;
3080}
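/*
 * fastrpc_channel_open() above reference-counts the transport per channel:
 * the first opener (for which kref_get_unless_zero() fails) brings the link
 * up and kref_init()s the count, later openers only take a reference, and
 * the last put under smd_mutex tears the channel down (see fastrpc_deinit()
 * below). A standalone sketch of that pattern follows; all names in it are
 * hypothetical and the "open" is a stand-in.
 */
#if 0	/* standalone sketch of the kref get-or-create / put-under-mutex pattern */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>

struct example_chan {
	struct kref kref;
	void *handle;			/* resource guarded by the refcount */
};

static DEFINE_MUTEX(example_lock);

static void example_chan_release(struct kref *kref)
{
	struct example_chan *c = container_of(kref, struct example_chan, kref);

	/* kref_put_mutex() calls this with example_lock held */
	c->handle = NULL;		/* stand-in for closing the transport */
}

static void example_chan_get(struct example_chan *c)
{
	mutex_lock(&example_lock);
	if (!kref_get_unless_zero(&c->kref)) {
		/* first user: (re)open the resource, then start the count at 1 */
		c->handle = (void *)1;	/* stand-in for the real open */
		kref_init(&c->kref);
	}
	mutex_unlock(&example_lock);
}

static void example_chan_put(struct example_chan *c)
{
	kref_put_mutex(&c->kref, example_chan_release, &example_lock);
}
#endif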
3081
Sathish Ambley36849af2017-02-02 09:35:55 -08003082static int fastrpc_device_open(struct inode *inode, struct file *filp)
3083{
3084 int err = 0;
Sathish Ambley567012b2017-03-06 11:55:04 -08003085 struct dentry *debugfs_file;
c_mtharue1a5ce12017-10-13 20:47:09 +05303086 struct fastrpc_file *fl = NULL;
Sathish Ambley36849af2017-02-02 09:35:55 -08003087 struct fastrpc_apps *me = &gfa;
3088
c_mtharue1a5ce12017-10-13 20:47:09 +05303089 VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
Sathish Ambley36849af2017-02-02 09:35:55 -08003090 if (err)
3091 return err;
Sathish Ambley567012b2017-03-06 11:55:04 -08003092 debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
3093 fl, &debugfs_fops);
Sathish Ambley36849af2017-02-02 09:35:55 -08003094 context_list_ctor(&fl->clst);
3095 spin_lock_init(&fl->hlock);
3096 INIT_HLIST_HEAD(&fl->maps);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303097 INIT_HLIST_HEAD(&fl->perf);
Sathish Ambley36849af2017-02-02 09:35:55 -08003098 INIT_HLIST_HEAD(&fl->bufs);
3099 INIT_HLIST_NODE(&fl->hn);
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05303100 fl->sessionid = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003101 fl->tgid = current->tgid;
3102 fl->apps = me;
3103 fl->mode = FASTRPC_MODE_SERIAL;
3104 fl->cid = -1;
Sathish Ambley567012b2017-03-06 11:55:04 -08003105 if (debugfs_file != NULL)
3106 fl->debugfs_file = debugfs_file;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303107 fl->qos_request = 0;
Tharun Kumar Merugu35173342018-02-08 16:13:17 +05303108 fl->refcount = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003109 filp->private_data = fl;
Tharun Kumar Meruguc31eac52018-01-02 11:42:45 +05303110 mutex_init(&fl->map_mutex);
Tharun Kumar Merugued7a8472018-01-25 12:10:15 +05303111 mutex_init(&fl->fl_map_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08003112 spin_lock(&me->hlock);
3113 hlist_add_head(&fl->hn, &me->drivers);
3114 spin_unlock(&me->hlock);
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303115 mutex_init(&fl->perf_mutex);
Sathish Ambley36849af2017-02-02 09:35:55 -08003116 return 0;
3117}
3118
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003119static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
3120{
3121 int err = 0;
Sathish Ambley36849af2017-02-02 09:35:55 -08003122 uint32_t cid;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003123
c_mtharue1a5ce12017-10-13 20:47:09 +05303124 VERIFY(err, fl != NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003125 if (err)
3126 goto bail;
Sathish Ambley36849af2017-02-02 09:35:55 -08003127 if (fl->cid == -1) {
3128 cid = *info;
3129 VERIFY(err, cid < NUM_CHANNELS);
3130 if (err)
3131 goto bail;
3132 fl->cid = cid;
3133 fl->ssrcount = fl->apps->channel[cid].ssrcount;
3134 VERIFY(err, !fastrpc_session_alloc_locked(
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303135 &fl->apps->channel[cid], 0, fl->sharedcb, &fl->sctx));
Sathish Ambley36849af2017-02-02 09:35:55 -08003136 if (err)
3137 goto bail;
3138 }
Tharun Kumar Merugu80be7d62017-08-02 11:03:22 +05303139 VERIFY(err, fl->sctx != NULL);
3140 if (err)
3141 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003142 *info = (fl->sctx->smmu.enabled ? 1 : 0);
3143bail:
3144 return err;
3145}
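/*
 * fastrpc_get_info() binds a client to a channel on first use: userspace
 * passes the desired channel id in *info and gets back whether that
 * session's SMMU is enabled. A userspace sketch follows; the device node
 * name is an assumption (the real name comes from gcinfo[0].name) and
 * FASTRPC_IOCTL_GETINFO is assumed to be available from a userspace copy of
 * the shared header (it is defined in adsprpc_shared.h).
 */
#if 0	/* standalone userspace example, not part of this driver */
#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "adsprpc_shared.h"	/* for FASTRPC_IOCTL_GETINFO */

int main(void)
{
	uint32_t info = 0;	/* channel id 0 (adsp) on input */
	int fd = open("/dev/adsprpc-smd", O_RDWR);	/* node name is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, FASTRPC_IOCTL_GETINFO, &info) < 0) {
		perror("FASTRPC_IOCTL_GETINFO");
		close(fd);
		return 1;
	}
	printf("smmu enabled: %u\n", info);	/* 1 if the session uses the SMMU */
	close(fd);
	return 0;
}
#endif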
3146
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303147static int fastrpc_internal_control(struct fastrpc_file *fl,
3148 struct fastrpc_ioctl_control *cp)
3149{
3150 int err = 0;
3151 int latency;
3152
3153 VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
3154 if (err)
3155 goto bail;
3156 VERIFY(err, !IS_ERR_OR_NULL(cp));
3157 if (err)
3158 goto bail;
3159
3160 switch (cp->req) {
3161 case FASTRPC_CONTROL_LATENCY:
3162 latency = cp->lp.enable == FASTRPC_LATENCY_CTRL_ENB ?
3163 fl->apps->latency : PM_QOS_DEFAULT_VALUE;
3164 VERIFY(err, latency != 0);
3165 if (err)
3166 goto bail;
3167 if (!fl->qos_request) {
3168 pm_qos_add_request(&fl->pm_qos_req,
3169 PM_QOS_CPU_DMA_LATENCY, latency);
3170 fl->qos_request = 1;
3171 } else
3172 pm_qos_update_request(&fl->pm_qos_req, latency);
3173 break;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303174 case FASTRPC_CONTROL_SMMU:
3175 fl->sharedcb = cp->smmu.sharedcb;
3176 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303177 default:
3178 err = -ENOTTY;
3179 break;
3180 }
3181bail:
3182 return err;
3183}
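/*
 * FASTRPC_CONTROL_LATENCY above uses the common pm_qos pattern: add a
 * CPU-DMA-latency request the first time, update it on later requests, and
 * remove it on teardown. A minimal kernel-side sketch of that pattern
 * follows, assuming the legacy pm_qos API from <linux/pm_qos.h> used by this
 * kernel; it is not driver code.
 */
#if 0	/* standalone sketch of the pm_qos add/update/remove pattern */
#include <linux/types.h>
#include <linux/pm_qos.h>

static struct pm_qos_request example_qos_req;
static bool example_qos_added;

static void example_set_cpu_latency(s32 latency_us)
{
	if (!example_qos_added) {
		/* first request: register a new constraint */
		pm_qos_add_request(&example_qos_req,
				PM_QOS_CPU_DMA_LATENCY, latency_us);
		example_qos_added = true;
	} else {
		/* later requests: retune the existing constraint */
		pm_qos_update_request(&example_qos_req, latency_us);
	}
}

static void example_clear_cpu_latency(void)
{
	if (example_qos_added) {
		pm_qos_remove_request(&example_qos_req);
		example_qos_added = false;
	}
}
#endif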
3184
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003185static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
3186 unsigned long ioctl_param)
3187{
3188 union {
Sathish Ambleybae51902017-07-03 15:00:49 -07003189 struct fastrpc_ioctl_invoke_crc inv;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003190 struct fastrpc_ioctl_mmap mmap;
3191 struct fastrpc_ioctl_munmap munmap;
c_mtharu7bd6a422017-10-17 18:15:37 +05303192 struct fastrpc_ioctl_munmap_fd munmap_fd;
Sathish Ambleyd6300c32017-01-18 09:50:43 -08003193 struct fastrpc_ioctl_init_attrs init;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003194 struct fastrpc_ioctl_perf perf;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303195 struct fastrpc_ioctl_control cp;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003196 } p;
3197 void *param = (char *)ioctl_param;
3198 struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
3199 int size = 0, err = 0;
3200 uint32_t info;
3201
c_mtharue1a5ce12017-10-13 20:47:09 +05303202 p.inv.fds = NULL;
3203 p.inv.attrs = NULL;
Sathish Ambleybae51902017-07-03 15:00:49 -07003204 p.inv.crc = NULL;
tharun kumar9f899ea2017-07-03 17:07:03 +05303205 spin_lock(&fl->hlock);
3206 if (fl->file_close == 1) {
3207 		err = -EBADF;
3208 		pr_warn("ADSPRPC: fastrpc_device_release is in progress, not sending new requests to DSP\n");
3209 spin_unlock(&fl->hlock);
3210 goto bail;
3211 }
3212 spin_unlock(&fl->hlock);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003213
3214 switch (ioctl_num) {
3215 case FASTRPC_IOCTL_INVOKE:
3216 size = sizeof(struct fastrpc_ioctl_invoke);
Sathish Ambleybae51902017-07-03 15:00:49 -07003217 /* fall through */
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003218 case FASTRPC_IOCTL_INVOKE_FD:
3219 if (!size)
3220 size = sizeof(struct fastrpc_ioctl_invoke_fd);
3221 /* fall through */
3222 case FASTRPC_IOCTL_INVOKE_ATTRS:
3223 if (!size)
3224 size = sizeof(struct fastrpc_ioctl_invoke_attrs);
Sathish Ambleybae51902017-07-03 15:00:49 -07003225 /* fall through */
3226 case FASTRPC_IOCTL_INVOKE_CRC:
3227 if (!size)
3228 size = sizeof(struct fastrpc_ioctl_invoke_crc);
c_mtharue1a5ce12017-10-13 20:47:09 +05303229 K_COPY_FROM_USER(err, 0, &p.inv, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003230 if (err)
3231 goto bail;
3232 VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
3233 0, &p.inv)));
3234 if (err)
3235 goto bail;
3236 break;
3237 case FASTRPC_IOCTL_MMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303238 K_COPY_FROM_USER(err, 0, &p.mmap, param,
3239 sizeof(p.mmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303240 if (err)
3241 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003242 VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
3243 if (err)
3244 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303245 K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003246 if (err)
3247 goto bail;
3248 break;
3249 case FASTRPC_IOCTL_MUNMAP:
c_mtharue1a5ce12017-10-13 20:47:09 +05303250 K_COPY_FROM_USER(err, 0, &p.munmap, param,
3251 sizeof(p.munmap));
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303252 if (err)
3253 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003254 VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
3255 &p.munmap)));
3256 if (err)
3257 goto bail;
3258 break;
c_mtharu7bd6a422017-10-17 18:15:37 +05303259 case FASTRPC_IOCTL_MUNMAP_FD:
3260 K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
3261 sizeof(p.munmap_fd));
3262 if (err)
3263 goto bail;
3264 VERIFY(err, 0 == (err = fastrpc_internal_munmap_fd(fl,
3265 &p.munmap_fd)));
3266 if (err)
3267 goto bail;
3268 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003269 case FASTRPC_IOCTL_SETMODE:
3270 switch ((uint32_t)ioctl_param) {
3271 case FASTRPC_MODE_PARALLEL:
3272 case FASTRPC_MODE_SERIAL:
3273 fl->mode = (uint32_t)ioctl_param;
3274 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003275 case FASTRPC_MODE_PROFILE:
3276 fl->profile = (uint32_t)ioctl_param;
3277 break;
Tharun Kumar Merugud4d079482017-09-06 11:22:19 +05303278 case FASTRPC_MODE_SESSION:
3279 fl->sessionid = 1;
3280 fl->tgid |= (1 << SESSION_ID_INDEX);
3281 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003282 default:
3283 err = -ENOTTY;
3284 break;
3285 }
3286 break;
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003287 case FASTRPC_IOCTL_GETPERF:
c_mtharue1a5ce12017-10-13 20:47:09 +05303288 K_COPY_FROM_USER(err, 0, &p.perf,
3289 param, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003290 if (err)
3291 goto bail;
3292 p.perf.numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
3293 if (p.perf.keys) {
3294 char *keys = PERF_KEYS;
3295
c_mtharue1a5ce12017-10-13 20:47:09 +05303296 K_COPY_TO_USER(err, 0, (void *)p.perf.keys,
3297 keys, strlen(keys)+1);
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003298 if (err)
3299 goto bail;
3300 }
3301 if (p.perf.data) {
Tharun Kumar Merugu7c966dd2018-01-04 18:07:03 +05303302 struct fastrpc_perf *perf = NULL, *fperf = NULL;
3303 struct hlist_node *n = NULL;
3304
3305 mutex_lock(&fl->perf_mutex);
3306 hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
3307 if (perf->tid == current->pid) {
3308 fperf = perf;
3309 break;
3310 }
3311 }
3312
3313 mutex_unlock(&fl->perf_mutex);
3314
3315 if (fperf) {
3316 K_COPY_TO_USER(err, 0, (void *)p.perf.data,
3317 fperf, sizeof(*fperf));
3318 }
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003319 }
c_mtharue1a5ce12017-10-13 20:47:09 +05303320 K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
Sathish Ambleya21b5b52017-01-11 16:11:01 -08003321 if (err)
3322 goto bail;
3323 break;
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303324 case FASTRPC_IOCTL_CONTROL:
c_mtharue1a5ce12017-10-13 20:47:09 +05303325 K_COPY_FROM_USER(err, 0, &p.cp, param,
3326 sizeof(p.cp));
Tharun Kumar Merugu5f6ca61c2017-08-11 11:43:11 +05303327 if (err)
3328 goto bail;
3329 VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
3330 if (err)
3331 goto bail;
3332 break;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003333 case FASTRPC_IOCTL_GETINFO:
c_mtharue1a5ce12017-10-13 20:47:09 +05303334 K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
Sathish Ambley36849af2017-02-02 09:35:55 -08003335 if (err)
3336 goto bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003337 VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
3338 if (err)
3339 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303340 K_COPY_TO_USER(err, 0, param, &info, sizeof(info));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003341 if (err)
3342 goto bail;
3343 break;
3344 case FASTRPC_IOCTL_INIT:
Sathish Ambleyd6300c32017-01-18 09:50:43 -08003345 p.init.attrs = 0;
3346 p.init.siglen = 0;
3347 size = sizeof(struct fastrpc_ioctl_init);
3348 /* fall through */
3349 case FASTRPC_IOCTL_INIT_ATTRS:
3350 if (!size)
3351 size = sizeof(struct fastrpc_ioctl_init_attrs);
c_mtharue1a5ce12017-10-13 20:47:09 +05303352 K_COPY_FROM_USER(err, 0, &p.init, param, size);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003353 if (err)
3354 goto bail;
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05303355 VERIFY(err, p.init.init.filelen >= 0 &&
Tharun Kumar Merugud1f388a2017-10-01 10:51:11 +05303356 p.init.init.filelen < INIT_FILELEN_MAX);
3357 if (err)
3358 goto bail;
3359 VERIFY(err, p.init.init.memlen >= 0 &&
3360 p.init.init.memlen < INIT_MEMLEN_MAX);
Tharun Kumar Merugu4ea0eac2017-08-22 11:42:51 +05303361 if (err)
3362 goto bail;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303363 VERIFY(err, 0 == (err = fastrpc_init_process(fl, &p.init)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003364 if (err)
3365 goto bail;
3366 break;
3367
3368 default:
3369 err = -ENOTTY;
3370 		pr_info("bad ioctl: %u\n", ioctl_num);
3371 break;
3372 }
3373 bail:
3374 return err;
3375}
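/*
 * The FASTRPC_IOCTL_INVOKE* and FASTRPC_IOCTL_INIT* cases above rely on
 * deliberate switch fall-through: each newer command is a superset of the
 * previous one, so the first matching case fixes the smallest copy size and
 * the copy/handler code below the cases is shared. A standalone sketch of
 * that idiom follows; every name in it (commands, structs) is hypothetical.
 */
#if 0	/* standalone sketch of the fall-through size-selection idiom */
#include <stddef.h>

struct req_v1 { int a; };
struct req_v2 { int a; int b; };		/* superset of v1 */
struct req_v3 { int a; int b; int c; };		/* superset of v2 */

enum example_cmd { CMD_V1, CMD_V2, CMD_V3 };

static size_t example_request_size(enum example_cmd cmd)
{
	size_t size = 0;

	switch (cmd) {
	case CMD_V1:
		size = sizeof(struct req_v1);
		/* fall through */
	case CMD_V2:
		if (!size)
			size = sizeof(struct req_v2);
		/* fall through */
	case CMD_V3:
		if (!size)
			size = sizeof(struct req_v3);
		break;
	default:
		return 0;
	}
	/* only the bytes the caller's ABI version actually supplies are copied */
	return size;
}
#endif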
3376
3377static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
3378 unsigned long code,
3379 void *data)
3380{
3381 struct fastrpc_apps *me = &gfa;
3382 struct fastrpc_channel_ctx *ctx;
c_mtharue1a5ce12017-10-13 20:47:09 +05303383 struct notif_data *notifdata = data;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003384 int cid;
3385
3386 ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
3387 cid = ctx - &me->channel[0];
3388 if (code == SUBSYS_BEFORE_SHUTDOWN) {
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303389 mutex_lock(&me->smd_mutex);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003390 ctx->ssrcount++;
c_mtharue1a5ce12017-10-13 20:47:09 +05303391 ctx->issubsystemup = 0;
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303392 if (ctx->chan) {
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303393 if (me->glink)
3394 fastrpc_glink_close(ctx->chan, cid);
3395 else
3396 smd_close(ctx->chan);
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303397 ctx->chan = NULL;
3398 pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
3399 gcinfo[cid].name, MAJOR(me->dev_no), cid);
3400 }
3401 mutex_unlock(&me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05303402 if (cid == 0)
3403 me->staticpd_flags = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003404 fastrpc_notify_drivers(me, cid);
c_mtharue1a5ce12017-10-13 20:47:09 +05303405 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
3406 if (me->channel[0].remoteheap_ramdump_dev &&
3407 notifdata->enable_ramdump) {
3408 me->channel[0].ramdumpenabled = 1;
3409 }
3410 } else if (code == SUBSYS_AFTER_POWERUP) {
3411 ctx->issubsystemup = 1;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003412 }
3413
3414 return NOTIFY_DONE;
3415}
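/*
 * fastrpc_restart_notifier_cb() above is hooked up per channel in
 * fastrpc_device_init() below through subsys_notif_register_notifier(). A
 * minimal sketch of that registration follows; the "adsp" subsystem name is
 * an assumption (the driver takes the name from gcinfo[i].subsys).
 */
#if 0	/* standalone sketch of subsystem-restart notifier registration */
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <soc/qcom/subsystem_notif.h>

static int example_ssr_cb(struct notifier_block *nb,
			unsigned long code, void *data)
{
	if (code == SUBSYS_BEFORE_SHUTDOWN)
		pr_info("example: subsystem going down\n");
	else if (code == SUBSYS_AFTER_POWERUP)
		pr_info("example: subsystem back up\n");
	return NOTIFY_DONE;
}

static struct notifier_block example_ssr_nb = {
	.notifier_call = example_ssr_cb,
};

static void *example_ssr_handle;

static void example_register_ssr(void)
{
	example_ssr_handle = subsys_notif_register_notifier("adsp",
						&example_ssr_nb);
}

static void example_unregister_ssr(void)
{
	subsys_notif_unregister_notifier(example_ssr_handle, &example_ssr_nb);
}
#endif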
3416
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303417static int fastrpc_pdr_notifier_cb(struct notifier_block *pdrnb,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303418 unsigned long code,
3419 void *data)
3420{
3421 struct fastrpc_apps *me = &gfa;
3422 struct fastrpc_static_pd *spd;
3423 struct notif_data *notifdata = data;
3424
3425 spd = container_of(pdrnb, struct fastrpc_static_pd, pdrnb);
3426 if (code == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01) {
3427 mutex_lock(&me->smd_mutex);
3428 spd->pdrcount++;
3429 spd->ispdup = 0;
3430 		pr_info("ADSPRPC: PDR notifier %d %s\n",
3431 MAJOR(me->dev_no), spd->spdname);
3432 mutex_unlock(&me->smd_mutex);
3433 if (!strcmp(spd->spdname,
3434 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME))
3435 me->staticpd_flags = 0;
3436 fastrpc_notify_pdr_drivers(me, spd->spdname);
3437 } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
3438 if (me->channel[0].remoteheap_ramdump_dev &&
3439 notifdata->enable_ramdump) {
3440 me->channel[0].ramdumpenabled = 1;
3441 }
3442 } else if (code == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
3443 spd->ispdup = 1;
3444 }
3445
3446 return NOTIFY_DONE;
3447}
3448
3449static int fastrpc_get_service_location_notify(struct notifier_block *nb,
3450 unsigned long opcode, void *data)
3451{
3452 struct fastrpc_static_pd *spd;
3453 struct pd_qmi_client_data *pdr = data;
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303454 int curr_state = 0, i = 0;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303455
3456 spd = container_of(nb, struct fastrpc_static_pd, get_service_nb);
3457 if (opcode == LOCATOR_DOWN) {
3458 		pr_err("ADSPRPC: PDR service locator is down\n");
3459 return NOTIFY_DONE;
3460 }
3461
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303462 for (i = 0; i < pdr->total_domains; i++) {
3463 if ((!strcmp(pdr->domain_list[i].name,
3464 "msm/adsp/audio_pd")) ||
3465 (!strcmp(pdr->domain_list[i].name,
3466 "msm/adsp/sensor_pd"))) {
3467 spd->pdrhandle =
3468 service_notif_register_notifier(
3469 pdr->domain_list[i].name,
3470 pdr->domain_list[i].instance_id,
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303471 &spd->pdrnb, &curr_state);
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303472 if (IS_ERR(spd->pdrhandle))
3473 pr_err("ADSPRPC: Unable to register notifier\n");
3474 break;
3475 }
3476 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303477
3478 return NOTIFY_DONE;
3479}
3480
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003481static const struct file_operations fops = {
3482 .open = fastrpc_device_open,
3483 .release = fastrpc_device_release,
3484 .unlocked_ioctl = fastrpc_device_ioctl,
3485 .compat_ioctl = compat_fastrpc_device_ioctl,
3486};
3487
3488static const struct of_device_id fastrpc_match_table[] = {
3489 { .compatible = "qcom,msm-fastrpc-adsp", },
3490 { .compatible = "qcom,msm-fastrpc-compute", },
3491 { .compatible = "qcom,msm-fastrpc-compute-cb", },
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303492 { .compatible = "qcom,msm-fastrpc-legacy-compute", },
3493 { .compatible = "qcom,msm-fastrpc-legacy-compute-cb", },
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003494 { .compatible = "qcom,msm-adsprpc-mem-region", },
3495 {}
3496};
3497
3498static int fastrpc_cb_probe(struct device *dev)
3499{
3500 struct fastrpc_channel_ctx *chan;
3501 struct fastrpc_session_ctx *sess;
3502 struct of_phandle_args iommuspec;
3503 const char *name;
3504 unsigned int start = 0x80000000;
3505 int err = 0, i;
3506 int secure_vmid = VMID_CP_PIXEL;
3507
c_mtharue1a5ce12017-10-13 20:47:09 +05303508 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
3509 "label", NULL)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003510 if (err)
3511 goto bail;
3512 for (i = 0; i < NUM_CHANNELS; i++) {
3513 if (!gcinfo[i].name)
3514 continue;
3515 if (!strcmp(name, gcinfo[i].name))
3516 break;
3517 }
3518 VERIFY(err, i < NUM_CHANNELS);
3519 if (err)
3520 goto bail;
3521 chan = &gcinfo[i];
3522 VERIFY(err, chan->sesscount < NUM_SESSIONS);
3523 if (err)
3524 goto bail;
3525
3526 VERIFY(err, !of_parse_phandle_with_args(dev->of_node, "iommus",
3527 "#iommu-cells", 0, &iommuspec));
3528 if (err)
3529 goto bail;
3530 sess = &chan->session[chan->sesscount];
3531 sess->smmu.cb = iommuspec.args[0] & 0xf;
3532 sess->used = 0;
3533 sess->smmu.coherent = of_property_read_bool(dev->of_node,
3534 "dma-coherent");
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303535 sess->smmu.sharedcb = of_property_read_bool(dev->of_node,
3536 "shared-cb");
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003537 sess->smmu.secure = of_property_read_bool(dev->of_node,
3538 "qcom,secure-context-bank");
3539 if (sess->smmu.secure)
3540 start = 0x60000000;
3541 VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
3542 arm_iommu_create_mapping(&platform_bus_type,
Tharun Kumar Meruguca183f92017-04-27 17:43:27 +05303543 start, 0x78000000)));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003544 if (err)
3545 goto bail;
3546
3547 if (sess->smmu.secure)
3548 iommu_domain_set_attr(sess->smmu.mapping->domain,
3549 DOMAIN_ATTR_SECURE_VMID,
3550 &secure_vmid);
3551
3552 VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
3553 if (err)
3554 goto bail;
c_mtharue1a5ce12017-10-13 20:47:09 +05303555 sess->smmu.dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003556 sess->smmu.enabled = 1;
3557 chan->sesscount++;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003558 debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
3559 NULL, &debugfs_fops);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003560bail:
3561 return err;
3562}
3563
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303564static int fastrpc_cb_legacy_probe(struct device *dev)
3565{
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303566 struct fastrpc_channel_ctx *chan;
3567 struct fastrpc_session_ctx *first_sess = NULL, *sess = NULL;
3568 const char *name;
3569 unsigned int *sids = NULL, sids_size = 0;
3570 int err = 0, ret = 0, i;
3571
3572 unsigned int start = 0x80000000;
3573
3574 VERIFY(err, NULL != (name = of_get_property(dev->of_node,
3575 "label", NULL)));
3576 if (err)
3577 goto bail;
3578
3579 for (i = 0; i < NUM_CHANNELS; i++) {
3580 if (!gcinfo[i].name)
3581 continue;
3582 if (!strcmp(name, gcinfo[i].name))
3583 break;
3584 }
3585 VERIFY(err, i < NUM_CHANNELS);
3586 if (err)
3587 goto bail;
3588
3589 chan = &gcinfo[i];
3590 VERIFY(err, chan->sesscount < NUM_SESSIONS);
3591 if (err)
3592 goto bail;
3593
3594 first_sess = &chan->session[chan->sesscount];
3595
3596 VERIFY(err, NULL != of_get_property(dev->of_node,
3597 "sids", &sids_size));
3598 if (err)
3599 goto bail;
3600
3601 VERIFY(err, NULL != (sids = kzalloc(sids_size, GFP_KERNEL)));
3602 if (err)
3603 goto bail;
3604 ret = of_property_read_u32_array(dev->of_node, "sids", sids,
3605 sids_size/sizeof(unsigned int));
3606 if (ret)
3607 goto bail;
3608
3609 VERIFY(err, !IS_ERR_OR_NULL(first_sess->smmu.mapping =
3610 arm_iommu_create_mapping(&platform_bus_type,
3611 start, 0x78000000)));
3612 if (err)
3613 goto bail;
3614
3615 VERIFY(err, !arm_iommu_attach_device(dev, first_sess->smmu.mapping));
3616 if (err)
3617 goto bail;
3618
3619
3620 for (i = 0; i < sids_size/sizeof(unsigned int); i++) {
3621 VERIFY(err, chan->sesscount < NUM_SESSIONS);
3622 if (err)
3623 goto bail;
3624 sess = &chan->session[chan->sesscount];
3625 sess->smmu.cb = sids[i];
3626 sess->smmu.dev = dev;
3627 sess->smmu.mapping = first_sess->smmu.mapping;
3628 sess->smmu.enabled = 1;
3629 sess->used = 0;
3630 sess->smmu.coherent = false;
3631 sess->smmu.secure = false;
3632 chan->sesscount++;
3633 }
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303634bail:
3635 kfree(sids);
3636 return err;
3637}
3638
3639
3640
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05303641static void init_secure_vmid_list(struct device *dev, char *prop_name,
3642 struct secure_vm *destvm)
3643{
3644 int err = 0;
3645 u32 len = 0, i = 0;
3646 u32 *rhvmlist = NULL;
3647 u32 *rhvmpermlist = NULL;
3648
3649 if (!of_find_property(dev->of_node, prop_name, &len))
3650 goto bail;
3651 if (len == 0)
3652 goto bail;
3653 len /= sizeof(u32);
3654 VERIFY(err, NULL != (rhvmlist = kcalloc(len, sizeof(u32), GFP_KERNEL)));
3655 if (err)
3656 goto bail;
3657 VERIFY(err, NULL != (rhvmpermlist = kcalloc(len, sizeof(u32),
3658 GFP_KERNEL)));
3659 if (err)
3660 goto bail;
3661 for (i = 0; i < len; i++) {
3662 err = of_property_read_u32_index(dev->of_node, prop_name, i,
3663 &rhvmlist[i]);
3664 rhvmpermlist[i] = PERM_READ | PERM_WRITE | PERM_EXEC;
3665 		pr_info("ADSPRPC: Secure VMID = %d\n", rhvmlist[i]);
3666 if (err) {
3667 pr_err("ADSPRPC: Failed to read VMID\n");
3668 goto bail;
3669 }
3670 }
3671 destvm->vmid = rhvmlist;
3672 destvm->vmperm = rhvmpermlist;
3673 destvm->vmcount = len;
3674bail:
3675 if (err) {
3676 kfree(rhvmlist);
3677 kfree(rhvmpermlist);
3678 }
3679}
3680
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003681static int fastrpc_probe(struct platform_device *pdev)
3682{
3683 int err = 0;
3684 struct fastrpc_apps *me = &gfa;
3685 struct device *dev = &pdev->dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003686 struct device_node *ion_node, *node;
3687 struct platform_device *ion_pdev;
3688 struct cma *cma;
3689 uint32_t val;
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303690 int ret = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003691
c_mtharu63ffc012017-11-16 15:26:56 +05303692
3693 if (of_device_is_compatible(dev->of_node,
3694 "qcom,msm-fastrpc-compute")) {
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05303695 init_secure_vmid_list(dev, "qcom,adsp-remoteheap-vmid",
3696 &gcinfo[0].rhvm);
c_mtharu63ffc012017-11-16 15:26:56 +05303697
c_mtharu63ffc012017-11-16 15:26:56 +05303698
3699 of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
3700 &me->latency);
3701 }
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003702 if (of_device_is_compatible(dev->of_node,
3703 "qcom,msm-fastrpc-compute-cb"))
3704 return fastrpc_cb_probe(dev);
3705
3706 if (of_device_is_compatible(dev->of_node,
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303707 "qcom,msm-fastrpc-legacy-compute")) {
3708 me->glink = false;
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05303709 me->legacy = 1;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303710 }
3711
3712 if (of_device_is_compatible(dev->of_node,
3713 		"qcom,msm-fastrpc-legacy-compute-cb")) {
3714 return fastrpc_cb_legacy_probe(dev);
3715 }
3716
3717 if (of_device_is_compatible(dev->of_node,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003718 "qcom,msm-adsprpc-mem-region")) {
3719 me->dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003720 ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
3721 if (ion_node) {
3722 for_each_available_child_of_node(ion_node, node) {
3723 if (of_property_read_u32(node, "reg", &val))
3724 continue;
3725 if (val != ION_ADSP_HEAP_ID)
3726 continue;
3727 ion_pdev = of_find_device_by_node(node);
3728 if (!ion_pdev)
3729 break;
3730 cma = dev_get_cma_area(&ion_pdev->dev);
3731 if (cma) {
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05303732 me->range.addr = cma_get_base(cma);
3733 me->range.size =
3734 (size_t)cma_get_size(cma);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003735 }
3736 break;
3737 }
3738 }
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05303739 if (me->range.addr && !of_property_read_bool(dev->of_node,
Tharun Kumar Merugu6b7a4a22018-01-17 16:08:07 +05303740 "restrict-access")) {
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003741 int srcVM[1] = {VMID_HLOS};
3742 int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6,
3743 VMID_ADSP_Q6};
Sathish Ambley84d11862017-05-15 14:36:05 -07003744 int destVMperm[4] = {PERM_READ | PERM_WRITE | PERM_EXEC,
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003745 PERM_READ | PERM_WRITE | PERM_EXEC,
3746 PERM_READ | PERM_WRITE | PERM_EXEC,
3747 PERM_READ | PERM_WRITE | PERM_EXEC,
3748 };
3749
Tharun Kumar Merugu4f2dcc82018-03-29 00:35:49 +05303750 VERIFY(err, !hyp_assign_phys(me->range.addr,
3751 me->range.size, srcVM, 1,
3752 destVM, destVMperm, 4));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003753 if (err)
3754 goto bail;
3755 }
3756 return 0;
3757 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303758 if (of_property_read_bool(dev->of_node,
3759 "qcom,fastrpc-adsp-audio-pdr")) {
3760 int session;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003761
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303762 VERIFY(err, !fastrpc_get_adsp_session(
3763 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
3764 if (err)
3765 goto spdbail;
3766 me->channel[0].spd[session].get_service_nb.notifier_call =
3767 fastrpc_get_service_location_notify;
3768 ret = get_service_location(
3769 AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
3770 AUDIO_PDR_ADSP_SERVICE_NAME,
3771 &me->channel[0].spd[session].get_service_nb);
3772 if (ret)
3773 pr_err("ADSPRPC: Get service location failed: %d\n",
3774 ret);
3775 }
Tharun Kumar Merugu848c0952018-02-07 21:37:19 +05303776 if (of_property_read_bool(dev->of_node,
3777 "qcom,fastrpc-adsp-sensors-pdr")) {
3778 int session;
3779
3780 VERIFY(err, !fastrpc_get_adsp_session(
3781 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
3782 if (err)
3783 goto spdbail;
3784 me->channel[0].spd[session].get_service_nb.notifier_call =
3785 fastrpc_get_service_location_notify;
3786 ret = get_service_location(
3787 SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
3788 SENSORS_PDR_ADSP_SERVICE_NAME,
3789 &me->channel[0].spd[session].get_service_nb);
3790 if (ret)
3791 pr_err("ADSPRPC: Get service location failed: %d\n",
3792 ret);
3793 }
Tharun Kumar Merugudf860662018-01-17 19:59:50 +05303794spdbail:
3795 err = 0;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003796 VERIFY(err, !of_platform_populate(pdev->dev.of_node,
3797 fastrpc_match_table,
3798 NULL, &pdev->dev));
3799 if (err)
3800 goto bail;
3801bail:
3802 return err;
3803}
3804
3805static void fastrpc_deinit(void)
3806{
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303807 struct fastrpc_apps *me = &gfa;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003808 struct fastrpc_channel_ctx *chan = gcinfo;
3809 int i, j;
3810
3811 for (i = 0; i < NUM_CHANNELS; i++, chan++) {
3812 if (chan->chan) {
3813 kref_put_mutex(&chan->kref,
Tharun Kumar Merugu642fcce2017-12-07 19:22:10 +05303814 fastrpc_channel_close, &me->smd_mutex);
c_mtharue1a5ce12017-10-13 20:47:09 +05303815 chan->chan = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003816 }
3817 for (j = 0; j < NUM_SESSIONS; j++) {
3818 struct fastrpc_session_ctx *sess = &chan->session[j];
c_mtharue1a5ce12017-10-13 20:47:09 +05303819 if (sess->smmu.dev) {
3820 arm_iommu_detach_device(sess->smmu.dev);
3821 sess->smmu.dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003822 }
3823 if (sess->smmu.mapping) {
3824 arm_iommu_release_mapping(sess->smmu.mapping);
c_mtharue1a5ce12017-10-13 20:47:09 +05303825 sess->smmu.mapping = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003826 }
3827 }
Tharun Kumar Merugu3937e912017-12-21 16:24:37 +05303828 kfree(chan->rhvm.vmid);
3829 kfree(chan->rhvm.vmperm);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003830 }
3831}
3832
3833static struct platform_driver fastrpc_driver = {
3834 .probe = fastrpc_probe,
3835 .driver = {
3836 .name = "fastrpc",
3837 .owner = THIS_MODULE,
3838 .of_match_table = fastrpc_match_table,
3839 },
3840};
3841
3842static int __init fastrpc_device_init(void)
3843{
3844 struct fastrpc_apps *me = &gfa;
c_mtharue1a5ce12017-10-13 20:47:09 +05303845 struct device *dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003846 int err = 0, i;
3847
3848 memset(me, 0, sizeof(*me));
3849
3850 fastrpc_init(me);
3851 me->dev = NULL;
Tharun Kumar Merugubbc78f22018-01-22 19:26:44 +05303852 me->glink = true;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003853 VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
3854 if (err)
3855 goto register_bail;
3856 VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
3857 DEVICE_NAME));
3858 if (err)
3859 goto alloc_chrdev_bail;
3860 cdev_init(&me->cdev, &fops);
3861 me->cdev.owner = THIS_MODULE;
3862 VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
Sathish Ambley36849af2017-02-02 09:35:55 -08003863 1));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003864 if (err)
3865 goto cdev_init_bail;
3866 me->class = class_create(THIS_MODULE, "fastrpc");
3867 VERIFY(err, !IS_ERR(me->class));
3868 if (err)
3869 goto class_create_bail;
3870 me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
Sathish Ambley36849af2017-02-02 09:35:55 -08003871 dev = device_create(me->class, NULL,
3872 MKDEV(MAJOR(me->dev_no), 0),
3873 NULL, gcinfo[0].name);
3874 VERIFY(err, !IS_ERR_OR_NULL(dev));
3875 if (err)
3876 goto device_create_bail;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003877 for (i = 0; i < NUM_CHANNELS; i++) {
Sathish Ambley36849af2017-02-02 09:35:55 -08003878 me->channel[i].dev = dev;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003879 me->channel[i].ssrcount = 0;
3880 me->channel[i].prevssrcount = 0;
c_mtharue1a5ce12017-10-13 20:47:09 +05303881 me->channel[i].issubsystemup = 1;
3882 me->channel[i].ramdumpenabled = 0;
3883 me->channel[i].remoteheap_ramdump_dev = NULL;
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003884 me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
3885 me->channel[i].handle = subsys_notif_register_notifier(
3886 gcinfo[i].subsys,
3887 &me->channel[i].nb);
3888 }
3889
3890 me->client = msm_ion_client_create(DEVICE_NAME);
3891 VERIFY(err, !IS_ERR_OR_NULL(me->client));
3892 if (err)
3893 goto device_create_bail;
Sathish Ambley1ca68232017-01-19 10:32:55 -08003894 debugfs_root = debugfs_create_dir("adsprpc", NULL);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003895 return 0;
3896device_create_bail:
3897 for (i = 0; i < NUM_CHANNELS; i++) {
Sathish Ambley36849af2017-02-02 09:35:55 -08003898 if (me->channel[i].handle)
3899 subsys_notif_unregister_notifier(me->channel[i].handle,
3900 &me->channel[i].nb);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003901 }
Sathish Ambley36849af2017-02-02 09:35:55 -08003902 if (!IS_ERR_OR_NULL(dev))
3903 device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003904 class_destroy(me->class);
3905class_create_bail:
3906 cdev_del(&me->cdev);
3907cdev_init_bail:
3908 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
3909alloc_chrdev_bail:
3910register_bail:
3911 fastrpc_deinit();
3912 return err;
3913}
3914
3915static void __exit fastrpc_device_exit(void)
3916{
3917 struct fastrpc_apps *me = &gfa;
3918 int i;
3919
3920 fastrpc_file_list_dtor(me);
3921 fastrpc_deinit();
3922 for (i = 0; i < NUM_CHANNELS; i++) {
3923 if (!gcinfo[i].name)
3924 continue;
3925 device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
3926 subsys_notif_unregister_notifier(me->channel[i].handle,
3927 &me->channel[i].nb);
3928 }
3929 class_destroy(me->class);
3930 cdev_del(&me->cdev);
3931 unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
3932 ion_client_destroy(me->client);
Sathish Ambley1ca68232017-01-19 10:32:55 -08003933 debugfs_remove_recursive(debugfs_root);
Sathish Ambley69e1ab02016-10-18 10:28:15 -07003934}
3935
3936late_initcall(fastrpc_device_init);
3937module_exit(fastrpc_device_exit);
3938
3939MODULE_LICENSE("GPL v2");