blob: dc5541aa5093e8eb6442d5eb6ea0f1ad8b417f18 [file] [log] [blame]
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001/*
Tharun Kumar Merugu7999c412019-02-02 01:22:47 +05302 * Copyright (c) 2012-2014, 2018-2019 The Linux Foundation. All rights reserved.
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
Mitchel Humpherys6b4c68c2013-02-06 12:03:20 -080014#include "adsprpc_shared.h"
15
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <mach/msm_smd.h>
28#include <mach/ion.h>
Mitchel Humpherysef5bbbe2013-05-15 12:44:51 -070029#include <mach/iommu_domains.h>
Mitchel Humpherys42e806e2012-09-30 22:27:53 -070030#include <linux/scatterlist.h>
Mitchel Humpherys6b4c68c2013-02-06 12:03:20 -080031#include <linux/fs.h>
32#include <linux/uaccess.h>
33#include <linux/device.h>
Mitchel Humpherysef5bbbe2013-05-15 12:44:51 -070034#include <linux/of.h>
35#include <linux/iommu.h>
Mitchel Humpherysbf6a2452013-11-12 13:09:47 -080036#include <linux/kref.h>
Mitchel Humpherys6b4c68c2013-02-06 12:03:20 -080037
38#ifndef ION_ADSPRPC_HEAP_ID
39#define ION_ADSPRPC_HEAP_ID ION_AUDIO_HEAP_ID
40#endif /*ION_ADSPRPC_HEAP_ID*/
41
42#define RPC_TIMEOUT (5 * HZ)
43#define RPC_HASH_BITS 5
44#define RPC_HASH_SZ (1 << RPC_HASH_BITS)
45#define BALIGN 32
46
47#define LOCK_MMAP(kernel)\
48 do {\
49 if (!kernel)\
50 down_read(&current->mm->mmap_sem);\
51 } while (0)
52
53#define UNLOCK_MMAP(kernel)\
54 do {\
55 if (!kernel)\
56 up_read(&current->mm->mmap_sem);\
57 } while (0)
58
59
Mitchel Humpherys9a1c8192013-05-15 12:47:38 -070060#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
61
Tharun Kumar Merugu7999c412019-02-02 01:22:47 +053062#define FASTRPC_STATIC_HANDLE_KERNEL (1)
63
Mitchel Humpherys6b4c68c2013-02-06 12:03:20 -080064static inline uint32_t buf_page_start(void *buf)
65{
66 uint32_t start = (uint32_t) buf & PAGE_MASK;
67 return start;
68}
69
70static inline uint32_t buf_page_offset(void *buf)
71{
72 uint32_t offset = (uint32_t) buf & (PAGE_SIZE - 1);
73 return offset;
74}
75
Dennis Caglec1a72df2018-04-13 11:49:03 -070076static inline int buf_num_pages(void *buf, size_t len)
Mitchel Humpherys6b4c68c2013-02-06 12:03:20 -080077{
78 uint32_t start = buf_page_start(buf) >> PAGE_SHIFT;
79 uint32_t end = (((uint32_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
80 int nPages = end - start + 1;
81 return nPages;
82}
83
84static inline uint32_t buf_page_size(uint32_t size)
85{
86 uint32_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
87 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
88}
89
/*
 * Resolve a user buffer into a single physically contiguous page run.
 *
 * Verifies user access rights, walks the VMAs covering the first and last
 * page of [addr, addr + sz), and checks via follow_pfn() that the pfns are
 * contiguous across the whole range.  On success fills exactly one entry in
 * pages[] and returns 1; returns -1 if the range is inaccessible or has no
 * VMA, 0 if the pfn run is not contiguous or nr_elems is 0.
 * Caller must hold mmap_sem (see LOCK_MMAP).
 */
static inline int buf_get_pages(void *addr, int sz, int nr_pages, int access,
				struct smq_phy_page *pages, int nr_elems)
{
	struct vm_area_struct *vma, *vmaend;
	uint32_t start = buf_page_start(addr);
	uint32_t end = buf_page_start((void *)((uint32_t)addr + sz - 1));
	uint32_t len = nr_pages << PAGE_SHIFT;
	unsigned long pfn, pfnend;
	int n = -1, err = 0;

	/* access == 0 means remote reads (we verify read), else write */
	VERIFY(err, 0 != access_ok(access ? VERIFY_WRITE : VERIFY_READ,
					(void __user *)start, len));
	if (err)
		goto bail;
	VERIFY(err, 0 != (vma = find_vma(current->mm, start)));
	if (err)
		goto bail;
	VERIFY(err, 0 != (vmaend = find_vma(current->mm, end)));
	if (err)
		goto bail;
	/* from here on failures return 0 (non-contiguous), not -1 */
	n = 0;
	VERIFY(err, 0 == follow_pfn(vma, start, &pfn));
	if (err)
		goto bail;
	VERIFY(err, 0 == follow_pfn(vmaend, end, &pfnend));
	if (err)
		goto bail;
	/* contiguity check: last pfn must be first pfn + nr_pages - 1 */
	VERIFY(err, (pfn + nr_pages - 1) == pfnend);
	if (err)
		goto bail;
	VERIFY(err, nr_elems > 0);
	if (err)
		goto bail;
	pages->addr = __pfn_to_phys(pfn);
	pages->size = len;
	n++;
 bail:
	return n;
}
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700129
/* An ION-backed buffer used for marshaling RPC arguments. */
struct fastrpc_buf {
	struct ion_handle *handle;	/* owning ION handle */
	void *virt;			/* kernel mapping of the buffer */
	ion_phys_addr_t phys;		/* physical (or IOVA) address */
	size_t size;			/* total allocated size in bytes */
	int used;			/* bytes consumed so far */
};
137
138struct smq_context_list;
139
/*
 * Per-invocation context, live from dispatch until the DSP response
 * (or process teardown).  Linked on either the pending or interrupted
 * list of the global smq_context_list.
 */
struct smq_invoke_ctx {
	struct hlist_node hn;		/* pending/interrupted list linkage */
	struct completion work;		/* signaled when the response arrives */
	int retval;			/* remote return value, -1 until done */
	int pid;			/* pid of the invoking thread */
	remote_arg_t *pra;		/* caller's args (copied in) */
	remote_arg_t *rpra;		/* remote-visible args in obuf */
	struct fastrpc_buf obuf;	/* primary marshaling buffer */
	struct fastrpc_buf *abufs;	/* overflow arg buffers (nbufs of them) */
	struct fastrpc_device *dev;	/* device entry borrowed for this call */
	struct fastrpc_apps *apps;	/* owning driver state */
	int *fds;			/* optional per-buffer ION fds */
	struct ion_handle **handles;	/* imported handles for fds[] */
	int nbufs;			/* count of entries in abufs */
	bool smmu;			/* true if SMMU path was attached */
	uint32_t sc;			/* scalars word describing the call */
};
157
/* Tracks in-flight invocations: pending (waiting on DSP) and interrupted. */
struct smq_context_list {
	struct hlist_head pending;	/* contexts awaiting a response */
	struct hlist_head interrupted;	/* contexts whose waiter was signaled */
	spinlock_t hlock;		/* protects both lists */
};
163
/* SMMU (IOMMU) attachment state for the DSP context bank. */
struct fastrpc_smmu {
	struct iommu_group *group;	/* group to attach/detach per call */
	struct iommu_domain *domain;	/* translation domain */
	int domain_id;			/* msm domain id used for ion_map_iommu */
	bool enabled;			/* true when the SMMU path is in use */
};
170
/* Global driver state (one instance: gfa). */
struct fastrpc_apps {
	smd_channel_t *chan;		/* SMD channel to the ADSP */
	struct smq_context_list clst;	/* in-flight invocation contexts */
	struct completion work;		/* signaled on SMD channel open */
	struct ion_client *iclient;	/* ION client for all allocations */
	struct cdev cdev;
	struct class *class;
	struct device *dev;
	struct fastrpc_smmu smmu;
	struct mutex smd_mutex;		/* serializes channel setup/teardown */
	dev_t dev_no;
	spinlock_t wrlock;		/* serializes smd_write */
	spinlock_t hlock;		/* protects htbl */
	struct kref kref;		/* channel refcount */
	struct hlist_head htbl[RPC_HASH_SZ];	/* fastrpc_device hash table */
};
187
/* A user buffer mapped onto the DSP via the mmap ioctl. */
struct fastrpc_mmap {
	struct hlist_node hn;		/* linkage on file_data's hlst */
	struct ion_handle *handle;	/* imported ION handle */
	void *virt;			/* kernel mapping */
	ion_phys_addr_t phys;		/* physical/IOVA address */
	uint32_t vaddrin;		/* user virtual address supplied */
	uint32_t vaddrout;		/* DSP-side address returned */
	size_t size;
};
197
/* Per-open-file state, stored in filp->private_data. */
struct file_data {
	spinlock_t hlock;		/* protects hlst */
	struct hlist_head hlst;		/* fastrpc_mmap list for this fd */
	uint32_t mode;			/* invoke mode (e.g. serial/parallel) */
	struct mutex map_mutex;		/* serializes mmap/munmap ioctls */
};
204
/* Per-process cached device entry (keyed by tgid in the hash table). */
struct fastrpc_device {
	uint32_t tgid;			/* owning thread group id */
	struct hlist_node hn;		/* htbl bucket linkage */
	struct fastrpc_buf buf;		/* reusable marshaling buffer */
};
210
/* Singleton driver state shared by all clients. */
static struct fastrpc_apps gfa;
212
/*
 * Release a fastrpc_buf: IOMMU mapping first, then the kernel mapping,
 * then the ION handle itself.  Safe to call on a partially initialized
 * buffer; fields are reset so a double free is harmless.
 */
static void free_mem(struct fastrpc_buf *buf)
{
	struct fastrpc_apps *me = &gfa;

	if (!IS_ERR_OR_NULL(buf->handle)) {
		if (me->smmu.enabled && buf->phys) {
			ion_unmap_iommu(me->iclient, buf->handle,
					me->smmu.domain_id, 0);
			buf->phys = 0;
		}
		if (!IS_ERR_OR_NULL(buf->virt)) {
			ion_unmap_kernel(me->iclient, buf->handle);
			buf->virt = NULL;
		}
		ion_free(me->iclient, buf->handle);
		buf->handle = NULL;
	}
}
231
/*
 * Release a fastrpc_mmap's ION resources (IOMMU mapping, kernel mapping,
 * handle).  Mirrors free_mem() but for mmap-ioctl mappings.
 */
static void free_map(struct fastrpc_mmap *map)
{
	struct fastrpc_apps *me = &gfa;
	if (!IS_ERR_OR_NULL(map->handle)) {
		if (me->smmu.enabled && map->phys) {
			ion_unmap_iommu(me->iclient, map->handle,
					me->smmu.domain_id, 0);
			map->phys = 0;
		}
		if (!IS_ERR_OR_NULL(map->virt)) {
			ion_unmap_kernel(me->iclient, map->handle);
			map->virt = NULL;
		}
		ion_free(me->iclient, map->handle);
	}
	/* cleared unconditionally so the map is never reused */
	map->handle = NULL;
}
249
/*
 * Allocate and map buf->size bytes from ION.  With the SMMU enabled the
 * buffer comes from the IOMMU heap and buf->phys is an IOVA; otherwise it
 * comes from a physically contiguous heap and buf->phys is the DMA address.
 * Returns 0 on success; on failure everything allocated so far is freed.
 */
static int alloc_mem(struct fastrpc_buf *buf)
{
	struct fastrpc_apps *me = &gfa;
	struct ion_client *clnt = gfa.iclient;
	struct sg_table *sg;
	int err = 0;
	unsigned int heap;
	unsigned long len;
	buf->handle = NULL;
	buf->virt = NULL;
	buf->phys = 0;
	heap = me->smmu.enabled ? ION_HEAP(ION_IOMMU_HEAP_ID) :
		ION_HEAP(ION_ADSP_HEAP_ID) | ION_HEAP(ION_AUDIO_HEAP_ID);
	buf->handle = ion_alloc(clnt, buf->size, SZ_4K, heap, ION_FLAG_CACHED);
	VERIFY(err, 0 == IS_ERR_OR_NULL(buf->handle));
	if (err)
		goto bail;
	buf->virt = ion_map_kernel(clnt, buf->handle);
	VERIFY(err, 0 == IS_ERR_OR_NULL(buf->virt));
	if (err)
		goto bail;
	if (me->smmu.enabled) {
		len = buf->size;
		VERIFY(err, 0 == ion_map_iommu(clnt, buf->handle,
				me->smmu.domain_id, 0, SZ_4K, 0,
				&buf->phys, &len, 0, 0));
		if (err)
			goto bail;
	} else {
		/* non-SMMU: the heap is contiguous, one sg entry suffices */
		VERIFY(err, 0 != (sg = ion_sg_table(clnt, buf->handle)));
		if (err)
			goto bail;
		buf->phys = sg_dma_address(sg->sgl);
	}
 bail:
	if (err && !IS_ERR_OR_NULL(buf->handle))
		free_mem(buf);
	return err;
}
289
/*
 * If the calling thread previously had an invocation interrupted by a
 * signal, move that context back to the pending list and hand it out so
 * the wait can be resumed instead of re-sending the RPC.
 * Returns 0 with *po unset when no interrupted context exists for this
 * pid; returns -1 if one exists but its scalars don't match the retry.
 */
static int context_restore_interrupted(struct fastrpc_apps *me,
				struct fastrpc_ioctl_invoke_fd *invokefd,
				struct smq_invoke_ctx **po)
{
	int err = 0;
	struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
	struct hlist_node *pos, *n;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;
	spin_lock(&me->clst.hlock);
	hlist_for_each_entry_safe(ictx, pos, n, &me->clst.interrupted, hn) {
		if (ictx->pid == current->pid) {
			/* a retry must match the interrupted call exactly */
			if (invoke->sc != ictx->sc)
				err = -1;
			else {
				ctx = ictx;
				hlist_del(&ctx->hn);
				hlist_add_head(&ctx->hn, &me->clst.pending);
			}
			break;
		}
	}
	spin_unlock(&me->clst.hlock);
	if (ctx)
		*po = ctx;
	return err;
}
316
/*
 * Allocate and initialize a new invocation context.  The remote args
 * (and optional fd/handle arrays) are carved out of a single kzalloc
 * tail allocation right after the context struct.  Args and fds are
 * copied from user space unless `kernel` is set.  On success the
 * context is queued on the pending list and returned through *po.
 */
static int context_alloc(struct fastrpc_apps *me, uint32_t kernel,
				struct fastrpc_ioctl_invoke_fd *invokefd,
				struct smq_invoke_ctx **po)
{
	int err = 0, bufs, size = 0;
	struct smq_invoke_ctx *ctx = NULL;
	struct smq_context_list *clst = &me->clst;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;

	bufs = REMOTE_SCALARS_INBUFS(invoke->sc) +
		REMOTE_SCALARS_OUTBUFS(invoke->sc);
	if (bufs) {
		/* tail storage: pra[bufs], then optionally fds[] + handles[] */
		size = bufs * sizeof(*ctx->pra);
		if (invokefd->fds)
			size = size + bufs * sizeof(*ctx->fds) +
				bufs * sizeof(*ctx->handles);
	}

	VERIFY(err, NULL != (ctx = kzalloc(sizeof(*ctx) + size, GFP_KERNEL)));
	if (err)
		goto bail;

	INIT_HLIST_NODE(&ctx->hn);
	/* fake-hash the node so hlist_del is safe before it is listed */
	hlist_add_fake(&ctx->hn);
	ctx->pra = (remote_arg_t *)(&ctx[1]);
	ctx->fds = invokefd->fds == 0 ? 0 : (int *)(&ctx->pra[bufs]);
	ctx->handles = invokefd->fds == 0 ? 0 :
			(struct ion_handle **)(&ctx->fds[bufs]);
	if (!kernel) {
		VERIFY(err, 0 == copy_from_user(ctx->pra, invoke->pra,
					bufs * sizeof(*ctx->pra)));
		if (err)
			goto bail;
	} else {
		memmove(ctx->pra, invoke->pra, bufs * sizeof(*ctx->pra));
	}

	if (invokefd->fds) {
		if (!kernel) {
			VERIFY(err, 0 == copy_from_user(ctx->fds, invokefd->fds,
						bufs * sizeof(*ctx->fds)));
			if (err)
				goto bail;
		} else {
			memmove(ctx->fds, invokefd->fds,
					bufs * sizeof(*ctx->fds));
		}
	}
	ctx->sc = invoke->sc;
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->apps = me;
	init_completion(&ctx->work);
	spin_lock(&clst->hlock);
	hlist_add_head(&ctx->hn, &clst->pending);
	spin_unlock(&clst->hlock);

	*po = ctx;
bail:
	if (ctx && err)
		kfree(ctx);
	return err;
}
380
/*
 * Move a context from the pending to the interrupted list, preserving it
 * so the same thread can resume the wait on a retried invoke.
 */
static void context_save_interrupted(struct smq_invoke_ctx *ctx)
{
	struct smq_context_list *clst = &ctx->apps->clst;
	spin_lock(&clst->hlock);
	hlist_del(&ctx->hn);
	hlist_add_head(&ctx->hn, &clst->interrupted);
	spin_unlock(&clst->hlock);
}
389
Mitchel Humpherysb9a56682013-12-12 14:27:45 -0800390static void add_dev(struct fastrpc_apps *me, struct fastrpc_device *dev);
391
/*
 * Tear down an invocation context: release imported ION handles and the
 * SMMU attachment (if this call used it), free all overflow arg buffers,
 * return the borrowed device entry, unlink from whichever context list
 * holds it, and free the context.  `lock` selects whether the list lock
 * is taken here (false when the caller already holds it).
 */
static void context_free(struct smq_invoke_ctx *ctx, bool lock)
{
	struct smq_context_list *clst = &ctx->apps->clst;
	struct fastrpc_apps *apps = ctx->apps;
	struct ion_client *clnt = apps->iclient;
	struct fastrpc_smmu *smmu = &apps->smmu;
	struct fastrpc_buf *b;
	int i, bufs;
	if (ctx->smmu) {
		bufs = REMOTE_SCALARS_INBUFS(ctx->sc) +
			REMOTE_SCALARS_OUTBUFS(ctx->sc);
		if (ctx->fds) {
			for (i = 0; i < bufs; i++)
				if (!IS_ERR_OR_NULL(ctx->handles[i])) {
					ion_unmap_iommu(clnt, ctx->handles[i],
							smmu->domain_id, 0);
					ion_free(clnt, ctx->handles[i]);
				}
		}
		iommu_detach_group(smmu->domain, smmu->group);
	}
	for (i = 0, b = ctx->abufs; i < ctx->nbufs; ++i, ++b)
		free_mem(b);

	kfree(ctx->abufs);
	if (ctx->dev) {
		/* return the device entry; keep its buf if we still use it */
		add_dev(apps, ctx->dev);
		if (ctx->obuf.handle != ctx->dev->buf.handle)
			free_mem(&ctx->obuf);
	}
	if (lock)
		spin_lock(&clst->hlock);
	hlist_del(&ctx->hn);
	if (lock)
		spin_unlock(&clst->hlock);
	kfree(ctx);
}
429
/* Record the remote return value and wake the thread waiting on ctx. */
static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
	ctx->retval = retval;
	complete(&ctx->work);
}
435
/*
 * Wake every waiter on both context lists; used when the SMD channel
 * goes away so no thread stays blocked forever.  Note retval is left
 * untouched here (waiters observe the channel failure separately).
 */
static void context_notify_all_users(struct smq_context_list *me)
{
	struct smq_invoke_ctx *ictx = NULL;
	struct hlist_node *pos, *n;
	spin_lock(&me->hlock);
	hlist_for_each_entry_safe(ictx, pos, n, &me->pending, hn) {
		complete(&ictx->work);
	}
	hlist_for_each_entry_safe(ictx, pos, n, &me->interrupted, hn) {
		complete(&ictx->work);
	}
	spin_unlock(&me->hlock);

}
450
451static void context_list_ctor(struct smq_context_list *me)
452{
453 INIT_HLIST_HEAD(&me->interrupted);
454 INIT_HLIST_HEAD(&me->pending);
455 spin_lock_init(&me->hlock);
456}
457
/*
 * Free every remaining context on both lists during driver teardown.
 * The list lock is held across the walk, so context_free is called with
 * lock=0 to avoid re-taking it.
 * NOTE(review): context_free() performs ION frees and IOMMU detach while
 * this spinlock is held — presumably safe only if those paths never
 * sleep here; confirm against the ION/IOMMU implementations.
 */
static void context_list_dtor(struct fastrpc_apps *me,
				struct smq_context_list *clst)
{
	struct smq_invoke_ctx *ictx = NULL;
	struct hlist_node *pos, *n;
	spin_lock(&clst->hlock);
	hlist_for_each_entry_safe(ictx, pos, n, &clst->interrupted, hn) {
		context_free(ictx, 0);
	}
	hlist_for_each_entry_safe(ictx, pos, n, &clst->pending, hn) {
		context_free(ictx, 0);
	}
	spin_unlock(&clst->hlock);
}
472
Akhila Musunuri2d4ee212014-11-06 18:24:34 +0530473static int get_page_list(uint32_t kernel, struct smq_invoke_ctx *ctx)
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700474{
Akhila Musunuri2d4ee212014-11-06 18:24:34 +0530475 struct fastrpc_apps *me = &gfa;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700476 struct smq_phy_page *pgstart, *pages;
477 struct smq_invoke_buf *list;
Akhila Musunuri2d4ee212014-11-06 18:24:34 +0530478 struct fastrpc_buf *ibuf = &ctx->dev->buf;
479 struct fastrpc_buf *obuf = &ctx->obuf;
480 remote_arg_t *pra = ctx->pra;
481 uint32_t sc = ctx->sc;
Dennis Caglec1a72df2018-04-13 11:49:03 -0700482 size_t rlen;
483 int i, err = 0;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700484 int inbufs = REMOTE_SCALARS_INBUFS(sc);
485 int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
486
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700487 LOCK_MMAP(kernel);
488 *obuf = *ibuf;
489 retry:
490 list = smq_invoke_buf_start((remote_arg_t *)obuf->virt, sc);
491 pgstart = smq_phy_page_start(sc, list);
492 pages = pgstart + 1;
493 rlen = obuf->size - ((uint32_t)pages - (uint32_t)obuf->virt);
494 if (rlen < 0) {
495 rlen = ((uint32_t)pages - (uint32_t)obuf->virt) - obuf->size;
496 obuf->size += buf_page_size(rlen);
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700497 VERIFY(err, 0 == alloc_mem(obuf));
498 if (err)
499 goto bail;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700500 goto retry;
501 }
502 pgstart->addr = obuf->phys;
503 pgstart->size = obuf->size;
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700504 for (i = 0; i < inbufs + outbufs; ++i) {
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700505 void *buf;
Sathish Ambleyb0d636f2017-02-20 15:45:20 +0800506 int num;
Dennis Caglec1a72df2018-04-13 11:49:03 -0700507 size_t len;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700508
Mitchel Humpherysf581c512012-10-19 11:29:36 -0700509 list[i].num = 0;
510 list[i].pgidx = 0;
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700511 len = pra[i].buf.len;
Mitchel Humpherysb03d70d2013-10-25 12:05:47 -0700512 VERIFY(err, len >= 0);
513 if (err)
514 goto bail;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700515 if (!len)
516 continue;
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700517 buf = pra[i].buf.pv;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700518 num = buf_num_pages(buf, len);
Akhila Musunuri2d4ee212014-11-06 18:24:34 +0530519 if (!kernel) {
520 if (me->smmu.enabled) {
521 VERIFY(err, 0 != access_ok(i >= inbufs ?
522 VERIFY_WRITE : VERIFY_READ,
523 (void __user *)buf, len));
524 if (err)
525 goto bail;
526 if (ctx->fds && (ctx->fds[i] >= 0))
527 list[i].num = 1;
528 } else {
529 list[i].num = buf_get_pages(buf, len, num,
530 i >= inbufs, pages,
531 rlen / sizeof(*pages));
532 }
533 }
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700534 VERIFY(err, list[i].num >= 0);
535 if (err)
536 goto bail;
537 if (list[i].num) {
538 list[i].pgidx = pages - pgstart;
539 pages = pages + list[i].num;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700540 } else if (rlen > sizeof(*pages)) {
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700541 list[i].pgidx = pages - pgstart;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700542 pages = pages + 1;
543 } else {
544 if (obuf->handle != ibuf->handle)
545 free_mem(obuf);
546 obuf->size += buf_page_size(sizeof(*pages));
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700547 VERIFY(err, 0 == alloc_mem(obuf));
548 if (err)
549 goto bail;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700550 goto retry;
551 }
552 rlen = obuf->size - ((uint32_t) pages - (uint32_t) obuf->virt);
553 }
554 obuf->used = obuf->size - rlen;
555 bail:
556 if (err && (obuf->handle != ibuf->handle))
557 free_mem(obuf);
558 UNLOCK_MMAP(kernel);
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700559 return err;
560}
561
/*
 * Marshal the caller's arguments into DSP-visible memory, completing the
 * page table started by get_page_list.  Three cases per buffer:
 *  1) SMMU + valid fd: import the ION buffer, map it into the DSP IOMMU
 *     domain, and point the reserved page entry at the IOVA (zero copy).
 *  2) non-SMMU contiguous run already recorded: pass the user pointer
 *     through unchanged (zero copy).
 *  3) otherwise: copy the data into obuf (or a freshly allocated overflow
 *     buffer when obuf runs out), aligned to BALIGN.
 * Input buffers are cache-flushed so the DSP sees the data.  In-handles
 * are copied after the buffers.  Overflow buffers are always handed to
 * ctx (abufs/nbufs), even on error, so context_free can release them.
 */
static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
			remote_arg_t *upra)
{
	struct fastrpc_apps *me = &gfa;
	struct smq_invoke_buf *list;
	struct fastrpc_buf *pbuf = &ctx->obuf, *obufs = NULL;
	struct smq_phy_page *pages;
	struct vm_area_struct *vma;
	struct ion_handle **handles = ctx->handles;
	void *args;
	remote_arg_t *pra = ctx->pra;
	remote_arg_t *rpra = ctx->rpra;
	uint32_t sc = ctx->sc, start;
	size_t rlen, used, size;
	int i, inh, bufs = 0, err = 0;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	int *fds = ctx->fds, idx, num;
	unsigned long len;
	ion_phys_addr_t iova;

	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	used = ALIGN(pbuf->used, BALIGN);
	args = (void *)((char *)pbuf->virt + used);
	rlen = pbuf->size - used;
	for (i = 0; i < inbufs + outbufs; ++i) {

		rpra[i].buf.len = pra[i].buf.len;
		if (!rpra[i].buf.len)
			continue;
		if (me->smmu.enabled && fds && (fds[i] >= 0)) {
			/* case 1: fd-backed, map via IOMMU (zero copy) */
			start = buf_page_start(pra[i].buf.pv);
			len = buf_page_size(pra[i].buf.len);
			num = buf_num_pages(pra[i].buf.pv, pra[i].buf.len);
			idx = list[i].pgidx;
			handles[i] = ion_import_dma_buf(me->iclient, fds[i]);
			VERIFY(err, 0 == IS_ERR_OR_NULL(handles[i]));
			if (err)
				goto bail;
			VERIFY(err, 0 == ion_map_iommu(me->iclient, handles[i],
					me->smmu.domain_id, 0, SZ_4K, 0,
					&iova, &len, 0, 0));
			if (err)
				goto bail;
			/* mapped length must cover the whole buffer */
			VERIFY(err, (num << PAGE_SHIFT) <= len);
			if (err)
				goto bail;
			VERIFY(err, 0 != (vma = find_vma(current->mm, start)));
			if (err)
				goto bail;
			rpra[i].buf.pv = pra[i].buf.pv;
			/* IOVA offset mirrors the buffer's offset in its VMA */
			pages[idx].addr = iova + (start - vma->vm_start);
			pages[idx].size = num << PAGE_SHIFT;
			continue;
		} else if (list[i].num) {
			/* case 2: contiguous pages already recorded */
			rpra[i].buf.pv = pra[i].buf.pv;
			continue;
		}
		/* case 3: copy into obuf / overflow buffers */
		if (rlen < pra[i].buf.len) {
			struct fastrpc_buf *b;
			pbuf->used = pbuf->size - rlen;
			VERIFY(err, 0 != (b = krealloc(obufs,
				(bufs + 1) * sizeof(*obufs), GFP_KERNEL)));
			if (err)
				goto bail;
			obufs = b;
			pbuf = obufs + bufs;
			pbuf->size = buf_num_pages(0, pra[i].buf.len) *
								PAGE_SIZE;
			VERIFY(err, 0 == alloc_mem(pbuf));
			if (err)
				goto bail;
			bufs++;
			args = pbuf->virt;
			rlen = pbuf->size;
		}
		list[i].num = 1;
		pages[list[i].pgidx].addr =
			buf_page_start((void *)(pbuf->phys +
						(pbuf->size - rlen)));
		pages[list[i].pgidx].size =
			buf_page_size(pra[i].buf.len);
		if (i < inbufs) {
			if (!kernel) {
				VERIFY(err, 0 == copy_from_user(args,
					pra[i].buf.pv, pra[i].buf.len));
				if (err)
					goto bail;
			} else {
				memmove(args, pra[i].buf.pv, pra[i].buf.len);
			}
		}
		rpra[i].buf.pv = args;
		args = (void *)((char *)args + ALIGN(pra[i].buf.len, BALIGN));
		rlen -= ALIGN(pra[i].buf.len, BALIGN);
	}
	/* make input data visible to the DSP */
	for (i = 0; i < inbufs; ++i) {
		if (rpra[i].buf.len)
			dmac_flush_range(rpra[i].buf.pv,
				(char *)rpra[i].buf.pv + rpra[i].buf.len);
	}
	pbuf->used = pbuf->size - rlen;
	size = sizeof(*rpra) * REMOTE_SCALARS_INHANDLES(sc);
	if (size) {
		inh = inbufs + outbufs;
		if (!kernel) {
			VERIFY(err, 0 == copy_from_user(&rpra[inh], &upra[inh],
							size));
			if (err)
				goto bail;
		} else {
			memmove(&rpra[inh], &upra[inh], size);
		}
	}
	/* flush the marshaling metadata itself */
	dmac_flush_range(rpra, (char *)rpra + used);
 bail:
	ctx->abufs = obufs;
	ctx->nbufs = bufs;
	return err;
}
683
684static int put_args(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
685 remote_arg_t *rpra, remote_arg_t *upra)
686{
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700687 int i, inbufs, outbufs, outh, size;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700688 int err = 0;
689
690 inbufs = REMOTE_SCALARS_INBUFS(sc);
691 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700692 for (i = inbufs; i < inbufs + outbufs; ++i) {
693 if (rpra[i].buf.pv != pra[i].buf.pv) {
Mitchel Humpherys0d99a792013-03-05 13:41:14 -0800694 if (!kernel) {
695 VERIFY(err, 0 == copy_to_user(pra[i].buf.pv,
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700696 rpra[i].buf.pv, rpra[i].buf.len));
Mitchel Humpherys0d99a792013-03-05 13:41:14 -0800697 if (err)
698 goto bail;
699 } else {
700 memmove(pra[i].buf.pv, rpra[i].buf.pv,
701 rpra[i].buf.len);
702 }
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700703 }
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700704 }
705 size = sizeof(*rpra) * REMOTE_SCALARS_OUTHANDLES(sc);
706 if (size) {
707 outh = inbufs + outbufs + REMOTE_SCALARS_INHANDLES(sc);
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700708 if (!kernel) {
709 VERIFY(err, 0 == copy_to_user(&upra[outh], &rpra[outh],
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700710 size));
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700711 if (err)
712 goto bail;
713 } else {
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700714 memmove(&upra[outh], &rpra[outh], size);
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700715 }
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700716 }
717 bail:
718 return err;
719}
720
/*
 * Before the DSP writes output buffers, flush any dirty cache lines that
 * straddle an output buffer's unaligned start or end.  This prevents a
 * later writeback of a partially-shared cache line from clobbering data
 * the DSP wrote into the neighboring bytes.
 */
static void inv_args_pre(uint32_t sc, remote_arg_t *rpra)
{
	int i, inbufs, outbufs;
	uint32_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!rpra[i].buf.len)
			continue;
		/* buffers sharing the metadata page are handled with rpra */
		if (buf_page_start(rpra) == buf_page_start(rpra[i].buf.pv))
			continue;
		if (!IS_CACHE_ALIGNED((uint32_t)rpra[i].buf.pv))
			dmac_flush_range(rpra[i].buf.pv,
				(char *)rpra[i].buf.pv + 1);
		end = (uint32_t)rpra[i].buf.pv + rpra[i].buf.len;
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}
742
/*
 * After the DSP responds, invalidate the CPU cache over every output
 * buffer so the CPU reads the DSP's data.  Buffers that live in the same
 * page as the metadata are covered by the final rpra-range invalidate.
 */
static void inv_args(uint32_t sc, remote_arg_t *rpra, int used)
{
	int i, inbufs, outbufs;
	int inv = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (buf_page_start(rpra) == buf_page_start(rpra[i].buf.pv))
			inv = 1;
		else if (rpra[i].buf.len)
			dmac_inv_range(rpra[i].buf.pv,
				(char *)rpra[i].buf.pv + rpra[i].buf.len);
	}

	/* metadata page also holds out-handles the DSP may have written */
	if (inv || REMOTE_SCALARS_OUTHANDLES(sc))
		dmac_inv_range(rpra, (char *)rpra + used);
}
761
/*
 * Send the invoke message for ctx over the SMD channel.  Kernel-origin
 * calls use pid 0 so the DSP treats them specially.  The write is
 * serialized by wrlock; a short write is reported as a VERIFY error.
 */
static int fastrpc_invoke_send(struct fastrpc_apps *me,
				 uint32_t kernel, uint32_t handle,
				 uint32_t sc, struct smq_invoke_ctx *ctx,
				 struct fastrpc_buf *buf)
{
	struct smq_msg msg;
	int err = 0, len;
	msg.pid = current->tgid;
	msg.tid = current->pid;
	if (kernel)
		msg.pid = 0;
	msg.invoke.header.ctx = ctx;
	msg.invoke.header.handle = handle;
	msg.invoke.header.sc = sc;
	msg.invoke.page.addr = buf->phys;
	msg.invoke.page.size = buf_page_size(buf->used);
	spin_lock(&me->wrlock);
	len = smd_write(me->chan, &msg, sizeof(msg));
	spin_unlock(&me->wrlock);
	VERIFY(err, len == sizeof(msg));
	return err;
}
784
/*
 * Tear down global driver state: close the SMD channel and destroy the
 * ION client created in fastrpc_init().  Called from module init error
 * unwinding and module exit.  NOTE(review): relies on smd_close()
 * tolerating a NULL channel when the channel was never opened — confirm
 * against the SMD implementation in use.
 */
static void fastrpc_deinit(void)
{
	struct fastrpc_apps *me = &gfa;

	smd_close(me->chan);
	ion_client_destroy(me->iclient);
	me->iclient = 0;
	me->chan = 0;
}
794
/*
 * Drain all pending responses from the SMD channel (runs from the SMD
 * DATA event callback).  Each response carries the context pointer we
 * sent in fastrpc_invoke_send(); completing it wakes the waiter in
 * fastrpc_internal_invoke().  Stops when a full response is no longer
 * available.
 */
static void fastrpc_read_handler(void)
{
	struct fastrpc_apps *me = &gfa;
	struct smq_invoke_rsp rsp;
	int err = 0;

	do {
		/* a short read means no complete response is queued */
		VERIFY(err, sizeof(rsp) ==
			smd_read_from_cb(me->chan, &rsp, sizeof(rsp)));
		if (err)
			goto bail;
		context_notify_user(rsp.ctx, rsp.retval);
	} while (!err);
 bail:
	return;
}
811
812static void smd_event_handler(void *priv, unsigned event)
813{
814 struct fastrpc_apps *me = (struct fastrpc_apps *)priv;
815
816 switch (event) {
817 case SMD_EVENT_OPEN:
818 complete(&(me->work));
819 break;
820 case SMD_EVENT_CLOSE:
821 context_notify_all_users(&me->clst);
822 break;
823 case SMD_EVENT_DATA:
824 fastrpc_read_handler();
825 break;
826 }
827}
828
/*
 * One-time global initialization: locks, completion, context list, the
 * per-tgid device hash table, the ION client, and (if the device tree
 * advertises it) the LPASS SMMU domain used to map user buffers for the
 * DSP.  Idempotent via the me->chan == 0 guard; the SMD channel itself
 * is opened lazily in fastrpc_device_open().
 *
 * Returns 0 on success, non-zero if the ION client could not be created.
 * SMMU discovery failures are not fatal: me->smmu.enabled simply stays
 * false and the driver falls back to physical page lists.
 */
static int fastrpc_init(void)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device_node *node;
	bool enabled = 0;

	if (me->chan == 0) {
		int i;
		spin_lock_init(&me->hlock);
		spin_lock_init(&me->wrlock);
		init_completion(&me->work);
		mutex_init(&me->smd_mutex);
		context_list_ctor(&me->clst);
		for (i = 0; i < RPC_HASH_SZ; ++i)
			INIT_HLIST_HEAD(&me->htbl[i]);
		me->iclient = msm_ion_client_create(ION_HEAP_CARVEOUT_MASK,
							DEVICE_NAME);
		VERIFY(err, 0 == IS_ERR_OR_NULL(me->iclient));
		if (err)
			goto bail;
		/* SMMU is opt-in via the audio-ion device tree node */
		node = of_find_compatible_node(NULL, NULL,
						"qcom,msm-audio-ion");
		if (node)
			enabled = of_property_read_bool(node,
						"qcom,smmu-enabled");
		if (enabled)
			me->smmu.group = iommu_group_find("lpass_audio");
		if (me->smmu.group)
			me->smmu.domain = iommu_group_get_iommudata(
							me->smmu.group);
		if (!IS_ERR_OR_NULL(me->smmu.domain)) {
			me->smmu.domain_id = msm_find_domain_no(
							me->smmu.domain);
			/* only enable if a valid domain number was found */
			if (me->smmu.domain_id >= 0)
				me->smmu.enabled = enabled;
		}
	}

	return 0;

bail:
	return err;
}
873
874static void free_dev(struct fastrpc_device *dev)
875{
876 if (dev) {
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700877 free_mem(&dev->buf);
878 kfree(dev);
Mitchel Humpherys6b4c68c2013-02-06 12:03:20 -0800879 module_put(THIS_MODULE);
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700880 }
881}
882
883static int alloc_dev(struct fastrpc_device **dev)
884{
885 int err = 0;
Dennis Caglec1a72df2018-04-13 11:49:03 -0700886 struct fastrpc_device *fd = NULL;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700887
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700888 VERIFY(err, 0 != try_module_get(THIS_MODULE));
889 if (err)
890 goto bail;
891 VERIFY(err, 0 != (fd = kzalloc(sizeof(*fd), GFP_KERNEL)));
892 if (err)
893 goto bail;
Mitchel Humpherys0d99a792013-03-05 13:41:14 -0800894
895 INIT_HLIST_NODE(&fd->hn);
896
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700897 fd->buf.size = PAGE_SIZE;
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700898 VERIFY(err, 0 == alloc_mem(&fd->buf));
899 if (err)
900 goto bail;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700901 fd->tgid = current->tgid;
Mitchel Humpherys0d99a792013-03-05 13:41:14 -0800902
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700903 *dev = fd;
904 bail:
905 if (err)
906 free_dev(fd);
907 return err;
908}
909
/*
 * Fetch (and detach) the cached per-tgid device for the current process
 * from the hash table, or allocate a fresh one if none is cached.  The
 * entry is removed under me->hlock so no other thread can hand out the
 * same device; callers return it via add_dev() when done.
 *
 * Returns 0 with *rdev set on success, or the alloc_dev() error.
 */
static int get_dev(struct fastrpc_apps *me, struct fastrpc_device **rdev)
{
	struct hlist_head *head;
	struct fastrpc_device *dev = NULL, *devfree = NULL;
	struct hlist_node *pos, *n;
	uint32_t h = hash_32(current->tgid, RPC_HASH_BITS);
	int err = 0;

	spin_lock(&me->hlock);
	head = &me->htbl[h];
	hlist_for_each_entry_safe(dev, pos, n, head, hn) {
		if (dev->tgid == current->tgid) {
			/* detach so the device is exclusively ours */
			hlist_del(&dev->hn);
			devfree = dev;
			break;
		}
	}
	spin_unlock(&me->hlock);
	VERIFY(err, devfree != 0);
	if (err)
		goto bail;
	*rdev = devfree;
 bail:
	if (err) {
		/* cache miss: devfree is NULL (free_dev is a no-op), allocate */
		free_dev(devfree);
		err = alloc_dev(rdev);
	}
	return err;
}
939
940static void add_dev(struct fastrpc_apps *me, struct fastrpc_device *dev)
941{
942 struct hlist_head *head;
943 uint32_t h = hash_32(current->tgid, RPC_HASH_BITS);
944
945 spin_lock(&me->hlock);
946 head = &me->htbl[h];
947 hlist_add_head(&dev->hn, head);
948 spin_unlock(&me->hlock);
949 return;
950}
951
952static int fastrpc_release_current_dsp_process(void);
953
/*
 * Core remote-invocation path shared by user ioctls and kernel callers.
 *
 * @mode:     FASTRPC_MODE_PARALLEL or FASTRPC_MODE_SERIAL; controls when
 *            the output-cache invalidate is performed relative to send
 * @kernel:   non-zero for kernel-originated calls (trusted arguments,
 *            uninterruptible wait)
 * @invokefd: handle, scalars, argument array and optional fd list
 *
 * User calls may not target the kernel static handle.  An invocation
 * interrupted by a signal is parked via context_save_interrupted() and
 * resumed on the next identical call (context_restore_interrupted).
 */
static int fastrpc_internal_invoke(struct fastrpc_apps *me, uint32_t mode,
			uint32_t kernel,
			struct fastrpc_ioctl_invoke_fd *invokefd)
{
	struct smq_invoke_ctx *ctx = NULL;
	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;
	int interrupted = 0;
	int err = 0;

	if (!kernel) {
		/* block userspace from spoofing kernel-to-DSP management RPCs */
		VERIFY(err, invoke->handle != FASTRPC_STATIC_HANDLE_KERNEL);
		if (err) {
			pr_err("adsprpc: ERROR: %s: user application %s trying to send a kernel RPC message",
				__func__, current->comm);
			goto bail;
		}
	}

	if (!kernel) {
		/* resume a previously signal-interrupted invocation, if any */
		VERIFY(err, 0 == context_restore_interrupted(me, invokefd,
							&ctx));
		if (err)
			goto bail;
		if (ctx)
			goto wait;
	}

	VERIFY(err, 0 == context_alloc(me, kernel, invokefd, &ctx));
	if (err)
		goto bail;

	if (me->smmu.enabled) {
		/* attach for the duration of the call; ctx->smmu tells
		 * context teardown to detach */
		VERIFY(err, 0 == iommu_attach_group(me->smmu.domain,
						me->smmu.group));
		if (err)
			goto bail;
		ctx->smmu = 1;
	}
	/* calls with no buffers/handles skip argument marshalling entirely */
	if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
		VERIFY(err, 0 == get_dev(me, &ctx->dev));
		if (err)
			goto bail;
		VERIFY(err, 0 == get_page_list(kernel, ctx));
		if (err)
			goto bail;
		ctx->rpra = (remote_arg_t *)ctx->obuf.virt;
		VERIFY(err, 0 == get_args(kernel, ctx, invoke->pra));
		if (err)
			goto bail;
	}

	/* clean unaligned edges of output buffers before the DSP writes */
	inv_args_pre(ctx->sc, ctx->rpra);
	/* serial mode invalidates before send; parallel overlaps it with
	 * the DSP's execution (done after send, below) */
	if (FASTRPC_MODE_SERIAL == mode)
		inv_args(ctx->sc, ctx->rpra, ctx->obuf.used);
	VERIFY(err, 0 == fastrpc_invoke_send(me, kernel, invoke->handle,
						ctx->sc, ctx, &ctx->obuf));
	if (err)
		goto bail;
	if (FASTRPC_MODE_PARALLEL == mode)
		inv_args(ctx->sc, ctx->rpra, ctx->obuf.used);
 wait:
	if (kernel)
		wait_for_completion(&ctx->work);
	else {
		interrupted = wait_for_completion_interruptible(&ctx->work);
		VERIFY(err, 0 == (err = interrupted));
		if (err)
			goto bail;	/* -ERESTARTSYS: parked in bail below */
	}
	VERIFY(err, 0 == (err = ctx->retval));
	if (err)
		goto bail;
	/* copy output buffers back to the caller's remote_arg array */
	VERIFY(err, 0 == put_args(kernel, ctx->sc, ctx->pra, ctx->rpra,
				invoke->pra));
	if (err)
		goto bail;
 bail:
	/* keep the context alive across a signal so it can be resumed */
	if (ctx && interrupted == -ERESTARTSYS)
		context_save_interrupted(ctx);
	else if (ctx)
		context_free(ctx, 1);
	return err;
}
1037
/*
 * Tell the DSP to create a remote process shadowing the current Linux
 * process.  Marshals the caller's tgid as the single input argument and
 * issues kernel-handle method 0 (scalars: 0 in, 1 inbuf, 0 outbufs).
 * Called once per fd in fastrpc_device_open().
 */
static int fastrpc_create_current_dsp_process(void)
{
	int err = 0;
	struct fastrpc_ioctl_invoke_fd ioctl;
	struct fastrpc_apps *me = &gfa;
	remote_arg_t ra[1];
	int tgid = 0;

	tgid = current->tgid;
	ra[0].buf.pv = &tgid;
	ra[0].buf.len = sizeof(tgid);
	ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
	ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(me,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	return err;
}
1057
/*
 * Tell the DSP to tear down the remote process for the current tgid
 * (kernel-handle method 1, same single-inbuf marshalling as create).
 * Called best-effort from fastrpc_device_release().
 */
static int fastrpc_release_current_dsp_process(void)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_ioctl_invoke_fd ioctl;
	remote_arg_t ra[1];
	int tgid = 0;

	tgid = current->tgid;
	ra[0].buf.pv = &tgid;
	ra[0].buf.len = sizeof(tgid);
	ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
	ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(me,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	return err;
}
1077
1078static int fastrpc_mmap_on_dsp(struct fastrpc_apps *me,
1079 struct fastrpc_ioctl_mmap *mmap,
1080 struct smq_phy_page *pages,
1081 int num)
1082{
Mitchel Humpherysb9a56682013-12-12 14:27:45 -08001083 struct fastrpc_ioctl_invoke_fd ioctl;
Mitchel Humpherys0d99a792013-03-05 13:41:14 -08001084 remote_arg_t ra[3];
1085 int err = 0;
1086 struct {
1087 int pid;
1088 uint32_t flags;
1089 uint32_t vaddrin;
1090 int num;
1091 } inargs;
1092
1093 struct {
1094 uint32_t vaddrout;
1095 } routargs;
1096 inargs.pid = current->tgid;
1097 inargs.vaddrin = mmap->vaddrin;
1098 inargs.flags = mmap->flags;
1099 inargs.num = num;
1100 ra[0].buf.pv = &inargs;
1101 ra[0].buf.len = sizeof(inargs);
1102
1103 ra[1].buf.pv = pages;
1104 ra[1].buf.len = num * sizeof(*pages);
1105
1106 ra[2].buf.pv = &routargs;
1107 ra[2].buf.len = sizeof(routargs);
1108
Tharun Kumar Merugu7999c412019-02-02 01:22:47 +05301109 ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
Mitchel Humpherysb9a56682013-12-12 14:27:45 -08001110 ioctl.inv.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
1111 ioctl.inv.pra = ra;
1112 ioctl.fds = 0;
1113 VERIFY(err, 0 == (err = fastrpc_internal_invoke(me,
1114 FASTRPC_MODE_PARALLEL, 1, &ioctl)));
Mitchel Humpherys0d99a792013-03-05 13:41:14 -08001115 mmap->vaddrout = routargs.vaddrout;
1116 if (err)
1117 goto bail;
1118bail:
1119 return err;
1120}
1121
/*
 * Ask the DSP to unmap a region previously mapped by
 * fastrpc_mmap_on_dsp() (kernel-handle method 3, one input buffer
 * carrying pid, remote address and size).  Returns the invoke result.
 */
static int fastrpc_munmap_on_dsp(struct fastrpc_apps *me,
				 struct fastrpc_ioctl_munmap *munmap)
{
	struct fastrpc_ioctl_invoke_fd ioctl;
	remote_arg_t ra[1];
	int err = 0;
	struct {
		int pid;
		uint32_t vaddrout;
		size_t size;
	} inargs;

	inargs.pid = current->tgid;
	inargs.size = munmap->size;
	inargs.vaddrout = munmap->vaddrout;
	ra[0].buf.pv = &inargs;
	ra[0].buf.len = sizeof(inargs);

	ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
	ioctl.inv.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
	ioctl.inv.pra = ra;
	ioctl.fds = 0;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(me,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	return err;
}
1148
1149static int fastrpc_internal_munmap(struct fastrpc_apps *me,
1150 struct file_data *fdata,
1151 struct fastrpc_ioctl_munmap *munmap)
1152{
1153 int err = 0;
1154 struct fastrpc_mmap *map = 0, *mapfree = 0;
1155 struct hlist_node *pos, *n;
Tharun Kumar Merugubf51ef82018-01-02 11:42:45 +05301156
1157 mutex_lock(&fdata->map_mutex);
Mitchel Humpherys0d99a792013-03-05 13:41:14 -08001158 VERIFY(err, 0 == (err = fastrpc_munmap_on_dsp(me, munmap)));
1159 if (err)
1160 goto bail;
Vamsi krishna Gattupalli9ace8232020-12-14 11:06:27 +05301161 VERIFY(err, map != NULL);
1162 if (err) {
1163 err = -EINVAL;
1164 goto bail;
1165 }
Mitchel Humpherys0d99a792013-03-05 13:41:14 -08001166 spin_lock(&fdata->hlock);
1167 hlist_for_each_entry_safe(map, pos, n, &fdata->hlst, hn) {
1168 if (map->vaddrout == munmap->vaddrout &&
1169 map->size == munmap->size) {
1170 hlist_del(&map->hn);
1171 mapfree = map;
1172 map = 0;
1173 break;
1174 }
1175 }
1176 spin_unlock(&fdata->hlock);
1177bail:
1178 if (mapfree) {
1179 free_map(mapfree);
1180 kfree(mapfree);
1181 }
Tharun Kumar Merugubf51ef82018-01-02 11:42:45 +05301182 mutex_unlock(&fdata->map_mutex);
Mitchel Humpherys0d99a792013-03-05 13:41:14 -08001183 return err;
1184}
1185
1186
/*
 * Handle FASTRPC_IOCTL_MMAP: import the caller's ION buffer (mmap->fd),
 * build a physical page list — either a single IOMMU mapping when the
 * SMMU is enabled, or a pinned page list otherwise — hand it to the DSP
 * via fastrpc_mmap_on_dsp(), and record the mapping on this fd's list
 * so munmap/release can find it.  Serialized by fdata->map_mutex.
 *
 * On any failure free_map()/kfree() undo whatever was set up; the page
 * list scratch buffer is always freed.
 */
static int fastrpc_internal_mmap(struct fastrpc_apps *me,
				 struct file_data *fdata,
				 struct fastrpc_ioctl_mmap *mmap)
{
	struct ion_client *clnt = gfa.iclient;
	struct fastrpc_mmap *map = NULL;
	struct smq_phy_page *pages = NULL;
	void *buf;
	unsigned long len;
	int num;
	int err = 0;

	mutex_lock(&fdata->map_mutex);
	VERIFY(err, 0 != (map = kzalloc(sizeof(*map), GFP_KERNEL)));
	if (err)
		goto bail;
	/* take a reference on the dma-buf behind the user's ion fd */
	map->handle = ion_import_dma_buf(clnt, mmap->fd);
	VERIFY(err, 0 == IS_ERR_OR_NULL(map->handle));
	if (err)
		goto bail;
	map->virt = ion_map_kernel(clnt, map->handle);
	VERIFY(err, 0 == IS_ERR_OR_NULL(map->virt));
	if (err)
		goto bail;
	buf = (void *)mmap->vaddrin;
	len = mmap->size;
	num = buf_num_pages(buf, len);
	VERIFY(err, 0 != (pages = kzalloc(num * sizeof(*pages), GFP_KERNEL)));
	if (err)
		goto bail;

	if (me->smmu.enabled) {
		/* SMMU path: one contiguous IOVA range covers the buffer */
		VERIFY(err, 0 == ion_map_iommu(clnt, map->handle,
				me->smmu.domain_id, 0,
				SZ_4K, 0, &map->phys, &len, 0, 0));
		if (err)
			goto bail;
		pages->addr = map->phys;
		pages->size = len;
		num = 1;
	} else {
		/* no SMMU: describe the buffer as physical page runs */
		VERIFY(err, 0 < (num = buf_get_pages(buf, len, num, 1,
							pages, num)));
		if (err)
			goto bail;
	}

	VERIFY(err, 0 == fastrpc_mmap_on_dsp(me, mmap, pages, num));
	if (err)
		goto bail;
	/* remember the mapping so munmap/release can undo it */
	map->vaddrin = mmap->vaddrin;
	map->vaddrout = mmap->vaddrout;
	map->size = mmap->size;
	INIT_HLIST_NODE(&map->hn);
	spin_lock(&fdata->hlock);
	hlist_add_head(&map->hn, &fdata->hlst);
	spin_unlock(&fdata->hlock);
 bail:
	if (err && map) {
		free_map(map);
		kfree(map);
	}
	kfree(pages);	/* scratch only; the DSP has its own copy */
	mutex_unlock(&fdata->map_mutex);
	return err;
}
1253
/*
 * Remove and free every cached fastrpc_device belonging to the current
 * tgid.  The hash bucket is searched under me->hlock, but the entry is
 * freed outside the lock (free_dev may sleep), so the loop restarts via
 * the rnext label until no matching entry remains.
 */
static void cleanup_current_dev(void)
{
	struct fastrpc_apps *me = &gfa;
	uint32_t h = hash_32(current->tgid, RPC_HASH_BITS);
	struct hlist_head *head;
	struct hlist_node *pos, *n;
	struct fastrpc_device *dev, *devfree;

 rnext:
	devfree = dev = NULL;
	spin_lock(&me->hlock);
	head = &me->htbl[h];
	hlist_for_each_entry_safe(dev, pos, n, head, hn) {
		if (dev->tgid == current->tgid) {
			hlist_del(&dev->hn);
			devfree = dev;
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (devfree) {
		free_dev(devfree);
		goto rnext;	/* a process may have cached several devices */
	}
	return;
}
1280
/*
 * kref release callback: closes the shared SMD channel when the last
 * opener goes away.  Invoked by kref_put_mutex() with me->smd_mutex
 * HELD, hence the unlock here — do not call this directly.
 */
static void fastrpc_channel_close(struct kref *kref)
{
	struct fastrpc_apps *me = &gfa;

	smd_close(me->chan);
	me->chan = 0;
	mutex_unlock(&me->smd_mutex);	/* taken by kref_put_mutex() */
	pr_info("'closed /dev/%s c %d 0'\n", DEVICE_NAME,
		MAJOR(me->dev_no));
}
1291
/*
 * fd release: best-effort teardown of the remote DSP process, cleanup of
 * the cached per-tgid device, then — if open fully succeeded and fdata
 * exists — freeing of every remaining mapping on this fd and dropping
 * the channel reference (the last put closes the SMD channel via
 * fastrpc_channel_close).
 */
static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	struct file_data *fdata = (struct file_data *)file->private_data;
	struct fastrpc_apps *me = &gfa;

	/* result intentionally ignored: release must not fail */
	(void)fastrpc_release_current_dsp_process();
	cleanup_current_dev();
	if (fdata) {
		struct fastrpc_mmap *map = NULL;
		struct hlist_node *n, *pos;
		file->private_data = 0;
		/* no lock needed: no other user of this fd remains */
		hlist_for_each_entry_safe(map, pos, n, &fdata->hlst, hn) {
			hlist_del(&map->hn);
			free_map(map);
			kfree(map);
		}
		mutex_destroy(&fdata->map_mutex);
		kfree(fdata);
		kref_put_mutex(&me->kref, fastrpc_channel_close,
				&me->smd_mutex);
	}
	return 0;
}
1315
/*
 * fd open: lazily opens the shared SMD channel to the aDSP on first use
 * (guarded by me->smd_mutex and refcounted by me->kref), then allocates
 * per-fd state and asks the DSP to create a shadow process.
 *
 * Channel setup waits for SMD_EVENT_OPEN (signalled through me->work by
 * smd_event_handler) with an RPC_TIMEOUT bound.  On per-fd setup failure
 * everything is unwound, including the channel reference taken above.
 * NOTE(review): if try_module_get() fails after the channel kref was
 * taken, that reference is not dropped here — confirm whether that path
 * is reachable in practice.
 */
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;

	mutex_lock(&me->smd_mutex);
	if (kref_get_unless_zero(&me->kref) == 0) {
		/* first opener: bring the channel up */
		VERIFY(err, 0 == smd_named_open_on_edge(FASTRPC_SMD_GUID,
						SMD_APPS_QDSP, &me->chan,
						me, smd_event_handler));
		if (err)
			goto smd_bail;
		VERIFY(err, 0 != wait_for_completion_timeout(&me->work,
							RPC_TIMEOUT));
		if (err)
			goto completion_bail;
		kref_init(&me->kref);
		pr_info("'opened /dev/%s c %d 0'\n", DEVICE_NAME,
			MAJOR(me->dev_no));
	}
	mutex_unlock(&me->smd_mutex);

	filp->private_data = NULL;
	if (0 != try_module_get(THIS_MODULE)) {
		struct file_data *fdata = NULL;
		/* This call will cause a dev to be created
		 * which will addref this module
		 */
		VERIFY(err, 0 != (fdata = kzalloc(sizeof(*fdata), GFP_KERNEL)));
		if (err)
			goto bail;

		spin_lock_init(&fdata->hlock);
		INIT_HLIST_HEAD(&fdata->hlst);

		/* create the shadow process on the DSP for this tgid */
		VERIFY(err, 0 == fastrpc_create_current_dsp_process());
		if (err)
			goto bail;
		mutex_init(&fdata->map_mutex);
		filp->private_data = fdata;
bail:
		if (err) {
			cleanup_current_dev();
			kfree(fdata);
			/* drop the channel ref taken for this opener */
			kref_put_mutex(&me->kref, fastrpc_channel_close,
					&me->smd_mutex);
		}
		module_put(THIS_MODULE);
	}
	return err;

completion_bail:
	/* channel opened but never signalled ready: close it again */
	smd_close(me->chan);
	me->chan = NULL;
smd_bail:
	mutex_unlock(&me->smd_mutex);
	return err;
}
1374
1375
/*
 * ioctl dispatcher for /dev/adsprpc-smd.
 *
 * INVOKE / INVOKE_FD: copy in the request (the plain INVOKE layout is a
 * prefix of INVOKE_FD, so one struct serves both) and run it as an
 * untrusted (kernel=0) invocation in the fd's configured mode.
 * MMAP / MUNMAP: copy the request in, perform the mapping operation,
 * and for MMAP copy the DSP-assigned address back out.
 * SETMODE: switch the fd between parallel and serial cache handling.
 * The hard-coded _IOC(...) case is a deliberate stub for
 * FASTRPC_IOCTL_CONTROL (see the inline comment below).
 */
static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
				 unsigned long ioctl_param)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_ioctl_invoke_fd invokefd;
	struct fastrpc_ioctl_mmap mmap;
	struct fastrpc_ioctl_munmap munmap;
	void *param = (char *)ioctl_param;
	struct file_data *fdata = (struct file_data *)file->private_data;
	int size = 0, err = 0;

	switch (ioctl_num) {
	case FASTRPC_IOCTL_INVOKE_FD:
	case FASTRPC_IOCTL_INVOKE:
		invokefd.fds = NULL;	/* plain INVOKE carries no fd list */
		size = (ioctl_num == FASTRPC_IOCTL_INVOKE) ?
				sizeof(invokefd.inv) : sizeof(invokefd);
		VERIFY(err, 0 == copy_from_user(&invokefd, param, size));
		if (err)
			goto bail;
		/* kernel=0: user-originated, subject to handle checks */
		VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, fdata->mode,
						0, &invokefd)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MMAP:
		VERIFY(err, 0 == copy_from_user(&mmap, param,
						sizeof(mmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_mmap(me, fdata,
							&mmap)));
		if (err)
			goto bail;
		/* return vaddrout (filled in by the DSP) to userspace */
		VERIFY(err, 0 == copy_to_user(param, &mmap, sizeof(mmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MUNMAP:
		VERIFY(err, 0 == copy_from_user(&munmap, param,
						sizeof(munmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_munmap(me, fdata,
							&munmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_SETMODE:
		switch ((uint32_t)ioctl_param) {
		case FASTRPC_MODE_PARALLEL:
		case FASTRPC_MODE_SERIAL:
			fdata->mode = (uint32_t)ioctl_param;
			break;
		default:
			err = -ENOTTY;
			break;
		}
		break;
	// Handle FASTRPC_IOCTL_CONTROL so a STS test passes, we don't have most functionality of this driver in 3.4
	// #define FASTRPC_IOCTL_CONTROL _IOWR('R', 12, struct fastrpc_ioctl_control)
	case _IOC(_IOC_READ|_IOC_WRITE, 'R', 12, 12):
		pr_info("adsprpc: FASTRPC_IOCTL_CONTROL is a stub!\n");
		break;
	default:
		err = -ENOTTY;
		break;
	}
 bail:
	return err;
}
1447
/* file operations for the /dev/adsprpc-smd character device */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
};
1453
/*
 * Module init: global driver state, then the character device plumbing
 * (chrdev region, cdev, class, device node).  Failures unwind in strict
 * reverse order through the ladder of labels below.
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	int err = 0;

	memset(me, 0, sizeof(*me));
	VERIFY(err, 0 == fastrpc_init());
	if (err)
		goto fastrpc_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, 1, DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0), 1));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "fastrpc");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->dev = device_create(me->class, NULL, MKDEV(MAJOR(me->dev_no), 0),
				NULL, DEVICE_NAME);
	VERIFY(err, !IS_ERR(me->dev));
	if (err)
		goto device_create_bail;

	return 0;

device_create_bail:
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, 1);
alloc_chrdev_bail:
	fastrpc_deinit();
fastrpc_bail:
	return err;
}
1494
/*
 * Module exit: destroy pending contexts, tear down global state, and
 * unwind the character device registration in reverse of init order.
 * NOTE(review): cleanup_current_dev() keys off current->tgid, which at
 * module-exit time is the unloading process, not a driver client —
 * confirm this call is intentional here.
 */
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;

	context_list_dtor(me, &me->clst);
	fastrpc_deinit();
	cleanup_current_dev();
	device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, 1);
}
1507
1508module_init(fastrpc_device_init);
1509module_exit(fastrpc_device_exit);
1510
1511MODULE_LICENSE("GPL v2");