blob: 4507f80dbc2db518d16c9469786378bef54c88c3 [file] [log] [blame]
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001/*
Mitchel Humpherys6b4c68c2013-02-06 12:03:20 -08002 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
Mitchel Humpherys6b4c68c2013-02-06 12:03:20 -080014#include "adsprpc_shared.h"
15
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <mach/msm_smd.h>
28#include <mach/ion.h>
Mitchel Humpherysef5bbbe2013-05-15 12:44:51 -070029#include <mach/iommu_domains.h>
Mitchel Humpherys42e806e2012-09-30 22:27:53 -070030#include <linux/scatterlist.h>
Mitchel Humpherys6b4c68c2013-02-06 12:03:20 -080031#include <linux/fs.h>
32#include <linux/uaccess.h>
33#include <linux/device.h>
Mitchel Humpherysef5bbbe2013-05-15 12:44:51 -070034#include <linux/of.h>
35#include <linux/iommu.h>
Mitchel Humpherys6b4c68c2013-02-06 12:03:20 -080036
37#ifndef ION_ADSPRPC_HEAP_ID
38#define ION_ADSPRPC_HEAP_ID ION_AUDIO_HEAP_ID
39#endif /*ION_ADSPRPC_HEAP_ID*/
40
/* Jiffies to wait for the SMD channel to open in fastrpc_init(). */
#define RPC_TIMEOUT (5 * HZ)
/* Per-tgid device hash table: 2^RPC_HASH_BITS buckets. */
#define RPC_HASH_BITS 5
#define RPC_HASH_SZ (1 << RPC_HASH_BITS)
/* Byte alignment used when packing argument payloads into buffers. */
#define BALIGN 32

/*
 * Take/release current->mm->mmap_sem for user-mode invocations only;
 * kernel-originated calls (kernel != 0) have no user mappings to pin.
 */
#define LOCK_MMAP(kernel)\
	do {\
		if (!kernel)\
			down_read(&current->mm->mmap_sem);\
	} while (0)

#define UNLOCK_MMAP(kernel)\
	do {\
		if (!kernel)\
			up_read(&current->mm->mmap_sem);\
	} while (0)


/* True when x sits on an L1 cache-line boundary. */
#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
60
Mitchel Humpherys6b4c68c2013-02-06 12:03:20 -080061static inline uint32_t buf_page_start(void *buf)
62{
63 uint32_t start = (uint32_t) buf & PAGE_MASK;
64 return start;
65}
66
67static inline uint32_t buf_page_offset(void *buf)
68{
69 uint32_t offset = (uint32_t) buf & (PAGE_SIZE - 1);
70 return offset;
71}
72
73static inline int buf_num_pages(void *buf, int len)
74{
75 uint32_t start = buf_page_start(buf) >> PAGE_SHIFT;
76 uint32_t end = (((uint32_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
77 int nPages = end - start + 1;
78 return nPages;
79}
80
81static inline uint32_t buf_page_size(uint32_t size)
82{
83 uint32_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
84 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
85}
86
/*
 * Describe the physical pages backing user range [addr, addr + sz) in
 * *pages, as a single physically-contiguous run.
 *
 * Verifies the range is accessible (read or write per @access), finds the
 * VMAs covering the first and last page, and checks that the PFNs are
 * contiguous across the whole range before recording one entry.
 *
 * Returns the number of entries written (1) on success, 0 or -1 on
 * failure (callers test for >= 0; see get_page_list()).
 * NOTE(review): assumes caller holds mmap_sem via LOCK_MMAP — confirm.
 */
static inline int buf_get_pages(void *addr, int sz, int nr_pages, int access,
				struct smq_phy_page *pages, int nr_elems)
{
	struct vm_area_struct *vma, *vmaend;
	uint32_t start = buf_page_start(addr);
	uint32_t end = buf_page_start((void *)((uint32_t)addr + sz - 1));
	uint32_t len = nr_pages << PAGE_SHIFT;
	unsigned long pfn, pfnend;
	int n = -1, err = 0;

	VERIFY(err, 0 != access_ok(access ? VERIFY_WRITE : VERIFY_READ,
					(void __user *)start, len));
	if (err)
		goto bail;
	VERIFY(err, 0 != (vma = find_vma(current->mm, start)));
	if (err)
		goto bail;
	VERIFY(err, 0 != (vmaend = find_vma(current->mm, end)));
	if (err)
		goto bail;
	/* From here on failures report 0 entries rather than -1. */
	n = 0;
	VERIFY(err, 0 == follow_pfn(vma, start, &pfn));
	if (err)
		goto bail;
	VERIFY(err, 0 == follow_pfn(vmaend, end, &pfnend));
	if (err)
		goto bail;
	/* The whole range must be one physically-contiguous run. */
	VERIFY(err, (pfn + nr_pages - 1) == pfnend);
	if (err)
		goto bail;
	VERIFY(err, nr_elems > 0);
	if (err)
		goto bail;
	pages->addr = __pfn_to_phys(pfn);
	pages->size = len;
	n++;
 bail:
	return n;
}
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700126
/* Bookkeeping for one in-flight remote invocation. */
struct smq_invoke_ctx {
	struct completion work;	/* completed by context_notify_user() */
	int retval;		/* remote return value */
	atomic_t free;		/* 0 = slot free, 1 = slot in use */
};

/* Fixed pool of invoke contexts, allocated once in context_list_ctor(). */
struct smq_context_list {
	struct smq_invoke_ctx *ls;	/* array of contexts */
	int size;			/* number of entries in ls */
	int last;			/* index hint for next allocation */
};

/* SMMU state discovered from DT ("qcom,smmu-enabled") at init. */
struct fastrpc_smmu {
	struct iommu_group *group;
	struct iommu_domain *domain;
	int domain_id;
	bool enabled;
};

/* Global driver state; single instance gfa below. */
struct fastrpc_apps {
	smd_channel_t *chan;		/* SMD channel to the DSP */
	struct smq_context_list clst;	/* invoke context pool */
	struct completion work;		/* signalled on SMD_EVENT_OPEN */
	struct ion_client *iclient;	/* ION client for buffer allocation */
	struct cdev cdev;
	struct class *class;
	struct device *dev;
	struct fastrpc_smmu smmu;
	dev_t dev_no;
	spinlock_t wrlock;		/* serializes smd_write() */
	spinlock_t hlock;		/* protects htbl */
	struct hlist_head htbl[RPC_HASH_SZ];	/* device cache by tgid */
};
160
Mitchel Humpherys0d99a792013-03-05 13:41:14 -0800161struct fastrpc_mmap {
162 struct hlist_node hn;
163 struct ion_handle *handle;
164 void *virt;
165 uint32_t vaddrin;
166 uint32_t vaddrout;
167 int size;
168};
169
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700170struct fastrpc_buf {
171 struct ion_handle *handle;
172 void *virt;
173 ion_phys_addr_t phys;
174 int size;
175 int used;
176};
177
Mitchel Humpherys0d99a792013-03-05 13:41:14 -0800178struct file_data {
179 spinlock_t hlock;
180 struct hlist_head hlst;
181};
182
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700183struct fastrpc_device {
184 uint32_t tgid;
185 struct hlist_node hn;
186 struct fastrpc_buf buf;
187};
188
189static struct fastrpc_apps gfa;
190
/*
 * Release an ION buffer: drop the IOMMU mapping (when SMMU mode allocated
 * one), the kernel mapping, and finally the handle itself.  Safe to call
 * on a buffer whose handle is NULL or an ERR pointer.
 */
static void free_mem(struct fastrpc_buf *buf)
{
	struct fastrpc_apps *me = &gfa;

	if (!IS_ERR_OR_NULL(buf->handle)) {
		if (me->smmu.enabled && buf->phys) {
			ion_unmap_iommu(me->iclient, buf->handle,
					me->smmu.domain_id, 0);
			buf->phys = 0;
		}
		if (buf->virt) {
			ion_unmap_kernel(me->iclient, buf->handle);
			buf->virt = 0;
		}
		ion_free(me->iclient, buf->handle);
		buf->handle = 0;
	}
}
209
Mitchel Humpherys0d99a792013-03-05 13:41:14 -0800210static void free_map(struct fastrpc_mmap *map)
211{
212 struct fastrpc_apps *me = &gfa;
Mitchel Humpherysef5bbbe2013-05-15 12:44:51 -0700213 if (!IS_ERR_OR_NULL(map->handle)) {
Mitchel Humpherys0d99a792013-03-05 13:41:14 -0800214 if (map->virt) {
215 ion_unmap_kernel(me->iclient, map->handle);
216 map->virt = 0;
217 }
218 ion_free(me->iclient, map->handle);
219 }
220 map->handle = 0;
221}
222
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700223static int alloc_mem(struct fastrpc_buf *buf)
224{
Mitchel Humpherysef5bbbe2013-05-15 12:44:51 -0700225 struct fastrpc_apps *me = &gfa;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700226 struct ion_client *clnt = gfa.iclient;
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700227 struct sg_table *sg;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700228 int err = 0;
Mitchel Humpherysef5bbbe2013-05-15 12:44:51 -0700229 unsigned int heap;
230 unsigned long len;
Mitchel Humpherys0d99a792013-03-05 13:41:14 -0800231 buf->handle = 0;
232 buf->virt = 0;
Mitchel Humpherysef5bbbe2013-05-15 12:44:51 -0700233 heap = me->smmu.enabled ? ION_HEAP(ION_IOMMU_HEAP_ID) :
234 ION_HEAP(ION_ADSP_HEAP_ID) | ION_HEAP(ION_AUDIO_HEAP_ID);
Mitchel Humpherys0b204e32013-09-11 10:40:28 -0700235 buf->handle = ion_alloc(clnt, buf->size, SZ_4K, heap, ION_FLAG_CACHED);
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700236 VERIFY(err, 0 == IS_ERR_OR_NULL(buf->handle));
237 if (err)
238 goto bail;
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700239 VERIFY(err, 0 != (buf->virt = ion_map_kernel(clnt, buf->handle)));
240 if (err)
241 goto bail;
Mitchel Humpherysef5bbbe2013-05-15 12:44:51 -0700242 if (me->smmu.enabled) {
243 len = buf->size;
244 VERIFY(err, 0 == ion_map_iommu(clnt, buf->handle,
245 me->smmu.domain_id, 0, SZ_4K, 0,
246 &buf->phys, &len, 0, 0));
247 if (err)
248 goto bail;
249 } else {
250 VERIFY(err, 0 != (sg = ion_sg_table(clnt, buf->handle)));
251 if (err)
252 goto bail;
253 VERIFY(err, 1 == sg->nents);
254 if (err)
255 goto bail;
256 buf->phys = sg_dma_address(sg->sgl);
257 }
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700258 bail:
259 if (err && !IS_ERR_OR_NULL(buf->handle))
260 free_mem(buf);
261 return err;
262}
263
/*
 * Allocate the invoke-context pool: @size bytes of zeroed memory holding
 * size / sizeof(struct smq_invoke_ctx) entries, all initially free
 * (zeroed => ctx->free == 0).  Returns 0 on success.
 */
static int context_list_ctor(struct smq_context_list *me, int size)
{
	int err = 0;
	VERIFY(err, 0 != (me->ls = kzalloc(size, GFP_KERNEL)));
	if (err)
		goto bail;
	me->size = size / sizeof(*me->ls);
	me->last = 0;
 bail:
	return err;
}
275
/* Free the invoke-context pool allocated by context_list_ctor(). */
static void context_list_dtor(struct smq_context_list *me)
{
	kfree(me->ls);
}
280
/*
 * Claim a free invoke context from the pool, spinning over the array
 * until a slot's free flag is won with cmpxchg(0 -> 1).  Lock-free:
 * me->last is only a scan-start hint, so racy updates are harmless.
 * Blocks (busy-waits) until a slot becomes available.
 */
static void context_list_alloc_ctx(struct smq_context_list *me,
					struct smq_invoke_ctx **po)
{
	int i = me->last;
	struct smq_invoke_ctx *ctx;

	for (;;) {
		i = i % me->size;
		ctx = &me->ls[i];
		if (atomic_read(&ctx->free) == 0)
			if (atomic_cmpxchg(&ctx->free, 0, 1) == 0)
				break;
		i++;
	}
	me->last = i;
	ctx->retval = -1;
	init_completion(&ctx->work);
	*po = ctx;
}
300
301static void context_free(struct smq_invoke_ctx *me)
302{
303 if (me)
304 atomic_set(&me->free, 0);
305}
306
/*
 * Deliver a remote return value to the waiter: store retval before
 * completing so the waiter reads a valid value after wakeup.
 */
static void context_notify_user(struct smq_invoke_ctx *me, int retval)
{
	me->retval = retval;
	complete(&me->work);
}
312
313static void context_notify_all_users(struct smq_context_list *me)
314{
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700315 int i;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700316
317 if (!me->ls)
318 return;
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700319 for (i = 0; i < me->size; ++i) {
320 if (atomic_read(&me->ls[i].free) != 0)
321 complete(&me->ls[i].work);
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700322 }
323}
324
/*
 * Lay out the invoke-buffer descriptor list and physical page list for
 * an invocation inside *obuf, starting from the scratch buffer *ibuf.
 * Whenever the layout does not fit, obuf is grown (a fresh allocation)
 * and the layout restarted from "retry".  For user calls the pages
 * backing each argument are resolved with buf_get_pages(); kernel-call
 * arguments get no page entries here.
 *
 * On success obuf->used records how much of obuf was consumed.  On
 * failure any buffer allocated here (distinct from ibuf) is freed.
 */
static int get_page_list(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
			struct fastrpc_buf *ibuf, struct fastrpc_buf *obuf)
{
	struct smq_phy_page *pgstart, *pages;
	struct smq_invoke_buf *list;
	int i, rlen, err = 0;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);

	LOCK_MMAP(kernel);
	*obuf = *ibuf;
 retry:
	list = smq_invoke_buf_start((remote_arg_t *)obuf->virt, sc);
	pgstart = smq_phy_page_start(sc, list);
	pages = pgstart + 1;	/* first entry is reserved for obuf itself */
	rlen = obuf->size - ((uint32_t)pages - (uint32_t)obuf->virt);
	if (rlen < 0) {
		/* Header alone overflows obuf: grow by the shortfall. */
		rlen = ((uint32_t)pages - (uint32_t)obuf->virt) - obuf->size;
		obuf->size += buf_page_size(rlen);
		VERIFY(err, 0 == alloc_mem(obuf));
		if (err)
			goto bail;
		goto retry;
	}
	pgstart->addr = obuf->phys;
	pgstart->size = obuf->size;
	for (i = 0; i < inbufs + outbufs; ++i) {
		void *buf;
		int len, num;

		list[i].num = 0;
		list[i].pgidx = 0;
		len = pra[i].buf.len;
		if (!len)
			continue;
		buf = pra[i].buf.pv;
		num = buf_num_pages(buf, len);
		if (!kernel)
			list[i].num = buf_get_pages(buf, len, num,
				i >= inbufs, pages, rlen / sizeof(*pages));
		else
			list[i].num = 0;
		VERIFY(err, list[i].num >= 0);
		if (err)
			goto bail;
		if (list[i].num) {
			/* Pages resolved: record where they start. */
			list[i].pgidx = pages - pgstart;
			pages = pages + list[i].num;
		} else if (rlen > sizeof(*pages)) {
			/* Reserve one entry; filled later by get_args(). */
			list[i].pgidx = pages - pgstart;
			pages = pages + 1;
		} else {
			/* Out of room for page entries: grow and restart. */
			if (obuf->handle != ibuf->handle)
				free_mem(obuf);
			obuf->size += buf_page_size(sizeof(*pages));
			VERIFY(err, 0 == alloc_mem(obuf));
			if (err)
				goto bail;
			goto retry;
		}
		rlen = obuf->size - ((uint32_t) pages - (uint32_t) obuf->virt);
	}
	obuf->used = obuf->size - rlen;
 bail:
	if (err && (obuf->handle != ibuf->handle))
		free_mem(obuf);
	UNLOCK_MMAP(kernel);
	return err;
}
394
/*
 * Marshal invocation arguments into DSP-visible memory.
 *
 * For each in/out buffer argument:
 *  - if get_page_list() already resolved its pages, pass it in place;
 *  - else, in SMMU mode with a valid ION fd, import the dma-buf and map
 *    it into the DSP IOMMU domain (handle stored after the fds array for
 *    cleanup by the caller);
 *  - otherwise copy it into pbuf (growing via extra buffers in *abufs
 *    when the current one runs out of space).
 * Input buffers are then cache-flushed, in-handles copied, and the whole
 * descriptor region flushed so the DSP sees coherent data.
 *
 * Outputs: *abufs/*nbufs describe extra buffers allocated here; the
 * caller frees them even on error (err is returned after setting them).
 */
static int get_args(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
			remote_arg_t *rpra, remote_arg_t *upra,
			struct fastrpc_buf *ibuf, struct fastrpc_buf **abufs,
			int *nbufs, int *fds)
{
	struct fastrpc_apps *me = &gfa;
	struct smq_invoke_buf *list;
	struct fastrpc_buf *pbuf = ibuf, *obufs = 0;
	struct smq_phy_page *pages;
	struct ion_handle **handles = NULL;
	void *args;
	int i, rlen, size, used, inh, bufs = 0, err = 0;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	unsigned long iova, len;

	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	used = ALIGN(pbuf->used, BALIGN);
	args = (void *)((char *)pbuf->virt + used);
	rlen = pbuf->size - used;
	if (fds)
		/* ION handles are stashed immediately after the fd array. */
		handles = (struct ion_handle **)(fds + inbufs + outbufs);
	for (i = 0; i < inbufs + outbufs; ++i) {

		rpra[i].buf.len = pra[i].buf.len;
		if (!rpra[i].buf.len)
			continue;
		if (list[i].num) {
			/* Pages already resolved: use the buffer in place. */
			rpra[i].buf.pv = pra[i].buf.pv;
			continue;
		} else if (me->smmu.enabled && fds && (fds[i] >= 0)) {
			/* Map the caller's dma-buf into the DSP SMMU. */
			len = buf_page_size(pra[i].buf.len);
			handles[i] = ion_import_dma_buf(me->iclient, fds[i]);
			VERIFY(err, 0 == IS_ERR_OR_NULL(handles[i]));
			if (err)
				goto bail;
			VERIFY(err, 0 == ion_map_iommu(me->iclient, handles[i],
						me->smmu.domain_id, 0, SZ_4K, 0,
						&iova, &len, 0, 0));
			if (err)
				goto bail;
			rpra[i].buf.pv = pra[i].buf.pv;
			list[i].num = 1;
			pages[list[i].pgidx].addr = iova;
			pages[list[i].pgidx].size = len;
			continue;
		}
		if (rlen < pra[i].buf.len) {
			/* Current buffer exhausted: allocate another. */
			struct fastrpc_buf *b;
			pbuf->used = pbuf->size - rlen;
			VERIFY(err, 0 != (b = krealloc(obufs,
				(bufs + 1) * sizeof(*obufs), GFP_KERNEL)));
			if (err)
				goto bail;
			obufs = b;
			pbuf = obufs + bufs;
			pbuf->size = buf_num_pages(0, pra[i].buf.len) *
								PAGE_SIZE;
			VERIFY(err, 0 == alloc_mem(pbuf));
			if (err)
				goto bail;
			bufs++;
			args = pbuf->virt;
			rlen = pbuf->size;
		}
		list[i].num = 1;
		pages[list[i].pgidx].addr =
			buf_page_start((void *)(pbuf->phys +
						(pbuf->size - rlen)));
		pages[list[i].pgidx].size =
			buf_page_size(pra[i].buf.len);
		if (i < inbufs) {
			/* Copy input payload into the DSP-visible buffer. */
			if (!kernel) {
				VERIFY(err, 0 == copy_from_user(args,
						pra[i].buf.pv, pra[i].buf.len));
				if (err)
					goto bail;
			} else {
				memmove(args, pra[i].buf.pv, pra[i].buf.len);
			}
		}
		rpra[i].buf.pv = args;
		args = (void *)((char *)args + ALIGN(pra[i].buf.len, BALIGN));
		rlen -= ALIGN(pra[i].buf.len, BALIGN);
	}
	/* Push input payloads out of the CPU caches for the DSP. */
	for (i = 0; i < inbufs; ++i) {
		if (rpra[i].buf.len)
			dmac_flush_range(rpra[i].buf.pv,
				(char *)rpra[i].buf.pv + rpra[i].buf.len);
	}
	pbuf->used = pbuf->size - rlen;
	size = sizeof(*rpra) * REMOTE_SCALARS_INHANDLES(sc);
	if (size) {
		inh = inbufs + outbufs;
		if (!kernel) {
			VERIFY(err, 0 == copy_from_user(&rpra[inh], &upra[inh],
							size));
			if (err)
				goto bail;
		} else {
			memmove(&rpra[inh], &upra[inh], size);
		}
	}
	dmac_flush_range(rpra, (char *)rpra + used);
 bail:
	*abufs = obufs;
	*nbufs = bufs;
	return err;
}
505
/*
 * Unmarshal results after a remote call: copy each output buffer that
 * was staged in DSP-visible memory back to the caller's buffer, then
 * copy out-handles back to the user argument array.  Buffers used in
 * place (rpra pv == pra pv) need no copy.  Returns 0 on success.
 */
static int put_args(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
			remote_arg_t *rpra, remote_arg_t *upra)
{
	int i, inbufs, outbufs, outh, size;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (rpra[i].buf.pv != pra[i].buf.pv) {
			if (!kernel) {
				VERIFY(err, 0 == copy_to_user(pra[i].buf.pv,
					rpra[i].buf.pv, rpra[i].buf.len));
				if (err)
					goto bail;
			} else {
				memmove(pra[i].buf.pv, rpra[i].buf.pv,
							rpra[i].buf.len);
			}
		}
	}
	size = sizeof(*rpra) * REMOTE_SCALARS_OUTHANDLES(sc);
	if (size) {
		outh = inbufs + outbufs + REMOTE_SCALARS_INHANDLES(sc);
		if (!kernel) {
			VERIFY(err, 0 == copy_to_user(&upra[outh], &rpra[outh],
							size));
			if (err)
				goto bail;
		} else {
			memmove(&upra[outh], &rpra[outh], size);
		}
	}
 bail:
	return err;
}
542
Mitchel Humpherys9a1c8192013-05-15 12:47:38 -0700543static void inv_args_pre(uint32_t sc, remote_arg_t *rpra)
544{
545 int i, inbufs, outbufs;
546 uint32_t end;
547
548 inbufs = REMOTE_SCALARS_INBUFS(sc);
549 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
550 for (i = inbufs; i < inbufs + outbufs; ++i) {
551 if (!rpra[i].buf.len)
552 continue;
553 if (buf_page_start(rpra) == buf_page_start(rpra[i].buf.pv))
554 continue;
555 if (!IS_CACHE_ALIGNED((uint32_t)rpra[i].buf.pv))
556 dmac_flush_range(rpra[i].buf.pv,
557 (char *)rpra[i].buf.pv + 1);
558 end = (uint32_t)rpra[i].buf.pv + rpra[i].buf.len;
559 if (!IS_CACHE_ALIGNED(end))
560 dmac_flush_range((char *)end,
561 (char *)end + 1);
562 }
563}
564
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700565static void inv_args(uint32_t sc, remote_arg_t *rpra, int used)
566{
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700567 int i, inbufs, outbufs;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700568 int inv = 0;
569
570 inbufs = REMOTE_SCALARS_INBUFS(sc);
571 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700572 for (i = inbufs; i < inbufs + outbufs; ++i) {
573 if (buf_page_start(rpra) == buf_page_start(rpra[i].buf.pv))
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700574 inv = 1;
Mitchel Humpherysf581c512012-10-19 11:29:36 -0700575 else if (rpra[i].buf.len)
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700576 dmac_inv_range(rpra[i].buf.pv,
577 (char *)rpra[i].buf.pv + rpra[i].buf.len);
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700578 }
579
580 if (inv || REMOTE_SCALARS_OUTHANDLES(sc))
581 dmac_inv_range(rpra, (char *)rpra + used);
582}
583
Mitchel Humpherys0d99a792013-03-05 13:41:14 -0800584static int fastrpc_invoke_send(struct fastrpc_apps *me,
585 uint32_t kernel, uint32_t handle,
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700586 uint32_t sc, struct smq_invoke_ctx *ctx,
587 struct fastrpc_buf *buf)
588{
589 struct smq_msg msg;
590 int err = 0, len;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700591 msg.pid = current->tgid;
592 msg.tid = current->pid;
Mitchel Humpherys0d99a792013-03-05 13:41:14 -0800593 if (kernel)
594 msg.pid = 0;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700595 msg.invoke.header.ctx = ctx;
596 msg.invoke.header.handle = handle;
597 msg.invoke.header.sc = sc;
598 msg.invoke.page.addr = buf->phys;
599 msg.invoke.page.size = buf_page_size(buf->used);
600 spin_lock(&me->wrlock);
601 len = smd_write(me->chan, &msg, sizeof(msg));
602 spin_unlock(&me->wrlock);
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700603 VERIFY(err, len == sizeof(msg));
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700604 return err;
605}
606
/*
 * Tear down global state set up by fastrpc_init(): close the SMD
 * channel, free the context pool, and destroy the ION client.
 */
static void fastrpc_deinit(void)
{
	struct fastrpc_apps *me = &gfa;

	smd_close(me->chan);
	context_list_dtor(&me->clst);
	ion_client_destroy(me->iclient);
	me->iclient = 0;
	me->chan = 0;
}
617
/*
 * Drain all complete responses from the SMD channel (called from the
 * SMD_EVENT_DATA callback) and wake the matching waiters.  The loop
 * exits when a full response is no longer available.
 */
static void fastrpc_read_handler(void)
{
	struct fastrpc_apps *me = &gfa;
	struct smq_invoke_rsp rsp;
	int err = 0;

	do {
		VERIFY(err, sizeof(rsp) ==
			smd_read_from_cb(me->chan, &rsp, sizeof(rsp)));
		if (err)
			goto bail;
		context_notify_user(rsp.ctx, rsp.retval);
	} while (!err);
 bail:
	return;
}
634
/*
 * SMD channel event callback.  OPEN completes the init handshake,
 * CLOSE wakes every pending invoker, DATA drains responses.
 */
static void smd_event_handler(void *priv, unsigned event)
{
	struct fastrpc_apps *me = (struct fastrpc_apps *)priv;

	switch (event) {
	case SMD_EVENT_OPEN:
		complete(&(me->work));
		break;
	case SMD_EVENT_CLOSE:
		context_notify_all_users(&me->clst);
		break;
	case SMD_EVENT_DATA:
		fastrpc_read_handler();
		break;
	}
}
651
/*
 * One-time global initialization (idempotent: skipped once me->chan is
 * set).  Creates the context pool and ION client, probes the device tree
 * for optional audio-SMMU support, opens the SMD channel to the QDSP,
 * and waits up to RPC_TIMEOUT for it to open.  Failures unwind in
 * reverse order through the labelled bail ladder.
 */
static int fastrpc_init(void)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device_node *node;
	bool enabled = 0;

	if (me->chan == 0) {
		int i;
		spin_lock_init(&me->hlock);
		spin_lock_init(&me->wrlock);
		init_completion(&me->work);
		for (i = 0; i < RPC_HASH_SZ; ++i)
			INIT_HLIST_HEAD(&me->htbl[i]);
		VERIFY(err, 0 == context_list_ctor(&me->clst, SZ_4K));
		if (err)
			goto context_list_bail;
		me->iclient = msm_ion_client_create(ION_HEAP_CARVEOUT_MASK,
							DEVICE_NAME);
		VERIFY(err, 0 == IS_ERR_OR_NULL(me->iclient));
		if (err)
			goto ion_bail;
		/* SMMU use is opt-in via the audio-ion DT node. */
		node = of_find_compatible_node(NULL, NULL,
						"qcom,msm-audio-ion");
		if (node)
			enabled = of_property_read_bool(node,
						"qcom,smmu-enabled");
		if (enabled)
			me->smmu.group = iommu_group_find("lpass_audio");
		if (me->smmu.group)
			me->smmu.domain = iommu_group_get_iommudata(
							me->smmu.group);
		if (!IS_ERR_OR_NULL(me->smmu.domain)) {
			me->smmu.domain_id = msm_find_domain_no(
							me->smmu.domain);
			if (me->smmu.domain_id >= 0)
				me->smmu.enabled = enabled;
		}
		VERIFY(err, 0 == smd_named_open_on_edge(FASTRPC_SMD_GUID,
						SMD_APPS_QDSP, &me->chan,
						me, smd_event_handler));
		if (err)
			goto smd_bail;
		VERIFY(err, 0 != wait_for_completion_timeout(&me->work,
							RPC_TIMEOUT));
		if (err)
			goto completion_bail;
	}

	return 0;

completion_bail:
	smd_close(me->chan);
smd_bail:
	ion_client_destroy(me->iclient);
ion_bail:
	context_list_dtor(&me->clst);
context_list_bail:
	return err;
}
712
/*
 * Free a per-process device and drop the module reference taken by
 * alloc_dev().  NULL is ignored (and no module_put happens then).
 */
static void free_dev(struct fastrpc_device *dev)
{
	if (dev) {
		free_mem(&dev->buf);
		kfree(dev);
		module_put(THIS_MODULE);
	}
}
721
722static int alloc_dev(struct fastrpc_device **dev)
723{
724 int err = 0;
725 struct fastrpc_device *fd = 0;
726
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700727 VERIFY(err, 0 != try_module_get(THIS_MODULE));
728 if (err)
729 goto bail;
730 VERIFY(err, 0 != (fd = kzalloc(sizeof(*fd), GFP_KERNEL)));
731 if (err)
732 goto bail;
Mitchel Humpherys0d99a792013-03-05 13:41:14 -0800733
734 INIT_HLIST_NODE(&fd->hn);
735
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700736 fd->buf.size = PAGE_SIZE;
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700737 VERIFY(err, 0 == alloc_mem(&fd->buf));
738 if (err)
739 goto bail;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700740 fd->tgid = current->tgid;
Mitchel Humpherys0d99a792013-03-05 13:41:14 -0800741
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700742 *dev = fd;
743 bail:
744 if (err)
745 free_dev(fd);
746 return err;
747}
748
/*
 * Get a per-process device for the current tgid: take (and unlink) a
 * cached one from the hash table if present, otherwise allocate a fresh
 * one.  The entry is removed from the table so concurrent callers in
 * the same process each get their own device; add_dev() returns it.
 */
static int get_dev(struct fastrpc_apps *me, struct fastrpc_device **rdev)
{
	struct hlist_head *head;
	struct fastrpc_device *dev = 0, *devfree = 0;
	struct hlist_node *pos, *n;
	uint32_t h = hash_32(current->tgid, RPC_HASH_BITS);
	int err = 0;

	spin_lock(&me->hlock);
	head = &me->htbl[h];
	hlist_for_each_entry_safe(dev, pos, n, head, hn) {
		if (dev->tgid == current->tgid) {
			hlist_del(&dev->hn);
			devfree = dev;
			break;
		}
	}
	spin_unlock(&me->hlock);
	VERIFY(err, devfree != 0);
	if (err)
		goto bail;
	*rdev = devfree;
 bail:
	if (err) {
		/* Cache miss: devfree is NULL, so allocate a new device. */
		free_dev(devfree);
		err = alloc_dev(rdev);
	}
	return err;
}
778
779static void add_dev(struct fastrpc_apps *me, struct fastrpc_device *dev)
780{
781 struct hlist_head *head;
782 uint32_t h = hash_32(current->tgid, RPC_HASH_BITS);
783
784 spin_lock(&me->hlock);
785 head = &me->htbl[h];
786 hlist_add_head(&dev->hn, head);
787 spin_unlock(&me->hlock);
788 return;
789}
790
791static int fastrpc_release_current_dsp_process(void);
792
/*
 * Core RPC path: marshal the buffers described by invoke->sc, send the
 * invoke message to the DSP and wait for its response, then copy output
 * buffers back.
 *
 * @kernel: nonzero when pra points at kernel memory (internal calls);
 *          zero when buffers must be copied to/from user space.
 * @pra:    flat array of remote args (inbufs followed by outbufs).
 * @fds:    optional per-buffer ion fd array for zero-copy mapping when
 *          the SMMU is enabled; may be 0.  When present, get_args()
 *          fills the ion handle slots that live right after the fds in
 *          the same allocation (see fastrpc_device_ioctl).
 *
 * Returns 0 on success, the DSP's retval, or a local -errno.
 */
static int fastrpc_internal_invoke(struct fastrpc_apps *me, uint32_t kernel,
			struct fastrpc_ioctl_invoke *invoke, remote_arg_t *pra,
			int *fds)
{
	remote_arg_t *rpra = 0;
	struct fastrpc_device *dev = 0;
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_buf obuf, *abufs = 0, *b;
	struct ion_handle **handles = NULL;
	int interrupted = 0;
	uint32_t sc;
	int i, bufs, nbufs = 0, err = 0;

	sc = invoke->sc;
	obuf.handle = 0;
	/* Keep the SMMU domain attached for the whole transaction. */
	if (me->smmu.enabled) {
		VERIFY(err, 0 == iommu_attach_group(me->smmu.domain,
						me->smmu.group));
		if (err)
			return err;
	}
	if (REMOTE_SCALARS_LENGTH(sc)) {
		/* Grab a cached device and build the page list plus the
		 * remote-argument block (rpra) that the DSP will read. */
		VERIFY(err, 0 == get_dev(me, &dev));
		if (err)
			goto bail;
		VERIFY(err, 0 == get_page_list(kernel, sc, pra, &dev->buf,
						&obuf));
		if (err)
			goto bail;
		rpra = (remote_arg_t *)obuf.virt;
		VERIFY(err, 0 == get_args(kernel, sc, pra, rpra, invoke->pra,
					&obuf, &abufs, &nbufs, fds));
		if (err)
			goto bail;
	}

	context_list_alloc_ctx(&me->clst, &ctx);
	inv_args_pre(sc, rpra);
	VERIFY(err, 0 == fastrpc_invoke_send(me, kernel, invoke->handle, sc,
						ctx, &obuf));
	if (err)
		goto bail;
	inv_args(sc, rpra, obuf.used);
	/* Interruptible wait first; interrupted is handled in bail. */
	VERIFY(err, 0 == (interrupted =
			wait_for_completion_interruptible(&ctx->work)));
	if (err)
		goto bail;
	/* Propagate the remote function's return value. */
	VERIFY(err, 0 == (err = ctx->retval));
	if (err)
		goto bail;
	VERIFY(err, 0 == put_args(kernel, sc, pra, rpra, invoke->pra));
	if (err)
		goto bail;
 bail:
	if (interrupted) {
		/* The DSP still owns the buffers: tear down the remote
		 * process (user calls only) and wait for the reply so we
		 * do not free memory the DSP may still write. */
		if (!kernel)
			(void)fastrpc_release_current_dsp_process();
		wait_for_completion(&ctx->work);
	}
	context_free(ctx);

	if (me->smmu.enabled) {
		/* Undo the per-buffer iommu mappings created by
		 * get_args(); the handles live just past the fds array. */
		bufs = REMOTE_SCALARS_LENGTH(sc);
		if (fds) {
			handles = (struct ion_handle **)(fds + bufs);
			for (i = 0; i < bufs; i++)
				if (!IS_ERR_OR_NULL(handles[i])) {
					ion_unmap_iommu(me->iclient, handles[i],
							me->smmu.domain_id, 0);
					ion_free(me->iclient, handles[i]);
				}
		}
		iommu_detach_group(me->smmu.domain, me->smmu.group);
	}
	for (i = 0, b = abufs; i < nbufs; ++i, ++b)
		free_mem(b);

	kfree(abufs);
	if (dev) {
		add_dev(me, dev);
		/* obuf may alias the device's own buffer; only free it
		 * when get_page_list allocated a separate one. */
		if (obuf.handle != dev->buf.handle)
			free_mem(&obuf);
	}
	return err;
}
878
879static int fastrpc_create_current_dsp_process(void)
880{
881 int err = 0;
882 struct fastrpc_ioctl_invoke ioctl;
883 struct fastrpc_apps *me = &gfa;
884 remote_arg_t ra[1];
885 int tgid = 0;
886
887 tgid = current->tgid;
888 ra[0].buf.pv = &tgid;
889 ra[0].buf.len = sizeof(tgid);
890 ioctl.handle = 1;
891 ioctl.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
892 ioctl.pra = ra;
Mitchel Humpheryscf5d3c82013-07-10 12:53:18 -0700893 VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, 1, &ioctl, ra, 0)));
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700894 return err;
895}
896
897static int fastrpc_release_current_dsp_process(void)
898{
899 int err = 0;
900 struct fastrpc_apps *me = &gfa;
901 struct fastrpc_ioctl_invoke ioctl;
902 remote_arg_t ra[1];
903 int tgid = 0;
904
905 tgid = current->tgid;
906 ra[0].buf.pv = &tgid;
907 ra[0].buf.len = sizeof(tgid);
908 ioctl.handle = 1;
909 ioctl.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
910 ioctl.pra = ra;
Mitchel Humpheryscf5d3c82013-07-10 12:53:18 -0700911 VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, 1, &ioctl, ra, 0)));
Mitchel Humpherys0d99a792013-03-05 13:41:14 -0800912 return err;
913}
914
915static int fastrpc_mmap_on_dsp(struct fastrpc_apps *me,
916 struct fastrpc_ioctl_mmap *mmap,
917 struct smq_phy_page *pages,
918 int num)
919{
920 struct fastrpc_ioctl_invoke ioctl;
921 remote_arg_t ra[3];
922 int err = 0;
923 struct {
924 int pid;
925 uint32_t flags;
926 uint32_t vaddrin;
927 int num;
928 } inargs;
929
930 struct {
931 uint32_t vaddrout;
932 } routargs;
933 inargs.pid = current->tgid;
934 inargs.vaddrin = mmap->vaddrin;
935 inargs.flags = mmap->flags;
936 inargs.num = num;
937 ra[0].buf.pv = &inargs;
938 ra[0].buf.len = sizeof(inargs);
939
940 ra[1].buf.pv = pages;
941 ra[1].buf.len = num * sizeof(*pages);
942
943 ra[2].buf.pv = &routargs;
944 ra[2].buf.len = sizeof(routargs);
945
946 ioctl.handle = 1;
947 ioctl.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
948 ioctl.pra = ra;
Mitchel Humpheryscf5d3c82013-07-10 12:53:18 -0700949 VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, 1, &ioctl, ra, 0)));
Mitchel Humpherys0d99a792013-03-05 13:41:14 -0800950 mmap->vaddrout = routargs.vaddrout;
951 if (err)
952 goto bail;
953bail:
954 return err;
955}
956
957static int fastrpc_munmap_on_dsp(struct fastrpc_apps *me,
958 struct fastrpc_ioctl_munmap *munmap)
959{
960 struct fastrpc_ioctl_invoke ioctl;
961 remote_arg_t ra[1];
962 int err = 0;
963 struct {
964 int pid;
965 uint32_t vaddrout;
966 int size;
967 } inargs;
968
969 inargs.pid = current->tgid;
970 inargs.size = munmap->size;
971 inargs.vaddrout = munmap->vaddrout;
972 ra[0].buf.pv = &inargs;
973 ra[0].buf.len = sizeof(inargs);
974
975 ioctl.handle = 1;
976 ioctl.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
977 ioctl.pra = ra;
Mitchel Humpheryscf5d3c82013-07-10 12:53:18 -0700978 VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, 1, &ioctl, ra, 0)));
Mitchel Humpherys0d99a792013-03-05 13:41:14 -0800979 return err;
980}
981
/*
 * Unmap a buffer previously mapped with fastrpc_internal_mmap: tell the
 * DSP to drop its mapping first, then unlink the matching entry (keyed
 * by remote address and size) from this fd's map list and free it.
 * Returns 0 on success or the failure code from the remote call.
 */
static int fastrpc_internal_munmap(struct fastrpc_apps *me,
				   struct file_data *fdata,
				   struct fastrpc_ioctl_munmap *munmap)
{
	int err = 0;
	struct fastrpc_mmap *map = 0, *mapfree = 0;
	struct hlist_node *pos, *n;
	VERIFY(err, 0 == (err = fastrpc_munmap_on_dsp(me, munmap)));
	if (err)
		goto bail;
	/* Unlink under the lock; free outside it (free_map may sleep —
	 * TODO confirm, its body is not visible here). */
	spin_lock(&fdata->hlock);
	hlist_for_each_entry_safe(map, pos, n, &fdata->hlst, hn) {
		if (map->vaddrout == munmap->vaddrout &&
		    map->size == munmap->size) {
			hlist_del(&map->hn);
			mapfree = map;
			map = 0;
			break;
		}
	}
	spin_unlock(&fdata->hlock);
bail:
	if (mapfree) {
		free_map(mapfree);
		kfree(mapfree);
	}
	return err;
}
1010
1011
/*
 * Map a user buffer (backed by the ion fd in mmap->fd) onto the DSP:
 * import the ion buffer, gather the physical pages of the user range,
 * ask the DSP to map them, and record the mapping on this fd's list so
 * release/munmap can find it later.  mmap->vaddrout is filled in by the
 * remote call.
 */
static int fastrpc_internal_mmap(struct fastrpc_apps *me,
				 struct file_data *fdata,
				 struct fastrpc_ioctl_mmap *mmap)
{
	struct ion_client *clnt = gfa.iclient;
	struct fastrpc_mmap *map = 0;
	struct smq_phy_page *pages = 0;
	void *buf;
	int len;
	int num;
	int err = 0;

	VERIFY(err, 0 != (map = kzalloc(sizeof(*map), GFP_KERNEL)));
	if (err)
		goto bail;
	map->handle = ion_import_dma_buf(clnt, mmap->fd);
	VERIFY(err, 0 == IS_ERR_OR_NULL(map->handle));
	if (err)
		goto bail;
	VERIFY(err, 0 != (map->virt = ion_map_kernel(clnt, map->handle)));
	if (err)
		goto bail;
	buf = (void *)mmap->vaddrin;
	len = mmap->size;
	/* Upper-bound page count first; buf_get_pages() returns the
	 * actual number of contiguous runs used. */
	num = buf_num_pages(buf, len);
	VERIFY(err, 0 != (pages = kzalloc(num * sizeof(*pages), GFP_KERNEL)));
	if (err)
		goto bail;
	VERIFY(err, 0 < (num = buf_get_pages(buf, len, num, 1, pages, num)));
	if (err)
		goto bail;

	VERIFY(err, 0 == fastrpc_mmap_on_dsp(me, mmap, pages, num));
	if (err)
		goto bail;
	map->vaddrin = mmap->vaddrin;
	map->vaddrout = mmap->vaddrout;
	map->size = mmap->size;
	INIT_HLIST_NODE(&map->hn);
	spin_lock(&fdata->hlock);
	hlist_add_head(&map->hn, &fdata->hlst);
	spin_unlock(&fdata->hlock);
 bail:
	/* NOTE(review): if ion_import_dma_buf() failed, map->handle holds
	 * an ERR_PTR here and free_map() receives it — confirm free_map()
	 * tolerates error-pointer/NULL handles. */
	if (err && map) {
		free_map(map);
		kfree(map);
	}
	kfree(pages);
	return err;
}
1062
1063static void cleanup_current_dev(void)
1064{
1065 struct fastrpc_apps *me = &gfa;
1066 uint32_t h = hash_32(current->tgid, RPC_HASH_BITS);
1067 struct hlist_head *head;
Mitchel Humpherys8388b3b2013-03-04 18:07:45 -08001068 struct hlist_node *pos, *n;
1069 struct fastrpc_device *dev, *devfree;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001070
1071 rnext:
Mitchel Humpherys8388b3b2013-03-04 18:07:45 -08001072 devfree = dev = 0;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001073 spin_lock(&me->hlock);
1074 head = &me->htbl[h];
Mitchel Humpherys8388b3b2013-03-04 18:07:45 -08001075 hlist_for_each_entry_safe(dev, pos, n, head, hn) {
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001076 if (dev->tgid == current->tgid) {
1077 hlist_del(&dev->hn);
Mitchel Humpherys8388b3b2013-03-04 18:07:45 -08001078 devfree = dev;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001079 break;
1080 }
1081 }
1082 spin_unlock(&me->hlock);
Mitchel Humpherys8388b3b2013-03-04 18:07:45 -08001083 if (devfree) {
1084 free_dev(devfree);
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001085 goto rnext;
1086 }
1087 return;
1088}
1089
1090static int fastrpc_device_release(struct inode *inode, struct file *file)
1091{
Mitchel Humpherys0d99a792013-03-05 13:41:14 -08001092 struct file_data *fdata = (struct file_data *)file->private_data;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001093 (void)fastrpc_release_current_dsp_process();
1094 cleanup_current_dev();
Mitchel Humpherys0d99a792013-03-05 13:41:14 -08001095 if (fdata) {
1096 struct fastrpc_mmap *map;
1097 struct hlist_node *n, *pos;
1098 file->private_data = 0;
1099 hlist_for_each_entry_safe(map, pos, n, &fdata->hlst, hn) {
1100 hlist_del(&map->hn);
1101 free_map(map);
1102 kfree(map);
1103 }
1104 kfree(fdata);
1105 }
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001106 return 0;
1107}
1108
1109static int fastrpc_device_open(struct inode *inode, struct file *filp)
1110{
1111 int err = 0;
Mitchel Humpherys0d99a792013-03-05 13:41:14 -08001112 filp->private_data = 0;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001113 if (0 != try_module_get(THIS_MODULE)) {
Mitchel Humpherys0d99a792013-03-05 13:41:14 -08001114 struct file_data *fdata = 0;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001115 /* This call will cause a dev to be created
1116 * which will addref this module
1117 */
Mitchel Humpherys0d99a792013-03-05 13:41:14 -08001118 VERIFY(err, 0 != (fdata = kzalloc(sizeof(*fdata), GFP_KERNEL)));
1119 if (err)
1120 goto bail;
1121
1122 spin_lock_init(&fdata->hlock);
1123 INIT_HLIST_HEAD(&fdata->hlst);
1124
Mitchel Humpherys42e806e2012-09-30 22:27:53 -07001125 VERIFY(err, 0 == fastrpc_create_current_dsp_process());
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001126 if (err)
Mitchel Humpherys0d99a792013-03-05 13:41:14 -08001127 goto bail;
1128 filp->private_data = fdata;
1129bail:
1130 if (err) {
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001131 cleanup_current_dev();
Mitchel Humpherys0d99a792013-03-05 13:41:14 -08001132 kfree(fdata);
1133 }
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001134 module_put(THIS_MODULE);
1135 }
1136 return err;
1137}
1138
1139
1140static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
1141 unsigned long ioctl_param)
1142{
1143 struct fastrpc_apps *me = &gfa;
Mitchel Humpheryscf5d3c82013-07-10 12:53:18 -07001144 struct fastrpc_ioctl_invoke_fd invokefd;
1145 struct fastrpc_ioctl_invoke *invoke = &invokefd.inv;
Mitchel Humpherys0d99a792013-03-05 13:41:14 -08001146 struct fastrpc_ioctl_mmap mmap;
1147 struct fastrpc_ioctl_munmap munmap;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001148 remote_arg_t *pra = 0;
1149 void *param = (char *)ioctl_param;
Mitchel Humpherys0d99a792013-03-05 13:41:14 -08001150 struct file_data *fdata = (struct file_data *)file->private_data;
Mitchel Humpheryscf5d3c82013-07-10 12:53:18 -07001151 int *fds = 0;
1152 int bufs, size = 0, err = 0;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001153
1154 switch (ioctl_num) {
Mitchel Humpheryscf5d3c82013-07-10 12:53:18 -07001155 case FASTRPC_IOCTL_INVOKE_FD:
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001156 case FASTRPC_IOCTL_INVOKE:
Mitchel Humpheryscf5d3c82013-07-10 12:53:18 -07001157 invokefd.fds = 0;
1158 size = (ioctl_num == FASTRPC_IOCTL_INVOKE) ?
1159 sizeof(*invoke) : sizeof(invokefd);
1160 VERIFY(err, 0 == copy_from_user(&invokefd, param, size));
Mitchel Humpherys42e806e2012-09-30 22:27:53 -07001161 if (err)
1162 goto bail;
Mitchel Humpheryscf5d3c82013-07-10 12:53:18 -07001163 bufs = REMOTE_SCALARS_INBUFS(invoke->sc) +
1164 REMOTE_SCALARS_OUTBUFS(invoke->sc);
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001165 if (bufs) {
Mitchel Humpheryscf5d3c82013-07-10 12:53:18 -07001166 size = bufs * sizeof(*pra);
1167 if (invokefd.fds)
1168 size = size + bufs * sizeof(*fds) +
1169 bufs * sizeof(struct ion_handle *);
1170 VERIFY(err, 0 != (pra = kzalloc(size, GFP_KERNEL)));
Mitchel Humpherys42e806e2012-09-30 22:27:53 -07001171 if (err)
1172 goto bail;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001173 }
Mitchel Humpheryscf5d3c82013-07-10 12:53:18 -07001174 VERIFY(err, 0 == copy_from_user(pra, invoke->pra,
1175 bufs * sizeof(*pra)));
Mitchel Humpherys42e806e2012-09-30 22:27:53 -07001176 if (err)
1177 goto bail;
Mitchel Humpheryscf5d3c82013-07-10 12:53:18 -07001178 if (invokefd.fds) {
1179 fds = (int *)(pra + bufs);
1180 VERIFY(err, 0 == copy_from_user(fds, invokefd.fds,
1181 bufs * sizeof(*fds)));
1182 if (err)
1183 goto bail;
1184 }
1185 VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, 0, invoke,
1186 pra, fds)));
Mitchel Humpherys42e806e2012-09-30 22:27:53 -07001187 if (err)
1188 goto bail;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001189 break;
Mitchel Humpherys0d99a792013-03-05 13:41:14 -08001190 case FASTRPC_IOCTL_MMAP:
1191 VERIFY(err, 0 == copy_from_user(&mmap, param,
1192 sizeof(mmap)));
1193 if (err)
1194 goto bail;
1195 VERIFY(err, 0 == (err = fastrpc_internal_mmap(me, fdata,
1196 &mmap)));
1197 if (err)
1198 goto bail;
1199 VERIFY(err, 0 == copy_to_user(param, &mmap, sizeof(mmap)));
1200 if (err)
1201 goto bail;
1202 break;
1203 case FASTRPC_IOCTL_MUNMAP:
1204 VERIFY(err, 0 == copy_from_user(&munmap, param,
1205 sizeof(munmap)));
1206 if (err)
1207 goto bail;
1208 VERIFY(err, 0 == (err = fastrpc_internal_munmap(me, fdata,
1209 &munmap)));
1210 if (err)
1211 goto bail;
1212 break;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001213 default:
Mitchel Humpherys42e806e2012-09-30 22:27:53 -07001214 err = -ENOTTY;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001215 break;
1216 }
1217 bail:
1218 kfree(pra);
1219 return err;
1220}
1221
1222static const struct file_operations fops = {
1223 .open = fastrpc_device_open,
1224 .release = fastrpc_device_release,
1225 .unlocked_ioctl = fastrpc_device_ioctl,
1226};
1227
1228static int __init fastrpc_device_init(void)
1229{
1230 struct fastrpc_apps *me = &gfa;
1231 int err = 0;
1232
Mitchel Humpherysef5bbbe2013-05-15 12:44:51 -07001233 memset(me, 0, sizeof(*me));
Mitchel Humpherys42e806e2012-09-30 22:27:53 -07001234 VERIFY(err, 0 == fastrpc_init());
1235 if (err)
Matt Wagantall818e23f2013-04-22 20:36:52 -07001236 goto fastrpc_bail;
Mitchel Humpherys42e806e2012-09-30 22:27:53 -07001237 VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, 1, DEVICE_NAME));
1238 if (err)
Matt Wagantall818e23f2013-04-22 20:36:52 -07001239 goto alloc_chrdev_bail;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001240 cdev_init(&me->cdev, &fops);
1241 me->cdev.owner = THIS_MODULE;
Mitchel Humpherys42e806e2012-09-30 22:27:53 -07001242 VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0), 1));
1243 if (err)
Matt Wagantall818e23f2013-04-22 20:36:52 -07001244 goto cdev_init_bail;
Mitchel Humpherys55877652013-02-02 11:23:42 -08001245 me->class = class_create(THIS_MODULE, "chardrv");
1246 VERIFY(err, !IS_ERR(me->class));
1247 if (err)
Matt Wagantall818e23f2013-04-22 20:36:52 -07001248 goto class_create_bail;
Mitchel Humpherys55877652013-02-02 11:23:42 -08001249 me->dev = device_create(me->class, NULL, MKDEV(MAJOR(me->dev_no), 0),
1250 NULL, DEVICE_NAME);
1251 VERIFY(err, !IS_ERR(me->dev));
1252 if (err)
Matt Wagantall818e23f2013-04-22 20:36:52 -07001253 goto device_create_bail;
Mitchel Humpherys55877652013-02-02 11:23:42 -08001254 pr_info("'created /dev/%s c %d 0'\n", DEVICE_NAME, MAJOR(me->dev_no));
Matt Wagantall818e23f2013-04-22 20:36:52 -07001255
1256 return 0;
1257
1258device_create_bail:
1259 class_destroy(me->class);
1260class_create_bail:
1261 cdev_del(&me->cdev);
1262cdev_init_bail:
1263 unregister_chrdev_region(me->dev_no, 1);
1264alloc_chrdev_bail:
1265 fastrpc_deinit();
1266fastrpc_bail:
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001267 return err;
1268}
1269
1270static void __exit fastrpc_device_exit(void)
1271{
1272 struct fastrpc_apps *me = &gfa;
1273
1274 fastrpc_deinit();
Mitchel Humpherys55877652013-02-02 11:23:42 -08001275 device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
1276 class_destroy(me->class);
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001277 cdev_del(&me->cdev);
1278 unregister_chrdev_region(me->dev_no, 1);
1279}
1280
module_init(fastrpc_device_init);	/* create the /dev node on load */
module_exit(fastrpc_device_exit);	/* tear everything down on unload */

MODULE_LICENSE("GPL v2");