blob: 83d94f1a8f6c430c63bf5a67d9311a96ccee971c [file] [log] [blame]
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001/*
Mitchel Humpherys6b4c68c2013-02-06 12:03:20 -08002 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
Mitchel Humpherys6b4c68c2013-02-06 12:03:20 -080014#include "adsprpc_shared.h"
15
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <mach/msm_smd.h>
28#include <mach/ion.h>
Mitchel Humpherysef5bbbe2013-05-15 12:44:51 -070029#include <mach/iommu_domains.h>
Mitchel Humpherys42e806e2012-09-30 22:27:53 -070030#include <linux/scatterlist.h>
Mitchel Humpherys6b4c68c2013-02-06 12:03:20 -080031#include <linux/fs.h>
32#include <linux/uaccess.h>
33#include <linux/device.h>
Mitchel Humpherysef5bbbe2013-05-15 12:44:51 -070034#include <linux/of.h>
35#include <linux/iommu.h>
Mitchel Humpherys6b4c68c2013-02-06 12:03:20 -080036
37#ifndef ION_ADSPRPC_HEAP_ID
38#define ION_ADSPRPC_HEAP_ID ION_AUDIO_HEAP_ID
39#endif /*ION_ADSPRPC_HEAP_ID*/
40
41#define RPC_TIMEOUT (5 * HZ)
42#define RPC_HASH_BITS 5
43#define RPC_HASH_SZ (1 << RPC_HASH_BITS)
44#define BALIGN 32
45
46#define LOCK_MMAP(kernel)\
47 do {\
48 if (!kernel)\
49 down_read(&current->mm->mmap_sem);\
50 } while (0)
51
52#define UNLOCK_MMAP(kernel)\
53 do {\
54 if (!kernel)\
55 up_read(&current->mm->mmap_sem);\
56 } while (0)
57
58
Mitchel Humpherys9a1c8192013-05-15 12:47:38 -070059#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
60
Mitchel Humpherys6b4c68c2013-02-06 12:03:20 -080061static inline uint32_t buf_page_start(void *buf)
62{
63 uint32_t start = (uint32_t) buf & PAGE_MASK;
64 return start;
65}
66
67static inline uint32_t buf_page_offset(void *buf)
68{
69 uint32_t offset = (uint32_t) buf & (PAGE_SIZE - 1);
70 return offset;
71}
72
73static inline int buf_num_pages(void *buf, int len)
74{
75 uint32_t start = buf_page_start(buf) >> PAGE_SHIFT;
76 uint32_t end = (((uint32_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
77 int nPages = end - start + 1;
78 return nPages;
79}
80
81static inline uint32_t buf_page_size(uint32_t size)
82{
83 uint32_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
84 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
85}
86
/*
 * Translate a user buffer into a single physically-contiguous page run.
 *
 * Validates user access over nr_pages, resolves the VMAs covering the
 * first and last page, and requires the pfns to be contiguous
 * (pfn .. pfn + nr_pages - 1).  On success fills in one smq_phy_page
 * entry and returns 1; returns -1 on access/VMA failure and 0 if the
 * pfn-contiguity or nr_elems checks fail.
 *
 * NOTE(review): caller must hold mmap_sem (see LOCK_MMAP in the caller)
 * for find_vma()/follow_pfn() to be safe — confirm at call sites.
 */
static inline int buf_get_pages(void *addr, int sz, int nr_pages, int access,
				struct smq_phy_page *pages, int nr_elems)
{
	struct vm_area_struct *vma, *vmaend;
	uint32_t start = buf_page_start(addr);
	uint32_t end = buf_page_start((void *)((uint32_t)addr + sz - 1));
	uint32_t len = nr_pages << PAGE_SHIFT;
	unsigned long pfn, pfnend;
	int n = -1, err = 0;

	/* 'access' selects write vs read permission check on the range */
	VERIFY(err, 0 != access_ok(access ? VERIFY_WRITE : VERIFY_READ,
					(void __user *)start, len));
	if (err)
		goto bail;
	VERIFY(err, 0 != (vma = find_vma(current->mm, start)));
	if (err)
		goto bail;
	VERIFY(err, 0 != (vmaend = find_vma(current->mm, end)));
	if (err)
		goto bail;
	/* from here on failures return 0 (no pages) instead of -1 */
	n = 0;
	VERIFY(err, 0 == follow_pfn(vma, start, &pfn));
	if (err)
		goto bail;
	VERIFY(err, 0 == follow_pfn(vmaend, end, &pfnend));
	if (err)
		goto bail;
	/* the whole range must be physically contiguous */
	VERIFY(err, (pfn + nr_pages - 1) == pfnend);
	if (err)
		goto bail;
	VERIFY(err, nr_elems > 0);
	if (err)
		goto bail;
	pages->addr = __pfn_to_phys(pfn);
	pages->size = len;
	n++;
 bail:
	return n;
}
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700126
/* Per-invocation context: completion signalled by the remote response. */
struct smq_invoke_ctx {
	struct completion work;	/* signalled when the DSP replies */
	int retval;		/* remote return value */
	atomic_t free;		/* 0 = slot available, 1 = in use */
};

/* Fixed pool of invoke contexts, scanned with a rotating cursor. */
struct smq_context_list {
	struct smq_invoke_ctx *ls;	/* context slot array */
	int size;			/* number of slots */
	int last;			/* hint: last allocated index */
};

/* SMMU state for routing buffers through the LPASS audio IOMMU. */
struct fastrpc_smmu {
	struct iommu_group *group;
	struct iommu_domain *domain;
	int domain_id;
	bool enabled;
};

/* Global driver state (single instance: gfa). */
struct fastrpc_apps {
	smd_channel_t *chan;		/* SMD channel to the QDSP */
	struct smq_context_list clst;	/* invoke context pool */
	struct completion work;		/* signalled on SMD_EVENT_OPEN */
	struct ion_client *iclient;	/* ION client for buffer allocation */
	struct cdev cdev;
	struct class *class;
	struct device *dev;
	struct fastrpc_smmu smmu;
	dev_t dev_no;
	spinlock_t wrlock;		/* serializes smd_write() */
	spinlock_t hlock;		/* protects htbl */
	struct hlist_head htbl[RPC_HASH_SZ];	/* per-tgid device cache */
};

/* Bookkeeping for a user mmap serviced via ION. */
struct fastrpc_mmap {
	struct hlist_node hn;
	struct ion_handle *handle;
	void *virt;
	uint32_t vaddrin;	/* caller-supplied virtual address */
	uint32_t vaddrout;	/* address returned by the DSP */
	int size;
};

/* One ION allocation: kernel mapping, device address, and usage. */
struct fastrpc_buf {
	struct ion_handle *handle;
	void *virt;		/* kernel virtual address */
	ion_phys_addr_t phys;	/* physical or IOMMU address */
	int size;		/* allocated bytes */
	int used;		/* bytes consumed so far */
};

/* Per-open-file state: list of mmaps made through this fd. */
struct file_data {
	spinlock_t hlock;	/* protects hlst */
	struct hlist_head hlst;
};

/* Cached per-process scratch device, keyed by tgid in apps->htbl. */
struct fastrpc_device {
	uint32_t tgid;
	struct hlist_node hn;
	struct fastrpc_buf buf;	/* reusable scratch buffer */
};

/* The single global driver instance. */
static struct fastrpc_apps gfa;
190
/*
 * Release an ION buffer: IOMMU unmap (if SMMU in use), kernel unmap,
 * then free the handle.  Safe to call with a NULL/error handle; fields
 * are zeroed so a second call is a no-op.
 */
static void free_mem(struct fastrpc_buf *buf)
{
	struct fastrpc_apps *me = &gfa;

	if (!IS_ERR_OR_NULL(buf->handle)) {
		if (me->smmu.enabled && buf->phys) {
			ion_unmap_iommu(me->iclient, buf->handle,
					me->smmu.domain_id, 0);
			buf->phys = 0;
		}
		if (buf->virt) {
			ion_unmap_kernel(me->iclient, buf->handle);
			buf->virt = 0;
		}
		ion_free(me->iclient, buf->handle);
		buf->handle = 0;
	}
}
209
/*
 * Release the ION resources backing a user mmap: drop the kernel
 * mapping (if any) and free the handle.  The handle field is always
 * cleared, even when it held an error pointer.
 */
static void free_map(struct fastrpc_mmap *map)
{
	struct fastrpc_apps *me = &gfa;
	if (!IS_ERR_OR_NULL(map->handle)) {
		if (map->virt) {
			ion_unmap_kernel(me->iclient, map->handle);
			map->virt = 0;
		}
		ion_free(me->iclient, map->handle);
	}
	map->handle = 0;
}
222
/*
 * Allocate buf->size bytes from ION and map them for the kernel and
 * the DSP.  With SMMU enabled the buffer comes from the IOMMU heap and
 * buf->phys is an IOMMU address; otherwise it comes from the
 * ADSP/audio carveout heaps and must be a single physically-contiguous
 * chunk (nents == 1).  On failure all partial state is torn down via
 * free_mem().  Returns 0 on success.
 */
static int alloc_mem(struct fastrpc_buf *buf)
{
	struct fastrpc_apps *me = &gfa;
	struct ion_client *clnt = gfa.iclient;
	struct sg_table *sg;
	int err = 0;
	unsigned int heap;
	unsigned long len;
	buf->handle = 0;
	buf->virt = 0;
	heap = me->smmu.enabled ? ION_HEAP(ION_IOMMU_HEAP_ID) :
		ION_HEAP(ION_ADSP_HEAP_ID) | ION_HEAP(ION_AUDIO_HEAP_ID);
	buf->handle = ion_alloc(clnt, buf->size, SZ_4K, heap, 0);
	VERIFY(err, 0 == IS_ERR_OR_NULL(buf->handle));
	if (err)
		goto bail;
	VERIFY(err, 0 != (buf->virt = ion_map_kernel(clnt, buf->handle)));
	if (err)
		goto bail;
	if (me->smmu.enabled) {
		len = buf->size;
		/* map into the LPASS IOMMU domain; phys is a device address */
		VERIFY(err, 0 == ion_map_iommu(clnt, buf->handle,
				me->smmu.domain_id, 0, SZ_4K, 0,
				&buf->phys, &len, 0, 0));
		if (err)
			goto bail;
	} else {
		VERIFY(err, 0 != (sg = ion_sg_table(clnt, buf->handle)));
		if (err)
			goto bail;
		/* DSP needs one contiguous region without an SMMU */
		VERIFY(err, 1 == sg->nents);
		if (err)
			goto bail;
		buf->phys = sg_dma_address(sg->sgl);
	}
 bail:
	if (err && !IS_ERR_OR_NULL(buf->handle))
		free_mem(buf);
	return err;
}
263
264static int context_list_ctor(struct smq_context_list *me, int size)
265{
266 int err = 0;
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700267 VERIFY(err, 0 != (me->ls = kzalloc(size, GFP_KERNEL)));
268 if (err)
269 goto bail;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700270 me->size = size / sizeof(*me->ls);
271 me->last = 0;
272 bail:
273 return err;
274}
275
276static void context_list_dtor(struct smq_context_list *me)
277{
278 kfree(me->ls);
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700279}
280
/*
 * Grab a free invoke context from the pool, lock-free: scan from the
 * last-used index and claim a slot by atomically flipping 'free' from
 * 0 to 1.  Spins until a slot is won (the pool is sized so one should
 * become available).  The claimed context is reset and returned in *po.
 */
static void context_list_alloc_ctx(struct smq_context_list *me,
					struct smq_invoke_ctx **po)
{
	int i = me->last;
	struct smq_invoke_ctx *ctx;

	for (;;) {
		i = i % me->size;
		ctx = &me->ls[i];
		/* cheap read first; cmpxchg settles any race to claim */
		if (atomic_read(&ctx->free) == 0)
			if (atomic_cmpxchg(&ctx->free, 0, 1) == 0)
				break;
		i++;
	}
	/* 'last' is only a scan hint; benign if updated racily */
	me->last = i;
	ctx->retval = -1;
	init_completion(&ctx->work);
	*po = ctx;
}
300
301static void context_free(struct smq_invoke_ctx *me)
302{
303 if (me)
304 atomic_set(&me->free, 0);
305}
306
/*
 * Deliver the remote return value and wake the waiting invoker.
 * retval must be stored before complete() so the waiter reads the
 * final value after wait_for_completion() returns.
 */
static void context_notify_user(struct smq_invoke_ctx *me, int retval)
{
	me->retval = retval;
	complete(&me->work);
}
312
/*
 * Wake every in-flight invocation (used when the SMD channel closes)
 * so waiters are not stranded.  Slots with free == 0 are idle and
 * have no waiter, so only busy slots are completed.
 */
static void context_notify_all_users(struct smq_context_list *me)
{
	int i;

	if (!me->ls)
		return;
	for (i = 0; i < me->size; ++i) {
		if (atomic_read(&me->ls[i].free) != 0)
			complete(&me->ls[i].work);
	}
}
324
/*
 * Build the smq invoke-buf list and physical page table for a call in
 * the output buffer *obuf (initially aliasing *ibuf, the cached
 * per-process buffer).  Whenever the page table outgrows obuf, a
 * larger buffer is allocated and the layout restarted ('retry').
 * User buffers are resolved to physical runs via buf_get_pages();
 * kernel-mode calls get no page entries.  On error any buffer grown
 * beyond ibuf is freed.  mmap_sem is taken for user-mode callers.
 */
static int get_page_list(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
			struct fastrpc_buf *ibuf, struct fastrpc_buf *obuf)
{
	struct smq_phy_page *pgstart, *pages;
	struct smq_invoke_buf *list;
	int i, rlen, err = 0;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);

	LOCK_MMAP(kernel);
	*obuf = *ibuf;
 retry:
	/* header layout: invoke-buf list, then the page array */
	list = smq_invoke_buf_start((remote_arg_t *)obuf->virt, sc);
	pgstart = smq_phy_page_start(sc, list);
	pages = pgstart + 1;
	rlen = obuf->size - ((uint32_t)pages - (uint32_t)obuf->virt);
	if (rlen < 0) {
		/* header alone doesn't fit: grow obuf and restart */
		rlen = ((uint32_t)pages - (uint32_t)obuf->virt) - obuf->size;
		obuf->size += buf_page_size(rlen);
		VERIFY(err, 0 == alloc_mem(obuf));
		if (err)
			goto bail;
		goto retry;
	}
	/* first page entry describes obuf itself */
	pgstart->addr = obuf->phys;
	pgstart->size = obuf->size;
	for (i = 0; i < inbufs + outbufs; ++i) {
		void *buf;
		int len, num;

		list[i].num = 0;
		list[i].pgidx = 0;
		len = pra[i].buf.len;
		if (!len)
			continue;
		buf = pra[i].buf.pv;
		num = buf_num_pages(buf, len);
		if (!kernel)
			list[i].num = buf_get_pages(buf, len, num,
				i >= inbufs, pages, rlen / sizeof(*pages));
		else
			list[i].num = 0;
		VERIFY(err, list[i].num >= 0);
		if (err)
			goto bail;
		if (list[i].num) {
			/* buffer mapped in place: record its page run */
			list[i].pgidx = pages - pgstart;
			pages = pages + list[i].num;
		} else if (rlen > sizeof(*pages)) {
			/* reserve one entry for a copy made by get_args() */
			list[i].pgidx = pages - pgstart;
			pages = pages + 1;
		} else {
			/* out of room in the page array: grow and restart */
			if (obuf->handle != ibuf->handle)
				free_mem(obuf);
			obuf->size += buf_page_size(sizeof(*pages));
			VERIFY(err, 0 == alloc_mem(obuf));
			if (err)
				goto bail;
			goto retry;
		}
		rlen = obuf->size - ((uint32_t) pages - (uint32_t) obuf->virt);
	}
	obuf->used = obuf->size - rlen;
 bail:
	if (err && (obuf->handle != ibuf->handle))
		free_mem(obuf);
	UNLOCK_MMAP(kernel);
	return err;
}
394
/*
 * Marshal call arguments into DSP-visible memory.  Buffers already
 * described by page entries (list[i].num set by get_page_list) are
 * passed in place; the rest are copied into the tail of obuf (or into
 * extra ION buffers allocated here and returned via *abufs/*nbufs,
 * which the caller must free).  Input buffers and the header are
 * cache-flushed so the DSP sees the data.  In-handles are copied from
 * the user (or kernel) remote-arg array 'upra'.
 */
static int get_args(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
			remote_arg_t *rpra, remote_arg_t *upra,
			struct fastrpc_buf *ibuf, struct fastrpc_buf **abufs,
			int *nbufs)
{
	struct smq_invoke_buf *list;
	struct fastrpc_buf *pbuf = ibuf, *obufs = 0;
	struct smq_phy_page *pages;
	void *args;
	int i, rlen, size, used, inh, bufs = 0, err = 0;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);

	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	/* copy area starts after the header, aligned for the DSP */
	used = ALIGN(pbuf->used, BALIGN);
	args = (void *)((char *)pbuf->virt + used);
	rlen = pbuf->size - used;
	for (i = 0; i < inbufs + outbufs; ++i) {

		rpra[i].buf.len = pra[i].buf.len;
		if (!rpra[i].buf.len)
			continue;
		if (list[i].num) {
			/* already page-mapped: pass the user pointer as-is */
			rpra[i].buf.pv = pra[i].buf.pv;
			continue;
		}
		if (rlen < pra[i].buf.len) {
			/* current buffer exhausted: allocate a new one */
			struct fastrpc_buf *b;
			pbuf->used = pbuf->size - rlen;
			VERIFY(err, 0 != (b = krealloc(obufs,
				(bufs + 1) * sizeof(*obufs), GFP_KERNEL)));
			if (err)
				goto bail;
			obufs = b;
			pbuf = obufs + bufs;
			pbuf->size = buf_num_pages(0, pra[i].buf.len) *
								PAGE_SIZE;
			VERIFY(err, 0 == alloc_mem(pbuf));
			if (err)
				goto bail;
			bufs++;
			args = pbuf->virt;
			rlen = pbuf->size;
		}
		/* fill the page entry reserved by get_page_list() */
		list[i].num = 1;
		pages[list[i].pgidx].addr =
			buf_page_start((void *)(pbuf->phys +
						 (pbuf->size - rlen)));
		pages[list[i].pgidx].size =
			buf_page_size(pra[i].buf.len);
		if (i < inbufs) {
			/* stage input data into the shared buffer */
			if (!kernel) {
				VERIFY(err, 0 == copy_from_user(args,
						pra[i].buf.pv, pra[i].buf.len));
				if (err)
					goto bail;
			} else {
				memmove(args, pra[i].buf.pv, pra[i].buf.len);
			}
		}
		rpra[i].buf.pv = args;
		args = (void *)((char *)args + ALIGN(pra[i].buf.len, BALIGN));
		rlen -= ALIGN(pra[i].buf.len, BALIGN);
	}
	/* make staged inputs visible to the DSP */
	for (i = 0; i < inbufs; ++i) {
		if (rpra[i].buf.len)
			dmac_flush_range(rpra[i].buf.pv,
				(char *)rpra[i].buf.pv + rpra[i].buf.len);
	}
	pbuf->used = pbuf->size - rlen;
	size = sizeof(*rpra) * REMOTE_SCALARS_INHANDLES(sc);
	if (size) {
		/* in-handles follow the in/out buffer args */
		inh = inbufs + outbufs;
		if (!kernel) {
			VERIFY(err, 0 == copy_from_user(&rpra[inh], &upra[inh],
							size));
			if (err)
				goto bail;
		} else {
			memmove(&rpra[inh], &upra[inh], size);
		}
	}
	/* flush the header (remote args + lists) itself */
	dmac_flush_range(rpra, (char *)rpra + used);
 bail:
	/* always hand back what was allocated so the caller can free it */
	*abufs = obufs;
	*nbufs = bufs;
	return err;
}
484
/*
 * Unmarshal results after the remote call: copy each output buffer
 * that was staged in shared memory (rpra pointer differs from the
 * caller's pra pointer) back to the caller, then copy out-handles back
 * into the caller's remote-arg array.  copy_to_user for user-mode
 * callers, memmove for kernel-mode.  Returns 0 on success.
 */
static int put_args(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
			remote_arg_t *rpra, remote_arg_t *upra)
{
	int i, inbufs, outbufs, outh, size;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		/* equal pointers mean the DSP wrote in place: no copy */
		if (rpra[i].buf.pv != pra[i].buf.pv) {
			if (!kernel) {
				VERIFY(err, 0 == copy_to_user(pra[i].buf.pv,
					rpra[i].buf.pv, rpra[i].buf.len));
				if (err)
					goto bail;
			} else {
				memmove(pra[i].buf.pv, rpra[i].buf.pv,
					rpra[i].buf.len);
			}
		}
	}
	size = sizeof(*rpra) * REMOTE_SCALARS_OUTHANDLES(sc);
	if (size) {
		/* out-handles follow buffers and in-handles */
		outh = inbufs + outbufs + REMOTE_SCALARS_INHANDLES(sc);
		if (!kernel) {
			VERIFY(err, 0 == copy_to_user(&upra[outh], &rpra[outh],
						size));
			if (err)
				goto bail;
		} else {
			memmove(&upra[outh], &rpra[outh], size);
		}
	}
 bail:
	return err;
}
521
/*
 * Before sending the call: flush the cache lines straddling each
 * output buffer's unaligned start and end.  Those lines are shared
 * with neighbouring data, so they must be clean before the post-call
 * invalidate in inv_args() discards them.  Buffers living on the same
 * page as the message header are skipped (the header flush covers
 * them).
 */
static void inv_args_pre(uint32_t sc, remote_arg_t *rpra)
{
	int i, inbufs, outbufs;
	uint32_t end;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!rpra[i].buf.len)
			continue;
		if (buf_page_start(rpra) == buf_page_start(rpra[i].buf.pv))
			continue;
		/* flush the partial line at an unaligned buffer start */
		if (!IS_CACHE_ALIGNED((uint32_t)rpra[i].buf.pv))
			dmac_flush_range(rpra[i].buf.pv,
				(char *)rpra[i].buf.pv + 1);
		/* and at an unaligned buffer end */
		end = (uint32_t)rpra[i].buf.pv + rpra[i].buf.len;
		if (!IS_CACHE_ALIGNED(end))
			dmac_flush_range((char *)end,
				(char *)end + 1);
	}
}
543
/*
 * After the DSP has written results: invalidate the cache over each
 * output buffer so the CPU reads fresh data.  Buffers sharing the
 * header's page are covered by the final header invalidate instead,
 * which also runs whenever out-handles are present.
 */
static void inv_args(uint32_t sc, remote_arg_t *rpra, int used)
{
	int i, inbufs, outbufs;
	int inv = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (buf_page_start(rpra) == buf_page_start(rpra[i].buf.pv))
			inv = 1;
		else if (rpra[i].buf.len)
			dmac_inv_range(rpra[i].buf.pv,
				(char *)rpra[i].buf.pv + rpra[i].buf.len);
	}

	if (inv || REMOTE_SCALARS_OUTHANDLES(sc))
		dmac_inv_range(rpra, (char *)rpra + used);
}
562
/*
 * Post the invoke message to the DSP over SMD.  Kernel-initiated calls
 * use pid 0.  The write is serialized with wrlock; a short write is
 * reported as an error via VERIFY.  Returns 0 when the full message
 * was queued.
 */
static int fastrpc_invoke_send(struct fastrpc_apps *me,
				 uint32_t kernel, uint32_t handle,
				 uint32_t sc, struct smq_invoke_ctx *ctx,
				 struct fastrpc_buf *buf)
{
	struct smq_msg msg;
	int err = 0, len;
	msg.pid = current->tgid;
	msg.tid = current->pid;
	if (kernel)
		msg.pid = 0;
	msg.invoke.header.ctx = ctx;
	msg.invoke.header.handle = handle;
	msg.invoke.header.sc = sc;
	msg.invoke.page.addr = buf->phys;
	msg.invoke.page.size = buf_page_size(buf->used);
	spin_lock(&me->wrlock);
	len = smd_write(me->chan, &msg, sizeof(msg));
	spin_unlock(&me->wrlock);
	VERIFY(err, len == sizeof(msg));
	return err;
}
585
/*
 * Tear down global state in reverse order of fastrpc_init(): close the
 * SMD channel, free the context pool, destroy the ION client.
 */
static void fastrpc_deinit(void)
{
	struct fastrpc_apps *me = &gfa;

	smd_close(me->chan);
	context_list_dtor(&me->clst);
	ion_client_destroy(me->iclient);
	me->iclient = 0;
	me->chan = 0;
}
596
/*
 * Drain responses from the SMD channel (called from the SMD_EVENT_DATA
 * callback) and wake each response's waiting context.  Stops on the
 * first short/failed read, which also covers the no-more-data case.
 */
static void fastrpc_read_handler(void)
{
	struct fastrpc_apps *me = &gfa;
	struct smq_invoke_rsp rsp;
	int err = 0;

	do {
		VERIFY(err, sizeof(rsp) ==
			smd_read_from_cb(me->chan, &rsp, sizeof(rsp)));
		if (err)
			goto bail;
		context_notify_user(rsp.ctx, rsp.retval);
	} while (!err);
 bail:
	return;
}
613
/*
 * SMD channel callback.  OPEN completes the init handshake wait,
 * CLOSE wakes all in-flight invocations so they can fail out, DATA
 * drains pending responses.  Other events are ignored.
 */
static void smd_event_handler(void *priv, unsigned event)
{
	struct fastrpc_apps *me = (struct fastrpc_apps *)priv;

	switch (event) {
	case SMD_EVENT_OPEN:
		complete(&(me->work));
		break;
	case SMD_EVENT_CLOSE:
		context_notify_all_users(&me->clst);
		break;
	case SMD_EVENT_DATA:
		fastrpc_read_handler();
		break;
	}
}
630
/*
 * One-time global init (idempotent: skipped when the channel is
 * already open).  Sets up locks and the device hash, allocates the
 * context pool and ION client, optionally discovers the LPASS audio
 * SMMU from the device tree, then opens the SMD channel to the QDSP
 * and waits for the OPEN event.  Errors unwind through the labeled
 * cleanup chain in reverse acquisition order.
 */
static int fastrpc_init(void)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct device_node *node;
	bool enabled = 0;

	if (me->chan == 0) {
		int i;
		spin_lock_init(&me->hlock);
		spin_lock_init(&me->wrlock);
		init_completion(&me->work);
		for (i = 0; i < RPC_HASH_SZ; ++i)
			INIT_HLIST_HEAD(&me->htbl[i]);
		VERIFY(err, 0 == context_list_ctor(&me->clst, SZ_4K));
		if (err)
			goto context_list_bail;
		me->iclient = msm_ion_client_create(ION_HEAP_CARVEOUT_MASK,
							DEVICE_NAME);
		VERIFY(err, 0 == IS_ERR_OR_NULL(me->iclient));
		if (err)
			goto ion_bail;
		/* SMMU is optional: only enabled when the DT advertises it
		 * and the lpass_audio IOMMU group/domain can be resolved */
		node = of_find_compatible_node(NULL, NULL,
						"qcom,msm-audio-ion");
		if (node)
			enabled = of_property_read_bool(node,
						"qcom,smmu-enabled");
		if (enabled)
			me->smmu.group = iommu_group_find("lpass_audio");
		if (me->smmu.group)
			me->smmu.domain = iommu_group_get_iommudata(
							me->smmu.group);
		if (!IS_ERR_OR_NULL(me->smmu.domain)) {
			me->smmu.domain_id = msm_find_domain_no(
							me->smmu.domain);
			if (me->smmu.domain_id >= 0)
				me->smmu.enabled = enabled;
		}
		VERIFY(err, 0 == smd_named_open_on_edge(FASTRPC_SMD_GUID,
						SMD_APPS_QDSP, &me->chan,
						me, smd_event_handler));
		if (err)
			goto smd_bail;
		/* wait for SMD_EVENT_OPEN from smd_event_handler() */
		VERIFY(err, 0 != wait_for_completion_timeout(&me->work,
							RPC_TIMEOUT));
		if (err)
			goto completion_bail;
	}

	return 0;

completion_bail:
	smd_close(me->chan);
smd_bail:
	ion_client_destroy(me->iclient);
ion_bail:
	context_list_dtor(&me->clst);
context_list_bail:
	return err;
}
691
/*
 * Destroy a per-process device: release its scratch buffer, free the
 * struct, and drop the module reference taken in alloc_dev().
 * NULL is a no-op.
 */
static void free_dev(struct fastrpc_device *dev)
{
	if (dev) {
		free_mem(&dev->buf);
		kfree(dev);
		module_put(THIS_MODULE);
	}
}
700
701static int alloc_dev(struct fastrpc_device **dev)
702{
703 int err = 0;
704 struct fastrpc_device *fd = 0;
705
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700706 VERIFY(err, 0 != try_module_get(THIS_MODULE));
707 if (err)
708 goto bail;
709 VERIFY(err, 0 != (fd = kzalloc(sizeof(*fd), GFP_KERNEL)));
710 if (err)
711 goto bail;
Mitchel Humpherys0d99a792013-03-05 13:41:14 -0800712
713 INIT_HLIST_NODE(&fd->hn);
714
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700715 fd->buf.size = PAGE_SIZE;
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700716 VERIFY(err, 0 == alloc_mem(&fd->buf));
717 if (err)
718 goto bail;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700719 fd->tgid = current->tgid;
Mitchel Humpherys0d99a792013-03-05 13:41:14 -0800720
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700721 *dev = fd;
722 bail:
723 if (err)
724 free_dev(fd);
725 return err;
726}
727
/*
 * Fetch the calling process's cached device from the tgid hash,
 * removing it from the table so the caller owns it exclusively; if
 * none is cached, allocate a fresh one.  The caller returns it via
 * add_dev() when done.  Returns 0 on success.
 */
static int get_dev(struct fastrpc_apps *me, struct fastrpc_device **rdev)
{
	struct hlist_head *head;
	struct fastrpc_device *dev = 0, *devfree = 0;
	struct hlist_node *pos, *n;
	uint32_t h = hash_32(current->tgid, RPC_HASH_BITS);
	int err = 0;

	spin_lock(&me->hlock);
	head = &me->htbl[h];
	hlist_for_each_entry_safe(dev, pos, n, head, hn) {
		if (dev->tgid == current->tgid) {
			hlist_del(&dev->hn);
			devfree = dev;
			break;
		}
	}
	spin_unlock(&me->hlock);
	VERIFY(err, devfree != 0);
	if (err)
		goto bail;
	*rdev = devfree;
 bail:
	if (err) {
		/* cache miss: devfree is 0 (no-op free), allocate anew */
		free_dev(devfree);
		err = alloc_dev(rdev);
	}
	return err;
}
757
758static void add_dev(struct fastrpc_apps *me, struct fastrpc_device *dev)
759{
760 struct hlist_head *head;
761 uint32_t h = hash_32(current->tgid, RPC_HASH_BITS);
762
763 spin_lock(&me->hlock);
764 head = &me->htbl[h];
765 hlist_add_head(&dev->hn, head);
766 spin_unlock(&me->hlock);
767 return;
768}
769
770static int fastrpc_release_current_dsp_process(void);
771
/*
 * Core invoke path, shared by the ioctl (kernel == 0) and internal
 * kernel calls (kernel == 1): attach the SMMU group if enabled,
 * marshal arguments (get_dev/get_page_list/get_args), send the message
 * and wait for the response, then unmarshal results (put_args).  If
 * the wait is interrupted by a signal the process is torn down on the
 * DSP and we still wait for the in-flight response before freeing the
 * context — the DSP may yet write into our buffers.  Cleanup returns
 * the cached device and frees any extra marshalling buffers.
 */
static int fastrpc_internal_invoke(struct fastrpc_apps *me, uint32_t kernel,
			struct fastrpc_ioctl_invoke *invoke, remote_arg_t *pra)
{
	remote_arg_t *rpra = 0;
	struct fastrpc_device *dev = 0;
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_buf obuf, *abufs = 0, *b;
	int interrupted = 0;
	uint32_t sc;
	int i, nbufs = 0, err = 0;

	sc = invoke->sc;
	obuf.handle = 0;
	if (me->smmu.enabled) {
		VERIFY(err, 0 == iommu_attach_group(me->smmu.domain,
						me->smmu.group));
		if (err)
			return err;
	}
	/* zero-arg calls skip marshalling entirely (rpra stays 0) */
	if (REMOTE_SCALARS_LENGTH(sc)) {
		VERIFY(err, 0 == get_dev(me, &dev));
		if (err)
			goto bail;
		VERIFY(err, 0 == get_page_list(kernel, sc, pra, &dev->buf,
						&obuf));
		if (err)
			goto bail;
		rpra = (remote_arg_t *)obuf.virt;
		VERIFY(err, 0 == get_args(kernel, sc, pra, rpra, invoke->pra,
					&obuf, &abufs, &nbufs));
		if (err)
			goto bail;
	}

	context_list_alloc_ctx(&me->clst, &ctx);
	inv_args_pre(sc, rpra);
	VERIFY(err, 0 == fastrpc_invoke_send(me, kernel, invoke->handle, sc,
						ctx, &obuf));
	if (err)
		goto bail;
	inv_args(sc, rpra, obuf.used);
	VERIFY(err, 0 == (interrupted =
			wait_for_completion_interruptible(&ctx->work)));
	if (err)
		goto bail;
	/* remote return value becomes our return value */
	VERIFY(err, 0 == (err = ctx->retval));
	if (err)
		goto bail;
	VERIFY(err, 0 == put_args(kernel, sc, pra, rpra, invoke->pra));
	if (err)
		goto bail;
 bail:
	if (interrupted) {
		/* a signal aborted the wait: kill the DSP-side process and
		 * wait for the pending response before reusing the context */
		if (!kernel)
			(void)fastrpc_release_current_dsp_process();
		wait_for_completion(&ctx->work);
	}
	context_free(ctx);

	if (me->smmu.enabled)
		iommu_detach_group(me->smmu.domain, me->smmu.group);
	for (i = 0, b = abufs; i < nbufs; ++i, ++b)
		free_mem(b);

	kfree(abufs);
	if (dev) {
		add_dev(me, dev);
		if (obuf.handle != dev->buf.handle)
			free_mem(&obuf);
	}
	return err;
}
844
845static int fastrpc_create_current_dsp_process(void)
846{
847 int err = 0;
848 struct fastrpc_ioctl_invoke ioctl;
849 struct fastrpc_apps *me = &gfa;
850 remote_arg_t ra[1];
851 int tgid = 0;
852
853 tgid = current->tgid;
854 ra[0].buf.pv = &tgid;
855 ra[0].buf.len = sizeof(tgid);
856 ioctl.handle = 1;
857 ioctl.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
858 ioctl.pra = ra;
Mitchel Humpherys0d99a792013-03-05 13:41:14 -0800859 VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, 1, &ioctl, ra)));
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700860 return err;
861}
862
863static int fastrpc_release_current_dsp_process(void)
864{
865 int err = 0;
866 struct fastrpc_apps *me = &gfa;
867 struct fastrpc_ioctl_invoke ioctl;
868 remote_arg_t ra[1];
869 int tgid = 0;
870
871 tgid = current->tgid;
872 ra[0].buf.pv = &tgid;
873 ra[0].buf.len = sizeof(tgid);
874 ioctl.handle = 1;
875 ioctl.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
876 ioctl.pra = ra;
Mitchel Humpherys0d99a792013-03-05 13:41:14 -0800877 VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, 1, &ioctl, ra)));
878 return err;
879}
880
881static int fastrpc_mmap_on_dsp(struct fastrpc_apps *me,
882 struct fastrpc_ioctl_mmap *mmap,
883 struct smq_phy_page *pages,
884 int num)
885{
886 struct fastrpc_ioctl_invoke ioctl;
887 remote_arg_t ra[3];
888 int err = 0;
889 struct {
890 int pid;
891 uint32_t flags;
892 uint32_t vaddrin;
893 int num;
894 } inargs;
895
896 struct {
897 uint32_t vaddrout;
898 } routargs;
899 inargs.pid = current->tgid;
900 inargs.vaddrin = mmap->vaddrin;
901 inargs.flags = mmap->flags;
902 inargs.num = num;
903 ra[0].buf.pv = &inargs;
904 ra[0].buf.len = sizeof(inargs);
905
906 ra[1].buf.pv = pages;
907 ra[1].buf.len = num * sizeof(*pages);
908
909 ra[2].buf.pv = &routargs;
910 ra[2].buf.len = sizeof(routargs);
911
912 ioctl.handle = 1;
913 ioctl.sc = REMOTE_SCALARS_MAKE(2, 2, 1);
914 ioctl.pra = ra;
915 VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, 1, &ioctl, ra)));
916 mmap->vaddrout = routargs.vaddrout;
917 if (err)
918 goto bail;
919bail:
920 return err;
921}
922
923static int fastrpc_munmap_on_dsp(struct fastrpc_apps *me,
924 struct fastrpc_ioctl_munmap *munmap)
925{
926 struct fastrpc_ioctl_invoke ioctl;
927 remote_arg_t ra[1];
928 int err = 0;
929 struct {
930 int pid;
931 uint32_t vaddrout;
932 int size;
933 } inargs;
934
935 inargs.pid = current->tgid;
936 inargs.size = munmap->size;
937 inargs.vaddrout = munmap->vaddrout;
938 ra[0].buf.pv = &inargs;
939 ra[0].buf.len = sizeof(inargs);
940
941 ioctl.handle = 1;
942 ioctl.sc = REMOTE_SCALARS_MAKE(3, 1, 0);
943 ioctl.pra = ra;
944 VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, 1, &ioctl, ra)));
945 return err;
946}
947
948static int fastrpc_internal_munmap(struct fastrpc_apps *me,
949 struct file_data *fdata,
950 struct fastrpc_ioctl_munmap *munmap)
951{
952 int err = 0;
953 struct fastrpc_mmap *map = 0, *mapfree = 0;
954 struct hlist_node *pos, *n;
955 VERIFY(err, 0 == (err = fastrpc_munmap_on_dsp(me, munmap)));
956 if (err)
957 goto bail;
958 spin_lock(&fdata->hlock);
959 hlist_for_each_entry_safe(map, pos, n, &fdata->hlst, hn) {
960 if (map->vaddrout == munmap->vaddrout &&
961 map->size == munmap->size) {
962 hlist_del(&map->hn);
963 mapfree = map;
964 map = 0;
965 break;
966 }
967 }
968 spin_unlock(&fdata->hlock);
969bail:
970 if (mapfree) {
971 free_map(mapfree);
972 kfree(mapfree);
973 }
974 return err;
975}
976
977
/*
 * Map a user buffer (backed by the ion fd in mmap->fd) into the DSP's
 * address space and record the mapping on this fd's list so it can be
 * torn down at munmap or release time.  Returns 0 on success; on any
 * failure the partially-built map is released via free_map.
 *
 * NOTE(review): if ion_import_dma_buf returns an ERR_PTR, the bail path
 * still calls free_map(map) with that error handle — presumably
 * free_map tolerates IS_ERR_OR_NULL handles; confirm against its
 * definition.
 */
static int fastrpc_internal_mmap(struct fastrpc_apps *me,
				 struct file_data *fdata,
				 struct fastrpc_ioctl_mmap *mmap)
{
	struct ion_client *clnt = gfa.iclient;
	struct fastrpc_mmap *map = 0;
	struct smq_phy_page *pages = 0;
	void *buf;
	int len;
	int num;
	int err = 0;

	VERIFY(err, 0 != (map = kzalloc(sizeof(*map), GFP_KERNEL)));
	if (err)
		goto bail;
	/* Take a reference on the caller-supplied ion buffer. */
	map->handle = ion_import_dma_buf(clnt, mmap->fd);
	VERIFY(err, 0 == IS_ERR_OR_NULL(map->handle));
	if (err)
		goto bail;
	VERIFY(err, 0 != (map->virt = ion_map_kernel(clnt, map->handle)));
	if (err)
		goto bail;
	buf = (void *)mmap->vaddrin;
	len = mmap->size;
	/* Worst-case page count spanned by [buf, buf + len). */
	num = buf_num_pages(buf, len);
	VERIFY(err, 0 != (pages = kzalloc(num * sizeof(*pages), GFP_KERNEL)));
	if (err)
		goto bail;
	/* Pin the pages and fill the physical-page descriptors. */
	VERIFY(err, 0 < (num = buf_get_pages(buf, len, num, 1, pages, num)));
	if (err)
		goto bail;

	VERIFY(err, 0 == fastrpc_mmap_on_dsp(me, mmap, pages, num));
	if (err)
		goto bail;
	/* Success: mmap->vaddrout was filled in by the DSP; remember it. */
	map->vaddrin = mmap->vaddrin;
	map->vaddrout = mmap->vaddrout;
	map->size = mmap->size;
	INIT_HLIST_NODE(&map->hn);
	spin_lock(&fdata->hlock);
	hlist_add_head(&map->hn, &fdata->hlst);
	spin_unlock(&fdata->hlock);
 bail:
	if (err && map) {
		free_map(map);
		kfree(map);
	}
	kfree(pages);
	return err;
}
1028
1029static void cleanup_current_dev(void)
1030{
1031 struct fastrpc_apps *me = &gfa;
1032 uint32_t h = hash_32(current->tgid, RPC_HASH_BITS);
1033 struct hlist_head *head;
Mitchel Humpherys8388b3b2013-03-04 18:07:45 -08001034 struct hlist_node *pos, *n;
1035 struct fastrpc_device *dev, *devfree;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001036
1037 rnext:
Mitchel Humpherys8388b3b2013-03-04 18:07:45 -08001038 devfree = dev = 0;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001039 spin_lock(&me->hlock);
1040 head = &me->htbl[h];
Mitchel Humpherys8388b3b2013-03-04 18:07:45 -08001041 hlist_for_each_entry_safe(dev, pos, n, head, hn) {
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001042 if (dev->tgid == current->tgid) {
1043 hlist_del(&dev->hn);
Mitchel Humpherys8388b3b2013-03-04 18:07:45 -08001044 devfree = dev;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001045 break;
1046 }
1047 }
1048 spin_unlock(&me->hlock);
Mitchel Humpherys8388b3b2013-03-04 18:07:45 -08001049 if (devfree) {
1050 free_dev(devfree);
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001051 goto rnext;
1052 }
1053 return;
1054}
1055
/*
 * fops->release: tear down per-fd state when the device node is closed.
 * Releases the DSP-side process (best effort; the return value is
 * deliberately ignored), frees this process's cached devices, then
 * drops every mapping recorded on the fd's private data.
 */
static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	struct file_data *fdata = (struct file_data *)file->private_data;
	(void)fastrpc_release_current_dsp_process();
	cleanup_current_dev();
	if (fdata) {
		struct fastrpc_mmap *map;
		struct hlist_node *n, *pos;
		file->private_data = 0;
		/* No lock taken here: the fd is closing, so no concurrent
		 * ioctl can touch fdata->hlst anymore. */
		hlist_for_each_entry_safe(map, pos, n, &fdata->hlst, hn) {
			hlist_del(&map->hn);
			free_map(map);
			kfree(map);
		}
		kfree(fdata);
	}
	return 0;
}
1074
/*
 * fops->open: allocate the per-fd bookkeeping structure and create the
 * corresponding process on the DSP.
 *
 * NOTE(review): the try_module_get/module_put pair brackets only the
 * setup; per the inline comment, creating the DSP process creates a
 * dev which itself holds a module reference.  If try_module_get fails,
 * open() returns 0 with private_data left NULL — presumably later
 * ioctls then run with fdata == 0; confirm this is intended.
 */
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	int err = 0;
	filp->private_data = 0;
	if (0 != try_module_get(THIS_MODULE)) {
		struct file_data *fdata = 0;
		/* This call will cause a dev to be created
		 * which will addref this module
		 */
		VERIFY(err, 0 != (fdata = kzalloc(sizeof(*fdata), GFP_KERNEL)));
		if (err)
			goto bail;

		spin_lock_init(&fdata->hlock);
		INIT_HLIST_HEAD(&fdata->hlst);

		VERIFY(err, 0 == fastrpc_create_current_dsp_process());
		if (err)
			goto bail;
		filp->private_data = fdata;
bail:
		if (err) {
			cleanup_current_dev();
			kfree(fdata);
		}
		module_put(THIS_MODULE);
	}
	return err;
}
1104
1105
/*
 * fops->unlocked_ioctl: dispatch the three fastrpc commands.
 *
 * FASTRPC_IOCTL_INVOKE - copy in the invoke descriptor plus its remote
 *                        argument array and run the remote call.
 * FASTRPC_IOCTL_MMAP   - map a user buffer onto the DSP and copy the
 *                        resulting descriptor (with vaddrout) back out.
 * FASTRPC_IOCTL_MUNMAP - tear such a mapping down.
 *
 * Unknown commands return -ENOTTY per convention.
 */
static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
				 unsigned long ioctl_param)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_ioctl_invoke invoke;
	struct fastrpc_ioctl_mmap mmap;
	struct fastrpc_ioctl_munmap munmap;
	remote_arg_t *pra = 0;
	void *param = (char *)ioctl_param;
	struct file_data *fdata = (struct file_data *)file->private_data;
	int bufs, err = 0;

	switch (ioctl_num) {
	case FASTRPC_IOCTL_INVOKE:
		VERIFY(err, 0 == copy_from_user(&invoke, param,
						sizeof(invoke)));
		if (err)
			goto bail;
		/* Total in + out buffer arguments declared in the scalars. */
		bufs = REMOTE_SCALARS_INBUFS(invoke.sc) +
			REMOTE_SCALARS_OUTBUFS(invoke.sc);
		if (bufs) {
			bufs = bufs * sizeof(*pra);
			VERIFY(err, 0 != (pra = kmalloc(bufs, GFP_KERNEL)));
			if (err)
				goto bail;
		}
		/* If bufs == 0 this is a zero-length copy with pra == 0. */
		VERIFY(err, 0 == copy_from_user(pra, invoke.pra, bufs));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, 0, &invoke,
								pra)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MMAP:
		VERIFY(err, 0 == copy_from_user(&mmap, param,
						sizeof(mmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_mmap(me, fdata,
							      &mmap)));
		if (err)
			goto bail;
		/* Hand the DSP-side address (mmap.vaddrout) back to user. */
		VERIFY(err, 0 == copy_to_user(param, &mmap, sizeof(mmap)));
		if (err)
			goto bail;
		break;
	case FASTRPC_IOCTL_MUNMAP:
		VERIFY(err, 0 == copy_from_user(&munmap, param,
						sizeof(munmap)));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_munmap(me, fdata,
								&munmap)));
		if (err)
			goto bail;
		break;
	default:
		err = -ENOTTY;
		break;
	}
 bail:
	kfree(pra);
	return err;
}
1171
/* Character-device entry points for the fastrpc device node. */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
};
1177
/*
 * Module init: bring up the fastrpc transport, then register the char
 * device and create /dev/<DEVICE_NAME>.  Each labelled failure path
 * unwinds exactly the steps that already succeeded, in reverse order.
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	int err = 0;

	memset(me, 0, sizeof(*me));
	VERIFY(err, 0 == fastrpc_init());
	if (err)
		goto fastrpc_bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, 1, DEVICE_NAME));
	if (err)
		goto alloc_chrdev_bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0), 1));
	if (err)
		goto cdev_init_bail;
	me->class = class_create(THIS_MODULE, "chardrv");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto class_create_bail;
	me->dev = device_create(me->class, NULL, MKDEV(MAJOR(me->dev_no), 0),
				NULL, DEVICE_NAME);
	VERIFY(err, !IS_ERR(me->dev));
	if (err)
		goto device_create_bail;
	pr_info("'created /dev/%s c %d 0'\n", DEVICE_NAME, MAJOR(me->dev_no));

	return 0;

device_create_bail:
	class_destroy(me->class);
class_create_bail:
	cdev_del(&me->cdev);
cdev_init_bail:
	unregister_chrdev_region(me->dev_no, 1);
alloc_chrdev_bail:
	fastrpc_deinit();
fastrpc_bail:
	return err;
}
1219
1220static void __exit fastrpc_device_exit(void)
1221{
1222 struct fastrpc_apps *me = &gfa;
1223
1224 fastrpc_deinit();
Mitchel Humpherys55877652013-02-02 11:23:42 -08001225 device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
1226 class_destroy(me->class);
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001227 cdev_del(&me->cdev);
1228 unregister_chrdev_region(me->dev_no, 1);
1229}
1230
/* Standard module entry/exit hookup and license declaration. */
module_init(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");