blob: 73fe5d6e27446d67ee7359913ab74ae2902dc382 [file] [log] [blame]
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07001/*
Mitchel Humpherys6b4c68c2013-02-06 12:03:20 -08002 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
Mitchel Humpherys79d361e2012-08-29 16:20:15 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
Mitchel Humpherys6b4c68c2013-02-06 12:03:20 -080014#include "adsprpc_shared.h"
15
16#include <linux/slab.h>
17#include <linux/completion.h>
18#include <linux/pagemap.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
21#include <linux/sched.h>
22#include <linux/module.h>
23#include <linux/cdev.h>
24#include <linux/list.h>
25#include <linux/hash.h>
26#include <linux/msm_ion.h>
27#include <mach/msm_smd.h>
28#include <mach/ion.h>
Mitchel Humpherys42e806e2012-09-30 22:27:53 -070029#include <linux/scatterlist.h>
Mitchel Humpherys6b4c68c2013-02-06 12:03:20 -080030#include <linux/fs.h>
31#include <linux/uaccess.h>
32#include <linux/device.h>
33
34#ifndef ION_ADSPRPC_HEAP_ID
35#define ION_ADSPRPC_HEAP_ID ION_AUDIO_HEAP_ID
36#endif /*ION_ADSPRPC_HEAP_ID*/
37
38#define RPC_TIMEOUT (5 * HZ)
39#define RPC_HASH_BITS 5
40#define RPC_HASH_SZ (1 << RPC_HASH_BITS)
41#define BALIGN 32
42
43#define LOCK_MMAP(kernel)\
44 do {\
45 if (!kernel)\
46 down_read(&current->mm->mmap_sem);\
47 } while (0)
48
49#define UNLOCK_MMAP(kernel)\
50 do {\
51 if (!kernel)\
52 up_read(&current->mm->mmap_sem);\
53 } while (0)
54
55
56static inline uint32_t buf_page_start(void *buf)
57{
58 uint32_t start = (uint32_t) buf & PAGE_MASK;
59 return start;
60}
61
62static inline uint32_t buf_page_offset(void *buf)
63{
64 uint32_t offset = (uint32_t) buf & (PAGE_SIZE - 1);
65 return offset;
66}
67
68static inline int buf_num_pages(void *buf, int len)
69{
70 uint32_t start = buf_page_start(buf) >> PAGE_SHIFT;
71 uint32_t end = (((uint32_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
72 int nPages = end - start + 1;
73 return nPages;
74}
75
76static inline uint32_t buf_page_size(uint32_t size)
77{
78 uint32_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
79 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
80}
81
/*
 * Translate a user buffer into at most one physical page-table entry.
 *
 * Validates that the page-aligned span covering [addr, addr + sz) is
 * accessible (writable for out-buffers, readable otherwise), lies within a
 * single VMA, and that its first pfn resolves.  On success, pages[0] is
 * filled with the physical address of the first page and a size covering
 * nr_pages whole pages.
 *
 * Returns the number of entries written (0 or 1), or -1 if validation
 * failed before the pfn lookup stage.  Callers treat a negative return as
 * an error and 0 as "copy this buffer into the message instead".
 *
 * NOTE(review): only the first pfn is looked up but the entry spans
 * nr_pages pages — this assumes the whole range is physically contiguous;
 * confirm against the buffers callers pass here.
 */
static inline int buf_get_pages(void *addr, int sz, int nr_pages, int access,
			struct smq_phy_page *pages, int nr_elems)
{
	struct vm_area_struct *vma;
	uint32_t start = buf_page_start(addr);
	uint32_t len = nr_pages << PAGE_SHIFT;
	unsigned long pfn;
	int n = -1, err = 0;

	VERIFY(err, 0 != access_ok(access ? VERIFY_WRITE : VERIFY_READ,
		(void __user *)start, len));
	if (err)
		goto bail;
	VERIFY(err, 0 != (vma = find_vma(current->mm, start)));
	if (err)
		goto bail;
	VERIFY(err, ((uint32_t)addr + sz) <= vma->vm_end);
	if (err)
		goto bail;
	/* From here on, failures report 0 entries rather than -1. */
	n = 0;
	VERIFY(err, 0 == follow_pfn(vma, start, &pfn));
	if (err)
		goto bail;
	VERIFY(err, nr_elems > 0);
	if (err)
		goto bail;
	pages->addr = __pfn_to_phys(pfn);
	pages->size = len;
	n++;
 bail:
	return n;
}
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700114
/*
 * Per-call context.  Its address is sent to the DSP in the message header
 * and echoed back in the response, which is how the read handler finds
 * the waiter to complete.
 */
struct smq_invoke_ctx {
	struct completion work;	/* completed when the response arrives */
	int retval;		/* remote return value, -1 until then */
	atomic_t free;		/* 0 = slot available, 1 = claimed */
};

/* Fixed-size pool of invoke contexts, claimed lock-free via ->free. */
struct smq_context_list {
	struct smq_invoke_ctx *ls;	/* context array */
	int size;			/* number of elements in ls */
	int last;			/* hint: index of last claimed slot */
};

/* Driver-global state; single static instance (gfa) below. */
struct fastrpc_apps {
	smd_channel_t *chan;		/* SMD channel to the QDSP edge */
	struct smq_context_list clst;
	struct completion work;		/* signalled on SMD_EVENT_OPEN */
	struct ion_client *iclient;
	struct cdev cdev;
	struct class *class;
	struct device *dev;
	dev_t dev_no;
	spinlock_t wrlock;		/* serializes smd_write() */
	spinlock_t hlock;		/* protects htbl */
	struct hlist_head htbl[RPC_HASH_SZ];	/* devices hashed by tgid */
};

/* An ion allocation with an optional kernel mapping. */
struct fastrpc_buf {
	struct ion_handle *handle;
	void *virt;		/* kernel virtual mapping, 0 if unmapped */
	ion_phys_addr_t phys;
	int size;		/* allocation size in bytes */
	int used;		/* bytes of the buffer consumed so far */
};

/* Per-process scratch device, cached in fastrpc_apps.htbl by tgid. */
struct fastrpc_device {
	uint32_t tgid;
	struct hlist_node hn;
	struct fastrpc_buf buf;	/* reusable message buffer */
};

static struct fastrpc_apps gfa;
156
157static void free_mem(struct fastrpc_buf *buf)
158{
159 struct fastrpc_apps *me = &gfa;
160
161 if (buf->handle) {
162 if (buf->virt) {
163 ion_unmap_kernel(me->iclient, buf->handle);
164 buf->virt = 0;
165 }
166 ion_free(me->iclient, buf->handle);
167 buf->handle = 0;
168 }
169}
170
171static int alloc_mem(struct fastrpc_buf *buf)
172{
173 struct ion_client *clnt = gfa.iclient;
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700174 struct sg_table *sg;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700175 int err = 0;
176
177 buf->handle = ion_alloc(clnt, buf->size, SZ_4K,
Hanumant Singh7d72bad2012-08-29 18:39:44 -0700178 ION_HEAP(ION_AUDIO_HEAP_ID), 0);
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700179 VERIFY(err, 0 == IS_ERR_OR_NULL(buf->handle));
180 if (err)
181 goto bail;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700182 buf->virt = 0;
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700183 VERIFY(err, 0 != (buf->virt = ion_map_kernel(clnt, buf->handle)));
184 if (err)
185 goto bail;
186 VERIFY(err, 0 != (sg = ion_sg_table(clnt, buf->handle)));
187 if (err)
188 goto bail;
189 VERIFY(err, 1 == sg->nents);
190 if (err)
191 goto bail;
192 buf->phys = sg_dma_address(sg->sgl);
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700193 bail:
194 if (err && !IS_ERR_OR_NULL(buf->handle))
195 free_mem(buf);
196 return err;
197}
198
/*
 * Allocate the invoke-context pool.  @size is the pool size in bytes;
 * me->size ends up as the number of whole contexts that fit.  Returns 0
 * on success, non-zero if the allocation failed.
 */
static int context_list_ctor(struct smq_context_list *me, int size)
{
	int err = 0;
	VERIFY(err, 0 != (me->ls = kzalloc(size, GFP_KERNEL)));
	if (err)
		goto bail;
	me->size = size / sizeof(*me->ls);
	me->last = 0;
 bail:
	return err;
}
210
/* Free the context pool; safe if the ctor failed (kfree(0) is a no-op). */
static void context_list_dtor(struct smq_context_list *me)
{
	kfree(me->ls);
	me->ls = 0;
}
216
/*
 * Claim a free invoke context, busy-waiting until one is available.
 * Slots are claimed lock-free: winning the 0 -> 1 atomic_cmpxchg on
 * ->free owns the slot.  The scan starts at the last handed-out index
 * so successive calls spread across the pool.
 */
static void context_list_alloc_ctx(struct smq_context_list *me,
					struct smq_invoke_ctx **po)
{
	int i = me->last;
	struct smq_invoke_ctx *ctx;

	for (;;) {
		i = i % me->size;
		ctx = &me->ls[i];
		/* Cheap read first; only attempt the cmpxchg on a
		 * plausibly-free slot. */
		if (atomic_read(&ctx->free) == 0)
			if (atomic_cmpxchg(&ctx->free, 0, 1) == 0)
				break;
		i++;
	}
	me->last = i;
	/* retval stays -1 unless the DSP response overwrites it. */
	ctx->retval = -1;
	init_completion(&ctx->work);
	*po = ctx;
}
236
237static void context_free(struct smq_invoke_ctx *me)
238{
239 if (me)
240 atomic_set(&me->free, 0);
241}
242
/*
 * Deliver the DSP's return value and wake the invoking thread.  retval
 * must be stored before complete() so the woken waiter reads the final
 * value.
 */
static void context_notify_user(struct smq_invoke_ctx *me, int retval)
{
	me->retval = retval;
	complete(&me->work);
}
248
249static void context_notify_all_users(struct smq_context_list *me)
250{
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700251 int i;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700252
253 if (!me->ls)
254 return;
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700255 for (i = 0; i < me->size; ++i) {
256 if (atomic_read(&me->ls[i].free) != 0)
257 complete(&me->ls[i].work);
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700258 }
259}
260
/*
 * Build the smq_invoke_buf list and physical page table for an invoke
 * message in *obuf, growing the buffer (via alloc_mem) and retrying
 * until everything fits.
 *
 * @kernel: non-zero for in-kernel invocations — no user page lookups and
 *          no mmap_sem locking (see LOCK_MMAP).
 * @ibuf:   the per-process scratch buffer used as the initial obuf.
 * @obuf:   out: the message buffer; may be a fresh allocation if ibuf was
 *          too small.  Caller frees it when obuf->handle != ibuf->handle.
 *
 * Returns 0 on success.  On failure any freshly allocated obuf is freed.
 */
static int get_page_list(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
			struct fastrpc_buf *ibuf, struct fastrpc_buf *obuf)
{
	struct smq_phy_page *pgstart, *pages;
	struct smq_invoke_buf *list;
	int i, rlen, err = 0;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);

	LOCK_MMAP(kernel);
	*obuf = *ibuf;
 retry:
	list = smq_invoke_buf_start((remote_arg_t *)obuf->virt, sc);
	pgstart = smq_phy_page_start(sc, list);
	pages = pgstart + 1;
	rlen = obuf->size - ((uint32_t)pages - (uint32_t)obuf->virt);
	if (rlen < 0) {
		/* Even the headers overflow obuf: grow it and restart. */
		rlen = ((uint32_t)pages - (uint32_t)obuf->virt) - obuf->size;
		obuf->size += buf_page_size(rlen);
		obuf->handle = 0;
		VERIFY(err, 0 == alloc_mem(obuf));
		if (err)
			goto bail;
		goto retry;
	}
	/* The first page entry describes the message buffer itself. */
	pgstart->addr = obuf->phys;
	pgstart->size = obuf->size;
	for (i = 0; i < inbufs + outbufs; ++i) {
		void *buf;
		int len, num;

		list[i].num = 0;
		list[i].pgidx = 0;
		len = pra[i].buf.len;
		if (!len)
			continue;
		buf = pra[i].buf.pv;
		num = buf_num_pages(buf, len);
		if (!kernel)
			list[i].num = buf_get_pages(buf, len, num,
				i >= inbufs, pages, rlen / sizeof(*pages));
		else
			list[i].num = 0;
		VERIFY(err, list[i].num >= 0);
		if (err)
			goto bail;
		if (list[i].num) {
			/* Mapped in place: record its page entries. */
			list[i].pgidx = pages - pgstart;
			pages = pages + list[i].num;
		} else if (rlen > sizeof(*pages)) {
			/* Will be copied into the message by get_args();
			 * reserve one page entry for it. */
			list[i].pgidx = pages - pgstart;
			pages = pages + 1;
		} else {
			/* Out of room for page entries: grow and retry. */
			if (obuf->handle != ibuf->handle)
				free_mem(obuf);
			obuf->size += buf_page_size(sizeof(*pages));
			obuf->handle = 0;
			VERIFY(err, 0 == alloc_mem(obuf));
			if (err)
				goto bail;
			goto retry;
		}
		rlen = obuf->size - ((uint32_t) pages - (uint32_t) obuf->virt);
	}
	obuf->used = obuf->size - rlen;
 bail:
	if (err && (obuf->handle != ibuf->handle))
		free_mem(obuf);
	UNLOCK_MMAP(kernel);
	return err;
}
332
/*
 * Marshal input arguments into the message and fill in the page-table
 * entries reserved by get_page_list().
 *
 * Buffers that were not mapped in place (list[i].num == 0) are copied
 * into the tail of the message buffer; when it runs out, additional
 * fastrpc_bufs are allocated and returned via @abufs/@nbufs — the caller
 * frees those even on error.  In-handles are copied after the buffers,
 * and caches are flushed over everything the DSP will read.
 *
 * @kernel selects memmove vs copy_from_user for user-supplied data.
 */
static int get_args(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
			remote_arg_t *rpra, remote_arg_t *upra,
			struct fastrpc_buf *ibuf, struct fastrpc_buf **abufs,
			int *nbufs)
{
	struct smq_invoke_buf *list;
	struct fastrpc_buf *pbuf = ibuf, *obufs = 0;
	struct smq_phy_page *pages;
	void *args;
	int i, rlen, size, used, inh, bufs = 0, err = 0;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);

	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	/* Argument data starts BALIGN-aligned after the headers. */
	used = ALIGN(pbuf->used, BALIGN);
	args = (void *)((char *)pbuf->virt + used);
	rlen = pbuf->size - used;
	for (i = 0; i < inbufs + outbufs; ++i) {

		rpra[i].buf.len = pra[i].buf.len;
		if (!rpra[i].buf.len)
			continue;
		if (list[i].num) {
			/* Mapped in place by get_page_list(): pass the
			 * caller's pointer straight through. */
			rpra[i].buf.pv = pra[i].buf.pv;
			continue;
		}
		if (rlen < pra[i].buf.len) {
			/* Current buffer exhausted: allocate another. */
			struct fastrpc_buf *b;
			pbuf->used = pbuf->size - rlen;
			VERIFY(err, 0 != (b = krealloc(obufs,
				(bufs + 1) * sizeof(*obufs), GFP_KERNEL)));
			if (err)
				goto bail;
			obufs = b;
			pbuf = obufs + bufs;
			pbuf->size = buf_num_pages(0, pra[i].buf.len) *
						PAGE_SIZE;
			VERIFY(err, 0 == alloc_mem(pbuf));
			if (err)
				goto bail;
			bufs++;
			args = pbuf->virt;
			rlen = pbuf->size;
		}
		/* Fill the page entry reserved for this argument. */
		list[i].num = 1;
		pages[list[i].pgidx].addr =
			buf_page_start((void *)(pbuf->phys +
						 (pbuf->size - rlen)));
		pages[list[i].pgidx].size =
			buf_page_size(pra[i].buf.len);
		if (i < inbufs) {
			if (!kernel) {
				VERIFY(err, 0 == copy_from_user(args,
					pra[i].buf.pv, pra[i].buf.len));
				if (err)
					goto bail;
			} else {
				memmove(args, pra[i].buf.pv, pra[i].buf.len);
			}
		}
		rpra[i].buf.pv = args;
		args = (void *)((char *)args + ALIGN(pra[i].buf.len, BALIGN));
		rlen -= ALIGN(pra[i].buf.len, BALIGN);
	}
	/* Make the copied in-buffers visible to the DSP. */
	for (i = 0; i < inbufs; ++i) {
		if (rpra[i].buf.len)
			dmac_flush_range(rpra[i].buf.pv,
				(char *)rpra[i].buf.pv + rpra[i].buf.len);
	}
	pbuf->used = pbuf->size - rlen;
	size = sizeof(*rpra) * REMOTE_SCALARS_INHANDLES(sc);
	if (size) {
		inh = inbufs + outbufs;
		if (!kernel) {
			VERIFY(err, 0 == copy_from_user(&rpra[inh], &upra[inh],
							size));
			if (err)
				goto bail;
		} else {
			memmove(&rpra[inh], &upra[inh], size);
		}
	}
	/* Flush the message headers/page tables themselves. */
	dmac_flush_range(rpra, (char *)rpra + used);
 bail:
	*abufs = obufs;
	*nbufs = bufs;
	return err;
}
422
423static int put_args(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
424 remote_arg_t *rpra, remote_arg_t *upra)
425{
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700426 int i, inbufs, outbufs, outh, size;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700427 int err = 0;
428
429 inbufs = REMOTE_SCALARS_INBUFS(sc);
430 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700431 for (i = inbufs; i < inbufs + outbufs; ++i) {
432 if (rpra[i].buf.pv != pra[i].buf.pv) {
433 VERIFY(err, 0 == copy_to_user(pra[i].buf.pv,
434 rpra[i].buf.pv, rpra[i].buf.len));
435 if (err)
436 goto bail;
437 }
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700438 }
439 size = sizeof(*rpra) * REMOTE_SCALARS_OUTHANDLES(sc);
440 if (size) {
441 outh = inbufs + outbufs + REMOTE_SCALARS_INHANDLES(sc);
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700442 if (!kernel) {
443 VERIFY(err, 0 == copy_to_user(&upra[outh], &rpra[outh],
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700444 size));
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700445 if (err)
446 goto bail;
447 } else {
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700448 memmove(&upra[outh], &rpra[outh], size);
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700449 }
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700450 }
451 bail:
452 return err;
453}
454
/*
 * Invalidate CPU caches over the out-buffers after the DSP has written
 * them.  A buffer that shares a page with the message itself is covered
 * by the final whole-message invalidate instead of a per-buffer one;
 * the message is also invalidated whenever out-handles are present.
 */
static void inv_args(uint32_t sc, remote_arg_t *rpra, int used)
{
	int i, inbufs, outbufs;
	int inv = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (buf_page_start(rpra) == buf_page_start(rpra[i].buf.pv))
			inv = 1;
		else if (rpra[i].buf.len)
			dmac_inv_range(rpra[i].buf.pv,
				(char *)rpra[i].buf.pv + rpra[i].buf.len);
	}

	if (inv || REMOTE_SCALARS_OUTHANDLES(sc))
		dmac_inv_range(rpra, (char *)rpra + used);
}
473
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700474static int fastrpc_invoke_send(struct fastrpc_apps *me, uint32_t handle,
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700475 uint32_t sc, struct smq_invoke_ctx *ctx,
476 struct fastrpc_buf *buf)
477{
478 struct smq_msg msg;
479 int err = 0, len;
480
481 msg.pid = current->tgid;
482 msg.tid = current->pid;
483 msg.invoke.header.ctx = ctx;
484 msg.invoke.header.handle = handle;
485 msg.invoke.header.sc = sc;
486 msg.invoke.page.addr = buf->phys;
487 msg.invoke.page.size = buf_page_size(buf->used);
488 spin_lock(&me->wrlock);
489 len = smd_write(me->chan, &msg, sizeof(msg));
490 spin_unlock(&me->wrlock);
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700491 VERIFY(err, len == sizeof(msg));
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700492 return err;
493}
494
495static void fastrpc_deinit(void)
496{
497 struct fastrpc_apps *me = &gfa;
498
499 if (me->chan)
500 (void)smd_close(me->chan);
501 context_list_dtor(&me->clst);
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700502 if (me->iclient)
503 ion_client_destroy(me->iclient);
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700504 me->iclient = 0;
505 me->chan = 0;
506}
507
/*
 * SMD data callback: drain complete responses from the channel and wake
 * the matching invoke contexts.  Stops as soon as a full response is no
 * longer available.
 */
static void fastrpc_read_handler(void)
{
	struct fastrpc_apps *me = &gfa;
	struct smq_invoke_rsp rsp;
	int err = 0;

	do {
		VERIFY(err, sizeof(rsp) ==
			smd_read_from_cb(me->chan, &rsp, sizeof(rsp)));
		if (err)
			goto bail;
		/* rsp.ctx is the context pointer sent in the request. */
		context_notify_user(rsp.ctx, rsp.retval);
	} while (!err);
 bail:
	return;
}
524
525static void smd_event_handler(void *priv, unsigned event)
526{
527 struct fastrpc_apps *me = (struct fastrpc_apps *)priv;
528
529 switch (event) {
530 case SMD_EVENT_OPEN:
531 complete(&(me->work));
532 break;
533 case SMD_EVENT_CLOSE:
534 context_notify_all_users(&me->clst);
535 break;
536 case SMD_EVENT_DATA:
537 fastrpc_read_handler();
538 break;
539 }
540}
541
/*
 * One-time global init: locks, context pool, ion client, and the SMD
 * channel to the aDSP; waits (with timeout) for the channel-open event
 * before declaring success.  Idempotent once the channel is open.  On
 * failure everything is torn down via fastrpc_deinit().
 */
static int fastrpc_init(void)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;

	if (me->chan == 0) {
		int i;
		spin_lock_init(&me->hlock);
		spin_lock_init(&me->wrlock);
		init_completion(&me->work);
		for (i = 0; i < RPC_HASH_SZ; ++i)
			INIT_HLIST_HEAD(&me->htbl[i]);
		VERIFY(err, 0 == context_list_ctor(&me->clst, SZ_4K));
		if (err)
			goto bail;
		me->iclient = msm_ion_client_create(ION_HEAP_CARVEOUT_MASK,
							DEVICE_NAME);
		VERIFY(err, 0 == IS_ERR_OR_NULL(me->iclient));
		if (err)
			goto bail;
		VERIFY(err, 0 == smd_named_open_on_edge(FASTRPC_SMD_GUID,
						SMD_APPS_QDSP, &me->chan,
						me, smd_event_handler));
		if (err)
			goto bail;
		/* smd_event_handler() completes me->work on SMD_EVENT_OPEN. */
		VERIFY(err, 0 != wait_for_completion_timeout(&me->work,
							RPC_TIMEOUT));
		if (err)
			goto bail;
	}
 bail:
	if (err)
		fastrpc_deinit();
	return err;
}
577
578static void free_dev(struct fastrpc_device *dev)
579{
580 if (dev) {
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700581 free_mem(&dev->buf);
582 kfree(dev);
Mitchel Humpherys6b4c68c2013-02-06 12:03:20 -0800583 module_put(THIS_MODULE);
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700584 }
585}
586
587static int alloc_dev(struct fastrpc_device **dev)
588{
589 int err = 0;
590 struct fastrpc_device *fd = 0;
591
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700592 VERIFY(err, 0 != try_module_get(THIS_MODULE));
593 if (err)
594 goto bail;
595 VERIFY(err, 0 != (fd = kzalloc(sizeof(*fd), GFP_KERNEL)));
596 if (err)
597 goto bail;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700598 fd->buf.size = PAGE_SIZE;
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700599 VERIFY(err, 0 == alloc_mem(&fd->buf));
600 if (err)
601 goto bail;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700602 fd->tgid = current->tgid;
603 INIT_HLIST_NODE(&fd->hn);
604 *dev = fd;
605 bail:
606 if (err)
607 free_dev(fd);
608 return err;
609}
610
611static int get_dev(struct fastrpc_apps *me, struct fastrpc_device **rdev)
612{
613 struct hlist_head *head;
614 struct fastrpc_device *dev = 0;
615 struct hlist_node *n;
616 uint32_t h = hash_32(current->tgid, RPC_HASH_BITS);
617 int err = 0;
618
619 spin_lock(&me->hlock);
620 head = &me->htbl[h];
621 hlist_for_each_entry(dev, n, head, hn) {
622 if (dev->tgid == current->tgid) {
623 hlist_del(&dev->hn);
624 break;
625 }
626 }
627 spin_unlock(&me->hlock);
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700628 VERIFY(err, dev != 0);
629 if (err)
630 goto bail;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700631 *rdev = dev;
632 bail:
633 if (err) {
634 free_dev(dev);
635 err = alloc_dev(rdev);
636 }
637 return err;
638}
639
640static void add_dev(struct fastrpc_apps *me, struct fastrpc_device *dev)
641{
642 struct hlist_head *head;
643 uint32_t h = hash_32(current->tgid, RPC_HASH_BITS);
644
645 spin_lock(&me->hlock);
646 head = &me->htbl[h];
647 hlist_add_head(&dev->hn, head);
648 spin_unlock(&me->hlock);
649 return;
650}
651
652static int fastrpc_release_current_dsp_process(void);
653
654static int fastrpc_internal_invoke(struct fastrpc_apps *me, uint32_t kernel,
655 struct fastrpc_ioctl_invoke *invoke, remote_arg_t *pra)
656{
657 remote_arg_t *rpra = 0;
658 struct fastrpc_device *dev = 0;
659 struct smq_invoke_ctx *ctx = 0;
660 struct fastrpc_buf obuf, *abufs = 0, *b;
661 int interrupted = 0;
662 uint32_t sc;
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700663 int i, nbufs = 0, err = 0;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700664
665 sc = invoke->sc;
666 obuf.handle = 0;
667 if (REMOTE_SCALARS_LENGTH(sc)) {
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700668 VERIFY(err, 0 == get_dev(me, &dev));
669 if (err)
670 goto bail;
671 VERIFY(err, 0 == get_page_list(kernel, sc, pra, &dev->buf,
672 &obuf));
673 if (err)
674 goto bail;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700675 rpra = (remote_arg_t *)obuf.virt;
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700676 VERIFY(err, 0 == get_args(kernel, sc, pra, rpra, invoke->pra,
677 &obuf, &abufs, &nbufs));
678 if (err)
679 goto bail;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700680 }
681
682 context_list_alloc_ctx(&me->clst, &ctx);
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700683 VERIFY(err, 0 == fastrpc_invoke_send(me, invoke->handle, sc, ctx,
684 &obuf));
685 if (err)
686 goto bail;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700687 inv_args(sc, rpra, obuf.used);
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700688 VERIFY(err, 0 == (interrupted =
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700689 wait_for_completion_interruptible(&ctx->work)));
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700690 if (err)
691 goto bail;
692 VERIFY(err, 0 == (err = ctx->retval));
693 if (err)
694 goto bail;
695 VERIFY(err, 0 == put_args(kernel, sc, pra, rpra, invoke->pra));
696 if (err)
697 goto bail;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700698 bail:
699 if (interrupted) {
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700700 if (!kernel)
701 (void)fastrpc_release_current_dsp_process();
702 wait_for_completion(&ctx->work);
703 }
704 context_free(ctx);
Mitchel Humpherys6b4c68c2013-02-06 12:03:20 -0800705
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700706 for (i = 0, b = abufs; i < nbufs; ++i, ++b)
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700707 free_mem(b);
Mitchel Humpherys6b4c68c2013-02-06 12:03:20 -0800708
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700709 kfree(abufs);
710 if (dev) {
711 add_dev(me, dev);
712 if (obuf.handle != dev->buf.handle)
713 free_mem(&obuf);
714 }
715 return err;
716}
717
718static int fastrpc_create_current_dsp_process(void)
719{
720 int err = 0;
721 struct fastrpc_ioctl_invoke ioctl;
722 struct fastrpc_apps *me = &gfa;
723 remote_arg_t ra[1];
724 int tgid = 0;
725
726 tgid = current->tgid;
727 ra[0].buf.pv = &tgid;
728 ra[0].buf.len = sizeof(tgid);
729 ioctl.handle = 1;
730 ioctl.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
731 ioctl.pra = ra;
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700732 VERIFY(err, 0 == fastrpc_internal_invoke(me, 1, &ioctl, ra));
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700733 return err;
734}
735
736static int fastrpc_release_current_dsp_process(void)
737{
738 int err = 0;
739 struct fastrpc_apps *me = &gfa;
740 struct fastrpc_ioctl_invoke ioctl;
741 remote_arg_t ra[1];
742 int tgid = 0;
743
744 tgid = current->tgid;
745 ra[0].buf.pv = &tgid;
746 ra[0].buf.len = sizeof(tgid);
747 ioctl.handle = 1;
748 ioctl.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
749 ioctl.pra = ra;
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700750 VERIFY(err, 0 == fastrpc_internal_invoke(me, 1, &ioctl, ra));
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700751 return err;
752}
753
754static void cleanup_current_dev(void)
755{
756 struct fastrpc_apps *me = &gfa;
757 uint32_t h = hash_32(current->tgid, RPC_HASH_BITS);
758 struct hlist_head *head;
759 struct hlist_node *pos;
760 struct fastrpc_device *dev;
761
762 rnext:
763 dev = 0;
764 spin_lock(&me->hlock);
765 head = &me->htbl[h];
766 hlist_for_each_entry(dev, pos, head, hn) {
767 if (dev->tgid == current->tgid) {
768 hlist_del(&dev->hn);
769 break;
770 }
771 }
772 spin_unlock(&me->hlock);
773 if (dev) {
774 free_dev(dev);
775 goto rnext;
776 }
777 return;
778}
779
/*
 * Release handler: tear down the remote DSP process (best effort — the
 * return value is deliberately ignored since the channel may already be
 * down) and free this tgid's cached devices.
 */
static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	(void)fastrpc_release_current_dsp_process();
	cleanup_current_dev();
	return 0;
}
786
/*
 * Open handler: create the remote DSP process for this tgid.
 *
 * The temporary module reference guards against unload while the
 * creation RPC is in flight; the per-process dev allocated underneath
 * (via get_dev/alloc_dev) holds its own reference afterwards.
 */
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	int err = 0;

	if (0 != try_module_get(THIS_MODULE)) {
		/* This call will cause a dev to be created
		 * which will addref this module
		 */
		VERIFY(err, 0 == fastrpc_create_current_dsp_process());
		if (err)
			cleanup_current_dev();
		module_put(THIS_MODULE);
	}
	return err;
}
802
803
/*
 * ioctl entry point.  FASTRPC_IOCTL_INVOKE copies the invoke request
 * and its remote_arg array in from user space, then dispatches to
 * fastrpc_internal_invoke() with kernel == 0.  Unknown requests return
 * -ENOTTY.
 */
static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
				 unsigned long ioctl_param)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_ioctl_invoke invoke;
	remote_arg_t *pra = 0;
	void *param = (char *)ioctl_param;
	int bufs, err = 0;

	switch (ioctl_num) {
	case FASTRPC_IOCTL_INVOKE:
		VERIFY(err, 0 == copy_from_user(&invoke, param,
						sizeof(invoke)));
		if (err)
			goto bail;
		bufs = REMOTE_SCALARS_INBUFS(invoke.sc) +
			REMOTE_SCALARS_OUTBUFS(invoke.sc);
		if (bufs) {
			/* bufs becomes a byte count from here on. */
			bufs = bufs * sizeof(*pra);
			VERIFY(err, 0 != (pra = kmalloc(bufs, GFP_KERNEL)));
			if (err)
				goto bail;
		}
		/* If bufs is 0, pra is 0 too and this copies nothing. */
		VERIFY(err, 0 == copy_from_user(pra, invoke.pra, bufs));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, 0, &invoke,
							pra)));
		if (err)
			goto bail;
		break;
	default:
		err = -ENOTTY;
		break;
	}
 bail:
	kfree(pra);
	return err;
}
843
/* File operations for the fastrpc character device. */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
};
849
850static int __init fastrpc_device_init(void)
851{
852 struct fastrpc_apps *me = &gfa;
853 int err = 0;
854
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700855 VERIFY(err, 0 == fastrpc_init());
856 if (err)
857 goto bail;
858 VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, 1, DEVICE_NAME));
859 if (err)
860 goto bail;
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700861 cdev_init(&me->cdev, &fops);
862 me->cdev.owner = THIS_MODULE;
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700863 VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0), 1));
864 if (err)
865 goto bail;
Mitchel Humpherys55877652013-02-02 11:23:42 -0800866 me->class = class_create(THIS_MODULE, "chardrv");
867 VERIFY(err, !IS_ERR(me->class));
868 if (err)
869 goto bail;
870 me->dev = device_create(me->class, NULL, MKDEV(MAJOR(me->dev_no), 0),
871 NULL, DEVICE_NAME);
872 VERIFY(err, !IS_ERR(me->dev));
873 if (err)
874 goto bail;
875 pr_info("'created /dev/%s c %d 0'\n", DEVICE_NAME, MAJOR(me->dev_no));
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700876 bail:
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700877 if (err) {
878 if (me->dev_no)
879 unregister_chrdev_region(me->dev_no, 1);
Mitchel Humpherys55877652013-02-02 11:23:42 -0800880 if (me->class)
881 class_destroy(me->class);
882 if (me->cdev.owner)
883 cdev_del(&me->cdev);
Mitchel Humpherys42e806e2012-09-30 22:27:53 -0700884 fastrpc_deinit();
885 }
Mitchel Humpherys79d361e2012-08-29 16:20:15 -0700886 return err;
887}
888
/*
 * Module exit: shut down the SMD/ion state first, then remove the
 * device node, class, cdev, and release the reserved dev_t region —
 * the reverse of fastrpc_device_init().
 */
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;

	fastrpc_deinit();
	device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, 1);
}
899
900module_init(fastrpc_device_init);
901module_exit(fastrpc_device_exit);
902
903MODULE_LICENSE("GPL v2");