/*
 * Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include "adsprpc.h"

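/*
 * One in-flight remote invocation: the caller sleeps on 'work' until the
 * DSP response fills in 'retval'; 'free' marks whether the slot is in use.
 */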
struct smq_invoke_ctx {
	struct completion work;
	int retval;
	atomic_t free;
};

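/* Fixed pool of invoke contexts; 'last' is a rotating search hint. */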
struct smq_context_list {
	struct smq_invoke_ctx *ls;
	int size;
	int last;
};

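/* Global driver state: SMD channel, context pool, ION client, char device. */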
struct fastrpc_apps {
	smd_channel_t *chan;
	struct smq_context_list clst;
	struct completion work;
	struct ion_client *iclient;
	struct cdev cdev;
	dev_t dev_no;
	spinlock_t wrlock;
	spinlock_t hlock;
	struct hlist_head htbl[RPC_HASH_SZ];
};

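/* An ION allocation with its kernel mapping and physical address. */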
struct fastrpc_buf {
	struct ion_handle *handle;
	void *virt;
	ion_phys_addr_t phys;
	int size;
	int used;
};

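/* Per-process device entry, hashed by tgid, with a scratch message buffer. */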
struct fastrpc_device {
	uint32_t tgid;
	struct hlist_node hn;
	struct fastrpc_buf buf;
};

static struct fastrpc_apps gfa;

static void free_mem(struct fastrpc_buf *buf)
{
	struct fastrpc_apps *me = &gfa;

	if (buf->handle) {
		if (buf->virt) {
			ion_unmap_kernel(me->iclient, buf->handle);
			buf->virt = 0;
		}
		ion_free(me->iclient, buf->handle);
		buf->handle = 0;
	}
}

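/* Allocate buf->size bytes from the audio ION heap and map them. */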
static int alloc_mem(struct fastrpc_buf *buf)
{
	struct ion_client *clnt = gfa.iclient;
	int err = 0;

	buf->handle = ion_alloc(clnt, buf->size, SZ_4K,
				ION_HEAP(ION_AUDIO_HEAP_ID), 0);
	VERIFY(0 == IS_ERR_OR_NULL(buf->handle));
	buf->virt = 0;
	VERIFY(0 != (buf->virt = ion_map_kernel(clnt, buf->handle,
						ION_FLAG_CACHED)));
	VERIFY(0 == ion_phys(clnt, buf->handle, &buf->phys, &buf->size));
 bail:
	if (err && !IS_ERR_OR_NULL(buf->handle))
		free_mem(buf);
	return err;
}

static int context_list_ctor(struct smq_context_list *me, int size)
{
	int err = 0;

	VERIFY(0 != (me->ls = kzalloc(size, GFP_KERNEL)));
	me->size = size / sizeof(*me->ls);
	me->last = 0;
 bail:
	return err;
}

static void context_list_dtor(struct smq_context_list *me)
{
	kfree(me->ls);
	me->ls = 0;
}

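/*
 * Claim a free context slot, scanning the pool from the last position;
 * the 0 -> 1 atomic_cmpxchg on 'free' is the ownership point.
 */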
static void context_list_alloc_ctx(struct smq_context_list *me,
				   struct smq_invoke_ctx **po)
{
	int ii = me->last;
	struct smq_invoke_ctx *ctx;

	for (;;) {
		ii = ii % me->size;
		ctx = &me->ls[ii];
		if (atomic_read(&ctx->free) == 0)
			if (0 == atomic_cmpxchg(&ctx->free, 0, 1))
				break;
		ii++;
	}
	me->last = ii;
	ctx->retval = -1;
	init_completion(&ctx->work);
	*po = ctx;
}

static void context_free(struct smq_invoke_ctx *me)
{
	if (me)
		atomic_set(&me->free, 0);
}

static void context_notify_user(struct smq_invoke_ctx *me, int retval)
{
	me->retval = retval;
	complete(&me->work);
}

static void context_notify_all_users(struct smq_context_list *me)
{
	int ii;

	if (!me->ls)
		return;
	for (ii = 0; ii < me->size; ++ii) {
		if (atomic_read(&me->ls[ii].free) != 0)
			complete(&me->ls[ii].work);
	}
}

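/*
 * Lay out the invoke buffer and physical page list for the scalars in 'sc',
 * starting from a copy of 'ibuf' and reallocating 'obuf' until it all fits.
 */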
static int get_page_list(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
			 struct fastrpc_buf *ibuf, struct fastrpc_buf *obuf)
{
	struct smq_phy_page *pgstart, *pages;
	struct smq_invoke_buf *list;
	int ii, rlen, err = 0;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);

	VERIFY(0 != try_module_get(THIS_MODULE));
	LOCK_MMAP(kernel);
	*obuf = *ibuf;
 retry:
	list = smq_invoke_buf_start((remote_arg_t *)obuf->virt, sc);
	pgstart = smq_phy_page_start(sc, list);
	pages = pgstart + 1;
	rlen = obuf->size - ((uint32_t)pages - (uint32_t)obuf->virt);
	if (rlen < 0) {
		rlen = ((uint32_t)pages - (uint32_t)obuf->virt) - obuf->size;
		obuf->size += buf_page_size(rlen);
		obuf->handle = 0;
		VERIFY(0 == alloc_mem(obuf));
		goto retry;
	}
	pgstart->addr = obuf->phys;
	pgstart->size = obuf->size;
	for (ii = 0; ii < inbufs + outbufs; ++ii) {
		void *buf;
		int len, num;

		len = pra[ii].buf.len;
		if (!len)
			continue;
		buf = pra[ii].buf.pv;
		num = buf_num_pages(buf, len);
		if (!kernel)
			list[ii].num = buf_get_pages(buf, len, num,
				ii >= inbufs, pages, rlen / sizeof(*pages));
		else
			list[ii].num = 0;
		VERIFY(list[ii].num >= 0);
		if (list[ii].num) {
			list[ii].pgidx = pages - pgstart;
			pages = pages + list[ii].num;
		} else if (rlen > sizeof(*pages)) {
			list[ii].pgidx = pages - pgstart;
			pages = pages + 1;
		} else {
			if (obuf->handle != ibuf->handle)
				free_mem(obuf);
			obuf->size += buf_page_size(sizeof(*pages));
			obuf->handle = 0;
			VERIFY(0 == alloc_mem(obuf));
			goto retry;
		}
		rlen = obuf->size - ((uint32_t)pages - (uint32_t)obuf->virt);
	}
	obuf->used = obuf->size - rlen;
 bail:
	if (err && (obuf->handle != ibuf->handle))
		free_mem(obuf);
	UNLOCK_MMAP(kernel);
	module_put(THIS_MODULE);
	return err;
}

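/*
 * Marshal input arguments into DSP-visible memory, allocating extra
 * buffers where the scratch buffer runs out, then flush the caches.
 */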
static int get_args(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
		    remote_arg_t *rpra, remote_arg_t *upra,
		    struct fastrpc_buf *ibuf, struct fastrpc_buf **abufs,
		    int *nbufs)
{
	struct smq_invoke_buf *list;
	struct fastrpc_buf *pbuf = ibuf, *obufs = 0;
	struct smq_phy_page *pages;
	void *args;
	int ii, rlen, size, used, inh, bufs = 0, err = 0;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);

	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	used = ALIGN_8(pbuf->used);
	args = (void *)((char *)pbuf->virt + used);
	rlen = pbuf->size - used;
	for (ii = 0; ii < inbufs + outbufs; ++ii) {
		int num;

		rpra[ii].buf.len = pra[ii].buf.len;
		if (list[ii].num) {
			rpra[ii].buf.pv = pra[ii].buf.pv;
			continue;
		}
		if (rlen < pra[ii].buf.len) {
			struct fastrpc_buf *b;

			pbuf->used = pbuf->size - rlen;
			VERIFY(0 != (b = krealloc(obufs,
				(bufs + 1) * sizeof(*obufs), GFP_KERNEL)));
			obufs = b;
			pbuf = obufs + bufs;
			pbuf->size = buf_num_pages(0, pra[ii].buf.len) *
								PAGE_SIZE;
			VERIFY(0 == alloc_mem(pbuf));
			bufs++;
			args = pbuf->virt;
			rlen = pbuf->size;
		}
		num = buf_num_pages(args, pra[ii].buf.len);
		if (pbuf == ibuf) {
			list[ii].num = num;
			list[ii].pgidx = 0;
		} else {
			list[ii].num = 1;
			pages[list[ii].pgidx].addr =
				buf_page_start((void *)(pbuf->phys +
						(pbuf->size - rlen)));
			pages[list[ii].pgidx].size =
				buf_page_size(pra[ii].buf.len);
		}
		if (ii < inbufs) {
			if (!kernel)
				VERIFY(0 == copy_from_user(args,
					pra[ii].buf.pv, pra[ii].buf.len));
			else
				memmove(args, pra[ii].buf.pv,
					pra[ii].buf.len);
		}
		rpra[ii].buf.pv = args;
		args = (void *)((char *)args + ALIGN_8(pra[ii].buf.len));
		rlen -= ALIGN_8(pra[ii].buf.len);
	}
	for (ii = 0; ii < inbufs; ++ii) {
		if (rpra[ii].buf.len)
			dmac_flush_range(rpra[ii].buf.pv,
				(char *)rpra[ii].buf.pv + rpra[ii].buf.len);
	}
	pbuf->used = pbuf->size - rlen;
	size = sizeof(*rpra) * REMOTE_SCALARS_INHANDLES(sc);
	if (size) {
		inh = inbufs + outbufs;
		if (!kernel)
			VERIFY(0 == copy_from_user(&rpra[inh], &upra[inh],
						   size));
		else
			memmove(&rpra[inh], &upra[inh], size);
	}
	dmac_flush_range(rpra, (char *)rpra + used);
 bail:
	*abufs = obufs;
	*nbufs = bufs;
	return err;
}

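/* Copy output buffers and out handles back to the caller after an invoke. */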
static int put_args(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
		    remote_arg_t *rpra, remote_arg_t *upra)
{
	int ii, inbufs, outbufs, outh, size;
	int err = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (ii = inbufs; ii < inbufs + outbufs; ++ii) {
		if (rpra[ii].buf.pv != pra[ii].buf.pv)
			VERIFY(0 == copy_to_user(pra[ii].buf.pv,
					rpra[ii].buf.pv, rpra[ii].buf.len));
	}
	size = sizeof(*rpra) * REMOTE_SCALARS_OUTHANDLES(sc);
	if (size) {
		outh = inbufs + outbufs + REMOTE_SCALARS_INHANDLES(sc);
		if (!kernel)
			VERIFY(0 == copy_to_user(&upra[outh], &rpra[outh],
						 size));
		else
			memmove(&upra[outh], &rpra[outh], size);
	}
 bail:
	return err;
}

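/*
 * Invalidate output buffers written by the DSP; a buffer that shares a
 * page with the argument list forces invalidation of the list as well.
 */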
static void inv_args(uint32_t sc, remote_arg_t *rpra, int used)
{
	int ii, inbufs, outbufs;
	int inv = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (ii = inbufs; ii < inbufs + outbufs; ++ii) {
		if (buf_page_start(rpra) == buf_page_start(rpra[ii].buf.pv))
			inv = 1;
		else
			dmac_inv_range(rpra[ii].buf.pv,
				(char *)rpra[ii].buf.pv + rpra[ii].buf.len);
	}

	if (inv || REMOTE_SCALARS_OUTHANDLES(sc))
		dmac_inv_range(rpra, (char *)rpra + used);
}

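/* Package an invoke message and write it to the SMD channel. */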
static int fastrpc_invoke_send(struct fastrpc_apps *me, remote_handle_t handle,
			       uint32_t sc, struct smq_invoke_ctx *ctx,
			       struct fastrpc_buf *buf)
{
	struct smq_msg msg;
	int err = 0, len;

	msg.pid = current->tgid;
	msg.tid = current->pid;
	msg.invoke.header.ctx = ctx;
	msg.invoke.header.handle = handle;
	msg.invoke.header.sc = sc;
	msg.invoke.page.addr = buf->phys;
	msg.invoke.page.size = buf_page_size(buf->used);
	spin_lock(&me->wrlock);
	len = smd_write(me->chan, &msg, sizeof(msg));
	spin_unlock(&me->wrlock);
	VERIFY(len == sizeof(msg));
 bail:
	return err;
}

static void fastrpc_deinit(void)
{
	struct fastrpc_apps *me = &gfa;

	if (me->chan)
		(void)smd_close(me->chan);
	context_list_dtor(&me->clst);
	ion_client_destroy(me->iclient);
	me->iclient = 0;
	me->chan = 0;
}

static void fastrpc_read_handler(void)
{
	struct fastrpc_apps *me = &gfa;
	struct smq_invoke_rsp rsp;
	int err = 0;

	do {
		VERIFY(sizeof(rsp) ==
			smd_read_from_cb(me->chan, &rsp, sizeof(rsp)));
		context_notify_user(rsp.ctx, rsp.retval);
	} while (!err);
 bail:
	return;
}

static void smd_event_handler(void *priv, unsigned event)
{
	struct fastrpc_apps *me = (struct fastrpc_apps *)priv;

	switch (event) {
	case SMD_EVENT_OPEN:
		complete(&me->work);
		break;
	case SMD_EVENT_CLOSE:
		context_notify_all_users(&me->clst);
		break;
	case SMD_EVENT_DATA:
		fastrpc_read_handler();
		break;
	}
}

static int fastrpc_init(void)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;

	if (me->chan == 0) {
		int ii;

		spin_lock_init(&me->hlock);
		spin_lock_init(&me->wrlock);
		init_completion(&me->work);
		for (ii = 0; ii < RPC_HASH_SZ; ++ii)
			INIT_HLIST_HEAD(&me->htbl[ii]);
		VERIFY(0 == context_list_ctor(&me->clst, SZ_4K));
		me->iclient = msm_ion_client_create(ION_HEAP_CARVEOUT_MASK,
							DEVICE_NAME);
		VERIFY(0 == IS_ERR_OR_NULL(me->iclient));
		VERIFY(0 == smd_named_open_on_edge(FASTRPC_SMD_GUID,
						   SMD_APPS_QDSP, &me->chan,
						   me, smd_event_handler));
		VERIFY(0 != wait_for_completion_timeout(&me->work,
							RPC_TIMEOUT));
	}
 bail:
	if (err)
		fastrpc_deinit();
	return err;
}

static void free_dev(struct fastrpc_device *dev)
{
	if (dev) {
		module_put(THIS_MODULE);
		free_mem(&dev->buf);
		kfree(dev);
	}
}

static int alloc_dev(struct fastrpc_device **dev)
{
	int err = 0;
	struct fastrpc_device *fd = 0;

	VERIFY(0 != try_module_get(THIS_MODULE));
	VERIFY(0 != (fd = kzalloc(sizeof(*fd), GFP_KERNEL)));
	fd->buf.size = PAGE_SIZE;
	VERIFY(0 == alloc_mem(&fd->buf));
	fd->tgid = current->tgid;
	INIT_HLIST_NODE(&fd->hn);
	*dev = fd;
 bail:
	if (err)
		free_dev(fd);
	return err;
}

static int get_dev(struct fastrpc_apps *me, struct fastrpc_device **rdev)
{
	struct hlist_head *head;
	struct fastrpc_device *dev = 0, *match = 0;
	struct hlist_node *n;
	uint32_t h = hash_32(current->tgid, RPC_HASH_BITS);
	int err = 0;

	spin_lock(&me->hlock);
	head = &me->htbl[h];
	hlist_for_each_entry(dev, n, head, hn) {
		/* only detach an entry owned by the current process */
		if (dev->tgid == current->tgid) {
			match = dev;
			hlist_del(&dev->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	VERIFY(match != 0);
	*rdev = match;
 bail:
	if (err) {
		free_dev(match);
		err = alloc_dev(rdev);
	}
	return err;
}

static void add_dev(struct fastrpc_apps *me, struct fastrpc_device *dev)
{
	struct hlist_head *head;
	uint32_t h = hash_32(current->tgid, RPC_HASH_BITS);

	spin_lock(&me->hlock);
	head = &me->htbl[h];
	hlist_add_head(&dev->hn, head);
	spin_unlock(&me->hlock);
}

static int fastrpc_release_current_dsp_process(void);

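/*
 * Core invoke path: marshal arguments, send the message to the DSP, wait
 * for completion, then unmarshal results and release temporary buffers.
 */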
static int fastrpc_internal_invoke(struct fastrpc_apps *me, uint32_t kernel,
			struct fastrpc_ioctl_invoke *invoke, remote_arg_t *pra)
{
	remote_arg_t *rpra = 0;
	struct fastrpc_device *dev = 0;
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_buf obuf, *abufs = 0, *b;
	int interrupted = 0;
	uint32_t sc;
	int ii, nbufs = 0, err = 0;

	sc = invoke->sc;
	obuf.handle = 0;
	if (REMOTE_SCALARS_LENGTH(sc)) {
		VERIFY(0 == get_dev(me, &dev));
		VERIFY(0 == get_page_list(kernel, sc, pra, &dev->buf, &obuf));
		rpra = (remote_arg_t *)obuf.virt;
		VERIFY(0 == get_args(kernel, sc, pra, rpra, invoke->pra, &obuf,
					&abufs, &nbufs));
	}

	context_list_alloc_ctx(&me->clst, &ctx);
	VERIFY(0 == fastrpc_invoke_send(me, invoke->handle, sc, ctx, &obuf));
	inv_args(sc, rpra, obuf.used);
	VERIFY(0 == (interrupted =
			wait_for_completion_interruptible(&ctx->work)));
	VERIFY(0 == (err = ctx->retval));
	VERIFY(0 == put_args(kernel, sc, pra, rpra, invoke->pra));
 bail:
	if (interrupted) {
		init_completion(&ctx->work);
		if (!kernel)
			(void)fastrpc_release_current_dsp_process();
		wait_for_completion(&ctx->work);
	}
	context_free(ctx);
	for (ii = 0, b = abufs; ii < nbufs; ++ii, ++b)
		free_mem(b);
	kfree(abufs);
	if (dev) {
		add_dev(me, dev);
		if (obuf.handle != dev->buf.handle)
			free_mem(&obuf);
	}
	return err;
}

static int fastrpc_create_current_dsp_process(void)
{
	int err = 0;
	struct fastrpc_ioctl_invoke ioctl;
	struct fastrpc_apps *me = &gfa;
	remote_arg_t ra[1];
	int tgid = 0;

	tgid = current->tgid;
	ra[0].buf.pv = &tgid;
	ra[0].buf.len = sizeof(tgid);
	ioctl.handle = 1;
	ioctl.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
	ioctl.pra = ra;
	VERIFY(0 == fastrpc_internal_invoke(me, 1, &ioctl, ra));
 bail:
	return err;
}

static int fastrpc_release_current_dsp_process(void)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_ioctl_invoke ioctl;
	remote_arg_t ra[1];
	int tgid = 0;

	tgid = current->tgid;
	ra[0].buf.pv = &tgid;
	ra[0].buf.len = sizeof(tgid);
	ioctl.handle = 1;
	ioctl.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
	ioctl.pra = ra;
	VERIFY(0 == fastrpc_internal_invoke(me, 1, &ioctl, ra));
 bail:
	return err;
}

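/* Detach and free every device entry owned by the current process. */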
static void cleanup_current_dev(void)
{
	struct fastrpc_apps *me = &gfa;
	uint32_t h = hash_32(current->tgid, RPC_HASH_BITS);
	struct hlist_head *head;
	struct hlist_node *pos;
	struct fastrpc_device *dev, *match;

 rnext:
	match = 0;
	spin_lock(&me->hlock);
	head = &me->htbl[h];
	hlist_for_each_entry(dev, pos, head, hn) {
		/* only detach entries owned by the current process */
		if (dev->tgid == current->tgid) {
			match = dev;
			hlist_del(&dev->hn);
			break;
		}
	}
	spin_unlock(&me->hlock);
	if (match) {
		free_dev(match);
		goto rnext;
	}
}

static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	(void)fastrpc_release_current_dsp_process();
	cleanup_current_dev();
	return 0;
}

static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	int err = 0;

	if (0 != try_module_get(THIS_MODULE)) {
		/*
		 * This call will cause a dev to be created,
		 * which will addref this module.
		 */
		VERIFY(0 == fastrpc_create_current_dsp_process());
 bail:
		if (err)
			cleanup_current_dev();
		module_put(THIS_MODULE);
	}
	return err;
}

static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
				 unsigned long ioctl_param)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_ioctl_invoke invoke;
	remote_arg_t *pra = 0;
	void *param = (void *)ioctl_param;
	int bufs, err = 0;

	switch (ioctl_num) {
	case FASTRPC_IOCTL_INVOKE:
		VERIFY(0 == copy_from_user(&invoke, param, sizeof(invoke)));
		bufs = REMOTE_SCALARS_INBUFS(invoke.sc) +
			REMOTE_SCALARS_OUTBUFS(invoke.sc);
		if (bufs) {
			bufs = bufs * sizeof(*pra);
			VERIFY(0 != (pra = kmalloc(bufs, GFP_KERNEL)));
		}
		VERIFY(0 == copy_from_user(pra, invoke.pra, bufs));
		VERIFY(0 == (err = fastrpc_internal_invoke(me, 0, &invoke,
								pra)));
		break;
	default:
		err = -EINVAL;
		break;
	}
 bail:
	kfree(pra);
	return err;
}

static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
};

static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	int err = 0;

	VERIFY(0 == fastrpc_init());
	VERIFY(0 == alloc_chrdev_region(&me->dev_no, 0, 1, DEVICE_NAME));
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0), 1));
	pr_info("'mknod /dev/%s c %d 0'\n", DEVICE_NAME, MAJOR(me->dev_no));
 bail:
	return err;
}

static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;

	fastrpc_deinit();
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, 1);
}

module_init(fastrpc_device_init);
module_exit(fastrpc_device_exit);

MODULE_LICENSE("GPL v2");