blob: 705bab5486e8af5ceff918ddc704ca13d1844276 [file] [log] [blame]
Rohit Vaswani5fd759e2012-11-07 07:05:08 -08001/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#define KMSG_COMPONENT "SMCMOD"
15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16
17#include <linux/kernel.h>
18#include <linux/init.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/uaccess.h>
22#include <linux/errno.h>
23#include <linux/err.h>
24#include <linux/miscdevice.h>
25#include <linux/mutex.h>
26#include <linux/mm.h>
27#include <linux/slab.h>
28#include <linux/printk.h>
29#include <linux/msm_ion.h>
30#include <asm/smcmod.h>
31#include <mach/scm.h>
32
/* Serializes all ioctl-initiated SMC calls: only one process may drive
 * the SMC instruction at a time (see comment in smcmod_ioctl()).
 */
static DEFINE_MUTEX(ioctl_lock);

/* Secure-world service and command identifiers used by this module. */
#define SMCMOD_SVC_DEFAULT (0)
#define SMCMOD_SVC_CRYPTO (1)
#define SMCMOD_CRYPTO_CMD_CIPHER (1)
#define SMCMOD_CRYPTO_CMD_MSG_DIGEST_FIXED (2)
#define SMCMOD_CRYPTO_CMD_MSG_DIGEST (3)
/**
 * struct smcmod_cipher_scm_req - structure for sending the cipher cmd to
 * scm_call.
 *
 * @algorithm - specifies cipher algorithm
 * @operation - specifies encryption or decryption.
 * @mode - specifies cipher mode.
 * @key_phys_addr - physical address for key buffer.
 * @key_size - key size in bytes.
 * @plain_text_phys_addr - physical address for plain text buffer.
 * @plain_text_size - size of plain text in bytes.
 * @cipher_text_phys_addr - physical address for cipher text buffer.
 * @cipher_text_size - cipher text size in bytes.
 * @init_vector_phys_addr - physical address for init vector buffer.
 * @init_vector_size - size of initialization vector in bytes.
 *
 * NOTE(review): this struct is shared with the secure world but, unlike
 * struct smcmod_msg_digest_scm_req below, is not marked __packed. All
 * members are uint32_t so no padding is expected, but confirm the layout
 * matches the secure-side definition before changing anything here.
 */
struct smcmod_cipher_scm_req {
	uint32_t algorithm;
	uint32_t operation;
	uint32_t mode;
	uint32_t key_phys_addr;
	uint32_t key_size;
	uint32_t plain_text_phys_addr;
	uint32_t plain_text_size;
	uint32_t cipher_text_phys_addr;
	uint32_t cipher_text_size;
	uint32_t init_vector_phys_addr;
	uint32_t init_vector_size;
};
70
/**
 * struct smcmod_msg_digest_scm_req - structure for sending message digest
 * to scm_call.
 *
 * @algorithm - specifies the hash algorithm.
 * @key_phys_addr - physical address of key buffer (0 when no key is used).
 * @key_size - hash key size in bytes.
 * @input_phys_addr - physical address of input buffer.
 * @input_size - input data size in bytes.
 * @output_phys_addr - physical address of output buffer.
 * @output_size - size of output buffer in bytes.
 * @verify - indicates whether to verify the hash value
 *           (always set to 0 by this driver).
 */
struct smcmod_msg_digest_scm_req {
	uint32_t algorithm;
	uint32_t key_phys_addr;
	uint32_t key_size;
	uint32_t input_phys_addr;
	uint32_t input_size;
	uint32_t output_phys_addr;
	uint32_t output_size;
	uint8_t verify;
} __packed;
94
/*
 * smcmod_inv_range() - invalidate a virtual address range from the data
 * cache so the CPU observes data written by the secure world behind the
 * kernel's cached mappings.
 * @start: first virtual address of the range.
 * @end: one past the last virtual address of the range.
 *
 * ARM-specific: reads the Cache Type Register (CTR) via cp15 to derive
 * the minimum D-cache line size, then issues a per-line invalidate
 * (DCIMVAC) over the cache-line-aligned range.
 */
static void smcmod_inv_range(unsigned long start, unsigned long end)
{
	uint32_t cacheline_size;
	uint32_t ctr;

	/* get cache line size: CTR bits [19:16] hold log2(words) of the
	 * smallest D-cache line; 4 << n converts that to bytes.
	 */
	asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
	cacheline_size = 4 << ((ctr >> 16) & 0xf);

	/* align the range outward to whole cache lines, then invalidate
	 * each line by virtual address (DCIMVAC).
	 */
	start = round_down(start, cacheline_size);
	end = round_up(end, cacheline_size);
	while (start < end) {
		asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
		: "memory");
		start += cacheline_size;
	}
	/* ensure the invalidation has completed before returning */
	mb();
	isb();
}
115
116static int smcmod_ion_fd_to_phys(int32_t fd, struct ion_client *ion_clientp,
117 struct ion_handle **ion_handlep, uint32_t *phys_addrp, size_t *sizep)
118{
119 int ret = 0;
120
121 /* sanity check args */
122 if ((fd < 0) || IS_ERR_OR_NULL(ion_clientp) ||
123 IS_ERR_OR_NULL(ion_handlep) || IS_ERR_OR_NULL(phys_addrp) ||
124 IS_ERR_OR_NULL(sizep))
125 return -EINVAL;
126
127 /* import the buffer fd */
128 *ion_handlep = ion_import_dma_buf(ion_clientp, fd);
129
130 /* sanity check the handle */
131 if (IS_ERR_OR_NULL(*ion_handlep))
132 return -EINVAL;
133
134 /* get the physical address */
135 ret = ion_phys(ion_clientp, *ion_handlep, (ion_phys_addr_t *)phys_addrp,
136 sizep);
137
138 return ret;
139}
140
141static int smcmod_send_buf_cmd(struct smcmod_buf_req *reqp)
142{
143 int ret = 0;
144 struct ion_client *ion_clientp = NULL;
145 struct ion_handle *ion_cmd_handlep = NULL;
146 struct ion_handle *ion_resp_handlep = NULL;
147 void *cmd_vaddrp = NULL;
148 void *resp_vaddrp = NULL;
149 unsigned long cmd_buf_size = 0;
150 unsigned long resp_buf_size = 0;
151
152 /* sanity check the argument */
153 if (IS_ERR_OR_NULL(reqp))
154 return -EINVAL;
155
156 /* sanity check the fds */
157 if (reqp->ion_cmd_fd < 0)
158 return -EINVAL;
159
160 /* create an ion client */
161 ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");
162
163 /* check for errors */
164 if (IS_ERR_OR_NULL(ion_clientp))
165 return -EINVAL;
166
167 /* import the command buffer fd */
168 ion_cmd_handlep = ion_import_dma_buf(ion_clientp, reqp->ion_cmd_fd);
169
170 /* sanity check the handle */
171 if (IS_ERR_OR_NULL(ion_cmd_handlep)) {
172 ret = -EINVAL;
173 goto buf_cleanup;
174 }
175
176 /* retrieve the size of the buffer */
177 if (ion_handle_get_size(ion_clientp, ion_cmd_handlep,
178 &cmd_buf_size) < 0) {
179 ret = -EINVAL;
180 goto buf_cleanup;
181 }
182
183 /* ensure that the command buffer size is not
184 * greater than the size of the buffer.
185 */
186 if (reqp->cmd_len > cmd_buf_size) {
187 ret = -EINVAL;
188 goto buf_cleanup;
189 }
190
191 /* map the area to get a virtual address */
192 cmd_vaddrp = ion_map_kernel(ion_clientp, ion_cmd_handlep);
193
194 /* sanity check the address */
195 if (IS_ERR_OR_NULL(cmd_vaddrp)) {
196 ret = -EINVAL;
197 goto buf_cleanup;
198 }
199
200 /* check if there is a response buffer */
201 if (reqp->ion_resp_fd >= 0) {
202 /* import the handle */
203 ion_resp_handlep =
204 ion_import_dma_buf(ion_clientp, reqp->ion_resp_fd);
205
206 /* sanity check the handle */
207 if (IS_ERR_OR_NULL(ion_resp_handlep)) {
208 ret = -EINVAL;
209 goto buf_cleanup;
210 }
211
212 /* retrieve the size of the buffer */
213 if (ion_handle_get_size(ion_clientp, ion_resp_handlep,
214 &resp_buf_size) < 0) {
215 ret = -EINVAL;
216 goto buf_cleanup;
217 }
218
219 /* ensure that the command buffer size is not
220 * greater than the size of the buffer.
221 */
222 if (reqp->resp_len > resp_buf_size) {
223 ret = -EINVAL;
224 goto buf_cleanup;
225 }
226
227 /* map the area to get a virtual address */
228 resp_vaddrp = ion_map_kernel(ion_clientp, ion_resp_handlep);
229
230 /* sanity check the address */
231 if (IS_ERR_OR_NULL(resp_vaddrp)) {
232 ret = -EINVAL;
233 goto buf_cleanup;
234 }
235 }
236
237 /* call scm function to switch to secure world */
238 reqp->return_val = scm_call(reqp->service_id, reqp->command_id,
239 cmd_vaddrp, reqp->cmd_len, resp_vaddrp, reqp->resp_len);
240
241buf_cleanup:
242 /* if the client and handle(s) are valid, free them */
243 if (!IS_ERR_OR_NULL(ion_clientp)) {
244 if (!IS_ERR_OR_NULL(ion_cmd_handlep)) {
245 if (!IS_ERR_OR_NULL(cmd_vaddrp))
246 ion_unmap_kernel(ion_clientp, ion_cmd_handlep);
247 ion_free(ion_clientp, ion_cmd_handlep);
248 }
249
250 if (!IS_ERR_OR_NULL(ion_resp_handlep)) {
251 if (!IS_ERR_OR_NULL(resp_vaddrp))
252 ion_unmap_kernel(ion_clientp, ion_resp_handlep);
253 ion_free(ion_clientp, ion_resp_handlep);
254 }
255
256 ion_client_destroy(ion_clientp);
257 }
258
259 return ret;
260}
261
/*
 * smcmod_send_cipher_cmd() - run a cipher (encrypt/decrypt) operation in
 * the secure world.
 * @reqp: request describing the algorithm/operation/mode, an optional
 *        key buffer fd (skipped when key_is_null), and mandatory fds
 *        for the plain text, cipher text and init vector buffers.
 *
 * Resolves each ion fd to a physical address, bounds-checks the
 * requested sizes against the real buffer sizes, issues the scm call,
 * then invalidates the output buffer from the data cache so the CPU
 * observes what the secure side wrote.
 *
 * Returns 0 on success or a negative errno. The secure-world status is
 * reported separately in reqp->return_val; note that ret stays 0 even
 * when the secure call itself reports failure.
 *
 * NOTE(review): the ion fds are only size-checked, not checked for
 * ownership; confirm access control happens at a higher layer.
 */
static int smcmod_send_cipher_cmd(struct smcmod_cipher_req *reqp)
{
	int ret = 0;
	struct smcmod_cipher_scm_req scm_req;
	struct ion_client *ion_clientp = NULL;
	struct ion_handle *ion_key_handlep = NULL;
	struct ion_handle *ion_plain_handlep = NULL;
	struct ion_handle *ion_cipher_handlep = NULL;
	struct ion_handle *ion_iv_handlep = NULL;
	size_t size = 0;

	if (IS_ERR_OR_NULL(reqp))
		return -EINVAL;

	/* sanity check the fds */
	if ((reqp->ion_plain_text_fd < 0) ||
		(reqp->ion_cipher_text_fd < 0) ||
		(reqp->ion_init_vector_fd < 0))
		return -EINVAL;

	/* create an ion client */
	ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");

	/* check for errors */
	if (IS_ERR_OR_NULL(ion_clientp))
		return -EINVAL;

	/* fill in the scm request structure; physical addresses are
	 * filled in below as each buffer is imported.
	 */
	scm_req.algorithm = reqp->algorithm;
	scm_req.operation = reqp->operation;
	scm_req.mode = reqp->mode;
	scm_req.key_phys_addr = 0;
	scm_req.key_size = reqp->key_size;
	scm_req.plain_text_size = reqp->plain_text_size;
	scm_req.cipher_text_size = reqp->cipher_text_size;
	scm_req.init_vector_size = reqp->init_vector_size;

	if (!reqp->key_is_null) {
		/* import the key buffer and get the physical address */
		ret = smcmod_ion_fd_to_phys(reqp->ion_key_fd, ion_clientp,
			&ion_key_handlep, &scm_req.key_phys_addr, &size);
		if (ret < 0)
			goto buf_cleanup;

		/* ensure that the key size is not
		 * greater than the size of the buffer.
		 */
		if (reqp->key_size > size) {
			ret = -EINVAL;
			goto buf_cleanup;
		}
	}

	/* import the plain text buffer and get the physical address */
	ret = smcmod_ion_fd_to_phys(reqp->ion_plain_text_fd, ion_clientp,
		&ion_plain_handlep, &scm_req.plain_text_phys_addr, &size);

	if (ret < 0)
		goto buf_cleanup;

	/* ensure that the plain text size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->plain_text_size > size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* import the cipher text buffer and get the physical address */
	ret = smcmod_ion_fd_to_phys(reqp->ion_cipher_text_fd, ion_clientp,
		&ion_cipher_handlep, &scm_req.cipher_text_phys_addr, &size);
	if (ret < 0)
		goto buf_cleanup;

	/* ensure that the cipher text size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->cipher_text_size > size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* import the init vector buffer and get the physical address */
	ret = smcmod_ion_fd_to_phys(reqp->ion_init_vector_fd, ion_clientp,
		&ion_iv_handlep, &scm_req.init_vector_phys_addr, &size);
	if (ret < 0)
		goto buf_cleanup;

	/* ensure that the init vector size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->init_vector_size > size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* call scm function to switch to secure world */
	reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO,
		SMCMOD_CRYPTO_CMD_CIPHER, &scm_req,
		sizeof(scm_req), NULL, 0);

	/* for decrypt, plain text is the output, otherwise it's cipher text */
	if (reqp->operation) {
		void *vaddrp = NULL;

		/* map the plain text region to get the virtual address */
		vaddrp = ion_map_kernel(ion_clientp, ion_plain_handlep);
		if (IS_ERR_OR_NULL(vaddrp)) {
			ret = -EINVAL;
			goto buf_cleanup;
		}

		/* invalidate the range so the CPU reads what the secure
		 * side wrote behind the cached mapping.
		 */
		smcmod_inv_range((unsigned long)vaddrp,
			(unsigned long)(vaddrp + scm_req.plain_text_size));

		/* unmap the mapped area */
		ion_unmap_kernel(ion_clientp, ion_plain_handlep);
	} else {
		void *vaddrp = NULL;

		/* map the cipher text region to get the virtual address */
		vaddrp = ion_map_kernel(ion_clientp, ion_cipher_handlep);
		if (IS_ERR_OR_NULL(vaddrp)) {
			ret = -EINVAL;
			goto buf_cleanup;
		}

		/* invalidate the range so the CPU reads what the secure
		 * side wrote behind the cached mapping.
		 */
		smcmod_inv_range((unsigned long)vaddrp,
			(unsigned long)(vaddrp + scm_req.cipher_text_size));

		/* unmap the mapped area */
		ion_unmap_kernel(ion_clientp, ion_cipher_handlep);
	}

buf_cleanup:
	/* if the client and handles are valid, free them */
	if (!IS_ERR_OR_NULL(ion_clientp)) {
		if (!IS_ERR_OR_NULL(ion_key_handlep))
			ion_free(ion_clientp, ion_key_handlep);

		if (!IS_ERR_OR_NULL(ion_plain_handlep))
			ion_free(ion_clientp, ion_plain_handlep);

		if (!IS_ERR_OR_NULL(ion_cipher_handlep))
			ion_free(ion_clientp, ion_cipher_handlep);

		if (!IS_ERR_OR_NULL(ion_iv_handlep))
			ion_free(ion_clientp, ion_iv_handlep);

		ion_client_destroy(ion_clientp);
	}

	return ret;
}
/*
 * smcmod_send_msg_digest_cmd() - compute a message digest in the secure
 * world.
 * @reqp: request describing the hash algorithm, an optional key buffer
 *        fd (skipped when key_is_null), and mandatory input/output
 *        buffer fds. fixed_block selects the fixed-block digest command.
 *
 * Resolves each ion fd to a physical address, bounds-checks the
 * requested sizes, issues the scm call, then invalidates the output
 * buffer from the data cache so the CPU observes the digest written by
 * the secure side.
 *
 * Returns 0 on success or a negative errno. The secure-world status is
 * reported separately in reqp->return_val; note that ret stays 0 even
 * when the secure call itself reports failure.
 *
 * NOTE(review): the ion fds are only size-checked, not checked for
 * ownership; confirm access control happens at a higher layer.
 */
static int smcmod_send_msg_digest_cmd(struct smcmod_msg_digest_req *reqp)
{
	int ret = 0;
	struct smcmod_msg_digest_scm_req scm_req;
	struct ion_client *ion_clientp = NULL;
	struct ion_handle *ion_key_handlep = NULL;
	struct ion_handle *ion_input_handlep = NULL;
	struct ion_handle *ion_output_handlep = NULL;
	size_t size = 0;
	void *vaddrp = NULL;

	if (IS_ERR_OR_NULL(reqp))
		return -EINVAL;

	/* sanity check the fds */
	if ((reqp->ion_input_fd < 0) || (reqp->ion_output_fd < 0))
		return -EINVAL;

	/* create an ion client */
	ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");

	/* check for errors */
	if (IS_ERR_OR_NULL(ion_clientp))
		return -EINVAL;

	/* fill in the scm request structure; physical addresses are
	 * filled in below as each buffer is imported.
	 */
	scm_req.algorithm = reqp->algorithm;
	scm_req.key_phys_addr = 0;
	scm_req.key_size = reqp->key_size;
	scm_req.input_size = reqp->input_size;
	scm_req.output_size = reqp->output_size;
	scm_req.verify = 0;

	if (!reqp->key_is_null) {
		/* import the key buffer and get the physical address */
		ret = smcmod_ion_fd_to_phys(reqp->ion_key_fd, ion_clientp,
			&ion_key_handlep, &scm_req.key_phys_addr, &size);
		if (ret < 0)
			goto buf_cleanup;

		/* ensure that the key size is not
		 * greater than the size of the buffer.
		 */
		if (reqp->key_size > size) {
			ret = -EINVAL;
			goto buf_cleanup;
		}
	}

	/* import the input buffer and get the physical address */
	ret = smcmod_ion_fd_to_phys(reqp->ion_input_fd, ion_clientp,
		&ion_input_handlep, &scm_req.input_phys_addr, &size);
	if (ret < 0)
		goto buf_cleanup;

	/* ensure that the input size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->input_size > size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* import the output buffer and get the physical address */
	ret = smcmod_ion_fd_to_phys(reqp->ion_output_fd, ion_clientp,
		&ion_output_handlep, &scm_req.output_phys_addr, &size);
	if (ret < 0)
		goto buf_cleanup;

	/* ensure that the output size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->output_size > size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* call scm function to switch to secure world */
	if (reqp->fixed_block)
		reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO,
			SMCMOD_CRYPTO_CMD_MSG_DIGEST_FIXED,
			&scm_req,
			sizeof(scm_req),
			NULL, 0);
	else
		reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO,
			SMCMOD_CRYPTO_CMD_MSG_DIGEST,
			&scm_req,
			sizeof(scm_req),
			NULL, 0);


	/* map the output region to get the virtual address */
	vaddrp = ion_map_kernel(ion_clientp, ion_output_handlep);
	if (IS_ERR_OR_NULL(vaddrp)) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* invalidate the range so the CPU reads the digest written by
	 * the secure side behind the cached mapping.
	 */
	smcmod_inv_range((unsigned long)vaddrp,
		(unsigned long)(vaddrp + scm_req.output_size));

	/* unmap the mapped area */
	ion_unmap_kernel(ion_clientp, ion_output_handlep);

buf_cleanup:
	/* if the client and handles are valid, free them */
	if (!IS_ERR_OR_NULL(ion_clientp)) {
		if (!IS_ERR_OR_NULL(ion_key_handlep))
			ion_free(ion_clientp, ion_key_handlep);

		if (!IS_ERR_OR_NULL(ion_input_handlep))
			ion_free(ion_clientp, ion_input_handlep);

		if (!IS_ERR_OR_NULL(ion_output_handlep))
			ion_free(ion_clientp, ion_output_handlep);

		ion_client_destroy(ion_clientp);
	}

	return ret;
}
541
/*
 * smcmod_ioctl() - dispatch user-space SMC requests.
 * @file: open file (unused).
 * @cmd: one of the SMCMOD_IOCTL_* commands.
 * @arg: user pointer to the per-command request struct.
 *
 * Copies the request in from user space, performs the corresponding
 * scm call (directly for register commands, via the helpers above for
 * buffer/cipher/digest commands), and copies the updated request back
 * so the caller can read return_val.
 *
 * Return: 0 on success, -EFAULT on copy failures, -EINVAL for bad
 * commands/arguments, or the helper's error code.
 *
 * NOTE(review): there is no capability or permission check here; any
 * process that can open the device can issue scm calls. Confirm access
 * is restricted via device-node permissions.
 */
static long smcmod_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int ret = 0;

	/* sanity check */
	if (!argp)
		return -EINVAL;

	/*
	 * The SMC instruction should only be initiated by one process
	 * at a time, hence the critical section here. Note that this
	 * does not prevent user space from modifying the
	 * allocated buffer contents. Extra steps are needed to
	 * prevent that from happening.
	 */
	mutex_lock(&ioctl_lock);

	switch (cmd) {
	case SMCMOD_IOCTL_SEND_REG_CMD:
	{
		struct smcmod_reg_req req;

		/* copy struct from user */
		if (copy_from_user((void *)&req, argp, sizeof(req))) {
			ret = -EFAULT;
			goto cleanup;
		}

		/* call the correct scm function to switch to secure
		 * world; only 1- and 2-argument atomic calls are
		 * supported.
		 */
		if (req.num_args == 1) {
			req.return_val =
				scm_call_atomic1(req.service_id,
				req.command_id, req.args[0]);
		} else if (req.num_args == 2) {
			req.return_val =
				scm_call_atomic2(req.service_id,
				req.command_id, req.args[0],
				req.args[1]);
		} else {
			ret = -EINVAL;
			goto cleanup;
		}

		/* copy result back to user */
		if (copy_to_user(argp, (void *)&req, sizeof(req))) {
			ret = -EFAULT;
			goto cleanup;
		}
	}
	break;

	/* This is an example of how to pass buffers to/from the secure
	 * side using the ion driver.
	 */
	case SMCMOD_IOCTL_SEND_BUF_CMD:
	{
		struct smcmod_buf_req req;

		/* copy struct from user */
		if (copy_from_user((void *)&req, argp, sizeof(req))) {
			ret = -EFAULT;
			goto cleanup;
		}

		/* send the command */
		ret = smcmod_send_buf_cmd(&req);
		if (ret < 0)
			goto cleanup;

		/* copy result back to user */
		if (copy_to_user(argp, (void *)&req, sizeof(req))) {
			ret = -EFAULT;
			goto cleanup;
		}
	}
	break;

	case SMCMOD_IOCTL_SEND_CIPHER_CMD:
	{
		struct smcmod_cipher_req req;

		/* copy struct from user */
		if (copy_from_user((void *)&req, argp, sizeof(req))) {
			ret = -EFAULT;
			goto cleanup;
		}

		ret = smcmod_send_cipher_cmd(&req);
		if (ret < 0)
			goto cleanup;

		/* copy result back to user */
		if (copy_to_user(argp, (void *)&req, sizeof(req))) {
			ret = -EFAULT;
			goto cleanup;
		}
	}
	break;

	case SMCMOD_IOCTL_SEND_MSG_DIGEST_CMD:
	{
		struct smcmod_msg_digest_req req;

		/* copy struct from user */
		if (copy_from_user((void *)&req, argp, sizeof(req))) {
			ret = -EFAULT;
			goto cleanup;
		}

		ret = smcmod_send_msg_digest_cmd(&req);
		if (ret < 0)
			goto cleanup;

		/* copy result back to user */
		if (copy_to_user(argp, (void *)&req, sizeof(req))) {
			ret = -EFAULT;
			goto cleanup;
		}
	}
	break;

	case SMCMOD_IOCTL_GET_VERSION:
	{
		uint32_t req;

		/* call scm function to switch to secure world */
		req = scm_get_version();

		/* copy result back to user */
		if (copy_to_user(argp, (void *)&req, sizeof(req))) {
			ret = -EFAULT;
			goto cleanup;
		}
	}
	break;

	default:
		ret = -EINVAL;
	}

cleanup:
	mutex_unlock(&ioctl_lock);
	return ret;
}
689
/* Open handler: no per-open state is needed, so always succeed. */
static int smcmod_open(struct inode *inode, struct file *file)
{
	return 0;
}
694
/* Release handler: nothing to tear down, so always succeed. */
static int smcmod_release(struct inode *inode, struct file *file)
{
	return 0;
}
699
/* File operations for the smcmod misc device; all requests go through
 * the unlocked_ioctl entry point.
 */
static const struct file_operations smcmod_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = smcmod_ioctl,
	.open = smcmod_open,
	.release = smcmod_release,
};
706
/* Misc character device registered at init; minor number is assigned
 * dynamically by the misc subsystem.
 */
static struct miscdevice smcmod_misc_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = SMCMOD_DEV,
	.fops = &smcmod_fops
};
712
/* Module init: register the misc device; returns misc_register()'s
 * status directly.
 */
static int __init smcmod_init(void)
{
	return misc_register(&smcmod_misc_dev);
}
717
/* Module exit: unregister the misc device. */
static void __exit smcmod_exit(void)
{
	misc_deregister(&smcmod_misc_dev);
}
722
723MODULE_DESCRIPTION("Qualcomm SMC Module");
724MODULE_LICENSE("GPL v2");
725
726module_init(smcmod_init);
727module_exit(smcmod_exit);