/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define KMSG_COMPONENT "SMCMOD"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/msm_ion.h>
#include <asm/smcmod.h>
#include <mach/scm.h>

static DEFINE_MUTEX(ioctl_lock);

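/* SCM service and command IDs; the crypto IDs are passed to scm_call()
 * by the cipher and message digest handlers below.
 */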
#define SMCMOD_SVC_DEFAULT (0)
#define SMCMOD_SVC_CRYPTO (1)
#define SMCMOD_CRYPTO_CMD_CIPHER (1)
#define SMCMOD_CRYPTO_CMD_MSG_DIGEST_FIXED (2)
#define SMCMOD_CRYPTO_CMD_MSG_DIGEST (3)

/**
 * struct smcmod_cipher_scm_req - structure for sending the cipher cmd to
 * scm_call.
 *
 * @algorithm - specifies cipher algorithm
 * @operation - specifies encryption or decryption.
 * @mode - specifies cipher mode.
 * @key_phys_addr - physical address for key buffer.
 * @key_size - key size in bytes.
 * @plain_text_phys_addr - physical address for plain text buffer.
 * @plain_text_size - size of plain text in bytes.
 * @cipher_text_phys_addr - physical address for cipher text buffer.
 * @cipher_text_size - cipher text size in bytes.
 * @init_vector_phys_addr - physical address for init vector buffer.
 * @init_vector_size - size of initialization vector in bytes.
 */
struct smcmod_cipher_scm_req {
	uint32_t algorithm;
	uint32_t operation;
	uint32_t mode;
	uint32_t key_phys_addr;
	uint32_t key_size;
	uint32_t plain_text_phys_addr;
	uint32_t plain_text_size;
	uint32_t cipher_text_phys_addr;
	uint32_t cipher_text_size;
	uint32_t init_vector_phys_addr;
	uint32_t init_vector_size;
};

/**
 * struct smcmod_msg_digest_scm_req - structure for sending message digest
 * to scm_call.
 *
 * @algorithm - specifies the hash algorithm.
 * @key_phys_addr - physical address of key buffer.
 * @key_size - hash key size in bytes.
 * @input_phys_addr - physical address of input buffer.
 * @input_size - input data size in bytes.
 * @output_phys_addr - physical address of output buffer.
 * @output_size - size of output buffer in bytes.
 * @verify - indicates whether to verify the hash value.
 */
struct smcmod_msg_digest_scm_req {
	uint32_t algorithm;
	uint32_t key_phys_addr;
	uint32_t key_size;
	uint32_t input_phys_addr;
	uint32_t input_size;
	uint32_t output_phys_addr;
	uint32_t output_size;
	uint8_t verify;
} __packed;

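/*
 * smcmod_ion_fd_to_phys() - resolve an ion buffer fd to a physical address.
 *
 * Imports the dma-buf referenced by @fd into @ion_clientp, returns the
 * resulting handle through @ion_handlep and reports the buffer's physical
 * address and size through @phys_addrp and @sizep. The caller is
 * responsible for freeing the returned handle.
 */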
static int smcmod_ion_fd_to_phys(int32_t fd, struct ion_client *ion_clientp,
	struct ion_handle **ion_handlep, uint32_t *phys_addrp, size_t *sizep)
{
	int ret = 0;

	/* sanity check args */
	if ((fd < 0) || IS_ERR_OR_NULL(ion_clientp) ||
		IS_ERR_OR_NULL(ion_handlep) || IS_ERR_OR_NULL(phys_addrp) ||
		IS_ERR_OR_NULL(sizep))
		return -EINVAL;

	/* import the buffer fd */
	*ion_handlep = ion_import_dma_buf(ion_clientp, fd);

	/* sanity check the handle */
	if (IS_ERR_OR_NULL(*ion_handlep))
		return -EINVAL;

	/* get the physical address */
	ret = ion_phys(ion_clientp, *ion_handlep, (ion_phys_addr_t *)phys_addrp,
		sizep);

	return ret;
}

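/*
 * Backs SMCMOD_IOCTL_SEND_BUF_CMD: maps the caller's ion command buffer
 * (and optional response buffer) into the kernel and forwards them to the
 * secure world through scm_call().
 */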
static int smcmod_send_buf_cmd(struct smcmod_buf_req *reqp)
{
	int ret = 0;
	struct ion_client *ion_clientp = NULL;
	struct ion_handle *ion_cmd_handlep = NULL;
	struct ion_handle *ion_resp_handlep = NULL;
	void *cmd_vaddrp = NULL;
	void *resp_vaddrp = NULL;
	unsigned long cmd_buf_size = 0;
	unsigned long resp_buf_size = 0;

	/* sanity check the argument */
	if (IS_ERR_OR_NULL(reqp))
		return -EINVAL;

	/* sanity check the fds */
	if (reqp->ion_cmd_fd < 0)
		return -EINVAL;

	/* create an ion client */
	ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");

	/* check for errors */
	if (IS_ERR_OR_NULL(ion_clientp))
		return -EINVAL;

	/* import the command buffer fd */
	ion_cmd_handlep = ion_import_dma_buf(ion_clientp, reqp->ion_cmd_fd);

	/* sanity check the handle */
	if (IS_ERR_OR_NULL(ion_cmd_handlep)) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* retrieve the size of the buffer */
	if (ion_handle_get_size(ion_clientp, ion_cmd_handlep,
		&cmd_buf_size) < 0) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* ensure that the specified command length is not
	 * greater than the size of the buffer.
	 */
	if (reqp->cmd_len > cmd_buf_size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* map the area to get a virtual address */
	cmd_vaddrp = ion_map_kernel(ion_clientp, ion_cmd_handlep);

	/* sanity check the address */
	if (IS_ERR_OR_NULL(cmd_vaddrp)) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* check if there is a response buffer */
	if (reqp->ion_resp_fd >= 0) {
		/* import the handle */
		ion_resp_handlep =
			ion_import_dma_buf(ion_clientp, reqp->ion_resp_fd);

		/* sanity check the handle */
		if (IS_ERR_OR_NULL(ion_resp_handlep)) {
			ret = -EINVAL;
			goto buf_cleanup;
		}

		/* retrieve the size of the buffer */
		if (ion_handle_get_size(ion_clientp, ion_resp_handlep,
			&resp_buf_size) < 0) {
			ret = -EINVAL;
			goto buf_cleanup;
		}

		/* ensure that the specified response length is not
		 * greater than the size of the buffer.
		 */
		if (reqp->resp_len > resp_buf_size) {
			ret = -EINVAL;
			goto buf_cleanup;
		}

		/* map the area to get a virtual address */
		resp_vaddrp = ion_map_kernel(ion_clientp, ion_resp_handlep);

		/* sanity check the address */
		if (IS_ERR_OR_NULL(resp_vaddrp)) {
			ret = -EINVAL;
			goto buf_cleanup;
		}
	}

	/* No need to flush the cache lines for the command buffer here,
	 * because the buffer will be flushed by scm_call.
	 */

	/* call scm function to switch to secure world */
	reqp->return_val = scm_call(reqp->service_id, reqp->command_id,
		cmd_vaddrp, reqp->cmd_len, resp_vaddrp, reqp->resp_len);

	/* The cache lines for the response buffer have already been
	 * invalidated by scm_call before returning.
	 */

buf_cleanup:
	/* if the client and handle(s) are valid, free them */
	if (!IS_ERR_OR_NULL(ion_clientp)) {
		if (!IS_ERR_OR_NULL(ion_cmd_handlep)) {
			if (!IS_ERR_OR_NULL(cmd_vaddrp))
				ion_unmap_kernel(ion_clientp, ion_cmd_handlep);
			ion_free(ion_clientp, ion_cmd_handlep);
		}

		if (!IS_ERR_OR_NULL(ion_resp_handlep)) {
			if (!IS_ERR_OR_NULL(resp_vaddrp))
				ion_unmap_kernel(ion_clientp, ion_resp_handlep);
			ion_free(ion_clientp, ion_resp_handlep);
		}

		ion_client_destroy(ion_clientp);
	}

	return ret;
}

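/*
 * Backs SMCMOD_IOCTL_SEND_CIPHER_CMD: translates the request's ion fds to
 * physical addresses, cleans the caches for the input buffers, issues the
 * cipher command through scm_call() and invalidates the output buffer.
 */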
static int smcmod_send_cipher_cmd(struct smcmod_cipher_req *reqp)
{
	int ret = 0;
	struct smcmod_cipher_scm_req scm_req;
	struct ion_client *ion_clientp = NULL;
	struct ion_handle *ion_key_handlep = NULL;
	struct ion_handle *ion_plain_handlep = NULL;
	struct ion_handle *ion_cipher_handlep = NULL;
	struct ion_handle *ion_iv_handlep = NULL;
	size_t size = 0;

	if (IS_ERR_OR_NULL(reqp))
		return -EINVAL;

	/* sanity check the fds */
	if ((reqp->ion_plain_text_fd < 0) ||
		(reqp->ion_cipher_text_fd < 0) ||
		(reqp->ion_init_vector_fd < 0))
		return -EINVAL;

	/* create an ion client */
	ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");

	/* check for errors */
	if (IS_ERR_OR_NULL(ion_clientp))
		return -EINVAL;

	/* fill in the scm request structure */
	scm_req.algorithm = reqp->algorithm;
	scm_req.operation = reqp->operation;
	scm_req.mode = reqp->mode;
	scm_req.key_phys_addr = 0;
	scm_req.key_size = reqp->key_size;
	scm_req.plain_text_size = reqp->plain_text_size;
	scm_req.cipher_text_size = reqp->cipher_text_size;
	scm_req.init_vector_size = reqp->init_vector_size;

	if (!reqp->key_is_null) {
		/* import the key buffer and get the physical address */
		ret = smcmod_ion_fd_to_phys(reqp->ion_key_fd, ion_clientp,
			&ion_key_handlep, &scm_req.key_phys_addr, &size);
		if (ret < 0)
			goto buf_cleanup;

		/* ensure that the key size is not
		 * greater than the size of the buffer.
		 */
		if (reqp->key_size > size) {
			ret = -EINVAL;
			goto buf_cleanup;
		}
	}

	/* import the plain text buffer and get the physical address */
	ret = smcmod_ion_fd_to_phys(reqp->ion_plain_text_fd, ion_clientp,
		&ion_plain_handlep, &scm_req.plain_text_phys_addr, &size);

	if (ret < 0)
		goto buf_cleanup;

	/* ensure that the plain text size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->plain_text_size > size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* import the cipher text buffer and get the physical address */
	ret = smcmod_ion_fd_to_phys(reqp->ion_cipher_text_fd, ion_clientp,
		&ion_cipher_handlep, &scm_req.cipher_text_phys_addr, &size);
	if (ret < 0)
		goto buf_cleanup;

	/* ensure that the cipher text size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->cipher_text_size > size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* import the init vector buffer and get the physical address */
	ret = smcmod_ion_fd_to_phys(reqp->ion_init_vector_fd, ion_clientp,
		&ion_iv_handlep, &scm_req.init_vector_phys_addr, &size);
	if (ret < 0)
		goto buf_cleanup;

	/* ensure that the init vector size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->init_vector_size > size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* Only the scm_req structure will be flushed by scm_call,
	 * so we must flush the cache for the input ion buffers here.
	 */
	msm_ion_do_cache_op(ion_clientp, ion_key_handlep, NULL,
		scm_req.key_size, ION_IOC_CLEAN_CACHES);
	msm_ion_do_cache_op(ion_clientp, ion_iv_handlep, NULL,
		scm_req.init_vector_size, ION_IOC_CLEAN_CACHES);

	/* For decrypt, cipher text is input, otherwise it's plain text. */
	if (reqp->operation)
		msm_ion_do_cache_op(ion_clientp, ion_cipher_handlep, NULL,
			scm_req.cipher_text_size, ION_IOC_CLEAN_CACHES);
	else
		msm_ion_do_cache_op(ion_clientp, ion_plain_handlep, NULL,
			scm_req.plain_text_size, ION_IOC_CLEAN_CACHES);

	/* call scm function to switch to secure world */
	reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO,
		SMCMOD_CRYPTO_CMD_CIPHER, &scm_req,
		sizeof(scm_req), NULL, 0);

	/* Invalidate the output buffer, since it's not done by scm_call */

	/* for decrypt, plain text is the output, otherwise it's cipher text */
	if (reqp->operation)
		msm_ion_do_cache_op(ion_clientp, ion_plain_handlep, NULL,
			scm_req.plain_text_size, ION_IOC_INV_CACHES);
	else
		msm_ion_do_cache_op(ion_clientp, ion_cipher_handlep, NULL,
			scm_req.cipher_text_size, ION_IOC_INV_CACHES);

buf_cleanup:
	/* if the client and handles are valid, free them */
	if (!IS_ERR_OR_NULL(ion_clientp)) {
		if (!IS_ERR_OR_NULL(ion_key_handlep))
			ion_free(ion_clientp, ion_key_handlep);

		if (!IS_ERR_OR_NULL(ion_plain_handlep))
			ion_free(ion_clientp, ion_plain_handlep);

		if (!IS_ERR_OR_NULL(ion_cipher_handlep))
			ion_free(ion_clientp, ion_cipher_handlep);

		if (!IS_ERR_OR_NULL(ion_iv_handlep))
			ion_free(ion_clientp, ion_iv_handlep);

		ion_client_destroy(ion_clientp);
	}

	return ret;
}
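
/*
 * Backs SMCMOD_IOCTL_SEND_MSG_DIGEST_CMD: translates the key, input and
 * output ion fds to physical addresses, cleans the caches for the inputs,
 * issues the digest command through scm_call() and invalidates the output.
 */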
static int smcmod_send_msg_digest_cmd(struct smcmod_msg_digest_req *reqp)
{
	int ret = 0;
	struct smcmod_msg_digest_scm_req scm_req;
	struct ion_client *ion_clientp = NULL;
	struct ion_handle *ion_key_handlep = NULL;
	struct ion_handle *ion_input_handlep = NULL;
	struct ion_handle *ion_output_handlep = NULL;
	size_t size = 0;

	if (IS_ERR_OR_NULL(reqp))
		return -EINVAL;

	/* sanity check the fds */
	if ((reqp->ion_input_fd < 0) || (reqp->ion_output_fd < 0))
		return -EINVAL;

	/* create an ion client */
	ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");

	/* check for errors */
	if (IS_ERR_OR_NULL(ion_clientp))
		return -EINVAL;

	/* fill in the scm request structure */
	scm_req.algorithm = reqp->algorithm;
	scm_req.key_phys_addr = 0;
	scm_req.key_size = reqp->key_size;
	scm_req.input_size = reqp->input_size;
	scm_req.output_size = reqp->output_size;
	scm_req.verify = 0;

	if (!reqp->key_is_null) {
		/* import the key buffer and get the physical address */
		ret = smcmod_ion_fd_to_phys(reqp->ion_key_fd, ion_clientp,
			&ion_key_handlep, &scm_req.key_phys_addr, &size);
		if (ret < 0)
			goto buf_cleanup;

		/* ensure that the key size is not
		 * greater than the size of the buffer.
		 */
		if (reqp->key_size > size) {
			ret = -EINVAL;
			goto buf_cleanup;
		}
	}

	/* import the input buffer and get the physical address */
	ret = smcmod_ion_fd_to_phys(reqp->ion_input_fd, ion_clientp,
		&ion_input_handlep, &scm_req.input_phys_addr, &size);
	if (ret < 0)
		goto buf_cleanup;

	/* ensure that the input size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->input_size > size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* import the output buffer and get the physical address */
	ret = smcmod_ion_fd_to_phys(reqp->ion_output_fd, ion_clientp,
		&ion_output_handlep, &scm_req.output_phys_addr, &size);
	if (ret < 0)
		goto buf_cleanup;

	/* ensure that the output size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->output_size > size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* Only the scm_req structure will be flushed by scm_call,
	 * so we must flush the cache for the input ion buffers here.
	 */
	msm_ion_do_cache_op(ion_clientp, ion_key_handlep, NULL,
		scm_req.key_size, ION_IOC_CLEAN_CACHES);
	msm_ion_do_cache_op(ion_clientp, ion_input_handlep, NULL,
		scm_req.input_size, ION_IOC_CLEAN_CACHES);

	/* call scm function to switch to secure world */
	if (reqp->fixed_block)
		reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO,
			SMCMOD_CRYPTO_CMD_MSG_DIGEST_FIXED,
			&scm_req,
			sizeof(scm_req),
			NULL, 0);
	else
		reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO,
			SMCMOD_CRYPTO_CMD_MSG_DIGEST,
			&scm_req,
			sizeof(scm_req),
			NULL, 0);

	/* Invalidate the output buffer, since it's not done by scm_call */
	msm_ion_do_cache_op(ion_clientp, ion_output_handlep, NULL,
		scm_req.output_size, ION_IOC_INV_CACHES);

buf_cleanup:
	/* if the client and handles are valid, free them */
	if (!IS_ERR_OR_NULL(ion_clientp)) {
		if (!IS_ERR_OR_NULL(ion_key_handlep))
			ion_free(ion_clientp, ion_key_handlep);

		if (!IS_ERR_OR_NULL(ion_input_handlep))
			ion_free(ion_clientp, ion_input_handlep);

		if (!IS_ERR_OR_NULL(ion_output_handlep))
			ion_free(ion_clientp, ion_output_handlep);

		ion_client_destroy(ion_clientp);
	}

	return ret;
}

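/*
 * Illustrative user space sketch (not from the original sources; the device
 * node path is assumed from the misc device name SMCMOD_DEV registered
 * below, and the service/command values are placeholders):
 *
 *	int fd = open("/dev/" SMCMOD_DEV, O_RDWR);
 *	uint32_t version;
 *	ioctl(fd, SMCMOD_IOCTL_GET_VERSION, &version);
 *
 *	struct smcmod_reg_req req = { 0 };
 *	req.service_id = ...;
 *	req.command_id = ...;
 *	req.num_args = 1;
 *	ioctl(fd, SMCMOD_IOCTL_SEND_REG_CMD, &req);
 *	(on success the secure world's return code is in req.return_val)
 */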
static long smcmod_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int ret = 0;

	/* sanity check */
	if (!argp)
		return -EINVAL;

	/*
	 * The SMC instruction should only be initiated by one process
	 * at a time, hence the critical section here. Note that this
	 * does not prevent user space from modifying the
	 * allocated buffer contents. Extra steps are needed to
	 * prevent that from happening.
	 */
	mutex_lock(&ioctl_lock);

	switch (cmd) {
	case SMCMOD_IOCTL_SEND_REG_CMD:
		{
			struct smcmod_reg_req req;

			/* copy struct from user */
			if (copy_from_user((void *)&req, argp, sizeof(req))) {
				ret = -EFAULT;
				goto cleanup;
			}

			/* call the correct scm function to switch to secure
			 * world
			 */
			if (req.num_args == 1) {
				req.return_val =
					scm_call_atomic1(req.service_id,
					req.command_id, req.args[0]);
			} else if (req.num_args == 2) {
				req.return_val =
					scm_call_atomic2(req.service_id,
					req.command_id, req.args[0],
					req.args[1]);
			} else {
				ret = -EINVAL;
				goto cleanup;
			}

			/* copy result back to user */
			if (copy_to_user(argp, (void *)&req, sizeof(req))) {
				ret = -EFAULT;
				goto cleanup;
			}
		}
		break;

	/* This is an example of how to pass buffers to/from the secure
	 * side using the ion driver.
	 */
	case SMCMOD_IOCTL_SEND_BUF_CMD:
		{
			struct smcmod_buf_req req;

			/* copy struct from user */
			if (copy_from_user((void *)&req, argp, sizeof(req))) {
				ret = -EFAULT;
				goto cleanup;
			}

			/* send the command */
			ret = smcmod_send_buf_cmd(&req);
			if (ret < 0)
				goto cleanup;

			/* copy result back to user */
			if (copy_to_user(argp, (void *)&req, sizeof(req))) {
				ret = -EFAULT;
				goto cleanup;
			}
		}
		break;

	case SMCMOD_IOCTL_SEND_CIPHER_CMD:
		{
			struct smcmod_cipher_req req;

			/* copy struct from user */
			if (copy_from_user((void *)&req, argp, sizeof(req))) {
				ret = -EFAULT;
				goto cleanup;
			}

			ret = smcmod_send_cipher_cmd(&req);
			if (ret < 0)
				goto cleanup;

			/* copy result back to user */
			if (copy_to_user(argp, (void *)&req, sizeof(req))) {
				ret = -EFAULT;
				goto cleanup;
			}
		}
		break;

	case SMCMOD_IOCTL_SEND_MSG_DIGEST_CMD:
		{
			struct smcmod_msg_digest_req req;

			/* copy struct from user */
			if (copy_from_user((void *)&req, argp, sizeof(req))) {
				ret = -EFAULT;
				goto cleanup;
			}

			ret = smcmod_send_msg_digest_cmd(&req);
			if (ret < 0)
				goto cleanup;

			/* copy result back to user */
			if (copy_to_user(argp, (void *)&req, sizeof(req))) {
				ret = -EFAULT;
				goto cleanup;
			}
		}
		break;

	case SMCMOD_IOCTL_GET_VERSION:
		{
			uint32_t req;

			/* call scm function to switch to secure world */
			req = scm_get_version();

			/* copy result back to user */
			if (copy_to_user(argp, (void *)&req, sizeof(req))) {
				ret = -EFAULT;
				goto cleanup;
			}
		}
		break;

	default:
		ret = -EINVAL;
	}

cleanup:
	mutex_unlock(&ioctl_lock);
	return ret;
}

static int smcmod_open(struct inode *inode, struct file *file)
{
	return 0;
}

static int smcmod_release(struct inode *inode, struct file *file)
{
	return 0;
}

static const struct file_operations smcmod_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = smcmod_ioctl,
	.open = smcmod_open,
	.release = smcmod_release,
};

static struct miscdevice smcmod_misc_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = SMCMOD_DEV,
	.fops = &smcmod_fops
};

static int __init smcmod_init(void)
{
	return misc_register(&smcmod_misc_dev);
}

static void __exit smcmod_exit(void)
{
	misc_deregister(&smcmod_misc_dev);
}

MODULE_DESCRIPTION("Qualcomm SMC Module");
MODULE_LICENSE("GPL v2");

module_init(smcmod_init);
module_exit(smcmod_exit);