blob: 683fb322f3f250e0849715d3aba8dbed44fad244 [file] [log] [blame]
Vikram Mulukutlaeca44cf2013-06-20 12:25:00 -07001/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
Rohit Vaswani5fd759e2012-11-07 07:05:08 -08002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#define KMSG_COMPONENT "SMCMOD"
15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16
17#include <linux/kernel.h>
18#include <linux/init.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/uaccess.h>
22#include <linux/errno.h>
23#include <linux/err.h>
24#include <linux/miscdevice.h>
25#include <linux/mutex.h>
26#include <linux/mm.h>
27#include <linux/slab.h>
28#include <linux/printk.h>
29#include <linux/msm_ion.h>
30#include <asm/smcmod.h>
31#include <mach/scm.h>
Stanimir Varbanov7a7f34a2013-06-10 12:54:30 +030032#include <mach/socinfo.h>
Rohit Vaswani5fd759e2012-11-07 07:05:08 -080033
/* Serializes all SMC requests: only one process at a time may trigger a
 * switch to the secure world (taken for the whole ioctl in smcmod_ioctl()).
 */
static DEFINE_MUTEX(ioctl_lock);

/* SCM service / command identifiers used when calling into the secure
 * world.  SMCMOD_SVC_CRYPTO carries the cipher and message-digest
 * commands defined below.
 */
#define SMCMOD_SVC_DEFAULT (0)
#define SMCMOD_SVC_CRYPTO (1)
#define SMCMOD_CRYPTO_CMD_CIPHER (1)
#define SMCMOD_CRYPTO_CMD_MSG_DIGEST_FIXED (2)
#define SMCMOD_CRYPTO_CMD_MSG_DIGEST (3)
41
/**
 * struct smcmod_cipher_scm_req - structure for sending the cipher cmd to
 * scm_call.
 *
 * @algorithm - specifies cipher algorithm
 * @operation - specifies encryption or decryption (non-zero means
 *              decryption, per the cache handling in
 *              smcmod_send_cipher_cmd()).
 * @mode - specifies cipher mode.
 * @key_phys_addr - physical address for key buffer (0 when no key).
 * @key_size - key size in bytes.
 * @plain_text_phys_addr - physical address for plain text buffer.
 * @plain_text_size - size of plain text in bytes.
 * @cipher_text_phys_addr - physical address for cipher text buffer.
 * @cipher_text_size - cipher text size in bytes.
 * @init_vector_phys_addr - physical address for init vector buffer.
 * @init_vector_size - size of initialization vector in bytes.
 *
 * NOTE(review): unlike struct smcmod_msg_digest_scm_req this struct is
 * not marked __packed; all members are uint32_t so there is no interior
 * padding on this target, but confirm the secure-world side agrees on
 * the layout before changing the member list.
 */
struct smcmod_cipher_scm_req {
	uint32_t algorithm;
	uint32_t operation;
	uint32_t mode;
	uint32_t key_phys_addr;
	uint32_t key_size;
	uint32_t plain_text_phys_addr;
	uint32_t plain_text_size;
	uint32_t cipher_text_phys_addr;
	uint32_t cipher_text_size;
	uint32_t init_vector_phys_addr;
	uint32_t init_vector_size;
};
71
/**
 * struct smcmod_msg_digest_scm_req - structure for sending message digest
 * to scm_call.
 *
 * @algorithm - specifies the digest algorithm.
 * @key_phys_addr - physical address of key buffer (0 when no key).
 * @key_size - hash key size in bytes.
 * @input_phys_addr - physical address of input buffer.
 * @input_size - input data size in bytes.
 * @output_phys_addr - physical address of output buffer.
 * @output_size - size of output buffer in bytes.
 * @verify - indicates whether to verify the hash value (always sent as 0
 *           by smcmod_send_msg_digest_cmd()).
 *
 * Marked __packed because the trailing uint8_t would otherwise be padded
 * and the secure world expects the exact wire layout.
 */
struct smcmod_msg_digest_scm_req {
	uint32_t algorithm;
	uint32_t key_phys_addr;
	uint32_t key_size;
	uint32_t input_phys_addr;
	uint32_t input_size;
	uint32_t output_phys_addr;
	uint32_t output_size;
	uint8_t verify;
} __packed;
95
Rohit Vaswani5fd759e2012-11-07 07:05:08 -080096static int smcmod_ion_fd_to_phys(int32_t fd, struct ion_client *ion_clientp,
97 struct ion_handle **ion_handlep, uint32_t *phys_addrp, size_t *sizep)
98{
99 int ret = 0;
100
101 /* sanity check args */
102 if ((fd < 0) || IS_ERR_OR_NULL(ion_clientp) ||
103 IS_ERR_OR_NULL(ion_handlep) || IS_ERR_OR_NULL(phys_addrp) ||
104 IS_ERR_OR_NULL(sizep))
105 return -EINVAL;
106
107 /* import the buffer fd */
108 *ion_handlep = ion_import_dma_buf(ion_clientp, fd);
109
110 /* sanity check the handle */
111 if (IS_ERR_OR_NULL(*ion_handlep))
112 return -EINVAL;
113
114 /* get the physical address */
115 ret = ion_phys(ion_clientp, *ion_handlep, (ion_phys_addr_t *)phys_addrp,
116 sizep);
117
118 return ret;
119}
120
121static int smcmod_send_buf_cmd(struct smcmod_buf_req *reqp)
122{
123 int ret = 0;
124 struct ion_client *ion_clientp = NULL;
125 struct ion_handle *ion_cmd_handlep = NULL;
126 struct ion_handle *ion_resp_handlep = NULL;
127 void *cmd_vaddrp = NULL;
128 void *resp_vaddrp = NULL;
129 unsigned long cmd_buf_size = 0;
130 unsigned long resp_buf_size = 0;
131
132 /* sanity check the argument */
133 if (IS_ERR_OR_NULL(reqp))
134 return -EINVAL;
135
136 /* sanity check the fds */
137 if (reqp->ion_cmd_fd < 0)
138 return -EINVAL;
139
140 /* create an ion client */
141 ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");
142
143 /* check for errors */
144 if (IS_ERR_OR_NULL(ion_clientp))
145 return -EINVAL;
146
147 /* import the command buffer fd */
148 ion_cmd_handlep = ion_import_dma_buf(ion_clientp, reqp->ion_cmd_fd);
149
150 /* sanity check the handle */
151 if (IS_ERR_OR_NULL(ion_cmd_handlep)) {
152 ret = -EINVAL;
153 goto buf_cleanup;
154 }
155
156 /* retrieve the size of the buffer */
157 if (ion_handle_get_size(ion_clientp, ion_cmd_handlep,
158 &cmd_buf_size) < 0) {
159 ret = -EINVAL;
160 goto buf_cleanup;
161 }
162
163 /* ensure that the command buffer size is not
164 * greater than the size of the buffer.
165 */
166 if (reqp->cmd_len > cmd_buf_size) {
167 ret = -EINVAL;
168 goto buf_cleanup;
169 }
170
171 /* map the area to get a virtual address */
172 cmd_vaddrp = ion_map_kernel(ion_clientp, ion_cmd_handlep);
173
174 /* sanity check the address */
175 if (IS_ERR_OR_NULL(cmd_vaddrp)) {
176 ret = -EINVAL;
177 goto buf_cleanup;
178 }
179
180 /* check if there is a response buffer */
181 if (reqp->ion_resp_fd >= 0) {
182 /* import the handle */
183 ion_resp_handlep =
184 ion_import_dma_buf(ion_clientp, reqp->ion_resp_fd);
185
186 /* sanity check the handle */
187 if (IS_ERR_OR_NULL(ion_resp_handlep)) {
188 ret = -EINVAL;
189 goto buf_cleanup;
190 }
191
192 /* retrieve the size of the buffer */
193 if (ion_handle_get_size(ion_clientp, ion_resp_handlep,
194 &resp_buf_size) < 0) {
195 ret = -EINVAL;
196 goto buf_cleanup;
197 }
198
199 /* ensure that the command buffer size is not
200 * greater than the size of the buffer.
201 */
202 if (reqp->resp_len > resp_buf_size) {
203 ret = -EINVAL;
204 goto buf_cleanup;
205 }
206
207 /* map the area to get a virtual address */
208 resp_vaddrp = ion_map_kernel(ion_clientp, ion_resp_handlep);
209
210 /* sanity check the address */
211 if (IS_ERR_OR_NULL(resp_vaddrp)) {
212 ret = -EINVAL;
213 goto buf_cleanup;
214 }
215 }
216
Vikram Mulukutla58f297a2013-06-20 18:23:19 -0400217 /* No need to flush the cache lines for the command buffer here,
218 * because the buffer will be flushed by scm_call.
219 */
220
Rohit Vaswani5fd759e2012-11-07 07:05:08 -0800221 /* call scm function to switch to secure world */
Vikram Mulukutla58f297a2013-06-20 18:23:19 -0400222 reqp->return_val = scm_call(reqp->service_id, reqp->command_id,
Rohit Vaswani5fd759e2012-11-07 07:05:08 -0800223 cmd_vaddrp, reqp->cmd_len, resp_vaddrp, reqp->resp_len);
224
Vikram Mulukutla58f297a2013-06-20 18:23:19 -0400225 /* The cache lines for the response buffer have already been
226 * invalidated by scm_call before returning.
227 */
228
Rohit Vaswani5fd759e2012-11-07 07:05:08 -0800229buf_cleanup:
230 /* if the client and handle(s) are valid, free them */
231 if (!IS_ERR_OR_NULL(ion_clientp)) {
232 if (!IS_ERR_OR_NULL(ion_cmd_handlep)) {
233 if (!IS_ERR_OR_NULL(cmd_vaddrp))
234 ion_unmap_kernel(ion_clientp, ion_cmd_handlep);
235 ion_free(ion_clientp, ion_cmd_handlep);
236 }
237
238 if (!IS_ERR_OR_NULL(ion_resp_handlep)) {
239 if (!IS_ERR_OR_NULL(resp_vaddrp))
240 ion_unmap_kernel(ion_clientp, ion_resp_handlep);
241 ion_free(ion_clientp, ion_resp_handlep);
242 }
243
244 ion_client_destroy(ion_clientp);
245 }
246
247 return ret;
248}
249
250static int smcmod_send_cipher_cmd(struct smcmod_cipher_req *reqp)
251{
252 int ret = 0;
253 struct smcmod_cipher_scm_req scm_req;
254 struct ion_client *ion_clientp = NULL;
255 struct ion_handle *ion_key_handlep = NULL;
256 struct ion_handle *ion_plain_handlep = NULL;
257 struct ion_handle *ion_cipher_handlep = NULL;
258 struct ion_handle *ion_iv_handlep = NULL;
259 size_t size = 0;
260
261 if (IS_ERR_OR_NULL(reqp))
262 return -EINVAL;
263
264 /* sanity check the fds */
265 if ((reqp->ion_plain_text_fd < 0) ||
266 (reqp->ion_cipher_text_fd < 0) ||
267 (reqp->ion_init_vector_fd < 0))
268 return -EINVAL;
269
270 /* create an ion client */
271 ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");
272
273 /* check for errors */
274 if (IS_ERR_OR_NULL(ion_clientp))
275 return -EINVAL;
276
277 /* fill in the scm request structure */
278 scm_req.algorithm = reqp->algorithm;
279 scm_req.operation = reqp->operation;
280 scm_req.mode = reqp->mode;
281 scm_req.key_phys_addr = 0;
282 scm_req.key_size = reqp->key_size;
283 scm_req.plain_text_size = reqp->plain_text_size;
284 scm_req.cipher_text_size = reqp->cipher_text_size;
285 scm_req.init_vector_size = reqp->init_vector_size;
286
287 if (!reqp->key_is_null) {
288 /* import the key buffer and get the physical address */
289 ret = smcmod_ion_fd_to_phys(reqp->ion_key_fd, ion_clientp,
290 &ion_key_handlep, &scm_req.key_phys_addr, &size);
291 if (ret < 0)
292 goto buf_cleanup;
293
294 /* ensure that the key size is not
295 * greater than the size of the buffer.
296 */
297 if (reqp->key_size > size) {
298 ret = -EINVAL;
299 goto buf_cleanup;
300 }
301 }
302
303 /* import the plain text buffer and get the physical address */
304 ret = smcmod_ion_fd_to_phys(reqp->ion_plain_text_fd, ion_clientp,
305 &ion_plain_handlep, &scm_req.plain_text_phys_addr, &size);
306
307 if (ret < 0)
308 goto buf_cleanup;
309
310 /* ensure that the plain text size is not
311 * greater than the size of the buffer.
312 */
313 if (reqp->plain_text_size > size) {
314 ret = -EINVAL;
315 goto buf_cleanup;
316 }
317
318 /* import the cipher text buffer and get the physical address */
319 ret = smcmod_ion_fd_to_phys(reqp->ion_cipher_text_fd, ion_clientp,
320 &ion_cipher_handlep, &scm_req.cipher_text_phys_addr, &size);
321 if (ret < 0)
322 goto buf_cleanup;
323
324 /* ensure that the cipher text size is not
325 * greater than the size of the buffer.
326 */
327 if (reqp->cipher_text_size > size) {
328 ret = -EINVAL;
329 goto buf_cleanup;
330 }
331
332 /* import the init vector buffer and get the physical address */
333 ret = smcmod_ion_fd_to_phys(reqp->ion_init_vector_fd, ion_clientp,
334 &ion_iv_handlep, &scm_req.init_vector_phys_addr, &size);
335 if (ret < 0)
336 goto buf_cleanup;
337
338 /* ensure that the init vector size is not
339 * greater than the size of the buffer.
340 */
341 if (reqp->init_vector_size > size) {
342 ret = -EINVAL;
343 goto buf_cleanup;
344 }
345
Vikram Mulukutla58f297a2013-06-20 18:23:19 -0400346 /* Only the scm_req structure will be flushed by scm_call,
347 * so we must flush the cache for the input ion buffers here.
348 */
349 msm_ion_do_cache_op(ion_clientp, ion_key_handlep, NULL,
350 scm_req.key_size, ION_IOC_CLEAN_CACHES);
351 msm_ion_do_cache_op(ion_clientp, ion_iv_handlep, NULL,
352 scm_req.init_vector_size, ION_IOC_CLEAN_CACHES);
353
354 /* For decrypt, cipher text is input, otherwise it's plain text. */
355 if (reqp->operation)
356 msm_ion_do_cache_op(ion_clientp, ion_cipher_handlep, NULL,
357 scm_req.cipher_text_size, ION_IOC_CLEAN_CACHES);
358 else
359 msm_ion_do_cache_op(ion_clientp, ion_plain_handlep, NULL,
360 scm_req.plain_text_size, ION_IOC_CLEAN_CACHES);
361
Rohit Vaswani5fd759e2012-11-07 07:05:08 -0800362 /* call scm function to switch to secure world */
Vikram Mulukutla58f297a2013-06-20 18:23:19 -0400363 reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO,
Rohit Vaswani5fd759e2012-11-07 07:05:08 -0800364 SMCMOD_CRYPTO_CMD_CIPHER, &scm_req,
365 sizeof(scm_req), NULL, 0);
366
Vikram Mulukutla58f297a2013-06-20 18:23:19 -0400367 /* Invalidate the output buffer, since it's not done by scm_call */
368
Rohit Vaswani5fd759e2012-11-07 07:05:08 -0800369 /* for decrypt, plain text is the output, otherwise it's cipher text */
Vikram Mulukutla58f297a2013-06-20 18:23:19 -0400370 if (reqp->operation)
371 msm_ion_do_cache_op(ion_clientp, ion_plain_handlep, NULL,
372 scm_req.plain_text_size, ION_IOC_INV_CACHES);
373 else
374 msm_ion_do_cache_op(ion_clientp, ion_cipher_handlep, NULL,
375 scm_req.cipher_text_size, ION_IOC_INV_CACHES);
Rohit Vaswani5fd759e2012-11-07 07:05:08 -0800376
377buf_cleanup:
378 /* if the client and handles are valid, free them */
379 if (!IS_ERR_OR_NULL(ion_clientp)) {
380 if (!IS_ERR_OR_NULL(ion_key_handlep))
381 ion_free(ion_clientp, ion_key_handlep);
382
383 if (!IS_ERR_OR_NULL(ion_plain_handlep))
384 ion_free(ion_clientp, ion_plain_handlep);
385
386 if (!IS_ERR_OR_NULL(ion_cipher_handlep))
387 ion_free(ion_clientp, ion_cipher_handlep);
388
389 if (!IS_ERR_OR_NULL(ion_iv_handlep))
390 ion_free(ion_clientp, ion_iv_handlep);
391
392 ion_client_destroy(ion_clientp);
393 }
394
395 return ret;
396}
397static int smcmod_send_msg_digest_cmd(struct smcmod_msg_digest_req *reqp)
398{
399 int ret = 0;
400 struct smcmod_msg_digest_scm_req scm_req;
401 struct ion_client *ion_clientp = NULL;
402 struct ion_handle *ion_key_handlep = NULL;
403 struct ion_handle *ion_input_handlep = NULL;
404 struct ion_handle *ion_output_handlep = NULL;
405 size_t size = 0;
Rohit Vaswani5fd759e2012-11-07 07:05:08 -0800406
407 if (IS_ERR_OR_NULL(reqp))
408 return -EINVAL;
409
410 /* sanity check the fds */
411 if ((reqp->ion_input_fd < 0) || (reqp->ion_output_fd < 0))
412 return -EINVAL;
413
414 /* create an ion client */
415 ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");
416
417 /* check for errors */
418 if (IS_ERR_OR_NULL(ion_clientp))
419 return -EINVAL;
420
421 /* fill in the scm request structure */
422 scm_req.algorithm = reqp->algorithm;
423 scm_req.key_phys_addr = 0;
424 scm_req.key_size = reqp->key_size;
425 scm_req.input_size = reqp->input_size;
426 scm_req.output_size = reqp->output_size;
427 scm_req.verify = 0;
428
429 if (!reqp->key_is_null) {
430 /* import the key buffer and get the physical address */
431 ret = smcmod_ion_fd_to_phys(reqp->ion_key_fd, ion_clientp,
432 &ion_key_handlep, &scm_req.key_phys_addr, &size);
433 if (ret < 0)
434 goto buf_cleanup;
435
436 /* ensure that the key size is not
437 * greater than the size of the buffer.
438 */
439 if (reqp->key_size > size) {
440 ret = -EINVAL;
441 goto buf_cleanup;
442 }
443 }
444
445 /* import the input buffer and get the physical address */
446 ret = smcmod_ion_fd_to_phys(reqp->ion_input_fd, ion_clientp,
447 &ion_input_handlep, &scm_req.input_phys_addr, &size);
448 if (ret < 0)
449 goto buf_cleanup;
450
451 /* ensure that the input size is not
452 * greater than the size of the buffer.
453 */
454 if (reqp->input_size > size) {
455 ret = -EINVAL;
456 goto buf_cleanup;
457 }
458
459 /* import the output buffer and get the physical address */
460 ret = smcmod_ion_fd_to_phys(reqp->ion_output_fd, ion_clientp,
461 &ion_output_handlep, &scm_req.output_phys_addr, &size);
462 if (ret < 0)
463 goto buf_cleanup;
464
465 /* ensure that the output size is not
466 * greater than the size of the buffer.
467 */
468 if (reqp->output_size > size) {
469 ret = -EINVAL;
470 goto buf_cleanup;
471 }
472
Vikram Mulukutla58f297a2013-06-20 18:23:19 -0400473 /* Only the scm_req structure will be flushed by scm_call,
474 * so we must flush the cache for the input ion buffers here.
475 */
476 msm_ion_do_cache_op(ion_clientp, ion_key_handlep, NULL,
477 scm_req.key_size, ION_IOC_CLEAN_CACHES);
478 msm_ion_do_cache_op(ion_clientp, ion_input_handlep, NULL,
479 scm_req.input_size, ION_IOC_CLEAN_CACHES);
480
Rohit Vaswani5fd759e2012-11-07 07:05:08 -0800481 /* call scm function to switch to secure world */
482 if (reqp->fixed_block)
Vikram Mulukutla58f297a2013-06-20 18:23:19 -0400483 reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO,
Rohit Vaswani5fd759e2012-11-07 07:05:08 -0800484 SMCMOD_CRYPTO_CMD_MSG_DIGEST_FIXED,
485 &scm_req,
486 sizeof(scm_req),
487 NULL, 0);
488 else
Vikram Mulukutla58f297a2013-06-20 18:23:19 -0400489 reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO,
Rohit Vaswani5fd759e2012-11-07 07:05:08 -0800490 SMCMOD_CRYPTO_CMD_MSG_DIGEST,
491 &scm_req,
492 sizeof(scm_req),
493 NULL, 0);
494
Vikram Mulukutla58f297a2013-06-20 18:23:19 -0400495 /* Invalidate the output buffer, since it's not done by scm_call */
496 msm_ion_do_cache_op(ion_clientp, ion_output_handlep, NULL,
497 scm_req.output_size, ION_IOC_INV_CACHES);
Rohit Vaswani5fd759e2012-11-07 07:05:08 -0800498
499buf_cleanup:
500 /* if the client and handles are valid, free them */
501 if (!IS_ERR_OR_NULL(ion_clientp)) {
502 if (!IS_ERR_OR_NULL(ion_key_handlep))
503 ion_free(ion_clientp, ion_key_handlep);
504
505 if (!IS_ERR_OR_NULL(ion_input_handlep))
506 ion_free(ion_clientp, ion_input_handlep);
507
508 if (!IS_ERR_OR_NULL(ion_output_handlep))
509 ion_free(ion_clientp, ion_output_handlep);
510
511 ion_client_destroy(ion_clientp);
512 }
513
514 return ret;
515}
516
Stanimir Varbanov7a7f34a2013-06-10 12:54:30 +0300517static int smcmod_send_dec_cmd(struct smcmod_decrypt_req *reqp)
518{
519 struct ion_client *ion_clientp;
520 struct ion_handle *ion_handlep = NULL;
521 int ion_fd;
522 int ret;
523 u32 pa;
524 size_t size;
525 struct {
526 u32 args[4];
527 } req;
528 struct {
529 u32 args[3];
530 } rsp;
531
532 ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");
533 if (IS_ERR_OR_NULL(ion_clientp))
534 return PTR_ERR(ion_clientp);
535
536 switch (reqp->operation) {
537 case SMCMOD_DECRYPT_REQ_OP_METADATA: {
538 ion_fd = reqp->request.metadata.ion_fd;
539 ret = smcmod_ion_fd_to_phys(ion_fd, ion_clientp,
540 &ion_handlep, &pa, &size);
541 if (ret)
542 goto error;
543
544 req.args[0] = reqp->request.metadata.len;
545 req.args[1] = pa;
546 break;
547 }
548 case SMCMOD_DECRYPT_REQ_OP_IMG_FRAG: {
549 ion_fd = reqp->request.img_frag.ion_fd;
550 ret = smcmod_ion_fd_to_phys(ion_fd, ion_clientp,
551 &ion_handlep, &pa, &size);
552 if (ret)
553 goto error;
554
555 req.args[0] = reqp->request.img_frag.ctx_id;
556 req.args[1] = reqp->request.img_frag.last_frag;
557 req.args[2] = reqp->request.img_frag.frag_len;
558 req.args[3] = pa + reqp->request.img_frag.offset;
559 break;
560 }
561 default:
562 ret = -EINVAL;
563 goto error;
564 }
565
566 /*
567 * scm_call does cache maintenance over request and response buffers.
568 * The userspace must flush/invalidate ion input/output buffers itself.
569 */
570
571 ret = scm_call(reqp->service_id, reqp->command_id,
572 &req, sizeof(req), &rsp, sizeof(rsp));
573 if (ret)
574 goto error;
575
576 switch (reqp->operation) {
577 case SMCMOD_DECRYPT_REQ_OP_METADATA:
578 reqp->response.metadata.status = rsp.args[0];
579 reqp->response.metadata.ctx_id = rsp.args[1];
580 reqp->response.metadata.end_offset = rsp.args[2] - pa;
581 break;
582 case SMCMOD_DECRYPT_REQ_OP_IMG_FRAG: {
583 reqp->response.img_frag.status = rsp.args[0];
584 break;
585 }
586 default:
587 break;
588 }
589
590error:
591 if (!IS_ERR_OR_NULL(ion_clientp)) {
592 if (!IS_ERR_OR_NULL(ion_handlep))
593 ion_free(ion_clientp, ion_handlep);
594 ion_client_destroy(ion_clientp);
595 }
596 return ret;
597}
598
599static int smcmod_ioctl_check(unsigned cmd)
600{
601 switch (cmd) {
602 case SMCMOD_IOCTL_SEND_REG_CMD:
603 case SMCMOD_IOCTL_SEND_BUF_CMD:
604 case SMCMOD_IOCTL_SEND_CIPHER_CMD:
605 case SMCMOD_IOCTL_SEND_MSG_DIGEST_CMD:
606 case SMCMOD_IOCTL_GET_VERSION:
607 if (!cpu_is_fsm9xxx())
608 return -EINVAL;
609 break;
610 case SMCMOD_IOCTL_SEND_DECRYPT_CMD:
611 if (!cpu_is_msm8226())
612 return -EINVAL;
613 break;
614 default:
615 return -EINVAL;
616 }
617
618 return 0;
619}
620
/* Main ioctl entry point: dispatches user-space SMC requests to the
 * secure world.  All commands run under ioctl_lock so only one SMC
 * transaction is in flight at a time.
 *
 * NOTE(review): service_id/command_id for SEND_REG_CMD and SEND_BUF_CMD
 * come straight from user space and are passed to scm_call/scm_call_atomic
 * unchecked; there is no capability check on this handler either.  Confirm
 * that device-node permissions alone are the intended access control here.
 */
static long smcmod_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int ret = 0;

	/* sanity check */
	if (!argp)
		return -EINVAL;

	/*
	 * The SMC instruction should only be initiated by one process
	 * at a time, hence the critical section here. Note that this
	 * does not prevent user space from modifying the
	 * allocated buffer contents. Extra steps are needed to
	 * prevent that from happening.
	 */
	mutex_lock(&ioctl_lock);

	/* reject commands not supported on this SoC */
	ret = smcmod_ioctl_check(cmd);
	if (ret)
		goto cleanup;

	switch (cmd) {
	case SMCMOD_IOCTL_SEND_REG_CMD:
		{
			struct smcmod_reg_req req;

			/* copy struct from user */
			if (copy_from_user((void *)&req, argp, sizeof(req))) {
				ret = -EFAULT;
				goto cleanup;
			}

			/* call the correct scm function to switch to secure
			 * world
			 */
			if (req.num_args == 1) {
				req.return_val =
					scm_call_atomic1(req.service_id,
						req.command_id, req.args[0]);
			} else if (req.num_args == 2) {
				req.return_val =
					scm_call_atomic2(req.service_id,
						req.command_id, req.args[0],
						req.args[1]);
			} else {
				ret = -EINVAL;
				goto cleanup;
			}

			/* copy result back to user */
			if (copy_to_user(argp, (void *)&req, sizeof(req))) {
				ret = -EFAULT;
				goto cleanup;
			}
		}
		break;

	/* This is an example of how to pass buffers to/from the secure
	 * side using the ion driver.
	 */
	case SMCMOD_IOCTL_SEND_BUF_CMD:
		{
			struct smcmod_buf_req req;

			/* copy struct from user */
			if (copy_from_user((void *)&req, argp, sizeof(req))) {
				ret = -EFAULT;
				goto cleanup;
			}

			/* send the command */
			ret = smcmod_send_buf_cmd(&req);
			if (ret < 0)
				goto cleanup;

			/* copy result back to user */
			if (copy_to_user(argp, (void *)&req, sizeof(req))) {
				ret = -EFAULT;
				goto cleanup;
			}
		}
		break;

	case SMCMOD_IOCTL_SEND_CIPHER_CMD:
		{
			struct smcmod_cipher_req req;

			/* copy struct from user */
			if (copy_from_user((void *)&req, argp, sizeof(req))) {
				ret = -EFAULT;
				goto cleanup;
			}

			ret = smcmod_send_cipher_cmd(&req);
			if (ret < 0)
				goto cleanup;

			/* copy result back to user */
			if (copy_to_user(argp, (void *)&req, sizeof(req))) {
				ret = -EFAULT;
				goto cleanup;
			}
		}
		break;

	case SMCMOD_IOCTL_SEND_MSG_DIGEST_CMD:
		{
			struct smcmod_msg_digest_req req;

			/* copy struct from user */
			if (copy_from_user((void *)&req, argp, sizeof(req))) {
				ret = -EFAULT;
				goto cleanup;
			}

			ret = smcmod_send_msg_digest_cmd(&req);
			if (ret < 0)
				goto cleanup;

			/* copy result back to user */
			if (copy_to_user(argp, (void *)&req, sizeof(req))) {
				ret = -EFAULT;
				goto cleanup;
			}
		}
		break;

	case SMCMOD_IOCTL_GET_VERSION:
		{
			uint32_t req;

			/* call scm function to switch to secure world */
			req = scm_get_version();

			/* copy result back to user */
			if (copy_to_user(argp, (void *)&req, sizeof(req))) {
				ret = -EFAULT;
				goto cleanup;
			}
		}
		break;

	case SMCMOD_IOCTL_SEND_DECRYPT_CMD:
		{
			struct smcmod_decrypt_req req;

			if (copy_from_user((void *)&req, argp, sizeof(req))) {
				ret = -EFAULT;
				goto cleanup;
			}

			ret = smcmod_send_dec_cmd(&req);
			if (ret < 0)
				goto cleanup;

			if (copy_to_user(argp, (void *)&req, sizeof(req))) {
				ret = -EFAULT;
				goto cleanup;
			}
		}
		break;

	default:
		ret = -EINVAL;
	}

cleanup:
	mutex_unlock(&ioctl_lock);
	return ret;
}
792
/* No per-open state is needed; all state is module-global. */
static int smcmod_open(struct inode *inode, struct file *file)
{
	return 0;
}
797
/* Nothing to tear down on close; resources are per-ioctl, not per-fd. */
static int smcmod_release(struct inode *inode, struct file *file)
{
	return 0;
}
802
/* File operations for the smcmod misc device; all real work happens in
 * smcmod_ioctl().
 */
static const struct file_operations smcmod_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = smcmod_ioctl,
	.open = smcmod_open,
	.release = smcmod_release,
};
809
/* Misc device registration: exposes /dev/<SMCMOD_DEV> with a dynamically
 * assigned minor number.
 */
static struct miscdevice smcmod_misc_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = SMCMOD_DEV,
	.fops = &smcmod_fops
};
815
/* Module init: register the misc device; misc_register's error code is
 * propagated directly.
 */
static int __init smcmod_init(void)
{
	return misc_register(&smcmod_misc_dev);
}
820
/* Module exit: unregister the misc device. */
static void __exit smcmod_exit(void)
{
	misc_deregister(&smcmod_misc_dev);
}
825
/* Module metadata and entry/exit registration. */
MODULE_DESCRIPTION("Qualcomm SMC Module");
MODULE_LICENSE("GPL v2");

module_init(smcmod_init);
module_exit(smcmod_exit);