/*
 * QTI CE device driver.
 *
 * Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/platform_data/qcom_crypto_device.h>
#include <linux/msm-bus.h>
#include <linux/qcedev.h>

#include <crypto/hash.h>
#include "qcedevi.h"
#include "qce.h"
#include "qcedev_smmu.h"

#include <linux/compat.h>
#include "compat_qcedev.h"

#define CACHE_LINE_SIZE 32
#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE

/* standard initialization vector for SHA-1, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha1_uint8[] = {
	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
	0xC3, 0xD2, 0xE1, 0xF0
};
/* standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha256_uint8[] = {
	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
};

static DEFINE_MUTEX(send_cmd_lock);
static DEFINE_MUTEX(qcedev_sent_bw_req);
static DEFINE_MUTEX(hash_access_lock);

static const struct of_device_id qcedev_match[] = {
	{ .compatible = "qcom,qcedev"},
	{ .compatible = "qcom,qcedev,context-bank"},
	{}
};

MODULE_DEVICE_TABLE(of, qcedev_match);

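/*
 * Enable or disable the CE clocks together with the bus bandwidth vote,
 * in the order the hardware requires (ce_support.req_bw_before_clk).
 * If the second step fails, the first one is rolled back before
 * returning the error.
 */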
static int qcedev_control_clocks(struct qcedev_control *podev, bool enable)
{
	unsigned int control_flag;
	int ret = 0;

	if (podev->ce_support.req_bw_before_clk) {
		if (enable)
			control_flag = QCE_BW_REQUEST_FIRST;
		else
			control_flag = QCE_CLK_DISABLE_FIRST;
	} else {
		if (enable)
			control_flag = QCE_CLK_ENABLE_FIRST;
		else
			control_flag = QCE_BW_REQUEST_RESET_FIRST;
	}

	switch (control_flag) {
	case QCE_CLK_ENABLE_FIRST:
		ret = qce_enable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to enable clk\n", __func__);
			return ret;
		}
		ret = msm_bus_scale_client_update_request(
				podev->bus_scale_handle, 1);
		if (ret) {
			pr_err("%s Unable to set high bw\n", __func__);
			ret = qce_disable_clk(podev->qce);
			if (ret)
				pr_err("%s Unable to disable clk\n", __func__);
			return ret;
		}
		break;
	case QCE_BW_REQUEST_FIRST:
		ret = msm_bus_scale_client_update_request(
				podev->bus_scale_handle, 1);
		if (ret) {
			pr_err("%s Unable to set high bw\n", __func__);
			return ret;
		}
		ret = qce_enable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to enable clk\n", __func__);
			ret = msm_bus_scale_client_update_request(
					podev->bus_scale_handle, 0);
			if (ret)
				pr_err("%s Unable to set low bw\n", __func__);
			return ret;
		}
		break;
	case QCE_CLK_DISABLE_FIRST:
		ret = qce_disable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to disable clk\n", __func__);
			return ret;
		}
		ret = msm_bus_scale_client_update_request(
				podev->bus_scale_handle, 0);
		if (ret) {
			pr_err("%s Unable to set low bw\n", __func__);
			ret = qce_enable_clk(podev->qce);
			if (ret)
				pr_err("%s Unable to enable clk\n", __func__);
			return ret;
		}
		break;
	case QCE_BW_REQUEST_RESET_FIRST:
		ret = msm_bus_scale_client_update_request(
				podev->bus_scale_handle, 0);
		if (ret) {
			pr_err("%s Unable to set low bw\n", __func__);
			return ret;
		}
		ret = qce_disable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to disable clk\n", __func__);
			ret = msm_bus_scale_client_update_request(
					podev->bus_scale_handle, 1);
			if (ret)
				pr_err("%s Unable to set high bw\n", __func__);
			return ret;
		}
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

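/*
 * Reference-counted high-bandwidth request: the first opener turns the
 * clocks and bandwidth vote on, the last closer turns them off.
 * Serialized by the qcedev_sent_bw_req mutex.
 */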
static void qcedev_ce_high_bw_req(struct qcedev_control *podev,
							bool high_bw_req)
{
	int ret = 0;

	mutex_lock(&qcedev_sent_bw_req);
	if (high_bw_req) {
		if (podev->high_bw_req_count == 0) {
			ret = qcedev_control_clocks(podev, true);
			if (ret)
				goto exit_unlock_mutex;
		}
		podev->high_bw_req_count++;
	} else {
		if (podev->high_bw_req_count == 1) {
			ret = qcedev_control_clocks(podev, false);
			if (ret)
				goto exit_unlock_mutex;
		}
		podev->high_bw_req_count--;
	}

exit_unlock_mutex:
	mutex_unlock(&qcedev_sent_bw_req);
}

#define QCEDEV_MAGIC 0x56434544 /* "qced" */

static int qcedev_open(struct inode *inode, struct file *file);
static int qcedev_release(struct inode *inode, struct file *file);
static int start_cipher_req(struct qcedev_control *podev);
static int start_sha_req(struct qcedev_control *podev);
static inline long qcedev_ioctl(struct file *file,
				unsigned int cmd, unsigned long arg);

#ifdef CONFIG_COMPAT
#include "compat_qcedev.c"
#else
#define compat_qcedev_ioctl NULL
#endif

static const struct file_operations qcedev_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qcedev_ioctl,
	.compat_ioctl = compat_qcedev_ioctl,
	.open = qcedev_open,
	.release = qcedev_release,
};

static struct qcedev_control qce_dev[] = {
	{
		.miscdevice = {
			.minor = MISC_DYNAMIC_MINOR,
			.name = "qce",
			.fops = &qcedev_fops,
		},
		.magic = QCEDEV_MAGIC,
	},
};

#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev)
#define DEBUG_MAX_FNAME 16
#define DEBUG_MAX_RW_BUF 1024

struct qcedev_stat {
	u32 qcedev_dec_success;
	u32 qcedev_dec_fail;
	u32 qcedev_enc_success;
	u32 qcedev_enc_fail;
	u32 qcedev_sha_success;
	u32 qcedev_sha_fail;
};

static struct qcedev_stat _qcedev_stat;
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
static int _debug_qcedev;

static struct qcedev_control *qcedev_minor_to_control(unsigned int n)
{
	int i;

	for (i = 0; i < MAX_QCE_DEVICE; i++) {
		if (qce_dev[i].miscdevice.minor == n)
			return &qce_dev[i];
	}
	return NULL;
}

static int qcedev_open(struct inode *inode, struct file *file)
{
	struct qcedev_handle *handle;
	struct qcedev_control *podev;

	podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
	if (podev == NULL) {
		pr_err("%s: no such device %d\n", __func__,
				MINOR(inode->i_rdev));
		return -ENOENT;
	}

	handle = kzalloc(sizeof(struct qcedev_handle), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	handle->cntl = podev;
	file->private_data = handle;
	if (podev->platform_support.bus_scale_table != NULL)
		qcedev_ce_high_bw_req(podev, true);

	mutex_init(&handle->registeredbufs.lock);
	INIT_LIST_HEAD(&handle->registeredbufs.list);
	return 0;
}

static int qcedev_release(struct inode *inode, struct file *file)
{
	struct qcedev_control *podev;
	struct qcedev_handle *handle;

	handle = file->private_data;
	podev = handle->cntl;
	if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
		pr_err("%s: invalid handle %pK\n",
					__func__, podev);
	}
	kzfree(handle);
	file->private_data = NULL;
	if (podev != NULL && podev->platform_support.bus_scale_table != NULL)
		qcedev_ce_high_bw_req(podev, false);
	return 0;
}

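/*
 * Done-tasklet handler: completes the request that just finished and
 * starts the next queued command, if any. A command that fails to
 * start is completed immediately and the queue is retried.
 */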
static void req_done(unsigned long data)
{
	struct qcedev_control *podev = (struct qcedev_control *)data;
	struct qcedev_async_req *areq;
	unsigned long flags = 0;
	struct qcedev_async_req *new_req = NULL;
	int ret = 0;

	spin_lock_irqsave(&podev->lock, flags);
	areq = podev->active_command;
	podev->active_command = NULL;

again:
	if (!list_empty(&podev->ready_commands)) {
		new_req = container_of(podev->ready_commands.next,
						struct qcedev_async_req, list);
		list_del(&new_req->list);
		podev->active_command = new_req;
		new_req->err = 0;
		if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev);
		else
			ret = start_sha_req(podev);
	}

	spin_unlock_irqrestore(&podev->lock, flags);

	if (areq)
		complete(&areq->complete);

	if (new_req && ret) {
		complete(&new_req->complete);
		spin_lock_irqsave(&podev->lock, flags);
		podev->active_command = NULL;
		areq = NULL;
		ret = 0;
		new_req = NULL;
		goto again;
	}
}

void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
	unsigned char *authdata, int ret)
{
	struct qcedev_sha_req *areq;
	struct qcedev_control *pdev;
	struct qcedev_handle *handle;

	uint32_t *auth32 = (uint32_t *)authdata;

	areq = (struct qcedev_sha_req *) cookie;
	handle = (struct qcedev_handle *) areq->cookie;
	pdev = handle->cntl;

	if (digest)
		memcpy(&handle->sha_ctxt.digest[0], digest, 32);

	if (authdata) {
		handle->sha_ctxt.auth_data[0] = auth32[0];
		handle->sha_ctxt.auth_data[1] = auth32[1];
	}

	tasklet_schedule(&pdev->done_tasklet);
}


void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
	unsigned char *iv, int ret)
{
	struct qcedev_cipher_req *areq;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req *qcedev_areq;

	areq = (struct qcedev_cipher_req *) cookie;
	handle = (struct qcedev_handle *) areq->cookie;
	podev = handle->cntl;
	qcedev_areq = podev->active_command;

	if (iv)
		memcpy(&qcedev_areq->cipher_op_req.iv[0], iv,
			qcedev_areq->cipher_op_req.ivlen);
	tasklet_schedule(&podev->done_tasklet);
}

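/*
 * Translate the active command's cipher parameters into a qce_req and
 * submit it to the crypto engine. Called with podev->lock held and
 * podev->active_command set.
 */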
static int start_cipher_req(struct qcedev_control *podev)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_req creq;
	int ret = 0;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
	if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM) {
		pr_err("%s: Use of PMEM is not supported\n", __func__);
		goto unsupported;
	}
	creq.pmem = NULL;
	switch (qcedev_areq->cipher_op_req.alg) {
	case QCEDEV_ALG_DES:
		creq.alg = CIPHER_ALG_DES;
		break;
	case QCEDEV_ALG_3DES:
		creq.alg = CIPHER_ALG_3DES;
		break;
	case QCEDEV_ALG_AES:
		creq.alg = CIPHER_ALG_AES;
		break;
	default:
		return -EINVAL;
	}

	switch (qcedev_areq->cipher_op_req.mode) {
	case QCEDEV_AES_MODE_CBC:
	case QCEDEV_DES_MODE_CBC:
		creq.mode = QCE_MODE_CBC;
		break;
	case QCEDEV_AES_MODE_ECB:
	case QCEDEV_DES_MODE_ECB:
		creq.mode = QCE_MODE_ECB;
		break;
	case QCEDEV_AES_MODE_CTR:
		creq.mode = QCE_MODE_CTR;
		break;
	case QCEDEV_AES_MODE_XTS:
		creq.mode = QCE_MODE_XTS;
		break;
	default:
		return -EINVAL;
	}

	if ((creq.alg == CIPHER_ALG_AES) &&
		(creq.mode == QCE_MODE_CTR)) {
		creq.dir = QCE_ENCRYPT;
	} else {
		if (qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC)
			creq.dir = QCE_ENCRYPT;
		else
			creq.dir = QCE_DECRYPT;
	}

	creq.iv = &qcedev_areq->cipher_op_req.iv[0];
	creq.ivsize = qcedev_areq->cipher_op_req.ivlen;

	creq.enckey = &qcedev_areq->cipher_op_req.enckey[0];
	creq.encklen = qcedev_areq->cipher_op_req.encklen;

	creq.cryptlen = qcedev_areq->cipher_op_req.data_len;

	if (qcedev_areq->cipher_op_req.encklen == 0) {
		if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY)
			|| (qcedev_areq->cipher_op_req.op ==
				QCEDEV_OPER_DEC_NO_KEY))
			creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
		else {
			int i;

			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
				if (qcedev_areq->cipher_op_req.enckey[i] != 0)
					break;
			}

			if ((podev->platform_support.hw_key_support == 1) &&
						(i == QCEDEV_MAX_KEY_SIZE))
				creq.op = QCE_REQ_ABLK_CIPHER;
			else {
				ret = -EINVAL;
				goto unsupported;
			}
		}
	} else {
		creq.op = QCE_REQ_ABLK_CIPHER;
	}

	creq.qce_cb = qcedev_cipher_req_cb;
	creq.areq = (void *)&qcedev_areq->cipher_req;
	creq.flags = 0;
	ret = qce_ablk_cipher_req(podev->qce, &creq);
unsupported:
	if (ret)
		qcedev_areq->err = -ENXIO;
	else
		qcedev_areq->err = 0;
	return ret;
}

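/*
 * Translate the active command's hash parameters into a qce_sha_req
 * and submit it. HMAC algorithms fall back to the plain hash when the
 * engine lacks native HMAC support; keying is then handled in software
 * by the qcedev_hmac_* helpers.
 */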
static int start_sha_req(struct qcedev_control *podev)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_sha_req sreq;
	int ret = 0;
	struct qcedev_handle *handle;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	handle = qcedev_areq->handle;

	switch (qcedev_areq->sha_op_req.alg) {
	case QCEDEV_ALG_SHA1:
		sreq.alg = QCE_HASH_SHA1;
		break;
	case QCEDEV_ALG_SHA256:
		sreq.alg = QCE_HASH_SHA256;
		break;
	case QCEDEV_ALG_SHA1_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA1_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];
			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;

		} else {
			sreq.alg = QCE_HASH_SHA1;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_SHA256_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA256_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];
			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
		} else {
			sreq.alg = QCE_HASH_SHA256;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_AES_CMAC:
		sreq.alg = QCE_HASH_AES_CMAC;
		sreq.authkey = &handle->sha_ctxt.authkey[0];
		sreq.authklen = qcedev_areq->sha_op_req.authklen;
		break;
	default:
		pr_err("Algorithm %d not supported, exiting\n",
			qcedev_areq->sha_op_req.alg);
		return -EINVAL;
	}

	qcedev_areq->sha_req.cookie = handle;

	sreq.qce_cb = qcedev_sha_req_cb;
	if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
		sreq.auth_data[0] = handle->sha_ctxt.auth_data[0];
		sreq.auth_data[1] = handle->sha_ctxt.auth_data[1];
		sreq.auth_data[2] = handle->sha_ctxt.auth_data[2];
		sreq.auth_data[3] = handle->sha_ctxt.auth_data[3];
		sreq.digest = &handle->sha_ctxt.digest[0];
		sreq.first_blk = handle->sha_ctxt.first_blk;
		sreq.last_blk = handle->sha_ctxt.last_blk;
	}
	sreq.size = qcedev_areq->sha_req.sreq.nbytes;
	sreq.src = qcedev_areq->sha_req.sreq.src;
	sreq.areq = (void *)&qcedev_areq->sha_req;
	sreq.flags = 0;

	ret = qce_process_sha_req(podev->qce, &sreq);

	if (ret)
		qcedev_areq->err = -ENXIO;
	else
		qcedev_areq->err = 0;
	return ret;
}

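/*
 * Submit a request to the engine, or queue it on ready_commands if a
 * command is already active, then block until it completes. Also
 * updates the success/failure statistics reported through debugfs.
 */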
static int submit_req(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle)
{
	struct qcedev_control *podev;
	unsigned long flags = 0;
	int ret = 0;
	struct qcedev_stat *pstat;

	qcedev_areq->err = 0;
	podev = handle->cntl;

	spin_lock_irqsave(&podev->lock, flags);

	if (podev->active_command == NULL) {
		podev->active_command = qcedev_areq;
		if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev);
		else
			ret = start_sha_req(podev);
	} else {
		list_add_tail(&qcedev_areq->list, &podev->ready_commands);
	}

	if (ret != 0)
		podev->active_command = NULL;

	spin_unlock_irqrestore(&podev->lock, flags);

	if (ret == 0)
		wait_for_completion(&qcedev_areq->complete);

	if (ret)
		qcedev_areq->err = -EIO;

	pstat = &_qcedev_stat;
	if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
		switch (qcedev_areq->cipher_op_req.op) {
		case QCEDEV_OPER_DEC:
			if (qcedev_areq->err)
				pstat->qcedev_dec_fail++;
			else
				pstat->qcedev_dec_success++;
			break;
		case QCEDEV_OPER_ENC:
			if (qcedev_areq->err)
				pstat->qcedev_enc_fail++;
			else
				pstat->qcedev_enc_success++;
			break;
		default:
			break;
		}
	} else {
		if (qcedev_areq->err)
			pstat->qcedev_sha_fail++;
		else
			pstat->qcedev_sha_success++;
	}

	return qcedev_areq->err;
}

static int qcedev_sha_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	struct qcedev_sha_ctxt *sha_ctxt = &handle->sha_ctxt;

	memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt));
	sha_ctxt->first_blk = 1;

	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) {
		memcpy(&sha_ctxt->digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		sha_ctxt->diglen = SHA1_DIGEST_SIZE;
	} else {
		if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) {
			memcpy(&sha_ctxt->digest[0],
				&_std_init_vector_sha256_uint8[0],
				SHA256_DIGEST_SIZE);
			sha_ctxt->diglen = SHA256_DIGEST_SIZE;
		}
	}
	sha_ctxt->init_done = true;
	return 0;
}


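/*
 * Hash one chunk of user data (at most QCE_MAX_OPER_DATA bytes).
 * Bytes that do not fill a whole hash block are held back in
 * sha_ctxt.trailing_buf and prepended to the next update or consumed
 * by the final. The bounce buffer is cache-line aligned for DMA.
 */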
static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	int err = 0;
	int i = 0;
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;

	uint32_t sha_pad_len = 0;
	uint32_t trailing_buf_len = 0;
	uint32_t t_buf = handle->sha_ctxt.trailing_buf_len;
	uint32_t sha_block_size;

	total = qcedev_areq->sha_op_req.data_len + t_buf;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	if (total <= sha_block_size) {
		uint32_t len = qcedev_areq->sha_op_req.data_len;

		i = 0;

		k_src = &handle->sha_ctxt.trailing_buf[t_buf];

		/* Copy data from user src(s) */
		while (len > 0) {
			user_src =
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
			if (user_src && copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len))
				return -EFAULT;

			len -= qcedev_areq->sha_op_req.data[i].len;
			k_src += qcedev_areq->sha_op_req.data[i].len;
			i++;
		}
		handle->sha_ctxt.trailing_buf_len = total;

		return 0;
	}


	k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
				GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
							CACHE_LINE_SIZE);
	k_src = k_align_src;

	/* check for trailing buffer from previous updates and append it */
	if (t_buf > 0) {
		memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
								t_buf);
		k_src += t_buf;
	}

	/* Copy data from user src(s) */
	user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
	if (user_src && copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[0].len)) {
		kzfree(k_buf_src);
		return -EFAULT;
	}
	k_src += qcedev_areq->sha_op_req.data[0].len;
	for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src = (void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && copy_from_user(k_src,
					(void __user *)user_src,
					qcedev_areq->sha_op_req.data[i].len)) {
			kzfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	/* get new trailing buffer */
	sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total;
	trailing_buf_len = CE_SHA_BLOCK_SIZE - sha_pad_len;

	qcedev_areq->sha_req.sreq.src = sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src,
						total - trailing_buf_len);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len;

	/* update sha_ctxt trailing buf content to new trailing buf */
	if (trailing_buf_len > 0) {
		memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
		memcpy(&handle->sha_ctxt.trailing_buf[0],
			(k_src - trailing_buf_len),
			trailing_buf_len);
	}
	handle->sha_ctxt.trailing_buf_len = trailing_buf_len;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;

	kzfree(k_buf_src);
	return err;
}

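/*
 * Split an arbitrarily large update into QCE_MAX_OPER_DATA-sized
 * slices, rewriting the source entry table in place for each slice
 * and restoring the caller's request structure afterwards.
 */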
static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	int err = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	int num_entries = 0;
	uint32_t total = 0;

	if (handle->sha_ctxt.init_done == false) {
		pr_err("%s Init was not called\n", __func__);
		return -EINVAL;
	}

	if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) {

		struct qcedev_sha_op_req *saved_req;
		struct qcedev_sha_op_req req;
		struct qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req;

		/* save the original req structure */
		saved_req =
			kmalloc(sizeof(struct qcedev_sha_op_req), GFP_KERNEL);
		if (saved_req == NULL) {
			pr_err("%s:Can't Allocate mem:saved_req 0x%lx\n",
					__func__, (uintptr_t)saved_req);
			return -ENOMEM;
		}
		memcpy(&req, sreq, sizeof(struct qcedev_sha_op_req));
		memcpy(saved_req, sreq, sizeof(struct qcedev_sha_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (sreq->data[i].len > QCE_MAX_OPER_DATA) {
				sreq->data[0].len = QCE_MAX_OPER_DATA;
				if (i > 0) {
					sreq->data[0].vaddr =
							sreq->data[i].vaddr;
				}

				sreq->data_len = QCE_MAX_OPER_DATA;
				sreq->entries = 1;

				err = qcedev_sha_update_max_xfer(qcedev_areq,
								handle, sg_src);

				sreq->data[i].len = req.data[i].len -
							QCE_MAX_OPER_DATA;
				sreq->data[i].vaddr = req.data[i].vaddr +
							QCE_MAX_OPER_DATA;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;
			} else {
				total = 0;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + sreq->data[j].len) >=
							QCE_MAX_OPER_DATA) {
						sreq->data[j].len =
						(QCE_MAX_OPER_DATA - total);
						total = QCE_MAX_OPER_DATA;
						break;
					}
					total += sreq->data[j].len;
				}

				sreq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						sreq->data[k].len =
							sreq->data[i+k].len;
						sreq->data[k].vaddr =
							sreq->data[i+k].vaddr;
					}
				sreq->entries = num_entries;

				i = j;
				err = qcedev_sha_update_max_xfer(qcedev_areq,
								handle, sg_src);
				num_entries = 0;

				sreq->data[i].vaddr = req.data[i].vaddr +
							sreq->data[i].len;
				sreq->data[i].len = req.data[i].len -
							sreq->data[i].len;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;

				if (sreq->data[i].len == 0)
					i++;
			}
		} /* end of while ((i < req.entries) && (err == 0)) */

		/* Restore the original req structure */
		for (i = 0; i < saved_req->entries; i++) {
			sreq->data[i].len = saved_req->data[i].len;
			sreq->data[i].vaddr = saved_req->data[i].vaddr;
		}
		sreq->entries = saved_req->entries;
		sreq->data_len = saved_req->data_len;
		kzfree(saved_req);
	} else
		err = qcedev_sha_update_max_xfer(qcedev_areq, handle, sg_src);

	return err;
}

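/*
 * Hash whatever remains in the trailing buffer as the last block and
 * reset the per-handle SHA context; the resulting digest is left in
 * sha_ctxt.digest for the ioctl layer to copy out.
 */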
static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint32_t total;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;

	if (handle->sha_ctxt.init_done == false) {
		pr_err("%s Init was not called\n", __func__);
		return -EINVAL;
	}

	handle->sha_ctxt.last_blk = 1;

	total = handle->sha_ctxt.trailing_buf_len;

	if (total) {
		k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
					GFP_KERNEL);
		if (k_buf_src == NULL)
			return -ENOMEM;

		k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
							CACHE_LINE_SIZE);
		memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total);
	}
	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src, total);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.first_blk = 0;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;
	handle->sha_ctxt.trailing_buf_len = 0;
	handle->sha_ctxt.init_done = false;
	memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);

	kzfree(k_buf_src);
	qcedev_areq->sha_req.sreq.src = NULL;
	return err;
}

static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle,
					struct scatterlist *sg_src)
{
	int err = 0;
	int i = 0;
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;

	total = qcedev_areq->sha_op_req.data_len;

	if (copy_from_user(&handle->sha_ctxt.authkey[0],
				(void __user *)qcedev_areq->sha_op_req.authkey,
				qcedev_areq->sha_op_req.authklen))
		return -EFAULT;


	k_buf_src = kmalloc(total, GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_src = k_buf_src;

	/* Copy data from user src(s) */
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src =
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && copy_from_user(k_src, (void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len)) {
			kzfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	qcedev_areq->sha_req.sreq.src = sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_buf_src, total);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total;
	handle->sha_ctxt.diglen = qcedev_areq->sha_op_req.diglen;
	err = submit_req(qcedev_areq, handle);

	kzfree(k_buf_src);
	return err;
}

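/*
 * Load the HMAC key into the context. A key longer than
 * QCEDEV_MAX_KEY_SIZE is first reduced by hashing it, as HMAC
 * (RFC 2104) prescribes.
 */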
static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq,
					struct qcedev_handle *handle,
					struct scatterlist *sg_src)
{
	int err = 0;

	if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
		qcedev_sha_init(areq, handle);
		if (copy_from_user(&handle->sha_ctxt.authkey[0],
				(void __user *)areq->sha_op_req.authkey,
				areq->sha_op_req.authklen))
			return -EFAULT;
	} else {
		struct qcedev_async_req authkey_areq;
		uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];

		init_completion(&authkey_areq.complete);

		authkey_areq.sha_op_req.entries = 1;
		authkey_areq.sha_op_req.data[0].vaddr =
						areq->sha_op_req.authkey;
		authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.diglen = 0;
		authkey_areq.handle = handle;

		memset(&authkey_areq.sha_op_req.digest[0], 0,
						QCEDEV_MAX_SHA_DIGEST);
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1;
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256;

		authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;

		qcedev_sha_init(&authkey_areq, handle);
		err = qcedev_sha_update(&authkey_areq, handle, sg_src);
		if (!err)
			err = qcedev_sha_final(&authkey_areq, handle);
		else
			return err;
		memcpy(&authkey[0], &handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		qcedev_sha_init(areq, handle);

		memcpy(&handle->sha_ctxt.authkey[0], &authkey[0],
				handle->sha_ctxt.diglen);
	}
	return err;
}

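/*
 * Software HMAC outer pass: hash the (key XOR opad) block staged in
 * the trailing buffer, then place the inner digest there so the
 * closing qcedev_sha_final() computes outer-hash(opad-block || inner
 * digest).
 */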
static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint8_t *k_src = NULL;
	uint32_t sha_block_size = 0;
	uint32_t sha_digest_size = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		sha_digest_size = SHA1_DIGEST_SIZE;
		sha_block_size = SHA1_BLOCK_SIZE;
	} else {
		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
			sha_digest_size = SHA256_DIGEST_SIZE;
			sha_block_size = SHA256_BLOCK_SIZE;
		}
	}
	k_src = kmalloc(sha_block_size, GFP_KERNEL);
	if (k_src == NULL)
		return -ENOMEM;

	/* check for trailing buffer from previous updates and append it */
	memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
			handle->sha_ctxt.trailing_buf_len);

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = sha_block_size;
	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
	memcpy(&handle->sha_ctxt.trailing_buf[0], &handle->sha_ctxt.digest[0],
						sha_digest_size);
	handle->sha_ctxt.trailing_buf_len = sha_digest_size;

	handle->sha_ctxt.first_blk = 1;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		memcpy(&handle->sha_ctxt.digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		handle->sha_ctxt.diglen = SHA1_DIGEST_SIZE;
	}

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
		memcpy(&handle->sha_ctxt.digest[0],
			&_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE);
		handle->sha_ctxt.diglen = SHA256_DIGEST_SIZE;
	}
	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;

	kzfree(k_src);
	qcedev_areq->sha_req.sreq.src = NULL;
	return err;
}

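/*
 * XOR the stored auth key with the HMAC ipad (0x36) or opad (0x5c)
 * constant and stage the result in the trailing buffer as the first
 * block of the inner or outer hash.
 */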
static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq,
				struct qcedev_handle *handle, bool ikey)
{
	int i;
	uint32_t constant;
	uint32_t sha_block_size;

	if (ikey)
		constant = 0x36;
	else
		constant = 0x5c;

	if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
	for (i = 0; i < sha_block_size; i++)
		handle->sha_ctxt.trailing_buf[i] =
				(handle->sha_ctxt.authkey[i] ^ constant);

	handle->sha_ctxt.trailing_buf_len = sha_block_size;
	return 0;
}

static int qcedev_hmac_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	int err;
	struct qcedev_control *podev = handle->cntl;

	err = qcedev_set_hmac_auth_key(areq, handle, sg_src);
	if (err)
		return err;
	if (!podev->ce_support.sha_hmac)
		qcedev_hmac_update_iokey(areq, handle, true);
	return 0;
}

static int qcedev_hmac_final(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	int err;
	struct qcedev_control *podev = handle->cntl;

	err = qcedev_sha_final(areq, handle);
	if (podev->ce_support.sha_hmac)
		return err;

	qcedev_hmac_update_iokey(areq, handle, false);
	err = qcedev_hmac_get_ohash(areq, handle);
	if (err)
		return err;
	err = qcedev_sha_final(areq, handle);

	return err;
}

static int qcedev_hash_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_init(areq, handle);
	else
		return qcedev_hmac_init(areq, handle, sg_src);
}

static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	return qcedev_sha_update(qcedev_areq, handle, sg_src);
}

static int qcedev_hash_final(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_final(areq, handle);
	else
		return qcedev_hmac_final(areq, handle);
}

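/*
 * Run one bounce-buffer chunk through the cipher in place: gather the
 * user source buffers into k_align_src, submit the request, then
 * scatter the result back to the user destination buffers. *di tracks
 * the destination entry index across calls.
 */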
static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
				int *di, struct qcedev_handle *handle,
				uint8_t *k_align_src)
{
	int err = 0;
	int i = 0;
	int dst_i = *di;
	struct scatterlist sg_src;
	uint32_t byteoffset = 0;
	uint8_t *user_src = NULL;
	uint8_t *k_align_dst = k_align_src;
	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;


	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->cipher_op_req.byteoffset;

	user_src = (void __user *)areq->cipher_op_req.vbuf.src[0].vaddr;
	if (user_src && copy_from_user((k_align_src + byteoffset),
				(void __user *)user_src,
				areq->cipher_op_req.vbuf.src[0].len))
		return -EFAULT;

	k_align_src += byteoffset + areq->cipher_op_req.vbuf.src[0].len;

	for (i = 1; i < areq->cipher_op_req.entries; i++) {
		user_src =
			(void __user *)areq->cipher_op_req.vbuf.src[i].vaddr;
		if (user_src && copy_from_user(k_align_src,
					(void __user *)user_src,
					areq->cipher_op_req.vbuf.src[i].len)) {
			return -EFAULT;
		}
		k_align_src += areq->cipher_op_req.vbuf.src[i].len;
	}

	/* restore src beginning */
	k_align_src = k_align_dst;
	areq->cipher_op_req.data_len += byteoffset;

	areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
	areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;

	/* In place encryption/decryption */
	sg_set_buf(areq->cipher_req.creq.src,
					k_align_dst,
					areq->cipher_op_req.data_len);
	sg_mark_end(areq->cipher_req.creq.src);

	areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
	areq->cipher_req.creq.info = areq->cipher_op_req.iv;
	areq->cipher_op_req.entries = 1;

	err = submit_req(areq, handle);

	/* copy data to destination buffer */
	creq->data_len -= byteoffset;

	while (creq->data_len > 0) {
		if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
			if (err == 0 && copy_to_user(
				(void __user *)creq->vbuf.dst[dst_i].vaddr,
					(k_align_dst + byteoffset),
					creq->vbuf.dst[dst_i].len)) {
				err = -EFAULT;
				goto exit;
			}

			k_align_dst += creq->vbuf.dst[dst_i].len;
			creq->data_len -= creq->vbuf.dst[dst_i].len;
			dst_i++;
		} else {
			if (err == 0 && copy_to_user(
				(void __user *)creq->vbuf.dst[dst_i].vaddr,
					(k_align_dst + byteoffset),
					creq->data_len)) {
				err = -EFAULT;
				goto exit;
			}

			k_align_dst += creq->data_len;
			creq->vbuf.dst[dst_i].len -= creq->data_len;
			creq->vbuf.dst[dst_i].vaddr += creq->data_len;
			creq->data_len = 0;
		}
	}
	*di = dst_i;
exit:
	areq->cipher_req.creq.src = NULL;
	areq->cipher_req.creq.dst = NULL;
	return err;
}

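/*
 * Top-level virtual-buffer cipher path: feed the request through the
 * kernel bounce buffer in chunks of at most QCE_MAX_OPER_DATA,
 * rewriting the source entry table per chunk and restoring the
 * caller's request structure when done.
 */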
static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
						struct qcedev_handle *handle)
{
	int err = 0;
	int di = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	uint32_t byteoffset = 0;
	int num_entries = 0;
	uint32_t total = 0;
	uint32_t len;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;
	uint32_t max_data_xfer;
	struct qcedev_cipher_op_req *saved_req;
	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;

	total = 0;

	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->cipher_op_req.byteoffset;
	k_buf_src = kmalloc(QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2,
				GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;
	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
							CACHE_LINE_SIZE);
	max_data_xfer = QCE_MAX_OPER_DATA - byteoffset;

	saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
	if (saved_req == NULL) {
		kzfree(k_buf_src);
		return -ENOMEM;

	}
	memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));

	if (areq->cipher_op_req.data_len > max_data_xfer) {
		struct qcedev_cipher_op_req req;

		/* save the original req structure */
		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (creq->vbuf.src[i].len > max_data_xfer) {
				creq->vbuf.src[0].len = max_data_xfer;
				if (i > 0) {
					creq->vbuf.src[0].vaddr =
						creq->vbuf.src[i].vaddr;
				}

				creq->data_len = max_data_xfer;
				creq->entries = 1;

				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
						&di, handle, k_align_src);
				if (err < 0) {
					kzfree(k_buf_src);
					kzfree(saved_req);
					return err;
				}

				creq->vbuf.src[i].len = req.vbuf.src[i].len -
							max_data_xfer;
				creq->vbuf.src[i].vaddr =
						req.vbuf.src[i].vaddr +
						max_data_xfer;
				req.vbuf.src[i].vaddr =
						creq->vbuf.src[i].vaddr;
				req.vbuf.src[i].len = creq->vbuf.src[i].len;

			} else {
				total = areq->cipher_op_req.byteoffset;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + creq->vbuf.src[j].len)
							>= max_data_xfer) {
						creq->vbuf.src[j].len =
							max_data_xfer - total;
						total = max_data_xfer;
						break;
					}
					total += creq->vbuf.src[j].len;
				}

				creq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						creq->vbuf.src[k].len =
							creq->vbuf.src[i+k].len;
						creq->vbuf.src[k].vaddr =
							creq->vbuf.src[i+k].vaddr;
					}
				creq->entries = num_entries;

				i = j;
				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
						&di, handle, k_align_src);
				if (err < 0) {
					kzfree(k_buf_src);
					kzfree(saved_req);
					return err;
				}

				num_entries = 0;
				areq->cipher_op_req.byteoffset = 0;

				creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr
					+ creq->vbuf.src[i].len;
				creq->vbuf.src[i].len = req.vbuf.src[i].len -
							creq->vbuf.src[i].len;

				req.vbuf.src[i].vaddr =
						creq->vbuf.src[i].vaddr;
				req.vbuf.src[i].len = creq->vbuf.src[i].len;

				if (creq->vbuf.src[i].len == 0)
					i++;
			}

			areq->cipher_op_req.byteoffset = 0;
			max_data_xfer = QCE_MAX_OPER_DATA;
			byteoffset = 0;

		} /* end of while ((i < req.entries) && (err == 0)) */
	} else
		err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, handle,
								k_align_src);

	/* Restore the original req structure */
	for (i = 0; i < saved_req->entries; i++) {
		creq->vbuf.src[i].len = saved_req->vbuf.src[i].len;
		creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr;
	}
	for (len = 0, i = 0; len < saved_req->data_len; i++) {
		creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len;
		creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr;
		len += saved_req->vbuf.dst[i].len;
	}
	creq->entries = saved_req->entries;
	creq->data_len = saved_req->data_len;
	creq->byteoffset = saved_req->byteoffset;

	kzfree(saved_req);
	kzfree(k_buf_src);
	return err;

}

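/*
 * Validate the key portion of a cipher request: a zero-length,
 * all-zero key is only legal when the target supports a hardware
 * (fused) key, and explicit AES keys must match a supported size for
 * the chosen mode (doubled for XTS).
 */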
static int qcedev_check_cipher_key(struct qcedev_cipher_op_req *req,
						struct qcedev_control *podev)
{
	/* if intending to use HW key make sure key fields are set
	 * correctly and HW key is indeed supported in target
	 */
	if (req->encklen == 0) {
		int i;

		for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
			if (req->enckey[i]) {
				pr_err("%s: Invalid key: non-zero key input\n",
								__func__);
				goto error;
			}
		}
		if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
			(req->op != QCEDEV_OPER_DEC_NO_KEY))
			if (!podev->platform_support.hw_key_support) {
				pr_err("%s: Invalid op %d\n", __func__,
						(uint32_t)req->op);
				goto error;
			}
	} else {
		if (req->encklen == QCEDEV_AES_KEY_192) {
			if (!podev->ce_support.aes_key_192) {
				pr_err("%s: AES-192 not supported\n", __func__);
				goto error;
			}
		} else {
			/* if not using HW key make sure key
			 * length is valid
			 */
			if (req->mode == QCEDEV_AES_MODE_XTS) {
				if ((req->encklen != QCEDEV_AES_KEY_128*2) &&
					(req->encklen != QCEDEV_AES_KEY_256*2)) {
					pr_err("%s: unsupported key size: %d\n",
							__func__, req->encklen);
					goto error;
				}
			} else {
				if ((req->encklen != QCEDEV_AES_KEY_128) &&
					(req->encklen != QCEDEV_AES_KEY_256)) {
					pr_err("%s: unsupported key size %d\n",
							__func__, req->encklen);
					goto error;
				}
			}
		}
	}
	return 0;
error:
	return -EINVAL;
}

static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
						struct qcedev_control *podev)
{
	uint32_t total = 0;
	uint32_t i;

	if (req->use_pmem) {
		pr_err("%s: Use of PMEM is not supported\n", __func__);
		goto error;
	}
	if ((req->entries == 0) || (req->data_len == 0) ||
			(req->entries > QCEDEV_MAX_BUFFERS)) {
		pr_err("%s: Invalid cipher length/entries\n", __func__);
		goto error;
	}
	if ((req->alg >= QCEDEV_ALG_LAST) ||
		(req->mode >= QCEDEV_AES_DES_MODE_LAST)) {
		pr_err("%s: Invalid algorithm %d\n", __func__,
						(uint32_t)req->alg);
		goto error;
	}
	if ((req->mode == QCEDEV_AES_MODE_XTS) &&
				(!podev->ce_support.aes_xts)) {
		pr_err("%s: XTS algorithm is not supported\n", __func__);
		goto error;
	}
	if (req->alg == QCEDEV_ALG_AES) {
		if (qcedev_check_cipher_key(req, podev))
			goto error;

	}
	/* if using a byteoffset, make sure it is CTR mode using vbuf */
	if (req->byteoffset) {
		if (req->mode != QCEDEV_AES_MODE_CTR) {
			pr_err("%s: Operation on byte offset not supported\n",
								__func__);
			goto error;
		}
		if (req->byteoffset >= AES_CE_BLOCK_SIZE) {
			pr_err("%s: Invalid byte offset\n", __func__);
			goto error;
		}
		total = req->byteoffset;
		for (i = 0; i < req->entries; i++) {
			if (total > U32_MAX - req->vbuf.src[i].len) {
				pr_err("%s:Integer overflow on total src len\n",
					__func__);
				goto error;
			}
			total += req->vbuf.src[i].len;
		}
	}

	if (req->data_len < req->byteoffset) {
		pr_err("%s: req data length %u is less than byteoffset %u\n",
				__func__, req->data_len, req->byteoffset);
		goto error;
	}

	/* Ensure IV size */
	if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
		pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
		goto error;
	}

	/* Ensure Key size */
	if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
		pr_err("%s: Klen is not correct: %u\n", __func__, req->encklen);
		goto error;
	}

	/* Ensure zero ivlen for ECB mode */
	if (req->ivlen > 0) {
		if ((req->mode == QCEDEV_AES_MODE_ECB) ||
				(req->mode == QCEDEV_DES_MODE_ECB)) {
			pr_err("%s: Expecting a zero length IV\n", __func__);
			goto error;
		}
	} else {
		if ((req->mode != QCEDEV_AES_MODE_ECB) &&
				(req->mode != QCEDEV_DES_MODE_ECB)) {
			pr_err("%s: Expecting a non-zero length IV\n",
								__func__);
			goto error;
		}
	}
	/* Check for sum of all dst length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
			pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
				__func__, i, req->vbuf.dst[i].len);
			goto error;
		}
		if (req->vbuf.dst[i].len >= U32_MAX - total) {
			pr_err("%s: Integer overflow on total req dst vbuf length\n",
				__func__);
			goto error;
		}
		total += req->vbuf.dst[i].len;
	}
	if (total != req->data_len) {
		pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
			__func__, i, total, req->data_len);
		goto error;
	}
	/* Check for sum of all src length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
			pr_err("%s: NULL req src vbuf[%d] with length %d\n",
				__func__, i, req->vbuf.src[i].len);
			goto error;
		}
		if (req->vbuf.src[i].len > U32_MAX - total) {
			pr_err("%s: Integer overflow on total req src vbuf length\n",
				__func__);
			goto error;
		}
		total += req->vbuf.src[i].len;
	}
	if (total != req->data_len) {
		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
			__func__, total, req->data_len);
		goto error;
	}
	return 0;
error:
	return -EINVAL;

}

static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
						struct qcedev_control *podev)
{
	uint32_t total = 0;
	uint32_t i;

	if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
				(!podev->ce_support.cmac)) {
		pr_err("%s: CMAC not supported\n", __func__);
		goto sha_error;
	}
	if ((!req->entries) || (req->entries > QCEDEV_MAX_BUFFERS)) {
		pr_err("%s: Invalid num entries (%d)\n",
						__func__, req->entries);
		goto sha_error;
	}

	if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST) {
		pr_err("%s: Invalid algorithm (%d)\n", __func__, req->alg);
		goto sha_error;
	}
	if ((req->alg == QCEDEV_ALG_SHA1_HMAC) ||
			(req->alg == QCEDEV_ALG_SHA256_HMAC)) {
		if (req->authkey == NULL) {
			pr_err("%s: Invalid authkey pointer\n", __func__);
			goto sha_error;
		}
		if (req->authklen <= 0) {
			pr_err("%s: Invalid authkey length (%d)\n",
						__func__, req->authklen);
			goto sha_error;
		}
	}

	if (req->alg == QCEDEV_ALG_AES_CMAC) {
		if ((req->authklen != QCEDEV_AES_KEY_128) &&
					(req->authklen != QCEDEV_AES_KEY_256)) {
			pr_err("%s: unsupported key length\n", __func__);
			goto sha_error;
		}
	}

	/* Check for sum of all src length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (req->data[i].len > U32_MAX - total) {
			pr_err("%s: Integer overflow on total req buf length\n",
				__func__);
			goto sha_error;
		}
		total += req->data[i].len;
	}

	if (total != req->data_len) {
		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
			__func__, total, req->data_len);
		goto sha_error;
	}
	return 0;
sha_error:
	return -EINVAL;
}

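/*
 * Main ioctl entry point. Cipher requests go through the vbuf bounce
 * path; the SHA init/update/final ioctls share the single per-handle
 * hash context, so they are serialized with hash_access_lock.
 */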
1665static inline long qcedev_ioctl(struct file *file,
1666 unsigned int cmd, unsigned long arg)
1667{
1668 int err = 0;
1669 struct qcedev_handle *handle;
1670 struct qcedev_control *podev;
1671 struct qcedev_async_req qcedev_areq;
1672 struct qcedev_stat *pstat;
1673
1674 handle = file->private_data;
1675 podev = handle->cntl;
1676 qcedev_areq.handle = handle;
1677 if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
mohamed sunfeerc6b8e6d2017-06-29 15:13:34 +05301678 pr_err("%s: invalid handle %pK\n",
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001679 __func__, podev);
1680 return -ENOENT;
1681 }
1682
1683 /* Verify user arguments. */
1684 if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC)
1685 return -ENOTTY;
1686
1687 init_completion(&qcedev_areq.complete);
1688 pstat = &_qcedev_stat;
1689
1690 switch (cmd) {
1691 case QCEDEV_IOCTL_ENC_REQ:
1692 case QCEDEV_IOCTL_DEC_REQ:
1693 if (copy_from_user(&qcedev_areq.cipher_op_req,
1694 (void __user *)arg,
1695 sizeof(struct qcedev_cipher_op_req)))
1696 return -EFAULT;
1697 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_CIPHER;
1698
1699 if (qcedev_check_cipher_params(&qcedev_areq.cipher_op_req,
1700 podev))
1701 return -EINVAL;
1702
1703 err = qcedev_vbuf_ablk_cipher(&qcedev_areq, handle);
1704 if (err)
1705 return err;
1706 if (copy_to_user((void __user *)arg,
1707 &qcedev_areq.cipher_op_req,
1708 sizeof(struct qcedev_cipher_op_req)))
1709 return -EFAULT;
1710 break;
1711
1712 case QCEDEV_IOCTL_SHA_INIT_REQ:
1713 {
1714 struct scatterlist sg_src;
1715
1716 if (copy_from_user(&qcedev_areq.sha_op_req,
1717 (void __user *)arg,
1718 sizeof(struct qcedev_sha_op_req)))
1719 return -EFAULT;
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301720 mutex_lock(&hash_access_lock);
1721 if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) {
1722 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001723 return -EINVAL;
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301724 }
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001725 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
1726 err = qcedev_hash_init(&qcedev_areq, handle, &sg_src);
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301727 if (err) {
1728 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001729 return err;
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301730 }
1731 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001732 if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
1733 sizeof(struct qcedev_sha_op_req)))
1734 return -EFAULT;
1735 }
1736 handle->sha_ctxt.init_done = true;
1737 break;
1738 case QCEDEV_IOCTL_GET_CMAC_REQ:
1739 if (!podev->ce_support.cmac)
1740 return -ENOTTY;
1741 case QCEDEV_IOCTL_SHA_UPDATE_REQ:
1742 {
1743 struct scatterlist sg_src;
1744
1745 if (copy_from_user(&qcedev_areq.sha_op_req,
1746 (void __user *)arg,
1747 sizeof(struct qcedev_sha_op_req)))
1748 return -EFAULT;
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301749 mutex_lock(&hash_access_lock);
1750 if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) {
1751 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001752 return -EINVAL;
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301753 }
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001754 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
1755
1756 if (qcedev_areq.sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
1757 err = qcedev_hash_cmac(&qcedev_areq, handle, &sg_src);
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301758 if (err) {
1759 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001760 return err;
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301761 }
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001762 } else {
1763 if (handle->sha_ctxt.init_done == false) {
1764 pr_err("%s Init was not called\n", __func__);
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301765 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001766 return -EINVAL;
1767 }
1768 err = qcedev_hash_update(&qcedev_areq, handle, &sg_src);
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301769 if (err) {
1770 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001771 return err;
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301772 }
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001773 }
1774
1775 if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
1776 pr_err("Invalid sha_ctxt.diglen %d\n",
1777 handle->sha_ctxt.diglen);
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301778 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001779 return -EINVAL;
1780 }
1781 memcpy(&qcedev_areq.sha_op_req.digest[0],
1782 &handle->sha_ctxt.digest[0],
1783 handle->sha_ctxt.diglen);
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301784 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001785 if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
1786 sizeof(struct qcedev_sha_op_req)))
1787 return -EFAULT;
1788 }
1789 break;
1790
1791 case QCEDEV_IOCTL_SHA_FINAL_REQ:
1792
1793 if (handle->sha_ctxt.init_done == false) {
1794 pr_err("%s Init was not called\n", __func__);
1795 return -EINVAL;
1796 }
1797 if (copy_from_user(&qcedev_areq.sha_op_req,
1798 (void __user *)arg,
1799 sizeof(struct qcedev_sha_op_req)))
1800 return -EFAULT;
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301801 mutex_lock(&hash_access_lock);
1802 if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) {
1803 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001804 return -EINVAL;
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301805 }
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001806 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
1807 err = qcedev_hash_final(&qcedev_areq, handle);
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301808 if (err) {
1809 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001810 return err;
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301811 }
Brahmaji K2ec40862017-05-15 16:02:15 +05301812 if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
1813 pr_err("Invalid sha_ctxt.diglen %d\n",
1814 handle->sha_ctxt.diglen);
1815 mutex_unlock(&hash_access_lock);
1816 return -EINVAL;
1817 }
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001818 qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
1819 memcpy(&qcedev_areq.sha_op_req.digest[0],
1820 &handle->sha_ctxt.digest[0],
1821 handle->sha_ctxt.diglen);
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301822 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001823 if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
1824 sizeof(struct qcedev_sha_op_req)))
1825 return -EFAULT;
1826 handle->sha_ctxt.init_done = false;
1827 break;
1828
	case QCEDEV_IOCTL_GET_SHA_REQ:
		{
		struct scatterlist sg_src;

		if (copy_from_user(&qcedev_areq.sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			return -EINVAL;
		}
		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
		err = qcedev_hash_init(&qcedev_areq, handle, &sg_src);
		if (err) {
			mutex_unlock(&hash_access_lock);
			return err;
		}
		err = qcedev_hash_update(&qcedev_areq, handle, &sg_src);
		if (err) {
			mutex_unlock(&hash_access_lock);
			return err;
		}
		err = qcedev_hash_final(&qcedev_areq, handle);
		if (err) {
			mutex_unlock(&hash_access_lock);
			return err;
		}
		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
			pr_err("Invalid sha_ctxt.diglen %d\n",
					handle->sha_ctxt.diglen);
			mutex_unlock(&hash_access_lock);
			return -EINVAL;
		}
		qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
		memcpy(&qcedev_areq.sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
				sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		}
		break;

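	/*
	 * Map one or more dma-buf/ION fds into the crypto engine's SMMU
	 * context and return the device addresses in buf_vaddr[]. A
	 * minimal userspace sketch (illustrative only; it assumes the
	 * qcedev_map_buf_req layout matching the fields used below):
	 *
	 *	struct qcedev_map_buf_req req = { {0} };
	 *
	 *	req.num_fds = 1;
	 *	req.fd[0] = buf_fd;
	 *	req.fd_offset[0] = 0;
	 *	req.fd_size[0] = buf_len;
	 *	if (!ioctl(qce_fd, QCEDEV_IOCTL_MAP_BUF_REQ, &req))
	 *		;	/* req.buf_vaddr[0] now holds the mapped address */
	 */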
	case QCEDEV_IOCTL_MAP_BUF_REQ:
		{
		unsigned long long vaddr = 0;
		struct qcedev_map_buf_req map_buf = { {0} };
		int i = 0;

		if (copy_from_user(&map_buf,
				(void __user *)arg, sizeof(map_buf)))
			return -EFAULT;

		/* num_fds is user-controlled; bound it to the fd[] array */
		if (map_buf.num_fds > ARRAY_SIZE(map_buf.fd)) {
			pr_err("%s: err: num_fds (%d) exceeds max value\n",
					__func__, map_buf.num_fds);
			return -EINVAL;
		}

		for (i = 0; i < map_buf.num_fds; i++) {
			err = qcedev_check_and_map_buffer(handle,
					map_buf.fd[i],
					map_buf.fd_offset[i],
					map_buf.fd_size[i],
					&vaddr);
			if (err) {
				pr_err(
					"%s: err: failed to map fd(%d) - %d\n",
					__func__, map_buf.fd[i], err);
				return err;
			}
			map_buf.buf_vaddr[i] = vaddr;
			pr_info("%s: info: vaddr = %llx\n",
					__func__, vaddr);
		}

		if (copy_to_user((void __user *)arg, &map_buf,
				sizeof(map_buf)))
			return -EFAULT;
		break;
		}

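	/*
	 * Tear down SMMU mappings previously created by
	 * QCEDEV_IOCTL_MAP_BUF_REQ for each fd in the request.
	 */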
	case QCEDEV_IOCTL_UNMAP_BUF_REQ:
		{
		struct qcedev_unmap_buf_req unmap_buf = { { 0 } };
		int i = 0;

		if (copy_from_user(&unmap_buf,
				(void __user *)arg, sizeof(unmap_buf)))
			return -EFAULT;

		/* num_fds is user-controlled; bound it to the fd[] array */
		if (unmap_buf.num_fds > ARRAY_SIZE(unmap_buf.fd)) {
			pr_err("%s: err: num_fds (%d) exceeds max value\n",
					__func__, unmap_buf.num_fds);
			return -EINVAL;
		}

		for (i = 0; i < unmap_buf.num_fds; i++) {
			err = qcedev_check_and_unmap_buffer(handle,
					unmap_buf.fd[i]);
			if (err) {
				pr_err(
					"%s: err: failed to unmap fd(%d) - %d\n",
					__func__,
					unmap_buf.fd[i], err);
				return err;
			}
		}
		break;
		}

	default:
		return -ENOTTY;
	}

	return err;
}

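/*
 * Core probe for the "qcom,qcedev" node: initialize the command queue,
 * vote for bus bandwidth, open the QCE hardware, register the misc
 * device, create the ION memory client, and populate the context-bank
 * child devices from DT. Errors unwind in reverse order through the
 * labels at the bottom.
 */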
static int qcedev_probe_device(struct platform_device *pdev)
{
	void *handle = NULL;
	int rc = 0;
	struct qcedev_control *podev;
	struct msm_ce_hw_support *platform_support;

	podev = &qce_dev[0];

	podev->high_bw_req_count = 0;
	INIT_LIST_HEAD(&podev->ready_commands);
	podev->active_command = NULL;

	INIT_LIST_HEAD(&podev->context_banks);

	spin_lock_init(&podev->lock);

	tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);

	podev->platform_support.bus_scale_table = (struct msm_bus_scale_pdata *)
			msm_bus_cl_get_pdata(pdev);
	if (!podev->platform_support.bus_scale_table) {
		pr_err("bus_scale_table is NULL\n");
		return -ENODATA;
	}
	podev->bus_scale_handle = msm_bus_scale_register_client(
			(struct msm_bus_scale_pdata *)
			podev->platform_support.bus_scale_table);
	if (!podev->bus_scale_handle) {
		pr_err("%s not able to get bus scale\n", __func__);
		return -ENOMEM;
	}

	rc = msm_bus_scale_client_update_request(podev->bus_scale_handle, 1);
	if (rc) {
		pr_err("%s Unable to set to high bandwidth\n", __func__);
		goto exit_unregister_bus_scale;
	}
	handle = qce_open(pdev, &rc);
	if (handle == NULL) {
		rc = -ENODEV;
		goto exit_scale_busbandwidth;
	}
	rc = msm_bus_scale_client_update_request(podev->bus_scale_handle, 0);
	if (rc) {
		pr_err("%s Unable to set to low bandwidth\n", __func__);
		goto exit_qce_close;
	}

	podev->qce = handle;
	podev->pdev = pdev;
	platform_set_drvdata(pdev, podev);

	qce_hw_support(podev->qce, &podev->ce_support);
	if (podev->ce_support.bam) {
		podev->platform_support.ce_shared = 0;
		podev->platform_support.shared_ce_resource = 0;
		podev->platform_support.hw_key_support =
				podev->ce_support.hw_key;
		podev->platform_support.sha_hmac = 1;
	} else {
		platform_support =
			(struct msm_ce_hw_support *)pdev->dev.platform_data;
		podev->platform_support.ce_shared = platform_support->ce_shared;
		podev->platform_support.shared_ce_resource =
				platform_support->shared_ce_resource;
		podev->platform_support.hw_key_support =
				platform_support->hw_key_support;
		podev->platform_support.sha_hmac = platform_support->sha_hmac;
	}

	rc = misc_register(&podev->miscdevice);
	if (rc) {
		pr_err("%s: err: register failed for misc: %d\n", __func__, rc);
		goto exit_qce_close;
	}

	podev->mem_client = qcedev_mem_new_client(MEM_ION);
	if (!podev->mem_client) {
		pr_err("%s: err: qcedev_mem_new_client failed\n", __func__);
		rc = -ENOMEM;
		goto err;
	}

	rc = of_platform_populate(pdev->dev.of_node, qcedev_match,
			NULL, &pdev->dev);
	if (rc) {
		pr_err("%s: err: of_platform_populate failed: %d\n",
			__func__, rc);
		goto err;
	}

	return 0;

err:
	if (podev->mem_client)
		qcedev_mem_delete_client(podev->mem_client);
	podev->mem_client = NULL;

	misc_deregister(&podev->miscdevice);
exit_qce_close:
	if (handle)
		qce_close(handle);
exit_scale_busbandwidth:
	msm_bus_scale_client_update_request(podev->bus_scale_handle, 0);
exit_unregister_bus_scale:
	if (podev->platform_support.bus_scale_table != NULL)
		msm_bus_scale_unregister_client(podev->bus_scale_handle);
	podev->bus_scale_handle = 0;
	platform_set_drvdata(pdev, NULL);
	podev->pdev = NULL;
	podev->qce = NULL;

	return rc;
}

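/*
 * The driver binds to two compatibles from qcedev_match: the main
 * "qcom,qcedev" device and its "qcom,qcedev,context-bank" children,
 * which only describe SMMU context banks and are parsed separately.
 */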
static int qcedev_probe(struct platform_device *pdev)
{
	if (of_device_is_compatible(pdev->dev.of_node, "qcom,qcedev"))
		return qcedev_probe_device(pdev);
	else if (of_device_is_compatible(pdev->dev.of_node,
			"qcom,qcedev,context-bank"))
		return qcedev_parse_context_bank(pdev);

	return -EINVAL;
}

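/* Undo probe: close the QCE handle, drop the bus vote, remove the node. */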
static int qcedev_remove(struct platform_device *pdev)
{
	struct qcedev_control *podev;

	podev = platform_get_drvdata(pdev);
	if (!podev)
		return 0;
	if (podev->qce)
		qce_close(podev->qce);

	if (podev->platform_support.bus_scale_table != NULL)
		msm_bus_scale_unregister_client(podev->bus_scale_handle);

	if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
		misc_deregister(&podev->miscdevice);
	tasklet_kill(&podev->done_tasklet);
	return 0;
}

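/*
 * Suspend/resume only drop and restore the high-bandwidth bus vote and
 * clocks while a client still holds one (high_bw_req_count != 0); the
 * count itself is preserved so resume can re-apply the request.
 */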
static int qcedev_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct qcedev_control *podev;
	int ret;

	podev = platform_get_drvdata(pdev);

	if (!podev || !podev->platform_support.bus_scale_table)
		return 0;

	mutex_lock(&qcedev_sent_bw_req);
	if (podev->high_bw_req_count) {
		ret = qcedev_control_clocks(podev, false);
		if (ret)
			goto suspend_exit;
	}

suspend_exit:
	mutex_unlock(&qcedev_sent_bw_req);
	return 0;
}

static int qcedev_resume(struct platform_device *pdev)
{
	struct qcedev_control *podev;
	int ret;

	podev = platform_get_drvdata(pdev);

	if (!podev || !podev->platform_support.bus_scale_table)
		return 0;

	mutex_lock(&qcedev_sent_bw_req);
	if (podev->high_bw_req_count) {
		ret = qcedev_control_clocks(podev, true);
		if (ret)
			goto resume_exit;
	}

resume_exit:
	mutex_unlock(&qcedev_sent_bw_req);
	return 0;
}

static struct platform_driver qcedev_plat_driver = {
	.probe = qcedev_probe,
	.remove = qcedev_remove,
	.suspend = qcedev_suspend,
	.resume = qcedev_resume,
	.driver = {
		.name = "qce",
		.owner = THIS_MODULE,
		.of_match_table = qcedev_match,
	},
};

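/*
 * debugfs statistics: _disp_stats() formats the global counters into
 * _debug_read_buf; reading stats-1 returns the snapshot, any write
 * resets the counters.
 */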
static int _disp_stats(int id)
{
	struct qcedev_stat *pstat;
	int len = 0;

	pstat = &_qcedev_stat;
	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
			"\nQTI QCE dev driver %d Statistics:\n",
			id + 1);

	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   Encryption operation success       : %d\n",
			pstat->qcedev_enc_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   Encryption operation fail          : %d\n",
			pstat->qcedev_enc_fail);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   Decryption operation success       : %d\n",
			pstat->qcedev_dec_success);

	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   Decryption operation fail          : %d\n",
			pstat->qcedev_dec_fail);

	return len;
}

static int _debug_stats_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static ssize_t _debug_stats_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	ssize_t rc = -EINVAL;
	int qcedev = *((int *) file->private_data);
	int len;

	len = _disp_stats(qcedev);

	if (len <= count)
		rc = simple_read_from_buffer((void __user *) buf, len,
				ppos, (void *) _debug_read_buf, len);
	return rc;
}

static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
	memset((char *)&_qcedev_stat, 0, sizeof(struct qcedev_stat));
	return count;
}

static const struct file_operations _debug_stats_ops = {
	.open = _debug_stats_open,
	.read = _debug_stats_read,
	.write = _debug_stats_write,
};

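/*
 * Create /sys/kernel/debug/qcedev/stats-1; on failure the qcedev
 * directory is removed again.
 */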
static int _qcedev_debug_init(void)
{
	int rc;
	char name[DEBUG_MAX_FNAME];
	struct dentry *dent;

	_debug_dent = debugfs_create_dir("qcedev", NULL);
	if (IS_ERR(_debug_dent)) {
		pr_err("qcedev debugfs_create_dir fail, error %ld\n",
				PTR_ERR(_debug_dent));
		return PTR_ERR(_debug_dent);
	}

	snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", 1);
	_debug_qcedev = 0;
	dent = debugfs_create_file(name, 0644, _debug_dent,
			&_debug_qcedev, &_debug_stats_ops);
	if (IS_ERR_OR_NULL(dent)) {
		pr_err("qcedev debugfs_create_file fail, error %ld\n",
				PTR_ERR(dent));
		/* PTR_ERR(NULL) is 0; a NULL dentry must still fail */
		rc = dent ? PTR_ERR(dent) : -ENOMEM;
		goto err;
	}
	return 0;
err:
	debugfs_remove_recursive(_debug_dent);
	return rc;
}

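/* Module entry: set up debugfs first, then register the platform driver. */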
static int qcedev_init(void)
{
	int rc;

	rc = _qcedev_debug_init();
	if (rc)
		return rc;
	return platform_driver_register(&qcedev_plat_driver);
}

static void qcedev_exit(void)
{
	debugfs_remove_recursive(_debug_dent);
	platform_driver_unregister(&qcedev_plat_driver);
}

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI DEV Crypto driver");

module_init(qcedev_init);
module_exit(qcedev_exit);