/*
 * QTI CE device driver.
 *
 * Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/platform_data/qcom_crypto_device.h>
#include <linux/msm-bus.h>
#include <linux/qcedev.h>

#include <crypto/hash.h>
#include "qcedevi.h"
#include "qce.h"
#include "qcedev_smmu.h"

#include <linux/compat.h>
#include "compat_qcedev.h"

#define CACHE_LINE_SIZE 32
#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE

/* standard initialization vector for SHA-1, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha1_uint8[] = {
	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
	0xC3, 0xD2, 0xE1, 0xF0
};

/* standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha256_uint8[] = {
	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
};

static DEFINE_MUTEX(send_cmd_lock);
static DEFINE_MUTEX(qcedev_sent_bw_req);
static DEFINE_MUTEX(hash_access_lock);

static const struct of_device_id qcedev_match[] = {
	{ .compatible = "qcom,qcedev"},
	{ .compatible = "qcom,qcedev,context-bank"},
	{}
};

MODULE_DEVICE_TABLE(of, qcedev_match);

static int qcedev_control_clocks(struct qcedev_control *podev, bool enable)
{
	unsigned int control_flag;
	int ret = 0;

	if (podev->ce_support.req_bw_before_clk) {
		if (enable)
			control_flag = QCE_BW_REQUEST_FIRST;
		else
			control_flag = QCE_CLK_DISABLE_FIRST;
	} else {
		if (enable)
			control_flag = QCE_CLK_ENABLE_FIRST;
		else
			control_flag = QCE_BW_REQUEST_RESET_FIRST;
	}

	switch (control_flag) {
	case QCE_CLK_ENABLE_FIRST:
		ret = qce_enable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to enable clk\n", __func__);
			return ret;
		}
		ret = msm_bus_scale_client_update_request(
				podev->bus_scale_handle, 1);
		if (ret) {
			pr_err("%s Unable to set high bw\n", __func__);
			ret = qce_disable_clk(podev->qce);
			if (ret)
				pr_err("%s Unable to disable clk\n", __func__);
			return ret;
		}
		break;
	case QCE_BW_REQUEST_FIRST:
		ret = msm_bus_scale_client_update_request(
				podev->bus_scale_handle, 1);
		if (ret) {
			pr_err("%s Unable to set high bw\n", __func__);
			return ret;
		}
		ret = qce_enable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to enable clk\n", __func__);
			ret = msm_bus_scale_client_update_request(
					podev->bus_scale_handle, 0);
			if (ret)
				pr_err("%s Unable to set low bw\n", __func__);
			return ret;
		}
		break;
	case QCE_CLK_DISABLE_FIRST:
		ret = qce_disable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to disable clk\n", __func__);
			return ret;
		}
		ret = msm_bus_scale_client_update_request(
				podev->bus_scale_handle, 0);
		if (ret) {
			pr_err("%s Unable to set low bw\n", __func__);
			ret = qce_enable_clk(podev->qce);
			if (ret)
				pr_err("%s Unable to enable clk\n", __func__);
			return ret;
		}
		break;
	case QCE_BW_REQUEST_RESET_FIRST:
		ret = msm_bus_scale_client_update_request(
				podev->bus_scale_handle, 0);
		if (ret) {
			pr_err("%s Unable to set low bw\n", __func__);
			return ret;
		}
		ret = qce_disable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to disable clk\n", __func__);
			ret = msm_bus_scale_client_update_request(
					podev->bus_scale_handle, 1);
			if (ret)
				pr_err("%s Unable to set high bw\n", __func__);
			return ret;
		}
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

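/*
 * Refcounted bandwidth voting: the first opener votes the bus and clocks
 * up, the last closer votes them back down, with qcedev_control_clocks()
 * picking the safe ordering of the clock and bus calls for the target.
 * The high_bw_req_count bookkeeping is serialized by qcedev_sent_bw_req.
 */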
static void qcedev_ce_high_bw_req(struct qcedev_control *podev,
				bool high_bw_req)
{
	int ret = 0;

	mutex_lock(&qcedev_sent_bw_req);
	if (high_bw_req) {
		if (podev->high_bw_req_count == 0) {
			ret = qcedev_control_clocks(podev, true);
			if (ret)
				goto exit_unlock_mutex;
		}
		podev->high_bw_req_count++;
	} else {
		if (podev->high_bw_req_count == 1) {
			ret = qcedev_control_clocks(podev, false);
			if (ret)
				goto exit_unlock_mutex;
		}
		podev->high_bw_req_count--;
	}

exit_unlock_mutex:
	mutex_unlock(&qcedev_sent_bw_req);
}

#define QCEDEV_MAGIC 0x56434544 /* "qced" */

static int qcedev_open(struct inode *inode, struct file *file);
static int qcedev_release(struct inode *inode, struct file *file);
static int start_cipher_req(struct qcedev_control *podev);
static int start_sha_req(struct qcedev_control *podev);
static inline long qcedev_ioctl(struct file *file,
				unsigned int cmd, unsigned long arg);

#ifdef CONFIG_COMPAT
#include "compat_qcedev.c"
#else
#define compat_qcedev_ioctl NULL
#endif

static const struct file_operations qcedev_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qcedev_ioctl,
	.compat_ioctl = compat_qcedev_ioctl,
	.open = qcedev_open,
	.release = qcedev_release,
};

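/*
 * Illustrative userspace usage (a sketch, not part of this driver): a
 * client opens the misc device registered below and drives it through
 * the ioctls declared in linux/qcedev.h, roughly:
 *
 *	int fd = open("/dev/qce", O_RDWR);
 *	struct qcedev_cipher_op_req req = { 0 };
 *	// fill in alg, mode, key, IV and vbuf src/dst entries
 *	ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req);
 *	close(fd);
 *
 * Error handling is omitted here for brevity.
 */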
static struct qcedev_control qce_dev[] = {
	{
		.miscdevice = {
			.minor = MISC_DYNAMIC_MINOR,
			.name = "qce",
			.fops = &qcedev_fops,
		},
		.magic = QCEDEV_MAGIC,
	},
};

#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev)
#define DEBUG_MAX_FNAME  16
#define DEBUG_MAX_RW_BUF 1024

struct qcedev_stat {
	u32 qcedev_dec_success;
	u32 qcedev_dec_fail;
	u32 qcedev_enc_success;
	u32 qcedev_enc_fail;
	u32 qcedev_sha_success;
	u32 qcedev_sha_fail;
};

static struct qcedev_stat _qcedev_stat;
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
static int _debug_qcedev;

static struct qcedev_control *qcedev_minor_to_control(unsigned int n)
{
	int i;

	for (i = 0; i < MAX_QCE_DEVICE; i++) {
		if (qce_dev[i].miscdevice.minor == n)
			return &qce_dev[i];
	}
	return NULL;
}

static int qcedev_open(struct inode *inode, struct file *file)
{
	struct qcedev_handle *handle;
	struct qcedev_control *podev;

	podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
	if (podev == NULL) {
		pr_err("%s: no such device %d\n", __func__,
			MINOR(inode->i_rdev));
		return -ENOENT;
	}

	handle = kzalloc(sizeof(struct qcedev_handle), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	handle->cntl = podev;
	file->private_data = handle;
	if (podev->platform_support.bus_scale_table != NULL)
		qcedev_ce_high_bw_req(podev, true);
	return 0;
}

static int qcedev_release(struct inode *inode, struct file *file)
{
	struct qcedev_control *podev;
	struct qcedev_handle *handle;

	handle = file->private_data;
	podev = handle->cntl;
	if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
		pr_err("%s: invalid handle %pK\n",
			__func__, podev);
	}
	kzfree(handle);
	file->private_data = NULL;
	if (podev != NULL && podev->platform_support.bus_scale_table != NULL)
		qcedev_ce_high_bw_req(podev, false);
	return 0;
}

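/*
 * Completion tasklet: retires the request that just finished on the CE,
 * then pulls the next request off ready_commands and starts it. If that
 * start fails, the new request is completed with an error and the loop
 * tries the next one, so the queue cannot stall on a bad request.
 */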
static void req_done(unsigned long data)
{
	struct qcedev_control *podev = (struct qcedev_control *)data;
	struct qcedev_async_req *areq;
	unsigned long flags = 0;
	struct qcedev_async_req *new_req = NULL;
	int ret = 0;

	spin_lock_irqsave(&podev->lock, flags);
	areq = podev->active_command;
	podev->active_command = NULL;

again:
	if (!list_empty(&podev->ready_commands)) {
		new_req = container_of(podev->ready_commands.next,
					struct qcedev_async_req, list);
		list_del(&new_req->list);
		podev->active_command = new_req;
		new_req->err = 0;
		if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev);
		else
			ret = start_sha_req(podev);
	}

	spin_unlock_irqrestore(&podev->lock, flags);

	if (areq)
		complete(&areq->complete);

	if (new_req && ret) {
		complete(&new_req->complete);
		spin_lock_irqsave(&podev->lock, flags);
		podev->active_command = NULL;
		areq = NULL;
		ret = 0;
		new_req = NULL;
		goto again;
	}
}

void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
	unsigned char *authdata, int ret)
{
	struct qcedev_sha_req *areq;
	struct qcedev_control *pdev;
	struct qcedev_handle *handle;

	uint32_t *auth32 = (uint32_t *)authdata;

	areq = (struct qcedev_sha_req *) cookie;
	handle = (struct qcedev_handle *) areq->cookie;
	pdev = handle->cntl;

	if (digest)
		memcpy(&handle->sha_ctxt.digest[0], digest, 32);

	if (authdata) {
		handle->sha_ctxt.auth_data[0] = auth32[0];
		handle->sha_ctxt.auth_data[1] = auth32[1];
	}

	tasklet_schedule(&pdev->done_tasklet);
}

void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
	unsigned char *iv, int ret)
{
	struct qcedev_cipher_req *areq;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req *qcedev_areq;

	areq = (struct qcedev_cipher_req *) cookie;
	handle = (struct qcedev_handle *) areq->cookie;
	podev = handle->cntl;
	qcedev_areq = podev->active_command;

	if (iv)
		memcpy(&qcedev_areq->cipher_op_req.iv[0], iv,
			qcedev_areq->cipher_op_req.ivlen);
	tasklet_schedule(&podev->done_tasklet);
}

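/*
 * Translate the user-facing qcedev_cipher_op_req on the active command
 * into a qce_req and hand it to the QCE core. A zero-length, all-zero
 * key selects the hardware-backed key path when the target supports it;
 * PMEM-based requests are rejected outright.
 */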
static int start_cipher_req(struct qcedev_control *podev)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_req creq;
	int ret = 0;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
	if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM) {
		pr_err("%s: Use of PMEM is not supported\n", __func__);
		goto unsupported;
	}
	creq.pmem = NULL;
	switch (qcedev_areq->cipher_op_req.alg) {
	case QCEDEV_ALG_DES:
		creq.alg = CIPHER_ALG_DES;
		break;
	case QCEDEV_ALG_3DES:
		creq.alg = CIPHER_ALG_3DES;
		break;
	case QCEDEV_ALG_AES:
		creq.alg = CIPHER_ALG_AES;
		break;
	default:
		return -EINVAL;
	}

	switch (qcedev_areq->cipher_op_req.mode) {
	case QCEDEV_AES_MODE_CBC:
	case QCEDEV_DES_MODE_CBC:
		creq.mode = QCE_MODE_CBC;
		break;
	case QCEDEV_AES_MODE_ECB:
	case QCEDEV_DES_MODE_ECB:
		creq.mode = QCE_MODE_ECB;
		break;
	case QCEDEV_AES_MODE_CTR:
		creq.mode = QCE_MODE_CTR;
		break;
	case QCEDEV_AES_MODE_XTS:
		creq.mode = QCE_MODE_XTS;
		break;
	default:
		return -EINVAL;
	}

	if ((creq.alg == CIPHER_ALG_AES) &&
		(creq.mode == QCE_MODE_CTR)) {
		creq.dir = QCE_ENCRYPT;
	} else {
		if (qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC)
			creq.dir = QCE_ENCRYPT;
		else
			creq.dir = QCE_DECRYPT;
	}

	creq.iv = &qcedev_areq->cipher_op_req.iv[0];
	creq.ivsize = qcedev_areq->cipher_op_req.ivlen;

	creq.enckey = &qcedev_areq->cipher_op_req.enckey[0];
	creq.encklen = qcedev_areq->cipher_op_req.encklen;

	creq.cryptlen = qcedev_areq->cipher_op_req.data_len;

	if (qcedev_areq->cipher_op_req.encklen == 0) {
		if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY)
			|| (qcedev_areq->cipher_op_req.op ==
				QCEDEV_OPER_DEC_NO_KEY))
			creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
		else {
			int i;

			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
				if (qcedev_areq->cipher_op_req.enckey[i] != 0)
					break;
			}

			if ((podev->platform_support.hw_key_support == 1) &&
						(i == QCEDEV_MAX_KEY_SIZE))
				creq.op = QCE_REQ_ABLK_CIPHER;
			else {
				ret = -EINVAL;
				goto unsupported;
			}
		}
	} else {
		creq.op = QCE_REQ_ABLK_CIPHER;
	}

	creq.qce_cb = qcedev_cipher_req_cb;
	creq.areq = (void *)&qcedev_areq->cipher_req;
	creq.flags = 0;
	ret = qce_ablk_cipher_req(podev->qce, &creq);
unsupported:
	if (ret)
		qcedev_areq->err = -ENXIO;
	else
		qcedev_areq->err = 0;
	return ret;
}

static int start_sha_req(struct qcedev_control *podev)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_sha_req sreq;
	int ret = 0;
	struct qcedev_handle *handle;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	handle = qcedev_areq->handle;

	switch (qcedev_areq->sha_op_req.alg) {
	case QCEDEV_ALG_SHA1:
		sreq.alg = QCE_HASH_SHA1;
		break;
	case QCEDEV_ALG_SHA256:
		sreq.alg = QCE_HASH_SHA256;
		break;
	case QCEDEV_ALG_SHA1_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA1_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];
			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
		} else {
			sreq.alg = QCE_HASH_SHA1;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_SHA256_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA256_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];
			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
		} else {
			sreq.alg = QCE_HASH_SHA256;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_AES_CMAC:
		sreq.alg = QCE_HASH_AES_CMAC;
		sreq.authkey = &handle->sha_ctxt.authkey[0];
		sreq.authklen = qcedev_areq->sha_op_req.authklen;
		break;
	default:
		pr_err("Algorithm %d not supported, exiting\n",
			qcedev_areq->sha_op_req.alg);
		return -EINVAL;
	}

	qcedev_areq->sha_req.cookie = handle;

	sreq.qce_cb = qcedev_sha_req_cb;
	if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
		sreq.auth_data[0] = handle->sha_ctxt.auth_data[0];
		sreq.auth_data[1] = handle->sha_ctxt.auth_data[1];
		sreq.auth_data[2] = handle->sha_ctxt.auth_data[2];
		sreq.auth_data[3] = handle->sha_ctxt.auth_data[3];
		sreq.digest = &handle->sha_ctxt.digest[0];
		sreq.first_blk = handle->sha_ctxt.first_blk;
		sreq.last_blk = handle->sha_ctxt.last_blk;
	}
	sreq.size = qcedev_areq->sha_req.sreq.nbytes;
	sreq.src = qcedev_areq->sha_req.sreq.src;
	sreq.areq = (void *)&qcedev_areq->sha_req;
	sreq.flags = 0;

	ret = qce_process_sha_req(podev->qce, &sreq);

	if (ret)
		qcedev_areq->err = -ENXIO;
	else
		qcedev_areq->err = 0;
	return ret;
}

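/*
 * Single-issue dispatcher: if the engine is idle the request starts
 * immediately, otherwise it queues on ready_commands and req_done()
 * starts it later. The caller then sleeps on the request's completion,
 * so the ioctl path is synchronous even though the CE runs async.
 */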
static int submit_req(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle)
{
	struct qcedev_control *podev;
	unsigned long flags = 0;
	int ret = 0;
	struct qcedev_stat *pstat;

	qcedev_areq->err = 0;
	podev = handle->cntl;

	spin_lock_irqsave(&podev->lock, flags);

	if (podev->active_command == NULL) {
		podev->active_command = qcedev_areq;
		if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev);
		else
			ret = start_sha_req(podev);
	} else {
		list_add_tail(&qcedev_areq->list, &podev->ready_commands);
	}

	if (ret != 0)
		podev->active_command = NULL;

	spin_unlock_irqrestore(&podev->lock, flags);

	if (ret == 0)
		wait_for_completion(&qcedev_areq->complete);

	if (ret)
		qcedev_areq->err = -EIO;

	pstat = &_qcedev_stat;
	if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
		switch (qcedev_areq->cipher_op_req.op) {
		case QCEDEV_OPER_DEC:
			if (qcedev_areq->err)
				pstat->qcedev_dec_fail++;
			else
				pstat->qcedev_dec_success++;
			break;
		case QCEDEV_OPER_ENC:
			if (qcedev_areq->err)
				pstat->qcedev_enc_fail++;
			else
				pstat->qcedev_enc_success++;
			break;
		default:
			break;
		}
	} else {
		if (qcedev_areq->err)
			pstat->qcedev_sha_fail++;
		else
			pstat->qcedev_sha_success++;
	}

	return qcedev_areq->err;
}

static int qcedev_sha_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	struct qcedev_sha_ctxt *sha_ctxt = &handle->sha_ctxt;

	memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt));
	sha_ctxt->first_blk = 1;

	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) {
		memcpy(&sha_ctxt->digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		sha_ctxt->diglen = SHA1_DIGEST_SIZE;
	} else {
		if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) {
			memcpy(&sha_ctxt->digest[0],
				&_std_init_vector_sha256_uint8[0],
				SHA256_DIGEST_SIZE);
			sha_ctxt->diglen = SHA256_DIGEST_SIZE;
		}
	}
	sha_ctxt->init_done = true;
	return 0;
}

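/*
 * Stage one update (at most QCE_MAX_OPER_DATA bytes) through a
 * cache-line aligned bounce buffer. Only whole hash blocks can go to
 * the engine, so the residue carried in trailing_buf is prepended and
 * any new partial block is held back for the next update/final. E.g.
 * with SHA-256 (64-byte blocks), 10 carried bytes plus 100 new bytes
 * hash 64 bytes and carry the remaining 46.
 */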
static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	int err = 0;
	int i = 0;
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;

	uint32_t sha_pad_len = 0;
	uint32_t trailing_buf_len = 0;
	uint32_t t_buf = handle->sha_ctxt.trailing_buf_len;
	uint32_t sha_block_size;

	total = qcedev_areq->sha_op_req.data_len + t_buf;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	if (total <= sha_block_size) {
		uint32_t len = qcedev_areq->sha_op_req.data_len;

		i = 0;

		k_src = &handle->sha_ctxt.trailing_buf[t_buf];

		/* Copy data from user src(s) */
		while (len > 0) {
			user_src =
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
			if (user_src && copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len))
				return -EFAULT;

			len -= qcedev_areq->sha_op_req.data[i].len;
			k_src += qcedev_areq->sha_op_req.data[i].len;
			i++;
		}
		handle->sha_ctxt.trailing_buf_len = total;

		return 0;
	}

	k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
				GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
							CACHE_LINE_SIZE);
	k_src = k_align_src;

	/* check for trailing buffer from previous updates and append it */
	if (t_buf > 0) {
		memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
								t_buf);
		k_src += t_buf;
	}

	/* Copy data from user src(s) */
	user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
	if (user_src && copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[0].len)) {
		kzfree(k_buf_src);
		return -EFAULT;
	}
	k_src += qcedev_areq->sha_op_req.data[0].len;
	for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src = (void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && copy_from_user(k_src,
					(void __user *)user_src,
					qcedev_areq->sha_op_req.data[i].len)) {
			kzfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	/* get new trailing buffer */
	sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total;
	trailing_buf_len = CE_SHA_BLOCK_SIZE - sha_pad_len;

	qcedev_areq->sha_req.sreq.src = sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src,
						total - trailing_buf_len);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len;

	/* update sha_ctxt trailing buf content to new trailing buf */
	if (trailing_buf_len > 0) {
		memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
		memcpy(&handle->sha_ctxt.trailing_buf[0],
			(k_src - trailing_buf_len),
			trailing_buf_len);
	}
	handle->sha_ctxt.trailing_buf_len = trailing_buf_len;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;

	kzfree(k_buf_src);
	return err;
}

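/*
 * Split an arbitrarily large update into QCE_MAX_OPER_DATA (32 KB)
 * chunks, rewriting the data vector in place for each chunk and
 * restoring the caller's request structure afterwards. Oversized
 * single entries are consumed QCE_MAX_OPER_DATA bytes at a time;
 * runs of small entries are batched up to the same limit.
 */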
static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	int err = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	int num_entries = 0;
	uint32_t total = 0;

	if (handle->sha_ctxt.init_done == false) {
		pr_err("%s Init was not called\n", __func__);
		return -EINVAL;
	}

	if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) {

		struct qcedev_sha_op_req *saved_req;
		struct qcedev_sha_op_req req;
		struct qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req;

		/* save the original req structure */
		saved_req =
			kmalloc(sizeof(struct qcedev_sha_op_req), GFP_KERNEL);
		if (saved_req == NULL) {
			pr_err("%s:Can't Allocate mem:saved_req 0x%lx\n",
					__func__, (uintptr_t)saved_req);
			return -ENOMEM;
		}
		memcpy(&req, sreq, sizeof(struct qcedev_sha_op_req));
		memcpy(saved_req, sreq, sizeof(struct qcedev_sha_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (sreq->data[i].len > QCE_MAX_OPER_DATA) {
				sreq->data[0].len = QCE_MAX_OPER_DATA;
				if (i > 0) {
					sreq->data[0].vaddr =
							sreq->data[i].vaddr;
				}

				sreq->data_len = QCE_MAX_OPER_DATA;
				sreq->entries = 1;

				err = qcedev_sha_update_max_xfer(qcedev_areq,
								handle, sg_src);

				sreq->data[i].len = req.data[i].len -
							QCE_MAX_OPER_DATA;
				sreq->data[i].vaddr = req.data[i].vaddr +
							QCE_MAX_OPER_DATA;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;
			} else {
				total = 0;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + sreq->data[j].len) >=
							QCE_MAX_OPER_DATA) {
						sreq->data[j].len =
						(QCE_MAX_OPER_DATA - total);
						total = QCE_MAX_OPER_DATA;
						break;
					}
					total += sreq->data[j].len;
				}

				sreq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						sreq->data[k].len =
							sreq->data[i+k].len;
						sreq->data[k].vaddr =
							sreq->data[i+k].vaddr;
					}
				sreq->entries = num_entries;

				i = j;
				err = qcedev_sha_update_max_xfer(qcedev_areq,
								handle, sg_src);
				num_entries = 0;

				sreq->data[i].vaddr = req.data[i].vaddr +
							sreq->data[i].len;
				sreq->data[i].len = req.data[i].len -
							sreq->data[i].len;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;

				if (sreq->data[i].len == 0)
					i++;
			}
		} /* end of while ((i < req.entries) && (err == 0)) */

		/* Restore the original req structure */
		for (i = 0; i < saved_req->entries; i++) {
			sreq->data[i].len = saved_req->data[i].len;
			sreq->data[i].vaddr = saved_req->data[i].vaddr;
		}
		sreq->entries = saved_req->entries;
		sreq->data_len = saved_req->data_len;
		kzfree(saved_req);
	} else
		err = qcedev_sha_update_max_xfer(qcedev_areq, handle, sg_src);

	return err;
}

static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint32_t total;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;

	if (handle->sha_ctxt.init_done == false) {
		pr_err("%s Init was not called\n", __func__);
		return -EINVAL;
	}

	handle->sha_ctxt.last_blk = 1;

	total = handle->sha_ctxt.trailing_buf_len;

	if (total) {
		k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
					GFP_KERNEL);
		if (k_buf_src == NULL)
			return -ENOMEM;

		k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
							CACHE_LINE_SIZE);
		memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total);
	}
	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src, total);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.first_blk = 0;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;
	handle->sha_ctxt.trailing_buf_len = 0;
	handle->sha_ctxt.init_done = false;
	memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);

	kzfree(k_buf_src);
	qcedev_areq->sha_req.sreq.src = NULL;
	return err;
}

static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle,
					struct scatterlist *sg_src)
{
	int err = 0;
	int i = 0;
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;

	total = qcedev_areq->sha_op_req.data_len;

	if (copy_from_user(&handle->sha_ctxt.authkey[0],
				(void __user *)qcedev_areq->sha_op_req.authkey,
				qcedev_areq->sha_op_req.authklen))
		return -EFAULT;

	k_buf_src = kmalloc(total, GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_src = k_buf_src;

	/* Copy data from user src(s) */
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src =
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && copy_from_user(k_src, (void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len)) {
			kzfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	qcedev_areq->sha_req.sreq.src = sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_buf_src, total);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total;
	handle->sha_ctxt.diglen = qcedev_areq->sha_op_req.diglen;
	err = submit_req(qcedev_areq, handle);

	kzfree(k_buf_src);
	return err;
}

static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq,
					struct qcedev_handle *handle,
					struct scatterlist *sg_src)
{
	int err = 0;

	if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
		qcedev_sha_init(areq, handle);
		if (copy_from_user(&handle->sha_ctxt.authkey[0],
				(void __user *)areq->sha_op_req.authkey,
				areq->sha_op_req.authklen))
			return -EFAULT;
	} else {
		struct qcedev_async_req authkey_areq;
		uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];

		init_completion(&authkey_areq.complete);

		authkey_areq.sha_op_req.entries = 1;
		authkey_areq.sha_op_req.data[0].vaddr =
						areq->sha_op_req.authkey;
		authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.diglen = 0;
		authkey_areq.handle = handle;

		memset(&authkey_areq.sha_op_req.digest[0], 0,
						QCEDEV_MAX_SHA_DIGEST);
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1;
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256;

		authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;

		qcedev_sha_init(&authkey_areq, handle);
		err = qcedev_sha_update(&authkey_areq, handle, sg_src);
		if (!err)
			err = qcedev_sha_final(&authkey_areq, handle);
		else
			return err;
		memcpy(&authkey[0], &handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		qcedev_sha_init(areq, handle);

		memcpy(&handle->sha_ctxt.authkey[0], &authkey[0],
				handle->sha_ctxt.diglen);
	}
	return err;
}

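/*
 * Software HMAC outer pass, used when the CE lacks native HMAC: hash the
 * (key XOR opad) block already staged in trailing_buf as a fresh first
 * block, and leave the inner digest in trailing_buf so the following
 * qcedev_sha_final() completes H(K ^ opad || H(K ^ ipad || msg)).
 */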
static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint8_t *k_src = NULL;
	uint32_t sha_block_size = 0;
	uint32_t sha_digest_size = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		sha_digest_size = SHA1_DIGEST_SIZE;
		sha_block_size = SHA1_BLOCK_SIZE;
	} else {
		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
			sha_digest_size = SHA256_DIGEST_SIZE;
			sha_block_size = SHA256_BLOCK_SIZE;
		}
	}
	k_src = kmalloc(sha_block_size, GFP_KERNEL);
	if (k_src == NULL)
		return -ENOMEM;

	/* check for trailing buffer from previous updates and append it */
	memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
			handle->sha_ctxt.trailing_buf_len);

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = sha_block_size;
	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
	memcpy(&handle->sha_ctxt.trailing_buf[0], &handle->sha_ctxt.digest[0],
						sha_digest_size);
	handle->sha_ctxt.trailing_buf_len = sha_digest_size;

	handle->sha_ctxt.first_blk = 1;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		memcpy(&handle->sha_ctxt.digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		handle->sha_ctxt.diglen = SHA1_DIGEST_SIZE;
	}

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
		memcpy(&handle->sha_ctxt.digest[0],
			&_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE);
		handle->sha_ctxt.diglen = SHA256_DIGEST_SIZE;
	}
	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;

	kzfree(k_src);
	qcedev_areq->sha_req.sreq.src = NULL;
	return err;
}

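/*
 * Stage the HMAC inner/outer pad block per RFC 2104: trailing_buf is
 * loaded with (key XOR 0x36) for the inner hash or (key XOR 0x5c) for
 * the outer hash, padded to the hash block size, giving
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).
 */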
static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq,
				struct qcedev_handle *handle, bool ikey)
{
	int i;
	uint32_t constant;
	uint32_t sha_block_size;

	if (ikey)
		constant = 0x36;
	else
		constant = 0x5c;

	if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
	for (i = 0; i < sha_block_size; i++)
		handle->sha_ctxt.trailing_buf[i] =
			(handle->sha_ctxt.authkey[i] ^ constant);

	handle->sha_ctxt.trailing_buf_len = sha_block_size;
	return 0;
}

static int qcedev_hmac_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	int err;
	struct qcedev_control *podev = handle->cntl;

	err = qcedev_set_hmac_auth_key(areq, handle, sg_src);
	if (err)
		return err;
	if (!podev->ce_support.sha_hmac)
		qcedev_hmac_update_iokey(areq, handle, true);
	return 0;
}

static int qcedev_hmac_final(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	int err;
	struct qcedev_control *podev = handle->cntl;

	err = qcedev_sha_final(areq, handle);
	if (podev->ce_support.sha_hmac)
		return err;

	qcedev_hmac_update_iokey(areq, handle, false);
	err = qcedev_hmac_get_ohash(areq, handle);
	if (err)
		return err;
	err = qcedev_sha_final(areq, handle);

	return err;
}

static int qcedev_hash_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_init(areq, handle);
	else
		return qcedev_hmac_init(areq, handle, sg_src);
}

static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	return qcedev_sha_update(qcedev_areq, handle, sg_src);
}

static int qcedev_hash_final(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_final(areq, handle);
	else
		return qcedev_hmac_final(areq, handle);
}

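/*
 * One bounce-buffer pass of at most QCE_MAX_OPER_DATA bytes: gather the
 * user source entries into the aligned kernel buffer, run the cipher in
 * place on a single scatterlist entry, then scatter the result back to
 * the user destination entries, advancing *di across calls. For CTR,
 * byteoffset shifts the payload within the first counter block.
 */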
static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
				int *di, struct qcedev_handle *handle,
				uint8_t *k_align_src)
{
	int err = 0;
	int i = 0;
	int dst_i = *di;
	struct scatterlist sg_src;
	uint32_t byteoffset = 0;
	uint8_t *user_src = NULL;
	uint8_t *k_align_dst = k_align_src;
	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;

	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->cipher_op_req.byteoffset;

	user_src = (void __user *)areq->cipher_op_req.vbuf.src[0].vaddr;
	if (user_src && copy_from_user((k_align_src + byteoffset),
				(void __user *)user_src,
				areq->cipher_op_req.vbuf.src[0].len))
		return -EFAULT;

	k_align_src += byteoffset + areq->cipher_op_req.vbuf.src[0].len;

	for (i = 1; i < areq->cipher_op_req.entries; i++) {
		user_src =
			(void __user *)areq->cipher_op_req.vbuf.src[i].vaddr;
		if (user_src && copy_from_user(k_align_src,
					(void __user *)user_src,
					areq->cipher_op_req.vbuf.src[i].len)) {
			return -EFAULT;
		}
		k_align_src += areq->cipher_op_req.vbuf.src[i].len;
	}

	/* restore src beginning */
	k_align_src = k_align_dst;
	areq->cipher_op_req.data_len += byteoffset;

	areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
	areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;

	/* In place encryption/decryption */
	sg_set_buf(areq->cipher_req.creq.src,
				k_align_dst,
				areq->cipher_op_req.data_len);
	sg_mark_end(areq->cipher_req.creq.src);

	areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
	areq->cipher_req.creq.info = areq->cipher_op_req.iv;
	areq->cipher_op_req.entries = 1;

	err = submit_req(areq, handle);

	/* copy data to destination buffer */
	creq->data_len -= byteoffset;

	while (creq->data_len > 0) {
		if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
			if (err == 0 && copy_to_user(
				(void __user *)creq->vbuf.dst[dst_i].vaddr,
				(k_align_dst + byteoffset),
				creq->vbuf.dst[dst_i].len)) {
				err = -EFAULT;
				goto exit;
			}

			k_align_dst += creq->vbuf.dst[dst_i].len +
						byteoffset;
			creq->data_len -= creq->vbuf.dst[dst_i].len;
			dst_i++;
		} else {
			if (err == 0 && copy_to_user(
				(void __user *)creq->vbuf.dst[dst_i].vaddr,
				(k_align_dst + byteoffset),
				creq->data_len)) {
				err = -EFAULT;
				goto exit;
			}

			k_align_dst += creq->data_len;
			creq->vbuf.dst[dst_i].len -= creq->data_len;
			creq->vbuf.dst[dst_i].vaddr += creq->data_len;
			creq->data_len = 0;
		}
	}
	*di = dst_i;
exit:
	areq->cipher_req.creq.src = NULL;
	areq->cipher_req.creq.dst = NULL;
	return err;
}

static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
					struct qcedev_handle *handle)
{
	int err = 0;
	int di = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	uint32_t byteoffset = 0;
	int num_entries = 0;
	uint32_t total = 0;
	uint32_t len;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;
	uint32_t max_data_xfer;
	struct qcedev_cipher_op_req *saved_req;
	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;

	total = 0;

	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->cipher_op_req.byteoffset;
	k_buf_src = kmalloc(QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2,
				GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;
	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
							CACHE_LINE_SIZE);
	max_data_xfer = QCE_MAX_OPER_DATA - byteoffset;

	saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
	if (saved_req == NULL) {
		kzfree(k_buf_src);
		return -ENOMEM;
	}
	memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));

	if (areq->cipher_op_req.data_len > max_data_xfer) {
		struct qcedev_cipher_op_req req;

		/* save the original req structure */
		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (creq->vbuf.src[i].len > max_data_xfer) {
				creq->vbuf.src[0].len = max_data_xfer;
				if (i > 0) {
					creq->vbuf.src[0].vaddr =
						creq->vbuf.src[i].vaddr;
				}

				creq->data_len = max_data_xfer;
				creq->entries = 1;

				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
						&di, handle, k_align_src);
				if (err < 0) {
					kzfree(k_buf_src);
					kzfree(saved_req);
					return err;
				}

				creq->vbuf.src[i].len = req.vbuf.src[i].len -
							max_data_xfer;
				creq->vbuf.src[i].vaddr =
						req.vbuf.src[i].vaddr +
						max_data_xfer;
				req.vbuf.src[i].vaddr =
						creq->vbuf.src[i].vaddr;
				req.vbuf.src[i].len = creq->vbuf.src[i].len;

			} else {
				total = areq->cipher_op_req.byteoffset;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + creq->vbuf.src[j].len)
							>= max_data_xfer) {
						creq->vbuf.src[j].len =
							max_data_xfer - total;
						total = max_data_xfer;
						break;
					}
					total += creq->vbuf.src[j].len;
				}

				creq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						creq->vbuf.src[k].len =
							creq->vbuf.src[i+k].len;
						creq->vbuf.src[k].vaddr =
							creq->vbuf.src[i+k].vaddr;
					}
				creq->entries = num_entries;

				i = j;
				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
						&di, handle, k_align_src);
				if (err < 0) {
					kzfree(k_buf_src);
					kzfree(saved_req);
					return err;
				}

				num_entries = 0;
				areq->cipher_op_req.byteoffset = 0;

				creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr
					+ creq->vbuf.src[i].len;
				creq->vbuf.src[i].len = req.vbuf.src[i].len -
							creq->vbuf.src[i].len;

				req.vbuf.src[i].vaddr =
						creq->vbuf.src[i].vaddr;
				req.vbuf.src[i].len = creq->vbuf.src[i].len;

				if (creq->vbuf.src[i].len == 0)
					i++;
			}

			areq->cipher_op_req.byteoffset = 0;
			max_data_xfer = QCE_MAX_OPER_DATA;
			byteoffset = 0;

		} /* end of while ((i < req.entries) && (err == 0)) */
	} else
		err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, handle,
								k_align_src);

	/* Restore the original req structure */
	for (i = 0; i < saved_req->entries; i++) {
		creq->vbuf.src[i].len = saved_req->vbuf.src[i].len;
		creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr;
	}
	for (len = 0, i = 0; len < saved_req->data_len; i++) {
		creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len;
		creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr;
		len += saved_req->vbuf.dst[i].len;
	}
	creq->entries = saved_req->entries;
	creq->data_len = saved_req->data_len;
	creq->byteoffset = saved_req->byteoffset;

	kzfree(saved_req);
	kzfree(k_buf_src);
	return err;

}

static int qcedev_check_cipher_key(struct qcedev_cipher_op_req *req,
						struct qcedev_control *podev)
{
	/* if intending to use HW key make sure key fields are set
	 * correctly and HW key is indeed supported in target
	 */
	if (req->encklen == 0) {
		int i;

		for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
			if (req->enckey[i]) {
				pr_err("%s: Invalid key: non-zero key input\n",
								__func__);
				goto error;
			}
		}
		if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
			(req->op != QCEDEV_OPER_DEC_NO_KEY))
			if (!podev->platform_support.hw_key_support) {
				pr_err("%s: Invalid op %d\n", __func__,
						(uint32_t)req->op);
				goto error;
			}
	} else {
		if (req->encklen == QCEDEV_AES_KEY_192) {
			if (!podev->ce_support.aes_key_192) {
				pr_err("%s: AES-192 not supported\n", __func__);
				goto error;
			}
		} else {
			/* if not using HW key make sure key
			 * length is valid
			 */
			if (req->mode == QCEDEV_AES_MODE_XTS) {
				if ((req->encklen != QCEDEV_AES_KEY_128*2) &&
					(req->encklen != QCEDEV_AES_KEY_256*2)) {
					pr_err("%s: unsupported key size: %d\n",
							__func__, req->encklen);
					goto error;
				}
			} else {
				if ((req->encklen != QCEDEV_AES_KEY_128) &&
					(req->encklen != QCEDEV_AES_KEY_256)) {
					pr_err("%s: unsupported key size %d\n",
							__func__, req->encklen);
					goto error;
				}
			}
		}
	}
	return 0;
error:
	return -EINVAL;
}

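/*
 * Validate a user cipher request before any copying: entry counts,
 * algorithm/mode support, CTR byteoffset bounds, IV and key length
 * limits, and overflow-checked sums of the src and dst vectors, both
 * of which must add up exactly to data_len.
 */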
static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
						struct qcedev_control *podev)
{
	uint32_t total = 0;
	uint32_t i;

	if (req->use_pmem) {
		pr_err("%s: Use of PMEM is not supported\n", __func__);
		goto error;
	}
	if ((req->entries == 0) || (req->data_len == 0) ||
			(req->entries > QCEDEV_MAX_BUFFERS)) {
		pr_err("%s: Invalid cipher length/entries\n", __func__);
		goto error;
	}
	if ((req->alg >= QCEDEV_ALG_LAST) ||
		(req->mode >= QCEDEV_AES_DES_MODE_LAST)) {
		pr_err("%s: Invalid algorithm %d\n", __func__,
						(uint32_t)req->alg);
		goto error;
	}
	if ((req->mode == QCEDEV_AES_MODE_XTS) &&
				(!podev->ce_support.aes_xts)) {
		pr_err("%s: XTS algorithm is not supported\n", __func__);
		goto error;
	}
	if (req->alg == QCEDEV_ALG_AES) {
		if (qcedev_check_cipher_key(req, podev))
			goto error;

	}
	/* if using a byteoffset, make sure it is CTR mode using vbuf */
	if (req->byteoffset) {
		if (req->mode != QCEDEV_AES_MODE_CTR) {
			pr_err("%s: Operation on byte offset not supported\n",
								__func__);
			goto error;
		}
		if (req->byteoffset >= AES_CE_BLOCK_SIZE) {
			pr_err("%s: Invalid byte offset\n", __func__);
			goto error;
		}
		total = req->byteoffset;
		for (i = 0; i < req->entries; i++) {
			if (total > U32_MAX - req->vbuf.src[i].len) {
				pr_err("%s:Integer overflow on total src len\n",
					__func__);
				goto error;
			}
			total += req->vbuf.src[i].len;
		}
	}

	if (req->data_len < req->byteoffset) {
		pr_err("%s: req data length %u is less than byteoffset %u\n",
				__func__, req->data_len, req->byteoffset);
		goto error;
	}

	/* Ensure IV size */
	if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
		pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
		goto error;
	}

	/* Ensure Key size */
	if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
		pr_err("%s: Klen is not correct: %u\n", __func__, req->encklen);
		goto error;
	}

	/* Ensure zero ivlen for ECB mode */
	if (req->ivlen > 0) {
		if ((req->mode == QCEDEV_AES_MODE_ECB) ||
				(req->mode == QCEDEV_DES_MODE_ECB)) {
			pr_err("%s: Expecting a zero length IV\n", __func__);
			goto error;
		}
	} else {
		if ((req->mode != QCEDEV_AES_MODE_ECB) &&
				(req->mode != QCEDEV_DES_MODE_ECB)) {
			pr_err("%s: Expecting a non-zero length IV\n",
								__func__);
			goto error;
		}
	}
	/* Check for sum of all dst length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
			pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
				__func__, i, req->vbuf.dst[i].len);
			goto error;
		}
		if (req->vbuf.dst[i].len >= U32_MAX - total) {
			pr_err("%s: Integer overflow on total req dst vbuf length\n",
				__func__);
			goto error;
		}
		total += req->vbuf.dst[i].len;
	}
	if (total != req->data_len) {
		pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
			__func__, i, total, req->data_len);
		goto error;
	}
	/* Check for sum of all src length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
			pr_err("%s: NULL req src vbuf[%d] with length %d\n",
				__func__, i, req->vbuf.src[i].len);
			goto error;
		}
		if (req->vbuf.src[i].len > U32_MAX - total) {
			pr_err("%s: Integer overflow on total req src vbuf length\n",
				__func__);
			goto error;
		}
		total += req->vbuf.src[i].len;
	}
	if (total != req->data_len) {
		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
			__func__, total, req->data_len);
		goto error;
	}
	return 0;
error:
	return -EINVAL;

}

static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
						struct qcedev_control *podev)
{
	uint32_t total = 0;
	uint32_t i;

	if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
				(!podev->ce_support.cmac)) {
		pr_err("%s: CMAC not supported\n", __func__);
		goto sha_error;
	}
	if ((!req->entries) || (req->entries > QCEDEV_MAX_BUFFERS)) {
		pr_err("%s: Invalid num entries (%d)\n",
						__func__, req->entries);
		goto sha_error;
	}

	if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST) {
		pr_err("%s: Invalid algorithm (%d)\n", __func__, req->alg);
		goto sha_error;
	}
	if ((req->alg == QCEDEV_ALG_SHA1_HMAC) ||
			(req->alg == QCEDEV_ALG_SHA256_HMAC)) {
		if (req->authkey == NULL) {
			pr_err("%s: Invalid authkey pointer\n", __func__);
			goto sha_error;
		}
		if (req->authklen <= 0) {
			pr_err("%s: Invalid authkey length (%d)\n",
						__func__, req->authklen);
			goto sha_error;
		}
	}

	if (req->alg == QCEDEV_ALG_AES_CMAC) {
		if ((req->authklen != QCEDEV_AES_KEY_128) &&
					(req->authklen != QCEDEV_AES_KEY_256)) {
			pr_err("%s: unsupported key length\n", __func__);
			goto sha_error;
		}
	}

	/* Check for sum of all src length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (req->data[i].len > U32_MAX - total) {
			pr_err("%s: Integer overflow on total req buf length\n",
				__func__);
			goto sha_error;
		}
		total += req->data[i].len;
	}

	if (total != req->data_len) {
		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
			__func__, total, req->data_len);
		goto sha_error;
	}
	return 0;
sha_error:
	return -EINVAL;
}

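/*
 * ioctl entry point: each command copies the request in from userspace,
 * validates it, runs the synchronous cipher/hash path, and copies the
 * result back. The SHA commands serialize on hash_access_lock because
 * the per-handle sha_ctxt is shared state across init/update/final.
 */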
1663static inline long qcedev_ioctl(struct file *file,
1664 unsigned int cmd, unsigned long arg)
1665{
1666 int err = 0;
1667 struct qcedev_handle *handle;
1668 struct qcedev_control *podev;
1669 struct qcedev_async_req qcedev_areq;
1670 struct qcedev_stat *pstat;
1671
1672 handle = file->private_data;
1673 podev = handle->cntl;
1674 qcedev_areq.handle = handle;
1675 if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
mohamed sunfeerc6b8e6d2017-06-29 15:13:34 +05301676 pr_err("%s: invalid handle %pK\n",
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001677 __func__, podev);
1678 return -ENOENT;
1679 }
1680
1681 /* Verify user arguments. */
1682 if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC)
1683 return -ENOTTY;
1684
1685 init_completion(&qcedev_areq.complete);
1686 pstat = &_qcedev_stat;
1687
1688 switch (cmd) {
1689 case QCEDEV_IOCTL_ENC_REQ:
1690 case QCEDEV_IOCTL_DEC_REQ:
1691 if (copy_from_user(&qcedev_areq.cipher_op_req,
1692 (void __user *)arg,
1693 sizeof(struct qcedev_cipher_op_req)))
1694 return -EFAULT;
1695 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_CIPHER;
1696
1697 if (qcedev_check_cipher_params(&qcedev_areq.cipher_op_req,
1698 podev))
1699 return -EINVAL;
1700
1701 err = qcedev_vbuf_ablk_cipher(&qcedev_areq, handle);
1702 if (err)
1703 return err;
1704 if (copy_to_user((void __user *)arg,
1705 &qcedev_areq.cipher_op_req,
1706 sizeof(struct qcedev_cipher_op_req)))
1707 return -EFAULT;
1708 break;
1709
1710 case QCEDEV_IOCTL_SHA_INIT_REQ:
1711 {
1712 struct scatterlist sg_src;
1713
1714 if (copy_from_user(&qcedev_areq.sha_op_req,
1715 (void __user *)arg,
1716 sizeof(struct qcedev_sha_op_req)))
1717 return -EFAULT;
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301718 mutex_lock(&hash_access_lock);
1719 if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) {
1720 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001721 return -EINVAL;
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301722 }
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001723 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
1724 err = qcedev_hash_init(&qcedev_areq, handle, &sg_src);
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301725 if (err) {
1726 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001727 return err;
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301728 }
1729 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001730 if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
1731 sizeof(struct qcedev_sha_op_req)))
1732 return -EFAULT;
1733 }
1734 handle->sha_ctxt.init_done = true;
1735 break;
1736 case QCEDEV_IOCTL_GET_CMAC_REQ:
1737 if (!podev->ce_support.cmac)
1738 return -ENOTTY;
1739 case QCEDEV_IOCTL_SHA_UPDATE_REQ:
1740 {
1741 struct scatterlist sg_src;
1742
1743 if (copy_from_user(&qcedev_areq.sha_op_req,
1744 (void __user *)arg,
1745 sizeof(struct qcedev_sha_op_req)))
1746 return -EFAULT;
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301747 mutex_lock(&hash_access_lock);
1748 if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) {
1749 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001750 return -EINVAL;
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301751 }
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001752 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
1753
1754 if (qcedev_areq.sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
1755 err = qcedev_hash_cmac(&qcedev_areq, handle, &sg_src);
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301756 if (err) {
1757 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001758 return err;
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301759 }
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001760 } else {
1761 if (handle->sha_ctxt.init_done == false) {
1762 pr_err("%s Init was not called\n", __func__);
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301763 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001764 return -EINVAL;
1765 }
1766 err = qcedev_hash_update(&qcedev_areq, handle, &sg_src);
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301767 if (err) {
1768 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001769 return err;
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301770 }
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001771 }
1772
1773 if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
1774 pr_err("Invalid sha_ctxt.diglen %d\n",
1775 handle->sha_ctxt.diglen);
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301776 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001777 return -EINVAL;
1778 }
1779 memcpy(&qcedev_areq.sha_op_req.digest[0],
1780 &handle->sha_ctxt.digest[0],
1781 handle->sha_ctxt.diglen);
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301782 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001783 if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
1784 sizeof(struct qcedev_sha_op_req)))
1785 return -EFAULT;
1786 }
1787 break;
1788
1789 case QCEDEV_IOCTL_SHA_FINAL_REQ:
1790
1791 if (handle->sha_ctxt.init_done == false) {
1792 pr_err("%s Init was not called\n", __func__);
1793 return -EINVAL;
1794 }
1795 if (copy_from_user(&qcedev_areq.sha_op_req,
1796 (void __user *)arg,
1797 sizeof(struct qcedev_sha_op_req)))
1798 return -EFAULT;
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301799 mutex_lock(&hash_access_lock);
1800 if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) {
1801 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001802 return -EINVAL;
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301803 }
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001804 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
1805 err = qcedev_hash_final(&qcedev_areq, handle);
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301806 if (err) {
1807 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001808 return err;
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301809 }
Brahmaji K2ec40862017-05-15 16:02:15 +05301810 if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
1811 pr_err("Invalid sha_ctxt.diglen %d\n",
1812 handle->sha_ctxt.diglen);
1813 mutex_unlock(&hash_access_lock);
1814 return -EINVAL;
1815 }
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001816 qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
1817 memcpy(&qcedev_areq.sha_op_req.digest[0],
1818 &handle->sha_ctxt.digest[0],
1819 handle->sha_ctxt.diglen);
AnilKumar Chimatab9805a32017-03-13 16:13:47 +05301820 mutex_unlock(&hash_access_lock);
AnilKumar Chimatae78789a2017-04-07 12:18:46 -07001821 if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
1822 sizeof(struct qcedev_sha_op_req)))
1823 return -EFAULT;
1824 handle->sha_ctxt.init_done = false;
1825 break;
1826
	case QCEDEV_IOCTL_GET_SHA_REQ:
		{
		struct scatterlist sg_src;

		if (copy_from_user(&qcedev_areq.sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			return -EINVAL;
		}
		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
		qcedev_hash_init(&qcedev_areq, handle, &sg_src);
		err = qcedev_hash_update(&qcedev_areq, handle, &sg_src);
		if (err) {
			mutex_unlock(&hash_access_lock);
			return err;
		}
		err = qcedev_hash_final(&qcedev_areq, handle);
		if (err) {
			mutex_unlock(&hash_access_lock);
			return err;
		}
		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
			pr_err("Invalid sha_ctxt.diglen %d\n",
					handle->sha_ctxt.diglen);
			mutex_unlock(&hash_access_lock);
			return -EINVAL;
		}
		qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
		memcpy(&qcedev_areq.sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
				sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		}
		break;

	default:
		return -ENOTTY;
	}

	return err;
}

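/*
 * Probe for the core "qcom,qcedev" node: vote the CE bus bandwidth high,
 * open the QCE backend, drop the vote again, publish the misc device and
 * then populate the child nodes (context banks) from the device tree.
 * Every failure path unwinds whatever was set up before it.
 */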
static int qcedev_probe_device(struct platform_device *pdev)
{
	void *handle = NULL;
	int rc = 0;
	struct qcedev_control *podev;
	struct msm_ce_hw_support *platform_support;

	podev = &qce_dev[0];

	podev->high_bw_req_count = 0;
	INIT_LIST_HEAD(&podev->ready_commands);
	podev->active_command = NULL;

	INIT_LIST_HEAD(&podev->context_banks);

	spin_lock_init(&podev->lock);

	tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);

	podev->platform_support.bus_scale_table = (struct msm_bus_scale_pdata *)
			msm_bus_cl_get_pdata(pdev);
	if (!podev->platform_support.bus_scale_table) {
		pr_err("bus_scale_table is NULL\n");
		return -ENODATA;
	}
	podev->bus_scale_handle = msm_bus_scale_register_client(
			(struct msm_bus_scale_pdata *)
			podev->platform_support.bus_scale_table);
	if (!podev->bus_scale_handle) {
		pr_err("%s not able to get bus scale\n", __func__);
		return -ENOMEM;
	}

	rc = msm_bus_scale_client_update_request(podev->bus_scale_handle, 1);
	if (rc) {
		pr_err("%s Unable to set to high bandwidth\n", __func__);
		goto exit_unregister_bus_scale;
	}
	handle = qce_open(pdev, &rc);
	if (handle == NULL) {
		rc = -ENODEV;
		goto exit_scale_busbandwidth;
	}
	rc = msm_bus_scale_client_update_request(podev->bus_scale_handle, 0);
	if (rc) {
		pr_err("%s Unable to set to low bandwidth\n", __func__);
		goto exit_qce_close;
	}

	podev->qce = handle;
	podev->pdev = pdev;
	platform_set_drvdata(pdev, podev);

	qce_hw_support(podev->qce, &podev->ce_support);
	if (podev->ce_support.bam) {
		podev->platform_support.ce_shared = 0;
		podev->platform_support.shared_ce_resource = 0;
		podev->platform_support.hw_key_support =
				podev->ce_support.hw_key;
		podev->platform_support.sha_hmac = 1;
	} else {
		platform_support =
			(struct msm_ce_hw_support *)pdev->dev.platform_data;
		podev->platform_support.ce_shared = platform_support->ce_shared;
		podev->platform_support.shared_ce_resource =
				platform_support->shared_ce_resource;
		podev->platform_support.hw_key_support =
				platform_support->hw_key_support;
		podev->platform_support.sha_hmac = platform_support->sha_hmac;
	}

	rc = misc_register(&podev->miscdevice);
	if (rc) {
		pr_err("%s: err: register failed for misc: %d\n", __func__, rc);
		goto exit_qce_close;
	}

	rc = of_platform_populate(pdev->dev.of_node, qcedev_match,
			NULL, &pdev->dev);
	if (rc) {
		pr_err("%s: err: of_platform_populate failed: %d\n",
			__func__, rc);
		goto err;
	}

	return 0;

err:
	misc_deregister(&podev->miscdevice);

exit_qce_close:
	if (handle)
		qce_close(handle);
exit_scale_busbandwidth:
	msm_bus_scale_client_update_request(podev->bus_scale_handle, 0);
exit_unregister_bus_scale:
	if (podev->platform_support.bus_scale_table != NULL)
		msm_bus_scale_unregister_client(podev->bus_scale_handle);
	podev->bus_scale_handle = 0;
	platform_set_drvdata(pdev, NULL);
	podev->pdev = NULL;
	podev->qce = NULL;

	return rc;
}

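/*
 * A single driver serves two compatible strings: the core device takes
 * the full probe, while each "qcom,qcedev,context-bank" child node is
 * handed to qcedev_parse_context_bank().
 */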
static int qcedev_probe(struct platform_device *pdev)
{
	if (of_device_is_compatible(pdev->dev.of_node, "qcom,qcedev"))
		return qcedev_probe_device(pdev);
	else if (of_device_is_compatible(pdev->dev.of_node,
			"qcom,qcedev,context-bank"))
		return qcedev_parse_context_bank(pdev);

	return -EINVAL;
}

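/* Undo qcedev_probe_device(): close the QCE handle, release the
 * bus-scale client and misc device, and kill the completion tasklet.
 */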
static int qcedev_remove(struct platform_device *pdev)
{
	struct qcedev_control *podev;

	podev = platform_get_drvdata(pdev);
	if (!podev)
		return 0;
	if (podev->qce)
		qce_close(podev->qce);

	if (podev->platform_support.bus_scale_table != NULL)
		msm_bus_scale_unregister_client(podev->bus_scale_handle);

	if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
		misc_deregister(&podev->miscdevice);
	tasklet_kill(&podev->done_tasklet);
	return 0;
}

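/*
 * If a client still holds a high-bandwidth vote, drop the clocks and
 * the vote for the duration of the suspend; high_bw_req_count itself
 * is left intact so qcedev_resume() can restore the state.
 */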
static int qcedev_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct qcedev_control *podev;
	int ret;

	podev = platform_get_drvdata(pdev);

	if (!podev || !podev->platform_support.bus_scale_table)
		return 0;

	mutex_lock(&qcedev_sent_bw_req);
	if (podev->high_bw_req_count) {
		ret = qcedev_control_clocks(podev, false);
		if (ret)
			goto suspend_exit;
	}

suspend_exit:
	mutex_unlock(&qcedev_sent_bw_req);
	return 0;
}

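/* Counterpart of qcedev_suspend(): re-enable the clocks and bandwidth
 * vote if one was outstanding when the device went down.
 */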
static int qcedev_resume(struct platform_device *pdev)
{
	struct qcedev_control *podev;
	int ret;

	podev = platform_get_drvdata(pdev);

	if (!podev || !podev->platform_support.bus_scale_table)
		return 0;

	mutex_lock(&qcedev_sent_bw_req);
	if (podev->high_bw_req_count) {
		ret = qcedev_control_clocks(podev, true);
		if (ret)
			goto resume_exit;
	}

resume_exit:
	mutex_unlock(&qcedev_sent_bw_req);
	return 0;
}

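/* The match table covers both the core device and its context-bank
 * children; qcedev_probe() dispatches between them.
 */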
static struct platform_driver qcedev_plat_driver = {
	.probe = qcedev_probe,
	.remove = qcedev_remove,
	.suspend = qcedev_suspend,
	.resume = qcedev_resume,
	.driver = {
		.name = "qce",
		.owner = THIS_MODULE,
		.of_match_table = qcedev_match,
	},
};

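/* Render the success/failure counters into _debug_read_buf and return
 * the formatted length.
 */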
static int _disp_stats(int id)
{
	struct qcedev_stat *pstat;
	int len = 0;

	pstat = &_qcedev_stat;
	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
			"\nQTI QCE dev driver %d Statistics:\n",
				id + 1);

	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" Encryption operation success : %d\n",
					pstat->qcedev_enc_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" Encryption operation fail : %d\n",
					pstat->qcedev_enc_fail);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" Decryption operation success : %d\n",
					pstat->qcedev_dec_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" Decryption operation fail : %d\n",
					pstat->qcedev_dec_fail);

	return len;
}

static int _debug_stats_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

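/* Reformat the statistics on every read; a user buffer smaller than
 * the formatted text is rejected with -EINVAL rather than truncated.
 */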
static ssize_t _debug_stats_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	ssize_t rc = -EINVAL;
	int qcedev = *((int *) file->private_data);
	int len;

	len = _disp_stats(qcedev);

	if (len <= count)
		rc = simple_read_from_buffer((void __user *) buf, len,
			ppos, (void *) _debug_read_buf, len);
	return rc;
}

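/* Any write to the stats file, whatever its contents, resets all
 * counters to zero.
 */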
static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
	memset((char *)&_qcedev_stat, 0, sizeof(struct qcedev_stat));
	return count;
}

static const struct file_operations _debug_stats_ops = {
	.open = _debug_stats_open,
	.read = _debug_stats_read,
	.write = _debug_stats_write,
};

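/* Create the "qcedev/stats-1" debugfs node (under the usual debugfs
 * mount point) backing the statistics file above.
 */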
static int _qcedev_debug_init(void)
{
	int rc;
	char name[DEBUG_MAX_FNAME];
	struct dentry *dent;

	_debug_dent = debugfs_create_dir("qcedev", NULL);
	if (IS_ERR(_debug_dent)) {
		pr_err("qcedev debugfs_create_dir fail, error %ld\n",
				PTR_ERR(_debug_dent));
		return PTR_ERR(_debug_dent);
	}

	snprintf(name, DEBUG_MAX_FNAME - 1, "stats-%d", 1);
	_debug_qcedev = 0;
	dent = debugfs_create_file(name, 0644, _debug_dent,
			&_debug_qcedev, &_debug_stats_ops);
	if (IS_ERR_OR_NULL(dent)) {
		/* debugfs_create_file() reports failure as either NULL or
		 * an ERR_PTR(); PTR_ERR(NULL) would be 0, so map NULL to
		 * -ENOMEM instead of returning success.
		 */
		rc = dent ? PTR_ERR(dent) : -ENOMEM;
		pr_err("qcedev debugfs_create_file fail, error %d\n", rc);
		goto err;
	}
	return 0;
err:
	debugfs_remove_recursive(_debug_dent);
	return rc;
}

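/* Bring up debugfs before the platform driver so the stats node exists
 * as soon as a device can probe; unwind it if registration fails.
 */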
static int qcedev_init(void)
{
	int rc;

	rc = _qcedev_debug_init();
	if (rc)
		return rc;
	rc = platform_driver_register(&qcedev_plat_driver);
	if (rc) {
		/* Don't leave the debugfs tree behind if driver
		 * registration fails.
		 */
		debugfs_remove_recursive(_debug_dent);
	}
	return rc;
}

static void qcedev_exit(void)
{
	debugfs_remove_recursive(_debug_dent);
	platform_driver_unregister(&qcedev_plat_driver);
}

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI DEV Crypto driver");

module_init(qcedev_init);
module_exit(qcedev_exit);