/*
 * QTI CE device driver.
 *
 * Copyright (c) 2010-2020, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/platform_data/qcom_crypto_device.h>
#include <linux/msm-bus.h>
#include <linux/qcedev.h>

#include <crypto/hash.h>
#include "qcedevi.h"
#include "qce.h"
#include "qcedev_smmu.h"

#include <linux/compat.h>
#include "compat_qcedev.h"

#define CACHE_LINE_SIZE 32
#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE

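/* standard initialization vector for SHA-1, source: FIPS 180-2 */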
static uint8_t _std_init_vector_sha1_uint8[] = {
	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
	0xC3, 0xD2, 0xE1, 0xF0
};
/* standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha256_uint8[] = {
	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
};

static DEFINE_MUTEX(send_cmd_lock);
static DEFINE_MUTEX(qcedev_sent_bw_req);
static DEFINE_MUTEX(hash_access_lock);

static const struct of_device_id qcedev_match[] = {
	{ .compatible = "qcom,qcedev"},
	{ .compatible = "qcom,qcedev,context-bank"},
	{}
};

MODULE_DEVICE_TABLE(of, qcedev_match);

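/*
 * Enable or disable the CE clocks and the bus-bandwidth vote as a pair.
 * The ordering of the two operations is target dependent: when
 * ce_support.req_bw_before_clk is set, the bandwidth vote is placed
 * before the clocks are enabled (and, symmetrically, the clocks are
 * disabled before the vote is dropped); otherwise the order is reversed.
 * Each arm unwinds the first step if the second one fails.
 */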
static int qcedev_control_clocks(struct qcedev_control *podev, bool enable)
{
	unsigned int control_flag;
	int ret = 0;

	if (podev->ce_support.req_bw_before_clk) {
		if (enable)
			control_flag = QCE_BW_REQUEST_FIRST;
		else
			control_flag = QCE_CLK_DISABLE_FIRST;
	} else {
		if (enable)
			control_flag = QCE_CLK_ENABLE_FIRST;
		else
			control_flag = QCE_BW_REQUEST_RESET_FIRST;
	}

	switch (control_flag) {
	case QCE_CLK_ENABLE_FIRST:
		ret = qce_enable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to enable clk\n", __func__);
			return ret;
		}
		ret = msm_bus_scale_client_update_request(
				podev->bus_scale_handle, 1);
		if (ret) {
			pr_err("%s Unable to set high bw\n", __func__);
			ret = qce_disable_clk(podev->qce);
			if (ret)
				pr_err("%s Unable to disable clk\n", __func__);
			return ret;
		}
		break;
	case QCE_BW_REQUEST_FIRST:
		ret = msm_bus_scale_client_update_request(
				podev->bus_scale_handle, 1);
		if (ret) {
			pr_err("%s Unable to set high bw\n", __func__);
			return ret;
		}
		ret = qce_enable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to enable clk\n", __func__);
			ret = msm_bus_scale_client_update_request(
					podev->bus_scale_handle, 0);
			if (ret)
				pr_err("%s Unable to set low bw\n", __func__);
			return ret;
		}
		break;
	case QCE_CLK_DISABLE_FIRST:
		ret = qce_disable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to disable clk\n", __func__);
			return ret;
		}
		ret = msm_bus_scale_client_update_request(
				podev->bus_scale_handle, 0);
		if (ret) {
			pr_err("%s Unable to set low bw\n", __func__);
			ret = qce_enable_clk(podev->qce);
			if (ret)
				pr_err("%s Unable to enable clk\n", __func__);
			return ret;
		}
		break;
	case QCE_BW_REQUEST_RESET_FIRST:
		ret = msm_bus_scale_client_update_request(
				podev->bus_scale_handle, 0);
		if (ret) {
			pr_err("%s Unable to set low bw\n", __func__);
			return ret;
		}
		ret = qce_disable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to disable clk\n", __func__);
			ret = msm_bus_scale_client_update_request(
					podev->bus_scale_handle, 1);
			if (ret)
				pr_err("%s Unable to set high bw\n", __func__);
			return ret;
		}
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
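/*
 * Reference-counted high-bandwidth voting: the clocks and bus vote are
 * switched on only for the first requester and switched off only when
 * the last one drops its request, serialized by qcedev_sent_bw_req.
 */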
static void qcedev_ce_high_bw_req(struct qcedev_control *podev,
				bool high_bw_req)
{
	int ret = 0;

	mutex_lock(&qcedev_sent_bw_req);
	if (high_bw_req) {
		if (podev->high_bw_req_count == 0) {
			ret = qcedev_control_clocks(podev, true);
			if (ret)
				goto exit_unlock_mutex;
		}
		podev->high_bw_req_count++;
	} else {
		if (podev->high_bw_req_count == 1) {
			ret = qcedev_control_clocks(podev, false);
			if (ret)
				goto exit_unlock_mutex;
		}
		podev->high_bw_req_count--;
	}

exit_unlock_mutex:
	mutex_unlock(&qcedev_sent_bw_req);
}

#define QCEDEV_MAGIC 0x56434544 /* "qced" */

static int qcedev_open(struct inode *inode, struct file *file);
static int qcedev_release(struct inode *inode, struct file *file);
static int start_cipher_req(struct qcedev_control *podev);
static int start_sha_req(struct qcedev_control *podev);
static inline long qcedev_ioctl(struct file *file,
				unsigned int cmd, unsigned long arg);

#ifdef CONFIG_COMPAT
#include "compat_qcedev.c"
#else
#define compat_qcedev_ioctl NULL
#endif

static const struct file_operations qcedev_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qcedev_ioctl,
	.compat_ioctl = compat_qcedev_ioctl,
	.open = qcedev_open,
	.release = qcedev_release,
};

static struct qcedev_control qce_dev[] = {
	{
		.miscdevice = {
			.minor = MISC_DYNAMIC_MINOR,
			.name = "qce",
			.fops = &qcedev_fops,
		},
		.magic = QCEDEV_MAGIC,
	},
};

#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev)
#define DEBUG_MAX_FNAME 16
#define DEBUG_MAX_RW_BUF 1024

struct qcedev_stat {
	u32 qcedev_dec_success;
	u32 qcedev_dec_fail;
	u32 qcedev_enc_success;
	u32 qcedev_enc_fail;
	u32 qcedev_sha_success;
	u32 qcedev_sha_fail;
};

static struct qcedev_stat _qcedev_stat;
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
static int _debug_qcedev;

static struct qcedev_control *qcedev_minor_to_control(unsigned int n)
{
	int i;

	for (i = 0; i < MAX_QCE_DEVICE; i++) {
		if (qce_dev[i].miscdevice.minor == n)
			return &qce_dev[i];
	}
	return NULL;
}

static int qcedev_open(struct inode *inode, struct file *file)
{
	struct qcedev_handle *handle;
	struct qcedev_control *podev;

	podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
	if (podev == NULL) {
		pr_err("%s: no such device %d\n", __func__,
				MINOR(inode->i_rdev));
		return -ENOENT;
	}

	handle = kzalloc(sizeof(struct qcedev_handle), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	handle->cntl = podev;
	file->private_data = handle;
	if (podev->platform_support.bus_scale_table != NULL)
		qcedev_ce_high_bw_req(podev, true);

	mutex_init(&handle->registeredbufs.lock);
	INIT_LIST_HEAD(&handle->registeredbufs.list);
	return 0;
}

static int qcedev_release(struct inode *inode, struct file *file)
{
	struct qcedev_control *podev;
	struct qcedev_handle *handle;

	handle = file->private_data;
	podev = handle->cntl;
	if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
		pr_err("%s: invalid handle %pK\n",
					__func__, podev);
	}
	kzfree(handle);
	file->private_data = NULL;
	if (podev != NULL && podev->platform_support.bus_scale_table != NULL)
		qcedev_ce_high_bw_req(podev, false);
	return 0;
}

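/*
 * Tasklet handler: completes the request that just finished and, while
 * the queue lock is held, starts the next request waiting on
 * ready_commands. A request whose start fails is completed immediately
 * and the queue is drained again.
 */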
static void req_done(unsigned long data)
{
	struct qcedev_control *podev = (struct qcedev_control *)data;
	struct qcedev_async_req *areq;
	unsigned long flags = 0;
	struct qcedev_async_req *new_req = NULL;
	int ret = 0;

	spin_lock_irqsave(&podev->lock, flags);
	areq = podev->active_command;
	podev->active_command = NULL;

again:
	if (!list_empty(&podev->ready_commands)) {
		new_req = container_of(podev->ready_commands.next,
					struct qcedev_async_req, list);
		list_del(&new_req->list);
		podev->active_command = new_req;
		new_req->err = 0;
		if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev);
		else
			ret = start_sha_req(podev);
	}

	spin_unlock_irqrestore(&podev->lock, flags);

	if (areq)
		complete(&areq->complete);

	if (new_req && ret) {
		complete(&new_req->complete);
		spin_lock_irqsave(&podev->lock, flags);
		podev->active_command = NULL;
		areq = NULL;
		ret = 0;
		new_req = NULL;
		goto again;
	}
}

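/*
 * Completion callbacks invoked by the qce layer when the hardware
 * finishes a request; they save the results into the handle's SHA
 * context or the active cipher request and defer the queue handling
 * to done_tasklet.
 */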
void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
	unsigned char *authdata, int ret)
{
	struct qcedev_sha_req *areq;
	struct qcedev_control *pdev;
	struct qcedev_handle *handle;

	uint32_t *auth32 = (uint32_t *)authdata;

	areq = (struct qcedev_sha_req *) cookie;
	handle = (struct qcedev_handle *) areq->cookie;
	pdev = handle->cntl;

	if (digest)
		memcpy(&handle->sha_ctxt.digest[0], digest, 32);

	if (authdata) {
		handle->sha_ctxt.auth_data[0] = auth32[0];
		handle->sha_ctxt.auth_data[1] = auth32[1];
	}

	tasklet_schedule(&pdev->done_tasklet);
}

void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
	unsigned char *iv, int ret)
{
	struct qcedev_cipher_req *areq;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req *qcedev_areq;

	areq = (struct qcedev_cipher_req *) cookie;
	handle = (struct qcedev_handle *) areq->cookie;
	podev = handle->cntl;
	qcedev_areq = podev->active_command;

	if (iv)
		memcpy(&qcedev_areq->cipher_op_req.iv[0], iv,
			qcedev_areq->cipher_op_req.ivlen);
	tasklet_schedule(&podev->done_tasklet);
}

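/*
 * Translate the active qcedev cipher request into a qce_req and hand
 * it to the qce layer. Both callers (submit_req() and req_done()) hold
 * podev->lock around this call.
 */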
static int start_cipher_req(struct qcedev_control *podev)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_req creq;
	int ret = 0;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
	if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM) {
		pr_err("%s: Use of PMEM is not supported\n", __func__);
		goto unsupported;
	}
	creq.pmem = NULL;
	switch (qcedev_areq->cipher_op_req.alg) {
	case QCEDEV_ALG_DES:
		creq.alg = CIPHER_ALG_DES;
		break;
	case QCEDEV_ALG_3DES:
		creq.alg = CIPHER_ALG_3DES;
		break;
	case QCEDEV_ALG_AES:
		creq.alg = CIPHER_ALG_AES;
		break;
	default:
		return -EINVAL;
	}

	switch (qcedev_areq->cipher_op_req.mode) {
	case QCEDEV_AES_MODE_CBC:
	case QCEDEV_DES_MODE_CBC:
		creq.mode = QCE_MODE_CBC;
		break;
	case QCEDEV_AES_MODE_ECB:
	case QCEDEV_DES_MODE_ECB:
		creq.mode = QCE_MODE_ECB;
		break;
	case QCEDEV_AES_MODE_CTR:
		creq.mode = QCE_MODE_CTR;
		break;
	case QCEDEV_AES_MODE_XTS:
		creq.mode = QCE_MODE_XTS;
		break;
	default:
		return -EINVAL;
	}

	if ((creq.alg == CIPHER_ALG_AES) &&
		(creq.mode == QCE_MODE_CTR)) {
		creq.dir = QCE_ENCRYPT;
	} else {
		if (qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC)
			creq.dir = QCE_ENCRYPT;
		else
			creq.dir = QCE_DECRYPT;
	}

	creq.iv = &qcedev_areq->cipher_op_req.iv[0];
	creq.ivsize = qcedev_areq->cipher_op_req.ivlen;

	creq.enckey = &qcedev_areq->cipher_op_req.enckey[0];
	creq.encklen = qcedev_areq->cipher_op_req.encklen;

	creq.cryptlen = qcedev_areq->cipher_op_req.data_len;

	if (qcedev_areq->cipher_op_req.encklen == 0) {
		if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY)
			|| (qcedev_areq->cipher_op_req.op ==
				QCEDEV_OPER_DEC_NO_KEY))
			creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
		else {
			int i;

			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
				if (qcedev_areq->cipher_op_req.enckey[i] != 0)
					break;
			}

			if ((podev->platform_support.hw_key_support == 1) &&
						(i == QCEDEV_MAX_KEY_SIZE))
				creq.op = QCE_REQ_ABLK_CIPHER;
			else {
				ret = -EINVAL;
				goto unsupported;
			}
		}
	} else {
		creq.op = QCE_REQ_ABLK_CIPHER;
	}

	creq.qce_cb = qcedev_cipher_req_cb;
	creq.areq = (void *)&qcedev_areq->cipher_req;
	creq.flags = 0;
	ret = qce_ablk_cipher_req(podev->qce, &creq);
unsupported:
	if (ret)
		qcedev_areq->err = -ENXIO;
	else
		qcedev_areq->err = 0;
	return ret;
}

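/*
 * Translate the active qcedev hash request into a qce_sha_req, carrying
 * over the intermediate digest and auth_data from the handle's SHA
 * context (except for CMAC, which has no intermediate state here), and
 * hand it to the qce layer.
 */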
static int start_sha_req(struct qcedev_control *podev)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_sha_req sreq;
	int ret = 0;
	struct qcedev_handle *handle;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	handle = qcedev_areq->handle;

	switch (qcedev_areq->sha_op_req.alg) {
	case QCEDEV_ALG_SHA1:
		sreq.alg = QCE_HASH_SHA1;
		break;
	case QCEDEV_ALG_SHA256:
		sreq.alg = QCE_HASH_SHA256;
		break;
	case QCEDEV_ALG_SHA1_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA1_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];
			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
		} else {
			sreq.alg = QCE_HASH_SHA1;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_SHA256_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA256_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];
			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
		} else {
			sreq.alg = QCE_HASH_SHA256;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_AES_CMAC:
		sreq.alg = QCE_HASH_AES_CMAC;
		sreq.authkey = &handle->sha_ctxt.authkey[0];
		sreq.authklen = qcedev_areq->sha_op_req.authklen;
		break;
	default:
		pr_err("Algorithm %d not supported, exiting\n",
			qcedev_areq->sha_op_req.alg);
		return -EINVAL;
	}

	qcedev_areq->sha_req.cookie = handle;

	sreq.qce_cb = qcedev_sha_req_cb;
	if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
		sreq.auth_data[0] = handle->sha_ctxt.auth_data[0];
		sreq.auth_data[1] = handle->sha_ctxt.auth_data[1];
		sreq.auth_data[2] = handle->sha_ctxt.auth_data[2];
		sreq.auth_data[3] = handle->sha_ctxt.auth_data[3];
		sreq.digest = &handle->sha_ctxt.digest[0];
		sreq.first_blk = handle->sha_ctxt.first_blk;
		sreq.last_blk = handle->sha_ctxt.last_blk;
	}
	sreq.size = qcedev_areq->sha_req.sreq.nbytes;
	sreq.src = qcedev_areq->sha_req.sreq.src;
	sreq.areq = (void *)&qcedev_areq->sha_req;
	sreq.flags = 0;

	ret = qce_process_sha_req(podev->qce, &sreq);

	if (ret)
		qcedev_areq->err = -ENXIO;
	else
		qcedev_areq->err = 0;
	return ret;
}

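/*
 * Queue a request on the engine and block until its completion is
 * signalled from req_done(). Only one request is active at a time;
 * later arrivals wait on the ready_commands list.
 */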
static int submit_req(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle)
{
	struct qcedev_control *podev;
	unsigned long flags = 0;
	int ret = 0;
	struct qcedev_stat *pstat;

	qcedev_areq->err = 0;
	podev = handle->cntl;

	spin_lock_irqsave(&podev->lock, flags);

	if (podev->active_command == NULL) {
		podev->active_command = qcedev_areq;
		if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev);
		else
			ret = start_sha_req(podev);
	} else {
		list_add_tail(&qcedev_areq->list, &podev->ready_commands);
	}

	if (ret != 0)
		podev->active_command = NULL;

	spin_unlock_irqrestore(&podev->lock, flags);

	if (ret == 0)
		wait_for_completion(&qcedev_areq->complete);

	if (ret)
		qcedev_areq->err = -EIO;

	pstat = &_qcedev_stat;
	if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
		switch (qcedev_areq->cipher_op_req.op) {
		case QCEDEV_OPER_DEC:
			if (qcedev_areq->err)
				pstat->qcedev_dec_fail++;
			else
				pstat->qcedev_dec_success++;
			break;
		case QCEDEV_OPER_ENC:
			if (qcedev_areq->err)
				pstat->qcedev_enc_fail++;
			else
				pstat->qcedev_enc_success++;
			break;
		default:
			break;
		}
	} else {
		if (qcedev_areq->err)
			pstat->qcedev_sha_fail++;
		else
			pstat->qcedev_sha_success++;
	}

	return qcedev_areq->err;
}

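/*
 * Reset the per-handle SHA context and seed the digest with the
 * standard initialization vector for the chosen algorithm.
 */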
static int qcedev_sha_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	struct qcedev_sha_ctxt *sha_ctxt = &handle->sha_ctxt;

	memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt));
	sha_ctxt->first_blk = 1;

	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) {
		memcpy(&sha_ctxt->digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		sha_ctxt->diglen = SHA1_DIGEST_SIZE;
	} else {
		if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) {
			memcpy(&sha_ctxt->digest[0],
				&_std_init_vector_sha256_uint8[0],
				SHA256_DIGEST_SIZE);
			sha_ctxt->diglen = SHA256_DIGEST_SIZE;
		}
	}
	sha_ctxt->init_done = true;
	return 0;
}

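/*
 * Hash up to one QCE_MAX_OPER_DATA chunk: user buffers are gathered
 * into a cache-line-aligned bounce buffer behind any bytes left over
 * from the previous update, whole blocks are sent to the engine, and
 * the residue (up to one SHA block) is kept in trailing_buf for the
 * next update or the final.
 */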
static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	int err = 0;
	int i = 0;
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;

	uint32_t sha_pad_len = 0;
	uint32_t trailing_buf_len = 0;
	uint32_t t_buf = handle->sha_ctxt.trailing_buf_len;
	uint32_t sha_block_size;

	total = qcedev_areq->sha_op_req.data_len + t_buf;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	if (total <= sha_block_size) {
		uint32_t len = qcedev_areq->sha_op_req.data_len;

		i = 0;

		k_src = &handle->sha_ctxt.trailing_buf[t_buf];

		/* Copy data from user src(s) */
		while (len > 0) {
			user_src =
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
			if (user_src && copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len))
				return -EFAULT;

			len -= qcedev_areq->sha_op_req.data[i].len;
			k_src += qcedev_areq->sha_op_req.data[i].len;
			i++;
		}
		handle->sha_ctxt.trailing_buf_len = total;

		return 0;
	}

	k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
				GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
							CACHE_LINE_SIZE);
	k_src = k_align_src;

	/* check for trailing buffer from previous updates and append it */
	if (t_buf > 0) {
		memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
								t_buf);
		k_src += t_buf;
	}

	/* Copy data from user src(s) */
	user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
	if (user_src && copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[0].len)) {
		kzfree(k_buf_src);
		return -EFAULT;
	}
	k_src += qcedev_areq->sha_op_req.data[0].len;
	for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src = (void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && copy_from_user(k_src,
					(void __user *)user_src,
					qcedev_areq->sha_op_req.data[i].len)) {
			kzfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	/* get new trailing buffer */
	sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total;
	trailing_buf_len = CE_SHA_BLOCK_SIZE - sha_pad_len;

	qcedev_areq->sha_req.sreq.src = sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src,
					total - trailing_buf_len);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len;

	/* update sha_ctxt trailing buf content to new trailing buf */
	if (trailing_buf_len > 0) {
		memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
		memcpy(&handle->sha_ctxt.trailing_buf[0],
			(k_src - trailing_buf_len),
			trailing_buf_len);
	}
	handle->sha_ctxt.trailing_buf_len = trailing_buf_len;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;

	kzfree(k_buf_src);
	return err;
}

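/*
 * Split an arbitrarily large update into QCE_MAX_OPER_DATA-sized
 * pieces, rewriting the entry list in place for each pass and
 * restoring the caller's request structure afterwards.
 */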
static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	int err = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	int num_entries = 0;
	uint32_t total = 0;

	if (handle->sha_ctxt.init_done == false) {
		pr_err("%s Init was not called\n", __func__);
		return -EINVAL;
	}

	if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) {

		struct qcedev_sha_op_req *saved_req;
		struct qcedev_sha_op_req req;
		struct qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req;

		/* save the original req structure */
		saved_req =
			kmalloc(sizeof(struct qcedev_sha_op_req), GFP_KERNEL);
		if (saved_req == NULL) {
			pr_err("%s:Can't Allocate mem:saved_req 0x%lx\n",
					__func__, (uintptr_t)saved_req);
			return -ENOMEM;
		}
		memcpy(&req, sreq, sizeof(struct qcedev_sha_op_req));
		memcpy(saved_req, sreq, sizeof(struct qcedev_sha_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (sreq->data[i].len > QCE_MAX_OPER_DATA) {
				sreq->data[0].len = QCE_MAX_OPER_DATA;
				if (i > 0) {
					sreq->data[0].vaddr =
						sreq->data[i].vaddr;
				}

				sreq->data_len = QCE_MAX_OPER_DATA;
				sreq->entries = 1;

				err = qcedev_sha_update_max_xfer(qcedev_areq,
								handle, sg_src);

				sreq->data[i].len = req.data[i].len -
							QCE_MAX_OPER_DATA;
				sreq->data[i].vaddr = req.data[i].vaddr +
							QCE_MAX_OPER_DATA;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;
			} else {
				total = 0;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + sreq->data[j].len) >=
							QCE_MAX_OPER_DATA) {
						sreq->data[j].len =
						(QCE_MAX_OPER_DATA - total);
						total = QCE_MAX_OPER_DATA;
						break;
					}
					total += sreq->data[j].len;
				}

				sreq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						sreq->data[k].len =
							sreq->data[i+k].len;
						sreq->data[k].vaddr =
							sreq->data[i+k].vaddr;
					}
				sreq->entries = num_entries;

				i = j;
				err = qcedev_sha_update_max_xfer(qcedev_areq,
								handle, sg_src);
				num_entries = 0;

				sreq->data[i].vaddr = req.data[i].vaddr +
							sreq->data[i].len;
				sreq->data[i].len = req.data[i].len -
							sreq->data[i].len;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;

				if (sreq->data[i].len == 0)
					i++;
			}
		} /* end of while ((i < req.entries) && (err == 0)) */

		/* Restore the original req structure */
		for (i = 0; i < saved_req->entries; i++) {
			sreq->data[i].len = saved_req->data[i].len;
			sreq->data[i].vaddr = saved_req->data[i].vaddr;
		}
		sreq->entries = saved_req->entries;
		sreq->data_len = saved_req->data_len;
		kzfree(saved_req);
	} else
		err = qcedev_sha_update_max_xfer(qcedev_areq, handle, sg_src);

	return err;
}

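/*
 * Hash whatever is left in trailing_buf as the last block, then scrub
 * and invalidate the SHA context so the handle must be re-initialized
 * before reuse.
 */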
static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint32_t total;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;

	if (handle->sha_ctxt.init_done == false) {
		pr_err("%s Init was not called\n", __func__);
		return -EINVAL;
	}

	handle->sha_ctxt.last_blk = 1;

	total = handle->sha_ctxt.trailing_buf_len;

	if (total) {
		k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
					GFP_KERNEL);
		if (k_buf_src == NULL)
			return -ENOMEM;

		k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
							CACHE_LINE_SIZE);
		memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total);
	}
	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src, total);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.first_blk = 0;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;
	handle->sha_ctxt.trailing_buf_len = 0;
	handle->sha_ctxt.init_done = false;
	memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);

	kzfree(k_buf_src);
	qcedev_areq->sha_req.sreq.src = NULL;
	return err;
}

static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle,
					struct scatterlist *sg_src)
{
	int err = 0;
	int i = 0;
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;

	total = qcedev_areq->sha_op_req.data_len;

	if (copy_from_user(&handle->sha_ctxt.authkey[0],
				(void __user *)qcedev_areq->sha_op_req.authkey,
				qcedev_areq->sha_op_req.authklen))
		return -EFAULT;

	k_buf_src = kmalloc(total, GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_src = k_buf_src;

	/* Copy data from user src(s) */
	user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src =
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && copy_from_user(k_src, (void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len)) {
			kzfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	qcedev_areq->sha_req.sreq.src = sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_buf_src, total);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total;
	handle->sha_ctxt.diglen = qcedev_areq->sha_op_req.diglen;
	err = submit_req(qcedev_areq, handle);

	kzfree(k_buf_src);
	return err;
}

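/*
 * Install the HMAC key. Following the RFC 2104 convention, a key
 * longer than QCEDEV_MAX_KEY_SIZE is first hashed and its digest is
 * used as the effective key.
 */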
static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq,
					struct qcedev_handle *handle,
					struct scatterlist *sg_src)
{
	int err = 0;

	if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
		qcedev_sha_init(areq, handle);
		if (copy_from_user(&handle->sha_ctxt.authkey[0],
				(void __user *)areq->sha_op_req.authkey,
				areq->sha_op_req.authklen))
			return -EFAULT;
	} else {
		struct qcedev_async_req authkey_areq;
		uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];

		init_completion(&authkey_areq.complete);

		authkey_areq.sha_op_req.entries = 1;
		authkey_areq.sha_op_req.data[0].vaddr =
						areq->sha_op_req.authkey;
		authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.diglen = 0;
		authkey_areq.handle = handle;

		memset(&authkey_areq.sha_op_req.digest[0], 0,
						QCEDEV_MAX_SHA_DIGEST);
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1;
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256;

		authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;

		qcedev_sha_init(&authkey_areq, handle);
		err = qcedev_sha_update(&authkey_areq, handle, sg_src);
		if (!err)
			err = qcedev_sha_final(&authkey_areq, handle);
		else
			return err;
		memcpy(&authkey[0], &handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		qcedev_sha_init(areq, handle);

		memcpy(&handle->sha_ctxt.authkey[0], &authkey[0],
				handle->sha_ctxt.diglen);
	}
	return err;
}

static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint8_t *k_src = NULL;
	uint32_t sha_block_size = 0;
	uint32_t sha_digest_size = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		sha_digest_size = SHA1_DIGEST_SIZE;
		sha_block_size = SHA1_BLOCK_SIZE;
	} else {
		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
			sha_digest_size = SHA256_DIGEST_SIZE;
			sha_block_size = SHA256_BLOCK_SIZE;
		}
	}
	k_src = kmalloc(sha_block_size, GFP_KERNEL);
	if (k_src == NULL)
		return -ENOMEM;

	/* check for trailing buffer from previous updates and append it */
	memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
			handle->sha_ctxt.trailing_buf_len);

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = sha_block_size;
	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
	memcpy(&handle->sha_ctxt.trailing_buf[0], &handle->sha_ctxt.digest[0],
						sha_digest_size);
	handle->sha_ctxt.trailing_buf_len = sha_digest_size;

	handle->sha_ctxt.first_blk = 1;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		memcpy(&handle->sha_ctxt.digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		handle->sha_ctxt.diglen = SHA1_DIGEST_SIZE;
	}

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
		memcpy(&handle->sha_ctxt.digest[0],
			&_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE);
		handle->sha_ctxt.diglen = SHA256_DIGEST_SIZE;
	}
	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;

	kzfree(k_src);
	qcedev_areq->sha_req.sreq.src = NULL;
	return err;
}

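/*
 * Prime trailing_buf with the HMAC inner (key XOR 0x36) or outer
 * (key XOR 0x5c) pad block; used on targets whose CE core lacks
 * native HMAC support.
 */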
static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq,
				struct qcedev_handle *handle, bool ikey)
{
	int i;
	uint32_t constant;
	uint32_t sha_block_size;

	if (ikey)
		constant = 0x36;
	else
		constant = 0x5c;

	if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
	for (i = 0; i < sha_block_size; i++)
		handle->sha_ctxt.trailing_buf[i] =
			(handle->sha_ctxt.authkey[i] ^ constant);

	handle->sha_ctxt.trailing_buf_len = sha_block_size;
	return 0;
}

static int qcedev_hmac_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	int err;
	struct qcedev_control *podev = handle->cntl;

	err = qcedev_set_hmac_auth_key(areq, handle, sg_src);
	if (err)
		return err;
	if (!podev->ce_support.sha_hmac)
		qcedev_hmac_update_iokey(areq, handle, true);
	return 0;
}

static int qcedev_hmac_final(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	int err;
	struct qcedev_control *podev = handle->cntl;

	err = qcedev_sha_final(areq, handle);
	if (podev->ce_support.sha_hmac)
		return err;

	qcedev_hmac_update_iokey(areq, handle, false);
	err = qcedev_hmac_get_ohash(areq, handle);
	if (err)
		return err;
	err = qcedev_sha_final(areq, handle);

	return err;
}

static int qcedev_hash_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_init(areq, handle);
	else
		return qcedev_hmac_init(areq, handle, sg_src);
}

static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	return qcedev_sha_update(qcedev_areq, handle, sg_src);
}

static int qcedev_hash_final(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_final(areq, handle);
	else
		return qcedev_hmac_final(areq, handle);
}

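/*
 * Cipher one chunk: copy the user source buffers into the bounce
 * buffer, run an in-place encryption/decryption through a single
 * scatterlist entry, then scatter the result back across the user
 * destination buffers, tracking the current one via *di.
 */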
static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
				int *di, struct qcedev_handle *handle,
				uint8_t *k_align_src)
{
	int err = 0;
	int i = 0;
	int dst_i = *di;
	struct scatterlist sg_src;
	uint32_t byteoffset = 0;
	uint8_t *user_src = NULL;
	uint8_t *k_align_dst = k_align_src;
	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;

	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->cipher_op_req.byteoffset;

	user_src = (void __user *)areq->cipher_op_req.vbuf.src[0].vaddr;
	if (user_src && copy_from_user((k_align_src + byteoffset),
				(void __user *)user_src,
				areq->cipher_op_req.vbuf.src[0].len))
		return -EFAULT;

	k_align_src += byteoffset + areq->cipher_op_req.vbuf.src[0].len;

	for (i = 1; i < areq->cipher_op_req.entries; i++) {
		user_src =
			(void __user *)areq->cipher_op_req.vbuf.src[i].vaddr;
		if (user_src && copy_from_user(k_align_src,
					(void __user *)user_src,
					areq->cipher_op_req.vbuf.src[i].len)) {
			return -EFAULT;
		}
		k_align_src += areq->cipher_op_req.vbuf.src[i].len;
	}

	/* restore src beginning */
	k_align_src = k_align_dst;
	areq->cipher_op_req.data_len += byteoffset;

	areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
	areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;

	/* In place encryption/decryption */
	sg_set_buf(areq->cipher_req.creq.src,
				k_align_dst,
				areq->cipher_op_req.data_len);
	sg_mark_end(areq->cipher_req.creq.src);

	areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
	areq->cipher_req.creq.info = areq->cipher_op_req.iv;
	areq->cipher_op_req.entries = 1;

	err = submit_req(areq, handle);

	/* copy data to destination buffer */
	creq->data_len -= byteoffset;

	while (creq->data_len > 0) {
		if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
			if (err == 0 && copy_to_user(
				(void __user *)creq->vbuf.dst[dst_i].vaddr,
				(k_align_dst + byteoffset),
				creq->vbuf.dst[dst_i].len)) {
				err = -EFAULT;
				goto exit;
			}

			k_align_dst += creq->vbuf.dst[dst_i].len;
			creq->data_len -= creq->vbuf.dst[dst_i].len;
			dst_i++;
		} else {
			if (err == 0 && copy_to_user(
				(void __user *)creq->vbuf.dst[dst_i].vaddr,
				(k_align_dst + byteoffset),
				creq->data_len)) {
				err = -EFAULT;
				goto exit;
			}

			k_align_dst += creq->data_len;
			creq->vbuf.dst[dst_i].len -= creq->data_len;
			creq->vbuf.dst[dst_i].vaddr += creq->data_len;
			creq->data_len = 0;
		}
	}
	*di = dst_i;
exit:
	areq->cipher_req.creq.src = NULL;
	areq->cipher_req.creq.dst = NULL;
	return err;
}

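/*
 * Top-level virtual-buffer cipher path: requests larger than
 * QCE_MAX_OPER_DATA are processed in chunks through the bounce buffer,
 * with the caller's request structure saved and restored around the
 * loop.
 */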
static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
					struct qcedev_handle *handle)
{
	int err = 0;
	int di = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	uint32_t byteoffset = 0;
	int num_entries = 0;
	uint32_t total = 0;
	uint32_t len;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;
	uint32_t max_data_xfer;
	struct qcedev_cipher_op_req *saved_req;
	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;

	total = 0;

	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->cipher_op_req.byteoffset;
	k_buf_src = kmalloc(QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2,
				GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;
	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
							CACHE_LINE_SIZE);
	max_data_xfer = QCE_MAX_OPER_DATA - byteoffset;

	saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
	if (saved_req == NULL) {
		kzfree(k_buf_src);
		return -ENOMEM;
	}
	memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));

	if (areq->cipher_op_req.data_len > max_data_xfer) {
		struct qcedev_cipher_op_req req;

		/* save the original req structure */
		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (creq->vbuf.src[i].len > max_data_xfer) {
				creq->vbuf.src[0].len = max_data_xfer;
				if (i > 0) {
					creq->vbuf.src[0].vaddr =
						creq->vbuf.src[i].vaddr;
				}

				creq->data_len = max_data_xfer;
				creq->entries = 1;

				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
						&di, handle, k_align_src);
				if (err < 0) {
					kzfree(k_buf_src);
					kzfree(saved_req);
					return err;
				}

				creq->vbuf.src[i].len = req.vbuf.src[i].len -
							max_data_xfer;
				creq->vbuf.src[i].vaddr =
						req.vbuf.src[i].vaddr +
						max_data_xfer;
				req.vbuf.src[i].vaddr =
						creq->vbuf.src[i].vaddr;
				req.vbuf.src[i].len = creq->vbuf.src[i].len;

			} else {
				total = areq->cipher_op_req.byteoffset;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + creq->vbuf.src[j].len)
							>= max_data_xfer) {
						creq->vbuf.src[j].len =
							max_data_xfer - total;
						total = max_data_xfer;
						break;
					}
					total += creq->vbuf.src[j].len;
				}

				creq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						creq->vbuf.src[k].len =
							creq->vbuf.src[i+k].len;
						creq->vbuf.src[k].vaddr =
							creq->vbuf.src[i+k].vaddr;
					}
				creq->entries = num_entries;

				i = j;
				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
						&di, handle, k_align_src);
				if (err < 0) {
					kzfree(k_buf_src);
					kzfree(saved_req);
					return err;
				}

				num_entries = 0;
				areq->cipher_op_req.byteoffset = 0;

				creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr
					+ creq->vbuf.src[i].len;
				creq->vbuf.src[i].len = req.vbuf.src[i].len -
							creq->vbuf.src[i].len;

				req.vbuf.src[i].vaddr =
						creq->vbuf.src[i].vaddr;
				req.vbuf.src[i].len = creq->vbuf.src[i].len;

				if (creq->vbuf.src[i].len == 0)
					i++;
			}

			areq->cipher_op_req.byteoffset = 0;
			max_data_xfer = QCE_MAX_OPER_DATA;
			byteoffset = 0;

		} /* end of while ((i < req.entries) && (err == 0)) */
	} else
		err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, handle,
							k_align_src);

	/* Restore the original req structure */
	for (i = 0; i < saved_req->entries; i++) {
		creq->vbuf.src[i].len = saved_req->vbuf.src[i].len;
		creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr;
	}
	for (len = 0, i = 0; len < saved_req->data_len; i++) {
		creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len;
		creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr;
		len += saved_req->vbuf.dst[i].len;
	}
	creq->entries = saved_req->entries;
	creq->data_len = saved_req->data_len;
	creq->byteoffset = saved_req->byteoffset;

	kzfree(saved_req);
	kzfree(k_buf_src);
	return err;
}

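/*
 * Validate the key portion of an AES request: an all-zero, zero-length
 * key is only legal for the no-key ops or when the target supports a
 * hardware-backed key; otherwise the length must match a supported AES
 * key size (doubled for XTS).
 */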
static int qcedev_check_cipher_key(struct qcedev_cipher_op_req *req,
						struct qcedev_control *podev)
{
	/* if intending to use HW key make sure key fields are set
	 * correctly and HW key is indeed supported in target
	 */
	if (req->encklen == 0) {
		int i;

		for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
			if (req->enckey[i]) {
				pr_err("%s: Invalid key: non-zero key input\n",
								__func__);
				goto error;
			}
		}
		if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
			(req->op != QCEDEV_OPER_DEC_NO_KEY))
			if (!podev->platform_support.hw_key_support) {
				pr_err("%s: Invalid op %d\n", __func__,
						(uint32_t)req->op);
				goto error;
			}
	} else {
		if (req->encklen == QCEDEV_AES_KEY_192) {
			if (!podev->ce_support.aes_key_192) {
				pr_err("%s: AES-192 not supported\n", __func__);
				goto error;
			}
		} else {
			/* if not using HW key make sure key
			 * length is valid
			 */
			if (req->mode == QCEDEV_AES_MODE_XTS) {
				if ((req->encklen != QCEDEV_AES_KEY_128*2) &&
					(req->encklen != QCEDEV_AES_KEY_256*2)) {
					pr_err("%s: unsupported key size: %d\n",
							__func__, req->encklen);
					goto error;
				}
			} else {
				if ((req->encklen != QCEDEV_AES_KEY_128) &&
					(req->encklen != QCEDEV_AES_KEY_256)) {
					pr_err("%s: unsupported key size %d\n",
							__func__, req->encklen);
					goto error;
				}
			}
		}
	}
	return 0;
error:
	return -EINVAL;
}

static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
						struct qcedev_control *podev)
{
	uint32_t total = 0;
	uint32_t i;

	if (req->use_pmem) {
		pr_err("%s: Use of PMEM is not supported\n", __func__);
		goto error;
	}
	if ((req->entries == 0) || (req->data_len == 0) ||
			(req->entries > QCEDEV_MAX_BUFFERS)) {
		pr_err("%s: Invalid cipher length/entries\n", __func__);
		goto error;
	}
	if ((req->alg >= QCEDEV_ALG_LAST) ||
		(req->mode >= QCEDEV_AES_DES_MODE_LAST)) {
		pr_err("%s: Invalid algorithm %d\n", __func__,
						(uint32_t)req->alg);
		goto error;
	}
	if ((req->mode == QCEDEV_AES_MODE_XTS) &&
				(!podev->ce_support.aes_xts)) {
		pr_err("%s: XTS algorithm is not supported\n", __func__);
		goto error;
	}
	if (req->alg == QCEDEV_ALG_AES) {
		if (qcedev_check_cipher_key(req, podev))
			goto error;
	}
	/* if using a byteoffset, make sure it is CTR mode using vbuf */
	if (req->byteoffset) {
		if (req->mode != QCEDEV_AES_MODE_CTR) {
			pr_err("%s: Operation on byte offset not supported\n",
								__func__);
			goto error;
		}
		if (req->byteoffset >= AES_CE_BLOCK_SIZE) {
			pr_err("%s: Invalid byte offset\n", __func__);
			goto error;
		}
		total = req->byteoffset;
		for (i = 0; i < req->entries; i++) {
			if (total > U32_MAX - req->vbuf.src[i].len) {
				pr_err("%s:Integer overflow on total src len\n",
					__func__);
				goto error;
			}
			total += req->vbuf.src[i].len;
		}
	}

	if (req->data_len < req->byteoffset) {
		pr_err("%s: req data length %u is less than byteoffset %u\n",
				__func__, req->data_len, req->byteoffset);
		goto error;
	}

	/* Ensure IV size */
	if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
		pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
		goto error;
	}

	/* Ensure Key size */
	if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
		pr_err("%s: Klen is not correct: %u\n", __func__, req->encklen);
		goto error;
	}

	/* Ensure zero ivlen for ECB mode */
	if (req->ivlen > 0) {
		if ((req->mode == QCEDEV_AES_MODE_ECB) ||
				(req->mode == QCEDEV_DES_MODE_ECB)) {
			pr_err("%s: Expecting a zero length IV\n", __func__);
			goto error;
		}
	} else {
		if ((req->mode != QCEDEV_AES_MODE_ECB) &&
				(req->mode != QCEDEV_DES_MODE_ECB)) {
			pr_err("%s: Expecting a non-zero length IV\n",
								__func__);
			goto error;
		}
	}
	/* Check for sum of all dst length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
			pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
				__func__, i, req->vbuf.dst[i].len);
			goto error;
		}
		if (req->vbuf.dst[i].len >= U32_MAX - total) {
			pr_err("%s: Integer overflow on total req dst vbuf length\n",
				__func__);
			goto error;
		}
		total += req->vbuf.dst[i].len;
	}
	if (total != req->data_len) {
		pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
			__func__, i, total, req->data_len);
		goto error;
	}
	/* Check for sum of all src length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
			pr_err("%s: NULL req src vbuf[%d] with length %d\n",
				__func__, i, req->vbuf.src[i].len);
			goto error;
		}
		if (req->vbuf.src[i].len > U32_MAX - total) {
			pr_err("%s: Integer overflow on total req src vbuf length\n",
				__func__);
			goto error;
		}
		total += req->vbuf.src[i].len;
	}
	if (total != req->data_len) {
		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
			__func__, total, req->data_len);
		goto error;
	}
	return 0;
error:
	return -EINVAL;
}

static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
						struct qcedev_control *podev)
{
	uint32_t total = 0;
	uint32_t i;

	if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
				(!podev->ce_support.cmac)) {
		pr_err("%s: CMAC not supported\n", __func__);
		goto sha_error;
	}
	if ((!req->entries) || (req->entries > QCEDEV_MAX_BUFFERS)) {
		pr_err("%s: Invalid num entries (%d)\n",
						__func__, req->entries);
		goto sha_error;
	}

	if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST) {
		pr_err("%s: Invalid algorithm (%d)\n", __func__, req->alg);
		goto sha_error;
	}
	if ((req->alg == QCEDEV_ALG_SHA1_HMAC) ||
			(req->alg == QCEDEV_ALG_SHA256_HMAC)) {
		if (req->authkey == NULL) {
			pr_err("%s: Invalid authkey pointer\n", __func__);
			goto sha_error;
		}
		if (req->authklen <= 0) {
			pr_err("%s: Invalid authkey length (%d)\n",
						__func__, req->authklen);
			goto sha_error;
		}
	}

	if (req->alg == QCEDEV_ALG_AES_CMAC) {
		if ((req->authklen != QCEDEV_AES_KEY_128) &&
					(req->authklen != QCEDEV_AES_KEY_256)) {
			pr_err("%s: unsupported key length\n", __func__);
			goto sha_error;
		}
	}

	/* Check for sum of all src length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (req->data[i].len > U32_MAX - total) {
			pr_err("%s: Integer overflow on total req buf length\n",
				__func__);
			goto sha_error;
		}
		total += req->data[i].len;
	}

	if (total != req->data_len) {
		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
			__func__, total, req->data_len);
		goto sha_error;
	}
	return 0;
sha_error:
	return -EINVAL;
}

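/*
 * Main ioctl dispatcher. Every command copies its op structure in from
 * userspace, validates it against the target's capabilities, runs the
 * operation synchronously, and copies the results back.
 *
 * Illustrative userspace sequence (sketch only; field setup and error
 * handling omitted):
 *
 *	int fd = open("/dev/qce", O_RDWR);
 *	struct qcedev_cipher_op_req req = { ... };
 *	ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req);
 *
 * and for hashing: QCEDEV_IOCTL_SHA_INIT_REQ, then one or more
 * QCEDEV_IOCTL_SHA_UPDATE_REQ calls, then QCEDEV_IOCTL_SHA_FINAL_REQ.
 */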
static inline long qcedev_ioctl(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req *qcedev_areq;
	struct qcedev_stat *pstat;

	qcedev_areq = kzalloc(sizeof(struct qcedev_async_req), GFP_KERNEL);
	if (!qcedev_areq)
		return -ENOMEM;

	handle = file->private_data;
	podev = handle->cntl;
	qcedev_areq->handle = handle;
	if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
		pr_err("%s: invalid handle %pK\n",
			__func__, podev);
		err = -ENOENT;
		goto exit_free_qcedev_areq;
	}

	/* Verify user arguments. */
	if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC) {
		err = -ENOTTY;
		goto exit_free_qcedev_areq;
	}

	init_completion(&qcedev_areq->complete);
	pstat = &_qcedev_stat;

	switch (cmd) {
	case QCEDEV_IOCTL_ENC_REQ:
	case QCEDEV_IOCTL_DEC_REQ:
		if (copy_from_user(&qcedev_areq->cipher_op_req,
				(void __user *)arg,
				sizeof(struct qcedev_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_CIPHER;

		if (qcedev_check_cipher_params(&qcedev_areq->cipher_op_req,
				podev)) {
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}

		err = qcedev_vbuf_ablk_cipher(qcedev_areq, handle);
		if (err)
			goto exit_free_qcedev_areq;
		if (copy_to_user((void __user *)arg,
					&qcedev_areq->cipher_op_req,
					sizeof(struct qcedev_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		break;

	case QCEDEV_IOCTL_SHA_INIT_REQ:
		{
		struct scatterlist sg_src;

		if (copy_from_user(&qcedev_areq->sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
		err = qcedev_hash_init(qcedev_areq, handle, &sg_src);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		handle->sha_ctxt.init_done = true;
		}
		break;
	case QCEDEV_IOCTL_GET_CMAC_REQ:
		if (!podev->ce_support.cmac) {
			err = -ENOTTY;
			goto exit_free_qcedev_areq;
		}
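		/* fall through */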
	case QCEDEV_IOCTL_SHA_UPDATE_REQ:
		{
		struct scatterlist sg_src;

		if (copy_from_user(&qcedev_areq->sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;

		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
			err = qcedev_hash_cmac(qcedev_areq, handle, &sg_src);
			if (err) {
				mutex_unlock(&hash_access_lock);
				goto exit_free_qcedev_areq;
			}
		} else {
			if (handle->sha_ctxt.init_done == false) {
				pr_err("%s Init was not called\n", __func__);
				mutex_unlock(&hash_access_lock);
				err = -EINVAL;
				goto exit_free_qcedev_areq;
			}
			err = qcedev_hash_update(qcedev_areq, handle, &sg_src);
			if (err) {
				mutex_unlock(&hash_access_lock);
				goto exit_free_qcedev_areq;
			}
		}

		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
			pr_err("Invalid sha_ctxt.diglen %d\n",
					handle->sha_ctxt.diglen);
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		memcpy(&qcedev_areq->sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
				sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		}
		break;

	case QCEDEV_IOCTL_SHA_FINAL_REQ:

		if (handle->sha_ctxt.init_done == false) {
			pr_err("%s Init was not called\n", __func__);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		if (copy_from_user(&qcedev_areq->sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
		err = qcedev_hash_final(qcedev_areq, handle);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
			pr_err("Invalid sha_ctxt.diglen %d\n",
					handle->sha_ctxt.diglen);
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->sha_op_req.diglen = handle->sha_ctxt.diglen;
		memcpy(&qcedev_areq->sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
				sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		handle->sha_ctxt.init_done = false;
		break;

	case QCEDEV_IOCTL_GET_SHA_REQ:
		{
		struct scatterlist sg_src;

		if (copy_from_user(&qcedev_areq->sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
		qcedev_hash_init(qcedev_areq, handle, &sg_src);
		err = qcedev_hash_update(qcedev_areq, handle, &sg_src);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		err = qcedev_hash_final(qcedev_areq, handle);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
			pr_err("Invalid sha_ctxt.diglen %d\n",
					handle->sha_ctxt.diglen);
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->sha_op_req.diglen = handle->sha_ctxt.diglen;
		memcpy(&qcedev_areq->sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
				sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		}
		break;

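	/*
	 * Buffer map/unmap requests: ion buffer fds supplied by
	 * userspace are mapped through the SMMU context banks, and the
	 * resulting device addresses are handed back in buf_vaddr[] for
	 * use in later crypto requests.
	 */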
	case QCEDEV_IOCTL_MAP_BUF_REQ:
		{
		unsigned long long vaddr = 0;
		struct qcedev_map_buf_req map_buf = { {0} };
		int i = 0;

		if (copy_from_user(&map_buf,
				(void __user *)arg, sizeof(map_buf))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}

		if (map_buf.num_fds > QCEDEV_MAX_BUFFERS) {
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}

		for (i = 0; i < map_buf.num_fds; i++) {
			err = qcedev_check_and_map_buffer(handle,
					map_buf.fd[i],
					map_buf.fd_offset[i],
					map_buf.fd_size[i],
					&vaddr);
			if (err) {
				pr_err(
					"%s: err: failed to map fd(%d) - %d\n",
					__func__, map_buf.fd[i], err);
				goto exit_free_qcedev_areq;
			}
			map_buf.buf_vaddr[i] = vaddr;
			pr_info("%s: info: vaddr = %llx\n",
					__func__, vaddr);
		}

		if (copy_to_user((void __user *)arg, &map_buf,
				sizeof(map_buf))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		break;
		}

	case QCEDEV_IOCTL_UNMAP_BUF_REQ:
		{
		struct qcedev_unmap_buf_req unmap_buf = { { 0 } };
		int i = 0;

		if (copy_from_user(&unmap_buf,
				(void __user *)arg, sizeof(unmap_buf))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}

		for (i = 0; i < unmap_buf.num_fds; i++) {
			err = qcedev_check_and_unmap_buffer(handle,
					unmap_buf.fd[i]);
			if (err) {
				pr_err(
					"%s: err: failed to unmap fd(%d) - %d\n",
					__func__,
					unmap_buf.fd[i], err);
				goto exit_free_qcedev_areq;
			}
		}
		break;
		}

	default:
		err = -ENOTTY;
		goto exit_free_qcedev_areq;
	}

exit_free_qcedev_areq:
	kfree(qcedev_areq);
	return err;
}
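
/*
 * Userspace usage sketch (illustrative only; assumes the misc device is
 * exposed as /dev/qce and that <linux/qcedev.h> provides the uapi used
 * here). A one-shot SHA-256 digest via QCEDEV_IOCTL_GET_SHA_REQ:
 *
 *	struct qcedev_sha_op_req req = {0};
 *	int fd = open("/dev/qce", O_RDWR);
 *
 *	req.alg = QCEDEV_ALG_SHA256;
 *	req.entries = 1;
 *	req.data[0].vaddr = (uint8_t *)msg;
 *	req.data[0].len = msg_len;
 *	req.data_len = msg_len;
 *	if (ioctl(fd, QCEDEV_IOCTL_GET_SHA_REQ, &req) == 0)
 *		memcpy(out, req.digest, req.diglen);
 */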
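/*
 * qcedev_probe_device() - bring up the crypto engine: vote for bus
 * bandwidth, open the QCE core, register the misc device node, create
 * the ion memory client, and populate the SMMU context-bank child
 * devices from the device tree.
 */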
static int qcedev_probe_device(struct platform_device *pdev)
{
	void *handle = NULL;
	int rc = 0;
	struct qcedev_control *podev;
	struct msm_ce_hw_support *platform_support;

	podev = &qce_dev[0];

	podev->high_bw_req_count = 0;
	INIT_LIST_HEAD(&podev->ready_commands);
	podev->active_command = NULL;

	INIT_LIST_HEAD(&podev->context_banks);

	spin_lock_init(&podev->lock);

	tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);

	podev->platform_support.bus_scale_table = (struct msm_bus_scale_pdata *)
					msm_bus_cl_get_pdata(pdev);
	if (!podev->platform_support.bus_scale_table) {
		pr_err("bus_scale_table is NULL\n");
		return -ENODATA;
	}
	podev->bus_scale_handle = msm_bus_scale_register_client(
				(struct msm_bus_scale_pdata *)
				podev->platform_support.bus_scale_table);
	if (!podev->bus_scale_handle) {
		pr_err("%s not able to get bus scale\n", __func__);
		return -ENOMEM;
	}

	rc = msm_bus_scale_client_update_request(podev->bus_scale_handle, 1);
	if (rc) {
		pr_err("%s Unable to set to high bandwidth\n", __func__);
		goto exit_unregister_bus_scale;
	}
	handle = qce_open(pdev, &rc);
	if (handle == NULL) {
		rc = -ENODEV;
		goto exit_scale_busbandwidth;
	}
	rc = msm_bus_scale_client_update_request(podev->bus_scale_handle, 0);
	if (rc) {
		pr_err("%s Unable to set to low bandwidth\n", __func__);
		goto exit_qce_close;
	}

	podev->qce = handle;
	podev->pdev = pdev;
	platform_set_drvdata(pdev, podev);

	qce_hw_support(podev->qce, &podev->ce_support);
	if (podev->ce_support.bam) {
		podev->platform_support.ce_shared = 0;
		podev->platform_support.shared_ce_resource = 0;
		podev->platform_support.hw_key_support =
				podev->ce_support.hw_key;
		podev->platform_support.sha_hmac = 1;
	} else {
		platform_support =
			(struct msm_ce_hw_support *)pdev->dev.platform_data;
		podev->platform_support.ce_shared = platform_support->ce_shared;
		podev->platform_support.shared_ce_resource =
				platform_support->shared_ce_resource;
		podev->platform_support.hw_key_support =
				platform_support->hw_key_support;
		podev->platform_support.sha_hmac = platform_support->sha_hmac;
	}

	rc = misc_register(&podev->miscdevice);
	if (rc) {
		pr_err("%s: err: register failed for misc: %d\n", __func__, rc);
		goto exit_qce_close;
	}

	podev->mem_client = qcedev_mem_new_client(MEM_ION);
	if (!podev->mem_client) {
		pr_err("%s: err: qcedev_mem_new_client failed\n", __func__);
		rc = -ENOMEM;
		goto err;
	}

	rc = of_platform_populate(pdev->dev.of_node, qcedev_match,
			NULL, &pdev->dev);
	if (rc) {
		pr_err("%s: err: of_platform_populate failed: %d\n",
			__func__, rc);
		goto err;
	}

	return 0;

err:
	if (podev->mem_client)
		qcedev_mem_delete_client(podev->mem_client);
	podev->mem_client = NULL;

	misc_deregister(&podev->miscdevice);
	if (msm_bus_scale_client_update_request(podev->bus_scale_handle, 1))
		pr_err("%s Unable to set high bandwidth\n", __func__);
exit_qce_close:
	if (handle)
		qce_close(handle);
exit_scale_busbandwidth:
	if (msm_bus_scale_client_update_request(podev->bus_scale_handle, 0))
		pr_err("%s Unable to set low bandwidth\n", __func__);
exit_unregister_bus_scale:
	if (podev->platform_support.bus_scale_table != NULL)
		msm_bus_scale_unregister_client(podev->bus_scale_handle);
	podev->bus_scale_handle = 0;
	platform_set_drvdata(pdev, NULL);
	podev->pdev = NULL;
	podev->qce = NULL;

	return rc;
}

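/*
 * Probe dispatch: a "qcom,qcedev" node is the crypto engine itself,
 * while "qcom,qcedev,context-bank" children (populated during the
 * device probe above) describe its SMMU context banks.
 */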
static int qcedev_probe(struct platform_device *pdev)
{
	if (of_device_is_compatible(pdev->dev.of_node, "qcom,qcedev"))
		return qcedev_probe_device(pdev);
	else if (of_device_is_compatible(pdev->dev.of_node,
			"qcom,qcedev,context-bank"))
		return qcedev_parse_context_bank(pdev);

	return -EINVAL;
}

static int qcedev_remove(struct platform_device *pdev)
{
	struct qcedev_control *podev;

	podev = platform_get_drvdata(pdev);
	if (!podev)
		return 0;
	if (msm_bus_scale_client_update_request(podev->bus_scale_handle, 1))
		pr_err("%s Unable to set high bandwidth\n", __func__);

	if (podev->qce)
		qce_close(podev->qce);

	if (msm_bus_scale_client_update_request(podev->bus_scale_handle, 0))
		pr_err("%s Unable to set low bandwidth\n", __func__);

	if (podev->platform_support.bus_scale_table != NULL)
		msm_bus_scale_unregister_client(podev->bus_scale_handle);

	if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
		misc_deregister(&podev->miscdevice);
	tasklet_kill(&podev->done_tasklet);
	return 0;
}

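/*
 * Suspend/resume gate clocks and bus bandwidth only while a client
 * still holds a high-bandwidth vote (high_bw_req_count != 0); the vote
 * count itself survives the transition so resume can restore the
 * clocks to the pre-suspend state.
 */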
static int qcedev_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct qcedev_control *podev;
	int ret;

	podev = platform_get_drvdata(pdev);

	if (!podev || !podev->platform_support.bus_scale_table)
		return 0;

	mutex_lock(&qcedev_sent_bw_req);
	if (podev->high_bw_req_count) {
		ret = qcedev_control_clocks(podev, false);
		if (ret)
			goto suspend_exit;
	}

suspend_exit:
	mutex_unlock(&qcedev_sent_bw_req);
	return 0;
}

static int qcedev_resume(struct platform_device *pdev)
{
	struct qcedev_control *podev;
	int ret;

	podev = platform_get_drvdata(pdev);

	if (!podev || !podev->platform_support.bus_scale_table)
		return 0;

	mutex_lock(&qcedev_sent_bw_req);
	if (podev->high_bw_req_count) {
		ret = qcedev_control_clocks(podev, true);
		if (ret)
			goto resume_exit;
	}

resume_exit:
	mutex_unlock(&qcedev_sent_bw_req);
	return 0;
}

static struct platform_driver qcedev_plat_driver = {
	.probe = qcedev_probe,
	.remove = qcedev_remove,
	.suspend = qcedev_suspend,
	.resume = qcedev_resume,
	.driver = {
		.name = "qce",
		.owner = THIS_MODULE,
		.of_match_table = qcedev_match,
	},
};

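/*
 * debugfs interface: the stats-1 node under the qcedev debugfs
 * directory prints the operation counters below; writing anything to
 * the node resets them.
 */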
static int _disp_stats(int id)
{
	struct qcedev_stat *pstat;
	int len = 0;

	pstat = &_qcedev_stat;
	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
			"\nQTI QCE dev driver %d Statistics:\n",
			id + 1);

	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" Encryption operation success : %d\n",
			pstat->qcedev_enc_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" Encryption operation fail : %d\n",
			pstat->qcedev_enc_fail);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" Decryption operation success : %d\n",
			pstat->qcedev_dec_success);

	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" Decryption operation fail : %d\n",
			pstat->qcedev_dec_fail);

	return len;
}

static int _debug_stats_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static ssize_t _debug_stats_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	ssize_t rc = -EINVAL;
	int qcedev = *((int *) file->private_data);
	int len;

	len = _disp_stats(qcedev);

	if (len <= count)
		rc = simple_read_from_buffer((void __user *) buf, len,
			ppos, (void *) _debug_read_buf, len);
	return rc;
}

static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
	memset((char *)&_qcedev_stat, 0, sizeof(struct qcedev_stat));
	return count;
}

static const struct file_operations _debug_stats_ops = {
	.open = _debug_stats_open,
	.read = _debug_stats_read,
	.write = _debug_stats_write,
};

static int _qcedev_debug_init(void)
{
	int rc;
	char name[DEBUG_MAX_FNAME];
	struct dentry *dent;

	_debug_dent = debugfs_create_dir("qcedev", NULL);
	if (IS_ERR(_debug_dent)) {
		pr_err("qcedev debugfs_create_dir fail, error %ld\n",
				PTR_ERR(_debug_dent));
		return PTR_ERR(_debug_dent);
	}

	snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", 1);
	_debug_qcedev = 0;
	dent = debugfs_create_file(name, 0644, _debug_dent,
				&_debug_qcedev, &_debug_stats_ops);
	if (IS_ERR_OR_NULL(dent)) {
		pr_err("qcedev debugfs_create_file fail, error %ld\n",
				PTR_ERR(dent));
		rc = dent ? PTR_ERR(dent) : -ENOMEM;
		goto err;
	}
	return 0;
err:
	debugfs_remove_recursive(_debug_dent);
	return rc;
}

static int qcedev_init(void)
{
	int rc;

	rc = _qcedev_debug_init();
	if (rc)
		return rc;
	return platform_driver_register(&qcedev_plat_driver);
}

static void qcedev_exit(void)
{
	debugfs_remove_recursive(_debug_dent);
	platform_driver_unregister(&qcedev_plat_driver);
}

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI DEV Crypto driver");

module_init(qcedev_init);
module_exit(qcedev_exit);