/*
 * QTI Crypto Engine driver.
 *
 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) "QCE50: %s: " fmt, __func__

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/bitops.h>
#include <linux/clk/qcom.h>
#include <linux/qcrypto.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <soc/qcom/socinfo.h>
#include <asm/dma-iommu.h>
#include <linux/iommu.h>

#include "qce.h"
#include "qce50.h"
#include "qcryptohw_50.h"
#include "qce_ota.h"

#define CRYPTO_SMMU_IOVA_START 0x10000000
#define CRYPTO_SMMU_IOVA_SIZE 0x40000000

#define CRYPTO_CONFIG_RESET 0xE01EF
#define MAX_SPS_DESC_FIFO_SIZE 0xfff0
#define QCE_MAX_NUM_DSCR 0x200
#define QCE_SECTOR_SIZE 0x200
#define CE_CLK_100MHZ 100000000
#define CE_CLK_DIV 1000000

#define CRYPTO_CORE_MAJOR_VER_NUM 0x05
#define CRYPTO_CORE_MINOR_VER_NUM 0x03
#define CRYPTO_CORE_STEP_VER_NUM 0x1

#define CRYPTO_REQ_USER_PAT 0xdead0000

static DEFINE_MUTEX(bam_register_lock);
static DEFINE_MUTEX(qce_iomap_mutex);

struct bam_registration_info {
	struct list_head qlist;
	unsigned long handle;
	uint32_t cnt;
	uint32_t bam_mem;
	void __iomem *bam_iobase;
	bool support_cmd_dscr;
};
static LIST_HEAD(qce50_bam_list);

/* Used to determine the mode */
#define MAX_BUNCH_MODE_REQ 2
/* Max number of requests supported */
#define MAX_QCE_BAM_REQ 8
/* Interrupt flag will be set for every SET_INTR_AT_REQ request */
#define SET_INTR_AT_REQ (MAX_QCE_BAM_REQ / 2)
/* To create extra request space to hold dummy request */
#define MAX_QCE_BAM_REQ_WITH_DUMMY_REQ (MAX_QCE_BAM_REQ + 1)
/* Allocate the memory for MAX_QCE_BAM_REQ + 1 (for the dummy request) */
#define MAX_QCE_ALLOC_BAM_REQ MAX_QCE_BAM_REQ_WITH_DUMMY_REQ
/* QCE driver modes */
#define IN_INTERRUPT_MODE 0
#define IN_BUNCH_MODE 1
/* Dummy request data length */
#define DUMMY_REQ_DATA_LEN 64
/* Delay timer to expire when in bunch mode */
#define DELAY_IN_JIFFIES 5
/* Index pointing to the dummy request */
#define DUMMY_REQ_INDEX MAX_QCE_BAM_REQ
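
/*
 * Illustration (derived from the constants above, not additional driver
 * logic): with MAX_QCE_BAM_REQ = 8 and SET_INTR_AT_REQ = 4, bunch mode
 * requests a completion interrupt on every 4th request instead of on
 * each one, and slot DUMMY_REQ_INDEX (8) in the request array is
 * reserved for the internally generated dummy request.
 */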

#define TOTAL_IOVEC_SPACE_PER_PIPE (QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec))

enum qce_owner {
	QCE_OWNER_NONE = 0,
	QCE_OWNER_CLIENT = 1,
	QCE_OWNER_TIMEOUT = 2
};

struct dummy_request {
	struct qce_sha_req sreq;
	struct scatterlist sg;
	struct ahash_request areq;
};

/*
 * CE HW device structure.
 * Each engine has an instance of the structure.
 * Each engine can only handle one crypto operation at a time. It is up to
 * the sw above to ensure single threading of operation on an engine.
 */
struct qce_device {
	struct device *pdev;		/* Handle to platform_device structure */
	struct bam_registration_info *pbam;

	unsigned char *coh_vmem;	/* Allocated coherent virtual memory */
	dma_addr_t coh_pmem;		/* Allocated coherent physical memory */
	int memsize;			/* Memory allocated */
	unsigned char *iovec_vmem;	/* Allocated iovec virtual memory */
	int iovec_memsize;		/* Memory allocated */
	uint32_t bam_mem;		/* bam physical address, from DT */
	uint32_t bam_mem_size;		/* bam io size, from DT */
	int is_shared;			/* CE HW is shared */
	bool support_cmd_dscr;
	bool support_hw_key;
	bool support_clk_mgmt_sus_res;
	bool support_only_core_src_clk;
	bool request_bw_before_clk;

	void __iomem *iobase;		/* Virtual io base of CE HW */
	unsigned int phy_iobase;	/* Physical io base of CE HW */

	struct clk *ce_core_src_clk;	/* Handle to CE src clk */
	struct clk *ce_core_clk;	/* Handle to CE clk */
	struct clk *ce_clk;		/* Handle to CE clk */
	struct clk *ce_bus_clk;		/* Handle to CE AXI clk */
	bool no_get_around;
	bool no_ccm_mac_status_get_around;
	unsigned int ce_opp_freq_hz;
	bool use_sw_aes_cbc_ecb_ctr_algo;
	bool use_sw_aead_algo;
	bool use_sw_aes_xts_algo;
	bool use_sw_ahash_algo;
	bool use_sw_hmac_algo;
	bool use_sw_aes_ccm_algo;
	uint32_t engines_avail;
	struct qce_ce_cfg_reg_setting reg;
	struct ce_bam_info ce_bam_info;
	struct ce_request_info ce_request_info[MAX_QCE_ALLOC_BAM_REQ];
	unsigned int ce_request_index;
	enum qce_owner owner;
	atomic_t no_of_queued_req;
	struct timer_list timer;
	struct dummy_request dummyreq;
	unsigned int mode;
	unsigned int intr_cadence;
	unsigned int dev_no;
	struct qce_driver_stats qce_stats;
	atomic_t bunch_cmd_seq;
	atomic_t last_intr_seq;
	bool cadence_flag;
	uint8_t *dummyreq_in_buf;
	struct dma_iommu_mapping *smmu_mapping;
	bool bypass_s1_smmu;
};

static void print_notify_debug(struct sps_event_notify *notify);
static void _sps_producer_callback(struct sps_event_notify *notify);
static int qce_dummy_req(struct qce_device *pce_dev);

static int _qce50_disp_stats;

/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha1[] = {
	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
};

/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha256[] = {
	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
	0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
};

static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned int n;

	n = len / sizeof(uint32_t);
	for (; n > 0; n--) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000) |
				(((*(b+2)) << 8) & 0xff00) |
				(*(b+3) & 0xff);
		b += sizeof(uint32_t);
		iv++;
	}

	n = len % sizeof(uint32_t);
	if (n == 3) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000) |
				(((*(b+2)) << 8) & 0xff00);
	} else if (n == 2) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000);
	} else if (n == 1) {
		*iv = ((*b << 24) & 0xff000000);
	}
}
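
/*
 * Worked example (illustration only): for the byte stream
 * b = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07} and len = 7,
 * _byte_stream_to_net_words() emits iv[0] = 0x01020304 from the full
 * word, then packs the 3-byte tail big-endian with zero padding, so
 * iv[1] = 0x05060700.
 */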

static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned int i, j;
	unsigned char swap_iv[AES_IV_LENGTH];

	memset(swap_iv, 0, AES_IV_LENGTH);
	for (i = (AES_IV_LENGTH-len), j = len-1; i < AES_IV_LENGTH; i++, j--)
		swap_iv[i] = b[j];
	_byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
}

static int count_sg(struct scatterlist *sg, int nbytes)
{
	int i;

	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
		nbytes -= sg->length;
	return i;
}

static int qce_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; ++i) {
		dma_map_sg(dev, sg, 1, direction);
		sg = sg_next(sg);
	}

	return nents;
}

static int qce_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	int nents, enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; ++i) {
		dma_unmap_sg(dev, sg, 1, direction);
		sg = sg_next(sg);
	}

	return nents;
}
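
/*
 * Note (explanatory comment, not from the original source): the two
 * wrappers above map and unmap each scatterlist entry individually
 * (nents calls of dma_map_sg()/dma_unmap_sg() with a count of 1)
 * instead of passing the whole list at once. dma_map_sg() may legally
 * coalesce adjacent entries into fewer DMA segments; mapping one entry
 * at a time keeps a 1:1 correspondence between scatterlist entries and
 * the descriptors the driver later builds.
 */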

static int _probe_ce_engine(struct qce_device *pce_dev)
{
	unsigned int rev;
	unsigned int maj_rev, min_rev, step_rev;

	rev = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
	/*
	 * Ensure previous instructions (setting the GO register)
	 * were completed before checking the version.
	 */
	mb();
	maj_rev = (rev & CRYPTO_CORE_MAJOR_REV_MASK) >> CRYPTO_CORE_MAJOR_REV;
	min_rev = (rev & CRYPTO_CORE_MINOR_REV_MASK) >> CRYPTO_CORE_MINOR_REV;
	step_rev = (rev & CRYPTO_CORE_STEP_REV_MASK) >> CRYPTO_CORE_STEP_REV;

	if (maj_rev != CRYPTO_CORE_MAJOR_VER_NUM) {
		pr_err("Unsupported QTI crypto device at 0x%x, rev %d.%d.%d\n",
			pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
		return -EIO;
	}

	/*
	 * The majority of crypto HW bugs have been fixed in 5.3.0 and
	 * above. That allows a single sps transfer of consumer
	 * pipe, and a single sps transfer of producer pipe
	 * for a crypto request. no_get_around flag indicates this.
	 *
	 * In 5.3.1, the CCM MAC_FAILED in result dump issue is
	 * fixed. no_ccm_mac_status_get_around flag indicates this.
	 */
	pce_dev->no_get_around = (min_rev >=
			CRYPTO_CORE_MINOR_VER_NUM) ? true : false;
	if (min_rev > CRYPTO_CORE_MINOR_VER_NUM)
		pce_dev->no_ccm_mac_status_get_around = true;
	else if ((min_rev == CRYPTO_CORE_MINOR_VER_NUM) &&
			(step_rev >= CRYPTO_CORE_STEP_VER_NUM))
		pce_dev->no_ccm_mac_status_get_around = true;
	else
		pce_dev->no_ccm_mac_status_get_around = false;

	pce_dev->ce_bam_info.minor_version = min_rev;

	pce_dev->engines_avail = readl_relaxed(pce_dev->iobase +
					CRYPTO_ENGINES_AVAIL);
	dev_info(pce_dev->pdev, "QTI Crypto %d.%d.%d device found @0x%x\n",
			maj_rev, min_rev, step_rev, pce_dev->phy_iobase);

	pce_dev->ce_bam_info.ce_burst_size = MAX_CE_BAM_BURST_SIZE;

	dev_info(pce_dev->pdev, "CE device = %#x IO base, CE = %pK Consumer (IN) PIPE %d,\nProducer (OUT) PIPE %d IO base BAM = %pK\nBAM IRQ %d Engines Availability = %#x\n",
			pce_dev->ce_bam_info.ce_device, pce_dev->iobase,
			pce_dev->ce_bam_info.dest_pipe_index,
			pce_dev->ce_bam_info.src_pipe_index,
			pce_dev->ce_bam_info.bam_iobase,
			pce_dev->ce_bam_info.bam_irq, pce_dev->engines_avail);
	return 0;
}
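
/*
 * Illustration (assumed register layout, consistent with the masks used
 * above): CRYPTO_VERSION_REG packs major/minor/step revision fields, so
 * a raw value whose fields decode to maj_rev = 5, min_rev = 3 and
 * step_rev = 1 identifies a 5.3.1 core, which sets both no_get_around
 * and no_ccm_mac_status_get_around above.
 */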

static struct qce_cmdlist_info *_ce_get_hash_cmdlistinfo(
			struct qce_device *pce_dev,
			int req_info, struct qce_sha_req *sreq)
{
	struct ce_sps_data *pce_sps_data;
	struct qce_cmdlistptr_ops *cmdlistptr;

	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
	cmdlistptr = &pce_sps_data->cmdlistptr;
	switch (sreq->alg) {
	case QCE_HASH_SHA1:
		return &cmdlistptr->auth_sha1;
	case QCE_HASH_SHA256:
		return &cmdlistptr->auth_sha256;
	case QCE_HASH_SHA1_HMAC:
		return &cmdlistptr->auth_sha1_hmac;
	case QCE_HASH_SHA256_HMAC:
		return &cmdlistptr->auth_sha256_hmac;
	case QCE_HASH_AES_CMAC:
		if (sreq->authklen == AES128_KEY_SIZE)
			return &cmdlistptr->auth_aes_128_cmac;
		return &cmdlistptr->auth_aes_256_cmac;
	default:
		return NULL;
	}
	return NULL;
}

static int _ce_setup_hash(struct qce_device *pce_dev,
				struct qce_sha_req *sreq,
				struct qce_cmdlist_info *cmdlistinfo)
{
	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
	uint32_t diglen;
	int i;
	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	bool sha1 = false;
	struct sps_command_element *pce = NULL;
	bool use_hw_key = false;
	bool use_pipe_key = false;
	uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
	uint32_t auth_cfg;

	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC) ||
			(sreq->alg == QCE_HASH_AES_CMAC)) {

		/* no more check for null key. use flag */
		if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY)
						== QCRYPTO_CTX_USE_HW_KEY)
			use_hw_key = true;
		else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
						QCRYPTO_CTX_USE_PIPE_KEY)
			use_pipe_key = true;
		pce = cmdlistinfo->go_proc;
		if (use_hw_key == true) {
			pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
							pce_dev->phy_iobase);
		} else {
			pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
							pce_dev->phy_iobase);
			pce = cmdlistinfo->auth_key;
			if (use_pipe_key == false) {
				_byte_stream_to_net_words(mackey32,
						sreq->authkey,
						sreq->authklen);
				for (i = 0; i < authk_size_in_word; i++, pce++)
					pce->data = mackey32[i];
			}
		}
	}

	if (sreq->alg == QCE_HASH_AES_CMAC)
		goto go_proc;

	/* if not the last, the size has to be on the block boundary */
	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
		return -EIO;

	switch (sreq->alg) {
	case QCE_HASH_SHA1:
	case QCE_HASH_SHA1_HMAC:
		diglen = SHA1_DIGEST_SIZE;
		sha1 = true;
		break;
	case QCE_HASH_SHA256:
	case QCE_HASH_SHA256_HMAC:
		diglen = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
	if (sreq->first_blk) {
		if (sha1) {
			for (i = 0; i < 5; i++)
				auth32[i] = _std_init_vector_sha1[i];
		} else {
			for (i = 0; i < 8; i++)
				auth32[i] = _std_init_vector_sha256[i];
		}
	} else {
		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
	}

	pce = cmdlistinfo->auth_iv;
	for (i = 0; i < 5; i++, pce++)
		pce->data = auth32[i];

	if ((sreq->alg == QCE_HASH_SHA256) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
		for (i = 5; i < 8; i++, pce++)
			pce->data = auth32[i];
	}

	/* write auth_bytecnt 0/1, start with 0 */
	pce = cmdlistinfo->auth_bytecount;
	for (i = 0; i < 2; i++, pce++)
		pce->data = sreq->auth_data[i];

	/* Set/reset last bit in CFG register */
	pce = cmdlistinfo->auth_seg_cfg;
	auth_cfg = pce->data & ~(1 << CRYPTO_LAST |
				1 << CRYPTO_FIRST |
				1 << CRYPTO_USE_PIPE_KEY_AUTH |
				1 << CRYPTO_USE_HW_KEY_AUTH);
	if (sreq->last_blk)
		auth_cfg |= 1 << CRYPTO_LAST;
	if (sreq->first_blk)
		auth_cfg |= 1 << CRYPTO_FIRST;
	if (use_hw_key)
		auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
	if (use_pipe_key)
		auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
	pce->data = auth_cfg;
go_proc:
	/* write auth seg size */
	pce = cmdlistinfo->auth_seg_size;
	pce->data = sreq->size;

	pce = cmdlistinfo->encr_seg_cfg;
	pce->data = 0;

	/* write auth seg start */
	pce = cmdlistinfo->auth_seg_start;
	pce->data = 0;

	/* write seg size */
	pce = cmdlistinfo->seg_size;

	/* always ensure there is input data. ZLT does not work for bam-ndp */
	if (sreq->size)
		pce->data = sreq->size;
	else
		pce->data = pce_dev->ce_bam_info.ce_burst_size;

	return 0;
}
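
/*
 * Usage sketch (hypothetical values, illustration only): to hash one
 * 64-byte block of a longer SHA-256 stream, a client would submit a
 * struct qce_sha_req with alg = QCE_HASH_SHA256, size = 64,
 * first_blk = 1, last_blk = 0 and auth_data[] zeroed; the code above
 * then seeds auth_iv with the FIPS 180-2 initial vector. On follow-up
 * calls (first_blk = 0) the running digest is reloaded from
 * sreq->digest and the byte counters from auth_data[].
 */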

static struct qce_cmdlist_info *_ce_get_aead_cmdlistinfo(
			struct qce_device *pce_dev,
			int req_info, struct qce_req *creq)
{
	struct ce_sps_data *pce_sps_data;
	struct qce_cmdlistptr_ops *cmdlistptr;

	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
	cmdlistptr = &pce_sps_data->cmdlistptr;
	switch (creq->alg) {
	case CIPHER_ALG_DES:
		switch (creq->mode) {
		case QCE_MODE_CBC:
			if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
				return &cmdlistptr->aead_hmac_sha1_cbc_des;
			else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
				return &cmdlistptr->aead_hmac_sha256_cbc_des;
			else
				return NULL;
			break;
		default:
			return NULL;
		}
		break;
	case CIPHER_ALG_3DES:
		switch (creq->mode) {
		case QCE_MODE_CBC:
			if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
				return &cmdlistptr->aead_hmac_sha1_cbc_3des;
			else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
				return &cmdlistptr->aead_hmac_sha256_cbc_3des;
			else
				return NULL;
			break;
		default:
			return NULL;
		}
		break;
	case CIPHER_ALG_AES:
		switch (creq->mode) {
		case QCE_MODE_CBC:
			if (creq->encklen == AES128_KEY_SIZE) {
				if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
					return &cmdlistptr->
						aead_hmac_sha1_cbc_aes_128;
				else if (creq->auth_alg ==
						QCE_HASH_SHA256_HMAC)
					return &cmdlistptr->
						aead_hmac_sha256_cbc_aes_128;
				else
					return NULL;
			} else if (creq->encklen == AES256_KEY_SIZE) {
				if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
					return &cmdlistptr->
						aead_hmac_sha1_cbc_aes_256;
				else if (creq->auth_alg ==
						QCE_HASH_SHA256_HMAC)
					return &cmdlistptr->
						aead_hmac_sha256_cbc_aes_256;
				else
					return NULL;
			} else
				return NULL;
			break;
		default:
			return NULL;
		}
		break;

	default:
		return NULL;
	}
	return NULL;
}

static int _ce_setup_aead(struct qce_device *pce_dev, struct qce_req *q_req,
		uint32_t totallen_in, uint32_t coffset,
		struct qce_cmdlist_info *cmdlistinfo)
{
	int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
	int i;
	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
	struct sps_command_element *pce;
	uint32_t a_cfg;
	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
	uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
	uint32_t enck_size_in_word = 0;
	uint32_t enciv_in_word;
	uint32_t key_size;
	uint32_t encr_cfg = 0;
	uint32_t ivsize = q_req->ivsize;

	key_size = q_req->encklen;
	enck_size_in_word = key_size/sizeof(uint32_t);

	switch (q_req->alg) {
	case CIPHER_ALG_DES:
		enciv_in_word = 2;
		break;
	case CIPHER_ALG_3DES:
		enciv_in_word = 2;
		break;
	case CIPHER_ALG_AES:
		if ((key_size != AES128_KEY_SIZE) &&
				(key_size != AES256_KEY_SIZE))
			return -EINVAL;
		enciv_in_word = 4;
		break;
	default:
		return -EINVAL;
	}

	/* only support cbc mode */
	if (q_req->mode != QCE_MODE_CBC)
		return -EINVAL;

	_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
	pce = cmdlistinfo->encr_cntr_iv;
	for (i = 0; i < enciv_in_word; i++, pce++)
		pce->data = enciv32[i];

	/*
	 * write encr key
	 * do not use hw key or pipe key
	 */
	_byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
	pce = cmdlistinfo->encr_key;
	for (i = 0; i < enck_size_in_word; i++, pce++)
		pce->data = enckey32[i];

	/* write encr seg cfg */
	pce = cmdlistinfo->encr_seg_cfg;
	encr_cfg = pce->data;
	if (q_req->dir == QCE_ENCRYPT)
		encr_cfg |= (1 << CRYPTO_ENCODE);
	else
		encr_cfg &= ~(1 << CRYPTO_ENCODE);
	pce->data = encr_cfg;

	/* we only support sha1-hmac and sha256-hmac at this point */
	_byte_stream_to_net_words(mackey32, q_req->authkey,
					q_req->authklen);
	pce = cmdlistinfo->auth_key;
	for (i = 0; i < authk_size_in_word; i++, pce++)
		pce->data = mackey32[i];
	pce = cmdlistinfo->auth_iv;

	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC)
		for (i = 0; i < 5; i++, pce++)
			pce->data = _std_init_vector_sha1[i];
	else
		for (i = 0; i < 8; i++, pce++)
			pce->data = _std_init_vector_sha256[i];

	/* write auth_bytecnt 0/1, start with 0 */
	pce = cmdlistinfo->auth_bytecount;
	for (i = 0; i < 2; i++, pce++)
		pce->data = 0;

	pce = cmdlistinfo->auth_seg_cfg;
	a_cfg = pce->data;
	a_cfg &= ~(CRYPTO_AUTH_POS_MASK);
	if (q_req->dir == QCE_ENCRYPT)
		a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
	else
		a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
	pce->data = a_cfg;

	/* write auth seg size */
	pce = cmdlistinfo->auth_seg_size;
	pce->data = totallen_in;

	/* write auth seg start */
	pce = cmdlistinfo->auth_seg_start;
	pce->data = 0;

	/* write seg size */
	pce = cmdlistinfo->seg_size;
	pce->data = totallen_in;

	/* write encr seg size */
	pce = cmdlistinfo->encr_seg_size;
	pce->data = q_req->cryptlen;

	/* write encr seg start */
	pce = cmdlistinfo->encr_seg_start;
	pce->data = (coffset & 0xffff);

	return 0;
}
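
/*
 * Layout illustration (not from the original source): for AEAD the
 * authentication segment spans the whole transfer (totallen_in), while
 * the cipher segment of q_req->cryptlen bytes starts coffset bytes in:
 *
 *   |<--------------- totallen_in (auth seg) --------------->|
 *   |<-- coffset -->|<-------- cryptlen (encr seg) --------->|
 */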

static struct qce_cmdlist_info *_ce_get_cipher_cmdlistinfo(
			struct qce_device *pce_dev,
			int req_info, struct qce_req *creq)
{
	struct ce_request_info *preq_info;
	struct ce_sps_data *pce_sps_data;
	struct qce_cmdlistptr_ops *cmdlistptr;

	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;
	cmdlistptr = &pce_sps_data->cmdlistptr;
	if (creq->alg != CIPHER_ALG_AES) {
		switch (creq->alg) {
		case CIPHER_ALG_DES:
			if (creq->mode == QCE_MODE_ECB)
				return &cmdlistptr->cipher_des_ecb;
			return &cmdlistptr->cipher_des_cbc;
		case CIPHER_ALG_3DES:
			if (creq->mode == QCE_MODE_ECB)
				return &cmdlistptr->cipher_3des_ecb;
			return &cmdlistptr->cipher_3des_cbc;
		default:
			return NULL;
		}
	} else {
		switch (creq->mode) {
		case QCE_MODE_ECB:
			if (creq->encklen == AES128_KEY_SIZE)
				return &cmdlistptr->cipher_aes_128_ecb;
			return &cmdlistptr->cipher_aes_256_ecb;
		case QCE_MODE_CBC:
		case QCE_MODE_CTR:
			if (creq->encklen == AES128_KEY_SIZE)
				return &cmdlistptr->cipher_aes_128_cbc_ctr;
			return &cmdlistptr->cipher_aes_256_cbc_ctr;
		case QCE_MODE_XTS:
			if (creq->encklen/2 == AES128_KEY_SIZE)
				return &cmdlistptr->cipher_aes_128_xts;
			return &cmdlistptr->cipher_aes_256_xts;
		case QCE_MODE_CCM:
			if (creq->encklen == AES128_KEY_SIZE)
				return &cmdlistptr->aead_aes_128_ccm;
			return &cmdlistptr->aead_aes_256_ccm;
		default:
			return NULL;
		}
	}
	return NULL;
}

static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
		uint32_t totallen_in, uint32_t coffset,
		struct qce_cmdlist_info *cmdlistinfo)
{
	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
			0, 0, 0, 0};
	uint32_t enck_size_in_word = 0;
	uint32_t key_size;
	bool use_hw_key = false;
	bool use_pipe_key = false;
	uint32_t encr_cfg = 0;
	uint32_t ivsize = creq->ivsize;
	int i;
	struct sps_command_element *pce = NULL;

	if (creq->mode == QCE_MODE_XTS)
		key_size = creq->encklen/2;
	else
		key_size = creq->encklen;

	pce = cmdlistinfo->go_proc;
	if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
		use_hw_key = true;
	} else {
		if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
					QCRYPTO_CTX_USE_PIPE_KEY)
			use_pipe_key = true;
	}
	pce = cmdlistinfo->go_proc;
	if (use_hw_key == true)
		pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
						pce_dev->phy_iobase);
	else
		pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
						pce_dev->phy_iobase);
	if ((use_pipe_key == false) && (use_hw_key == false)) {
		_byte_stream_to_net_words(enckey32, creq->enckey, key_size);
		enck_size_in_word = key_size/sizeof(uint32_t);
	}

	if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
		uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
		uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
		uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
		uint32_t auth_cfg = 0;

		/* write nonce */
		_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
		pce = cmdlistinfo->auth_nonce_info;
		for (i = 0; i < noncelen32; i++, pce++)
			pce->data = nonce32[i];

		if (creq->authklen == AES128_KEY_SIZE)
			auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
		else {
			if (creq->authklen == AES256_KEY_SIZE)
				auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
		}
		if (creq->dir == QCE_ENCRYPT)
			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
		else
			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
		auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);

		if (use_hw_key == true) {
			auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
		} else {
			auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
			/* write auth key */
			pce = cmdlistinfo->auth_key;
			for (i = 0; i < authklen32; i++, pce++)
				pce->data = enckey32[i];
		}

		pce = cmdlistinfo->auth_seg_cfg;
		pce->data = auth_cfg;

		pce = cmdlistinfo->auth_seg_size;
		if (creq->dir == QCE_ENCRYPT)
			pce->data = totallen_in;
		else
			pce->data = totallen_in - creq->authsize;
		pce = cmdlistinfo->auth_seg_start;
		pce->data = 0;
	} else {
		if (creq->op != QCE_REQ_AEAD) {
			pce = cmdlistinfo->auth_seg_cfg;
			pce->data = 0;
		}
	}
	switch (creq->mode) {
	case QCE_MODE_ECB:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
		break;
	case QCE_MODE_CBC:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
		break;
	case QCE_MODE_XTS:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
		break;
	case QCE_MODE_CCM:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
		encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
				(CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
		break;
	case QCE_MODE_CTR:
	default:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
		break;
	}

	switch (creq->alg) {
	case CIPHER_ALG_DES:
		if (creq->mode != QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
			pce = cmdlistinfo->encr_cntr_iv;
			pce->data = enciv32[0];
			pce++;
			pce->data = enciv32[1];
		}
		if (use_hw_key == false) {
			pce = cmdlistinfo->encr_key;
			pce->data = enckey32[0];
			pce++;
			pce->data = enckey32[1];
		}
		break;
	case CIPHER_ALG_3DES:
		if (creq->mode != QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
			pce = cmdlistinfo->encr_cntr_iv;
			pce->data = enciv32[0];
			pce++;
			pce->data = enciv32[1];
		}
		if (use_hw_key == false) {
			/* write encr key */
			pce = cmdlistinfo->encr_key;
			for (i = 0; i < 6; i++, pce++)
				pce->data = enckey32[i];
		}
		break;
	case CIPHER_ALG_AES:
	default:
		if (creq->mode == QCE_MODE_XTS) {
			uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
					= {0, 0, 0, 0, 0, 0, 0, 0};
			uint32_t xtsklen =
					creq->encklen/(2 * sizeof(uint32_t));

			if ((use_hw_key == false) && (use_pipe_key == false)) {
				_byte_stream_to_net_words(xtskey32,
					(creq->enckey + creq->encklen/2),
					creq->encklen/2);
				/* write xts encr key */
				pce = cmdlistinfo->encr_xts_key;
				for (i = 0; i < xtsklen; i++, pce++)
					pce->data = xtskey32[i];
			}
			/* write xts du size */
			pce = cmdlistinfo->encr_xts_du_size;
			switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
			case QCRYPTO_CTX_XTS_DU_SIZE_512B:
				pce->data = min((unsigned int)QCE_SECTOR_SIZE,
						creq->cryptlen);
				break;
			case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
				pce->data =
					min((unsigned int)QCE_SECTOR_SIZE * 2,
					creq->cryptlen);
				break;
			default:
				pce->data = creq->cryptlen;
				break;
			}
		}
		if (creq->mode != QCE_MODE_ECB) {
			if (creq->mode == QCE_MODE_XTS)
				_byte_stream_swap_to_net_words(enciv32,
							creq->iv, ivsize);
			else
				_byte_stream_to_net_words(enciv32, creq->iv,
								ivsize);
			/* write encr cntr iv */
			pce = cmdlistinfo->encr_cntr_iv;
			for (i = 0; i < 4; i++, pce++)
				pce->data = enciv32[i];

			if (creq->mode == QCE_MODE_CCM) {
				/* write cntr iv for ccm */
				pce = cmdlistinfo->encr_ccm_cntr_iv;
				for (i = 0; i < 4; i++, pce++)
					pce->data = enciv32[i];
				/* update cntr_iv[3] by one */
				pce = cmdlistinfo->encr_cntr_iv;
				pce += 3;
				pce->data += 1;
			}
		}

		if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
			encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
					CRYPTO_ENCR_KEY_SZ);
		} else {
			if (use_hw_key == false) {
				/* write encr key */
				pce = cmdlistinfo->encr_key;
				for (i = 0; i < enck_size_in_word; i++, pce++)
					pce->data = enckey32[i];
			}
		} /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
		break;
	} /* end of switch (creq->alg) */

	if (use_pipe_key)
		encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
					<< CRYPTO_USE_PIPE_KEY_ENCR);

	/* write encr seg cfg */
	pce = cmdlistinfo->encr_seg_cfg;
	if ((creq->alg == CIPHER_ALG_DES) || (creq->alg == CIPHER_ALG_3DES)) {
		if (creq->dir == QCE_ENCRYPT)
			pce->data |= (1 << CRYPTO_ENCODE);
		else
			pce->data &= ~(1 << CRYPTO_ENCODE);
		encr_cfg = pce->data;
	} else {
		encr_cfg |=
			((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
	}
	if (use_hw_key == true)
		encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
	else
		encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
	pce->data = encr_cfg;

	/* write encr seg size */
	pce = cmdlistinfo->encr_seg_size;
	if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
		pce->data = (creq->cryptlen + creq->authsize);
	else
		pce->data = creq->cryptlen;

	/* write encr seg start */
	pce = cmdlistinfo->encr_seg_start;
	pce->data = (coffset & 0xffff);

	/* write seg size */
	pce = cmdlistinfo->seg_size;
	pce->data = totallen_in;

	return 0;
}
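
/*
 * XTS data-unit sizing above, worked through (illustration only): with
 * QCRYPTO_CTX_XTS_DU_SIZE_512B set and creq->cryptlen = 4096, the DU
 * size programmed is min(QCE_SECTOR_SIZE, 4096) = 0x200 (512 bytes),
 * so the engine re-derives the XTS tweak every 512-byte sector; with a
 * cryptlen of 300 the DU size clamps to 300 instead.
 */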

static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req *req,
		struct qce_cmdlist_info *cmdlistinfo)
{
	uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
	uint32_t cfg;
	struct sps_command_element *pce;
	int i;

	switch (req->algorithm) {
	case QCE_OTA_ALGO_KASUMI:
		cfg = pce_dev->reg.auth_cfg_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
	default:
		cfg = pce_dev->reg.auth_cfg_snow3g;
		break;
	}

	/* write key in CRYPTO_AUTH_IV0-3_REG */
	_byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
	pce = cmdlistinfo->auth_iv;
	for (i = 0; i < key_size_in_word; i++, pce++)
		pce->data = ikey32[i];

	/* write last bits in CRYPTO_AUTH_IV4_REG */
	pce->data = req->last_bits;

	/* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
	pce = cmdlistinfo->auth_bytecount;
	pce->data = req->fresh;

	/* write count-i to CRYPTO_AUTH_BYTECNT1_REG */
	pce++;
	pce->data = req->count_i;

	/* write auth seg cfg */
	pce = cmdlistinfo->auth_seg_cfg;
	if (req->direction == QCE_OTA_DIR_DOWNLINK)
		cfg |= BIT(CRYPTO_F9_DIRECTION);
	pce->data = cfg;

	/* write auth seg size */
	pce = cmdlistinfo->auth_seg_size;
	pce->data = req->msize;

	/* write auth seg start */
	pce = cmdlistinfo->auth_seg_start;
	pce->data = 0;

	/* write seg size */
	pce = cmdlistinfo->seg_size;
	pce->data = req->msize;

	/* write go */
	pce = cmdlistinfo->go_proc;
	pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);
	return 0;
}

static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req,
		bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset,
		uint16_t cipher_size,
		struct qce_cmdlist_info *cmdlistinfo)
{
	uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
	uint32_t cfg;
	struct sps_command_element *pce;
	int i;

	switch (req->algorithm) {
	case QCE_OTA_ALGO_KASUMI:
		cfg = pce_dev->reg.encr_cfg_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
	default:
		cfg = pce_dev->reg.encr_cfg_snow3g;
		break;
	}
	/* write key */
	_byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
	pce = cmdlistinfo->encr_key;
	for (i = 0; i < key_size_in_word; i++, pce++)
		pce->data = ckey32[i];

	/* write encr seg cfg */
	pce = cmdlistinfo->encr_seg_cfg;
	if (key_stream_mode)
		cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
	if (req->direction == QCE_OTA_DIR_DOWNLINK)
		cfg |= BIT(CRYPTO_F8_DIRECTION);
	pce->data = cfg;

	/* write encr seg start */
	pce = cmdlistinfo->encr_seg_start;
	pce->data = (cipher_offset & 0xffff);

	/* write encr seg size */
	pce = cmdlistinfo->encr_seg_size;
	pce->data = cipher_size;

	/* write seg size */
	pce = cmdlistinfo->seg_size;
	pce->data = req->data_len;

	/* write cntr0_iv0 for countC */
	pce = cmdlistinfo->encr_cntr_iv;
	pce->data = req->count_c;
	/* write cntr1_iv1 for nPkts, and bearer */
	pce++;
	if (npkts == 1)
		npkts = 0;
	pce->data = req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
			npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT;

	/* write go */
	pce = cmdlistinfo->go_proc;
	pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);

	return 0;
}
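
/*
 * CNTR1_IV1 packing above, worked through (illustration only; the field
 * positions come from CRYPTO_CNTR1_IV1_REG_F8_* in qcryptohw_50.h): the
 * bearer and the packet count share one 32-bit IV word, and a
 * single-packet request (npkts == 1) is encoded as a count of 0, so
 * bearer = 3 with npkts = 1 programs
 * (3 << CRYPTO_CNTR1_IV1_REG_F8_BEARER) |
 * (0 << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT).
 */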

static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info)
{
	int i, j, ents;
	struct ce_sps_data *pce_sps_data;
	struct sps_iovec *iovec;
	uint32_t cmd_flags = SPS_IOVEC_FLAG_CMD;

	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
	iovec = pce_sps_data->in_transfer.iovec;
	pr_info("==============================================\n");
	pr_info("CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n");
	pr_info("==============================================\n");
	for (i = 0; i < pce_sps_data->in_transfer.iovec_count; i++) {
		pr_info(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
				iovec->addr, iovec->size, iovec->flags);
		if (iovec->flags & cmd_flags) {
			struct sps_command_element *pced;

			pced = (struct sps_command_element *)
					(GET_VIRT_ADDR(iovec->addr));
			ents = iovec->size/(sizeof(struct sps_command_element));
			for (j = 0; j < ents; j++) {
				pr_info(" [%d] [0x%x] 0x%x\n", j,
					pced->addr, pced->data);
				pced++;
			}
		}
		iovec++;
	}

	pr_info("==============================================\n");
	pr_info("PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n");
	pr_info("==============================================\n");
	iovec = pce_sps_data->out_transfer.iovec;
	for (i = 0; i < pce_sps_data->out_transfer.iovec_count; i++) {
		pr_info(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
				iovec->addr, iovec->size, iovec->flags);
		iovec++;
	}
}

#ifdef QCE_DEBUG

static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
{
	_qce_dump_descr_fifos(pce_dev, req_info);
}

#define QCE_WRITE_REG(val, addr)				\
{								\
	pr_info(" [0x%pK] 0x%x\n", addr, (uint32_t)val);	\
	writel_relaxed(val, addr);				\
}

#else

static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
{
}

#define QCE_WRITE_REG(val, addr)				\
	writel_relaxed(val, addr)

#endif
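
/*
 * Note (explanatory comment, not from the original source): building
 * with QCE_DEBUG defined turns every register write into a logged
 * pr_info() plus writel_relaxed() pair and enables the descriptor FIFO
 * dump above; without it, QCE_WRITE_REG compiles straight down to
 * writel_relaxed() and the dump helper is an empty stub.
 */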

static int _ce_setup_hash_direct(struct qce_device *pce_dev,
				struct qce_sha_req *sreq)
{
	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
	uint32_t diglen;
	bool use_hw_key = false;
	bool use_pipe_key = false;
	int i;
	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
	bool sha1 = false;
	uint32_t auth_cfg = 0;

	/* clear status */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);

	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/*
	 * Ensure previous instructions (setting the CONFIG register)
	 * were completed before starting to set the other config registers.
	 * This is to ensure the configurations are done in correct endian-ness
	 * as set in the CONFIG registers.
	 */
	mb();

	if (sreq->alg == QCE_HASH_AES_CMAC) {
		/* write auth_seg_cfg */
		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
		/* write encr_seg_cfg */
		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
		/* write encr_seg_size */
		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);

		/* Clear auth_ivn, auth_keyn registers */
		for (i = 0; i < 16; i++) {
			QCE_WRITE_REG(0, (pce_dev->iobase +
				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
			QCE_WRITE_REG(0, (pce_dev->iobase +
				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
		}
		/* write auth_bytecnt 0/1/2/3, start with 0 */
		for (i = 0; i < 4; i++)
			QCE_WRITE_REG(0, pce_dev->iobase +
						CRYPTO_AUTH_BYTECNT0_REG +
						i * sizeof(uint32_t));

		if (sreq->authklen == AES128_KEY_SIZE)
			auth_cfg = pce_dev->reg.auth_cfg_cmac_128;
		else
			auth_cfg = pce_dev->reg.auth_cfg_cmac_256;
	}

	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC) ||
			(sreq->alg == QCE_HASH_AES_CMAC)) {

		_byte_stream_to_net_words(mackey32, sreq->authkey,
						sreq->authklen);

		/* no more check for null key. use flag to check */

		if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY) ==
					QCRYPTO_CTX_USE_HW_KEY) {
			use_hw_key = true;
		} else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
						QCRYPTO_CTX_USE_PIPE_KEY) {
			use_pipe_key = true;
		} else {
			/* setup key */
			for (i = 0; i < authk_size_in_word; i++)
				QCE_WRITE_REG(mackey32[i], (pce_dev->iobase +
					(CRYPTO_AUTH_KEY0_REG +
						i*sizeof(uint32_t))));
		}
	}

	if (sreq->alg == QCE_HASH_AES_CMAC)
		goto go_proc;

	/* if not the last, the size has to be on the block boundary */
	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
		return -EIO;

	switch (sreq->alg) {
	case QCE_HASH_SHA1:
		auth_cfg = pce_dev->reg.auth_cfg_sha1;
		diglen = SHA1_DIGEST_SIZE;
		sha1 = true;
		break;
	case QCE_HASH_SHA1_HMAC:
		auth_cfg = pce_dev->reg.auth_cfg_hmac_sha1;
		diglen = SHA1_DIGEST_SIZE;
		sha1 = true;
		break;
	case QCE_HASH_SHA256:
		auth_cfg = pce_dev->reg.auth_cfg_sha256;
		diglen = SHA256_DIGEST_SIZE;
		break;
	case QCE_HASH_SHA256_HMAC:
		auth_cfg = pce_dev->reg.auth_cfg_hmac_sha256;
		diglen = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
	if (sreq->first_blk) {
		if (sha1) {
			for (i = 0; i < 5; i++)
				auth32[i] = _std_init_vector_sha1[i];
		} else {
			for (i = 0; i < 8; i++)
				auth32[i] = _std_init_vector_sha256[i];
		}
	} else {
		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
	}

	/* Set auth_ivn registers */
	for (i = 0; i < 5; i++)
		QCE_WRITE_REG(auth32[i], (pce_dev->iobase +
			(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));

	if ((sreq->alg == QCE_HASH_SHA256) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
		for (i = 5; i < 8; i++)
			QCE_WRITE_REG(auth32[i], (pce_dev->iobase +
				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
	}

	/* write auth_bytecnt 0/1, start with 0 */
	for (i = 0; i < 2; i++)
		QCE_WRITE_REG(sreq->auth_data[i], pce_dev->iobase +
					CRYPTO_AUTH_BYTECNT0_REG +
					i * sizeof(uint32_t));

	/* Set/reset last bit in CFG register */
	if (sreq->last_blk)
		auth_cfg |= 1 << CRYPTO_LAST;
	else
		auth_cfg &= ~(1 << CRYPTO_LAST);
	if (sreq->first_blk)
		auth_cfg |= 1 << CRYPTO_FIRST;
	else
		auth_cfg &= ~(1 << CRYPTO_FIRST);
	if (use_hw_key)
		auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
	if (use_pipe_key)
		auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
go_proc:
	/* write auth_seg_cfg */
	QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
	/* write auth seg_size */
	QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);

	/* write auth_seg_start */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);

	/* reset encr seg_cfg */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);

	/* write seg_size */
	QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/* issue go to crypto */
	if (use_hw_key == false) {
		QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
				(1 << CRYPTO_CLR_CNTXT)),
				pce_dev->iobase + CRYPTO_GOPROC_REG);
	} else {
		QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
				pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
	}
	/*
	 * Ensure previous instructions (setting the GO register)
	 * were completed before issuing a DMA transfer request.
	 */
	mb();
	return 0;
}
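
/*
 * GO register bits above, spelled out (illustration only): for a
 * software-key request the driver sets CRYPTO_GO (kick the engine),
 * CRYPTO_RESULTS_DUMP (dump status/digest to the result area) and
 * CRYPTO_CLR_CNTXT (clear the previous context); the hardware-key path
 * issues the GO through CRYPTO_GOPROC_QC_KEY_REG instead and omits
 * CRYPTO_CLR_CNTXT.
 */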

static int _ce_setup_aead_direct(struct qce_device *pce_dev,
		struct qce_req *q_req, uint32_t totallen_in, uint32_t coffset)
{
	int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
	int i;
	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
	uint32_t a_cfg;
	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
	uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
	uint32_t enck_size_in_word = 0;
	uint32_t enciv_in_word;
	uint32_t key_size;
	uint32_t ivsize = q_req->ivsize;
	uint32_t encr_cfg;

	/* clear status */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);

	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/*
	 * Ensure previous instructions (setting the CONFIG register)
	 * were completed before starting to set the other config registers.
	 * This is to ensure the configurations are done in correct endian-ness
	 * as set in the CONFIG registers.
	 */
	mb();

	key_size = q_req->encklen;
	enck_size_in_word = key_size/sizeof(uint32_t);

	switch (q_req->alg) {
	case CIPHER_ALG_DES:
		switch (q_req->mode) {
		case QCE_MODE_CBC:
			encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
			break;
		default:
			return -EINVAL;
		}
		enciv_in_word = 2;
		break;
	case CIPHER_ALG_3DES:
		switch (q_req->mode) {
		case QCE_MODE_CBC:
			encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
			break;
		default:
			return -EINVAL;
		}
		enciv_in_word = 2;
		break;
	case CIPHER_ALG_AES:
		switch (q_req->mode) {
		case QCE_MODE_CBC:
			if (key_size == AES128_KEY_SIZE)
				encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
			else if (key_size == AES256_KEY_SIZE)
				encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		enciv_in_word = 4;
		break;
	default:
		return -EINVAL;
	}

	/* write CNTR0_IV0_REG */
	if (q_req->mode != QCE_MODE_ECB) {
		_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
		for (i = 0; i < enciv_in_word; i++)
			QCE_WRITE_REG(enciv32[i], pce_dev->iobase +
				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));
	}

	/*
	 * write encr key
	 * do not use hw key or pipe key
	 */
	_byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
	for (i = 0; i < enck_size_in_word; i++)
		QCE_WRITE_REG(enckey32[i], pce_dev->iobase +
			(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)));

	/* write encr seg cfg */
	if (q_req->dir == QCE_ENCRYPT)
		encr_cfg |= (1 << CRYPTO_ENCODE);
	QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);

	/* we only support sha1-hmac and sha256-hmac at this point */
	_byte_stream_to_net_words(mackey32, q_req->authkey,
					q_req->authklen);
	for (i = 0; i < authk_size_in_word; i++)
		QCE_WRITE_REG(mackey32[i], pce_dev->iobase +
			(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)));

	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC) {
		for (i = 0; i < 5; i++)
			QCE_WRITE_REG(_std_init_vector_sha1[i],
				pce_dev->iobase +
				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
	} else {
		for (i = 0; i < 8; i++)
			QCE_WRITE_REG(_std_init_vector_sha256[i],
				pce_dev->iobase +
				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
	}

	/* write auth_bytecnt 0/1, start with 0 */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG);

	/* write encr seg size */
	QCE_WRITE_REG(q_req->cryptlen, pce_dev->iobase +
			CRYPTO_ENCR_SEG_SIZE_REG);

	/* write encr start */
	QCE_WRITE_REG(coffset & 0xffff, pce_dev->iobase +
			CRYPTO_ENCR_SEG_START_REG);

	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC)
		a_cfg = pce_dev->reg.auth_cfg_aead_sha1_hmac;
	else
		a_cfg = pce_dev->reg.auth_cfg_aead_sha256_hmac;

	if (q_req->dir == QCE_ENCRYPT)
		a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
	else
		a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);

	/* write auth seg_cfg */
	QCE_WRITE_REG(a_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/* write auth seg_size */
	QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);

	/* write auth_seg_start */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);

	/* write seg_size */
	QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/* issue go to crypto */
	QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
			(1 << CRYPTO_CLR_CNTXT)),
			pce_dev->iobase + CRYPTO_GOPROC_REG);
	/*
	 * Ensure previous instructions (setting the GO register)
	 * were completed before issuing a DMA transfer request.
	 */
	mb();
	return 0;
}
1546
1547static int _ce_setup_cipher_direct(struct qce_device *pce_dev,
1548 struct qce_req *creq, uint32_t totallen_in, uint32_t coffset)
1549{
1550 uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
1551 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1552 uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
1553 0, 0, 0, 0};
1554 uint32_t enck_size_in_word = 0;
1555 uint32_t key_size;
1556 bool use_hw_key = false;
1557 bool use_pipe_key = false;
1558 uint32_t encr_cfg = 0;
1559 uint32_t ivsize = creq->ivsize;
1560 int i;
1561
1562 /* clear status */
1563 QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
1564
1565 QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
1566 CRYPTO_CONFIG_REG));
1567 /*
1568 * Ensure previous instructions (setting the CONFIG register)
1569 * was completed before issuing starting to set other config register
1570 * This is to ensure the configurations are done in correct endian-ness
1571 * as set in the CONFIG registers
1572 */
1573 mb();
1574
1575 if (creq->mode == QCE_MODE_XTS)
1576 key_size = creq->encklen/2;
1577 else
1578 key_size = creq->encklen;
1579
1580 if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
1581 use_hw_key = true;
1582 } else {
1583 if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
1584 QCRYPTO_CTX_USE_PIPE_KEY)
1585 use_pipe_key = true;
1586 }
1587 if ((use_pipe_key == false) && (use_hw_key == false)) {
1588 _byte_stream_to_net_words(enckey32, creq->enckey, key_size);
1589 enck_size_in_word = key_size/sizeof(uint32_t);
1590 }
1591 if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
1592 uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
1593 uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
1594 uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
1595 uint32_t auth_cfg = 0;
1596
1597 /* Clear auth_ivn, auth_keyn registers */
1598 for (i = 0; i < 16; i++) {
1599 QCE_WRITE_REG(0, (pce_dev->iobase +
1600 (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
1601 QCE_WRITE_REG(0, (pce_dev->iobase +
1602 (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
1603 }
1604 /* write auth_bytecnt 0/1/2/3, start with 0 */
1605 for (i = 0; i < 4; i++)
1606 QCE_WRITE_REG(0, pce_dev->iobase +
1607 CRYPTO_AUTH_BYTECNT0_REG +
1608 i * sizeof(uint32_t));
1609 /* write nonce */
1610 _byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
1611 for (i = 0; i < noncelen32; i++)
1612 QCE_WRITE_REG(nonce32[i], pce_dev->iobase +
1613 CRYPTO_AUTH_INFO_NONCE0_REG +
1614 (i*sizeof(uint32_t)));
1615
1616 if (creq->authklen == AES128_KEY_SIZE)
1617 auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
1618 else {
1619 if (creq->authklen == AES256_KEY_SIZE)
1620 auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
1621 }
1622 if (creq->dir == QCE_ENCRYPT)
1623 auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
1624 else
1625 auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
1626 auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
1627
1628 if (use_hw_key == true) {
1629 auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
1630 } else {
1631 auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
1632 /* write auth key */
1633 for (i = 0; i < authklen32; i++)
1634 QCE_WRITE_REG(enckey32[i], pce_dev->iobase +
1635 CRYPTO_AUTH_KEY0_REG + (i*sizeof(uint32_t)));
1636 }
1637 QCE_WRITE_REG(auth_cfg, pce_dev->iobase +
1638 CRYPTO_AUTH_SEG_CFG_REG);
1639 if (creq->dir == QCE_ENCRYPT) {
1640 QCE_WRITE_REG(totallen_in, pce_dev->iobase +
1641 CRYPTO_AUTH_SEG_SIZE_REG);
1642 } else {
1643 QCE_WRITE_REG((totallen_in - creq->authsize),
1644 pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
1645 }
1646 QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
1647 } else {
1648 if (creq->op != QCE_REQ_AEAD)
1649 QCE_WRITE_REG(0, pce_dev->iobase +
1650 CRYPTO_AUTH_SEG_CFG_REG);
1651 }
1652 /*
1653 * Ensure previous instructions (write to all AUTH registers)
1654 * was completed before accessing a register that is not in
1655 * in the same 1K range.
1656 */
1657 mb();
1658 switch (creq->mode) {
1659 case QCE_MODE_ECB:
1660 if (key_size == AES128_KEY_SIZE)
1661 encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
1662 else
1663 encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
1664 break;
1665 case QCE_MODE_CBC:
1666 if (key_size == AES128_KEY_SIZE)
1667 encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
1668 else
1669 encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
1670 break;
1671 case QCE_MODE_XTS:
1672 if (key_size == AES128_KEY_SIZE)
1673 encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
1674 else
1675 encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
1676 break;
1677 case QCE_MODE_CCM:
1678 if (key_size == AES128_KEY_SIZE)
1679 encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
1680 else
1681 encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
1682 break;
1683 case QCE_MODE_CTR:
1684 default:
1685 if (key_size == AES128_KEY_SIZE)
1686 encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
1687 else
1688 encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
1689 break;
1690 }
1691
1692 switch (creq->alg) {
1693 case CIPHER_ALG_DES:
1694 if (creq->mode != QCE_MODE_ECB) {
1695 encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
1696 _byte_stream_to_net_words(enciv32, creq->iv, ivsize);
1697 QCE_WRITE_REG(enciv32[0], pce_dev->iobase +
1698 CRYPTO_CNTR0_IV0_REG);
1699 QCE_WRITE_REG(enciv32[1], pce_dev->iobase +
1700 CRYPTO_CNTR1_IV1_REG);
1701 } else {
1702 encr_cfg = pce_dev->reg.encr_cfg_des_ecb;
1703 }
1704 if (use_hw_key == false) {
1705 QCE_WRITE_REG(enckey32[0], pce_dev->iobase +
1706 CRYPTO_ENCR_KEY0_REG);
1707 QCE_WRITE_REG(enckey32[1], pce_dev->iobase +
1708 CRYPTO_ENCR_KEY1_REG);
1709 }
1710 break;
1711 case CIPHER_ALG_3DES:
1712 if (creq->mode != QCE_MODE_ECB) {
1713 _byte_stream_to_net_words(enciv32, creq->iv, ivsize);
1714 QCE_WRITE_REG(enciv32[0], pce_dev->iobase +
1715 CRYPTO_CNTR0_IV0_REG);
1716 QCE_WRITE_REG(enciv32[1], pce_dev->iobase +
1717 CRYPTO_CNTR1_IV1_REG);
1718 encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
1719 } else {
1720 encr_cfg = pce_dev->reg.encr_cfg_3des_ecb;
1721 }
1722 if (use_hw_key == false) {
1723 /* write encr key */
1724 for (i = 0; i < 6; i++)
1725 QCE_WRITE_REG(enckey32[0], (pce_dev->iobase +
1726 (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t))));
1727 }
1728 break;
1729 case CIPHER_ALG_AES:
1730 default:
1731 if (creq->mode == QCE_MODE_XTS) {
1732 uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
1733 = {0, 0, 0, 0, 0, 0, 0, 0};
1734 uint32_t xtsklen =
1735 creq->encklen/(2 * sizeof(uint32_t));
1736
1737 if ((use_hw_key == false) && (use_pipe_key == false)) {
1738 _byte_stream_to_net_words(xtskey32,
1739 (creq->enckey + creq->encklen/2),
1740 creq->encklen/2);
1741 /* write xts encr key */
1742 for (i = 0; i < xtsklen; i++)
1743 QCE_WRITE_REG(xtskey32[i],
1744 pce_dev->iobase +
1745 CRYPTO_ENCR_XTS_KEY0_REG +
1746 (i * sizeof(uint32_t)));
1747 }
1748 /* write xts du size */
1749 switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
1750 case QCRYPTO_CTX_XTS_DU_SIZE_512B:
1751 QCE_WRITE_REG(
1752 min((uint32_t)QCE_SECTOR_SIZE,
1753 creq->cryptlen), pce_dev->iobase +
1754 CRYPTO_ENCR_XTS_DU_SIZE_REG);
1755 break;
1756 case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
1757 QCE_WRITE_REG(
1758 min((uint32_t)(QCE_SECTOR_SIZE * 2),
1759 creq->cryptlen), pce_dev->iobase +
1760 CRYPTO_ENCR_XTS_DU_SIZE_REG);
1761 break;
1762 default:
1763 QCE_WRITE_REG(creq->cryptlen,
1764 pce_dev->iobase +
1765 CRYPTO_ENCR_XTS_DU_SIZE_REG);
1766 break;
1767 }
1768 }
1769 if (creq->mode != QCE_MODE_ECB) {
1770 if (creq->mode == QCE_MODE_XTS)
1771 _byte_stream_swap_to_net_words(enciv32,
1772 creq->iv, ivsize);
1773 else
1774 _byte_stream_to_net_words(enciv32, creq->iv,
1775 ivsize);
1776
1777 /* write encr cntr iv */
1778 for (i = 0; i <= 3; i++)
1779 QCE_WRITE_REG(enciv32[i], pce_dev->iobase +
1780 CRYPTO_CNTR0_IV0_REG +
1781 (i * sizeof(uint32_t)));
1782
1783 if (creq->mode == QCE_MODE_CCM) {
1784 /* write cntr iv for ccm */
1785 for (i = 0; i <= 3; i++)
1786 QCE_WRITE_REG(enciv32[i],
1787 pce_dev->iobase +
1788 CRYPTO_ENCR_CCM_INT_CNTR0_REG +
1789 (i * sizeof(uint32_t)));
1790 /* update cntr_iv[3] by one */
1791 QCE_WRITE_REG((enciv32[3] + 1),
1792 pce_dev->iobase +
1793 CRYPTO_CNTR0_IV0_REG +
1794 (3 * sizeof(uint32_t)));
1795 }
1796 }
1797
1798 if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
1799 encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
1800 CRYPTO_ENCR_KEY_SZ);
1801 } else {
1802 if ((use_hw_key == false) && (use_pipe_key == false)) {
1803 for (i = 0; i < enck_size_in_word; i++)
1804 QCE_WRITE_REG(enckey32[i],
1805 pce_dev->iobase +
1806 CRYPTO_ENCR_KEY0_REG +
1807 (i * sizeof(uint32_t)));
1808 }
1809 } /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
1810 break;
1811 } /* end of switch (creq->mode) */
1812
1813 if (use_pipe_key)
1814 encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
1815 << CRYPTO_USE_PIPE_KEY_ENCR);
1816
1817 /* write encr seg cfg */
1818 encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
1819 if (use_hw_key == true)
1820 encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
1821 else
1822 encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
1823 /* write encr seg cfg */
1824 QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
1825
1826 /* write encr seg size */
1827 if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT)) {
1828 QCE_WRITE_REG((creq->cryptlen + creq->authsize),
1829 pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
1830 } else {
1831 QCE_WRITE_REG(creq->cryptlen,
1832 pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
1833 }
1834
1835 /* write encr seg start */
1836 QCE_WRITE_REG((coffset & 0xffff),
1837 pce_dev->iobase + CRYPTO_ENCR_SEG_START_REG);
1838
1839 /* write encr counter mask */
1840 QCE_WRITE_REG(0xffffffff,
1841 pce_dev->iobase + CRYPTO_CNTR_MASK_REG);
1842 QCE_WRITE_REG(0xffffffff,
1843 pce_dev->iobase + CRYPTO_CNTR_MASK_REG0);
1844 QCE_WRITE_REG(0xffffffff,
1845 pce_dev->iobase + CRYPTO_CNTR_MASK_REG1);
1846 QCE_WRITE_REG(0xffffffff,
1847 pce_dev->iobase + CRYPTO_CNTR_MASK_REG2);
1848
1849 /* write seg size */
1850 QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
1851
1852 QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
1853 CRYPTO_CONFIG_REG));
1854 /* issue go to crypto */
1855 if (use_hw_key == false) {
1856 QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
1857 (1 << CRYPTO_CLR_CNTXT)),
1858 pce_dev->iobase + CRYPTO_GOPROC_REG);
1859 } else {
1860 QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
1861 pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
1862 }
1863 /*
1864	 * Ensure the previous instructions (setting the GO register)
1865	 * have completed before issuing a DMA transfer request
1866 */
1867 mb();
1868 return 0;
1869};
1870
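/*
 * Illustrative note (not part of the driver): the key/IV writes above
 * rely on _byte_stream_to_net_words(), defined earlier in this file,
 * to pack a byte stream into big-endian 32-bit words. A minimal
 * standalone sketch with equivalent semantics, for reference only:
 */
#include <stdint.h>
#include <string.h>

static void byte_stream_to_net_words_sketch(uint32_t *out,
				const unsigned char *in, unsigned int len)
{
	unsigned int i, n = len / sizeof(uint32_t);

	for (i = 0; i < n; i++, in += sizeof(uint32_t))
		out[i] = ((uint32_t)in[0] << 24) | ((uint32_t)in[1] << 16) |
			 ((uint32_t)in[2] << 8) | (uint32_t)in[3];
	if (len % sizeof(uint32_t)) {		/* partial trailing word */
		unsigned char last[sizeof(uint32_t)] = {0};

		memcpy(last, in, len % sizeof(uint32_t));
		out[n] = ((uint32_t)last[0] << 24) |
			 ((uint32_t)last[1] << 16) |
			 ((uint32_t)last[2] << 8) | (uint32_t)last[3];
	}
}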
1871static int _ce_f9_setup_direct(struct qce_device *pce_dev,
1872 struct qce_f9_req *req)
1873{
1874 uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
1875 uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
1876 uint32_t auth_cfg;
1877 int i;
1878
1879 switch (req->algorithm) {
1880 case QCE_OTA_ALGO_KASUMI:
1881 auth_cfg = pce_dev->reg.auth_cfg_kasumi;
1882 break;
1883 case QCE_OTA_ALGO_SNOW3G:
1884 default:
1885 auth_cfg = pce_dev->reg.auth_cfg_snow3g;
1886 break;
1887	}
1888
1889 /* clear status */
1890 QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
1891
1892 /* set big endian configuration */
1893 QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
1894 CRYPTO_CONFIG_REG));
1895 /*
1896	 * Ensure the previous instruction (setting the CONFIG register)
1897	 * has completed before starting to program the other config
1898	 * registers. This ensures the configuration writes are done with
1899	 * the endianness set in the CONFIG register.
1900 */
1901 mb();
1902
1903 /* write enc_seg_cfg */
1904 QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
1905
1906	/* write encr_seg_size */
1907 QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
1908
1909 /* write key in CRYPTO_AUTH_IV0-3_REG */
1910 _byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
1911 for (i = 0; i < key_size_in_word; i++)
1912 QCE_WRITE_REG(ikey32[i], (pce_dev->iobase +
1913 (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
1914
1915 /* write last bits in CRYPTO_AUTH_IV4_REG */
1916 QCE_WRITE_REG(req->last_bits, (pce_dev->iobase +
1917 CRYPTO_AUTH_IV4_REG));
1918
1919 /* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
1920 QCE_WRITE_REG(req->fresh, (pce_dev->iobase +
1921 CRYPTO_AUTH_BYTECNT0_REG));
1922
1923 /* write count-i to CRYPTO_AUTH_BYTECNT1_REG */
1924 QCE_WRITE_REG(req->count_i, (pce_dev->iobase +
1925 CRYPTO_AUTH_BYTECNT1_REG));
1926
1927 /* write auth seg cfg */
1928 if (req->direction == QCE_OTA_DIR_DOWNLINK)
1929 auth_cfg |= BIT(CRYPTO_F9_DIRECTION);
1930 QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
1931
1932 /* write auth seg size */
1933 QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
1934
1935 /* write auth seg start*/
1936 QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
1937
1938 /* write seg size */
1939 QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
1940
1941	/* set little endian configuration before go */
1942 QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
1943 CRYPTO_CONFIG_REG));
1944 /* write go */
1945 QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
1946 (1 << CRYPTO_CLR_CNTXT)),
1947 pce_dev->iobase + CRYPTO_GOPROC_REG);
1948 /*
1949	 * Ensure the previous instructions (setting the GO register)
1950	 * have completed before issuing a DMA transfer request
1951 */
1952 mb();
1953 return 0;
1954}
1955
1956static int _ce_f8_setup_direct(struct qce_device *pce_dev,
1957 struct qce_f8_req *req, bool key_stream_mode,
1958 uint16_t npkts, uint16_t cipher_offset, uint16_t cipher_size)
1959{
1960 int i = 0;
1961 uint32_t encr_cfg = 0;
1962 uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
1963 uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
1964
1965 switch (req->algorithm) {
1966 case QCE_OTA_ALGO_KASUMI:
1967 encr_cfg = pce_dev->reg.encr_cfg_kasumi;
1968 break;
1969 case QCE_OTA_ALGO_SNOW3G:
1970 default:
1971 encr_cfg = pce_dev->reg.encr_cfg_snow3g;
1972 break;
1973	}
1974 /* clear status */
1975 QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
1976 /* set big endian configuration */
1977 QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
1978 CRYPTO_CONFIG_REG));
1979 /* write auth seg configuration */
1980 QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
1981 /* write auth seg size */
1982 QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
1983
1984 /* write key */
1985 _byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
1986
1987 for (i = 0; i < key_size_in_word; i++)
1988 QCE_WRITE_REG(ckey32[i], (pce_dev->iobase +
1989 (CRYPTO_ENCR_KEY0_REG + i*sizeof(uint32_t))));
1990 /* write encr seg cfg */
1991 if (key_stream_mode)
1992 encr_cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
1993 if (req->direction == QCE_OTA_DIR_DOWNLINK)
1994 encr_cfg |= BIT(CRYPTO_F8_DIRECTION);
1995 QCE_WRITE_REG(encr_cfg, pce_dev->iobase +
1996 CRYPTO_ENCR_SEG_CFG_REG);
1997
1998 /* write encr seg start */
1999 QCE_WRITE_REG((cipher_offset & 0xffff), pce_dev->iobase +
2000 CRYPTO_ENCR_SEG_START_REG);
2001 /* write encr seg size */
2002 QCE_WRITE_REG(cipher_size, pce_dev->iobase +
2003 CRYPTO_ENCR_SEG_SIZE_REG);
2004
2005 /* write seg size */
2006 QCE_WRITE_REG(req->data_len, pce_dev->iobase +
2007 CRYPTO_SEG_SIZE_REG);
2008
2009 /* write cntr0_iv0 for countC */
2010 QCE_WRITE_REG(req->count_c, pce_dev->iobase +
2011 CRYPTO_CNTR0_IV0_REG);
2012 /* write cntr1_iv1 for nPkts, and bearer */
2013 if (npkts == 1)
2014 npkts = 0;
2015 QCE_WRITE_REG(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
2016 npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT,
2017 pce_dev->iobase + CRYPTO_CNTR1_IV1_REG);
2018
2019	/* set little endian configuration before go */
2020 QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
2021 CRYPTO_CONFIG_REG));
2022 /* write go */
2023 QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
2024 (1 << CRYPTO_CLR_CNTXT)),
2025 pce_dev->iobase + CRYPTO_GOPROC_REG);
2026 /*
2027	 * Ensure the previous instructions (setting the GO register)
2028	 * have completed before issuing a DMA transfer request
2029 */
2030 mb();
2031 return 0;
2032}
2033
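/*
 * Illustrative sketch (not part of the driver): the CNTR1_IV1 write
 * above packs the bearer and packet-count fields into a single word.
 * The shift positions below stand in for CRYPTO_CNTR1_IV1_REG_F8_BEARER
 * and CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT from qcryptohw_50.h and are
 * assumed values for illustration only.
 */
#include <stdint.h>

#define F8_BEARER_SHIFT		0	/* assumed */
#define F8_PKT_CNT_SHIFT	16	/* assumed */

static uint32_t f8_iv1_word(uint16_t bearer, uint16_t npkts)
{
	/* a single packet is encoded as a count of zero, as above */
	if (npkts == 1)
		npkts = 0;
	return ((uint32_t)bearer << F8_BEARER_SHIFT) |
	       ((uint32_t)npkts << F8_PKT_CNT_SHIFT);
}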
2034
2035static int _qce_unlock_other_pipes(struct qce_device *pce_dev, int req_info)
2036{
2037 int rc = 0;
2038 struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info
2039 [req_info].ce_sps;
2040
2041 if (pce_dev->no_get_around || pce_dev->support_cmd_dscr == false)
2042 return rc;
2043
2044 rc = sps_transfer_one(pce_dev->ce_bam_info.consumer.pipe,
2045 GET_PHYS_ADDR(pce_sps_data->
2046 cmdlistptr.unlock_all_pipes.cmdlist),
2047 0, NULL, (SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_UNLOCK));
2048 if (rc) {
2049 pr_err("sps_xfr_one() fail rc=%d", rc);
2050 rc = -EINVAL;
2051 }
2052 return rc;
2053}
2054
2055static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
2056 bool is_complete);
2057
2058static int _aead_complete(struct qce_device *pce_dev, int req_info)
2059{
2060 struct aead_request *areq;
2061 unsigned char mac[SHA256_DIGEST_SIZE];
2062 uint32_t ccm_fail_status = 0;
2063 uint32_t result_dump_status;
2064 int32_t result_status = 0;
2065 struct ce_request_info *preq_info;
2066 struct ce_sps_data *pce_sps_data;
2067 qce_comp_func_ptr_t qce_callback;
2068
2069 preq_info = &pce_dev->ce_request_info[req_info];
2070 pce_sps_data = &preq_info->ce_sps;
2071 qce_callback = preq_info->qce_cb;
2072 areq = (struct aead_request *) preq_info->areq;
2073 if (areq->src != areq->dst) {
2074 qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
2075 DMA_FROM_DEVICE);
2076 }
2077 qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
2078 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
2079 DMA_TO_DEVICE);
2080
2081 if (preq_info->asg)
2082 qce_dma_unmap_sg(pce_dev->pdev, preq_info->asg,
2083 preq_info->assoc_nents, DMA_TO_DEVICE);
2084 /* check MAC */
2085 memcpy(mac, (char *)(&pce_sps_data->result->auth_iv[0]),
2086 SHA256_DIGEST_SIZE);
2087
2088 /* read status before unlock */
2089 if (preq_info->dir == QCE_DECRYPT) {
2090 if (pce_dev->no_get_around)
2091 if (pce_dev->no_ccm_mac_status_get_around)
2092 ccm_fail_status = be32_to_cpu(pce_sps_data->
2093 result->status);
2094 else
2095 ccm_fail_status = be32_to_cpu(pce_sps_data->
2096 result_null->status);
2097 else
2098 ccm_fail_status = readl_relaxed(pce_dev->iobase +
2099 CRYPTO_STATUS_REG);
2100 }
2101 if (_qce_unlock_other_pipes(pce_dev, req_info)) {
2102 qce_free_req_info(pce_dev, req_info, true);
2103 qce_callback(areq, mac, NULL, -ENXIO);
2104 return -ENXIO;
2105 }
2106 result_dump_status = be32_to_cpu(pce_sps_data->result->status);
2107 pce_sps_data->result->status = 0;
2108
2109 if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
2110 | (1 << CRYPTO_HSD_ERR))) {
2111 pr_err("aead operation error. Status %x\n", result_dump_status);
2112 result_status = -ENXIO;
2113 } else if (pce_sps_data->consumer_status |
2114 pce_sps_data->producer_status) {
2115 pr_err("aead sps operation error. sps status %x %x\n",
2116 pce_sps_data->consumer_status,
2117 pce_sps_data->producer_status);
2118 result_status = -ENXIO;
2119 }
2120
2121 if (preq_info->mode == QCE_MODE_CCM) {
2122 /*
2123		 * Not from the result dump; instead, use the device status
2124		 * we just read to detect MAC_FAILED.
2125 */
2126 if (result_status == 0 && (preq_info->dir == QCE_DECRYPT) &&
2127 (ccm_fail_status & (1 << CRYPTO_MAC_FAILED)))
2128 result_status = -EBADMSG;
2129 qce_free_req_info(pce_dev, req_info, true);
2130 qce_callback(areq, mac, NULL, result_status);
2131
2132 } else {
2133 uint32_t ivsize = 0;
2134 struct crypto_aead *aead;
2135 unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
2136
2137 aead = crypto_aead_reqtfm(areq);
2138 ivsize = crypto_aead_ivsize(aead);
2139 memcpy(iv, (char *)(pce_sps_data->result->encr_cntr_iv),
2140 sizeof(iv));
2141 qce_free_req_info(pce_dev, req_info, true);
2142 qce_callback(areq, mac, iv, result_status);
2143
2144 }
2145 return 0;
2146};
2147
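/*
 * Illustrative sketch (not part of the driver): for CCM decryption the
 * MAC verdict above comes from the device status captured before the
 * pipes are unlocked, not from the result dump, and a failed tag is
 * reported to the caller as -EBADMSG. The mapping, in isolation:
 */
#include <errno.h>
#include <stdint.h>

static int ccm_mac_verdict(int result_status, int is_decrypt,
		uint32_t ccm_fail_status, unsigned int mac_failed_bit)
{
	if (result_status == 0 && is_decrypt &&
	    (ccm_fail_status & (1u << mac_failed_bit)))
		return -EBADMSG;	/* authentication tag mismatch */
	return result_status;
}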
2148static int _sha_complete(struct qce_device *pce_dev, int req_info)
2149{
2150 struct ahash_request *areq;
2151 unsigned char digest[SHA256_DIGEST_SIZE];
2152 uint32_t bytecount32[2];
2153 int32_t result_status = 0;
2154 uint32_t result_dump_status;
2155 struct ce_request_info *preq_info;
2156 struct ce_sps_data *pce_sps_data;
2157 qce_comp_func_ptr_t qce_callback;
2158
2159 preq_info = &pce_dev->ce_request_info[req_info];
2160 pce_sps_data = &preq_info->ce_sps;
2161 qce_callback = preq_info->qce_cb;
2162 areq = (struct ahash_request *) preq_info->areq;
2163 qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
2164 DMA_TO_DEVICE);
2165 memcpy(digest, (char *)(&pce_sps_data->result->auth_iv[0]),
2166 SHA256_DIGEST_SIZE);
2167 _byte_stream_to_net_words(bytecount32,
2168 (unsigned char *)pce_sps_data->result->auth_byte_count,
2169 2 * CRYPTO_REG_SIZE);
2170
2171 if (_qce_unlock_other_pipes(pce_dev, req_info)) {
2172 qce_free_req_info(pce_dev, req_info, true);
2173 qce_callback(areq, digest, (char *)bytecount32,
2174 -ENXIO);
2175 return -ENXIO;
2176 }
2177
2178 result_dump_status = be32_to_cpu(pce_sps_data->result->status);
2179 pce_sps_data->result->status = 0;
2180 if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
2181 | (1 << CRYPTO_HSD_ERR))) {
2182
2183 pr_err("sha operation error. Status %x\n", result_dump_status);
2184 result_status = -ENXIO;
2185 } else if (pce_sps_data->consumer_status) {
2186 pr_err("sha sps operation error. sps status %x\n",
2187 pce_sps_data->consumer_status);
2188 result_status = -ENXIO;
2189 }
2190 qce_free_req_info(pce_dev, req_info, true);
2191 qce_callback(areq, digest, (char *)bytecount32, result_status);
2192 return 0;
2193}
2194
2195static int _f9_complete(struct qce_device *pce_dev, int req_info)
2196{
2197 uint32_t mac_i;
2198 int32_t result_status = 0;
2199 uint32_t result_dump_status;
2200 struct ce_request_info *preq_info;
2201 struct ce_sps_data *pce_sps_data;
2202 qce_comp_func_ptr_t qce_callback;
2203 void *areq;
2204
2205 preq_info = &pce_dev->ce_request_info[req_info];
2206 pce_sps_data = &preq_info->ce_sps;
2207 qce_callback = preq_info->qce_cb;
2208 areq = preq_info->areq;
2209 dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
2210 preq_info->ota_size, DMA_TO_DEVICE);
2211 _byte_stream_to_net_words(&mac_i,
2212 (char *)(&pce_sps_data->result->auth_iv[0]),
2213 CRYPTO_REG_SIZE);
2214
2215 if (_qce_unlock_other_pipes(pce_dev, req_info)) {
2216 qce_free_req_info(pce_dev, req_info, true);
2217 qce_callback(areq, NULL, NULL, -ENXIO);
2218 return -ENXIO;
2219 }
2220
2221 result_dump_status = be32_to_cpu(pce_sps_data->result->status);
2222 pce_sps_data->result->status = 0;
2223 if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
2224 | (1 << CRYPTO_HSD_ERR))) {
2225 pr_err("f9 operation error. Status %x\n", result_dump_status);
2226 result_status = -ENXIO;
2227 } else if (pce_sps_data->consumer_status |
2228 pce_sps_data->producer_status) {
2229 pr_err("f9 sps operation error. sps status %x %x\n",
2230 pce_sps_data->consumer_status,
2231 pce_sps_data->producer_status);
2232 result_status = -ENXIO;
2233 }
2234 qce_free_req_info(pce_dev, req_info, true);
2235 qce_callback(areq, (char *)&mac_i, NULL, result_status);
2236
2237 return 0;
2238}
2239
2240static int _ablk_cipher_complete(struct qce_device *pce_dev, int req_info)
2241{
2242 struct ablkcipher_request *areq;
2243 unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
2244 int32_t result_status = 0;
2245 uint32_t result_dump_status;
2246 struct ce_request_info *preq_info;
2247 struct ce_sps_data *pce_sps_data;
2248 qce_comp_func_ptr_t qce_callback;
2249
2250 preq_info = &pce_dev->ce_request_info[req_info];
2251 pce_sps_data = &preq_info->ce_sps;
2252 qce_callback = preq_info->qce_cb;
2253 areq = (struct ablkcipher_request *) preq_info->areq;
2254 if (areq->src != areq->dst) {
2255 qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
2256 preq_info->dst_nents, DMA_FROM_DEVICE);
2257 }
2258 qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
2259 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
2260 DMA_TO_DEVICE);
2261
2262 if (_qce_unlock_other_pipes(pce_dev, req_info)) {
2263 qce_free_req_info(pce_dev, req_info, true);
2264 qce_callback(areq, NULL, NULL, -ENXIO);
2265 return -ENXIO;
2266 }
2267 result_dump_status = be32_to_cpu(pce_sps_data->result->status);
2268 pce_sps_data->result->status = 0;
2269
2270 if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
2271 | (1 << CRYPTO_HSD_ERR))) {
2272 pr_err("ablk_cipher operation error. Status %x\n",
2273 result_dump_status);
2274 result_status = -ENXIO;
2275 } else if (pce_sps_data->consumer_status |
2276 pce_sps_data->producer_status) {
2277 pr_err("ablk_cipher sps operation error. sps status %x %x\n",
2278 pce_sps_data->consumer_status,
2279 pce_sps_data->producer_status);
2280 result_status = -ENXIO;
2281 }
2282
2283 if (preq_info->mode == QCE_MODE_ECB) {
2284 qce_free_req_info(pce_dev, req_info, true);
2285 qce_callback(areq, NULL, NULL, pce_sps_data->consumer_status |
2286 result_status);
2287 } else {
2288 if (pce_dev->ce_bam_info.minor_version == 0) {
2289 if (preq_info->mode == QCE_MODE_CBC) {
2290 if (preq_info->dir == QCE_DECRYPT)
2291 memcpy(iv, (char *)preq_info->dec_iv,
2292 sizeof(iv));
2293 else
2294 memcpy(iv, (unsigned char *)
2295 (sg_virt(areq->src) +
2296 areq->src->length - 16),
2297 sizeof(iv));
2298 }
2299 if ((preq_info->mode == QCE_MODE_CTR) ||
2300 (preq_info->mode == QCE_MODE_XTS)) {
2301 uint32_t num_blk = 0;
2302 uint32_t cntr_iv3 = 0;
2303 unsigned long long cntr_iv64 = 0;
2304 unsigned char *b = (unsigned char *)(&cntr_iv3);
2305
2306 memcpy(iv, areq->info, sizeof(iv));
2307 if (preq_info->mode != QCE_MODE_XTS)
2308 num_blk = areq->nbytes/16;
2309 else
2310 num_blk = 1;
2311 cntr_iv3 = ((*(iv + 12) << 24) & 0xff000000) |
2312 (((*(iv + 13)) << 16) & 0xff0000) |
2313 (((*(iv + 14)) << 8) & 0xff00) |
2314 (*(iv + 15) & 0xff);
2315 cntr_iv64 =
2316 (((unsigned long long)cntr_iv3 &
2317 0xFFFFFFFFULL) +
2318 (unsigned long long)num_blk) %
2319 (unsigned long long)(0x100000000ULL);
2320
2321 cntr_iv3 = (u32)(cntr_iv64 & 0xFFFFFFFF);
2322 *(iv + 15) = (char)(*b);
2323 *(iv + 14) = (char)(*(b + 1));
2324 *(iv + 13) = (char)(*(b + 2));
2325 *(iv + 12) = (char)(*(b + 3));
2326 }
2327 } else {
2328 memcpy(iv,
2329 (char *)(pce_sps_data->result->encr_cntr_iv),
2330 sizeof(iv));
2331 }
2332 qce_free_req_info(pce_dev, req_info, true);
2333 qce_callback(areq, NULL, iv, result_status);
2334 }
2335 return 0;
2336}
2337
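/*
 * Illustrative sketch (not part of the driver): on minor_version 0
 * hardware the CTR/XTS branch above reconstructs the returned counter
 * IV in software, advancing the low 32 bits of the IV by the number of
 * processed blocks, modulo 2^32. Just that arithmetic:
 */
#include <stdint.h>

static void advance_ctr_iv_sketch(unsigned char iv[16], uint32_t num_blk)
{
	uint32_t lo = ((uint32_t)iv[12] << 24) | ((uint32_t)iv[13] << 16) |
		      ((uint32_t)iv[14] << 8) | (uint32_t)iv[15];

	lo += num_blk;			/* unsigned wrap == mod 2^32 */
	iv[12] = (unsigned char)(lo >> 24);
	iv[13] = (unsigned char)(lo >> 16);
	iv[14] = (unsigned char)(lo >> 8);
	iv[15] = (unsigned char)(lo & 0xff);
}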
2338static int _f8_complete(struct qce_device *pce_dev, int req_info)
2339{
2340 int32_t result_status = 0;
2341 uint32_t result_dump_status;
2342 uint32_t result_dump_status2;
2343 struct ce_request_info *preq_info;
2344 struct ce_sps_data *pce_sps_data;
2345 qce_comp_func_ptr_t qce_callback;
2346 void *areq;
2347
2348 preq_info = &pce_dev->ce_request_info[req_info];
2349 pce_sps_data = &preq_info->ce_sps;
2350 qce_callback = preq_info->qce_cb;
2351 areq = preq_info->areq;
2352 if (preq_info->phy_ota_dst)
2353 dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst,
2354 preq_info->ota_size, DMA_FROM_DEVICE);
2355 if (preq_info->phy_ota_src)
2356 dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
2357 preq_info->ota_size, (preq_info->phy_ota_dst) ?
2358 DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
2359
2360 if (_qce_unlock_other_pipes(pce_dev, req_info)) {
2361 qce_free_req_info(pce_dev, req_info, true);
2362 qce_callback(areq, NULL, NULL, -ENXIO);
2363 return -ENXIO;
2364 }
2365 result_dump_status = be32_to_cpu(pce_sps_data->result->status);
2366 result_dump_status2 = be32_to_cpu(pce_sps_data->result->status2);
2367
2368 if ((result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
2369 | (1 << CRYPTO_HSD_ERR)))) {
2370 pr_err(
2371 "f8 oper error. Dump Sta %x Sta2 %x req %d\n",
2372 result_dump_status, result_dump_status2, req_info);
2373 result_status = -ENXIO;
2374 } else if (pce_sps_data->consumer_status |
2375 pce_sps_data->producer_status) {
2376 pr_err("f8 sps operation error. sps status %x %x\n",
2377 pce_sps_data->consumer_status,
2378 pce_sps_data->producer_status);
2379 result_status = -ENXIO;
2380 }
2381 pce_sps_data->result->status = 0;
2382 pce_sps_data->result->status2 = 0;
2383 qce_free_req_info(pce_dev, req_info, true);
2384 qce_callback(areq, NULL, NULL, result_status);
2385 return 0;
2386}
2387
2388static void _qce_sps_iovec_count_init(struct qce_device *pce_dev, int req_info)
2389{
2390 struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info[req_info]
2391 .ce_sps;
2392 pce_sps_data->in_transfer.iovec_count = 0;
2393 pce_sps_data->out_transfer.iovec_count = 0;
2394}
2395
2396static void _qce_set_flag(struct sps_transfer *sps_bam_pipe, uint32_t flag)
2397{
2398 struct sps_iovec *iovec;
2399
2400 if (sps_bam_pipe->iovec_count == 0)
2401 return;
2402 iovec = sps_bam_pipe->iovec + (sps_bam_pipe->iovec_count - 1);
2403 iovec->flags |= flag;
2404}
2405
2406static int _qce_sps_add_data(dma_addr_t paddr, uint32_t len,
2407 struct sps_transfer *sps_bam_pipe)
2408{
2409 struct sps_iovec *iovec = sps_bam_pipe->iovec +
2410 sps_bam_pipe->iovec_count;
2411 uint32_t data_cnt;
2412
2413 while (len > 0) {
2414 if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
2415 pr_err("Num of descrptor %d exceed max (%d)",
2416 sps_bam_pipe->iovec_count,
2417 (uint32_t)QCE_MAX_NUM_DSCR);
2418 return -ENOMEM;
2419 }
2420 if (len > SPS_MAX_PKT_SIZE)
2421 data_cnt = SPS_MAX_PKT_SIZE;
2422 else
2423 data_cnt = len;
2424 iovec->size = data_cnt;
2425 iovec->addr = SPS_GET_LOWER_ADDR(paddr);
2426 iovec->flags = SPS_GET_UPPER_ADDR(paddr);
2427 sps_bam_pipe->iovec_count++;
2428 iovec++;
2429 paddr += data_cnt;
2430 len -= data_cnt;
2431 }
2432 return 0;
2433}
2434
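/*
 * Illustrative sketch (not part of the driver): each iovec above can
 * carry at most SPS_MAX_PKT_SIZE bytes, so a flat buffer of len bytes
 * consumes ceil(len / SPS_MAX_PKT_SIZE) of the QCE_MAX_NUM_DSCR iovec
 * slots available per pipe.
 */
#include <stdint.h>

static inline uint32_t descriptors_needed(uint32_t len, uint32_t max_pkt)
{
	return (len + max_pkt - 1) / max_pkt;	/* ceiling division */
}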
2435static int _qce_sps_add_sg_data(struct qce_device *pce_dev,
2436 struct scatterlist *sg_src, uint32_t nbytes,
2437 struct sps_transfer *sps_bam_pipe)
2438{
2439 uint32_t data_cnt, len;
2440 dma_addr_t addr;
2441 struct sps_iovec *iovec = sps_bam_pipe->iovec +
2442 sps_bam_pipe->iovec_count;
2443
2444 while (nbytes > 0) {
2445 len = min(nbytes, sg_dma_len(sg_src));
2446 nbytes -= len;
2447 addr = sg_dma_address(sg_src);
2448 if (pce_dev->ce_bam_info.minor_version == 0)
2449 len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size);
2450 while (len > 0) {
2451 if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
2452 pr_err("Num of descrptor %d exceed max (%d)",
2453 sps_bam_pipe->iovec_count,
2454 (uint32_t)QCE_MAX_NUM_DSCR);
2455 return -ENOMEM;
2456 }
2457 if (len > SPS_MAX_PKT_SIZE) {
2458 data_cnt = SPS_MAX_PKT_SIZE;
2459 iovec->size = data_cnt;
2460 iovec->addr = SPS_GET_LOWER_ADDR(addr);
2461 iovec->flags = SPS_GET_UPPER_ADDR(addr);
2462 } else {
2463 data_cnt = len;
2464 iovec->size = data_cnt;
2465 iovec->addr = SPS_GET_LOWER_ADDR(addr);
2466 iovec->flags = SPS_GET_UPPER_ADDR(addr);
2467 }
2468 iovec++;
2469 sps_bam_pipe->iovec_count++;
2470 addr += data_cnt;
2471 len -= data_cnt;
2472 }
2473 sg_src = sg_next(sg_src);
2474 }
2475 return 0;
2476}
2477
2478static int _qce_sps_add_sg_data_off(struct qce_device *pce_dev,
2479 struct scatterlist *sg_src, uint32_t nbytes, uint32_t off,
2480 struct sps_transfer *sps_bam_pipe)
2481{
2482 uint32_t data_cnt, len;
2483 dma_addr_t addr;
2484 struct sps_iovec *iovec = sps_bam_pipe->iovec +
2485 sps_bam_pipe->iovec_count;
2486 unsigned int res_within_sg;
2487
2488 if (!sg_src)
2489 return -ENOENT;
2490 res_within_sg = sg_dma_len(sg_src);
2491
2492 while (off > 0) {
2493 if (!sg_src) {
2494 pr_err("broken sg list off %d nbytes %d\n",
2495 off, nbytes);
2496 return -ENOENT;
2497 }
2498 len = sg_dma_len(sg_src);
2499 if (off < len) {
2500 res_within_sg = len - off;
2501 break;
2502 }
2503 off -= len;
2504 sg_src = sg_next(sg_src);
2505 if (sg_src)
2506 res_within_sg = sg_dma_len(sg_src);
2507 }
2508 while (nbytes > 0 && sg_src) {
2509 len = min(nbytes, res_within_sg);
2510 nbytes -= len;
2511 addr = sg_dma_address(sg_src) + off;
2512 if (pce_dev->ce_bam_info.minor_version == 0)
2513 len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size);
2514 while (len > 0) {
2515 if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
2516 pr_err("Num of descrptor %d exceed max (%d)",
2517 sps_bam_pipe->iovec_count,
2518 (uint32_t)QCE_MAX_NUM_DSCR);
2519 return -ENOMEM;
2520 }
2521 if (len > SPS_MAX_PKT_SIZE) {
2522 data_cnt = SPS_MAX_PKT_SIZE;
2523 iovec->size = data_cnt;
2524 iovec->addr = SPS_GET_LOWER_ADDR(addr);
2525 iovec->flags = SPS_GET_UPPER_ADDR(addr);
2526 } else {
2527 data_cnt = len;
2528 iovec->size = data_cnt;
2529 iovec->addr = SPS_GET_LOWER_ADDR(addr);
2530 iovec->flags = SPS_GET_UPPER_ADDR(addr);
2531 }
2532 iovec++;
2533 sps_bam_pipe->iovec_count++;
2534 addr += data_cnt;
2535 len -= data_cnt;
2536 }
2537 if (nbytes) {
2538 sg_src = sg_next(sg_src);
2539 if (!sg_src) {
2540 pr_err("more data bytes %d\n", nbytes);
2541 return -ENOMEM;
2542 }
2543 res_within_sg = sg_dma_len(sg_src);
2544 off = 0;
2545 }
2546 }
2547 return 0;
2548}
2549
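/*
 * Illustrative sketch (not part of the driver): the helper above first
 * walks the scatterlist to consume `off` bytes before adding any
 * descriptors. The same skip logic over a plain array of segment
 * lengths:
 */
#include <stdint.h>

/*
 * Returns the index of the first segment containing *off, adjusting
 * *off to the residual offset within that segment; -1 if the offset
 * lies past the end of the list.
 */
static int seek_segment(const uint32_t *seg_len, int nsegs, uint32_t *off)
{
	int i;

	for (i = 0; i < nsegs; i++) {
		if (*off < seg_len[i])
			return i;
		*off -= seg_len[i];
	}
	return -1;
}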
2550static int _qce_sps_add_cmd(struct qce_device *pce_dev, uint32_t flag,
2551 struct qce_cmdlist_info *cmdptr,
2552 struct sps_transfer *sps_bam_pipe)
2553{
2554 dma_addr_t paddr = GET_PHYS_ADDR(cmdptr->cmdlist);
2555 struct sps_iovec *iovec = sps_bam_pipe->iovec +
2556 sps_bam_pipe->iovec_count;
2557 iovec->size = cmdptr->size;
2558 iovec->addr = SPS_GET_LOWER_ADDR(paddr);
2559 iovec->flags = SPS_GET_UPPER_ADDR(paddr) | SPS_IOVEC_FLAG_CMD | flag;
2560 sps_bam_pipe->iovec_count++;
2561 if (sps_bam_pipe->iovec_count >= QCE_MAX_NUM_DSCR) {
2562 pr_err("Num of descrptor %d exceed max (%d)",
2563 sps_bam_pipe->iovec_count, (uint32_t)QCE_MAX_NUM_DSCR);
2564 return -ENOMEM;
2565 }
2566 return 0;
2567}
2568
2569static int _qce_sps_transfer(struct qce_device *pce_dev, int req_info)
2570{
2571 int rc = 0;
2572 struct ce_sps_data *pce_sps_data;
2573
2574 pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
2575 pce_sps_data->out_transfer.user =
2576 (void *)((uintptr_t)(CRYPTO_REQ_USER_PAT |
2577 (unsigned int) req_info));
2578 pce_sps_data->in_transfer.user =
2579 (void *)((uintptr_t)(CRYPTO_REQ_USER_PAT |
2580 (unsigned int) req_info));
2581 _qce_dump_descr_fifos_dbg(pce_dev, req_info);
2582
2583 if (pce_sps_data->in_transfer.iovec_count) {
2584 rc = sps_transfer(pce_dev->ce_bam_info.consumer.pipe,
2585 &pce_sps_data->in_transfer);
2586 if (rc) {
2587 pr_err("sps_xfr() fail (consumer pipe=0x%lx) rc = %d\n",
2588 (uintptr_t)pce_dev->ce_bam_info.consumer.pipe,
2589 rc);
2590 goto ret;
2591 }
2592 }
2593 rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe,
2594 &pce_sps_data->out_transfer);
2595 if (rc)
2596 pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n",
2597 (uintptr_t)pce_dev->ce_bam_info.producer.pipe, rc);
2598ret:
2599 if (rc)
2600 _qce_dump_descr_fifos(pce_dev, req_info);
2601 return rc;
2602}
2603
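/*
 * Illustrative sketch (not part of the driver): the user field set
 * above tags each transfer with its request index OR-ed with
 * CRYPTO_REQ_USER_PAT, so stale or corrupt notifications can be
 * rejected in _sps_producer_callback(). The encode/decode pair, with
 * the pattern value assumed here for illustration:
 */
#include <stdint.h>

#define REQ_USER_PAT	0xdead0000u	/* assumed stand-in */

static inline uintptr_t encode_req_tag(unsigned int req_info)
{
	return (uintptr_t)(REQ_USER_PAT | req_info);
}

static inline int decode_req_tag(uintptr_t user, unsigned int *req_info)
{
	if (((unsigned int)user & 0xffff0000) != REQ_USER_PAT)
		return -1;		/* not one of our descriptors */
	*req_info = (unsigned int)user & 0x00ff;
	return 0;
}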
2604/**
2605 * Allocate and Connect a CE peripheral's SPS endpoint
2606 *
2607 * This function allocates an endpoint context and
2608 * connects it with a memory endpoint by calling
2609 * appropriate SPS driver APIs.
2610 *
2611 * Also registers a SPS callback function with
2612 * SPS driver
2613 *
2614 * This function should only be called once typically
2615 * during driver probe.
2616 *
2617 * @pce_dev - Pointer to qce_device structure
2618 * @ep - Pointer to sps endpoint data structure
2619 * @is_producer - 1 means Producer endpoint
2620 * 0 means Consumer endpoint
2621 *
2622 * @return - 0 if successful else negative value.
2623 *
2624 */
2625static int qce_sps_init_ep_conn(struct qce_device *pce_dev,
2626 struct qce_sps_ep_conn_data *ep,
2627 bool is_producer)
2628{
2629 int rc = 0;
2630 struct sps_pipe *sps_pipe_info;
2631 struct sps_connect *sps_connect_info = &ep->connect;
2632 struct sps_register_event *sps_event = &ep->event;
2633
2634 /* Allocate endpoint context */
2635 sps_pipe_info = sps_alloc_endpoint();
2636 if (!sps_pipe_info) {
2637 pr_err("sps_alloc_endpoint() failed!!! is_producer=%d",
2638 is_producer);
2639 rc = -ENOMEM;
2640 goto out;
2641 }
2642 /* Now save the sps pipe handle */
2643 ep->pipe = sps_pipe_info;
2644
2645 /* Get default connection configuration for an endpoint */
2646 rc = sps_get_config(sps_pipe_info, sps_connect_info);
2647 if (rc) {
2648 pr_err("sps_get_config() fail pipe_handle=0x%lx, rc = %d\n",
2649 (uintptr_t)sps_pipe_info, rc);
2650 goto get_config_err;
2651 }
2652
2653 /* Modify the default connection configuration */
2654 if (is_producer) {
2655 /*
2656		 * For a CE producer transfer, the source should be
2657		 * the CE peripheral whereas the destination should
2658		 * be system memory.
2659 */
2660 sps_connect_info->source = pce_dev->ce_bam_info.bam_handle;
2661 sps_connect_info->destination = SPS_DEV_HANDLE_MEM;
2662 /* Producer pipe will handle this connection */
2663 sps_connect_info->mode = SPS_MODE_SRC;
2664 sps_connect_info->options =
2665 SPS_O_AUTO_ENABLE | SPS_O_DESC_DONE;
2666 } else {
2667		/* For a CE consumer transfer, the source should be
2668		 * system memory whereas the destination should be
2669		 * the CE peripheral
2670 */
2671 sps_connect_info->source = SPS_DEV_HANDLE_MEM;
2672 sps_connect_info->destination = pce_dev->ce_bam_info.bam_handle;
2673 sps_connect_info->mode = SPS_MODE_DEST;
2674 sps_connect_info->options =
2675 SPS_O_AUTO_ENABLE;
2676 }
2677
2678 /* Producer pipe index */
2679 sps_connect_info->src_pipe_index =
2680 pce_dev->ce_bam_info.src_pipe_index;
2681 /* Consumer pipe index */
2682 sps_connect_info->dest_pipe_index =
2683 pce_dev->ce_bam_info.dest_pipe_index;
2684 /* Set pipe group */
2685 sps_connect_info->lock_group = pce_dev->ce_bam_info.pipe_pair_index;
2686 sps_connect_info->event_thresh = 0x10;
2687 /*
2688	 * Max. number of scatter/gather buffers that can
2689	 * be passed by the block layer = 32 (NR_SG).
2690	 * Each BAM descriptor needs 64 bits (8 bytes).
2691	 * One BAM descriptor is required per buffer transfer.
2692	 * So in total 256 (32 * 8) bytes of descriptor FIFO are required.
2693	 * Due to a HW limitation, at least one extra descriptor's worth
2694	 * of memory must be allocated (256 + 8 bytes); to keep the size
2695	 * a power of 2, 512 bytes are allocated.
2696 */
2697 sps_connect_info->desc.size = QCE_MAX_NUM_DSCR * MAX_QCE_ALLOC_BAM_REQ *
2698 sizeof(struct sps_iovec);
2699 if (sps_connect_info->desc.size > MAX_SPS_DESC_FIFO_SIZE)
2700 sps_connect_info->desc.size = MAX_SPS_DESC_FIFO_SIZE;
2701 sps_connect_info->desc.base = dma_alloc_coherent(pce_dev->pdev,
2702 sps_connect_info->desc.size,
2703 &sps_connect_info->desc.phys_base,
2704 GFP_KERNEL);
2705 if (sps_connect_info->desc.base == NULL) {
2706 rc = -ENOMEM;
2707 pr_err("Can not allocate coherent memory for sps data\n");
2708 goto get_config_err;
2709 }
2710
2711 memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
2712
2713 /* Establish connection between peripheral and memory endpoint */
2714 rc = sps_connect(sps_pipe_info, sps_connect_info);
2715 if (rc) {
2716 pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
2717 (uintptr_t)sps_pipe_info, rc);
2718 goto sps_connect_err;
2719 }
2720
2721 sps_event->mode = SPS_TRIGGER_CALLBACK;
2722 sps_event->xfer_done = NULL;
2723 sps_event->user = (void *)pce_dev;
2724 if (is_producer) {
2725 sps_event->options = SPS_O_EOT | SPS_O_DESC_DONE;
2726 sps_event->callback = _sps_producer_callback;
2727 rc = sps_register_event(ep->pipe, sps_event);
2728 if (rc) {
2729 pr_err("Producer callback registration failed rc=%d\n",
2730 rc);
2731 goto sps_connect_err;
2732 }
2733 } else {
2734 sps_event->options = SPS_O_EOT;
2735 sps_event->callback = NULL;
2736 }
2737
2738	pr_debug("success, %s : pipe_handle=0x%lx, desc fifo base (phy) = 0x%pK\n",
2739		is_producer ? "PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)",
2740 (uintptr_t)sps_pipe_info, &sps_connect_info->desc.phys_base);
2741 goto out;
2742
2743sps_connect_err:
2744 dma_free_coherent(pce_dev->pdev,
2745 sps_connect_info->desc.size,
2746 sps_connect_info->desc.base,
2747 sps_connect_info->desc.phys_base);
2748get_config_err:
2749 sps_free_endpoint(sps_pipe_info);
2750out:
2751 return rc;
2752}
2753
2754/**
2755 * Disconnect and Deallocate a CE peripheral's SPS endpoint
2756 *
2757 * This function disconnects the endpoint and deallocates
2758 * the endpoint context.
2759 *
2760 * This function should only be called once typically
2761 * during driver remove.
2762 *
2763 * @pce_dev - Pointer to qce_device structure
2764 * @ep - Pointer to sps endpoint data structure
2765 *
2766 */
2767static void qce_sps_exit_ep_conn(struct qce_device *pce_dev,
2768 struct qce_sps_ep_conn_data *ep)
2769{
2770 struct sps_pipe *sps_pipe_info = ep->pipe;
2771 struct sps_connect *sps_connect_info = &ep->connect;
2772
2773 sps_disconnect(sps_pipe_info);
2774 dma_free_coherent(pce_dev->pdev,
2775 sps_connect_info->desc.size,
2776 sps_connect_info->desc.base,
2777 sps_connect_info->desc.phys_base);
2778 sps_free_endpoint(sps_pipe_info);
2779}
2780
2781static void qce_sps_release_bam(struct qce_device *pce_dev)
2782{
2783 struct bam_registration_info *pbam;
2784
2785 mutex_lock(&bam_register_lock);
2786 pbam = pce_dev->pbam;
2787 if (pbam == NULL)
2788 goto ret;
2789
2790 pbam->cnt--;
2791 if (pbam->cnt > 0)
2792 goto ret;
2793
2794 if (pce_dev->ce_bam_info.bam_handle) {
2795 sps_deregister_bam_device(pce_dev->ce_bam_info.bam_handle);
2796
2797 pr_debug("deregister bam handle 0x%lx\n",
2798 pce_dev->ce_bam_info.bam_handle);
2799 pce_dev->ce_bam_info.bam_handle = 0;
2800 }
2801 iounmap(pbam->bam_iobase);
2802 pr_debug("delete bam 0x%x\n", pbam->bam_mem);
2803 list_del(&pbam->qlist);
2804 kfree(pbam);
2805
2806ret:
2807 pce_dev->pbam = NULL;
2808 mutex_unlock(&bam_register_lock);
2809}
2810
2811static int qce_sps_get_bam(struct qce_device *pce_dev)
2812{
2813 int rc = 0;
2814 struct sps_bam_props bam = {0};
2815 struct bam_registration_info *pbam = NULL;
2816 struct bam_registration_info *p;
2817 uint32_t bam_cfg = 0;
2818
2819
2820 mutex_lock(&bam_register_lock);
2821
2822 list_for_each_entry(p, &qce50_bam_list, qlist) {
2823 if (p->bam_mem == pce_dev->bam_mem) {
2824 pbam = p; /* found */
2825 break;
2826 }
2827 }
2828
2829 if (pbam) {
2830 pr_debug("found bam 0x%x\n", pbam->bam_mem);
2831 pbam->cnt++;
2832 pce_dev->ce_bam_info.bam_handle = pbam->handle;
2833 pce_dev->ce_bam_info.bam_mem = pbam->bam_mem;
2834 pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase;
2835 pce_dev->pbam = pbam;
2836 pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
2837 goto ret;
2838 }
2839
2840 pbam = kzalloc(sizeof(struct bam_registration_info), GFP_KERNEL);
2841 if (!pbam) {
2842 rc = -ENOMEM;
2843 goto ret;
2844 }
2845 pbam->cnt = 1;
2846 pbam->bam_mem = pce_dev->bam_mem;
2847 pbam->bam_iobase = ioremap_nocache(pce_dev->bam_mem,
2848 pce_dev->bam_mem_size);
2849 if (!pbam->bam_iobase) {
2850 kfree(pbam);
2851 rc = -ENOMEM;
2852 pr_err("Can not map BAM io memory\n");
2853 goto ret;
2854 }
2855 pce_dev->ce_bam_info.bam_mem = pbam->bam_mem;
2856 pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase;
2857 pbam->handle = 0;
2858 pr_debug("allocate bam 0x%x\n", pbam->bam_mem);
2859 bam_cfg = readl_relaxed(pce_dev->ce_bam_info.bam_iobase +
2860 CRYPTO_BAM_CNFG_BITS_REG);
2861 pbam->support_cmd_dscr = (bam_cfg & CRYPTO_BAM_CD_ENABLE_MASK) ?
2862 true : false;
2863 if (pbam->support_cmd_dscr == false) {
2864 pr_info("qce50 don't support command descriptor. bam_cfg%x\n",
2865 bam_cfg);
2866 pce_dev->no_get_around = false;
2867 }
2868 pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
2869
2870 bam.phys_addr = pce_dev->ce_bam_info.bam_mem;
2871 bam.virt_addr = pce_dev->ce_bam_info.bam_iobase;
2872
2873 /*
2874	 * This event threshold value is only significant for BAM-to-BAM
2875 * transfer. It's ignored for BAM-to-System mode transfer.
2876 */
2877 bam.event_threshold = 0x10; /* Pipe event threshold */
2878 /*
2879	 * This threshold controls when the BAM publishes
2880 * the descriptor size on the sideband interface.
2881 * SPS HW will only be used when
2882 * data transfer size > 64 bytes.
2883 */
2884 bam.summing_threshold = 64;
2885	/* SPS driver will handle the crypto BAM IRQ */
2886 bam.irq = (u32)pce_dev->ce_bam_info.bam_irq;
2887 /*
2888 * Set flag to indicate BAM global device control is managed
2889 * remotely.
2890 */
2891 if ((pce_dev->support_cmd_dscr == false) || (pce_dev->is_shared))
2892 bam.manage = SPS_BAM_MGR_DEVICE_REMOTE;
2893 else
2894 bam.manage = SPS_BAM_MGR_LOCAL;
2895
2896 bam.ee = pce_dev->ce_bam_info.bam_ee;
2897 bam.ipc_loglevel = QCE_BAM_DEFAULT_IPC_LOGLVL;
2898 bam.options |= SPS_BAM_CACHED_WP;
2899 pr_debug("bam physical base=0x%lx\n", (uintptr_t)bam.phys_addr);
2900	pr_debug("bam virtual base=0x%pK\n", bam.virt_addr);
2901
2902 /* Register CE Peripheral BAM device to SPS driver */
2903 rc = sps_register_bam_device(&bam, &pbam->handle);
2904 if (rc) {
2905 pr_err("sps_register_bam_device() failed! err=%d", rc);
2906 rc = -EIO;
2907 iounmap(pbam->bam_iobase);
2908 kfree(pbam);
2909 goto ret;
2910 }
2911
2912 pce_dev->pbam = pbam;
2913 list_add_tail(&pbam->qlist, &qce50_bam_list);
2914 pce_dev->ce_bam_info.bam_handle = pbam->handle;
2915
2916ret:
2917 mutex_unlock(&bam_register_lock);
2918
2919 return rc;
2920}
2921/**
2922 * Initialize SPS HW connected with CE core
2923 *
2924 * This function registers BAM HW resources with the
2925 * SPS driver and then initializes 2 SPS endpoints
2926 *
2927 * This function should only be called once typically
2928 * during driver probe.
2929 *
2930 * @pce_dev - Pointer to qce_device structure
2931 *
2932 * @return - 0 if successful else negative value.
2933 *
2934 */
2935static int qce_sps_init(struct qce_device *pce_dev)
2936{
2937 int rc = 0;
2938
2939 rc = qce_sps_get_bam(pce_dev);
2940 if (rc)
2941 return rc;
2942 pr_debug("BAM device registered. bam_handle=0x%lx\n",
2943 pce_dev->ce_bam_info.bam_handle);
2944
2945 rc = qce_sps_init_ep_conn(pce_dev,
2946 &pce_dev->ce_bam_info.producer, true);
2947 if (rc)
2948 goto sps_connect_producer_err;
2949 rc = qce_sps_init_ep_conn(pce_dev,
2950 &pce_dev->ce_bam_info.consumer, false);
2951 if (rc)
2952 goto sps_connect_consumer_err;
2953
2954 pr_info(" QTI MSM CE-BAM at 0x%016llx irq %d\n",
2955 (unsigned long long)pce_dev->ce_bam_info.bam_mem,
2956 (unsigned int)pce_dev->ce_bam_info.bam_irq);
2957 return rc;
2958
2959sps_connect_consumer_err:
2960 qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer);
2961sps_connect_producer_err:
2962 qce_sps_release_bam(pce_dev);
2963 return rc;
2964}
2965
2966static inline int qce_alloc_req_info(struct qce_device *pce_dev)
2967{
2968 int i;
2969 int request_index = pce_dev->ce_request_index;
2970
2971 for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
2972 request_index++;
2973 if (request_index >= MAX_QCE_BAM_REQ)
2974 request_index = 0;
2975 if (xchg(&pce_dev->ce_request_info[request_index].
2976 in_use, true) == false) {
2977 pce_dev->ce_request_index = request_index;
2978 return request_index;
2979 }
2980 }
2981 pr_warn("pcedev %d no reqs available no_of_queued_req %d\n",
2982 pce_dev->dev_no, atomic_read(
2983 &pce_dev->no_of_queued_req));
2984 return -EBUSY;
2985}
2986
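/*
 * Illustrative sketch (not part of the driver): request slots above
 * are claimed lock-free; an atomic exchange on in_use either grabs a
 * free slot or observes it already taken. The same round-robin claim
 * with C11 atomics:
 */
#include <stdatomic.h>
#include <stdbool.h>

#define NUM_SLOTS 8			/* stands in for MAX_QCE_BAM_REQ */

static _Atomic bool slot_in_use[NUM_SLOTS];

static int claim_slot(int start)
{
	int i, idx = start;

	for (i = 0; i < NUM_SLOTS; i++) {
		if (++idx >= NUM_SLOTS)
			idx = 0;
		/* exchange returns the previous value: false == free */
		if (!atomic_exchange(&slot_in_use[idx], true))
			return idx;
	}
	return -1;			/* all slots busy */
}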
2987static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
2988 bool is_complete)
2989{
2990 pce_dev->ce_request_info[req_info].xfer_type = QCE_XFER_TYPE_LAST;
2991 if (xchg(&pce_dev->ce_request_info[req_info].in_use, false) == true) {
2992 if (req_info < MAX_QCE_BAM_REQ && is_complete)
2993 atomic_dec(&pce_dev->no_of_queued_req);
2994 } else
2995 pr_warn("request info %d free already\n", req_info);
2996}
2997
2998static void print_notify_debug(struct sps_event_notify *notify)
2999{
3000 phys_addr_t addr =
3001 DESC_FULL_ADDR((phys_addr_t) notify->data.transfer.iovec.flags,
3002 notify->data.transfer.iovec.addr);
3003	pr_debug("sps ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x user=0x%pK\n",
3004			notify->event_id, &addr,
3005 notify->data.transfer.iovec.size,
3006 notify->data.transfer.iovec.flags,
3007 notify->data.transfer.user);
3008}
3009
3010static void _qce_req_complete(struct qce_device *pce_dev, unsigned int req_info)
3011{
3012 struct ce_request_info *preq_info;
3013
3014 preq_info = &pce_dev->ce_request_info[req_info];
3015
3016 switch (preq_info->xfer_type) {
3017 case QCE_XFER_CIPHERING:
3018 _ablk_cipher_complete(pce_dev, req_info);
3019 break;
3020 case QCE_XFER_HASHING:
3021 _sha_complete(pce_dev, req_info);
3022 break;
3023 case QCE_XFER_AEAD:
3024 _aead_complete(pce_dev, req_info);
3025 break;
3026 case QCE_XFER_F8:
3027 _f8_complete(pce_dev, req_info);
3028 break;
3029 case QCE_XFER_F9:
3030 _f9_complete(pce_dev, req_info);
3031 break;
3032 default:
3033 qce_free_req_info(pce_dev, req_info, true);
3034 break;
3035 }
3036}
3037
3038static void qce_multireq_timeout(unsigned long data)
3039{
3040 struct qce_device *pce_dev = (struct qce_device *)data;
3041 int ret = 0;
3042 int last_seq;
3043 unsigned long flags;
3044
3045 last_seq = atomic_read(&pce_dev->bunch_cmd_seq);
3046 if (last_seq == 0 ||
3047 last_seq != atomic_read(&pce_dev->last_intr_seq)) {
3048 atomic_set(&pce_dev->last_intr_seq, last_seq);
3049 mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES));
3050 return;
3051 }
3052 /* last bunch mode command time out */
3053
3054 /*
3055	 * From here until the dummy request finishes its sps transfer and
3056	 * the owner is set back to none, interrupts are disabled so this
3057	 * path cannot be preempted or interrupted. If a BAM interrupt fired
3058	 * in between and the completion callback were invoked, the client
3059	 * driver could issue a new request, and a deadlock could occur.
3060 */
3061 local_irq_save(flags);
3062 if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_TIMEOUT)
3063 != QCE_OWNER_NONE) {
3064 local_irq_restore(flags);
3065 mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES));
3066 return;
3067 }
3068
3069 ret = qce_dummy_req(pce_dev);
3070 if (ret)
3071 pr_warn("pcedev %d: Failed to insert dummy req\n",
3072 pce_dev->dev_no);
3073 cmpxchg(&pce_dev->owner, QCE_OWNER_TIMEOUT, QCE_OWNER_NONE);
3074 pce_dev->mode = IN_INTERRUPT_MODE;
3075 local_irq_restore(flags);
3076
3077 del_timer(&(pce_dev->timer));
3078 pce_dev->qce_stats.no_of_timeouts++;
3079 pr_debug("pcedev %d mode switch to INTR\n", pce_dev->dev_no);
3080}
3081
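/*
 * Illustrative sketch (not part of the driver): the timer above
 * detects a stalled bunch by comparing the command sequence number
 * against the value recorded at the previous tick; only when no new
 * command arrived in between is the dummy request injected. The
 * detection step in isolation:
 */
static int bunch_stalled(int cmd_seq, int *last_seen)
{
	if (cmd_seq == 0 || cmd_seq != *last_seen) {
		*last_seen = cmd_seq;	/* progress: re-arm and wait */
		return 0;
	}
	return 1;			/* no progress since last tick */
}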
3082void qce_get_driver_stats(void *handle)
3083{
3084 struct qce_device *pce_dev = (struct qce_device *) handle;
3085
3086 if (!_qce50_disp_stats)
3087 return;
3088 pr_info("Engine %d timeout occuured %d\n", pce_dev->dev_no,
3089 pce_dev->qce_stats.no_of_timeouts);
3090 pr_info("Engine %d dummy request inserted %d\n", pce_dev->dev_no,
3091 pce_dev->qce_stats.no_of_dummy_reqs);
3092 if (pce_dev->mode)
3093 pr_info("Engine %d is in BUNCH MODE\n", pce_dev->dev_no);
3094 else
3095 pr_info("Engine %d is in INTERRUPT MODE\n", pce_dev->dev_no);
3096 pr_info("Engine %d outstanding request %d\n", pce_dev->dev_no,
3097 atomic_read(&pce_dev->no_of_queued_req));
3098}
3099EXPORT_SYMBOL(qce_get_driver_stats);
3100
3101void qce_clear_driver_stats(void *handle)
3102{
3103 struct qce_device *pce_dev = (struct qce_device *) handle;
3104
3105 pce_dev->qce_stats.no_of_timeouts = 0;
3106 pce_dev->qce_stats.no_of_dummy_reqs = 0;
3107}
3108EXPORT_SYMBOL(qce_clear_driver_stats);
3109
3110static void _sps_producer_callback(struct sps_event_notify *notify)
3111{
3112 struct qce_device *pce_dev = (struct qce_device *)
3113 ((struct sps_event_notify *)notify)->user;
3114 int rc = 0;
3115 unsigned int req_info;
3116 struct ce_sps_data *pce_sps_data;
3117 struct ce_request_info *preq_info;
3118
3119 print_notify_debug(notify);
3120
3121 req_info = (unsigned int)((uintptr_t)notify->data.transfer.user);
3122 if ((req_info & 0xffff0000) != CRYPTO_REQ_USER_PAT) {
3123 pr_warn("request information %d out of range\n", req_info);
3124 return;
3125 }
3126
3127 req_info = req_info & 0x00ff;
3128	if (req_info >= MAX_QCE_ALLOC_BAM_REQ) {
3129 pr_warn("request information %d out of range\n", req_info);
3130 return;
3131 }
3132
3133 preq_info = &pce_dev->ce_request_info[req_info];
3134
3135 pce_sps_data = &preq_info->ce_sps;
3136 if ((preq_info->xfer_type == QCE_XFER_CIPHERING ||
3137 preq_info->xfer_type == QCE_XFER_AEAD) &&
3138 pce_sps_data->producer_state == QCE_PIPE_STATE_IDLE) {
3139 pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
3140 pce_sps_data->out_transfer.iovec_count = 0;
3141 _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
3142 CRYPTO_RESULT_DUMP_SIZE,
3143 &pce_sps_data->out_transfer);
3144 _qce_set_flag(&pce_sps_data->out_transfer,
3145 SPS_IOVEC_FLAG_INT);
3146 rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe,
3147 &pce_sps_data->out_transfer);
3148 if (rc) {
3149 pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n",
3150 (uintptr_t)pce_dev->ce_bam_info.producer.pipe,
3151 rc);
3152 }
3153 return;
3154 }
3155
3156 _qce_req_complete(pce_dev, req_info);
3157}
3158
3159/**
3160 * De-initialize SPS HW connected with CE core
3161 *
3162 * This function deinitializes SPS endpoints and then
3163 * deregisters BAM resources from SPS driver.
3164 *
3165 * This function should only be called once typically
3166 * during driver remove.
3167 *
3168 * @pce_dev - Pointer to qce_device structure
3169 *
3170 */
3171static void qce_sps_exit(struct qce_device *pce_dev)
3172{
3173 qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.consumer);
3174 qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer);
3175 qce_sps_release_bam(pce_dev);
3176}
3177
3178static void qce_add_cmd_element(struct qce_device *pdev,
3179 struct sps_command_element **cmd_ptr, u32 addr,
3180 u32 data, struct sps_command_element **populate)
3181{
3182 (*cmd_ptr)->addr = (uint32_t)(addr + pdev->phy_iobase);
3183 (*cmd_ptr)->command = 0;
3184 (*cmd_ptr)->data = data;
3185 (*cmd_ptr)->mask = 0xFFFFFFFF;
3186 (*cmd_ptr)->reserved = 0;
3187 if (populate != NULL)
3188 *populate = *cmd_ptr;
3189 (*cmd_ptr)++;
3190}
3191
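/*
 * Illustrative sketch (not part of the driver): qce_add_cmd_element()
 * appends one register write to a command list and, via the optional
 * populate pointer, remembers elements whose data must be patched at
 * request time (key, IV, segment sizes) without rebuilding the list.
 * The pattern, condensed:
 */
#include <stdint.h>
#include <stddef.h>

struct cmd_elem { uint32_t addr, command, data, mask, reserved; };

static void add_elem(struct cmd_elem **cur, uint32_t addr, uint32_t data,
		     struct cmd_elem **populate)
{
	(*cur)->addr = addr;
	(*cur)->command = 0;
	(*cur)->data = data;
	(*cur)->mask = 0xFFFFFFFF;
	(*cur)->reserved = 0;
	if (populate)
		*populate = *cur;	/* saved for later in-place patch */
	(*cur)++;
}
/*
 * At request time the saved pointer is dereferenced and only its .data
 * field is rewritten, e.g. pcl_info->encr_seg_size->data = cryptlen.
 */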
3192static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, int cri_index,
3193 unsigned char **pvaddr, enum qce_cipher_mode_enum mode,
3194 bool key_128)
3195{
3196 struct sps_command_element *ce_vaddr;
3197 uintptr_t ce_vaddr_start;
3198 struct qce_cmdlistptr_ops *cmdlistptr;
3199 struct qce_cmdlist_info *pcl_info = NULL;
3200 int i = 0;
3201 uint32_t encr_cfg = 0;
3202 uint32_t key_reg = 0;
3203 uint32_t xts_key_reg = 0;
3204 uint32_t iv_reg = 0;
3205
3206 cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
3207 *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
3208 pdev->ce_bam_info.ce_burst_size);
3209 ce_vaddr = (struct sps_command_element *)(*pvaddr);
3210 ce_vaddr_start = (uintptr_t)(*pvaddr);
3211 /*
3212 * Designate chunks of the allocated memory to various
3213 * command list pointers related to AES cipher operations defined
3214 * in ce_cmdlistptrs_ops structure.
3215 */
3216 switch (mode) {
3217 case QCE_MODE_CBC:
3218 case QCE_MODE_CTR:
3219 if (key_128 == true) {
3220 cmdlistptr->cipher_aes_128_cbc_ctr.cmdlist =
3221 (uintptr_t)ce_vaddr;
3222 pcl_info = &(cmdlistptr->cipher_aes_128_cbc_ctr);
3223 if (mode == QCE_MODE_CBC)
3224 encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
3225 else
3226 encr_cfg = pdev->reg.encr_cfg_aes_ctr_128;
3227 iv_reg = 4;
3228 key_reg = 4;
3229 xts_key_reg = 0;
3230 } else {
3231 cmdlistptr->cipher_aes_256_cbc_ctr.cmdlist =
3232 (uintptr_t)ce_vaddr;
3233 pcl_info = &(cmdlistptr->cipher_aes_256_cbc_ctr);
3234
3235 if (mode == QCE_MODE_CBC)
3236 encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
3237 else
3238 encr_cfg = pdev->reg.encr_cfg_aes_ctr_256;
3239 iv_reg = 4;
3240 key_reg = 8;
3241 xts_key_reg = 0;
3242 }
3243 break;
3244 case QCE_MODE_ECB:
3245 if (key_128 == true) {
3246 cmdlistptr->cipher_aes_128_ecb.cmdlist =
3247 (uintptr_t)ce_vaddr;
3248 pcl_info = &(cmdlistptr->cipher_aes_128_ecb);
3249
3250 encr_cfg = pdev->reg.encr_cfg_aes_ecb_128;
3251 iv_reg = 0;
3252 key_reg = 4;
3253 xts_key_reg = 0;
3254 } else {
3255 cmdlistptr->cipher_aes_256_ecb.cmdlist =
3256 (uintptr_t)ce_vaddr;
3257 pcl_info = &(cmdlistptr->cipher_aes_256_ecb);
3258
3259 encr_cfg = pdev->reg.encr_cfg_aes_ecb_256;
3260 iv_reg = 0;
3261 key_reg = 8;
3262 xts_key_reg = 0;
3263 }
3264 break;
3265 case QCE_MODE_XTS:
3266 if (key_128 == true) {
3267 cmdlistptr->cipher_aes_128_xts.cmdlist =
3268 (uintptr_t)ce_vaddr;
3269 pcl_info = &(cmdlistptr->cipher_aes_128_xts);
3270
3271 encr_cfg = pdev->reg.encr_cfg_aes_xts_128;
3272 iv_reg = 4;
3273 key_reg = 4;
3274 xts_key_reg = 4;
3275 } else {
3276 cmdlistptr->cipher_aes_256_xts.cmdlist =
3277 (uintptr_t)ce_vaddr;
3278 pcl_info = &(cmdlistptr->cipher_aes_256_xts);
3279
3280 encr_cfg = pdev->reg.encr_cfg_aes_xts_256;
3281 iv_reg = 4;
3282 key_reg = 8;
3283 xts_key_reg = 8;
3284 }
3285 break;
3286 default:
3287 pr_err("Unknown mode of operation %d received, exiting now\n",
3288 mode);
3289		return -EINVAL;
3291 }
3292
3293 /* clear status register */
3294 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
3295
3296 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3297 pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
3298
3299 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
3300 &pcl_info->seg_size);
3301 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
3302 &pcl_info->encr_seg_cfg);
3303 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
3304 &pcl_info->encr_seg_size);
3305 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
3306 &pcl_info->encr_seg_start);
3307 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
3308 (uint32_t)0xffffffff, &pcl_info->encr_mask);
3309 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
3310 (uint32_t)0xffffffff, NULL);
3311 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
3312 (uint32_t)0xffffffff, NULL);
3313 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
3314 (uint32_t)0xffffffff, NULL);
3315 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
3316 &pcl_info->auth_seg_cfg);
3317 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
3318 &pcl_info->encr_key);
3319 for (i = 1; i < key_reg; i++)
3320 qce_add_cmd_element(pdev, &ce_vaddr,
3321 (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
3322 0, NULL);
3323 if (xts_key_reg) {
3324 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_XTS_KEY0_REG,
3325 0, &pcl_info->encr_xts_key);
3326 for (i = 1; i < xts_key_reg; i++)
3327 qce_add_cmd_element(pdev, &ce_vaddr,
3328 (CRYPTO_ENCR_XTS_KEY0_REG +
3329 i * sizeof(uint32_t)), 0, NULL);
3330 qce_add_cmd_element(pdev, &ce_vaddr,
3331 CRYPTO_ENCR_XTS_DU_SIZE_REG, 0,
3332 &pcl_info->encr_xts_du_size);
3333 }
3334 if (iv_reg) {
3335 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
3336 &pcl_info->encr_cntr_iv);
3337 for (i = 1; i < iv_reg; i++)
3338 qce_add_cmd_element(pdev, &ce_vaddr,
3339 (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
3340 0, NULL);
3341 }
3342	/* Add dummy command elements to pad the list to a burst-size multiple */
3343 if (mode == QCE_MODE_XTS) {
3344 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
3345 0, &pcl_info->auth_seg_size);
3346 } else {
3347 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
3348 0, &pcl_info->auth_seg_size);
3349 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
3350 0, &pcl_info->auth_seg_size);
3351 }
3352 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3353 pdev->reg.crypto_cfg_le, NULL);
3354
3355 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
3356 ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
3357 (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
3358
3359 pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
3360 *pvaddr = (unsigned char *) ce_vaddr;
3361
3362 return 0;
3363}
3364
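/*
 * Summary of the per-mode register counts chosen above (32-bit words;
 * derived from the switch in _setup_cipher_aes_cmdlistptrs()):
 *
 *	mode		iv_reg	key_reg (128/256)	xts_key_reg (128/256)
 *	CBC/CTR		4	4 / 8			0
 *	ECB		0	4 / 8			0
 *	XTS		4	4 / 8			4 / 8
 */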
3365static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev, int cri_index,
3366 unsigned char **pvaddr, enum qce_cipher_alg_enum alg,
3367 bool mode_cbc)
3368{
3369
3370 struct sps_command_element *ce_vaddr;
3371 uintptr_t ce_vaddr_start;
3372 struct qce_cmdlistptr_ops *cmdlistptr;
3373 struct qce_cmdlist_info *pcl_info = NULL;
3374 int i = 0;
3375 uint32_t encr_cfg = 0;
3376 uint32_t key_reg = 0;
3377 uint32_t iv_reg = 0;
3378
3379 cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
3380 *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
3381 pdev->ce_bam_info.ce_burst_size);
3382 ce_vaddr = (struct sps_command_element *)(*pvaddr);
3383 ce_vaddr_start = (uintptr_t)(*pvaddr);
3384
3385 /*
3386 * Designate chunks of the allocated memory to various
3387 * command list pointers related to cipher operations defined
3388 * in ce_cmdlistptrs_ops structure.
3389 */
3390 switch (alg) {
3391 case CIPHER_ALG_DES:
3392 if (mode_cbc) {
3393 cmdlistptr->cipher_des_cbc.cmdlist =
3394 (uintptr_t)ce_vaddr;
3395 pcl_info = &(cmdlistptr->cipher_des_cbc);
3396
3397
3398 encr_cfg = pdev->reg.encr_cfg_des_cbc;
3399 iv_reg = 2;
3400 key_reg = 2;
3401 } else {
3402 cmdlistptr->cipher_des_ecb.cmdlist =
3403 (uintptr_t)ce_vaddr;
3404 pcl_info = &(cmdlistptr->cipher_des_ecb);
3405
3406 encr_cfg = pdev->reg.encr_cfg_des_ecb;
3407 iv_reg = 0;
3408 key_reg = 2;
3409 }
3410 break;
3411 case CIPHER_ALG_3DES:
3412 if (mode_cbc) {
3413 cmdlistptr->cipher_3des_cbc.cmdlist =
3414 (uintptr_t)ce_vaddr;
3415 pcl_info = &(cmdlistptr->cipher_3des_cbc);
3416
3417 encr_cfg = pdev->reg.encr_cfg_3des_cbc;
3418 iv_reg = 2;
3419 key_reg = 6;
3420 } else {
3421 cmdlistptr->cipher_3des_ecb.cmdlist =
3422 (uintptr_t)ce_vaddr;
3423 pcl_info = &(cmdlistptr->cipher_3des_ecb);
3424
3425 encr_cfg = pdev->reg.encr_cfg_3des_ecb;
3426 iv_reg = 0;
3427 key_reg = 6;
3428 }
3429 break;
3430 default:
3431 pr_err("Unknown algorithms %d received, exiting now\n", alg);
3432 return -EINVAL;
3433 break;
3434 }
3435
3436 /* clear status register */
3437 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
3438
3439 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3440 pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
3441
3442 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
3443 &pcl_info->seg_size);
3444 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
3445 &pcl_info->encr_seg_cfg);
3446 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
3447 &pcl_info->encr_seg_size);
3448 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
3449 &pcl_info->encr_seg_start);
3450 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
3451 &pcl_info->auth_seg_cfg);
3452
3453 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
3454 &pcl_info->encr_key);
3455 for (i = 1; i < key_reg; i++)
3456 qce_add_cmd_element(pdev, &ce_vaddr,
3457 (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
3458 0, NULL);
3459 if (iv_reg) {
3460 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
3461 &pcl_info->encr_cntr_iv);
3462 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
3463 NULL);
3464 }
3465
3466 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3467 pdev->reg.crypto_cfg_le, NULL);
3468
3469 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
3470 ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
3471 (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
3472
3473 pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
3474 *pvaddr = (unsigned char *) ce_vaddr;
3475
3476 return 0;
3477}
3478
3479static int _setup_cipher_null_cmdlistptrs(struct qce_device *pdev,
3480 int cri_index, unsigned char **pvaddr)
3481{
3482 struct sps_command_element *ce_vaddr;
3483 uintptr_t ce_vaddr_start;
3484 struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info
3485 [cri_index].ce_sps.cmdlistptr;
3486 struct qce_cmdlist_info *pcl_info = NULL;
3487
3488 *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
3489 pdev->ce_bam_info.ce_burst_size);
3490 ce_vaddr_start = (uintptr_t)(*pvaddr);
3491 ce_vaddr = (struct sps_command_element *)(*pvaddr);
3492
3493 cmdlistptr->cipher_null.cmdlist = (uintptr_t)ce_vaddr;
3494 pcl_info = &(cmdlistptr->cipher_null);
3495
3496 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG,
3497 pdev->ce_bam_info.ce_burst_size, NULL);
3498
3499 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
3500 pdev->reg.encr_cfg_aes_ecb_128, NULL);
3501 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
3502 NULL);
3503 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
3504 NULL);
3505
3506 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
3507 0, NULL);
3508 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
3509 0, NULL);
3510 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
3511 NULL);
3512
3513 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
3514 ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
3515 (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
3516
3517 pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
3518 *pvaddr = (unsigned char *) ce_vaddr;
3519 return 0;
3520}
3521
3522static int _setup_auth_cmdlistptrs(struct qce_device *pdev, int cri_index,
3523 unsigned char **pvaddr, enum qce_hash_alg_enum alg,
3524 bool key_128)
3525{
3526 struct sps_command_element *ce_vaddr;
3527 uintptr_t ce_vaddr_start;
3528 struct qce_cmdlistptr_ops *cmdlistptr;
3529 struct qce_cmdlist_info *pcl_info = NULL;
3530 int i = 0;
3531 uint32_t key_reg = 0;
3532 uint32_t auth_cfg = 0;
3533 uint32_t iv_reg = 0;
3534
3535 cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
3536 *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
3537 pdev->ce_bam_info.ce_burst_size);
3538 ce_vaddr_start = (uintptr_t)(*pvaddr);
3539 ce_vaddr = (struct sps_command_element *)(*pvaddr);
3540
3541 /*
3542 * Designate chunks of the allocated memory to various
3543 * command list pointers related to authentication operations
3544 * defined in ce_cmdlistptrs_ops structure.
3545 */
3546 switch (alg) {
3547 case QCE_HASH_SHA1:
3548 cmdlistptr->auth_sha1.cmdlist = (uintptr_t)ce_vaddr;
3549 pcl_info = &(cmdlistptr->auth_sha1);
3550
3551 auth_cfg = pdev->reg.auth_cfg_sha1;
3552 iv_reg = 5;
3553
3554 /* clear status register */
3555 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
3556 0, NULL);
3557
3558 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3559 pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
3560
3561 break;
3562 case QCE_HASH_SHA256:
3563 cmdlistptr->auth_sha256.cmdlist = (uintptr_t)ce_vaddr;
3564 pcl_info = &(cmdlistptr->auth_sha256);
3565
3566 auth_cfg = pdev->reg.auth_cfg_sha256;
3567 iv_reg = 8;
3568
3569 /* clear status register */
3570 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
3571 0, NULL);
3572
3573 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3574 pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
3575 /* 1 dummy write */
3576 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
3577 0, NULL);
3578 break;
3579 case QCE_HASH_SHA1_HMAC:
3580 cmdlistptr->auth_sha1_hmac.cmdlist = (uintptr_t)ce_vaddr;
3581 pcl_info = &(cmdlistptr->auth_sha1_hmac);
3582
3583 auth_cfg = pdev->reg.auth_cfg_hmac_sha1;
3584 key_reg = 16;
3585 iv_reg = 5;
3586
3587 /* clear status register */
3588 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
3589 0, NULL);
3590
3591 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3592 pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
3593 break;
3594 case QCE_HASH_SHA256_HMAC:
3595 cmdlistptr->auth_sha256_hmac.cmdlist = (uintptr_t)ce_vaddr;
3596 pcl_info = &(cmdlistptr->auth_sha256_hmac);
3597
3598 auth_cfg = pdev->reg.auth_cfg_hmac_sha256;
3599 key_reg = 16;
3600 iv_reg = 8;
3601
3602 /* clear status register */
3603 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
3604 NULL);
3605
3606 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3607 pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
3608 /* 1 dummy write */
3609 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
3610 0, NULL);
3611 break;
3612 case QCE_HASH_AES_CMAC:
3613		if (key_128) {
3614 cmdlistptr->auth_aes_128_cmac.cmdlist =
3615 (uintptr_t)ce_vaddr;
3616 pcl_info = &(cmdlistptr->auth_aes_128_cmac);
3617
3618 auth_cfg = pdev->reg.auth_cfg_cmac_128;
3619 key_reg = 4;
3620 } else {
3621 cmdlistptr->auth_aes_256_cmac.cmdlist =
3622 (uintptr_t)ce_vaddr;
3623 pcl_info = &(cmdlistptr->auth_aes_256_cmac);
3624
3625 auth_cfg = pdev->reg.auth_cfg_cmac_256;
3626 key_reg = 8;
3627 }
3628
3629 /* clear status register */
3630 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
3631 NULL);
3632
3633 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3634 pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
3635 /* 1 dummy write */
3636 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
3637 0, NULL);
3638 break;
3639	default:
3640		pr_err("Unknown algorithm %d received, exiting now\n", alg);
3641		return -EINVAL;
3643 }
3644
3645 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
3646 &pcl_info->seg_size);
3647 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
3648 &pcl_info->encr_seg_cfg);
3649 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
3650 auth_cfg, &pcl_info->auth_seg_cfg);
3651 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
3652 &pcl_info->auth_seg_size);
3653 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
3654 &pcl_info->auth_seg_start);
3655
3656 if (alg == QCE_HASH_AES_CMAC) {
3657 /* reset auth iv, bytecount and key registers */
3658 for (i = 0; i < 16; i++)
3659 qce_add_cmd_element(pdev, &ce_vaddr,
3660 (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
3661 0, NULL);
3662 for (i = 0; i < 16; i++)
3663 qce_add_cmd_element(pdev, &ce_vaddr,
3664 (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
3665 0, NULL);
3666 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
3667 0, NULL);
3668 } else {
3669 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
3670 &pcl_info->auth_iv);
3671 for (i = 1; i < iv_reg; i++)
3672 qce_add_cmd_element(pdev, &ce_vaddr,
3673 (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
3674 0, NULL);
3675 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
3676 0, &pcl_info->auth_bytecount);
3677 }
3678 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
3679
3680 if (key_reg) {
3681 qce_add_cmd_element(pdev, &ce_vaddr,
3682 CRYPTO_AUTH_KEY0_REG, 0, &pcl_info->auth_key);
3683 for (i = 1; i < key_reg; i++)
3684 qce_add_cmd_element(pdev, &ce_vaddr,
3685 (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
3686 0, NULL);
3687 }
3688 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3689 pdev->reg.crypto_cfg_le, NULL);
3690
3691 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
3692 ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
3693 (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
3694
3695 pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
3696 *pvaddr = (unsigned char *) ce_vaddr;
3697
3698 return 0;
3699}
3700
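/*
 * Pre-build the command list for an AEAD (cipher plus HMAC)
 * operation. One list exists per cipher algorithm/key size and
 * digest (SHA1 or SHA256) combination; only CBC mode is handled
 * here, any other mode returns -EINVAL.
 */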
3701static int _setup_aead_cmdlistptrs(struct qce_device *pdev,
3702 int cri_index,
3703 unsigned char **pvaddr,
3704 uint32_t alg,
3705 uint32_t mode,
3706 uint32_t key_size,
3707 bool sha1)
3708{
3709 struct sps_command_element *ce_vaddr;
3710 uintptr_t ce_vaddr_start;
3711 struct qce_cmdlistptr_ops *cmdlistptr;
3712 struct qce_cmdlist_info *pcl_info = NULL;
3713 uint32_t key_reg;
3714 uint32_t iv_reg;
3715 uint32_t i;
3716 uint32_t enciv_in_word;
3717 uint32_t encr_cfg;
3718
3719 cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
3720 *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
3721 pdev->ce_bam_info.ce_burst_size);
3722
3723 ce_vaddr_start = (uintptr_t)(*pvaddr);
3724 ce_vaddr = (struct sps_command_element *)(*pvaddr);
3725
3726 switch (alg) {
3727
3728 case CIPHER_ALG_DES:
3729
3730 switch (mode) {
3731
3732 case QCE_MODE_CBC:
3733 if (sha1) {
3734 cmdlistptr->aead_hmac_sha1_cbc_des.cmdlist =
3735 (uintptr_t)ce_vaddr;
3736 pcl_info = &(cmdlistptr->
3737 aead_hmac_sha1_cbc_des);
3738 } else {
3739 cmdlistptr->aead_hmac_sha256_cbc_des.cmdlist =
3740 (uintptr_t)ce_vaddr;
3741 pcl_info = &(cmdlistptr->
3742 aead_hmac_sha256_cbc_des);
3743 }
3744 encr_cfg = pdev->reg.encr_cfg_des_cbc;
3745 break;
3746 default:
3747 return -EINVAL;
3748		}
3749
3750 enciv_in_word = 2;
3751
3752 break;
3753
3754 case CIPHER_ALG_3DES:
3755 switch (mode) {
3756
3757 case QCE_MODE_CBC:
3758 if (sha1) {
3759 cmdlistptr->aead_hmac_sha1_cbc_3des.cmdlist =
3760 (uintptr_t)ce_vaddr;
3761 pcl_info = &(cmdlistptr->
3762 aead_hmac_sha1_cbc_3des);
3763 } else {
3764 cmdlistptr->aead_hmac_sha256_cbc_3des.cmdlist =
3765 (uintptr_t)ce_vaddr;
3766 pcl_info = &(cmdlistptr->
3767 aead_hmac_sha256_cbc_3des);
3768 }
3769 encr_cfg = pdev->reg.encr_cfg_3des_cbc;
3770 break;
3771 default:
3772 return -EINVAL;
3773		}
3774
3775 enciv_in_word = 2;
3776
3777 break;
3778
3779 case CIPHER_ALG_AES:
3780 switch (mode) {
3781
3782 case QCE_MODE_CBC:
3783 if (key_size == AES128_KEY_SIZE) {
3784 if (sha1) {
3785 cmdlistptr->
3786 aead_hmac_sha1_cbc_aes_128.
3787 cmdlist = (uintptr_t)ce_vaddr;
3788 pcl_info = &(cmdlistptr->
3789 aead_hmac_sha1_cbc_aes_128);
3790 } else {
3791 cmdlistptr->
3792 aead_hmac_sha256_cbc_aes_128.
3793 cmdlist = (uintptr_t)ce_vaddr;
3794 pcl_info = &(cmdlistptr->
3795 aead_hmac_sha256_cbc_aes_128);
3796 }
3797 encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
3798 } else if (key_size == AES256_KEY_SIZE) {
3799 if (sha1) {
3800 cmdlistptr->
3801 aead_hmac_sha1_cbc_aes_256.
3802 cmdlist = (uintptr_t)ce_vaddr;
3803 pcl_info = &(cmdlistptr->
3804 aead_hmac_sha1_cbc_aes_256);
3805 } else {
3806 cmdlistptr->
3807 aead_hmac_sha256_cbc_aes_256.
3808 cmdlist = (uintptr_t)ce_vaddr;
3809 pcl_info = &(cmdlistptr->
3810 aead_hmac_sha256_cbc_aes_256);
3811 }
3812 encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
3813 } else {
3814 return -EINVAL;
3815 }
3816 break;
3817 default:
3818 return -EINVAL;
3819		}
3820
3821 enciv_in_word = 4;
3822
3823 break;
3824
3825 default:
3826 return -EINVAL;
3827	}
3828
3829
3830 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
3831
3832 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3833 pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
3834
3835
3836 key_reg = key_size/sizeof(uint32_t);
3837 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
3838 &pcl_info->encr_key);
3839 for (i = 1; i < key_reg; i++)
3840 qce_add_cmd_element(pdev, &ce_vaddr,
3841 (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
3842 0, NULL);
3843
3844 if (mode != QCE_MODE_ECB) {
3845 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
3846 &pcl_info->encr_cntr_iv);
3847 for (i = 1; i < enciv_in_word; i++)
3848 qce_add_cmd_element(pdev, &ce_vaddr,
3849 (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
3850 0, NULL);
3851	}
3852
3853 if (sha1)
3854 iv_reg = 5;
3855 else
3856 iv_reg = 8;
3857 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
3858 &pcl_info->auth_iv);
3859 for (i = 1; i < iv_reg; i++)
3860 qce_add_cmd_element(pdev, &ce_vaddr,
3861 (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
3862 0, NULL);
3863
3864 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
3865 0, &pcl_info->auth_bytecount);
3866 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
3867
3868 key_reg = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
3869 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
3870 &pcl_info->auth_key);
3871 for (i = 1; i < key_reg; i++)
3872 qce_add_cmd_element(pdev, &ce_vaddr,
3873 (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)), 0, NULL);
3874
3875 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
3876 &pcl_info->seg_size);
3877
3878 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
3879 &pcl_info->encr_seg_cfg);
3880 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
3881 &pcl_info->encr_seg_size);
3882 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
3883 &pcl_info->encr_seg_start);
3884
3885 if (sha1)
3886 qce_add_cmd_element(
3887 pdev,
3888 &ce_vaddr,
3889 CRYPTO_AUTH_SEG_CFG_REG,
3890 pdev->reg.auth_cfg_aead_sha1_hmac,
3891 &pcl_info->auth_seg_cfg);
3892 else
3893 qce_add_cmd_element(
3894 pdev,
3895 &ce_vaddr,
3896 CRYPTO_AUTH_SEG_CFG_REG,
3897 pdev->reg.auth_cfg_aead_sha256_hmac,
3898 &pcl_info->auth_seg_cfg);
3899
3900 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
3901 &pcl_info->auth_seg_size);
3902 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
3903 &pcl_info->auth_seg_start);
3904
3905 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3906 pdev->reg.crypto_cfg_le, NULL);
3907
3908 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
3909 ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
3910 (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
3911
3912 pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
3913 *pvaddr = (unsigned char *) ce_vaddr;
3914 return 0;
3915}
3916
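/*
 * Pre-build the command list for AES-CCM AEAD. In addition to the
 * usual cipher and auth segment setup, this clears the auth IV,
 * byte count and key registers, and programs the counter mask,
 * NONCE and CCM internal counter registers.
 */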
3917static int _setup_aead_ccm_cmdlistptrs(struct qce_device *pdev, int cri_index,
3918 unsigned char **pvaddr, bool key_128)
3919{
3920 struct sps_command_element *ce_vaddr;
3921 uintptr_t ce_vaddr_start;
3922 struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info
3923 [cri_index].ce_sps.cmdlistptr;
3924 struct qce_cmdlist_info *pcl_info = NULL;
3925 int i = 0;
3926 uint32_t encr_cfg = 0;
3927 uint32_t auth_cfg = 0;
3928 uint32_t key_reg = 0;
3929
3930 *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
3931 pdev->ce_bam_info.ce_burst_size);
3932 ce_vaddr_start = (uintptr_t)(*pvaddr);
3933 ce_vaddr = (struct sps_command_element *)(*pvaddr);
3934
3935 /*
3936 * Designate chunks of the allocated memory to various
3937 * command list pointers related to aead operations
3938 * defined in ce_cmdlistptrs_ops structure.
3939 */
3940	if (key_128) {
3941 cmdlistptr->aead_aes_128_ccm.cmdlist =
3942 (uintptr_t)ce_vaddr;
3943 pcl_info = &(cmdlistptr->aead_aes_128_ccm);
3944
3945 auth_cfg = pdev->reg.auth_cfg_aes_ccm_128;
3946 encr_cfg = pdev->reg.encr_cfg_aes_ccm_128;
3947 key_reg = 4;
3948 } else {
3949
3950 cmdlistptr->aead_aes_256_ccm.cmdlist =
3951 (uintptr_t)ce_vaddr;
3952 pcl_info = &(cmdlistptr->aead_aes_256_ccm);
3953
3954 auth_cfg = pdev->reg.auth_cfg_aes_ccm_256;
3955 encr_cfg = pdev->reg.encr_cfg_aes_ccm_256;
3956
3957 key_reg = 8;
3958 }
3959
3960 /* clear status register */
3961 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
3962
3963 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3964 pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
3965
3966 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0, NULL);
3967 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
3968 NULL);
3969 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
3970 &pcl_info->seg_size);
3971 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
3972 encr_cfg, &pcl_info->encr_seg_cfg);
3973 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
3974 &pcl_info->encr_seg_size);
3975 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
3976 &pcl_info->encr_seg_start);
3977 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
3978 (uint32_t)0xffffffff, &pcl_info->encr_mask);
3979 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
3980 (uint32_t)0xffffffff, NULL);
3981 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
3982 (uint32_t)0xffffffff, NULL);
3983 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
3984 (uint32_t)0xffffffff, NULL);
3985 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
3986 auth_cfg, &pcl_info->auth_seg_cfg);
3987 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
3988 &pcl_info->auth_seg_size);
3989 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
3990 &pcl_info->auth_seg_start);
3991 /* reset auth iv, bytecount and key registers */
3992 for (i = 0; i < 8; i++)
3993 qce_add_cmd_element(pdev, &ce_vaddr,
3994 (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
3995 0, NULL);
3996 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
3997 0, NULL);
3998 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG,
3999 0, NULL);
4000 for (i = 0; i < 16; i++)
4001 qce_add_cmd_element(pdev, &ce_vaddr,
4002 (CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
4003 0, NULL);
4004 /* set auth key */
4005 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
4006 &pcl_info->auth_key);
4007 for (i = 1; i < key_reg; i++)
4008 qce_add_cmd_element(pdev, &ce_vaddr,
4009 (CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
4010 0, NULL);
4011 /* set NONCE info */
4012 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_INFO_NONCE0_REG, 0,
4013 &pcl_info->auth_nonce_info);
4014 for (i = 1; i < 4; i++)
4015 qce_add_cmd_element(pdev, &ce_vaddr,
4016 (CRYPTO_AUTH_INFO_NONCE0_REG +
4017 i * sizeof(uint32_t)), 0, NULL);
4018
4019 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
4020 &pcl_info->encr_key);
4021 for (i = 1; i < key_reg; i++)
4022 qce_add_cmd_element(pdev, &ce_vaddr,
4023 (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
4024 0, NULL);
4025 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
4026 &pcl_info->encr_cntr_iv);
4027 for (i = 1; i < 4; i++)
4028 qce_add_cmd_element(pdev, &ce_vaddr,
4029 (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
4030 0, NULL);
4031 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_CCM_INT_CNTR0_REG, 0,
4032 &pcl_info->encr_ccm_cntr_iv);
4033 for (i = 1; i < 4; i++)
4034 qce_add_cmd_element(pdev, &ce_vaddr,
4035 (CRYPTO_ENCR_CCM_INT_CNTR0_REG + i * sizeof(uint32_t)),
4036 0, NULL);
4037
4038 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
4039 pdev->reg.crypto_cfg_le, NULL);
4040
4041 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
4042 ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
4043 (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
4044
4045 pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
4046 *pvaddr = (unsigned char *) ce_vaddr;
4047
4048 return 0;
4049}
4050
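/*
 * Pre-build the command list for the OTA f8 (ciphering) algorithm.
 * Kasumi is selected explicitly; any other value, including
 * QCE_OTA_ALGO_SNOW3G, takes the Snow3G configuration.
 */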
4051static int _setup_f8_cmdlistptrs(struct qce_device *pdev, int cri_index,
4052 unsigned char **pvaddr, enum qce_ota_algo_enum alg)
4053{
4054 struct sps_command_element *ce_vaddr;
4055 uintptr_t ce_vaddr_start;
4056 struct qce_cmdlistptr_ops *cmdlistptr;
4057 struct qce_cmdlist_info *pcl_info = NULL;
4058 int i = 0;
4059 uint32_t encr_cfg = 0;
4060 uint32_t key_reg = 4;
4061
4062 cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
4063 *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
4064 pdev->ce_bam_info.ce_burst_size);
4065 ce_vaddr = (struct sps_command_element *)(*pvaddr);
4066 ce_vaddr_start = (uintptr_t)(*pvaddr);
4067
4068 /*
4069 * Designate chunks of the allocated memory to various
4070 * command list pointers related to f8 cipher algorithm defined
4071 * in ce_cmdlistptrs_ops structure.
4072 */
4073
4074 switch (alg) {
4075 case QCE_OTA_ALGO_KASUMI:
4076 cmdlistptr->f8_kasumi.cmdlist = (uintptr_t)ce_vaddr;
4077 pcl_info = &(cmdlistptr->f8_kasumi);
4078 encr_cfg = pdev->reg.encr_cfg_kasumi;
4079 break;
4080
4081 case QCE_OTA_ALGO_SNOW3G:
4082 default:
4083 cmdlistptr->f8_snow3g.cmdlist = (uintptr_t)ce_vaddr;
4084 pcl_info = &(cmdlistptr->f8_snow3g);
4085 encr_cfg = pdev->reg.encr_cfg_snow3g;
4086 break;
4087 }
4088 /* clear status register */
4089 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
4090 0, NULL);
4091 /* set config to big endian */
4092 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
4093 pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
4094
4095 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
4096 &pcl_info->seg_size);
4097
4098 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
4099 &pcl_info->encr_seg_cfg);
4100 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
4101 &pcl_info->encr_seg_size);
4102 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
4103 &pcl_info->encr_seg_start);
4104
4105 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
4106 &pcl_info->auth_seg_cfg);
4107
4108 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
4109 0, &pcl_info->auth_seg_size);
4110 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
4111 0, &pcl_info->auth_seg_start);
4112
4113 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
4114 &pcl_info->encr_key);
4115 for (i = 1; i < key_reg; i++)
4116 qce_add_cmd_element(pdev, &ce_vaddr,
4117 (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
4118 0, NULL);
4119
4120 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
4121 &pcl_info->encr_cntr_iv);
4122 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
4123 NULL);
4124 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
4125 pdev->reg.crypto_cfg_le, NULL);
4126
4127 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
4128 ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
4129 (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
4130
4131 pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
4132 *pvaddr = (unsigned char *) ce_vaddr;
4133
4134 return 0;
4135}
4136
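/*
 * Pre-build the command list for the OTA f9 (integrity) algorithm,
 * mirroring the f8 setup above but programming the auth segment
 * and auth IV registers instead of the cipher key.
 */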
4137static int _setup_f9_cmdlistptrs(struct qce_device *pdev, int cri_index,
4138 unsigned char **pvaddr, enum qce_ota_algo_enum alg)
4139{
4140 struct sps_command_element *ce_vaddr;
4141 uintptr_t ce_vaddr_start;
4142 struct qce_cmdlistptr_ops *cmdlistptr;
4143 struct qce_cmdlist_info *pcl_info = NULL;
4144 int i = 0;
4145 uint32_t auth_cfg = 0;
4146 uint32_t iv_reg = 0;
4147
4148 cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
4149 *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
4150 pdev->ce_bam_info.ce_burst_size);
4151 ce_vaddr_start = (uintptr_t)(*pvaddr);
4152 ce_vaddr = (struct sps_command_element *)(*pvaddr);
4153
4154 /*
4155 * Designate chunks of the allocated memory to various
4156 * command list pointers related to authentication operations
4157 * defined in ce_cmdlistptrs_ops structure.
4158 */
4159 switch (alg) {
4160 case QCE_OTA_ALGO_KASUMI:
4161 cmdlistptr->f9_kasumi.cmdlist = (uintptr_t)ce_vaddr;
4162 pcl_info = &(cmdlistptr->f9_kasumi);
4163 auth_cfg = pdev->reg.auth_cfg_kasumi;
4164 break;
4165
4166 case QCE_OTA_ALGO_SNOW3G:
4167 default:
4168 cmdlistptr->f9_snow3g.cmdlist = (uintptr_t)ce_vaddr;
4169 pcl_info = &(cmdlistptr->f9_snow3g);
4170 auth_cfg = pdev->reg.auth_cfg_snow3g;
4171	}
4172
4173 /* clear status register */
4174 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
4175 0, NULL);
4176 /* set config to big endian */
4177 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
4178 pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
4179
4180 iv_reg = 5;
4181
4182 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
4183 &pcl_info->seg_size);
4184
4185 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
4186 &pcl_info->encr_seg_cfg);
4187
4188 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
4189 auth_cfg, &pcl_info->auth_seg_cfg);
4190 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
4191 &pcl_info->auth_seg_size);
4192 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
4193 &pcl_info->auth_seg_start);
4194
4195 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
4196 &pcl_info->auth_iv);
4197 for (i = 1; i < iv_reg; i++) {
4198 qce_add_cmd_element(pdev, &ce_vaddr,
4199 (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
4200 0, NULL);
4201 }
4202 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
4203 0, &pcl_info->auth_bytecount);
4204 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
4205
4206 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
4207 pdev->reg.crypto_cfg_le, NULL);
4208
4209 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
4210 ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
4211 (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
4212
4213 pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
4214 *pvaddr = (unsigned char *) ce_vaddr;
4215
4216 return 0;
4217}
4218
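/*
 * Pre-build the one-element command list that writes
 * CRYPTO_CONFIG_RESET to CRYPTO_CONFIG_REG; it is issued with
 * SPS_IOVEC_FLAG_UNLOCK to release the pipe locking taken with
 * SPS_IOVEC_FLAG_LOCK at the start of a request.
 */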
4219static int _setup_unlock_pipe_cmdlistptrs(struct qce_device *pdev,
4220 int cri_index, unsigned char **pvaddr)
4221{
4222 struct sps_command_element *ce_vaddr;
4223 uintptr_t ce_vaddr_start = (uintptr_t)(*pvaddr);
4224 struct qce_cmdlistptr_ops *cmdlistptr;
4225 struct qce_cmdlist_info *pcl_info = NULL;
4226
4227 cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
4228 *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
4229 pdev->ce_bam_info.ce_burst_size);
4230 ce_vaddr = (struct sps_command_element *)(*pvaddr);
4231 cmdlistptr->unlock_all_pipes.cmdlist = (uintptr_t)ce_vaddr;
4232 pcl_info = &(cmdlistptr->unlock_all_pipes);
4233
4234 /*
4235 * Designate chunks of the allocated memory to command list
4236 * to unlock pipes.
4237 */
4238 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
4239 CRYPTO_CONFIG_RESET, NULL);
4240 pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
4241 *pvaddr = (unsigned char *) ce_vaddr;
4242
4243 return 0;
4244}
4245
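/*
 * Build every supported command list for one request slot. The
 * lists are carved sequentially out of the coherent memory that
 * pvaddr points into; each helper advances *pvaddr past the list
 * it just built.
 */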
4246static int qce_setup_cmdlistptrs(struct qce_device *pdev, int cri_index,
4247 unsigned char **pvaddr)
4248{
4249 struct sps_command_element *ce_vaddr =
4250 (struct sps_command_element *)(*pvaddr);
4251 /*
4252 * Designate chunks of the allocated memory to various
4253 * command list pointers related to operations defined
4254 * in ce_cmdlistptrs_ops structure.
4255 */
4256 ce_vaddr =
4257 (struct sps_command_element *)ALIGN(((uintptr_t) ce_vaddr),
4258 pdev->ce_bam_info.ce_burst_size);
4259 *pvaddr = (unsigned char *) ce_vaddr;
4260
4261 _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC,
4262 true);
4263 _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR,
4264 true);
4265 _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB,
4266 true);
4267 _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS,
4268 true);
4269 _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC,
4270 false);
4271 _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR,
4272 false);
4273 _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB,
4274 false);
4275 _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS,
4276 false);
4277
4278 _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
4279 true);
4280 _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
4281 false);
4282 _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
4283 true);
4284 _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
4285 false);
4286
4287 _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1,
4288 false);
4289 _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256,
4290 false);
4291
4292 _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1_HMAC,
4293 false);
4294 _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256_HMAC,
4295 false);
4296
4297 _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC,
4298 true);
4299 _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC,
4300 false);
4301
4302 _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
4303 QCE_MODE_CBC, DES_KEY_SIZE, true);
4304 _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
4305 QCE_MODE_CBC, DES3_EDE_KEY_SIZE, true);
4306 _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
4307 QCE_MODE_CBC, AES128_KEY_SIZE, true);
4308 _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
4309 QCE_MODE_CBC, AES256_KEY_SIZE, true);
4310 _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
4311 QCE_MODE_CBC, DES_KEY_SIZE, false);
4312 _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
4313 QCE_MODE_CBC, DES3_EDE_KEY_SIZE, false);
4314 _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
4315 QCE_MODE_CBC, AES128_KEY_SIZE, false);
4316 _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
4317 QCE_MODE_CBC, AES256_KEY_SIZE, false);
4318
4319 _setup_cipher_null_cmdlistptrs(pdev, cri_index, pvaddr);
4320
4321 _setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, true);
4322 _setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, false);
4323 _setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI);
4324 _setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G);
4325 _setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI);
4326 _setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G);
4327 _setup_unlock_pipe_cmdlistptrs(pdev, cri_index, pvaddr);
4328
4329 return 0;
4330}
4331
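/*
 * Partition the coherent and iovec memory among all request slots.
 * Each slot receives an input and an output iovec array, its
 * command lists (when command descriptors are supported), two
 * result dump areas and a two-burst ignore buffer used for
 * pass-through transfers.
 */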
4332static int qce_setup_ce_sps_data(struct qce_device *pce_dev)
4333{
4334 unsigned char *vaddr;
4335 int i;
4336 unsigned char *iovec_vaddr;
4337 int iovec_memsize;
4338
4339 vaddr = pce_dev->coh_vmem;
4340 vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
4341 pce_dev->ce_bam_info.ce_burst_size);
4342 iovec_vaddr = pce_dev->iovec_vmem;
4343 iovec_memsize = pce_dev->iovec_memsize;
4344 for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++) {
4345		/* Allow QCE_MAX_NUM_DSCR cmd/data descriptor entries per pipe */
4346 pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec =
4347 (struct sps_iovec *)iovec_vaddr;
4348 pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec_phys =
4349 virt_to_phys(pce_dev->ce_request_info[i].
4350 ce_sps.in_transfer.iovec);
4351 iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE;
4352 iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE;
4353 pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec =
4354 (struct sps_iovec *)iovec_vaddr;
4355 pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec_phys =
4356 virt_to_phys(pce_dev->ce_request_info[i].
4357 ce_sps.out_transfer.iovec);
4358 iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE;
4359 iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE;
4360 if (pce_dev->support_cmd_dscr)
4361 qce_setup_cmdlistptrs(pce_dev, i, &vaddr);
4362 vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
4363 pce_dev->ce_bam_info.ce_burst_size);
4364 pce_dev->ce_request_info[i].ce_sps.result_dump =
4365 (uintptr_t)vaddr;
4366 pce_dev->ce_request_info[i].ce_sps.result_dump_phy =
4367 GET_PHYS_ADDR((uintptr_t)vaddr);
4368 pce_dev->ce_request_info[i].ce_sps.result =
4369 (struct ce_result_dump_format *)vaddr;
4370 vaddr += CRYPTO_RESULT_DUMP_SIZE;
4371
4372 pce_dev->ce_request_info[i].ce_sps.result_dump_null =
4373 (uintptr_t)vaddr;
4374 pce_dev->ce_request_info[i].ce_sps.result_dump_null_phy =
4375 GET_PHYS_ADDR((uintptr_t)vaddr);
4376 pce_dev->ce_request_info[i].ce_sps.result_null =
4377 (struct ce_result_dump_format *)vaddr;
4378 vaddr += CRYPTO_RESULT_DUMP_SIZE;
4379
4380 pce_dev->ce_request_info[i].ce_sps.ignore_buffer =
4381 (uintptr_t)vaddr;
4382 vaddr += pce_dev->ce_bam_info.ce_burst_size * 2;
4383 }
4384 if ((vaddr - pce_dev->coh_vmem) > pce_dev->memsize ||
4385 iovec_memsize < 0)
4386			panic("qce50: Not enough coherent memory. Allocated %x, need %lx\n",
4387 pce_dev->memsize, (uintptr_t)vaddr -
4388 (uintptr_t)pce_dev->coh_vmem);
4389 return 0;
4390}
4391
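/*
 * Precompute the register value templates for this device: the
 * crypto config word in both endian flavors, and the encryption
 * and authentication segment configs for every supported
 * algorithm, so request setup only has to pick a prebuilt value.
 */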
4392static int qce_init_ce_cfg_val(struct qce_device *pce_dev)
4393{
4394 uint32_t beats = (pce_dev->ce_bam_info.ce_burst_size >> 3) - 1;
4395 uint32_t pipe_pair = pce_dev->ce_bam_info.pipe_pair_index;
4396
4397 pce_dev->reg.crypto_cfg_be = (beats << CRYPTO_REQ_SIZE) |
4398 BIT(CRYPTO_MASK_DOUT_INTR) | BIT(CRYPTO_MASK_DIN_INTR) |
4399 BIT(CRYPTO_MASK_OP_DONE_INTR) | (0 << CRYPTO_HIGH_SPD_EN_N) |
4400 (pipe_pair << CRYPTO_PIPE_SET_SELECT);
4401
4402 pce_dev->reg.crypto_cfg_le =
4403 (pce_dev->reg.crypto_cfg_be | CRYPTO_LITTLE_ENDIAN_MASK);
4404
4405 /* Initialize encr_cfg register for AES alg */
4406 pce_dev->reg.encr_cfg_aes_cbc_128 =
4407 (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
4408 (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
4409 (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
4410
4411 pce_dev->reg.encr_cfg_aes_cbc_256 =
4412 (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
4413 (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
4414 (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
4415
4416 pce_dev->reg.encr_cfg_aes_ctr_128 =
4417 (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
4418 (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
4419 (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
4420
4421 pce_dev->reg.encr_cfg_aes_ctr_256 =
4422 (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
4423 (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
4424 (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
4425
4426 pce_dev->reg.encr_cfg_aes_xts_128 =
4427 (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
4428 (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
4429 (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
4430
4431 pce_dev->reg.encr_cfg_aes_xts_256 =
4432 (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
4433 (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
4434 (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
4435
4436 pce_dev->reg.encr_cfg_aes_ecb_128 =
4437 (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
4438 (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
4439 (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
4440
4441 pce_dev->reg.encr_cfg_aes_ecb_256 =
4442 (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
4443 (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
4444 (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
4445
4446 pce_dev->reg.encr_cfg_aes_ccm_128 =
4447 (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
4448 (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
4449 (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE)|
4450 (CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
4451
4452 pce_dev->reg.encr_cfg_aes_ccm_256 =
4453 (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
4454 (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
4455 (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
4456 (CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
4457
4458 /* Initialize encr_cfg register for DES alg */
4459 pce_dev->reg.encr_cfg_des_ecb =
4460 (CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
4461 (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
4462 (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
4463
4464 pce_dev->reg.encr_cfg_des_cbc =
4465 (CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
4466 (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
4467 (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
4468
4469 pce_dev->reg.encr_cfg_3des_ecb =
4470 (CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
4471 (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
4472 (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
4473
4474 pce_dev->reg.encr_cfg_3des_cbc =
4475 (CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
4476 (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
4477 (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
4478
4479 /* Initialize encr_cfg register for kasumi/snow3g alg */
4480 pce_dev->reg.encr_cfg_kasumi =
4481 (CRYPTO_ENCR_ALG_KASUMI << CRYPTO_ENCR_ALG);
4482
4483 pce_dev->reg.encr_cfg_snow3g =
4484 (CRYPTO_ENCR_ALG_SNOW_3G << CRYPTO_ENCR_ALG);
4485
4486 /* Initialize auth_cfg register for CMAC alg */
4487 pce_dev->reg.auth_cfg_cmac_128 =
4488 (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
4489 (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
4490 (CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
4491 (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
4492 (CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE);
4493
4494 pce_dev->reg.auth_cfg_cmac_256 =
4495 (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
4496 (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
4497 (CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
4498 (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
4499 (CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE);
4500
4501 /* Initialize auth_cfg register for HMAC alg */
4502 pce_dev->reg.auth_cfg_hmac_sha1 =
4503 (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
4504 (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
4505 (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
4506 (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
4507
4508 pce_dev->reg.auth_cfg_hmac_sha256 =
4509 (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
4510 (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
4511 (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
4512 (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
4513
4514 /* Initialize auth_cfg register for SHA1/256 alg */
4515 pce_dev->reg.auth_cfg_sha1 =
4516 (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
4517 (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
4518 (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
4519 (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
4520
4521 pce_dev->reg.auth_cfg_sha256 =
4522 (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
4523 (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
4524 (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
4525 (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
4526
4527 /* Initialize auth_cfg register for AEAD alg */
4528 pce_dev->reg.auth_cfg_aead_sha1_hmac =
4529 (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
4530 (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
4531 (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
4532 (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
4533
4534 pce_dev->reg.auth_cfg_aead_sha256_hmac =
4535 (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
4536 (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
4537 (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
4538 (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
4539
4540 pce_dev->reg.auth_cfg_aes_ccm_128 =
4541 (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
4542 (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
4543 (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
4544 (CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE) |
4545 ((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
4546 pce_dev->reg.auth_cfg_aes_ccm_128 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
4547
4548 pce_dev->reg.auth_cfg_aes_ccm_256 =
4549 (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
4550 (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
4551 (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
4552 (CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE) |
4553 ((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
4554 pce_dev->reg.auth_cfg_aes_ccm_256 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
4555
4556 /* Initialize auth_cfg register for kasumi/snow3g */
4557 pce_dev->reg.auth_cfg_kasumi =
4558 (CRYPTO_AUTH_ALG_KASUMI << CRYPTO_AUTH_ALG) |
4559 BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
4560 pce_dev->reg.auth_cfg_snow3g =
4561 (CRYPTO_AUTH_ALG_SNOW3G << CRYPTO_AUTH_ALG) |
4562 BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
4563 return 0;
4564}
4565
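/*
 * The two helpers below work around the CCM MAC status issue on
 * targets without the hardware get-around: for CCM decrypt they
 * queue an extra null-cipher command and a burst-sized dummy
 * transfer after the real payload, directing the extra output into
 * the ignore buffer and the null result dump.
 */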
4566static void _qce_ccm_get_around_input(struct qce_device *pce_dev,
4567 struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir)
4568{
4569 struct qce_cmdlist_info *cmdlistinfo;
4570 struct ce_sps_data *pce_sps_data;
4571
4572 pce_sps_data = &preq_info->ce_sps;
4573 if ((dir == QCE_DECRYPT) && pce_dev->no_get_around &&
4574 !(pce_dev->no_ccm_mac_status_get_around)) {
4575 cmdlistinfo = &pce_sps_data->cmdlistptr.cipher_null;
4576 _qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
4577 &pce_sps_data->in_transfer);
4578 _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
4579 pce_dev->ce_bam_info.ce_burst_size,
4580 &pce_sps_data->in_transfer);
4581 _qce_set_flag(&pce_sps_data->in_transfer,
4582 SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD);
4583 }
4584}
4585
4586static void _qce_ccm_get_around_output(struct qce_device *pce_dev,
4587 struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir)
4588{
4589 struct ce_sps_data *pce_sps_data;
4590
4591 pce_sps_data = &preq_info->ce_sps;
4592
4593 if ((dir == QCE_DECRYPT) && pce_dev->no_get_around &&
4594 !(pce_dev->no_ccm_mac_status_get_around)) {
4595 _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
4596 pce_dev->ce_bam_info.ce_burst_size,
4597 &pce_sps_data->out_transfer);
4598 _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump_null),
4599 CRYPTO_RESULT_DUMP_SIZE, &pce_sps_data->out_transfer);
4600 }
4601}
4602
4603/* QCE_DUMMY_REQ */
4604static void qce_dummy_complete(void *cookie, unsigned char *digest,
4605 unsigned char *authdata, int ret)
4606{
4607 if (!cookie)
4608 pr_err("invalid cookie\n");
4609}
4610
4611static int qce_dummy_req(struct qce_device *pce_dev)
4612{
4613 int ret = 0;
4614
4615	if (xchg(&pce_dev->ce_request_info[DUMMY_REQ_INDEX].in_use,
4616				true))
4617		return -EBUSY;
4618 ret = qce_process_sha_req(pce_dev, NULL);
4619 pce_dev->qce_stats.no_of_dummy_reqs++;
4620 return ret;
4621}
4622
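/*
 * Decide whether this request should raise a completion interrupt.
 * Devices without the get-around always interrupt. Otherwise the
 * driver starts in interrupt mode and, once enough requests are
 * queued, switches to bunch mode, where the interrupt flag is set
 * only every few requests (paced by request length) and the delay
 * timer is armed to catch the tail of a bunch.
 */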
4623static int select_mode(struct qce_device *pce_dev,
4624 struct ce_request_info *preq_info)
4625{
4626 struct ce_sps_data *pce_sps_data = &preq_info->ce_sps;
4627 unsigned int no_of_queued_req;
4628 unsigned int cadence;
4629
4630 if (!pce_dev->no_get_around) {
4631 _qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
4632 return 0;
4633 }
4634
4635 /*
4636 * claim ownership of device
4637 */
4638again:
4639 if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_CLIENT)
4640 != QCE_OWNER_NONE) {
4641 ndelay(40);
4642 goto again;
4643 }
4644 no_of_queued_req = atomic_inc_return(&pce_dev->no_of_queued_req);
4645 if (pce_dev->mode == IN_INTERRUPT_MODE) {
4646 if (no_of_queued_req >= MAX_BUNCH_MODE_REQ) {
4647 pce_dev->mode = IN_BUNCH_MODE;
4648 pr_debug("pcedev %d mode switch to BUNCH\n",
4649 pce_dev->dev_no);
4650 _qce_set_flag(&pce_sps_data->out_transfer,
4651 SPS_IOVEC_FLAG_INT);
4652 pce_dev->intr_cadence = 0;
4653 atomic_set(&pce_dev->bunch_cmd_seq, 1);
4654 atomic_set(&pce_dev->last_intr_seq, 1);
4655 mod_timer(&(pce_dev->timer),
4656 (jiffies + DELAY_IN_JIFFIES));
4657 } else {
4658 _qce_set_flag(&pce_sps_data->out_transfer,
4659 SPS_IOVEC_FLAG_INT);
4660 }
4661 } else {
4662 pce_dev->intr_cadence++;
4663 cadence = (preq_info->req_len >> 7) + 1;
4664 if (cadence > SET_INTR_AT_REQ)
4665 cadence = SET_INTR_AT_REQ;
4666 if (pce_dev->intr_cadence < cadence || ((pce_dev->intr_cadence
4667 == cadence) && pce_dev->cadence_flag))
4668 atomic_inc(&pce_dev->bunch_cmd_seq);
4669 else {
4670 _qce_set_flag(&pce_sps_data->out_transfer,
4671 SPS_IOVEC_FLAG_INT);
4672 pce_dev->intr_cadence = 0;
4673 atomic_set(&pce_dev->bunch_cmd_seq, 0);
4674 atomic_set(&pce_dev->last_intr_seq, 0);
4675 pce_dev->cadence_flag = ~pce_dev->cadence_flag;
4676 }
4677 }
4678
4679 return 0;
4680}
4681
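/*
 * Issue an AES-CCM AEAD request. The hardware MAC padding is
 * routed into the ignore buffer, and on decrypt the get-around
 * helpers above may queue an extra null-cipher pass.
 */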
4682static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req)
4683{
4684 int rc = 0;
4685 struct qce_device *pce_dev = (struct qce_device *) handle;
4686 struct aead_request *areq = (struct aead_request *) q_req->areq;
4687 uint32_t authsize = q_req->authsize;
4688 uint32_t totallen_in, out_len;
4689 uint32_t hw_pad_out = 0;
4690 int ce_burst_size;
4691 struct qce_cmdlist_info *cmdlistinfo = NULL;
4692 int req_info = -1;
4693 struct ce_request_info *preq_info;
4694 struct ce_sps_data *pce_sps_data;
4695
4696 req_info = qce_alloc_req_info(pce_dev);
4697 if (req_info < 0)
4698 return -EBUSY;
4699 preq_info = &pce_dev->ce_request_info[req_info];
4700 pce_sps_data = &preq_info->ce_sps;
4701
4702 ce_burst_size = pce_dev->ce_bam_info.ce_burst_size;
4703 totallen_in = areq->cryptlen + q_req->assoclen;
4704 if (q_req->dir == QCE_ENCRYPT) {
4705 q_req->cryptlen = areq->cryptlen;
4706 out_len = areq->cryptlen + authsize;
4707 hw_pad_out = ALIGN(authsize, ce_burst_size) - authsize;
4708 } else {
4709 q_req->cryptlen = areq->cryptlen - authsize;
4710 out_len = q_req->cryptlen;
4711 hw_pad_out = authsize;
4712 }
4713
4714	/*
4715	 * For crypto 5.0, which has a burst size alignment
4716	 * requirement for data descriptors, the agent above
4717	 * (qcrypto) prepares the src scatter list with memory
4718	 * starting with the associated data, followed by the
4719	 * data stream to be ciphered.
4720	 * The destination scatter list points to the same
4721	 * data area as the source.
4722	 */
4723 if (pce_dev->ce_bam_info.minor_version == 0)
4724 preq_info->src_nents = count_sg(areq->src, totallen_in);
4725 else
4726 preq_info->src_nents = count_sg(areq->src, areq->cryptlen +
4727 areq->assoclen);
4728
4729 if (q_req->assoclen) {
4730 preq_info->assoc_nents = count_sg(q_req->asg, q_req->assoclen);
4731
4732 /* formatted associated data input */
4733 qce_dma_map_sg(pce_dev->pdev, q_req->asg,
4734 preq_info->assoc_nents, DMA_TO_DEVICE);
4735 preq_info->asg = q_req->asg;
4736 } else {
4737 preq_info->assoc_nents = 0;
4738 preq_info->asg = NULL;
4739 }
4740 /* cipher input */
4741 qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
4742 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
4743 DMA_TO_DEVICE);
4744 /* cipher + mac output for encryption */
4745 if (areq->src != areq->dst) {
4746		/*
4747		 * The destination scatter list points to the same
4748		 * data area as the source. Note, the associated data
4749		 * is passed through at the beginning of the
4750		 * destination area. Both crypto core versions count
4751		 * the same region, so no version check is needed.
4752		 */
4753		preq_info->dst_nents = count_sg(areq->dst,
4754				out_len + areq->assoclen);
4758
4759 qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
4760 DMA_FROM_DEVICE);
4761 } else {
4762 preq_info->dst_nents = preq_info->src_nents;
4763 }
4764
4765 if (pce_dev->support_cmd_dscr) {
4766 cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev, req_info,
4767 q_req);
4768 if (cmdlistinfo == NULL) {
4769 pr_err("Unsupported cipher algorithm %d, mode %d\n",
4770 q_req->alg, q_req->mode);
4771 qce_free_req_info(pce_dev, req_info, false);
4772 return -EINVAL;
4773 }
4774 /* set up crypto device */
4775 rc = _ce_setup_cipher(pce_dev, q_req, totallen_in,
4776 q_req->assoclen, cmdlistinfo);
4777 } else {
4778 /* set up crypto device */
4779 rc = _ce_setup_cipher_direct(pce_dev, q_req, totallen_in,
4780 q_req->assoclen);
4781 }
4782
4783 if (rc < 0)
4784 goto bad;
4785
4786 preq_info->mode = q_req->mode;
4787
4788 /* setup for callback, and issue command to bam */
4789 preq_info->areq = q_req->areq;
4790 preq_info->qce_cb = q_req->qce_cb;
4791 preq_info->dir = q_req->dir;
4792
4793 /* setup xfer type for producer callback handling */
4794 preq_info->xfer_type = QCE_XFER_AEAD;
4795 preq_info->req_len = totallen_in;
4796
4797 _qce_sps_iovec_count_init(pce_dev, req_info);
4798
4799 if (pce_dev->support_cmd_dscr)
4800 _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
4801 &pce_sps_data->in_transfer);
4802
4803	if (pce_dev->ce_bam_info.minor_version == 0) {
		rc = -EINVAL;	/* do not return 0 from the error path */
4804		goto bad;
4805	} else {
4806 if (q_req->assoclen && (_qce_sps_add_sg_data(
4807 pce_dev, q_req->asg, q_req->assoclen,
4808 &pce_sps_data->in_transfer)))
4809 goto bad;
4810 if (_qce_sps_add_sg_data_off(pce_dev, areq->src, areq->cryptlen,
4811 areq->assoclen,
4812 &pce_sps_data->in_transfer))
4813 goto bad;
4814 _qce_set_flag(&pce_sps_data->in_transfer,
4815 SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
4816
4817 _qce_ccm_get_around_input(pce_dev, preq_info, q_req->dir);
4818
4819 if (pce_dev->no_get_around)
4820 _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
4821 &pce_sps_data->cmdlistptr.unlock_all_pipes,
4822 &pce_sps_data->in_transfer);
4823
4824		/* Pass through to ignore associated data */
4825 if (_qce_sps_add_data(
4826 GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
4827 q_req->assoclen,
4828 &pce_sps_data->out_transfer))
4829 goto bad;
4830 if (_qce_sps_add_sg_data_off(pce_dev, areq->dst, out_len,
4831 areq->assoclen,
4832 &pce_sps_data->out_transfer))
4833 goto bad;
4834 /* Pass through to ignore hw_pad (padding of the MAC data) */
4835 if (_qce_sps_add_data(
4836 GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
4837 hw_pad_out, &pce_sps_data->out_transfer))
4838 goto bad;
4839 if (pce_dev->no_get_around ||
4840 totallen_in <= SPS_MAX_PKT_SIZE) {
4841 if (_qce_sps_add_data(
4842 GET_PHYS_ADDR(pce_sps_data->result_dump),
4843 CRYPTO_RESULT_DUMP_SIZE,
4844 &pce_sps_data->out_transfer))
4845 goto bad;
4846 pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
4847 } else {
4848 pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
4849 }
4850
4851 _qce_ccm_get_around_output(pce_dev, preq_info, q_req->dir);
4852
4853 select_mode(pce_dev, preq_info);
4854 rc = _qce_sps_transfer(pce_dev, req_info);
4855 cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
4856 }
4857 if (rc)
4858 goto bad;
4859 return 0;
4860
4861bad:
4862 if (preq_info->assoc_nents) {
4863 qce_dma_unmap_sg(pce_dev->pdev, q_req->asg,
4864 preq_info->assoc_nents, DMA_TO_DEVICE);
4865 }
4866 if (preq_info->src_nents) {
4867 qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
4868 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
4869 DMA_TO_DEVICE);
4870 }
4871 if (areq->src != areq->dst) {
4872 qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
4873 DMA_FROM_DEVICE);
4874 }
4875 qce_free_req_info(pce_dev, req_info, false);
4876 return rc;
4877}
4878
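/*
 * Power management hooks, exported through qce_pm_table below:
 * suspend disconnects both BAM pipes; resume reconnects them and
 * re-registers the producer event callback.
 */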
4879static int _qce_suspend(void *handle)
4880{
4881 struct qce_device *pce_dev = (struct qce_device *)handle;
4882 struct sps_pipe *sps_pipe_info;
4883
4884 if (handle == NULL)
4885 return -ENODEV;
4886
4887	sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
4888 sps_disconnect(sps_pipe_info);
4889
4890 sps_pipe_info = pce_dev->ce_bam_info.producer.pipe;
4891 sps_disconnect(sps_pipe_info);
4892
4893	return 0;
4894}
4895
4896static int _qce_resume(void *handle)
4897{
4898 struct qce_device *pce_dev = (struct qce_device *)handle;
4899 struct sps_pipe *sps_pipe_info;
4900 struct sps_connect *sps_connect_info;
4901 int rc;
4902
4903 if (handle == NULL)
4904 return -ENODEV;
4905
4906	sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
4907 sps_connect_info = &pce_dev->ce_bam_info.consumer.connect;
4908 memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
4909 rc = sps_connect(sps_pipe_info, sps_connect_info);
4910 if (rc) {
4911 pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
4912 (uintptr_t)sps_pipe_info, rc);
4913 return rc;
4914 }
4915 sps_pipe_info = pce_dev->ce_bam_info.producer.pipe;
4916 sps_connect_info = &pce_dev->ce_bam_info.producer.connect;
4917 memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
4918 rc = sps_connect(sps_pipe_info, sps_connect_info);
4919 if (rc)
4920 pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
4921 (uintptr_t)sps_pipe_info, rc);
4922
4923 rc = sps_register_event(sps_pipe_info,
4924 &pce_dev->ce_bam_info.producer.event);
4925 if (rc)
4926 pr_err("Producer callback registration failed rc = %d\n", rc);
4927
4928	return rc;
4929}
4930
4931struct qce_pm_table qce_pm_table = {_qce_suspend, _qce_resume};
4932EXPORT_SYMBOL(qce_pm_table);
4933
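/*
 * Entry point for AEAD requests. CCM is handed off to
 * _qce_aead_ccm_req(); the HMAC-based modes are mapped for DMA,
 * programmed through a command list (or direct register writes on
 * targets without command descriptor support) and queued on the
 * BAM pipes.
 */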
4934int qce_aead_req(void *handle, struct qce_req *q_req)
4935{
4936 struct qce_device *pce_dev = (struct qce_device *)handle;
4937 struct aead_request *areq;
4938 uint32_t authsize;
4939 struct crypto_aead *aead;
4940 uint32_t ivsize;
4941 uint32_t totallen;
4942 int rc = 0;
4943 struct qce_cmdlist_info *cmdlistinfo = NULL;
4944 int req_info = -1;
4945 struct ce_sps_data *pce_sps_data;
4946 struct ce_request_info *preq_info;
4947
4948 if (q_req->mode == QCE_MODE_CCM)
4949 return _qce_aead_ccm_req(handle, q_req);
4950
4951 req_info = qce_alloc_req_info(pce_dev);
4952 if (req_info < 0)
4953 return -EBUSY;
4954 preq_info = &pce_dev->ce_request_info[req_info];
4955 pce_sps_data = &preq_info->ce_sps;
4956 areq = (struct aead_request *) q_req->areq;
4957 aead = crypto_aead_reqtfm(areq);
4958 ivsize = crypto_aead_ivsize(aead);
4959 q_req->ivsize = ivsize;
4960 authsize = q_req->authsize;
4961 if (q_req->dir == QCE_ENCRYPT)
4962 q_req->cryptlen = areq->cryptlen;
4963 else
4964 q_req->cryptlen = areq->cryptlen - authsize;
4965
4966	if (q_req->cryptlen > UINT_MAX - areq->assoclen) {
4967		pr_err("Integer overflow on total aead req length.\n");
		qce_free_req_info(pce_dev, req_info, false);
4968		return -EINVAL;
4969	}
4970
4971 totallen = q_req->cryptlen + areq->assoclen;
4972
4973 if (pce_dev->support_cmd_dscr) {
4974 cmdlistinfo = _ce_get_aead_cmdlistinfo(pce_dev,
4975 req_info, q_req);
4976 if (cmdlistinfo == NULL) {
4977 pr_err("Unsupported aead ciphering algorithm %d, mode %d, ciphering key length %d, auth digest size %d\n",
4978 q_req->alg, q_req->mode, q_req->encklen,
4979 q_req->authsize);
4980 qce_free_req_info(pce_dev, req_info, false);
4981 return -EINVAL;
4982 }
4983 /* set up crypto device */
4984 rc = _ce_setup_aead(pce_dev, q_req, totallen,
4985 areq->assoclen, cmdlistinfo);
4986 if (rc < 0) {
4987 qce_free_req_info(pce_dev, req_info, false);
4988 return -EINVAL;
4989 }
4990 }
4991
4992	/*
4993	 * For crypto 5.0, which has a burst size alignment
4994	 * requirement for data descriptors, the agent above
4995	 * (qcrypto) prepares the src scatter list with memory
4996	 * starting with the associated data, followed by the
4997	 * iv, and then the data stream to be ciphered.
4998	 */
4999 preq_info->src_nents = count_sg(areq->src, totallen);
5000
5001
5002 /* cipher input */
5003 qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
5004 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
5005 DMA_TO_DEVICE);
5006 /* cipher output for encryption */
5007 if (areq->src != areq->dst) {
5008 preq_info->dst_nents = count_sg(areq->dst, totallen);
5009
5010 qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
5011 DMA_FROM_DEVICE);
5012 }
5013
5014
5015 /* setup for callback, and issue command to bam */
5016 preq_info->areq = q_req->areq;
5017 preq_info->qce_cb = q_req->qce_cb;
5018 preq_info->dir = q_req->dir;
5019 preq_info->asg = NULL;
5020
5021 /* setup xfer type for producer callback handling */
5022 preq_info->xfer_type = QCE_XFER_AEAD;
5023 preq_info->req_len = totallen;
5024
5025 _qce_sps_iovec_count_init(pce_dev, req_info);
5026
5027 if (pce_dev->support_cmd_dscr) {
5028 _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
5029 &pce_sps_data->in_transfer);
5030 } else {
5031 rc = _ce_setup_aead_direct(pce_dev, q_req, totallen,
5032 areq->assoclen);
5033 if (rc)
5034 goto bad;
5035 }
5036
5037 preq_info->mode = q_req->mode;
5038
5039 if (pce_dev->ce_bam_info.minor_version == 0) {
5040 if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen,
5041 &pce_sps_data->in_transfer))
5042 goto bad;
5043
5044 _qce_set_flag(&pce_sps_data->in_transfer,
5045 SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
5046
5047 if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
5048 &pce_sps_data->out_transfer))
5049 goto bad;
5050 if (totallen > SPS_MAX_PKT_SIZE) {
5051 _qce_set_flag(&pce_sps_data->out_transfer,
5052 SPS_IOVEC_FLAG_INT);
5053 pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
5054 } else {
5055 if (_qce_sps_add_data(GET_PHYS_ADDR(
5056 pce_sps_data->result_dump),
5057 CRYPTO_RESULT_DUMP_SIZE,
5058 &pce_sps_data->out_transfer))
5059 goto bad;
5060 _qce_set_flag(&pce_sps_data->out_transfer,
5061 SPS_IOVEC_FLAG_INT);
5062 pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
5063 }
5064 rc = _qce_sps_transfer(pce_dev, req_info);
5065 } else {
5066 if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen,
5067 &pce_sps_data->in_transfer))
5068 goto bad;
5069 _qce_set_flag(&pce_sps_data->in_transfer,
5070 SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
5071
5072 if (pce_dev->no_get_around)
5073 _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
5074 &pce_sps_data->cmdlistptr.unlock_all_pipes,
5075 &pce_sps_data->in_transfer);
5076
5077 if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
5078 &pce_sps_data->out_transfer))
5079 goto bad;
5080
5081 if (pce_dev->no_get_around || totallen <= SPS_MAX_PKT_SIZE) {
5082 if (_qce_sps_add_data(
5083 GET_PHYS_ADDR(pce_sps_data->result_dump),
5084 CRYPTO_RESULT_DUMP_SIZE,
5085 &pce_sps_data->out_transfer))
5086 goto bad;
5087 pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
5088 } else {
5089 pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
5090 }
5091 select_mode(pce_dev, preq_info);
5092 rc = _qce_sps_transfer(pce_dev, req_info);
5093 cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
5094 }
5095 if (rc)
5096 goto bad;
5097 return 0;
5098
5099bad:
5100 if (preq_info->src_nents)
5101 qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
5102 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
5103 DMA_TO_DEVICE);
5104 if (areq->src != areq->dst)
5105 qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
5106 DMA_FROM_DEVICE);
5107 qce_free_req_info(pce_dev, req_info, false);
5108
5109 return rc;
5110}
5111EXPORT_SYMBOL(qce_aead_req);
5112
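/*
 * Queue a block cipher request. For CBC decrypt on crypto core
 * minor version 0, the last cipher block of the source is saved
 * into dec_iv before the transfer is queued, since an in-place
 * decrypt would otherwise overwrite it.
 */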
int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
{
	int rc = 0;
	struct qce_device *pce_dev = (struct qce_device *) handle;
	struct ablkcipher_request *areq = (struct ablkcipher_request *)
						c_req->areq;
	struct qce_cmdlist_info *cmdlistinfo = NULL;
	int req_info = -1;
	struct ce_sps_data *pce_sps_data;
	struct ce_request_info *preq_info;

	req_info = qce_alloc_req_info(pce_dev);
	if (req_info < 0)
		return -EBUSY;
	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;

	preq_info->src_nents = 0;
	preq_info->dst_nents = 0;

	/* cipher input */
	preq_info->src_nents = count_sg(areq->src, areq->nbytes);

	qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);
	/* cipher output */
	if (areq->src != areq->dst) {
		preq_info->dst_nents = count_sg(areq->dst, areq->nbytes);
		qce_dma_map_sg(pce_dev->pdev, areq->dst,
				preq_info->dst_nents, DMA_FROM_DEVICE);
	} else {
		preq_info->dst_nents = preq_info->src_nents;
	}
	preq_info->dir = c_req->dir;
	if ((pce_dev->ce_bam_info.minor_version == 0) &&
			(preq_info->dir == QCE_DECRYPT) &&
			(c_req->mode == QCE_MODE_CBC)) {
		memcpy(preq_info->dec_iv, (unsigned char *)
			sg_virt(areq->src) + areq->src->length - 16,
			NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE);
	}

	/* set up crypto device */
	if (pce_dev->support_cmd_dscr) {
		cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev,
							req_info, c_req);
		if (cmdlistinfo == NULL) {
			pr_err("Unsupported cipher algorithm %d, mode %d\n",
						c_req->alg, c_req->mode);
			qce_free_req_info(pce_dev, req_info, false);
			return -EINVAL;
		}
		rc = _ce_setup_cipher(pce_dev, c_req, areq->nbytes, 0,
							cmdlistinfo);
	} else {
		rc = _ce_setup_cipher_direct(pce_dev, c_req, areq->nbytes, 0);
	}
	if (rc < 0)
		goto bad;

	preq_info->mode = c_req->mode;

	/* setup for client callback, and issue command to BAM */
	preq_info->areq = areq;
	preq_info->qce_cb = c_req->qce_cb;

	/* setup xfer type for producer callback handling */
	preq_info->xfer_type = QCE_XFER_CIPHERING;
	preq_info->req_len = areq->nbytes;

	_qce_sps_iovec_count_init(pce_dev, req_info);
	if (pce_dev->support_cmd_dscr)
		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
					&pce_sps_data->in_transfer);
	if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
					&pce_sps_data->in_transfer))
		goto bad;
	_qce_set_flag(&pce_sps_data->in_transfer,
			SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);

	if (pce_dev->no_get_around)
		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
			&pce_sps_data->cmdlistptr.unlock_all_pipes,
			&pce_sps_data->in_transfer);

	if (_qce_sps_add_sg_data(pce_dev, areq->dst, areq->nbytes,
					&pce_sps_data->out_transfer))
		goto bad;
	if (pce_dev->no_get_around || areq->nbytes <= SPS_MAX_PKT_SIZE) {
		pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
		if (_qce_sps_add_data(
				GET_PHYS_ADDR(pce_sps_data->result_dump),
				CRYPTO_RESULT_DUMP_SIZE,
				&pce_sps_data->out_transfer))
			goto bad;
	} else {
		pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
	}

	select_mode(pce_dev, preq_info);
	rc = _qce_sps_transfer(pce_dev, req_info);
	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
	if (rc)
		goto bad;

	return 0;
bad:
	if (areq->src != areq->dst) {
		if (preq_info->dst_nents) {
			qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
				preq_info->dst_nents, DMA_FROM_DEVICE);
		}
	}
	if (preq_info->src_nents) {
		qce_dma_unmap_sg(pce_dev->pdev, areq->src,
				preq_info->src_nents,
				(areq->src == areq->dst) ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	}
	qce_free_req_info(pce_dev, req_info, false);
	return rc;
}
EXPORT_SYMBOL(qce_ablk_cipher_req);

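/*
 * qce_process_sha_req() - queue a hash/HMAC request on the crypto
 * engine.
 *
 * A NULL sreq selects the driver's internal dummy request (submitted
 * with an explicit interrupt flag, e.g. to flush pending requests in
 * bunch mode); otherwise a free request slot is allocated. The source
 * scatterlist is DMA-mapped, the hash is programmed via command
 * descriptors or direct register writes, and the transfer is submitted
 * to the BAM. Returns 0 on success, -EBUSY when no slot is free, or a
 * negative errno on setup failure.
 */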
int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	int rc;

	struct ahash_request *areq;
	struct qce_cmdlist_info *cmdlistinfo = NULL;
	int req_info = -1;
	struct ce_sps_data *pce_sps_data;
	struct ce_request_info *preq_info;
	bool is_dummy = false;

	if (!sreq) {
		sreq = &(pce_dev->dummyreq.sreq);
		req_info = DUMMY_REQ_INDEX;
		is_dummy = true;
	} else {
		req_info = qce_alloc_req_info(pce_dev);
		if (req_info < 0)
			return -EBUSY;
	}

	areq = (struct ahash_request *)sreq->areq;
	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;

	preq_info->src_nents = count_sg(sreq->src, sreq->size);
	qce_dma_map_sg(pce_dev->pdev, sreq->src, preq_info->src_nents,
							DMA_TO_DEVICE);

	if (pce_dev->support_cmd_dscr) {
		cmdlistinfo = _ce_get_hash_cmdlistinfo(pce_dev, req_info, sreq);
		if (cmdlistinfo == NULL) {
			pr_err("Unsupported hash algorithm %d\n", sreq->alg);
			qce_free_req_info(pce_dev, req_info, false);
			return -EINVAL;
		}
		rc = _ce_setup_hash(pce_dev, sreq, cmdlistinfo);
	} else {
		rc = _ce_setup_hash_direct(pce_dev, sreq);
	}
	if (rc < 0)
		goto bad;

	preq_info->areq = areq;
	preq_info->qce_cb = sreq->qce_cb;

	/* setup xfer type for producer callback handling */
	preq_info->xfer_type = QCE_XFER_HASHING;
	preq_info->req_len = sreq->size;

	_qce_sps_iovec_count_init(pce_dev, req_info);

	if (pce_dev->support_cmd_dscr)
		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
					&pce_sps_data->in_transfer);
	if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
						&pce_sps_data->in_transfer))
		goto bad;

	/*
	 * Always ensure there is some input data: zero-length transfers
	 * (ZLT) do not work with BAM-NDP.
	 */
	if (!areq->nbytes)
		_qce_sps_add_data(
			GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
			pce_dev->ce_bam_info.ce_burst_size,
			&pce_sps_data->in_transfer);
	_qce_set_flag(&pce_sps_data->in_transfer,
				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
	if (pce_dev->no_get_around)
		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
			&pce_sps_data->cmdlistptr.unlock_all_pipes,
			&pce_sps_data->in_transfer);

	if (_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
					CRYPTO_RESULT_DUMP_SIZE,
					&pce_sps_data->out_transfer))
		goto bad;

	if (is_dummy) {
		_qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
		rc = _qce_sps_transfer(pce_dev, req_info);
	} else {
		select_mode(pce_dev, preq_info);
		rc = _qce_sps_transfer(pce_dev, req_info);
		cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
	}
	if (rc)
		goto bad;
	return 0;
bad:
	if (preq_info->src_nents) {
		qce_dma_unmap_sg(pce_dev->pdev, sreq->src,
				preq_info->src_nents, DMA_TO_DEVICE);
	}
	qce_free_req_info(pce_dev, req_info, false);
	return rc;
}
EXPORT_SYMBOL(qce_process_sha_req);

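/*
 * qce_f8_req() - queue a single-packet OTA F8 (confidentiality)
 * request.
 *
 * Supports the Kasumi and SNOW 3G ciphers. The input buffer is
 * DMA-mapped (bidirectionally for in-place operation), the F8
 * parameters are programmed, and the transfer is handed to the BAM.
 * Key-stream-only mode (data_in == NULL) is rejected. Completion is
 * reported through qce_cb with the caller's cookie.
 */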
int qce_f8_req(void *handle, struct qce_f8_req *req,
			void *cookie, qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	bool key_stream_mode;
	dma_addr_t dst;
	int rc;
	struct qce_cmdlist_info *cmdlistinfo;
	int req_info = -1;
	struct ce_request_info *preq_info;
	struct ce_sps_data *pce_sps_data;

	req_info = qce_alloc_req_info(pce_dev);
	if (req_info < 0)
		return -EBUSY;
	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;

	switch (req->algorithm) {
	case QCE_OTA_ALGO_KASUMI:
		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g;
		break;
	default:
		qce_free_req_info(pce_dev, req_info, false);
		return -EINVAL;
	}

	key_stream_mode = (req->data_in == NULL);

	/* key stream mode is not supported */
	if (key_stream_mode || (req->bearer >= QCE_OTA_MAX_BEARER)) {
		qce_free_req_info(pce_dev, req_info, false);
		return -EINVAL;
	}

	/* F8 cipher input */
	preq_info->phy_ota_src = dma_map_single(pce_dev->pdev,
					req->data_in, req->data_len,
					(req->data_in == req->data_out) ?
					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* F8 cipher output */
	if (req->data_in != req->data_out) {
		dst = dma_map_single(pce_dev->pdev, req->data_out,
				req->data_len, DMA_FROM_DEVICE);
		preq_info->phy_ota_dst = dst;
	} else {
		/* in place ciphering */
		dst = preq_info->phy_ota_src;
		preq_info->phy_ota_dst = 0;
	}
	preq_info->ota_size = req->data_len;

	/* set up crypto device */
	if (pce_dev->support_cmd_dscr)
		rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0,
				req->data_len, cmdlistinfo);
	else
		rc = _ce_f8_setup_direct(pce_dev, req, key_stream_mode, 1, 0,
				req->data_len);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to sps */
	preq_info->areq = cookie;
	preq_info->qce_cb = qce_cb;

	/* setup xfer type for producer callback handling */
	preq_info->xfer_type = QCE_XFER_F8;
	preq_info->req_len = req->data_len;

	_qce_sps_iovec_count_init(pce_dev, req_info);

	if (pce_dev->support_cmd_dscr)
		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
					&pce_sps_data->in_transfer);

	_qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->data_len,
					&pce_sps_data->in_transfer);

	_qce_set_flag(&pce_sps_data->in_transfer,
			SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);

	_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
			&pce_sps_data->cmdlistptr.unlock_all_pipes,
			&pce_sps_data->in_transfer);

	_qce_sps_add_data((uint32_t)dst, req->data_len,
					&pce_sps_data->out_transfer);

	_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
			CRYPTO_RESULT_DUMP_SIZE,
			&pce_sps_data->out_transfer);

	select_mode(pce_dev, preq_info);
	rc = _qce_sps_transfer(pce_dev, req_info);
	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
	if (rc)
		goto bad;
	return 0;
bad:
	if (preq_info->phy_ota_dst != 0)
		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst,
				req->data_len, DMA_FROM_DEVICE);
	if (preq_info->phy_ota_src != 0)
		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
				req->data_len,
				(req->data_in == req->data_out) ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	qce_free_req_info(pce_dev, req_info, false);
	return rc;
}
EXPORT_SYMBOL(qce_f8_req);

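/*
 * qce_f8_multi_pkt_req() - queue a multi-packet OTA F8 request.
 *
 * Ciphers num_pkt packets of data_len bytes each in one engine pass;
 * cipher_start/cipher_size select the region of each packet that is
 * actually enciphered. The whole num_pkt * data_len region is mapped
 * as one contiguous DMA buffer, so the packets must be laid out back
 * to back in memory.
 */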
int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq,
			void *cookie, qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	uint16_t num_pkt = mreq->num_pkt;
	uint16_t cipher_start = mreq->cipher_start;
	uint16_t cipher_size = mreq->cipher_size;
	struct qce_f8_req *req = &mreq->qce_f8_req;
	uint32_t total;
	dma_addr_t dst = 0;
	int rc = 0;
	struct qce_cmdlist_info *cmdlistinfo;
	int req_info = -1;
	struct ce_request_info *preq_info;
	struct ce_sps_data *pce_sps_data;

	req_info = qce_alloc_req_info(pce_dev);
	if (req_info < 0)
		return -EBUSY;
	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;

	switch (req->algorithm) {
	case QCE_OTA_ALGO_KASUMI:
		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g;
		break;
	default:
		qce_free_req_info(pce_dev, req_info, false);
		return -EINVAL;
	}

	total = num_pkt * req->data_len;

	/* F8 cipher input */
	preq_info->phy_ota_src = dma_map_single(pce_dev->pdev,
				req->data_in, total,
				(req->data_in == req->data_out) ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* F8 cipher output */
	if (req->data_in != req->data_out) {
		dst = dma_map_single(pce_dev->pdev, req->data_out, total,
				DMA_FROM_DEVICE);
		preq_info->phy_ota_dst = dst;
	} else {
		/* in place ciphering */
		dst = preq_info->phy_ota_src;
		preq_info->phy_ota_dst = 0;
	}

	preq_info->ota_size = total;

	/* set up crypto device */
	if (pce_dev->support_cmd_dscr)
		rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start,
			cipher_size, cmdlistinfo);
	else
		rc = _ce_f8_setup_direct(pce_dev, req, false, num_pkt,
			cipher_start, cipher_size);
	if (rc)
		goto bad;

	/* setup for callback, and issue command to sps */
	preq_info->areq = cookie;
	preq_info->qce_cb = qce_cb;

	/* setup xfer type for producer callback handling */
	preq_info->xfer_type = QCE_XFER_F8;
	preq_info->req_len = total;

	_qce_sps_iovec_count_init(pce_dev, req_info);

	if (pce_dev->support_cmd_dscr)
		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
					&pce_sps_data->in_transfer);

	_qce_sps_add_data((uint32_t)preq_info->phy_ota_src, total,
					&pce_sps_data->in_transfer);
	_qce_set_flag(&pce_sps_data->in_transfer,
			SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);

	_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
			&pce_sps_data->cmdlistptr.unlock_all_pipes,
			&pce_sps_data->in_transfer);

	_qce_sps_add_data((uint32_t)dst, total,
					&pce_sps_data->out_transfer);

	_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
			CRYPTO_RESULT_DUMP_SIZE,
			&pce_sps_data->out_transfer);

	select_mode(pce_dev, preq_info);
	rc = _qce_sps_transfer(pce_dev, req_info);
	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);

	if (rc == 0)
		return 0;
bad:
	if (preq_info->phy_ota_dst)
		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst, total,
				DMA_FROM_DEVICE);
	dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src, total,
				(req->data_in == req->data_out) ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	qce_free_req_info(pce_dev, req_info, false);
	return rc;
}
EXPORT_SYMBOL(qce_f8_multi_pkt_req);

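/*
 * qce_f9_req() - queue an OTA F9 (integrity) request.
 *
 * Computes the Kasumi or SNOW 3G F9 message authentication code over
 * req->message. The message is DMA-mapped for device reads only; the
 * resulting MAC comes back through the engine's result dump area and
 * is reported via the client callback.
 */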
int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie,
			qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	int rc;
	struct qce_cmdlist_info *cmdlistinfo;
	int req_info = -1;
	struct ce_sps_data *pce_sps_data;
	struct ce_request_info *preq_info;

	req_info = qce_alloc_req_info(pce_dev);
	if (req_info < 0)
		return -EBUSY;
	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;
	switch (req->algorithm) {
	case QCE_OTA_ALGO_KASUMI:
		cmdlistinfo = &pce_sps_data->cmdlistptr.f9_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
		cmdlistinfo = &pce_sps_data->cmdlistptr.f9_snow3g;
		break;
	default:
		qce_free_req_info(pce_dev, req_info, false);
		return -EINVAL;
	}

	preq_info->phy_ota_src = dma_map_single(pce_dev->pdev, req->message,
			req->msize, DMA_TO_DEVICE);

	preq_info->ota_size = req->msize;

	if (pce_dev->support_cmd_dscr)
		rc = _ce_f9_setup(pce_dev, req, cmdlistinfo);
	else
		rc = _ce_f9_setup_direct(pce_dev, req);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to sps */
	preq_info->areq = cookie;
	preq_info->qce_cb = qce_cb;

	/* setup xfer type for producer callback handling */
	preq_info->xfer_type = QCE_XFER_F9;
	preq_info->req_len = req->msize;

	_qce_sps_iovec_count_init(pce_dev, req_info);
	if (pce_dev->support_cmd_dscr)
		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
					&pce_sps_data->in_transfer);
	_qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->msize,
					&pce_sps_data->in_transfer);
	_qce_set_flag(&pce_sps_data->in_transfer,
			SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);

	_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
			&pce_sps_data->cmdlistptr.unlock_all_pipes,
			&pce_sps_data->in_transfer);

	_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
			CRYPTO_RESULT_DUMP_SIZE,
			&pce_sps_data->out_transfer);

	select_mode(pce_dev, preq_info);
	rc = _qce_sps_transfer(pce_dev, req_info);
	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
	if (rc)
		goto bad;
	return 0;
bad:
	dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
				req->msize, DMA_TO_DEVICE);
	qce_free_req_info(pce_dev, req_info, false);
	return rc;
}
EXPORT_SYMBOL(qce_f9_req);

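/*
 * __qce_get_device_tree_data() - parse the crypto engine's device tree
 * node.
 *
 * Reads the boolean feature/quirk properties, the mandatory BAM pipe
 * pair, CE device and CE hardware instance numbers, and the optional
 * BAM EE and operating frequency (defaulting to 1 and 100 MHz), then
 * maps the "crypto-base" register region. The "crypto-bam-base" region
 * and BAM IRQ are recorded for later BAM registration.
 */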
static int __qce_get_device_tree_data(struct platform_device *pdev,
		struct qce_device *pce_dev)
{
	struct resource *resource;
	int rc = 0;

	pce_dev->is_shared = of_property_read_bool((&pdev->dev)->of_node,
				"qcom,ce-hw-shared");
	pce_dev->support_hw_key = of_property_read_bool((&pdev->dev)->of_node,
				"qcom,ce-hw-key");

	pce_dev->use_sw_aes_cbc_ecb_ctr_algo =
			of_property_read_bool((&pdev->dev)->of_node,
				"qcom,use-sw-aes-cbc-ecb-ctr-algo");
	pce_dev->use_sw_aead_algo =
			of_property_read_bool((&pdev->dev)->of_node,
				"qcom,use-sw-aead-algo");
	pce_dev->use_sw_aes_xts_algo =
			of_property_read_bool((&pdev->dev)->of_node,
				"qcom,use-sw-aes-xts-algo");
	pce_dev->use_sw_ahash_algo =
			of_property_read_bool((&pdev->dev)->of_node,
				"qcom,use-sw-ahash-algo");
	pce_dev->use_sw_hmac_algo =
			of_property_read_bool((&pdev->dev)->of_node,
				"qcom,use-sw-hmac-algo");
	pce_dev->use_sw_aes_ccm_algo =
			of_property_read_bool((&pdev->dev)->of_node,
				"qcom,use-sw-aes-ccm-algo");
	pce_dev->support_clk_mgmt_sus_res = of_property_read_bool(
		(&pdev->dev)->of_node, "qcom,clk-mgmt-sus-res");
	pce_dev->support_only_core_src_clk = of_property_read_bool(
		(&pdev->dev)->of_node, "qcom,support-core-clk-only");
	pce_dev->request_bw_before_clk = of_property_read_bool(
		(&pdev->dev)->of_node, "qcom,request-bw-before-clk");

	if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,bam-pipe-pair",
				&pce_dev->ce_bam_info.pipe_pair_index)) {
		pr_err("Failed to get BAM pipe pair information.\n");
		return -EINVAL;
	}
	if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,ce-device",
				&pce_dev->ce_bam_info.ce_device)) {
		pr_err("Failed to get CE device information.\n");
		return -EINVAL;
	}
	if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,ce-hw-instance",
				&pce_dev->ce_bam_info.ce_hw_instance)) {
		pr_err("Failed to get CE hw instance information.\n");
		return -EINVAL;
	}
	if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,bam-ee",
				&pce_dev->ce_bam_info.bam_ee)) {
		pr_info("BAM Apps EE is not defined, setting to default 1\n");
		pce_dev->ce_bam_info.bam_ee = 1;
	}
	if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,ce-opp-freq",
				&pce_dev->ce_opp_freq_hz)) {
		pr_info("CE operating frequency is not defined, setting to default 100 MHz\n");
		pce_dev->ce_opp_freq_hz = CE_CLK_100MHZ;
	}

	if (of_property_read_bool((&pdev->dev)->of_node, "qcom,smmu-s1-bypass"))
		pce_dev->bypass_s1_smmu = true;

	pce_dev->ce_bam_info.dest_pipe_index =
			2 * pce_dev->ce_bam_info.pipe_pair_index;
	pce_dev->ce_bam_info.src_pipe_index =
			pce_dev->ce_bam_info.dest_pipe_index + 1;

	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
							"crypto-base");
	if (resource) {
		pce_dev->phy_iobase = resource->start;
		pce_dev->iobase = ioremap_nocache(resource->start,
					resource_size(resource));
		if (!pce_dev->iobase) {
			pr_err("Cannot map CRYPTO io memory\n");
			return -ENOMEM;
		}
	} else {
		pr_err("CRYPTO HW mem unavailable.\n");
		return -ENODEV;
	}

	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
							"crypto-bam-base");
	if (resource) {
		pce_dev->bam_mem = resource->start;
		pce_dev->bam_mem_size = resource_size(resource);
	} else {
		pr_err("CRYPTO BAM mem unavailable.\n");
		rc = -ENODEV;
		goto err_getting_bam_info;
	}

	resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (resource) {
		pce_dev->ce_bam_info.bam_irq = resource->start;
	} else {
		pr_err("CRYPTO BAM IRQ unavailable.\n");
		/* do not return success without an IRQ */
		rc = -ENXIO;
		goto err_dev;
	}
	return rc;
err_dev:
	if (pce_dev->ce_bam_info.bam_iobase)
		iounmap(pce_dev->ce_bam_info.bam_iobase);

err_getting_bam_info:
	if (pce_dev->iobase)
		iounmap(pce_dev->iobase);

	return rc;
}

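/*
 * __qce_init_clk() / __qce_deinit_clk() - acquire and release the CE
 * clocks.
 *
 * "core_clk_src" is optional unless qcom,support-core-clk-only is set;
 * when present, its rate is set to the configured OPP frequency except
 * on targets that must vote for bandwidth before the clock can be
 * programmed (qcom,request-bw-before-clk). The core, interface, and
 * bus clocks are only acquired on targets that use more than the core
 * source clock.
 */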
static int __qce_init_clk(struct qce_device *pce_dev)
{
	int rc = 0;

	pce_dev->ce_core_src_clk = clk_get(pce_dev->pdev, "core_clk_src");
	if (!IS_ERR(pce_dev->ce_core_src_clk)) {
		if (pce_dev->request_bw_before_clk)
			goto skip_set_rate;

		rc = clk_set_rate(pce_dev->ce_core_src_clk,
						pce_dev->ce_opp_freq_hz);
		if (rc) {
			pr_err("Unable to set the core src clk @%uMHz.\n",
					pce_dev->ce_opp_freq_hz/CE_CLK_DIV);
			goto exit_put_core_src_clk;
		}
	} else {
		if (pce_dev->support_only_core_src_clk) {
			rc = PTR_ERR(pce_dev->ce_core_src_clk);
			pce_dev->ce_core_src_clk = NULL;
			pr_err("Unable to get CE core src clk\n");
			return rc;
		}
		pr_warn("Unable to get CE core src clk, set to NULL\n");
		pce_dev->ce_core_src_clk = NULL;
	}

skip_set_rate:
	if (pce_dev->support_only_core_src_clk) {
		pce_dev->ce_core_clk = NULL;
		pce_dev->ce_clk = NULL;
		pce_dev->ce_bus_clk = NULL;
	} else {
		pce_dev->ce_core_clk = clk_get(pce_dev->pdev, "core_clk");
		if (IS_ERR(pce_dev->ce_core_clk)) {
			rc = PTR_ERR(pce_dev->ce_core_clk);
			pr_err("Unable to get CE core clk\n");
			goto exit_put_core_src_clk;
		}
		pce_dev->ce_clk = clk_get(pce_dev->pdev, "iface_clk");
		if (IS_ERR(pce_dev->ce_clk)) {
			rc = PTR_ERR(pce_dev->ce_clk);
			pr_err("Unable to get CE interface clk\n");
			goto exit_put_core_clk;
		}

		pce_dev->ce_bus_clk = clk_get(pce_dev->pdev, "bus_clk");
		if (IS_ERR(pce_dev->ce_bus_clk)) {
			rc = PTR_ERR(pce_dev->ce_bus_clk);
			pr_err("Unable to get CE BUS interface clk\n");
			goto exit_put_iface_clk;
		}
	}
	return rc;

exit_put_iface_clk:
	if (pce_dev->ce_clk)
		clk_put(pce_dev->ce_clk);
exit_put_core_clk:
	if (pce_dev->ce_core_clk)
		clk_put(pce_dev->ce_core_clk);
exit_put_core_src_clk:
	if (pce_dev->ce_core_src_clk)
		clk_put(pce_dev->ce_core_src_clk);
	pr_err("Unable to init CE clks, rc = %d\n", rc);
	return rc;
}

static void __qce_deinit_clk(struct qce_device *pce_dev)
{
	if (pce_dev->ce_bus_clk)
		clk_put(pce_dev->ce_bus_clk);
	if (pce_dev->ce_clk)
		clk_put(pce_dev->ce_clk);
	if (pce_dev->ce_core_clk)
		clk_put(pce_dev->ce_core_clk);
	if (pce_dev->ce_core_src_clk)
		clk_put(pce_dev->ce_core_src_clk);
}

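/*
 * qce_enable_clk() / qce_disable_clk() - prepare/enable and
 * disable/unprepare whichever CE clocks were acquired at init time.
 * qce_enable_clk() unwinds already-enabled clocks in reverse order on
 * failure.
 */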
int qce_enable_clk(void *handle)
{
	struct qce_device *pce_dev = (struct qce_device *)handle;
	int rc = 0;

	if (pce_dev->ce_core_src_clk) {
		rc = clk_prepare_enable(pce_dev->ce_core_src_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE core src clk\n");
			return rc;
		}
	}

	if (pce_dev->support_only_core_src_clk)
		return rc;

	if (pce_dev->ce_core_clk) {
		rc = clk_prepare_enable(pce_dev->ce_core_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE core clk\n");
			goto exit_disable_core_src_clk;
		}
	}

	if (pce_dev->ce_clk) {
		rc = clk_prepare_enable(pce_dev->ce_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE iface clk\n");
			goto exit_disable_core_clk;
		}
	}

	if (pce_dev->ce_bus_clk) {
		rc = clk_prepare_enable(pce_dev->ce_bus_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE BUS clk\n");
			goto exit_disable_ce_clk;
		}
	}
	return rc;

exit_disable_ce_clk:
	if (pce_dev->ce_clk)
		clk_disable_unprepare(pce_dev->ce_clk);
exit_disable_core_clk:
	if (pce_dev->ce_core_clk)
		clk_disable_unprepare(pce_dev->ce_core_clk);
exit_disable_core_src_clk:
	if (pce_dev->ce_core_src_clk)
		clk_disable_unprepare(pce_dev->ce_core_src_clk);
	return rc;
}
EXPORT_SYMBOL(qce_enable_clk);

int qce_disable_clk(void *handle)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	int rc = 0;

	if (pce_dev->ce_bus_clk)
		clk_disable_unprepare(pce_dev->ce_bus_clk);
	if (pce_dev->ce_clk)
		clk_disable_unprepare(pce_dev->ce_clk);
	if (pce_dev->ce_core_clk)
		clk_disable_unprepare(pce_dev->ce_core_clk);
	if (pce_dev->ce_core_src_clk)
		clk_disable_unprepare(pce_dev->ce_core_src_clk);

	return rc;
}
EXPORT_SYMBOL(qce_disable_clk);

/*
 * Set up the canned SHA1 dummy request. It is submitted internally
 * (via qce_process_sha_req(NULL)), e.g. when the bunch-mode timer
 * expires, to force the engine to generate a completion interrupt.
 */
static int setup_dummy_req(struct qce_device *pce_dev)
{
	char *input =
	"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopqopqrpqrs";
	int len = DUMMY_REQ_DATA_LEN;

	memcpy(pce_dev->dummyreq_in_buf, input, len);
	sg_set_buf(&pce_dev->dummyreq.sg, pce_dev->dummyreq_in_buf, len);
	sg_mark_end(&pce_dev->dummyreq.sg);

	pce_dev->dummyreq.sreq.alg = QCE_HASH_SHA1;
	pce_dev->dummyreq.sreq.qce_cb = qce_dummy_complete;
	pce_dev->dummyreq.sreq.src = &pce_dev->dummyreq.sg;
	pce_dev->dummyreq.sreq.auth_data[0] = 0;
	pce_dev->dummyreq.sreq.auth_data[1] = 0;
	pce_dev->dummyreq.sreq.auth_data[2] = 0;
	pce_dev->dummyreq.sreq.auth_data[3] = 0;
	pce_dev->dummyreq.sreq.first_blk = 1;
	pce_dev->dummyreq.sreq.last_blk = 1;
	pce_dev->dummyreq.sreq.size = len;
	pce_dev->dummyreq.sreq.areq = &pce_dev->dummyreq.areq;
	pce_dev->dummyreq.sreq.flags = 0;
	pce_dev->dummyreq.sreq.authkey = NULL;

	pce_dev->dummyreq.areq.src = pce_dev->dummyreq.sreq.src;
	pce_dev->dummyreq.areq.nbytes = pce_dev->dummyreq.sreq.size;

	return 0;
}

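/* Release the ARM IOMMU mapping created by qce_smmu_init(), if any. */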
static void qce_iommu_release_iomapping(struct qce_device *pce_dev)
{
	if (pce_dev->smmu_mapping)
		arm_iommu_release_mapping(pce_dev->smmu_mapping);

	pce_dev->smmu_mapping = NULL;
}

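/*
 * qce_smmu_init() - create an ARM IOMMU mapping over the CE's IOVA
 * window, mark the domain for stage-1 bypass, and attach the device.
 * Only called when the DT node carries qcom,smmu-s1-bypass.
 */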
static int qce_smmu_init(struct qce_device *pce_dev)
{
	struct dma_iommu_mapping *mapping;
	int s1_bypass = 1;
	int ret = 0;

	mapping = arm_iommu_create_mapping(&platform_bus_type,
		CRYPTO_SMMU_IOVA_START, CRYPTO_SMMU_IOVA_SIZE);
	if (IS_ERR(mapping)) {
		ret = PTR_ERR(mapping);
		pr_err("Create mapping failed, err = %d\n", ret);
		return ret;
	}
	/* record the mapping first so the error path can release it */
	pce_dev->smmu_mapping = mapping;

	ret = iommu_domain_set_attr(mapping->domain,
				DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
	if (ret < 0) {
		pr_err("Set s1_bypass attribute failed, err = %d\n", ret);
		goto ext_fail_set_attr;
	}

	ret = arm_iommu_attach_device(pce_dev->pdev, mapping);
	if (ret < 0) {
		pr_err("Attach device failed, err = %d\n", ret);
		goto ext_fail_set_attr;
	}
	return ret;

ext_fail_set_attr:
	qce_iommu_release_iomapping(pce_dev);
	return ret;
}

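/*
 * Typical client usage (a hedged sketch, not from this file; error
 * handling omitted and a driver probe context is assumed):
 *
 *	int rc;
 *	void *ce = qce_open(pdev, &rc);
 *	if (!ce)
 *		return rc;
 *	qce_enable_clk(ce);
 *	... submit qce_ablk_cipher_req()/qce_process_sha_req() ...
 *	qce_disable_clk(ce);
 *	qce_close(ce);
 */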
/* crypto engine open function. */
void *qce_open(struct platform_device *pdev, int *rc)
{
	struct qce_device *pce_dev;
	int i;
	static int pcedev_no = 1;

	pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
	if (!pce_dev) {
		*rc = -ENOMEM;
		pr_err("Cannot allocate memory: %d\n", *rc);
		return NULL;
	}
	pce_dev->pdev = &pdev->dev;

	mutex_lock(&qce_iomap_mutex);
	if (pdev->dev.of_node) {
		*rc = __qce_get_device_tree_data(pdev, pce_dev);
		if (*rc)
			goto err_pce_dev;
	} else {
		*rc = -EINVAL;
		pr_err("Device Node not found.\n");
		goto err_pce_dev;
	}

	for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++)
		pce_dev->ce_request_info[i].in_use = false;
	pce_dev->ce_request_index = 0;

	pce_dev->memsize = 10 * PAGE_SIZE * MAX_QCE_ALLOC_BAM_REQ;
	pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
			pce_dev->memsize, &pce_dev->coh_pmem, GFP_KERNEL);

	if (pce_dev->coh_vmem == NULL) {
		*rc = -ENOMEM;
		pr_err("Cannot allocate coherent memory for sps data\n");
		goto err_iobase;
	}

	pce_dev->iovec_memsize = TOTAL_IOVEC_SPACE_PER_PIPE *
						MAX_QCE_ALLOC_BAM_REQ * 2;
	pce_dev->iovec_vmem = kzalloc(pce_dev->iovec_memsize, GFP_KERNEL);
	if (pce_dev->iovec_vmem == NULL) {
		*rc = -ENOMEM;
		goto err_mem;
	}

	pce_dev->dummyreq_in_buf = kzalloc(DUMMY_REQ_DATA_LEN, GFP_KERNEL);
	if (pce_dev->dummyreq_in_buf == NULL) {
		*rc = -ENOMEM;
		goto err_mem;
	}

	*rc = __qce_init_clk(pce_dev);
	if (*rc)
		goto err_mem;
	*rc = qce_enable_clk(pce_dev);
	if (*rc)
		goto err_enable_clk;

	if (pce_dev->bypass_s1_smmu) {
		if (qce_smmu_init(pce_dev)) {
			*rc = -EIO;
			goto err_smmu;
		}
	}

	if (_probe_ce_engine(pce_dev)) {
		*rc = -ENXIO;
		goto err;
	}
	*rc = 0;

	qce_init_ce_cfg_val(pce_dev);
	*rc = qce_sps_init(pce_dev);
	if (*rc)
		goto err;
	qce_setup_ce_sps_data(pce_dev);
	qce_disable_clk(pce_dev);
	setup_dummy_req(pce_dev);
	atomic_set(&pce_dev->no_of_queued_req, 0);
	pce_dev->mode = IN_INTERRUPT_MODE;
	init_timer(&(pce_dev->timer));
	pce_dev->timer.function = qce_multireq_timeout;
	pce_dev->timer.data = (unsigned long)pce_dev;
	pce_dev->timer.expires = jiffies + DELAY_IN_JIFFIES;
	pce_dev->intr_cadence = 0;
	pce_dev->dev_no = pcedev_no;
	pcedev_no++;
	pce_dev->owner = QCE_OWNER_NONE;
	mutex_unlock(&qce_iomap_mutex);
	return pce_dev;
err:
	if (pce_dev->bypass_s1_smmu)
		qce_iommu_release_iomapping(pce_dev);
err_smmu:
	qce_disable_clk(pce_dev);

err_enable_clk:
	__qce_deinit_clk(pce_dev);

err_mem:
	kfree(pce_dev->dummyreq_in_buf);
	kfree(pce_dev->iovec_vmem);
	if (pce_dev->coh_vmem)
		dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
			pce_dev->coh_vmem, pce_dev->coh_pmem);
err_iobase:
	if (pce_dev->iobase)
		iounmap(pce_dev->iobase);
err_pce_dev:
	mutex_unlock(&qce_iomap_mutex);
	kfree(pce_dev);
	return NULL;
}
EXPORT_SYMBOL(qce_open);

/* crypto engine close function. */
int qce_close(void *handle)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;

	if (handle == NULL)
		return -ENODEV;

	mutex_lock(&qce_iomap_mutex);
	qce_enable_clk(pce_dev);
	qce_sps_exit(pce_dev);

	if (pce_dev->iobase)
		iounmap(pce_dev->iobase);
	if (pce_dev->coh_vmem)
		dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
			pce_dev->coh_vmem, pce_dev->coh_pmem);
	kfree(pce_dev->dummyreq_in_buf);
	kfree(pce_dev->iovec_vmem);

	if (pce_dev->bypass_s1_smmu)
		qce_iommu_release_iomapping(pce_dev);

	qce_disable_clk(pce_dev);
	__qce_deinit_clk(pce_dev);
	mutex_unlock(&qce_iomap_mutex);
	kfree(handle);

	return 0;
}
EXPORT_SYMBOL(qce_close);

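/*
 * qce_hw_support() - report the capabilities of this crypto engine
 * instance to the caller. OTA (F8/F9) support is advertised only when
 * the SNOW 3G and Kasumi encrypt and authenticate engines are all
 * present, and buffers must be aligned on CE BAM minor version 0.
 */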
#define OTA_SUPPORT_MASK (1 << CRYPTO_ENCR_SNOW3G_SEL |\
				1 << CRYPTO_ENCR_KASUMI_SEL |\
				1 << CRYPTO_AUTH_SNOW3G_SEL |\
				1 << CRYPTO_AUTH_KASUMI_SEL)

int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
{
	struct qce_device *pce_dev = (struct qce_device *)handle;

	if (ce_support == NULL)
		return -EINVAL;

	ce_support->sha1_hmac_20 = false;
	ce_support->sha1_hmac = false;
	ce_support->sha256_hmac = false;
	ce_support->sha_hmac = true;
	ce_support->cmac = true;
	ce_support->aes_key_192 = false;
	ce_support->aes_xts = true;
	if ((pce_dev->engines_avail & OTA_SUPPORT_MASK) == OTA_SUPPORT_MASK)
		ce_support->ota = true;
	else
		ce_support->ota = false;
	ce_support->bam = true;
	ce_support->is_shared = (pce_dev->is_shared == 1) ? true : false;
	ce_support->hw_key = pce_dev->support_hw_key;
	ce_support->aes_ccm = true;
	ce_support->clk_mgmt_sus_res = pce_dev->support_clk_mgmt_sus_res;
	ce_support->req_bw_before_clk = pce_dev->request_bw_before_clk;
	if (pce_dev->ce_bam_info.minor_version)
		ce_support->aligned_only = false;
	else
		ce_support->aligned_only = true;

	ce_support->use_sw_aes_cbc_ecb_ctr_algo =
				pce_dev->use_sw_aes_cbc_ecb_ctr_algo;
	ce_support->use_sw_aead_algo =
				pce_dev->use_sw_aead_algo;
	ce_support->use_sw_aes_xts_algo =
				pce_dev->use_sw_aes_xts_algo;
	ce_support->use_sw_ahash_algo =
				pce_dev->use_sw_ahash_algo;
	ce_support->use_sw_hmac_algo =
				pce_dev->use_sw_hmac_algo;
	ce_support->use_sw_aes_ccm_algo =
				pce_dev->use_sw_aes_ccm_algo;
	ce_support->ce_device = pce_dev->ce_bam_info.ce_device;
	ce_support->ce_hw_instance = pce_dev->ce_bam_info.ce_hw_instance;
	if (pce_dev->no_get_around)
		ce_support->max_request = MAX_QCE_BAM_REQ;
	else
		ce_support->max_request = 1;
	return 0;
}
EXPORT_SYMBOL(qce_hw_support);

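/*
 * qce_dump_req() - debug helper: log which request slots are in use
 * and dump the descriptor FIFOs of every active slot.
 */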
void qce_dump_req(void *handle)
{
	int i;
	struct qce_device *pce_dev = (struct qce_device *)handle;

	for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
		pr_info("qce_dump_req %d %d\n", i,
				pce_dev->ce_request_info[i].in_use);
		if (pce_dev->ce_request_info[i].in_use == true)
			_qce_dump_descr_fifos(pce_dev, i);
	}
}
EXPORT_SYMBOL(qce_dump_req);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Crypto Engine driver");