/*
 * QTI Crypto Engine driver.
 *
 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) "QCE50: %s: " fmt, __func__

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/bitops.h>
#include <linux/clk/qcom.h>
#include <linux/qcrypto.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <soc/qcom/socinfo.h>
#include <asm/dma-iommu.h>
#include <linux/iommu.h>

#include "qce.h"
#include "qce50.h"
#include "qcryptohw_50.h"
#include "qce_ota.h"

#define CRYPTO_SMMU_IOVA_START 0x10000000
#define CRYPTO_SMMU_IOVA_SIZE 0x40000000

#define CRYPTO_CONFIG_RESET 0xE01EF
#define MAX_SPS_DESC_FIFO_SIZE 0xfff0
#define QCE_MAX_NUM_DSCR 0x200
#define QCE_SECTOR_SIZE 0x200
#define CE_CLK_100MHZ 100000000
#define CE_CLK_DIV 1000000

#define CRYPTO_CORE_MAJOR_VER_NUM 0x05
#define CRYPTO_CORE_MINOR_VER_NUM 0x03
#define CRYPTO_CORE_STEP_VER_NUM 0x1

#define CRYPTO_REQ_USER_PAT 0xdead0000

static DEFINE_MUTEX(bam_register_lock);
static DEFINE_MUTEX(qce_iomap_mutex);

struct bam_registration_info {
	struct list_head qlist;
	unsigned long handle;
	uint32_t cnt;
	uint32_t bam_mem;
	void __iomem *bam_iobase;
	bool support_cmd_dscr;
};
static LIST_HEAD(qce50_bam_list);

/* Used to determine the mode */
#define MAX_BUNCH_MODE_REQ 2
/* Max number of requests supported */
#define MAX_QCE_BAM_REQ 8
/* Interrupt flag will be set for every SET_INTR_AT_REQ-th request */
#define SET_INTR_AT_REQ (MAX_QCE_BAM_REQ / 2)
/* Extra request space to hold the dummy request */
#define MAX_QCE_BAM_REQ_WITH_DUMMY_REQ (MAX_QCE_BAM_REQ + 1)
/* Allocate the memory for MAX_QCE_BAM_REQ + 1 (for dummy request) */
#define MAX_QCE_ALLOC_BAM_REQ MAX_QCE_BAM_REQ_WITH_DUMMY_REQ
/* QCE driver modes */
#define IN_INTERRUPT_MODE 0
#define IN_BUNCH_MODE 1
/* Dummy request data length */
#define DUMMY_REQ_DATA_LEN 64
/* Delay timer to expire when in bunch mode */
#define DELAY_IN_JIFFIES 5
/* Index pointing to the dummy request */
#define DUMMY_REQ_INDEX MAX_QCE_BAM_REQ

#define TOTAL_IOVEC_SPACE_PER_PIPE (QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec))

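/*
 * Illustrative note (worked out from the macros above, not in the
 * original source): with MAX_QCE_BAM_REQ = 8, SET_INTR_AT_REQ is 4,
 * so in bunch mode the driver requests a BAM interrupt on every 4th
 * queued request and relies on the DELAY_IN_JIFFIES timer to sweep up
 * any requests still queued after that.
 */
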
enum qce_owner {
	QCE_OWNER_NONE = 0,
	QCE_OWNER_CLIENT = 1,
	QCE_OWNER_TIMEOUT = 2
};

struct dummy_request {
	struct qce_sha_req sreq;
	struct scatterlist sg;
	struct ahash_request areq;
};

/*
 * CE HW device structure.
 * Each engine has an instance of the structure.
 * Each engine can only handle one crypto operation at one time. It is up to
 * the sw above to ensure single threading of operation on an engine.
 */
struct qce_device {
	struct device *pdev;		/* Handle to platform_device structure */
	struct bam_registration_info *pbam;

	unsigned char *coh_vmem;	/* Allocated coherent virtual memory */
	dma_addr_t coh_pmem;		/* Allocated coherent physical memory */
	int memsize;			/* Memory allocated */
	unsigned char *iovec_vmem;	/* Allocated iovec virtual memory */
	int iovec_memsize;		/* Memory allocated */
	uint32_t bam_mem;		/* bam physical address, from DT */
	uint32_t bam_mem_size;		/* bam io size, from DT */
	int is_shared;			/* CE HW is shared */
	bool support_cmd_dscr;
	bool support_hw_key;
	bool support_clk_mgmt_sus_res;
	bool support_only_core_src_clk;
	bool request_bw_before_clk;

	void __iomem *iobase;		/* Virtual io base of CE HW */
	unsigned int phy_iobase;	/* Physical io base of CE HW */

	struct clk *ce_core_src_clk;	/* Handle to CE src clk */
	struct clk *ce_core_clk;	/* Handle to CE clk */
	struct clk *ce_clk;		/* Handle to CE clk */
	struct clk *ce_bus_clk;		/* Handle to CE AXI clk */
	bool no_get_around;
	bool no_ccm_mac_status_get_around;
	unsigned int ce_opp_freq_hz;
	bool use_sw_aes_cbc_ecb_ctr_algo;
	bool use_sw_aead_algo;
	bool use_sw_aes_xts_algo;
	bool use_sw_ahash_algo;
	bool use_sw_hmac_algo;
	bool use_sw_aes_ccm_algo;
	uint32_t engines_avail;
	struct qce_ce_cfg_reg_setting reg;
	struct ce_bam_info ce_bam_info;
	struct ce_request_info ce_request_info[MAX_QCE_ALLOC_BAM_REQ];
	unsigned int ce_request_index;
	enum qce_owner owner;
	atomic_t no_of_queued_req;
	struct timer_list timer;
	struct dummy_request dummyreq;
	unsigned int mode;
	unsigned int intr_cadence;
	unsigned int dev_no;
	struct qce_driver_stats qce_stats;
	atomic_t bunch_cmd_seq;
	atomic_t last_intr_seq;
	bool cadence_flag;
	uint8_t *dummyreq_in_buf;
	struct dma_iommu_mapping *smmu_mapping;
	bool bypass_s1_smmu;
};

static void print_notify_debug(struct sps_event_notify *notify);
static void _sps_producer_callback(struct sps_event_notify *notify);
static int qce_dummy_req(struct qce_device *pce_dev);

static int _qce50_disp_stats;

/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha1[] = {
	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
};

/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha256[] = {
	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
	0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
};

static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned int n;

	n = len / sizeof(uint32_t);
	for (; n > 0; n--) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000) |
				(((*(b+2)) << 8) & 0xff00) |
				(*(b+3) & 0xff);
		b += sizeof(uint32_t);
		iv++;
	}

	n = len % sizeof(uint32_t);
	if (n == 3) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000) |
				(((*(b+2)) << 8) & 0xff00);
	} else if (n == 2) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000);
	} else if (n == 1) {
		*iv = ((*b << 24) & 0xff000000);
	}
}

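/*
 * Illustrative example (not in the original source):
 * _byte_stream_to_net_words() packs the byte stream into 32-bit words
 * in network (big-endian) order regardless of CPU endianness, so
 * b = {0x01, 0x02, 0x03, 0x04} yields iv[0] = 0x01020304, and a
 * 3-byte tail {0xaa, 0xbb, 0xcc} yields 0xaabbcc00.
 */
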
static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned int i, j;
	unsigned char swap_iv[AES_IV_LENGTH];

	memset(swap_iv, 0, AES_IV_LENGTH);
	for (i = (AES_IV_LENGTH-len), j = len-1; i < AES_IV_LENGTH; i++, j--)
		swap_iv[i] = b[j];
	_byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
}

static int count_sg(struct scatterlist *sg, int nbytes)
{
	int i;

	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
		nbytes -= sg->length;
	return i;
}

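/*
 * Illustrative example (not in the original source): count_sg()
 * returns the number of scatterlist entries needed to cover nbytes;
 * e.g. 9000 bytes spread across 4 KB entries counts as 3 entries.
 */
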
static int qce_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; ++i) {
		dma_map_sg(dev, sg, 1, direction);
		sg = sg_next(sg);
	}

	return nents;
}

static int qce_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	int nents, enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; ++i) {
		dma_unmap_sg(dev, sg, 1, direction);
		sg = sg_next(sg);
	}

	return nents;
}

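/*
 * Note (assumption, not stated in the original source): each entry is
 * mapped with nents == 1 instead of one dma_map_sg() call over the
 * whole list, presumably so the DMA layer cannot merge adjacent
 * entries and the mapping stays 1:1 with the BAM descriptors that are
 * built from it.
 */
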
static int _probe_ce_engine(struct qce_device *pce_dev)
{
	unsigned int rev;
	unsigned int maj_rev, min_rev, step_rev;

	rev = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
	/*
	 * Ensure previous instructions (setting the GO register)
	 * were completed before checking the version.
	 */
	mb();
	maj_rev = (rev & CRYPTO_CORE_MAJOR_REV_MASK) >> CRYPTO_CORE_MAJOR_REV;
	min_rev = (rev & CRYPTO_CORE_MINOR_REV_MASK) >> CRYPTO_CORE_MINOR_REV;
	step_rev = (rev & CRYPTO_CORE_STEP_REV_MASK) >> CRYPTO_CORE_STEP_REV;

	if (maj_rev != CRYPTO_CORE_MAJOR_VER_NUM) {
		pr_err("Unsupported QTI crypto device at 0x%x, rev %d.%d.%d\n",
			pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
		return -EIO;
	}

	/*
	 * The majority of crypto HW bugs have been fixed in 5.3.0 and
	 * above. That allows a single sps transfer of consumer
	 * pipe, and a single sps transfer of producer pipe
	 * for a crypto request. no_get_around flag indicates this.
	 *
	 * In 5.3.1, the CCM MAC_FAILED in result dump issue is
	 * fixed. no_ccm_mac_status_get_around flag indicates this.
	 */
	pce_dev->no_get_around = (min_rev >=
			CRYPTO_CORE_MINOR_VER_NUM) ? true : false;
	if (min_rev > CRYPTO_CORE_MINOR_VER_NUM)
		pce_dev->no_ccm_mac_status_get_around = true;
	else if ((min_rev == CRYPTO_CORE_MINOR_VER_NUM) &&
			(step_rev >= CRYPTO_CORE_STEP_VER_NUM))
		pce_dev->no_ccm_mac_status_get_around = true;
	else
		pce_dev->no_ccm_mac_status_get_around = false;

	pce_dev->ce_bam_info.minor_version = min_rev;

	pce_dev->engines_avail = readl_relaxed(pce_dev->iobase +
					CRYPTO_ENGINES_AVAIL);
	dev_info(pce_dev->pdev, "QTI Crypto %d.%d.%d device found @0x%x\n",
			maj_rev, min_rev, step_rev, pce_dev->phy_iobase);

	pce_dev->ce_bam_info.ce_burst_size = MAX_CE_BAM_BURST_SIZE;

	dev_info(pce_dev->pdev, "CE device = %#x IO base, CE = %pK Consumer (IN) PIPE %d,\nProducer (OUT) PIPE %d IO base BAM = %pK\nBAM IRQ %d Engines Availability = %#x\n",
			pce_dev->ce_bam_info.ce_device, pce_dev->iobase,
			pce_dev->ce_bam_info.dest_pipe_index,
			pce_dev->ce_bam_info.src_pipe_index,
			pce_dev->ce_bam_info.bam_iobase,
			pce_dev->ce_bam_info.bam_irq, pce_dev->engines_avail);
	return 0;
}

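/*
 * Worked example of the checks above (not in the original source): a
 * 5.2.x core sets neither workaround flag, a 5.3.0 core sets only
 * no_get_around, and a 5.3.1 or later core sets both no_get_around
 * and no_ccm_mac_status_get_around.
 */
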
static struct qce_cmdlist_info *_ce_get_hash_cmdlistinfo(
			struct qce_device *pce_dev,
			int req_info, struct qce_sha_req *sreq)
{
	struct ce_sps_data *pce_sps_data;
	struct qce_cmdlistptr_ops *cmdlistptr;

	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
	cmdlistptr = &pce_sps_data->cmdlistptr;
	switch (sreq->alg) {
	case QCE_HASH_SHA1:
		return &cmdlistptr->auth_sha1;
	case QCE_HASH_SHA256:
		return &cmdlistptr->auth_sha256;
	case QCE_HASH_SHA1_HMAC:
		return &cmdlistptr->auth_sha1_hmac;
	case QCE_HASH_SHA256_HMAC:
		return &cmdlistptr->auth_sha256_hmac;
	case QCE_HASH_AES_CMAC:
		if (sreq->authklen == AES128_KEY_SIZE)
			return &cmdlistptr->auth_aes_128_cmac;
		return &cmdlistptr->auth_aes_256_cmac;
	default:
		return NULL;
	}
	return NULL;
}

static int _ce_setup_hash(struct qce_device *pce_dev,
				struct qce_sha_req *sreq,
				struct qce_cmdlist_info *cmdlistinfo)
{
	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
	uint32_t diglen;
	int i;
	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	bool sha1 = false;
	struct sps_command_element *pce = NULL;
	bool use_hw_key = false;
	bool use_pipe_key = false;
	uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
	uint32_t auth_cfg;

	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC) ||
			(sreq->alg == QCE_HASH_AES_CMAC)) {

		/* no more check for null key. use flag */
		if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY)
						== QCRYPTO_CTX_USE_HW_KEY)
			use_hw_key = true;
		else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
						QCRYPTO_CTX_USE_PIPE_KEY)
			use_pipe_key = true;
		pce = cmdlistinfo->go_proc;
		if (use_hw_key == true) {
			pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
						pce_dev->phy_iobase);
		} else {
			pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
						pce_dev->phy_iobase);
			pce = cmdlistinfo->auth_key;
			if (use_pipe_key == false) {
				_byte_stream_to_net_words(mackey32,
						sreq->authkey,
						sreq->authklen);
				for (i = 0; i < authk_size_in_word; i++, pce++)
					pce->data = mackey32[i];
			}
		}
	}

	if (sreq->alg == QCE_HASH_AES_CMAC)
		goto go_proc;

	/* if not the last, the size has to be on the block boundary */
	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
		return -EIO;

	switch (sreq->alg) {
	case QCE_HASH_SHA1:
	case QCE_HASH_SHA1_HMAC:
		diglen = SHA1_DIGEST_SIZE;
		sha1 = true;
		break;
	case QCE_HASH_SHA256:
	case QCE_HASH_SHA256_HMAC:
		diglen = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
	if (sreq->first_blk) {
		if (sha1) {
			for (i = 0; i < 5; i++)
				auth32[i] = _std_init_vector_sha1[i];
		} else {
			for (i = 0; i < 8; i++)
				auth32[i] = _std_init_vector_sha256[i];
		}
	} else {
		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
	}

	pce = cmdlistinfo->auth_iv;
	for (i = 0; i < 5; i++, pce++)
		pce->data = auth32[i];

	if ((sreq->alg == QCE_HASH_SHA256) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
		for (i = 5; i < 8; i++, pce++)
			pce->data = auth32[i];
	}

	/* write auth_bytecnt 0/1, start with 0 */
	pce = cmdlistinfo->auth_bytecount;
	for (i = 0; i < 2; i++, pce++)
		pce->data = sreq->auth_data[i];

	/* Set/reset last bit in CFG register */
	pce = cmdlistinfo->auth_seg_cfg;
	auth_cfg = pce->data & ~(1 << CRYPTO_LAST |
				1 << CRYPTO_FIRST |
				1 << CRYPTO_USE_PIPE_KEY_AUTH |
				1 << CRYPTO_USE_HW_KEY_AUTH);
	if (sreq->last_blk)
		auth_cfg |= 1 << CRYPTO_LAST;
	if (sreq->first_blk)
		auth_cfg |= 1 << CRYPTO_FIRST;
	if (use_hw_key)
		auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
	if (use_pipe_key)
		auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
	pce->data = auth_cfg;
go_proc:
	/* write auth seg size */
	pce = cmdlistinfo->auth_seg_size;
	pce->data = sreq->size;

	pce = cmdlistinfo->encr_seg_cfg;
	pce->data = 0;

	/* write auth seg start */
	pce = cmdlistinfo->auth_seg_start;
	pce->data = 0;

	/* write seg size */
	pce = cmdlistinfo->seg_size;

	/* always ensure there is input data. ZLT does not work for bam-ndp */
	if (sreq->size)
		pce->data = sreq->size;
	else
		pce->data = pce_dev->ce_bam_info.ce_burst_size;

	return 0;
}

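/*
 * Note (behavior inferred from the code above, not stated in the
 * original source): a large hash is streamed by submitting the first
 * block with first_blk set (loading the standard FIPS 180-2 IV),
 * feeding the intermediate digest and byte counts back in through
 * sreq->digest and sreq->auth_data on the following block-aligned
 * submissions, and setting last_blk on the final one.
 */
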
static struct qce_cmdlist_info *_ce_get_aead_cmdlistinfo(
			struct qce_device *pce_dev,
			int req_info, struct qce_req *creq)
{
	struct ce_sps_data *pce_sps_data;
	struct qce_cmdlistptr_ops *cmdlistptr;

	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
	cmdlistptr = &pce_sps_data->cmdlistptr;
	switch (creq->alg) {
	case CIPHER_ALG_DES:
		switch (creq->mode) {
		case QCE_MODE_CBC:
			if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
				return &cmdlistptr->aead_hmac_sha1_cbc_des;
			else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
				return &cmdlistptr->aead_hmac_sha256_cbc_des;
			else
				return NULL;
			break;
		default:
			return NULL;
		}
		break;
	case CIPHER_ALG_3DES:
		switch (creq->mode) {
		case QCE_MODE_CBC:
			if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
				return &cmdlistptr->aead_hmac_sha1_cbc_3des;
			else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
				return &cmdlistptr->aead_hmac_sha256_cbc_3des;
			else
				return NULL;
			break;
		default:
			return NULL;
		}
		break;
	case CIPHER_ALG_AES:
		switch (creq->mode) {
		case QCE_MODE_CBC:
			if (creq->encklen == AES128_KEY_SIZE) {
				if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
					return &cmdlistptr->
						aead_hmac_sha1_cbc_aes_128;
				else if (creq->auth_alg ==
						QCE_HASH_SHA256_HMAC)
					return &cmdlistptr->
						aead_hmac_sha256_cbc_aes_128;
				else
					return NULL;
			} else if (creq->encklen == AES256_KEY_SIZE) {
				if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
					return &cmdlistptr->
						aead_hmac_sha1_cbc_aes_256;
				else if (creq->auth_alg ==
						QCE_HASH_SHA256_HMAC)
					return &cmdlistptr->
						aead_hmac_sha256_cbc_aes_256;
				else
					return NULL;
			} else
				return NULL;
			break;
		default:
			return NULL;
		}
		break;

	default:
		return NULL;
	}
	return NULL;
}

static int _ce_setup_aead(struct qce_device *pce_dev, struct qce_req *q_req,
		uint32_t totallen_in, uint32_t coffset,
		struct qce_cmdlist_info *cmdlistinfo)
{
	int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
	int i;
	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
	struct sps_command_element *pce;
	uint32_t a_cfg;
	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
	uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
	uint32_t enck_size_in_word = 0;
	uint32_t enciv_in_word;
	uint32_t key_size;
	uint32_t encr_cfg = 0;
	uint32_t ivsize = q_req->ivsize;

	key_size = q_req->encklen;
	enck_size_in_word = key_size/sizeof(uint32_t);

	switch (q_req->alg) {
	case CIPHER_ALG_DES:
		enciv_in_word = 2;
		break;
	case CIPHER_ALG_3DES:
		enciv_in_word = 2;
		break;
	case CIPHER_ALG_AES:
		if ((key_size != AES128_KEY_SIZE) &&
				(key_size != AES256_KEY_SIZE))
			return -EINVAL;
		enciv_in_word = 4;
		break;
	default:
		return -EINVAL;
	}

	/* only support cbc mode */
	if (q_req->mode != QCE_MODE_CBC)
		return -EINVAL;

	_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
	pce = cmdlistinfo->encr_cntr_iv;
	for (i = 0; i < enciv_in_word; i++, pce++)
		pce->data = enciv32[i];

	/*
	 * write encr key
	 * do not use hw key or pipe key
	 */
	_byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
	pce = cmdlistinfo->encr_key;
	for (i = 0; i < enck_size_in_word; i++, pce++)
		pce->data = enckey32[i];

	/* write encr seg cfg */
	pce = cmdlistinfo->encr_seg_cfg;
	encr_cfg = pce->data;
	if (q_req->dir == QCE_ENCRYPT)
		encr_cfg |= (1 << CRYPTO_ENCODE);
	else
		encr_cfg &= ~(1 << CRYPTO_ENCODE);
	pce->data = encr_cfg;

	/* we only support sha1-hmac and sha256-hmac at this point */
	_byte_stream_to_net_words(mackey32, q_req->authkey,
					q_req->authklen);
	pce = cmdlistinfo->auth_key;
	for (i = 0; i < authk_size_in_word; i++, pce++)
		pce->data = mackey32[i];
	pce = cmdlistinfo->auth_iv;

	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC)
		for (i = 0; i < 5; i++, pce++)
			pce->data = _std_init_vector_sha1[i];
	else
		for (i = 0; i < 8; i++, pce++)
			pce->data = _std_init_vector_sha256[i];

	/* write auth_bytecnt 0/1, start with 0 */
	pce = cmdlistinfo->auth_bytecount;
	for (i = 0; i < 2; i++, pce++)
		pce->data = 0;

	pce = cmdlistinfo->auth_seg_cfg;
	a_cfg = pce->data;
	a_cfg &= ~(CRYPTO_AUTH_POS_MASK);
	if (q_req->dir == QCE_ENCRYPT)
		a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
	else
		a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
	pce->data = a_cfg;

	/* write auth seg size */
	pce = cmdlistinfo->auth_seg_size;
	pce->data = totallen_in;

	/* write auth seg start */
	pce = cmdlistinfo->auth_seg_start;
	pce->data = 0;

	/* write seg size */
	pce = cmdlistinfo->seg_size;
	pce->data = totallen_in;

	/* write encr seg size */
	pce = cmdlistinfo->encr_seg_size;
	pce->data = q_req->cryptlen;

	/* write encr seg start */
	pce = cmdlistinfo->encr_seg_start;
	pce->data = (coffset & 0xffff);

	return 0;
}

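/*
 * Note (assumption drawn from the CRYPTO_AUTH_POS setting above):
 * hashing after the cipher on encrypt and before it on decrypt places
 * the HMAC over the ciphertext in both directions, i.e. the
 * encrypt-then-MAC construction used by authenc()-style AEAD.
 */
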
static struct qce_cmdlist_info *_ce_get_cipher_cmdlistinfo(
			struct qce_device *pce_dev,
			int req_info, struct qce_req *creq)
{
	struct ce_request_info *preq_info;
	struct ce_sps_data *pce_sps_data;
	struct qce_cmdlistptr_ops *cmdlistptr;

	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;
	cmdlistptr = &pce_sps_data->cmdlistptr;
	if (creq->alg != CIPHER_ALG_AES) {
		switch (creq->alg) {
		case CIPHER_ALG_DES:
			if (creq->mode == QCE_MODE_ECB)
				return &cmdlistptr->cipher_des_ecb;
			return &cmdlistptr->cipher_des_cbc;
		case CIPHER_ALG_3DES:
			if (creq->mode == QCE_MODE_ECB)
				return &cmdlistptr->cipher_3des_ecb;
			return &cmdlistptr->cipher_3des_cbc;
		default:
			return NULL;
		}
	} else {
		switch (creq->mode) {
		case QCE_MODE_ECB:
			if (creq->encklen == AES128_KEY_SIZE)
				return &cmdlistptr->cipher_aes_128_ecb;
			return &cmdlistptr->cipher_aes_256_ecb;
		case QCE_MODE_CBC:
		case QCE_MODE_CTR:
			if (creq->encklen == AES128_KEY_SIZE)
				return &cmdlistptr->cipher_aes_128_cbc_ctr;
			return &cmdlistptr->cipher_aes_256_cbc_ctr;
		case QCE_MODE_XTS:
			if (creq->encklen/2 == AES128_KEY_SIZE)
				return &cmdlistptr->cipher_aes_128_xts;
			return &cmdlistptr->cipher_aes_256_xts;
		case QCE_MODE_CCM:
			if (creq->encklen == AES128_KEY_SIZE)
				return &cmdlistptr->aead_aes_128_ccm;
			return &cmdlistptr->aead_aes_256_ccm;
		default:
			return NULL;
		}
	}
	return NULL;
}

static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
		uint32_t totallen_in, uint32_t coffset,
		struct qce_cmdlist_info *cmdlistinfo)
{
	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
			0, 0, 0, 0};
	uint32_t enck_size_in_word = 0;
	uint32_t key_size;
	bool use_hw_key = false;
	bool use_pipe_key = false;
	uint32_t encr_cfg = 0;
	uint32_t ivsize = creq->ivsize;
	int i;
	struct sps_command_element *pce = NULL;

	if (creq->mode == QCE_MODE_XTS)
		key_size = creq->encklen/2;
	else
		key_size = creq->encklen;

	pce = cmdlistinfo->go_proc;
	if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
		use_hw_key = true;
	} else {
		if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
					QCRYPTO_CTX_USE_PIPE_KEY)
			use_pipe_key = true;
	}
	pce = cmdlistinfo->go_proc;
	if (use_hw_key == true)
		pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
						pce_dev->phy_iobase);
	else
		pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
						pce_dev->phy_iobase);
	if ((use_pipe_key == false) && (use_hw_key == false)) {
		_byte_stream_to_net_words(enckey32, creq->enckey, key_size);
		enck_size_in_word = key_size/sizeof(uint32_t);
	}

	if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
		uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
		uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
		uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
		uint32_t auth_cfg = 0;

		/* write nonce */
		_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
		pce = cmdlistinfo->auth_nonce_info;
		for (i = 0; i < noncelen32; i++, pce++)
			pce->data = nonce32[i];

		if (creq->authklen == AES128_KEY_SIZE)
			auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
		else {
			if (creq->authklen == AES256_KEY_SIZE)
				auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
		}
		if (creq->dir == QCE_ENCRYPT)
			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
		else
			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
		auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);

		if (use_hw_key == true) {
			auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
		} else {
			auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
			/* write auth key */
			pce = cmdlistinfo->auth_key;
			for (i = 0; i < authklen32; i++, pce++)
				pce->data = enckey32[i];
		}

		pce = cmdlistinfo->auth_seg_cfg;
		pce->data = auth_cfg;

		pce = cmdlistinfo->auth_seg_size;
		if (creq->dir == QCE_ENCRYPT)
			pce->data = totallen_in;
		else
			pce->data = totallen_in - creq->authsize;
		pce = cmdlistinfo->auth_seg_start;
		pce->data = 0;
	} else {
		if (creq->op != QCE_REQ_AEAD) {
			pce = cmdlistinfo->auth_seg_cfg;
			pce->data = 0;
		}
	}
	switch (creq->mode) {
	case QCE_MODE_ECB:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
		break;
	case QCE_MODE_CBC:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
		break;
	case QCE_MODE_XTS:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
		break;
	case QCE_MODE_CCM:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
		encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
				(CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
		break;
	case QCE_MODE_CTR:
	default:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
		break;
	}

	switch (creq->alg) {
	case CIPHER_ALG_DES:
		if (creq->mode != QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
			pce = cmdlistinfo->encr_cntr_iv;
			pce->data = enciv32[0];
			pce++;
			pce->data = enciv32[1];
		}
		if (use_hw_key == false) {
			pce = cmdlistinfo->encr_key;
			pce->data = enckey32[0];
			pce++;
			pce->data = enckey32[1];
		}
		break;
	case CIPHER_ALG_3DES:
		if (creq->mode != QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
			pce = cmdlistinfo->encr_cntr_iv;
			pce->data = enciv32[0];
			pce++;
			pce->data = enciv32[1];
		}
		if (use_hw_key == false) {
			/* write encr key */
			pce = cmdlistinfo->encr_key;
			for (i = 0; i < 6; i++, pce++)
				pce->data = enckey32[i];
		}
		break;
	case CIPHER_ALG_AES:
	default:
		if (creq->mode == QCE_MODE_XTS) {
			uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
					= {0, 0, 0, 0, 0, 0, 0, 0};
			uint32_t xtsklen =
					creq->encklen/(2 * sizeof(uint32_t));

			if ((use_hw_key == false) && (use_pipe_key == false)) {
				_byte_stream_to_net_words(xtskey32,
					(creq->enckey + creq->encklen/2),
						creq->encklen/2);
				/* write xts encr key */
				pce = cmdlistinfo->encr_xts_key;
				for (i = 0; i < xtsklen; i++, pce++)
					pce->data = xtskey32[i];
			}
			/* write xts du size */
			pce = cmdlistinfo->encr_xts_du_size;
			switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
			case QCRYPTO_CTX_XTS_DU_SIZE_512B:
				pce->data = min((unsigned int)QCE_SECTOR_SIZE,
						creq->cryptlen);
				break;
			case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
				pce->data =
					min((unsigned int)QCE_SECTOR_SIZE * 2,
						creq->cryptlen);
				break;
			default:
				pce->data = creq->cryptlen;
				break;
			}
		}
		if (creq->mode != QCE_MODE_ECB) {
			if (creq->mode == QCE_MODE_XTS)
				_byte_stream_swap_to_net_words(enciv32,
						creq->iv, ivsize);
			else
				_byte_stream_to_net_words(enciv32, creq->iv,
						ivsize);
			/* write encr cntr iv */
			pce = cmdlistinfo->encr_cntr_iv;
			for (i = 0; i < 4; i++, pce++)
				pce->data = enciv32[i];

			if (creq->mode == QCE_MODE_CCM) {
				/* write cntr iv for ccm */
				pce = cmdlistinfo->encr_ccm_cntr_iv;
				for (i = 0; i < 4; i++, pce++)
					pce->data = enciv32[i];
				/* update cntr_iv[3] by one */
				pce = cmdlistinfo->encr_cntr_iv;
				pce += 3;
				pce->data += 1;
			}
		}

		if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
			encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
					CRYPTO_ENCR_KEY_SZ);
		} else {
			if (use_hw_key == false) {
				/* write encr key */
				pce = cmdlistinfo->encr_key;
				for (i = 0; i < enck_size_in_word; i++, pce++)
					pce->data = enckey32[i];
			}
		} /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
		break;
	} /* end of switch (creq->alg) */

	if (use_pipe_key)
		encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
					<< CRYPTO_USE_PIPE_KEY_ENCR);

	/* write encr seg cfg */
	pce = cmdlistinfo->encr_seg_cfg;
	if ((creq->alg == CIPHER_ALG_DES) || (creq->alg == CIPHER_ALG_3DES)) {
		if (creq->dir == QCE_ENCRYPT)
			pce->data |= (1 << CRYPTO_ENCODE);
		else
			pce->data &= ~(1 << CRYPTO_ENCODE);
		encr_cfg = pce->data;
	} else {
		encr_cfg |=
			((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
	}
	if (use_hw_key == true)
		encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
	else
		encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
	pce->data = encr_cfg;

	/* write encr seg size */
	pce = cmdlistinfo->encr_seg_size;
	if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
		pce->data = (creq->cryptlen + creq->authsize);
	else
		pce->data = creq->cryptlen;

	/* write encr seg start */
	pce = cmdlistinfo->encr_seg_start;
	pce->data = (coffset & 0xffff);

	/* write seg size */
	pce = cmdlistinfo->seg_size;
	pce->data = totallen_in;

	return 0;
}

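/*
 * Note (assumption about the CCM counter handling above): the initial
 * counter block is loaded into encr_ccm_cntr_iv while encr_cntr_iv[3]
 * is pre-incremented, presumably so the MAC is encrypted with counter
 * block A0 and the payload starting from A1, per the CCM construction.
 */
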
static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req *req,
		struct qce_cmdlist_info *cmdlistinfo)
{
	uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
	uint32_t cfg;
	struct sps_command_element *pce;
	int i;

	switch (req->algorithm) {
	case QCE_OTA_ALGO_KASUMI:
		cfg = pce_dev->reg.auth_cfg_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
	default:
		cfg = pce_dev->reg.auth_cfg_snow3g;
		break;
	}

	/* write key in CRYPTO_AUTH_IV0-3_REG */
	_byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
	pce = cmdlistinfo->auth_iv;
	for (i = 0; i < key_size_in_word; i++, pce++)
		pce->data = ikey32[i];

	/* write last bits in CRYPTO_AUTH_IV4_REG */
	pce->data = req->last_bits;

	/* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
	pce = cmdlistinfo->auth_bytecount;
	pce->data = req->fresh;

	/* write count-i to CRYPTO_AUTH_BYTECNT1_REG */
	pce++;
	pce->data = req->count_i;

	/* write auth seg cfg */
	pce = cmdlistinfo->auth_seg_cfg;
	if (req->direction == QCE_OTA_DIR_DOWNLINK)
		cfg |= BIT(CRYPTO_F9_DIRECTION);
	pce->data = cfg;

	/* write auth seg size */
	pce = cmdlistinfo->auth_seg_size;
	pce->data = req->msize;

	/* write auth seg start */
	pce = cmdlistinfo->auth_seg_start;
	pce->data = 0;

	/* write seg size */
	pce = cmdlistinfo->seg_size;
	pce->data = req->msize;

	/* write go */
	pce = cmdlistinfo->go_proc;
	pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);
	return 0;
}

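/*
 * Background note (not in the original source): f9 is the 3GPP
 * integrity algorithm (UIA1 on Kasumi, UIA2 on SNOW 3G); its COUNT-I,
 * FRESH and DIRECTION inputs map onto the BYTECNT/IV/SEG_CFG
 * programming above.
 */
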
static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req,
		bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset,
		uint16_t cipher_size,
		struct qce_cmdlist_info *cmdlistinfo)
{
	uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
	uint32_t cfg;
	struct sps_command_element *pce;
	int i;

	switch (req->algorithm) {
	case QCE_OTA_ALGO_KASUMI:
		cfg = pce_dev->reg.encr_cfg_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
	default:
		cfg = pce_dev->reg.encr_cfg_snow3g;
		break;
	}
	/* write key */
	_byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
	pce = cmdlistinfo->encr_key;
	for (i = 0; i < key_size_in_word; i++, pce++)
		pce->data = ckey32[i];

	/* write encr seg cfg */
	pce = cmdlistinfo->encr_seg_cfg;
	if (key_stream_mode)
		cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
	if (req->direction == QCE_OTA_DIR_DOWNLINK)
		cfg |= BIT(CRYPTO_F8_DIRECTION);
	pce->data = cfg;

	/* write encr seg start */
	pce = cmdlistinfo->encr_seg_start;
	pce->data = (cipher_offset & 0xffff);

	/* write encr seg size */
	pce = cmdlistinfo->encr_seg_size;
	pce->data = cipher_size;

	/* write seg size */
	pce = cmdlistinfo->seg_size;
	pce->data = req->data_len;

	/* write cntr0_iv0 for countC */
	pce = cmdlistinfo->encr_cntr_iv;
	pce->data = req->count_c;
	/* write cntr1_iv1 for nPkts, and bearer */
	pce++;
	if (npkts == 1)
		npkts = 0;
	pce->data = req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
			npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT;

	/* write go */
	pce = cmdlistinfo->go_proc;
	pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);

	return 0;
}

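/*
 * Background note plus one assumption (neither in the original
 * source): f8 is the matching 3GPP confidentiality algorithm,
 * parameterized by COUNT-C, BEARER and DIRECTION. The npkts == 1 -> 0
 * rewrite above presumably reflects the hardware packet-count field
 * encoding a single packet as zero.
 */
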
static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info)
{
	int i, j, ents;
	struct ce_sps_data *pce_sps_data;
	struct sps_iovec *iovec;
	uint32_t cmd_flags = SPS_IOVEC_FLAG_CMD;

	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
	iovec = pce_sps_data->in_transfer.iovec;
	pr_info("==============================================\n");
	pr_info("CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n");
	pr_info("==============================================\n");
	for (i = 0; i < pce_sps_data->in_transfer.iovec_count; i++) {
		pr_info(" [%d] addr=0x%x  size=0x%x  flags=0x%x\n", i,
				iovec->addr, iovec->size, iovec->flags);
		if (iovec->flags & cmd_flags) {
			struct sps_command_element *pced;

			pced = (struct sps_command_element *)
					(GET_VIRT_ADDR(iovec->addr));
			ents = iovec->size/(sizeof(struct sps_command_element));
			for (j = 0; j < ents; j++) {
				pr_info("      [%d] [0x%x] 0x%x\n", j,
					pced->addr, pced->data);
				pced++;
			}
		}
		iovec++;
	}

	pr_info("==============================================\n");
	pr_info("PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n");
	pr_info("==============================================\n");
	iovec = pce_sps_data->out_transfer.iovec;
	for (i = 0; i < pce_sps_data->out_transfer.iovec_count; i++) {
		pr_info(" [%d] addr=0x%x  size=0x%x  flags=0x%x\n", i,
				iovec->addr, iovec->size, iovec->flags);
		iovec++;
	}
}

#ifdef QCE_DEBUG

static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
{
	_qce_dump_descr_fifos(pce_dev, req_info);
}

#define QCE_WRITE_REG(val, addr)				\
{								\
	pr_info("      [0x%pK] 0x%x\n", addr, (uint32_t)val);	\
	writel_relaxed(val, addr);				\
}

#else

static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
{
}

#define QCE_WRITE_REG(val, addr)				\
	writel_relaxed(val, addr)

#endif

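/*
 * Note (assumption, not stated in the original source): the *_direct
 * variants below program the crypto block through plain register
 * writes instead of the BAM command-list descriptors used by the
 * routines above, presumably serving engines where support_cmd_dscr
 * is false.
 */
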
static int _ce_setup_hash_direct(struct qce_device *pce_dev,
				struct qce_sha_req *sreq)
{
	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
	uint32_t diglen;
	bool use_hw_key = false;
	bool use_pipe_key = false;
	int i;
	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
	bool sha1 = false;
	uint32_t auth_cfg = 0;

	/* clear status */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);

	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/*
	 * Ensure previous instructions (setting the CONFIG register)
	 * were completed before starting to set the other config registers.
	 * This is to ensure the configurations are done in the correct
	 * endianness as set in the CONFIG registers.
	 */
	mb();

	if (sreq->alg == QCE_HASH_AES_CMAC) {
		/* write seg_cfg */
		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
		/* write seg_cfg */
		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
		/* write seg_cfg */
		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);

		/* Clear auth_ivn, auth_keyn registers */
		for (i = 0; i < 16; i++) {
			QCE_WRITE_REG(0, (pce_dev->iobase +
				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
			QCE_WRITE_REG(0, (pce_dev->iobase +
				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
		}
		/* write auth_bytecnt 0/1/2/3, start with 0 */
		for (i = 0; i < 4; i++)
			QCE_WRITE_REG(0, pce_dev->iobase +
						CRYPTO_AUTH_BYTECNT0_REG +
						i * sizeof(uint32_t));

		if (sreq->authklen == AES128_KEY_SIZE)
			auth_cfg = pce_dev->reg.auth_cfg_cmac_128;
		else
			auth_cfg = pce_dev->reg.auth_cfg_cmac_256;
	}

	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC) ||
			(sreq->alg == QCE_HASH_AES_CMAC)) {

		_byte_stream_to_net_words(mackey32, sreq->authkey,
						sreq->authklen);

		/* no more check for null key. use flag to check */

		if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY) ==
					QCRYPTO_CTX_USE_HW_KEY) {
			use_hw_key = true;
		} else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
						QCRYPTO_CTX_USE_PIPE_KEY) {
			use_pipe_key = true;
		} else {
			/* setup key */
			for (i = 0; i < authk_size_in_word; i++)
				QCE_WRITE_REG(mackey32[i], (pce_dev->iobase +
					(CRYPTO_AUTH_KEY0_REG +
						i*sizeof(uint32_t))));
		}
	}

	if (sreq->alg == QCE_HASH_AES_CMAC)
		goto go_proc;

	/* if not the last, the size has to be on the block boundary */
	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
		return -EIO;

	switch (sreq->alg) {
	case QCE_HASH_SHA1:
		auth_cfg = pce_dev->reg.auth_cfg_sha1;
		diglen = SHA1_DIGEST_SIZE;
		sha1 = true;
		break;
	case QCE_HASH_SHA1_HMAC:
		auth_cfg = pce_dev->reg.auth_cfg_hmac_sha1;
		diglen = SHA1_DIGEST_SIZE;
		sha1 = true;
		break;
	case QCE_HASH_SHA256:
		auth_cfg = pce_dev->reg.auth_cfg_sha256;
		diglen = SHA256_DIGEST_SIZE;
		break;
	case QCE_HASH_SHA256_HMAC:
		auth_cfg = pce_dev->reg.auth_cfg_hmac_sha256;
		diglen = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
	if (sreq->first_blk) {
		if (sha1) {
			for (i = 0; i < 5; i++)
				auth32[i] = _std_init_vector_sha1[i];
		} else {
			for (i = 0; i < 8; i++)
				auth32[i] = _std_init_vector_sha256[i];
		}
	} else {
		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
	}

	/* Set auth_ivn, auth_keyn registers */
	for (i = 0; i < 5; i++)
		QCE_WRITE_REG(auth32[i], (pce_dev->iobase +
			(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));

	if ((sreq->alg == QCE_HASH_SHA256) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
		for (i = 5; i < 8; i++)
			QCE_WRITE_REG(auth32[i], (pce_dev->iobase +
				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
	}

	/* write auth_bytecnt 0/1/2/3, start with 0 */
	for (i = 0; i < 2; i++)
		QCE_WRITE_REG(sreq->auth_data[i], pce_dev->iobase +
						CRYPTO_AUTH_BYTECNT0_REG +
						i * sizeof(uint32_t));

	/* Set/reset last bit in CFG register */
	if (sreq->last_blk)
		auth_cfg |= 1 << CRYPTO_LAST;
	else
		auth_cfg &= ~(1 << CRYPTO_LAST);
	if (sreq->first_blk)
		auth_cfg |= 1 << CRYPTO_FIRST;
	else
		auth_cfg &= ~(1 << CRYPTO_FIRST);
	if (use_hw_key)
		auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
	if (use_pipe_key)
		auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
go_proc:
	/* write seg_cfg */
	QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
	/* write auth seg_size */
	QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);

	/* write auth_seg_start */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);

	/* reset encr seg_cfg */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);

	/* write seg_size */
	QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/* issue go to crypto */
	if (use_hw_key == false) {
		QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
				(1 << CRYPTO_CLR_CNTXT)),
				pce_dev->iobase + CRYPTO_GOPROC_REG);
	} else {
		QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
				pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
	}
	/*
	 * Ensure previous instructions (setting the GO register)
	 * were completed before issuing a DMA transfer request.
	 */
	mb();
	return 0;
}

static int _ce_setup_aead_direct(struct qce_device *pce_dev,
		struct qce_req *q_req, uint32_t totallen_in, uint32_t coffset)
{
	int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
	int i;
	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
	uint32_t a_cfg;
	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
	uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
	uint32_t enck_size_in_word = 0;
	uint32_t enciv_in_word;
	uint32_t key_size;
	uint32_t ivsize = q_req->ivsize;
	uint32_t encr_cfg;

	/* clear status */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);

	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/*
	 * Ensure previous instructions (setting the CONFIG register)
	 * were completed before starting to set the other config registers.
	 * This is to ensure the configurations are done in the correct
	 * endianness as set in the CONFIG registers.
	 */
	mb();

	key_size = q_req->encklen;
	enck_size_in_word = key_size/sizeof(uint32_t);

	switch (q_req->alg) {

	case CIPHER_ALG_DES:

		switch (q_req->mode) {
		case QCE_MODE_CBC:
			encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
			break;
		default:
			return -EINVAL;
		}

		enciv_in_word = 2;
		break;

	case CIPHER_ALG_3DES:

		switch (q_req->mode) {
		case QCE_MODE_CBC:
			encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
			break;
		default:
			return -EINVAL;
		}

		enciv_in_word = 2;

		break;

	case CIPHER_ALG_AES:

		switch (q_req->mode) {
		case QCE_MODE_CBC:
			if (key_size == AES128_KEY_SIZE)
				encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
			else if (key_size == AES256_KEY_SIZE)
				encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		enciv_in_word = 4;
		break;

	default:
		return -EINVAL;
	}

	/* write CNTR0_IV0_REG */
	if (q_req->mode != QCE_MODE_ECB) {
		_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
		for (i = 0; i < enciv_in_word; i++)
			QCE_WRITE_REG(enciv32[i], pce_dev->iobase +
				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));
	}

	/*
	 * write encr key
	 * do not use hw key or pipe key
	 */
	_byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
	for (i = 0; i < enck_size_in_word; i++)
		QCE_WRITE_REG(enckey32[i], pce_dev->iobase +
				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)));

	/* write encr seg cfg */
	if (q_req->dir == QCE_ENCRYPT)
		encr_cfg |= (1 << CRYPTO_ENCODE);
	QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);

	/* we only support sha1-hmac and sha256-hmac at this point */
	_byte_stream_to_net_words(mackey32, q_req->authkey,
					q_req->authklen);
	for (i = 0; i < authk_size_in_word; i++)
		QCE_WRITE_REG(mackey32[i], pce_dev->iobase +
			(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)));

	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC) {
		for (i = 0; i < 5; i++)
			QCE_WRITE_REG(_std_init_vector_sha1[i],
				pce_dev->iobase +
				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
	} else {
		for (i = 0; i < 8; i++)
			QCE_WRITE_REG(_std_init_vector_sha256[i],
				pce_dev->iobase +
				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
	}

	/* write auth_bytecnt 0/1, start with 0 */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG);

	/* write encr seg size */
	QCE_WRITE_REG(q_req->cryptlen, pce_dev->iobase +
			CRYPTO_ENCR_SEG_SIZE_REG);

	/* write encr start */
	QCE_WRITE_REG(coffset & 0xffff, pce_dev->iobase +
			CRYPTO_ENCR_SEG_START_REG);

	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC)
		a_cfg = pce_dev->reg.auth_cfg_aead_sha1_hmac;
	else
		a_cfg = pce_dev->reg.auth_cfg_aead_sha256_hmac;

	if (q_req->dir == QCE_ENCRYPT)
		a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
	else
		a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);

	/* write auth seg_cfg */
	QCE_WRITE_REG(a_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/* write auth seg_size */
	QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);

	/* write auth_seg_start */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);

	/* write seg_size */
	QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/* issue go to crypto */
	QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
				(1 << CRYPTO_CLR_CNTXT)),
				pce_dev->iobase + CRYPTO_GOPROC_REG);
	/*
	 * Ensure previous instructions (setting the GO register)
	 * were completed before issuing a DMA transfer request.
	 */
	mb();
	return 0;
}

static int _ce_setup_cipher_direct(struct qce_device *pce_dev,
		struct qce_req *creq, uint32_t totallen_in, uint32_t coffset)
{
	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
			0, 0, 0, 0};
	uint32_t enck_size_in_word = 0;
	uint32_t key_size;
	bool use_hw_key = false;
	bool use_pipe_key = false;
	uint32_t encr_cfg = 0;
	uint32_t ivsize = creq->ivsize;
	int i;

	/* clear status */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);

	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/*
	 * Ensure previous instructions (setting the CONFIG register)
	 * were completed before starting to set the other config registers.
	 * This is to ensure the configurations are done in the correct
	 * endianness as set in the CONFIG registers.
	 */
	mb();

	if (creq->mode == QCE_MODE_XTS)
		key_size = creq->encklen/2;
	else
		key_size = creq->encklen;

	if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
		use_hw_key = true;
	} else {
		if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
					QCRYPTO_CTX_USE_PIPE_KEY)
			use_pipe_key = true;
	}
	if ((use_pipe_key == false) && (use_hw_key == false)) {
		_byte_stream_to_net_words(enckey32, creq->enckey, key_size);
		enck_size_in_word = key_size/sizeof(uint32_t);
	}
	if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
		uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
		uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
		uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
		uint32_t auth_cfg = 0;

		/* Clear auth_ivn, auth_keyn registers */
		for (i = 0; i < 16; i++) {
			QCE_WRITE_REG(0, (pce_dev->iobase +
				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
			QCE_WRITE_REG(0, (pce_dev->iobase +
				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
		}
		/* write auth_bytecnt 0/1/2/3, start with 0 */
		for (i = 0; i < 4; i++)
			QCE_WRITE_REG(0, pce_dev->iobase +
						CRYPTO_AUTH_BYTECNT0_REG +
						i * sizeof(uint32_t));
		/* write nonce */
		_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
		for (i = 0; i < noncelen32; i++)
			QCE_WRITE_REG(nonce32[i], pce_dev->iobase +
				CRYPTO_AUTH_INFO_NONCE0_REG +
					(i*sizeof(uint32_t)));

		if (creq->authklen == AES128_KEY_SIZE)
			auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
		else {
			if (creq->authklen == AES256_KEY_SIZE)
				auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
		}
		if (creq->dir == QCE_ENCRYPT)
			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
		else
			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
		auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);

		if (use_hw_key == true) {
			auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
		} else {
			auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
			/* write auth key */
			for (i = 0; i < authklen32; i++)
				QCE_WRITE_REG(enckey32[i], pce_dev->iobase +
				CRYPTO_AUTH_KEY0_REG + (i*sizeof(uint32_t)));
		}
		QCE_WRITE_REG(auth_cfg, pce_dev->iobase +
					CRYPTO_AUTH_SEG_CFG_REG);
		if (creq->dir == QCE_ENCRYPT) {
			QCE_WRITE_REG(totallen_in, pce_dev->iobase +
						CRYPTO_AUTH_SEG_SIZE_REG);
		} else {
			QCE_WRITE_REG((totallen_in - creq->authsize),
				pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
		}
		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
	} else {
		if (creq->op != QCE_REQ_AEAD)
			QCE_WRITE_REG(0, pce_dev->iobase +
						CRYPTO_AUTH_SEG_CFG_REG);
	}
	/*
	 * Ensure previous instructions (writes to all AUTH registers)
	 * were completed before accessing a register that is not in
	 * the same 1K range.
	 */
	mb();
	switch (creq->mode) {
	case QCE_MODE_ECB:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
		break;
	case QCE_MODE_CBC:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
		break;
	case QCE_MODE_XTS:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
		break;
	case QCE_MODE_CCM:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
		break;
	case QCE_MODE_CTR:
	default:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
		break;
	}

	switch (creq->alg) {
	case CIPHER_ALG_DES:
		if (creq->mode != QCE_MODE_ECB) {
			encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
			QCE_WRITE_REG(enciv32[0], pce_dev->iobase +
						CRYPTO_CNTR0_IV0_REG);
			QCE_WRITE_REG(enciv32[1], pce_dev->iobase +
						CRYPTO_CNTR1_IV1_REG);
		} else {
			encr_cfg = pce_dev->reg.encr_cfg_des_ecb;
		}
		if (use_hw_key == false) {
			QCE_WRITE_REG(enckey32[0], pce_dev->iobase +
							CRYPTO_ENCR_KEY0_REG);
			QCE_WRITE_REG(enckey32[1], pce_dev->iobase +
							CRYPTO_ENCR_KEY1_REG);
		}
		break;
	case CIPHER_ALG_3DES:
		if (creq->mode != QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
			QCE_WRITE_REG(enciv32[0], pce_dev->iobase +
						CRYPTO_CNTR0_IV0_REG);
			QCE_WRITE_REG(enciv32[1], pce_dev->iobase +
						CRYPTO_CNTR1_IV1_REG);
			encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
		} else {
			encr_cfg = pce_dev->reg.encr_cfg_3des_ecb;
		}
		if (use_hw_key == false) {
			/* write encr key */
			for (i = 0; i < 6; i++)
				QCE_WRITE_REG(enckey32[i], (pce_dev->iobase +
				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t))));
1727 }
1728 break;
1729 case CIPHER_ALG_AES:
1730 default:
1731 if (creq->mode == QCE_MODE_XTS) {
1732 uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
1733 = {0, 0, 0, 0, 0, 0, 0, 0};
1734 uint32_t xtsklen =
1735 creq->encklen/(2 * sizeof(uint32_t));
1736
1737 if ((use_hw_key == false) && (use_pipe_key == false)) {
1738 _byte_stream_to_net_words(xtskey32,
1739 (creq->enckey + creq->encklen/2),
1740 creq->encklen/2);
1741 /* write xts encr key */
1742 for (i = 0; i < xtsklen; i++)
1743 QCE_WRITE_REG(xtskey32[i],
1744 pce_dev->iobase +
1745 CRYPTO_ENCR_XTS_KEY0_REG +
1746 (i * sizeof(uint32_t)));
1747 }
1748 /* write xts du size */
1749 switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
1750 case QCRYPTO_CTX_XTS_DU_SIZE_512B:
1751 QCE_WRITE_REG(
1752 min((uint32_t)QCE_SECTOR_SIZE,
1753 creq->cryptlen), pce_dev->iobase +
1754 CRYPTO_ENCR_XTS_DU_SIZE_REG);
1755 break;
1756 case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
1757 QCE_WRITE_REG(
1758 min((uint32_t)(QCE_SECTOR_SIZE * 2),
1759 creq->cryptlen), pce_dev->iobase +
1760 CRYPTO_ENCR_XTS_DU_SIZE_REG);
1761 break;
1762 default:
1763 QCE_WRITE_REG(creq->cryptlen,
1764 pce_dev->iobase +
1765 CRYPTO_ENCR_XTS_DU_SIZE_REG);
1766 break;
1767 }
1768 }
1769 if (creq->mode != QCE_MODE_ECB) {
1770 if (creq->mode == QCE_MODE_XTS)
1771 _byte_stream_swap_to_net_words(enciv32,
1772 creq->iv, ivsize);
1773 else
1774 _byte_stream_to_net_words(enciv32, creq->iv,
1775 ivsize);
1776
1777 /* write encr cntr iv */
1778 for (i = 0; i <= 3; i++)
1779 QCE_WRITE_REG(enciv32[i], pce_dev->iobase +
1780 CRYPTO_CNTR0_IV0_REG +
1781 (i * sizeof(uint32_t)));
1782
1783 if (creq->mode == QCE_MODE_CCM) {
1784 /* write cntr iv for ccm */
1785 for (i = 0; i <= 3; i++)
1786 QCE_WRITE_REG(enciv32[i],
1787 pce_dev->iobase +
1788 CRYPTO_ENCR_CCM_INT_CNTR0_REG +
1789 (i * sizeof(uint32_t)));
1790 /* update cntr_iv[3] by one */
1791 QCE_WRITE_REG((enciv32[3] + 1),
1792 pce_dev->iobase +
1793 CRYPTO_CNTR0_IV0_REG +
1794 (3 * sizeof(uint32_t)));
1795 }
1796 }
1797
1798 if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
1799 encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
1800 CRYPTO_ENCR_KEY_SZ);
1801 } else {
1802 if ((use_hw_key == false) && (use_pipe_key == false)) {
1803 for (i = 0; i < enck_size_in_word; i++)
1804 QCE_WRITE_REG(enckey32[i],
1805 pce_dev->iobase +
1806 CRYPTO_ENCR_KEY0_REG +
1807 (i * sizeof(uint32_t)));
1808 }
1809 } /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
1810 break;
1811 } /* end of switch (creq->mode) */
1812
1813 if (use_pipe_key)
1814 encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
1815 << CRYPTO_USE_PIPE_KEY_ENCR);
1816
1817 /* write encr seg cfg */
1818 encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
1819 if (use_hw_key == true)
1820 encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
1821 else
1822 encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
1823 /* write encr seg cfg */
1824 QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
1825
1826 /* write encr seg size */
1827 if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT)) {
1828 QCE_WRITE_REG((creq->cryptlen + creq->authsize),
1829 pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
1830 } else {
1831 QCE_WRITE_REG(creq->cryptlen,
1832 pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
1833 }
1834
1835 /* write encr seg start */
1836 QCE_WRITE_REG((coffset & 0xffff),
1837 pce_dev->iobase + CRYPTO_ENCR_SEG_START_REG);
1838
1839 /* write encr counter mask */
1840 QCE_WRITE_REG(0xffffffff,
1841 pce_dev->iobase + CRYPTO_CNTR_MASK_REG);
1842 QCE_WRITE_REG(0xffffffff,
1843 pce_dev->iobase + CRYPTO_CNTR_MASK_REG0);
1844 QCE_WRITE_REG(0xffffffff,
1845 pce_dev->iobase + CRYPTO_CNTR_MASK_REG1);
1846 QCE_WRITE_REG(0xffffffff,
1847 pce_dev->iobase + CRYPTO_CNTR_MASK_REG2);
1848
1849 /* write seg size */
1850 QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
1851
1852 QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
1853 CRYPTO_CONFIG_REG));
1854 /* issue go to crypto */
1855 if (use_hw_key == false) {
1856 QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
1857 (1 << CRYPTO_CLR_CNTXT)),
1858 pce_dev->iobase + CRYPTO_GOPROC_REG);
1859 } else {
1860 QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
1861 pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
1862 }
1863 /*
1864 * Ensure the previous write (setting the GO register)
1865 * has completed before issuing a DMA transfer request
1866 */
1867 mb();
1868 return 0;
1869};
1870
1871static int _ce_f9_setup_direct(struct qce_device *pce_dev,
1872 struct qce_f9_req *req)
1873{
1874 uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
1875 uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
1876 uint32_t auth_cfg;
1877 int i;
1878
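/*
 * Kasumi and SNOW 3G integrity keys are 128 bits, so with a 16-byte
 * OTA_KEY_SIZE the key occupies 16 / 4 = 4 words and fills
 * CRYPTO_AUTH_IV0_REG through CRYPTO_AUTH_IV3_REG below.
 */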
1879 switch (req->algorithm) {
1880 case QCE_OTA_ALGO_KASUMI:
1881 auth_cfg = pce_dev->reg.auth_cfg_kasumi;
1882 break;
1883 case QCE_OTA_ALGO_SNOW3G:
1884 default:
1885 auth_cfg = pce_dev->reg.auth_cfg_snow3g;
1886 break;
1887 }
1888
1889 /* clear status */
1890 QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
1891
1892 /* set big endian configuration */
1893 QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
1894 CRYPTO_CONFIG_REG));
1895 /*
1896 * Ensure the previous write (setting the CONFIG register)
1897 * has completed before starting to set the other config registers.
1898 * This ensures the configuration is done in the correct endianness,
1899 * as set in the CONFIG register.
1900 */
1901 mb();
1902
1903 /* write enc_seg_cfg */
1904 QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
1905
1906 /* write encr_seg_size */
1907 QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
1908
1909 /* write key in CRYPTO_AUTH_IV0-3_REG */
1910 _byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
1911 for (i = 0; i < key_size_in_word; i++)
1912 QCE_WRITE_REG(ikey32[i], (pce_dev->iobase +
1913 (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
1914
1915 /* write last bits in CRYPTO_AUTH_IV4_REG */
1916 QCE_WRITE_REG(req->last_bits, (pce_dev->iobase +
1917 CRYPTO_AUTH_IV4_REG));
1918
1919 /* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
1920 QCE_WRITE_REG(req->fresh, (pce_dev->iobase +
1921 CRYPTO_AUTH_BYTECNT0_REG));
1922
1923 /* write count-i to CRYPTO_AUTH_BYTECNT1_REG */
1924 QCE_WRITE_REG(req->count_i, (pce_dev->iobase +
1925 CRYPTO_AUTH_BYTECNT1_REG));
1926
1927 /* write auth seg cfg */
1928 if (req->direction == QCE_OTA_DIR_DOWNLINK)
1929 auth_cfg |= BIT(CRYPTO_F9_DIRECTION);
1930 QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
1931
1932 /* write auth seg size */
1933 QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
1934
1935 /* write auth seg start*/
1936 QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
1937
1938 /* write seg size */
1939 QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
1940
1941 /* set little endian configuration before go */
1942 QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
1943 CRYPTO_CONFIG_REG));
1944 /* write go */
1945 QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
1946 (1 << CRYPTO_CLR_CNTXT)),
1947 pce_dev->iobase + CRYPTO_GOPROC_REG);
1948 /*
1949 * Ensure the previous write (setting the GO register)
1950 * has completed before issuing a DMA transfer request
1951 */
1952 mb();
1953 return 0;
1954}
1955
1956static int _ce_f8_setup_direct(struct qce_device *pce_dev,
1957 struct qce_f8_req *req, bool key_stream_mode,
1958 uint16_t npkts, uint16_t cipher_offset, uint16_t cipher_size)
1959{
1960 int i = 0;
1961 uint32_t encr_cfg = 0;
1962 uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
1963 uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
1964
1965 switch (req->algorithm) {
1966 case QCE_OTA_ALGO_KASUMI:
1967 encr_cfg = pce_dev->reg.encr_cfg_kasumi;
1968 break;
1969 case QCE_OTA_ALGO_SNOW3G:
1970 default:
1971 encr_cfg = pce_dev->reg.encr_cfg_snow3g;
1972 break;
1973 }
1974 /* clear status */
1975 QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
1976 /* set big endian configuration */
1977 QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
1978 CRYPTO_CONFIG_REG));
1979 /* write auth seg configuration */
1980 QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
1981 /* write auth seg size */
1982 QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
1983
1984 /* write key */
1985 _byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
1986
1987 for (i = 0; i < key_size_in_word; i++)
1988 QCE_WRITE_REG(ckey32[i], (pce_dev->iobase +
1989 (CRYPTO_ENCR_KEY0_REG + i*sizeof(uint32_t))));
1990 /* write encr seg cfg */
1991 if (key_stream_mode)
1992 encr_cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
1993 if (req->direction == QCE_OTA_DIR_DOWNLINK)
1994 encr_cfg |= BIT(CRYPTO_F8_DIRECTION);
1995 QCE_WRITE_REG(encr_cfg, pce_dev->iobase +
1996 CRYPTO_ENCR_SEG_CFG_REG);
1997
1998 /* write encr seg start */
1999 QCE_WRITE_REG((cipher_offset & 0xffff), pce_dev->iobase +
2000 CRYPTO_ENCR_SEG_START_REG);
2001 /* write encr seg size */
2002 QCE_WRITE_REG(cipher_size, pce_dev->iobase +
2003 CRYPTO_ENCR_SEG_SIZE_REG);
2004
2005 /* write seg size */
2006 QCE_WRITE_REG(req->data_len, pce_dev->iobase +
2007 CRYPTO_SEG_SIZE_REG);
2008
2009 /* write cntr0_iv0 for countC */
2010 QCE_WRITE_REG(req->count_c, pce_dev->iobase +
2011 CRYPTO_CNTR0_IV0_REG);
2012 /* write cntr1_iv1 for nPkts, and bearer */
2013 if (npkts == 1)
2014 npkts = 0;
2015 QCE_WRITE_REG(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
2016 npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT,
2017 pce_dev->iobase + CRYPTO_CNTR1_IV1_REG);
2018
2019 /* set little endian configuration before go */
2020 QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
2021 CRYPTO_CONFIG_REG));
2022 /* write go */
2023 QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
2024 (1 << CRYPTO_CLR_CNTXT)),
2025 pce_dev->iobase + CRYPTO_GOPROC_REG);
2026 /*
2027 * Ensure the previous write (setting the GO register)
2028 * has completed before issuing a DMA transfer request
2029 */
2030 mb();
2031 return 0;
2032}
2033
2034
2035static int _qce_unlock_other_pipes(struct qce_device *pce_dev, int req_info)
2036{
2037 int rc = 0;
2038 struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info
2039 [req_info].ce_sps;
2040
2041 if (pce_dev->no_get_around || pce_dev->support_cmd_dscr == false)
2042 return rc;
2043
2044 rc = sps_transfer_one(pce_dev->ce_bam_info.consumer.pipe,
2045 GET_PHYS_ADDR(pce_sps_data->
2046 cmdlistptr.unlock_all_pipes.cmdlist),
2047 0, NULL, (SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_UNLOCK));
2048 if (rc) {
2049 pr_err("sps_xfr_one() fail rc=%d", rc);
2050 rc = -EINVAL;
2051 }
2052 return rc;
2053}
2054
2055static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
2056 bool is_complete);
2057
2058static int _aead_complete(struct qce_device *pce_dev, int req_info)
2059{
2060 struct aead_request *areq;
2061 unsigned char mac[SHA256_DIGEST_SIZE];
2062 uint32_t ccm_fail_status = 0;
2063 uint32_t result_dump_status;
2064 int32_t result_status = 0;
2065 struct ce_request_info *preq_info;
2066 struct ce_sps_data *pce_sps_data;
2067 qce_comp_func_ptr_t qce_callback;
2068
2069 preq_info = &pce_dev->ce_request_info[req_info];
2070 pce_sps_data = &preq_info->ce_sps;
2071 qce_callback = preq_info->qce_cb;
2072 areq = (struct aead_request *) preq_info->areq;
2073 if (areq->src != areq->dst) {
2074 qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
2075 DMA_FROM_DEVICE);
2076 }
2077 qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
2078 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
2079 DMA_TO_DEVICE);
2080
2081 if (preq_info->asg)
2082 qce_dma_unmap_sg(pce_dev->pdev, preq_info->asg,
2083 preq_info->assoc_nents, DMA_TO_DEVICE);
2084 /* check MAC */
2085 memcpy(mac, (char *)(&pce_sps_data->result->auth_iv[0]),
2086 SHA256_DIGEST_SIZE);
2087
2088 /* read status before unlock */
2089 if (preq_info->dir == QCE_DECRYPT) {
2090 if (pce_dev->no_get_around)
2091 if (pce_dev->no_ccm_mac_status_get_around)
2092 ccm_fail_status = be32_to_cpu(pce_sps_data->
2093 result->status);
2094 else
2095 ccm_fail_status = be32_to_cpu(pce_sps_data->
2096 result_null->status);
2097 else
2098 ccm_fail_status = readl_relaxed(pce_dev->iobase +
2099 CRYPTO_STATUS_REG);
2100 }
2101 if (_qce_unlock_other_pipes(pce_dev, req_info)) {
2102 qce_free_req_info(pce_dev, req_info, true);
2103 qce_callback(areq, mac, NULL, -ENXIO);
2104 return -ENXIO;
2105 }
2106 result_dump_status = be32_to_cpu(pce_sps_data->result->status);
2107 pce_sps_data->result->status = 0;
2108
2109 if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
2110 | (1 << CRYPTO_HSD_ERR))) {
2111 pr_err("aead operation error. Status %x\n", result_dump_status);
2112 result_status = -ENXIO;
2113 } else if (pce_sps_data->consumer_status |
2114 pce_sps_data->producer_status) {
2115 pr_err("aead sps operation error. sps status %x %x\n",
2116 pce_sps_data->consumer_status,
2117 pce_sps_data->producer_status);
2118 result_status = -ENXIO;
2119 }
2120
2121 if (preq_info->mode == QCE_MODE_CCM) {
2122 /*
2123 * Do not take MAC_FAILED from the result dump; instead, use
2124 * the status just read from the device.
2125 */
2126 if (result_status == 0 && (preq_info->dir == QCE_DECRYPT) &&
2127 (ccm_fail_status & (1 << CRYPTO_MAC_FAILED)))
2128 result_status = -EBADMSG;
2129 qce_free_req_info(pce_dev, req_info, true);
2130 qce_callback(areq, mac, NULL, result_status);
2131
2132 } else {
2133 uint32_t ivsize = 0;
2134 struct crypto_aead *aead;
2135 unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
2136
2137 aead = crypto_aead_reqtfm(areq);
2138 ivsize = crypto_aead_ivsize(aead);
2139 memcpy(iv, (char *)(pce_sps_data->result->encr_cntr_iv),
2140 sizeof(iv));
2141 qce_free_req_info(pce_dev, req_info, true);
2142 qce_callback(areq, mac, iv, result_status);
2143
2144 }
2145 return 0;
2146};
2147
2148static int _sha_complete(struct qce_device *pce_dev, int req_info)
2149{
2150 struct ahash_request *areq;
2151 unsigned char digest[SHA256_DIGEST_SIZE];
2152 uint32_t bytecount32[2];
2153 int32_t result_status = 0;
2154 uint32_t result_dump_status;
2155 struct ce_request_info *preq_info;
2156 struct ce_sps_data *pce_sps_data;
2157 qce_comp_func_ptr_t qce_callback;
2158
2159 preq_info = &pce_dev->ce_request_info[req_info];
2160 pce_sps_data = &preq_info->ce_sps;
2161 qce_callback = preq_info->qce_cb;
2162 areq = (struct ahash_request *) preq_info->areq;
2163 if (!areq) {
2164 pr_err("sha operation error. areq is NULL\n");
2165 return -ENXIO;
2166 }
2167 qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
2168 DMA_TO_DEVICE);
2169 memcpy(digest, (char *)(&pce_sps_data->result->auth_iv[0]),
2170 SHA256_DIGEST_SIZE);
2171 _byte_stream_to_net_words(bytecount32,
2172 (unsigned char *)pce_sps_data->result->auth_byte_count,
2173 2 * CRYPTO_REG_SIZE);
2174
2175 if (_qce_unlock_other_pipes(pce_dev, req_info)) {
2176 qce_free_req_info(pce_dev, req_info, true);
2177 qce_callback(areq, digest, (char *)bytecount32,
2178 -ENXIO);
2179 return -ENXIO;
2180 }
2181
2182 result_dump_status = be32_to_cpu(pce_sps_data->result->status);
2183 pce_sps_data->result->status = 0;
2184 if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
2185 | (1 << CRYPTO_HSD_ERR))) {
2186
2187 pr_err("sha operation error. Status %x\n", result_dump_status);
2188 result_status = -ENXIO;
2189 } else if (pce_sps_data->consumer_status) {
2190 pr_err("sha sps operation error. sps status %x\n",
2191 pce_sps_data->consumer_status);
2192 result_status = -ENXIO;
2193 }
2194 qce_free_req_info(pce_dev, req_info, true);
2195 qce_callback(areq, digest, (char *)bytecount32, result_status);
2196 return 0;
2197}
2198
2199static int _f9_complete(struct qce_device *pce_dev, int req_info)
2200{
2201 uint32_t mac_i;
2202 int32_t result_status = 0;
2203 uint32_t result_dump_status;
2204 struct ce_request_info *preq_info;
2205 struct ce_sps_data *pce_sps_data;
2206 qce_comp_func_ptr_t qce_callback;
2207 void *areq;
2208
2209 preq_info = &pce_dev->ce_request_info[req_info];
2210 pce_sps_data = &preq_info->ce_sps;
2211 qce_callback = preq_info->qce_cb;
2212 areq = preq_info->areq;
2213 dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
2214 preq_info->ota_size, DMA_TO_DEVICE);
2215 _byte_stream_to_net_words(&mac_i,
2216 (char *)(&pce_sps_data->result->auth_iv[0]),
2217 CRYPTO_REG_SIZE);
2218
2219 if (_qce_unlock_other_pipes(pce_dev, req_info)) {
2220 qce_free_req_info(pce_dev, req_info, true);
2221 qce_callback(areq, NULL, NULL, -ENXIO);
2222 return -ENXIO;
2223 }
2224
2225 result_dump_status = be32_to_cpu(pce_sps_data->result->status);
2226 pce_sps_data->result->status = 0;
2227 if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
2228 | (1 << CRYPTO_HSD_ERR))) {
2229 pr_err("f9 operation error. Status %x\n", result_dump_status);
2230 result_status = -ENXIO;
2231 } else if (pce_sps_data->consumer_status |
2232 pce_sps_data->producer_status) {
2233 pr_err("f9 sps operation error. sps status %x %x\n",
2234 pce_sps_data->consumer_status,
2235 pce_sps_data->producer_status);
2236 result_status = -ENXIO;
2237 }
2238 qce_free_req_info(pce_dev, req_info, true);
2239 qce_callback(areq, (char *)&mac_i, NULL, result_status);
2240
2241 return 0;
2242}
2243
2244static int _ablk_cipher_complete(struct qce_device *pce_dev, int req_info)
2245{
2246 struct ablkcipher_request *areq;
2247 unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
2248 int32_t result_status = 0;
2249 uint32_t result_dump_status;
2250 struct ce_request_info *preq_info;
2251 struct ce_sps_data *pce_sps_data;
2252 qce_comp_func_ptr_t qce_callback;
2253
2254 preq_info = &pce_dev->ce_request_info[req_info];
2255 pce_sps_data = &preq_info->ce_sps;
2256 qce_callback = preq_info->qce_cb;
2257 areq = (struct ablkcipher_request *) preq_info->areq;
2258 if (areq->src != areq->dst) {
2259 qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
2260 preq_info->dst_nents, DMA_FROM_DEVICE);
2261 }
2262 qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
2263 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
2264 DMA_TO_DEVICE);
2265
2266 if (_qce_unlock_other_pipes(pce_dev, req_info)) {
2267 qce_free_req_info(pce_dev, req_info, true);
2268 qce_callback(areq, NULL, NULL, -ENXIO);
2269 return -ENXIO;
2270 }
2271 result_dump_status = be32_to_cpu(pce_sps_data->result->status);
2272 pce_sps_data->result->status = 0;
2273
2274 if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
2275 | (1 << CRYPTO_HSD_ERR))) {
2276 pr_err("ablk_cipher operation error. Status %x\n",
2277 result_dump_status);
2278 result_status = -ENXIO;
2279 } else if (pce_sps_data->consumer_status |
2280 pce_sps_data->producer_status) {
2281 pr_err("ablk_cipher sps operation error. sps status %x %x\n",
2282 pce_sps_data->consumer_status,
2283 pce_sps_data->producer_status);
2284 result_status = -ENXIO;
2285 }
2286
2287 if (preq_info->mode == QCE_MODE_ECB) {
2288 qce_free_req_info(pce_dev, req_info, true);
2289 qce_callback(areq, NULL, NULL, pce_sps_data->consumer_status |
2290 result_status);
2291 } else {
2292 if (pce_dev->ce_bam_info.minor_version == 0) {
2293 if (preq_info->mode == QCE_MODE_CBC) {
2294 if (preq_info->dir == QCE_DECRYPT)
2295 memcpy(iv, (char *)preq_info->dec_iv,
2296 sizeof(iv));
2297 else
2298 memcpy(iv, (unsigned char *)
2299 (sg_virt(areq->src) +
2300 areq->src->length - 16),
2301 sizeof(iv));
2302 }
2303 if ((preq_info->mode == QCE_MODE_CTR) ||
2304 (preq_info->mode == QCE_MODE_XTS)) {
2305 uint32_t num_blk = 0;
2306 uint32_t cntr_iv3 = 0;
2307 unsigned long long cntr_iv64 = 0;
2308 unsigned char *b = (unsigned char *)(&cntr_iv3);
2309
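/*
 * The last four IV bytes hold a big-endian block counter. It is
 * advanced by the number of 16-byte blocks processed (one for XTS),
 * modulo 2^32: e.g. a counter of 0xFFFFFFFE after a 64-byte
 * (4-block) CTR request wraps to 0x00000002.
 */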
2310 memcpy(iv, areq->info, sizeof(iv));
2311 if (preq_info->mode != QCE_MODE_XTS)
2312 num_blk = areq->nbytes/16;
2313 else
2314 num_blk = 1;
2315 cntr_iv3 = ((*(iv + 12) << 24) & 0xff000000) |
2316 (((*(iv + 13)) << 16) & 0xff0000) |
2317 (((*(iv + 14)) << 8) & 0xff00) |
2318 (*(iv + 15) & 0xff);
2319 cntr_iv64 =
2320 (((unsigned long long)cntr_iv3 &
2321 0xFFFFFFFFULL) +
2322 (unsigned long long)num_blk) %
2323 (unsigned long long)(0x100000000ULL);
2324
2325 cntr_iv3 = (u32)(cntr_iv64 & 0xFFFFFFFF);
2326 *(iv + 15) = (char)(*b);
2327 *(iv + 14) = (char)(*(b + 1));
2328 *(iv + 13) = (char)(*(b + 2));
2329 *(iv + 12) = (char)(*(b + 3));
2330 }
2331 } else {
2332 memcpy(iv,
2333 (char *)(pce_sps_data->result->encr_cntr_iv),
2334 sizeof(iv));
2335 }
2336 qce_free_req_info(pce_dev, req_info, true);
2337 qce_callback(areq, NULL, iv, result_status);
2338 }
2339 return 0;
2340}
2341
2342static int _f8_complete(struct qce_device *pce_dev, int req_info)
2343{
2344 int32_t result_status = 0;
2345 uint32_t result_dump_status;
2346 uint32_t result_dump_status2;
2347 struct ce_request_info *preq_info;
2348 struct ce_sps_data *pce_sps_data;
2349 qce_comp_func_ptr_t qce_callback;
2350 void *areq;
2351
2352 preq_info = &pce_dev->ce_request_info[req_info];
2353 pce_sps_data = &preq_info->ce_sps;
2354 qce_callback = preq_info->qce_cb;
2355 areq = preq_info->areq;
2356 if (preq_info->phy_ota_dst)
2357 dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst,
2358 preq_info->ota_size, DMA_FROM_DEVICE);
2359 if (preq_info->phy_ota_src)
2360 dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
2361 preq_info->ota_size, (preq_info->phy_ota_dst) ?
2362 DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
2363
2364 if (_qce_unlock_other_pipes(pce_dev, req_info)) {
2365 qce_free_req_info(pce_dev, req_info, true);
2366 qce_callback(areq, NULL, NULL, -ENXIO);
2367 return -ENXIO;
2368 }
2369 result_dump_status = be32_to_cpu(pce_sps_data->result->status);
2370 result_dump_status2 = be32_to_cpu(pce_sps_data->result->status2);
2371
2372 if ((result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
2373 | (1 << CRYPTO_HSD_ERR)))) {
2374 pr_err(
2375 "f8 oper error. Dump Sta %x Sta2 %x req %d\n",
2376 result_dump_status, result_dump_status2, req_info);
2377 result_status = -ENXIO;
2378 } else if (pce_sps_data->consumer_status |
2379 pce_sps_data->producer_status) {
2380 pr_err("f8 sps operation error. sps status %x %x\n",
2381 pce_sps_data->consumer_status,
2382 pce_sps_data->producer_status);
2383 result_status = -ENXIO;
2384 }
2385 pce_sps_data->result->status = 0;
2386 pce_sps_data->result->status2 = 0;
2387 qce_free_req_info(pce_dev, req_info, true);
2388 qce_callback(areq, NULL, NULL, result_status);
2389 return 0;
2390}
2391
2392static void _qce_sps_iovec_count_init(struct qce_device *pce_dev, int req_info)
2393{
2394 struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info[req_info]
2395 .ce_sps;
2396 pce_sps_data->in_transfer.iovec_count = 0;
2397 pce_sps_data->out_transfer.iovec_count = 0;
2398}
2399
2400static void _qce_set_flag(struct sps_transfer *sps_bam_pipe, uint32_t flag)
2401{
2402 struct sps_iovec *iovec;
2403
2404 if (sps_bam_pipe->iovec_count == 0)
2405 return;
2406 iovec = sps_bam_pipe->iovec + (sps_bam_pipe->iovec_count - 1);
2407 iovec->flags |= flag;
2408}
2409
2410static int _qce_sps_add_data(dma_addr_t paddr, uint32_t len,
2411 struct sps_transfer *sps_bam_pipe)
2412{
2413 struct sps_iovec *iovec = sps_bam_pipe->iovec +
2414 sps_bam_pipe->iovec_count;
2415 uint32_t data_cnt;
2416
2417 while (len > 0) {
2418 if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
2419 pr_err("Num of descrptor %d exceed max (%d)",
2420 sps_bam_pipe->iovec_count,
2421 (uint32_t)QCE_MAX_NUM_DSCR);
2422 return -ENOMEM;
2423 }
2424 if (len > SPS_MAX_PKT_SIZE)
2425 data_cnt = SPS_MAX_PKT_SIZE;
2426 else
2427 data_cnt = len;
2428 iovec->size = data_cnt;
2429 iovec->addr = SPS_GET_LOWER_ADDR(paddr);
2430 iovec->flags = SPS_GET_UPPER_ADDR(paddr);
2431 sps_bam_pipe->iovec_count++;
2432 iovec++;
2433 paddr += data_cnt;
2434 len -= data_cnt;
2435 }
2436 return 0;
2437}
2438
2439static int _qce_sps_add_sg_data(struct qce_device *pce_dev,
2440 struct scatterlist *sg_src, uint32_t nbytes,
2441 struct sps_transfer *sps_bam_pipe)
2442{
2443 uint32_t data_cnt, len;
2444 dma_addr_t addr;
2445 struct sps_iovec *iovec = sps_bam_pipe->iovec +
2446 sps_bam_pipe->iovec_count;
2447
2448 while (nbytes > 0) {
2449 len = min(nbytes, sg_dma_len(sg_src));
2450 nbytes -= len;
2451 addr = sg_dma_address(sg_src);
2452 if (pce_dev->ce_bam_info.minor_version == 0)
2453 len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size);
2454 while (len > 0) {
2455 if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
2456 pr_err("Num of descrptor %d exceed max (%d)",
2457 sps_bam_pipe->iovec_count,
2458 (uint32_t)QCE_MAX_NUM_DSCR);
2459 return -ENOMEM;
2460 }
2461 if (len > SPS_MAX_PKT_SIZE) {
2462 data_cnt = SPS_MAX_PKT_SIZE;
2463 iovec->size = data_cnt;
2464 iovec->addr = SPS_GET_LOWER_ADDR(addr);
2465 iovec->flags = SPS_GET_UPPER_ADDR(addr);
2466 } else {
2467 data_cnt = len;
2468 iovec->size = data_cnt;
2469 iovec->addr = SPS_GET_LOWER_ADDR(addr);
2470 iovec->flags = SPS_GET_UPPER_ADDR(addr);
2471 }
2472 iovec++;
2473 sps_bam_pipe->iovec_count++;
2474 addr += data_cnt;
2475 len -= data_cnt;
2476 }
2477 sg_src = sg_next(sg_src);
2478 }
2479 return 0;
2480}
2481
2482static int _qce_sps_add_sg_data_off(struct qce_device *pce_dev,
2483 struct scatterlist *sg_src, uint32_t nbytes, uint32_t off,
2484 struct sps_transfer *sps_bam_pipe)
2485{
2486 uint32_t data_cnt, len;
2487 dma_addr_t addr;
2488 struct sps_iovec *iovec = sps_bam_pipe->iovec +
2489 sps_bam_pipe->iovec_count;
2490 unsigned int res_within_sg;
2491
2492 if (!sg_src)
2493 return -ENOENT;
2494 res_within_sg = sg_dma_len(sg_src);
2495
2496 while (off > 0) {
2497 if (!sg_src) {
2498 pr_err("broken sg list off %d nbytes %d\n",
2499 off, nbytes);
2500 return -ENOENT;
2501 }
2502 len = sg_dma_len(sg_src);
2503 if (off < len) {
2504 res_within_sg = len - off;
2505 break;
2506 }
2507 off -= len;
2508 sg_src = sg_next(sg_src);
2509 if (sg_src)
2510 res_within_sg = sg_dma_len(sg_src);
2511 }
2512 while (nbytes > 0 && sg_src) {
2513 len = min(nbytes, res_within_sg);
2514 nbytes -= len;
2515 addr = sg_dma_address(sg_src) + off;
2516 if (pce_dev->ce_bam_info.minor_version == 0)
2517 len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size);
2518 while (len > 0) {
2519 if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
2520 pr_err("Num of descrptor %d exceed max (%d)",
2521 sps_bam_pipe->iovec_count,
2522 (uint32_t)QCE_MAX_NUM_DSCR);
2523 return -ENOMEM;
2524 }
2525 if (len > SPS_MAX_PKT_SIZE) {
2526 data_cnt = SPS_MAX_PKT_SIZE;
2527 iovec->size = data_cnt;
2528 iovec->addr = SPS_GET_LOWER_ADDR(addr);
2529 iovec->flags = SPS_GET_UPPER_ADDR(addr);
2530 } else {
2531 data_cnt = len;
2532 iovec->size = data_cnt;
2533 iovec->addr = SPS_GET_LOWER_ADDR(addr);
2534 iovec->flags = SPS_GET_UPPER_ADDR(addr);
2535 }
2536 iovec++;
2537 sps_bam_pipe->iovec_count++;
2538 addr += data_cnt;
2539 len -= data_cnt;
2540 }
2541 if (nbytes) {
2542 sg_src = sg_next(sg_src);
2543 if (!sg_src) {
2544 pr_err("more data bytes %d\n", nbytes);
2545 return -ENOMEM;
2546 }
2547 res_within_sg = sg_dma_len(sg_src);
2548 off = 0;
2549 }
2550 }
2551 return 0;
2552}
2553
2554static int _qce_sps_add_cmd(struct qce_device *pce_dev, uint32_t flag,
2555 struct qce_cmdlist_info *cmdptr,
2556 struct sps_transfer *sps_bam_pipe)
2557{
2558 dma_addr_t paddr = GET_PHYS_ADDR(cmdptr->cmdlist);
2559 struct sps_iovec *iovec = sps_bam_pipe->iovec +
2560 sps_bam_pipe->iovec_count;
2561 iovec->size = cmdptr->size;
2562 iovec->addr = SPS_GET_LOWER_ADDR(paddr);
2563 iovec->flags = SPS_GET_UPPER_ADDR(paddr) | SPS_IOVEC_FLAG_CMD | flag;
2564 sps_bam_pipe->iovec_count++;
2565 if (sps_bam_pipe->iovec_count >= QCE_MAX_NUM_DSCR) {
2566 pr_err("Num of descrptor %d exceed max (%d)",
2567 sps_bam_pipe->iovec_count, (uint32_t)QCE_MAX_NUM_DSCR);
2568 return -ENOMEM;
2569 }
2570 return 0;
2571}
2572
2573static int _qce_sps_transfer(struct qce_device *pce_dev, int req_info)
2574{
2575 int rc = 0;
2576 struct ce_sps_data *pce_sps_data;
2577
2578 pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
2579 pce_sps_data->out_transfer.user =
2580 (void *)((uintptr_t)(CRYPTO_REQ_USER_PAT |
2581 (unsigned int) req_info));
2582 pce_sps_data->in_transfer.user =
2583 (void *)((uintptr_t)(CRYPTO_REQ_USER_PAT |
2584 (unsigned int) req_info));
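/*
 * Tag both transfers so the producer callback can recover the
 * request: the upper 16 bits carry the CRYPTO_REQ_USER_PAT pattern
 * and the low bits carry req_info; _sps_producer_callback()
 * validates the pattern and masks the index back out.
 */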
2585 _qce_dump_descr_fifos_dbg(pce_dev, req_info);
2586
2587 if (pce_sps_data->in_transfer.iovec_count) {
2588 rc = sps_transfer(pce_dev->ce_bam_info.consumer.pipe,
2589 &pce_sps_data->in_transfer);
2590 if (rc) {
2591 pr_err("sps_xfr() fail (consumer pipe=0x%lx) rc = %d\n",
2592 (uintptr_t)pce_dev->ce_bam_info.consumer.pipe,
2593 rc);
2594 goto ret;
2595 }
2596 }
2597 rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe,
2598 &pce_sps_data->out_transfer);
2599 if (rc)
2600 pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n",
2601 (uintptr_t)pce_dev->ce_bam_info.producer.pipe, rc);
2602ret:
2603 if (rc)
2604 _qce_dump_descr_fifos(pce_dev, req_info);
2605 return rc;
2606}
2607
2608/**
2609 * Allocate and Connect a CE peripheral's SPS endpoint
2610 *
2611 * This function allocates an endpoint context and
2612 * connects it to a memory endpoint by calling the
2613 * appropriate SPS driver APIs.
2614 *
2615 * It also registers an SPS callback function with
2616 * the SPS driver.
2617 *
2618 * This function should only be called once typically
2619 * during driver probe.
2620 *
2621 * @pce_dev - Pointer to qce_device structure
2622 * @ep - Pointer to sps endpoint data structure
2623 * @is_producer - true means producer endpoint,
2624 * false means consumer endpoint
2625 *
2626 * @return - 0 if successful else negative value.
2627 *
2628 */
2629static int qce_sps_init_ep_conn(struct qce_device *pce_dev,
2630 struct qce_sps_ep_conn_data *ep,
2631 bool is_producer)
2632{
2633 int rc = 0;
2634 struct sps_pipe *sps_pipe_info;
2635 struct sps_connect *sps_connect_info = &ep->connect;
2636 struct sps_register_event *sps_event = &ep->event;
2637
2638 /* Allocate endpoint context */
2639 sps_pipe_info = sps_alloc_endpoint();
2640 if (!sps_pipe_info) {
2641 pr_err("sps_alloc_endpoint() failed!!! is_producer=%d",
2642 is_producer);
2643 rc = -ENOMEM;
2644 goto out;
2645 }
2646 /* Now save the sps pipe handle */
2647 ep->pipe = sps_pipe_info;
2648
2649 /* Get default connection configuration for an endpoint */
2650 rc = sps_get_config(sps_pipe_info, sps_connect_info);
2651 if (rc) {
2652 pr_err("sps_get_config() fail pipe_handle=0x%lx, rc = %d\n",
2653 (uintptr_t)sps_pipe_info, rc);
2654 goto get_config_err;
2655 }
2656
2657 /* Modify the default connection configuration */
2658 if (is_producer) {
2659 /*
2660 * For a CE producer transfer, the source should be
2661 * the CE peripheral, whereas the destination should
2662 * be system memory.
2663 */
2664 sps_connect_info->source = pce_dev->ce_bam_info.bam_handle;
2665 sps_connect_info->destination = SPS_DEV_HANDLE_MEM;
2666 /* Producer pipe will handle this connection */
2667 sps_connect_info->mode = SPS_MODE_SRC;
2668 sps_connect_info->options =
2669 SPS_O_AUTO_ENABLE | SPS_O_DESC_DONE;
2670 } else {
2671 /* For a CE consumer transfer, the source should be
2672 * system memory, whereas the destination should be
2673 * the CE peripheral.
2674 */
2675 sps_connect_info->source = SPS_DEV_HANDLE_MEM;
2676 sps_connect_info->destination = pce_dev->ce_bam_info.bam_handle;
2677 sps_connect_info->mode = SPS_MODE_DEST;
2678 sps_connect_info->options =
2679 SPS_O_AUTO_ENABLE;
2680 }
2681
2682 /* Producer pipe index */
2683 sps_connect_info->src_pipe_index =
2684 pce_dev->ce_bam_info.src_pipe_index;
2685 /* Consumer pipe index */
2686 sps_connect_info->dest_pipe_index =
2687 pce_dev->ce_bam_info.dest_pipe_index;
2688 /* Set pipe group */
2689 sps_connect_info->lock_group = pce_dev->ce_bam_info.pipe_pair_index;
2690 sps_connect_info->event_thresh = 0x10;
2691 /*
2692 * Max. no of scatter/gather buffers that can
2693 * be passed by block layer = 32 (NR_SG).
2694 * Each BAM descriptor needs 64 bits (8 bytes).
2695 * One BAM descriptor is required per buffer transfer.
2696 * So we would require a total of 256 (32 * 8) bytes of descriptor FIFO.
2697 * But due to a HW limitation we need to allocate at least one extra
2698 * descriptor's worth of memory (256 bytes + 8 bytes). To stay at a
2699 * power of 2, we allocate 512 bytes of memory.
2700 */
2701 sps_connect_info->desc.size = QCE_MAX_NUM_DSCR * MAX_QCE_ALLOC_BAM_REQ *
2702 sizeof(struct sps_iovec);
2703 if (sps_connect_info->desc.size > MAX_SPS_DESC_FIFO_SIZE)
2704 sps_connect_info->desc.size = MAX_SPS_DESC_FIFO_SIZE;
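/*
 * With the driver's QCE_MAX_NUM_DSCR of 512 descriptors, nine request
 * slots (MAX_QCE_ALLOC_BAM_REQ) and 8-byte iovecs, this works out to
 * 512 * 9 * 8 = 36864 bytes, below the MAX_SPS_DESC_FIFO_SIZE cap,
 * so the clamp above is only a safeguard.
 */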
2705 sps_connect_info->desc.base = dma_alloc_coherent(pce_dev->pdev,
2706 sps_connect_info->desc.size,
2707 &sps_connect_info->desc.phys_base,
2708 GFP_KERNEL);
2709 if (sps_connect_info->desc.base == NULL) {
2710 rc = -ENOMEM;
2711 pr_err("Can not allocate coherent memory for sps data\n");
2712 goto get_config_err;
2713 }
2714
2715 memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
2716
2717 /* Establish connection between peripheral and memory endpoint */
2718 rc = sps_connect(sps_pipe_info, sps_connect_info);
2719 if (rc) {
2720 pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
2721 (uintptr_t)sps_pipe_info, rc);
2722 goto sps_connect_err;
2723 }
2724
2725 sps_event->mode = SPS_TRIGGER_CALLBACK;
2726 sps_event->xfer_done = NULL;
2727 sps_event->user = (void *)pce_dev;
2728 if (is_producer) {
2729 sps_event->options = SPS_O_EOT | SPS_O_DESC_DONE;
2730 sps_event->callback = _sps_producer_callback;
2731 rc = sps_register_event(ep->pipe, sps_event);
2732 if (rc) {
2733 pr_err("Producer callback registration failed rc=%d\n",
2734 rc);
2735 goto sps_connect_err;
2736 }
2737 } else {
2738 sps_event->options = SPS_O_EOT;
2739 sps_event->callback = NULL;
2740 }
2741
2742 pr_debug("success, %s : pipe_handle=0x%lx, desc fifo base (phy) = 0x%pK\n",
2743 is_producer ? "PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)",
2744 (uintptr_t)sps_pipe_info, &sps_connect_info->desc.phys_base);
2745 goto out;
2746
2747sps_connect_err:
2748 dma_free_coherent(pce_dev->pdev,
2749 sps_connect_info->desc.size,
2750 sps_connect_info->desc.base,
2751 sps_connect_info->desc.phys_base);
2752get_config_err:
2753 sps_free_endpoint(sps_pipe_info);
2754out:
2755 return rc;
2756}
2757
2758/**
2759 * Disconnect and Deallocate a CE peripheral's SPS endpoint
2760 *
2761 * This function disconnect endpoint and deallocates
2762 * endpoint context.
2763 *
2764 * This function should only be called once typically
2765 * during driver remove.
2766 *
2767 * @pce_dev - Pointer to qce_device structure
2768 * @ep - Pointer to sps endpoint data structure
2769 *
2770 */
2771static void qce_sps_exit_ep_conn(struct qce_device *pce_dev,
2772 struct qce_sps_ep_conn_data *ep)
2773{
2774 struct sps_pipe *sps_pipe_info = ep->pipe;
2775 struct sps_connect *sps_connect_info = &ep->connect;
2776
2777 sps_disconnect(sps_pipe_info);
2778 dma_free_coherent(pce_dev->pdev,
2779 sps_connect_info->desc.size,
2780 sps_connect_info->desc.base,
2781 sps_connect_info->desc.phys_base);
2782 sps_free_endpoint(sps_pipe_info);
2783}
2784
2785static void qce_sps_release_bam(struct qce_device *pce_dev)
2786{
2787 struct bam_registration_info *pbam;
2788
2789 mutex_lock(&bam_register_lock);
2790 pbam = pce_dev->pbam;
2791 if (pbam == NULL)
2792 goto ret;
2793
2794 pbam->cnt--;
2795 if (pbam->cnt > 0)
2796 goto ret;
2797
2798 if (pce_dev->ce_bam_info.bam_handle) {
2799 sps_deregister_bam_device(pce_dev->ce_bam_info.bam_handle);
2800
2801 pr_debug("deregister bam handle 0x%lx\n",
2802 pce_dev->ce_bam_info.bam_handle);
2803 pce_dev->ce_bam_info.bam_handle = 0;
2804 }
2805 iounmap(pbam->bam_iobase);
2806 pr_debug("delete bam 0x%x\n", pbam->bam_mem);
2807 list_del(&pbam->qlist);
2808 kfree(pbam);
2809
2810ret:
2811 pce_dev->pbam = NULL;
2812 mutex_unlock(&bam_register_lock);
2813}
2814
2815static int qce_sps_get_bam(struct qce_device *pce_dev)
2816{
2817 int rc = 0;
2818 struct sps_bam_props bam = {0};
2819 struct bam_registration_info *pbam = NULL;
2820 struct bam_registration_info *p;
2821 uint32_t bam_cfg = 0;
2822
2823
2824 mutex_lock(&bam_register_lock);
2825
2826 list_for_each_entry(p, &qce50_bam_list, qlist) {
2827 if (p->bam_mem == pce_dev->bam_mem) {
2828 pbam = p; /* found */
2829 break;
2830 }
2831 }
2832
2833 if (pbam) {
2834 pr_debug("found bam 0x%x\n", pbam->bam_mem);
2835 pbam->cnt++;
2836 pce_dev->ce_bam_info.bam_handle = pbam->handle;
2837 pce_dev->ce_bam_info.bam_mem = pbam->bam_mem;
2838 pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase;
2839 pce_dev->pbam = pbam;
2840 pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
2841 goto ret;
2842 }
2843
2844 pbam = kzalloc(sizeof(struct bam_registration_info), GFP_KERNEL);
2845 if (!pbam) {
2846 rc = -ENOMEM;
2847 goto ret;
2848 }
2849 pbam->cnt = 1;
2850 pbam->bam_mem = pce_dev->bam_mem;
2851 pbam->bam_iobase = ioremap_nocache(pce_dev->bam_mem,
2852 pce_dev->bam_mem_size);
2853 if (!pbam->bam_iobase) {
2854 kfree(pbam);
2855 rc = -ENOMEM;
2856 pr_err("Can not map BAM io memory\n");
2857 goto ret;
2858 }
2859 pce_dev->ce_bam_info.bam_mem = pbam->bam_mem;
2860 pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase;
2861 pbam->handle = 0;
2862 pr_debug("allocate bam 0x%x\n", pbam->bam_mem);
2863 bam_cfg = readl_relaxed(pce_dev->ce_bam_info.bam_iobase +
2864 CRYPTO_BAM_CNFG_BITS_REG);
2865 pbam->support_cmd_dscr = (bam_cfg & CRYPTO_BAM_CD_ENABLE_MASK) ?
2866 true : false;
2867 if (pbam->support_cmd_dscr == false) {
2868 pr_info("qce50 don't support command descriptor. bam_cfg%x\n",
2869 bam_cfg);
2870 pce_dev->no_get_around = false;
2871 }
2872 pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
2873
2874 bam.phys_addr = pce_dev->ce_bam_info.bam_mem;
2875 bam.virt_addr = pce_dev->ce_bam_info.bam_iobase;
2876
2877 /*
2878 * This event threshold value is only significant for BAM-to-BAM
2879 * transfer. It's ignored for BAM-to-System mode transfer.
2880 */
2881 bam.event_threshold = 0x10; /* Pipe event threshold */
2882 /*
2883 * This threshold controls when the BAM publishes
2884 * the descriptor size on the sideband interface.
2885 * SPS HW will only be used when
2886 * data transfer size > 64 bytes.
2887 */
2888 bam.summing_threshold = 64;
2889 /* The SPS driver will handle the crypto BAM IRQ */
2890 bam.irq = (u32)pce_dev->ce_bam_info.bam_irq;
2891 /*
2892 * Set flag to indicate BAM global device control is managed
2893 * remotely.
2894 */
2895 if ((pce_dev->support_cmd_dscr == false) || (pce_dev->is_shared))
2896 bam.manage = SPS_BAM_MGR_DEVICE_REMOTE;
2897 else
2898 bam.manage = SPS_BAM_MGR_LOCAL;
2899
2900 bam.ee = pce_dev->ce_bam_info.bam_ee;
2901 bam.ipc_loglevel = QCE_BAM_DEFAULT_IPC_LOGLVL;
2902 bam.options |= SPS_BAM_CACHED_WP;
2903 pr_debug("bam physical base=0x%lx\n", (uintptr_t)bam.phys_addr);
2904 pr_debug("bam virtual base=0x%pK\n", bam.virt_addr);
2905
2906 /* Register CE Peripheral BAM device to SPS driver */
2907 rc = sps_register_bam_device(&bam, &pbam->handle);
2908 if (rc) {
2909 pr_err("sps_register_bam_device() failed! err=%d", rc);
2910 rc = -EIO;
2911 iounmap(pbam->bam_iobase);
2912 kfree(pbam);
2913 goto ret;
2914 }
2915
2916 pce_dev->pbam = pbam;
2917 list_add_tail(&pbam->qlist, &qce50_bam_list);
2918 pce_dev->ce_bam_info.bam_handle = pbam->handle;
2919
2920ret:
2921 mutex_unlock(&bam_register_lock);
2922
2923 return rc;
2924}
2925/**
2926 * Initialize SPS HW connected with CE core
2927 *
2928 * This function register BAM HW resources with
2929 * SPS driver and then initialize 2 SPS endpoints
2930 *
2931 * This function should only be called once typically
2932 * during driver probe.
2933 *
2934 * @pce_dev - Pointer to qce_device structure
2935 *
2936 * @return - 0 if successful else negative value.
2937 *
2938 */
2939static int qce_sps_init(struct qce_device *pce_dev)
2940{
2941 int rc = 0;
2942
2943 rc = qce_sps_get_bam(pce_dev);
2944 if (rc)
2945 return rc;
2946 pr_debug("BAM device registered. bam_handle=0x%lx\n",
2947 pce_dev->ce_bam_info.bam_handle);
2948
2949 rc = qce_sps_init_ep_conn(pce_dev,
2950 &pce_dev->ce_bam_info.producer, true);
2951 if (rc)
2952 goto sps_connect_producer_err;
2953 rc = qce_sps_init_ep_conn(pce_dev,
2954 &pce_dev->ce_bam_info.consumer, false);
2955 if (rc)
2956 goto sps_connect_consumer_err;
2957
2958 pr_info(" QTI MSM CE-BAM at 0x%016llx irq %d\n",
2959 (unsigned long long)pce_dev->ce_bam_info.bam_mem,
2960 (unsigned int)pce_dev->ce_bam_info.bam_irq);
2961 return rc;
2962
2963sps_connect_consumer_err:
2964 qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer);
2965sps_connect_producer_err:
2966 qce_sps_release_bam(pce_dev);
2967 return rc;
2968}
2969
2970static inline int qce_alloc_req_info(struct qce_device *pce_dev)
2971{
2972 int i;
2973 int request_index = pce_dev->ce_request_index;
2974
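/*
 * Round-robin scan starting just past the last slot handed out;
 * atomic_xchg() claims a free slot without holding a lock. Only the
 * first MAX_QCE_BAM_REQ slots are eligible here; the extra slot
 * (DUMMY_REQ_INDEX) is reserved for the dummy request.
 */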
2975 for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
2976 request_index++;
2977 if (request_index >= MAX_QCE_BAM_REQ)
2978 request_index = 0;
2979 if (atomic_xchg(&pce_dev->ce_request_info[request_index].
2980 in_use, true) == false) {
2981 pce_dev->ce_request_index = request_index;
2982 return request_index;
2983 }
2984 }
2985 pr_warn("pcedev %d no reqs available no_of_queued_req %d\n",
2986 pce_dev->dev_no, atomic_read(
2987 &pce_dev->no_of_queued_req));
2988 return -EBUSY;
2989}
2990
2991static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
2992 bool is_complete)
2993{
2994 pce_dev->ce_request_info[req_info].xfer_type = QCE_XFER_TYPE_LAST;
2995 if (atomic_xchg(&pce_dev->ce_request_info[req_info].in_use,
2996 false) == true) {
2997 if (req_info < MAX_QCE_BAM_REQ && is_complete)
2998 atomic_dec(&pce_dev->no_of_queued_req);
2999 } else
3000 pr_warn("request info %d free already\n", req_info);
3001}
3002
3003static void print_notify_debug(struct sps_event_notify *notify)
3004{
3005 phys_addr_t addr =
3006 DESC_FULL_ADDR((phys_addr_t) notify->data.transfer.iovec.flags,
3007 notify->data.transfer.iovec.addr);
3008 pr_debug("sps ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x user=0x%pK\n",
3009 notify->event_id, &addr,
3010 notify->data.transfer.iovec.size,
3011 notify->data.transfer.iovec.flags,
3012 notify->data.transfer.user);
3013}
3014
3015static void _qce_req_complete(struct qce_device *pce_dev, unsigned int req_info)
3016{
3017 struct ce_request_info *preq_info;
3018
3019 preq_info = &pce_dev->ce_request_info[req_info];
3020
3021 switch (preq_info->xfer_type) {
3022 case QCE_XFER_CIPHERING:
3023 _ablk_cipher_complete(pce_dev, req_info);
3024 break;
3025 case QCE_XFER_HASHING:
3026 _sha_complete(pce_dev, req_info);
3027 break;
3028 case QCE_XFER_AEAD:
3029 _aead_complete(pce_dev, req_info);
3030 break;
3031 case QCE_XFER_F8:
3032 _f8_complete(pce_dev, req_info);
3033 break;
3034 case QCE_XFER_F9:
3035 _f9_complete(pce_dev, req_info);
3036 break;
3037 default:
3038 qce_free_req_info(pce_dev, req_info, true);
3039 break;
3040 }
3041}
3042
3043static void qce_multireq_timeout(unsigned long data)
3044{
3045 struct qce_device *pce_dev = (struct qce_device *)data;
3046 int ret = 0;
3047 int last_seq;
3048 unsigned long flags;
3049
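/*
 * If new bunch-mode commands arrived since the last tick
 * (bunch_cmd_seq differs from last_intr_seq), record the new
 * sequence and re-arm the timer; only when the sequence has been
 * idle for a full DELAY_IN_JIFFIES tick does the code below flush
 * the pipeline with a dummy request.
 */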
3050 last_seq = atomic_read(&pce_dev->bunch_cmd_seq);
3051 if (last_seq == 0 ||
3052 last_seq != atomic_read(&pce_dev->last_intr_seq)) {
3053 atomic_set(&pce_dev->last_intr_seq, last_seq);
3054 mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES));
3055 return;
3056 }
3057 /* last bunch mode command time out */
3058
3059 /*
3060 * Interrupts are disabled from here until the dummy request has
3061 * finished its sps transfer and the owner is set back to none,
3062 * so this sequence cannot be preempted or interrupted. If a BAM
3063 * interrupt fired in between and the completion callback were
3064 * invoked, the client driver could issue a new request and deadlock.
3065 */
3066 local_irq_save(flags);
3067 if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_TIMEOUT)
3068 != QCE_OWNER_NONE) {
3069 local_irq_restore(flags);
3070 mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES));
3071 return;
3072 }
3073
3074 ret = qce_dummy_req(pce_dev);
3075 if (ret)
3076 pr_warn("pcedev %d: Failed to insert dummy req\n",
3077 pce_dev->dev_no);
3078 cmpxchg(&pce_dev->owner, QCE_OWNER_TIMEOUT, QCE_OWNER_NONE);
3079 pce_dev->mode = IN_INTERRUPT_MODE;
3080 local_irq_restore(flags);
3081
3082 del_timer(&(pce_dev->timer));
3083 pce_dev->qce_stats.no_of_timeouts++;
3084 pr_debug("pcedev %d mode switch to INTR\n", pce_dev->dev_no);
3085}
3086
3087void qce_get_driver_stats(void *handle)
3088{
3089 struct qce_device *pce_dev = (struct qce_device *) handle;
3090
3091 if (!_qce50_disp_stats)
3092 return;
3093 pr_info("Engine %d timeout occuured %d\n", pce_dev->dev_no,
3094 pce_dev->qce_stats.no_of_timeouts);
3095 pr_info("Engine %d dummy request inserted %d\n", pce_dev->dev_no,
3096 pce_dev->qce_stats.no_of_dummy_reqs);
3097 if (pce_dev->mode)
3098 pr_info("Engine %d is in BUNCH MODE\n", pce_dev->dev_no);
3099 else
3100 pr_info("Engine %d is in INTERRUPT MODE\n", pce_dev->dev_no);
3101 pr_info("Engine %d outstanding request %d\n", pce_dev->dev_no,
3102 atomic_read(&pce_dev->no_of_queued_req));
3103}
3104EXPORT_SYMBOL(qce_get_driver_stats);
3105
3106void qce_clear_driver_stats(void *handle)
3107{
3108 struct qce_device *pce_dev = (struct qce_device *) handle;
3109
3110 pce_dev->qce_stats.no_of_timeouts = 0;
3111 pce_dev->qce_stats.no_of_dummy_reqs = 0;
3112}
3113EXPORT_SYMBOL(qce_clear_driver_stats);
3114
3115static void _sps_producer_callback(struct sps_event_notify *notify)
3116{
3117 struct qce_device *pce_dev = (struct qce_device *)
3118 ((struct sps_event_notify *)notify)->user;
3119 int rc = 0;
3120 unsigned int req_info;
3121 struct ce_sps_data *pce_sps_data;
3122 struct ce_request_info *preq_info;
3123
3124 print_notify_debug(notify);
3125
3126 req_info = (unsigned int)((uintptr_t)notify->data.transfer.user);
3127 if ((req_info & 0xffff0000) != CRYPTO_REQ_USER_PAT) {
3128 pr_warn("request information %d out of range\n", req_info);
3129 return;
3130 }
3131
3132 req_info = req_info & 0x00ff;
3133 if (req_info >= MAX_QCE_ALLOC_BAM_REQ) {
3134 pr_warn("request information %d out of range\n", req_info);
3135 return;
3136 }
3137
3138 preq_info = &pce_dev->ce_request_info[req_info];
3139
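/*
 * Cipher and AEAD requests complete in two phases: the first
 * producer event only marks the payload done, after which a
 * result-dump descriptor is queued with SPS_IOVEC_FLAG_INT; the
 * second event (producer_state no longer QCE_PIPE_STATE_IDLE)
 * finishes the request.
 */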
3140 pce_sps_data = &preq_info->ce_sps;
3141 if ((preq_info->xfer_type == QCE_XFER_CIPHERING ||
3142 preq_info->xfer_type == QCE_XFER_AEAD) &&
3143 pce_sps_data->producer_state == QCE_PIPE_STATE_IDLE) {
3144 pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
3145 pce_sps_data->out_transfer.iovec_count = 0;
3146 _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
3147 CRYPTO_RESULT_DUMP_SIZE,
3148 &pce_sps_data->out_transfer);
3149 _qce_set_flag(&pce_sps_data->out_transfer,
3150 SPS_IOVEC_FLAG_INT);
3151 rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe,
3152 &pce_sps_data->out_transfer);
3153 if (rc) {
3154 pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n",
3155 (uintptr_t)pce_dev->ce_bam_info.producer.pipe,
3156 rc);
3157 }
3158 return;
3159 }
3160
3161 _qce_req_complete(pce_dev, req_info);
3162}
3163
3164/**
3165 * De-initialize SPS HW connected with CE core
3166 *
3167 * This function deinitialize SPS endpoints and then
3168 * deregisters BAM resources from SPS driver.
3169 *
3170 * This function should only be called once typically
3171 * during driver remove.
3172 *
3173 * @pce_dev - Pointer to qce_device structure
3174 *
3175 */
3176static void qce_sps_exit(struct qce_device *pce_dev)
3177{
3178 qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.consumer);
3179 qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer);
3180 qce_sps_release_bam(pce_dev);
3181}
3182
3183static void qce_add_cmd_element(struct qce_device *pdev,
3184 struct sps_command_element **cmd_ptr, u32 addr,
3185 u32 data, struct sps_command_element **populate)
3186{
3187 (*cmd_ptr)->addr = (uint32_t)(addr + pdev->phy_iobase);
3188 (*cmd_ptr)->command = 0;
3189 (*cmd_ptr)->data = data;
3190 (*cmd_ptr)->mask = 0xFFFFFFFF;
3191 (*cmd_ptr)->reserved = 0;
3192 if (populate != NULL)
3193 *populate = *cmd_ptr;
3194 (*cmd_ptr)++;
3195}
3196
3197static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, int cri_index,
3198 unsigned char **pvaddr, enum qce_cipher_mode_enum mode,
3199 bool key_128)
3200{
3201 struct sps_command_element *ce_vaddr;
3202 uintptr_t ce_vaddr_start;
3203 struct qce_cmdlistptr_ops *cmdlistptr;
3204 struct qce_cmdlist_info *pcl_info = NULL;
3205 int i = 0;
3206 uint32_t encr_cfg = 0;
3207 uint32_t key_reg = 0;
3208 uint32_t xts_key_reg = 0;
3209 uint32_t iv_reg = 0;
3210
3211 cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
3212 *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
3213 pdev->ce_bam_info.ce_burst_size);
3214 ce_vaddr = (struct sps_command_element *)(*pvaddr);
3215 ce_vaddr_start = (uintptr_t)(*pvaddr);
3216 /*
3217 * Designate chunks of the allocated memory to various
3218 * command list pointers related to AES cipher operations defined
3219 * in ce_cmdlistptrs_ops structure.
3220 */
3221 switch (mode) {
3222 case QCE_MODE_CBC:
3223 case QCE_MODE_CTR:
3224 if (key_128 == true) {
3225 cmdlistptr->cipher_aes_128_cbc_ctr.cmdlist =
3226 (uintptr_t)ce_vaddr;
3227 pcl_info = &(cmdlistptr->cipher_aes_128_cbc_ctr);
3228 if (mode == QCE_MODE_CBC)
3229 encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
3230 else
3231 encr_cfg = pdev->reg.encr_cfg_aes_ctr_128;
3232 iv_reg = 4;
3233 key_reg = 4;
3234 xts_key_reg = 0;
3235 } else {
3236 cmdlistptr->cipher_aes_256_cbc_ctr.cmdlist =
3237 (uintptr_t)ce_vaddr;
3238 pcl_info = &(cmdlistptr->cipher_aes_256_cbc_ctr);
3239
3240 if (mode == QCE_MODE_CBC)
3241 encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
3242 else
3243 encr_cfg = pdev->reg.encr_cfg_aes_ctr_256;
3244 iv_reg = 4;
3245 key_reg = 8;
3246 xts_key_reg = 0;
3247 }
3248 break;
3249 case QCE_MODE_ECB:
3250 if (key_128 == true) {
3251 cmdlistptr->cipher_aes_128_ecb.cmdlist =
3252 (uintptr_t)ce_vaddr;
3253 pcl_info = &(cmdlistptr->cipher_aes_128_ecb);
3254
3255 encr_cfg = pdev->reg.encr_cfg_aes_ecb_128;
3256 iv_reg = 0;
3257 key_reg = 4;
3258 xts_key_reg = 0;
3259 } else {
3260 cmdlistptr->cipher_aes_256_ecb.cmdlist =
3261 (uintptr_t)ce_vaddr;
3262 pcl_info = &(cmdlistptr->cipher_aes_256_ecb);
3263
3264 encr_cfg = pdev->reg.encr_cfg_aes_ecb_256;
3265 iv_reg = 0;
3266 key_reg = 8;
3267 xts_key_reg = 0;
3268 }
3269 break;
3270 case QCE_MODE_XTS:
3271 if (key_128 == true) {
3272 cmdlistptr->cipher_aes_128_xts.cmdlist =
3273 (uintptr_t)ce_vaddr;
3274 pcl_info = &(cmdlistptr->cipher_aes_128_xts);
3275
3276 encr_cfg = pdev->reg.encr_cfg_aes_xts_128;
3277 iv_reg = 4;
3278 key_reg = 4;
3279 xts_key_reg = 4;
3280 } else {
3281 cmdlistptr->cipher_aes_256_xts.cmdlist =
3282 (uintptr_t)ce_vaddr;
3283 pcl_info = &(cmdlistptr->cipher_aes_256_xts);
3284
3285 encr_cfg = pdev->reg.encr_cfg_aes_xts_256;
3286 iv_reg = 4;
3287 key_reg = 8;
3288 xts_key_reg = 8;
3289 }
3290 break;
3291 default:
3292 pr_err("Unknown mode of operation %d received, exiting now\n",
3293 mode);
3294 return -EINVAL;
3296 }
3297
3298 /* clear status register */
3299 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
3300
3301 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3302 pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
3303
3304 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
3305 &pcl_info->seg_size);
3306 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
3307 &pcl_info->encr_seg_cfg);
3308 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
3309 &pcl_info->encr_seg_size);
3310 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
3311 &pcl_info->encr_seg_start);
3312 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
3313 (uint32_t)0xffffffff, &pcl_info->encr_mask);
3314 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
3315 (uint32_t)0xffffffff, NULL);
3316 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
3317 (uint32_t)0xffffffff, NULL);
3318 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
3319 (uint32_t)0xffffffff, NULL);
3320 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
3321 &pcl_info->auth_seg_cfg);
3322 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
3323 &pcl_info->encr_key);
3324 for (i = 1; i < key_reg; i++)
3325 qce_add_cmd_element(pdev, &ce_vaddr,
3326 (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
3327 0, NULL);
3328 if (xts_key_reg) {
3329 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_XTS_KEY0_REG,
3330 0, &pcl_info->encr_xts_key);
3331 for (i = 1; i < xts_key_reg; i++)
3332 qce_add_cmd_element(pdev, &ce_vaddr,
3333 (CRYPTO_ENCR_XTS_KEY0_REG +
3334 i * sizeof(uint32_t)), 0, NULL);
3335 qce_add_cmd_element(pdev, &ce_vaddr,
3336 CRYPTO_ENCR_XTS_DU_SIZE_REG, 0,
3337 &pcl_info->encr_xts_du_size);
3338 }
3339 if (iv_reg) {
3340 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
3341 &pcl_info->encr_cntr_iv);
3342 for (i = 1; i < iv_reg; i++)
3343 qce_add_cmd_element(pdev, &ce_vaddr,
3344 (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
3345 0, NULL);
3346 }
3347 /* Add dummy to align size to burst-size multiple */
3348 if (mode == QCE_MODE_XTS) {
3349 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
3350 0, &pcl_info->auth_seg_size);
3351 } else {
3352 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
3353 0, &pcl_info->auth_seg_size);
3354 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
3355 0, &pcl_info->auth_seg_size);
3356 }
3357 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3358 pdev->reg.crypto_cfg_le, NULL);
3359
3360 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
3361 ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
3362 (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
3363
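/*
 * pcl_info->size, computed below, is the byte length of this
 * command list; _qce_sps_add_cmd() later hands it to the BAM as
 * the command descriptor size.
 */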
3364 pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
3365 *pvaddr = (unsigned char *) ce_vaddr;
3366
3367 return 0;
3368}
3369
3370static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev, int cri_index,
3371 unsigned char **pvaddr, enum qce_cipher_alg_enum alg,
3372 bool mode_cbc)
3373{
3374
3375 struct sps_command_element *ce_vaddr;
3376 uintptr_t ce_vaddr_start;
3377 struct qce_cmdlistptr_ops *cmdlistptr;
3378 struct qce_cmdlist_info *pcl_info = NULL;
3379 int i = 0;
3380 uint32_t encr_cfg = 0;
3381 uint32_t key_reg = 0;
3382 uint32_t iv_reg = 0;
3383
3384 cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
3385 *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
3386 pdev->ce_bam_info.ce_burst_size);
3387 ce_vaddr = (struct sps_command_element *)(*pvaddr);
3388 ce_vaddr_start = (uintptr_t)(*pvaddr);
3389
3390 /*
3391 * Designate chunks of the allocated memory to various
3392 * command list pointers related to cipher operations defined
3393 * in ce_cmdlistptrs_ops structure.
3394 */
3395 switch (alg) {
3396 case CIPHER_ALG_DES:
3397 if (mode_cbc) {
3398 cmdlistptr->cipher_des_cbc.cmdlist =
3399 (uintptr_t)ce_vaddr;
3400 pcl_info = &(cmdlistptr->cipher_des_cbc);
3401
3402
3403 encr_cfg = pdev->reg.encr_cfg_des_cbc;
3404 iv_reg = 2;
3405 key_reg = 2;
3406 } else {
3407 cmdlistptr->cipher_des_ecb.cmdlist =
3408 (uintptr_t)ce_vaddr;
3409 pcl_info = &(cmdlistptr->cipher_des_ecb);
3410
3411 encr_cfg = pdev->reg.encr_cfg_des_ecb;
3412 iv_reg = 0;
3413 key_reg = 2;
3414 }
3415 break;
3416 case CIPHER_ALG_3DES:
3417 if (mode_cbc) {
3418 cmdlistptr->cipher_3des_cbc.cmdlist =
3419 (uintptr_t)ce_vaddr;
3420 pcl_info = &(cmdlistptr->cipher_3des_cbc);
3421
3422 encr_cfg = pdev->reg.encr_cfg_3des_cbc;
3423 iv_reg = 2;
3424 key_reg = 6;
3425 } else {
3426 cmdlistptr->cipher_3des_ecb.cmdlist =
3427 (uintptr_t)ce_vaddr;
3428 pcl_info = &(cmdlistptr->cipher_3des_ecb);
3429
3430 encr_cfg = pdev->reg.encr_cfg_3des_ecb;
3431 iv_reg = 0;
3432 key_reg = 6;
3433 }
3434 break;
3435 default:
3436 pr_err("Unknown algorithms %d received, exiting now\n", alg);
3437 return -EINVAL;
3438 break;
3439 }
3440
3441 /* clear status register */
3442 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
3443
3444 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3445 pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
3446
3447 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
3448 &pcl_info->seg_size);
3449 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
3450 &pcl_info->encr_seg_cfg);
3451 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
3452 &pcl_info->encr_seg_size);
3453 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
3454 &pcl_info->encr_seg_start);
3455 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
3456 &pcl_info->auth_seg_cfg);
3457
3458 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
3459 &pcl_info->encr_key);
3460 for (i = 1; i < key_reg; i++)
3461 qce_add_cmd_element(pdev, &ce_vaddr,
3462 (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
3463 0, NULL);
3464 if (iv_reg) {
3465 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
3466 &pcl_info->encr_cntr_iv);
3467 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
3468 NULL);
3469 }
3470
3471 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3472 pdev->reg.crypto_cfg_le, NULL);
3473
3474 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
3475 ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
3476 (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
3477
3478 pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
3479 *pvaddr = (unsigned char *) ce_vaddr;
3480
3481 return 0;
3482}
3483
3484static int _setup_cipher_null_cmdlistptrs(struct qce_device *pdev,
3485 int cri_index, unsigned char **pvaddr)
3486{
3487 struct sps_command_element *ce_vaddr;
3488 uintptr_t ce_vaddr_start;
3489 struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info
3490 [cri_index].ce_sps.cmdlistptr;
3491 struct qce_cmdlist_info *pcl_info = NULL;
3492
3493 *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
3494 pdev->ce_bam_info.ce_burst_size);
3495 ce_vaddr_start = (uintptr_t)(*pvaddr);
3496 ce_vaddr = (struct sps_command_element *)(*pvaddr);
3497
3498 cmdlistptr->cipher_null.cmdlist = (uintptr_t)ce_vaddr;
3499 pcl_info = &(cmdlistptr->cipher_null);
3500
3501 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG,
3502 pdev->ce_bam_info.ce_burst_size, NULL);
3503
3504 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
3505 pdev->reg.encr_cfg_aes_ecb_128, NULL);
3506 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
3507 NULL);
3508 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
3509 NULL);
3510
3511 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
3512 0, NULL);
3513 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
3514 0, NULL);
3515 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
3516 NULL);
3517
3518 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
3519 ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
3520 (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
3521
3522 pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
3523 *pvaddr = (unsigned char *) ce_vaddr;
3524 return 0;
3525}
3526
3527static int _setup_auth_cmdlistptrs(struct qce_device *pdev, int cri_index,
3528 unsigned char **pvaddr, enum qce_hash_alg_enum alg,
3529 bool key_128)
3530{
3531 struct sps_command_element *ce_vaddr;
3532 uintptr_t ce_vaddr_start;
3533 struct qce_cmdlistptr_ops *cmdlistptr;
3534 struct qce_cmdlist_info *pcl_info = NULL;
3535 int i = 0;
3536 uint32_t key_reg = 0;
3537 uint32_t auth_cfg = 0;
3538 uint32_t iv_reg = 0;
3539
3540 cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
3541 *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
3542 pdev->ce_bam_info.ce_burst_size);
3543 ce_vaddr_start = (uintptr_t)(*pvaddr);
3544 ce_vaddr = (struct sps_command_element *)(*pvaddr);
3545
3546 /*
3547 * Designate chunks of the allocated memory to various
3548 * command list pointers related to authentication operations
3549 * defined in ce_cmdlistptrs_ops structure.
3550 */
3551 switch (alg) {
3552 case QCE_HASH_SHA1:
3553 cmdlistptr->auth_sha1.cmdlist = (uintptr_t)ce_vaddr;
3554 pcl_info = &(cmdlistptr->auth_sha1);
3555
3556 auth_cfg = pdev->reg.auth_cfg_sha1;
3557 iv_reg = 5;
3558
3559 /* clear status register */
3560 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
3561 0, NULL);
3562
3563 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3564 pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
3565
3566 break;
3567 case QCE_HASH_SHA256:
3568 cmdlistptr->auth_sha256.cmdlist = (uintptr_t)ce_vaddr;
3569 pcl_info = &(cmdlistptr->auth_sha256);
3570
3571 auth_cfg = pdev->reg.auth_cfg_sha256;
3572 iv_reg = 8;
3573
3574 /* clear status register */
3575 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
3576 0, NULL);
3577
3578 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3579 pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
3580 /* 1 dummy write */
3581 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
3582 0, NULL);
3583 break;
3584 case QCE_HASH_SHA1_HMAC:
3585 cmdlistptr->auth_sha1_hmac.cmdlist = (uintptr_t)ce_vaddr;
3586 pcl_info = &(cmdlistptr->auth_sha1_hmac);
3587
3588 auth_cfg = pdev->reg.auth_cfg_hmac_sha1;
3589 key_reg = 16;
3590 iv_reg = 5;
3591
3592 /* clear status register */
3593 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
3594 0, NULL);
3595
3596 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3597 pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
3598 break;
3599 case QCE_HASH_SHA256_HMAC:
3600 cmdlistptr->auth_sha256_hmac.cmdlist = (uintptr_t)ce_vaddr;
3601 pcl_info = &(cmdlistptr->auth_sha256_hmac);
3602
3603 auth_cfg = pdev->reg.auth_cfg_hmac_sha256;
3604 key_reg = 16;
3605 iv_reg = 8;
3606
3607 /* clear status register */
3608 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
3609 NULL);
3610
3611 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3612 pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
3613 /* 1 dummy write */
3614 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
3615 0, NULL);
3616 break;
3617 case QCE_HASH_AES_CMAC:
3618		if (key_128) {
3619 cmdlistptr->auth_aes_128_cmac.cmdlist =
3620 (uintptr_t)ce_vaddr;
3621 pcl_info = &(cmdlistptr->auth_aes_128_cmac);
3622
3623 auth_cfg = pdev->reg.auth_cfg_cmac_128;
3624 key_reg = 4;
3625 } else {
3626 cmdlistptr->auth_aes_256_cmac.cmdlist =
3627 (uintptr_t)ce_vaddr;
3628 pcl_info = &(cmdlistptr->auth_aes_256_cmac);
3629
3630 auth_cfg = pdev->reg.auth_cfg_cmac_256;
3631 key_reg = 8;
3632 }
3633
3634 /* clear status register */
3635 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
3636 NULL);
3637
3638 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3639 pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
3640 /* 1 dummy write */
3641 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
3642 0, NULL);
3643 break;
3644 default:
3645		pr_err("Unknown algorithm %d received, exiting now\n", alg);
3646		return -EINVAL;
3648 }
3649
3650 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
3651 &pcl_info->seg_size);
3652 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
3653 &pcl_info->encr_seg_cfg);
3654 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
3655 auth_cfg, &pcl_info->auth_seg_cfg);
3656 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
3657 &pcl_info->auth_seg_size);
3658 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
3659 &pcl_info->auth_seg_start);
3660
3661 if (alg == QCE_HASH_AES_CMAC) {
3662 /* reset auth iv, bytecount and key registers */
3663 for (i = 0; i < 16; i++)
3664 qce_add_cmd_element(pdev, &ce_vaddr,
3665 (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
3666 0, NULL);
3667 for (i = 0; i < 16; i++)
3668 qce_add_cmd_element(pdev, &ce_vaddr,
3669 (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
3670 0, NULL);
3671 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
3672 0, NULL);
3673 } else {
3674 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
3675 &pcl_info->auth_iv);
3676 for (i = 1; i < iv_reg; i++)
3677 qce_add_cmd_element(pdev, &ce_vaddr,
3678 (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
3679 0, NULL);
3680 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
3681 0, &pcl_info->auth_bytecount);
3682 }
3683 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
3684
3685 if (key_reg) {
3686 qce_add_cmd_element(pdev, &ce_vaddr,
3687 CRYPTO_AUTH_KEY0_REG, 0, &pcl_info->auth_key);
3688 for (i = 1; i < key_reg; i++)
3689 qce_add_cmd_element(pdev, &ce_vaddr,
3690 (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
3691 0, NULL);
3692 }
3693 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3694 pdev->reg.crypto_cfg_le, NULL);
3695
3696 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
3697 ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
3698 (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
3699
3700 pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
3701 *pvaddr = (unsigned char *) ce_vaddr;
3702
3703 return 0;
3704}
3705
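/*
 * AEAD (cipher plus HMAC) command lists. The cipher key width is
 * key_size / sizeof(uint32_t) words (e.g. AES-256 -> 8 words), the
 * HMAC key always occupies SHA_HMAC_KEY_SIZE worth of AUTH_KEYn
 * words, and the auth IV is 5 words for SHA-1 or 8 words for SHA-256.
 */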
3706static int _setup_aead_cmdlistptrs(struct qce_device *pdev,
3707 int cri_index,
3708 unsigned char **pvaddr,
3709 uint32_t alg,
3710 uint32_t mode,
3711 uint32_t key_size,
3712 bool sha1)
3713{
3714 struct sps_command_element *ce_vaddr;
3715 uintptr_t ce_vaddr_start;
3716 struct qce_cmdlistptr_ops *cmdlistptr;
3717 struct qce_cmdlist_info *pcl_info = NULL;
3718 uint32_t key_reg;
3719 uint32_t iv_reg;
3720 uint32_t i;
3721 uint32_t enciv_in_word;
3722 uint32_t encr_cfg;
3723
3724 cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
3725 *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
3726 pdev->ce_bam_info.ce_burst_size);
3727
3728 ce_vaddr_start = (uintptr_t)(*pvaddr);
3729 ce_vaddr = (struct sps_command_element *)(*pvaddr);
3730
3731 switch (alg) {
3732
3733 case CIPHER_ALG_DES:
3734
3735 switch (mode) {
3736
3737 case QCE_MODE_CBC:
3738 if (sha1) {
3739 cmdlistptr->aead_hmac_sha1_cbc_des.cmdlist =
3740 (uintptr_t)ce_vaddr;
3741 pcl_info = &(cmdlistptr->
3742 aead_hmac_sha1_cbc_des);
3743 } else {
3744 cmdlistptr->aead_hmac_sha256_cbc_des.cmdlist =
3745 (uintptr_t)ce_vaddr;
3746 pcl_info = &(cmdlistptr->
3747 aead_hmac_sha256_cbc_des);
3748 }
3749 encr_cfg = pdev->reg.encr_cfg_des_cbc;
3750 break;
3751 default:
3752 return -EINVAL;
3753	}
3754
3755 enciv_in_word = 2;
3756
3757 break;
3758
3759 case CIPHER_ALG_3DES:
3760 switch (mode) {
3761
3762 case QCE_MODE_CBC:
3763 if (sha1) {
3764 cmdlistptr->aead_hmac_sha1_cbc_3des.cmdlist =
3765 (uintptr_t)ce_vaddr;
3766 pcl_info = &(cmdlistptr->
3767 aead_hmac_sha1_cbc_3des);
3768 } else {
3769 cmdlistptr->aead_hmac_sha256_cbc_3des.cmdlist =
3770 (uintptr_t)ce_vaddr;
3771 pcl_info = &(cmdlistptr->
3772 aead_hmac_sha256_cbc_3des);
3773 }
3774 encr_cfg = pdev->reg.encr_cfg_3des_cbc;
3775 break;
3776 default:
3777 return -EINVAL;
3778	}
3779
3780 enciv_in_word = 2;
3781
3782 break;
3783
3784 case CIPHER_ALG_AES:
3785 switch (mode) {
3786
3787 case QCE_MODE_CBC:
3788 if (key_size == AES128_KEY_SIZE) {
3789 if (sha1) {
3790 cmdlistptr->
3791 aead_hmac_sha1_cbc_aes_128.
3792 cmdlist = (uintptr_t)ce_vaddr;
3793 pcl_info = &(cmdlistptr->
3794 aead_hmac_sha1_cbc_aes_128);
3795 } else {
3796 cmdlistptr->
3797 aead_hmac_sha256_cbc_aes_128.
3798 cmdlist = (uintptr_t)ce_vaddr;
3799 pcl_info = &(cmdlistptr->
3800 aead_hmac_sha256_cbc_aes_128);
3801 }
3802 encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
3803 } else if (key_size == AES256_KEY_SIZE) {
3804 if (sha1) {
3805 cmdlistptr->
3806 aead_hmac_sha1_cbc_aes_256.
3807 cmdlist = (uintptr_t)ce_vaddr;
3808 pcl_info = &(cmdlistptr->
3809 aead_hmac_sha1_cbc_aes_256);
3810 } else {
3811 cmdlistptr->
3812 aead_hmac_sha256_cbc_aes_256.
3813 cmdlist = (uintptr_t)ce_vaddr;
3814 pcl_info = &(cmdlistptr->
3815 aead_hmac_sha256_cbc_aes_256);
3816 }
3817 encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
3818 } else {
3819 return -EINVAL;
3820 }
3821 break;
3822 default:
3823 return -EINVAL;
3824	}
3825
3826 enciv_in_word = 4;
3827
3828 break;
3829
3830 default:
3831 return -EINVAL;
3832	}
3833
3834
3835 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
3836
3837 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3838 pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
3839
3840
3841 key_reg = key_size/sizeof(uint32_t);
3842 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
3843 &pcl_info->encr_key);
3844 for (i = 1; i < key_reg; i++)
3845 qce_add_cmd_element(pdev, &ce_vaddr,
3846 (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
3847 0, NULL);
3848
3849 if (mode != QCE_MODE_ECB) {
3850 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
3851 &pcl_info->encr_cntr_iv);
3852 for (i = 1; i < enciv_in_word; i++)
3853 qce_add_cmd_element(pdev, &ce_vaddr,
3854 (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
3855 0, NULL);
3856	}
3857
3858 if (sha1)
3859 iv_reg = 5;
3860 else
3861 iv_reg = 8;
3862 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
3863 &pcl_info->auth_iv);
3864 for (i = 1; i < iv_reg; i++)
3865 qce_add_cmd_element(pdev, &ce_vaddr,
3866 (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
3867 0, NULL);
3868
3869 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
3870 0, &pcl_info->auth_bytecount);
3871 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
3872
3873 key_reg = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
3874 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
3875 &pcl_info->auth_key);
3876 for (i = 1; i < key_reg; i++)
3877 qce_add_cmd_element(pdev, &ce_vaddr,
3878 (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)), 0, NULL);
3879
3880 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
3881 &pcl_info->seg_size);
3882
3883 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
3884 &pcl_info->encr_seg_cfg);
3885 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
3886 &pcl_info->encr_seg_size);
3887 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
3888 &pcl_info->encr_seg_start);
3889
3890	if (sha1)
3891		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
3892			pdev->reg.auth_cfg_aead_sha1_hmac,
3893			&pcl_info->auth_seg_cfg);
3894	else
3895		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
3896			pdev->reg.auth_cfg_aead_sha256_hmac,
3897			&pcl_info->auth_seg_cfg);
3904
3905 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
3906 &pcl_info->auth_seg_size);
3907 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
3908 &pcl_info->auth_seg_start);
3909
3910 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3911 pdev->reg.crypto_cfg_le, NULL);
3912
3913 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
3914 ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
3915 (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
3916
3917 pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
3918 *pvaddr = (unsigned char *) ce_vaddr;
3919 return 0;
3920}
3921
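/*
 * AES-CCM command lists. On top of the usual skeleton these also set
 * the four counter-mask registers to 0xffffffff, program the four
 * NONCE info words, and reserve the CCM internal counter registers
 * (ENCR_CCM_INT_CNTR0..3) that drive the CTR half of CCM.
 */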
3922static int _setup_aead_ccm_cmdlistptrs(struct qce_device *pdev, int cri_index,
3923 unsigned char **pvaddr, bool key_128)
3924{
3925 struct sps_command_element *ce_vaddr;
3926 uintptr_t ce_vaddr_start;
3927 struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info
3928 [cri_index].ce_sps.cmdlistptr;
3929 struct qce_cmdlist_info *pcl_info = NULL;
3930 int i = 0;
3931 uint32_t encr_cfg = 0;
3932 uint32_t auth_cfg = 0;
3933 uint32_t key_reg = 0;
3934
3935 *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
3936 pdev->ce_bam_info.ce_burst_size);
3937 ce_vaddr_start = (uintptr_t)(*pvaddr);
3938 ce_vaddr = (struct sps_command_element *)(*pvaddr);
3939
3940 /*
3941 * Designate chunks of the allocated memory to various
3942 * command list pointers related to aead operations
3943	 * defined in the ce_cmdlistptrs_ops structure.
3944 */
3945	if (key_128) {
3946 cmdlistptr->aead_aes_128_ccm.cmdlist =
3947 (uintptr_t)ce_vaddr;
3948 pcl_info = &(cmdlistptr->aead_aes_128_ccm);
3949
3950 auth_cfg = pdev->reg.auth_cfg_aes_ccm_128;
3951 encr_cfg = pdev->reg.encr_cfg_aes_ccm_128;
3952 key_reg = 4;
3953 } else {
3954
3955 cmdlistptr->aead_aes_256_ccm.cmdlist =
3956 (uintptr_t)ce_vaddr;
3957 pcl_info = &(cmdlistptr->aead_aes_256_ccm);
3958
3959 auth_cfg = pdev->reg.auth_cfg_aes_ccm_256;
3960 encr_cfg = pdev->reg.encr_cfg_aes_ccm_256;
3961
3962 key_reg = 8;
3963 }
3964
3965 /* clear status register */
3966 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
3967
3968 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
3969 pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
3970
3971 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0, NULL);
3972 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
3973 NULL);
3974 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
3975 &pcl_info->seg_size);
3976 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
3977 encr_cfg, &pcl_info->encr_seg_cfg);
3978 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
3979 &pcl_info->encr_seg_size);
3980 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
3981 &pcl_info->encr_seg_start);
3982 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
3983 (uint32_t)0xffffffff, &pcl_info->encr_mask);
3984 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
3985 (uint32_t)0xffffffff, NULL);
3986 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
3987 (uint32_t)0xffffffff, NULL);
3988 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
3989 (uint32_t)0xffffffff, NULL);
3990 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
3991 auth_cfg, &pcl_info->auth_seg_cfg);
3992 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
3993 &pcl_info->auth_seg_size);
3994 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
3995 &pcl_info->auth_seg_start);
3996 /* reset auth iv, bytecount and key registers */
3997 for (i = 0; i < 8; i++)
3998 qce_add_cmd_element(pdev, &ce_vaddr,
3999 (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
4000 0, NULL);
4001 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
4002 0, NULL);
4003 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG,
4004 0, NULL);
4005 for (i = 0; i < 16; i++)
4006 qce_add_cmd_element(pdev, &ce_vaddr,
4007 (CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
4008 0, NULL);
4009 /* set auth key */
4010 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
4011 &pcl_info->auth_key);
4012 for (i = 1; i < key_reg; i++)
4013 qce_add_cmd_element(pdev, &ce_vaddr,
4014 (CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
4015 0, NULL);
4016 /* set NONCE info */
4017 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_INFO_NONCE0_REG, 0,
4018 &pcl_info->auth_nonce_info);
4019 for (i = 1; i < 4; i++)
4020 qce_add_cmd_element(pdev, &ce_vaddr,
4021 (CRYPTO_AUTH_INFO_NONCE0_REG +
4022 i * sizeof(uint32_t)), 0, NULL);
4023
4024 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
4025 &pcl_info->encr_key);
4026 for (i = 1; i < key_reg; i++)
4027 qce_add_cmd_element(pdev, &ce_vaddr,
4028 (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
4029 0, NULL);
4030 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
4031 &pcl_info->encr_cntr_iv);
4032 for (i = 1; i < 4; i++)
4033 qce_add_cmd_element(pdev, &ce_vaddr,
4034 (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
4035 0, NULL);
4036 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_CCM_INT_CNTR0_REG, 0,
4037 &pcl_info->encr_ccm_cntr_iv);
4038 for (i = 1; i < 4; i++)
4039 qce_add_cmd_element(pdev, &ce_vaddr,
4040 (CRYPTO_ENCR_CCM_INT_CNTR0_REG + i * sizeof(uint32_t)),
4041 0, NULL);
4042
4043 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
4044 pdev->reg.crypto_cfg_le, NULL);
4045
4046 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
4047 ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
4048 (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
4049
4050 pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
4051 *pvaddr = (unsigned char *) ce_vaddr;
4052
4053 return 0;
4054}
4055
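/*
 * F8 is the OTA (over-the-air) confidentiality algorithm. Both the
 * Kasumi and Snow3G variants take a fixed 128-bit key (key_reg = 4
 * words) and differ only in the encr_cfg value written below.
 */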
4056static int _setup_f8_cmdlistptrs(struct qce_device *pdev, int cri_index,
4057 unsigned char **pvaddr, enum qce_ota_algo_enum alg)
4058{
4059 struct sps_command_element *ce_vaddr;
4060 uintptr_t ce_vaddr_start;
4061 struct qce_cmdlistptr_ops *cmdlistptr;
4062 struct qce_cmdlist_info *pcl_info = NULL;
4063 int i = 0;
4064 uint32_t encr_cfg = 0;
4065 uint32_t key_reg = 4;
4066
4067 cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
4068 *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
4069 pdev->ce_bam_info.ce_burst_size);
4070 ce_vaddr = (struct sps_command_element *)(*pvaddr);
4071 ce_vaddr_start = (uintptr_t)(*pvaddr);
4072
4073 /*
4074 * Designate chunks of the allocated memory to various
4075 * command list pointers related to f8 cipher algorithm defined
4076	 * in the ce_cmdlistptrs_ops structure.
4077 */
4078
4079 switch (alg) {
4080 case QCE_OTA_ALGO_KASUMI:
4081 cmdlistptr->f8_kasumi.cmdlist = (uintptr_t)ce_vaddr;
4082 pcl_info = &(cmdlistptr->f8_kasumi);
4083 encr_cfg = pdev->reg.encr_cfg_kasumi;
4084 break;
4085
4086 case QCE_OTA_ALGO_SNOW3G:
4087 default:
4088 cmdlistptr->f8_snow3g.cmdlist = (uintptr_t)ce_vaddr;
4089 pcl_info = &(cmdlistptr->f8_snow3g);
4090 encr_cfg = pdev->reg.encr_cfg_snow3g;
4091 break;
4092 }
4093 /* clear status register */
4094 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
4095 0, NULL);
4096 /* set config to big endian */
4097 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
4098 pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
4099
4100 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
4101 &pcl_info->seg_size);
4102
4103 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
4104 &pcl_info->encr_seg_cfg);
4105 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
4106 &pcl_info->encr_seg_size);
4107 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
4108 &pcl_info->encr_seg_start);
4109
4110 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
4111 &pcl_info->auth_seg_cfg);
4112
4113 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
4114 0, &pcl_info->auth_seg_size);
4115 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
4116 0, &pcl_info->auth_seg_start);
4117
4118 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
4119 &pcl_info->encr_key);
4120 for (i = 1; i < key_reg; i++)
4121 qce_add_cmd_element(pdev, &ce_vaddr,
4122 (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
4123 0, NULL);
4124
4125 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
4126 &pcl_info->encr_cntr_iv);
4127 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
4128 NULL);
4129 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
4130 pdev->reg.crypto_cfg_le, NULL);
4131
4132 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
4133 ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
4134 (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
4135
4136 pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
4137 *pvaddr = (unsigned char *) ce_vaddr;
4138
4139 return 0;
4140}
4141
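/*
 * F9 is the matching OTA integrity (MAC) algorithm. Only the auth
 * path is programmed: ENCR_SEG_CFG is written as zero and five
 * AUTH_IVn words (iv_reg = 5) carry the per-request inputs.
 */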
4142static int _setup_f9_cmdlistptrs(struct qce_device *pdev, int cri_index,
4143 unsigned char **pvaddr, enum qce_ota_algo_enum alg)
4144{
4145 struct sps_command_element *ce_vaddr;
4146 uintptr_t ce_vaddr_start;
4147 struct qce_cmdlistptr_ops *cmdlistptr;
4148 struct qce_cmdlist_info *pcl_info = NULL;
4149 int i = 0;
4150 uint32_t auth_cfg = 0;
4151 uint32_t iv_reg = 0;
4152
4153 cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
4154 *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
4155 pdev->ce_bam_info.ce_burst_size);
4156 ce_vaddr_start = (uintptr_t)(*pvaddr);
4157 ce_vaddr = (struct sps_command_element *)(*pvaddr);
4158
4159 /*
4160 * Designate chunks of the allocated memory to various
4161 * command list pointers related to authentication operations
4162	 * defined in the ce_cmdlistptrs_ops structure.
4163 */
4164 switch (alg) {
4165 case QCE_OTA_ALGO_KASUMI:
4166 cmdlistptr->f9_kasumi.cmdlist = (uintptr_t)ce_vaddr;
4167 pcl_info = &(cmdlistptr->f9_kasumi);
4168 auth_cfg = pdev->reg.auth_cfg_kasumi;
4169 break;
4170
4171 case QCE_OTA_ALGO_SNOW3G:
4172 default:
4173 cmdlistptr->f9_snow3g.cmdlist = (uintptr_t)ce_vaddr;
4174 pcl_info = &(cmdlistptr->f9_snow3g);
4175 auth_cfg = pdev->reg.auth_cfg_snow3g;
4176	}
4177
4178 /* clear status register */
4179 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
4180 0, NULL);
4181 /* set config to big endian */
4182 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
4183 pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
4184
4185 iv_reg = 5;
4186
4187 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
4188 &pcl_info->seg_size);
4189
4190 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
4191 &pcl_info->encr_seg_cfg);
4192
4193 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
4194 auth_cfg, &pcl_info->auth_seg_cfg);
4195 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
4196 &pcl_info->auth_seg_size);
4197 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
4198 &pcl_info->auth_seg_start);
4199
4200 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
4201 &pcl_info->auth_iv);
4202 for (i = 1; i < iv_reg; i++) {
4203 qce_add_cmd_element(pdev, &ce_vaddr,
4204 (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
4205 0, NULL);
4206 }
4207 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
4208 0, &pcl_info->auth_bytecount);
4209 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
4210
4211 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
4212 pdev->reg.crypto_cfg_le, NULL);
4213
4214 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
4215 ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
4216 (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
4217
4218 pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
4219 *pvaddr = (unsigned char *) ce_vaddr;
4220
4221 return 0;
4222}
4223
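/*
 * The unlock command list is a single write of CRYPTO_CONFIG_RESET to
 * CRYPTO_CONFIG_REG. It is chained with SPS_IOVEC_FLAG_UNLOCK behind
 * a request to drop the pipe-pair lock taken with SPS_IOVEC_FLAG_LOCK.
 */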
4224static int _setup_unlock_pipe_cmdlistptrs(struct qce_device *pdev,
4225 int cri_index, unsigned char **pvaddr)
4226{
4227 struct sps_command_element *ce_vaddr;
4228 uintptr_t ce_vaddr_start = (uintptr_t)(*pvaddr);
4229 struct qce_cmdlistptr_ops *cmdlistptr;
4230 struct qce_cmdlist_info *pcl_info = NULL;
4231
4232 cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
4233 *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
4234 pdev->ce_bam_info.ce_burst_size);
4235 ce_vaddr = (struct sps_command_element *)(*pvaddr);
4236 cmdlistptr->unlock_all_pipes.cmdlist = (uintptr_t)ce_vaddr;
4237 pcl_info = &(cmdlistptr->unlock_all_pipes);
4238
4239 /*
4240	 * Designate a chunk of the allocated memory to the command
4241	 * list used to unlock the pipes.
4242 */
4243 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
4244 CRYPTO_CONFIG_RESET, NULL);
4245 pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
4246 *pvaddr = (unsigned char *) ce_vaddr;
4247
4248 return 0;
4249}
4250
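/*
 * qce_setup_cmdlistptrs() lays out every command list this driver can
 * need, back to back in the per-request slab: eight AES cipher
 * variants, four DES/3DES variants, six hash/HMAC/CMAC variants,
 * eight AEAD variants, the null cipher, two CCM variants, four OTA
 * (f8/f9) lists and the unlock list.
 */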
4251static int qce_setup_cmdlistptrs(struct qce_device *pdev, int cri_index,
4252 unsigned char **pvaddr)
4253{
4254 struct sps_command_element *ce_vaddr =
4255 (struct sps_command_element *)(*pvaddr);
4256 /*
4257 * Designate chunks of the allocated memory to various
4258 * command list pointers related to operations defined
4259	 * in the ce_cmdlistptrs_ops structure.
4260 */
4261 ce_vaddr =
4262 (struct sps_command_element *)ALIGN(((uintptr_t) ce_vaddr),
4263 pdev->ce_bam_info.ce_burst_size);
4264 *pvaddr = (unsigned char *) ce_vaddr;
4265
4266 _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC,
4267 true);
4268 _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR,
4269 true);
4270 _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB,
4271 true);
4272 _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS,
4273 true);
4274 _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC,
4275 false);
4276 _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR,
4277 false);
4278 _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB,
4279 false);
4280 _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS,
4281 false);
4282
4283 _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
4284 true);
4285 _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
4286 false);
4287 _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
4288 true);
4289 _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
4290 false);
4291
4292 _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1,
4293 false);
4294 _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256,
4295 false);
4296
4297 _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1_HMAC,
4298 false);
4299 _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256_HMAC,
4300 false);
4301
4302 _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC,
4303 true);
4304 _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC,
4305 false);
4306
4307 _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
4308 QCE_MODE_CBC, DES_KEY_SIZE, true);
4309 _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
4310 QCE_MODE_CBC, DES3_EDE_KEY_SIZE, true);
4311 _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
4312 QCE_MODE_CBC, AES128_KEY_SIZE, true);
4313 _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
4314 QCE_MODE_CBC, AES256_KEY_SIZE, true);
4315 _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
4316 QCE_MODE_CBC, DES_KEY_SIZE, false);
4317 _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
4318 QCE_MODE_CBC, DES3_EDE_KEY_SIZE, false);
4319 _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
4320 QCE_MODE_CBC, AES128_KEY_SIZE, false);
4321 _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
4322 QCE_MODE_CBC, AES256_KEY_SIZE, false);
4323
4324 _setup_cipher_null_cmdlistptrs(pdev, cri_index, pvaddr);
4325
4326 _setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, true);
4327 _setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, false);
4328 _setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI);
4329 _setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G);
4330 _setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI);
4331 _setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G);
4332 _setup_unlock_pipe_cmdlistptrs(pdev, cri_index, pvaddr);
4333
4334 return 0;
4335}
4336
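/*
 * Illustrative sketch only: qce_iovec_bytes_per_req() is a name
 * introduced here for illustration and is not called anywhere. It
 * shows the fixed iovec footprint that qce_setup_ce_sps_data() below
 * carves out of iovec_vmem for each of the MAX_QCE_ALLOC_BAM_REQ
 * request slots: one in_transfer ring plus one out_transfer ring.
 */
static inline size_t __maybe_unused qce_iovec_bytes_per_req(void)
{
	/* in_transfer + out_transfer, QCE_MAX_NUM_DSCR iovecs each */
	return 2 * TOTAL_IOVEC_SPACE_PER_PIPE;
}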
4337static int qce_setup_ce_sps_data(struct qce_device *pce_dev)
4338{
4339 unsigned char *vaddr;
4340 int i;
4341 unsigned char *iovec_vaddr;
4342 int iovec_memsize;
4343
4344 vaddr = pce_dev->coh_vmem;
4345 vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
4346 pce_dev->ce_bam_info.ce_burst_size);
4347 iovec_vaddr = pce_dev->iovec_vmem;
4348 iovec_memsize = pce_dev->iovec_memsize;
4349 for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++) {
4350 /* Allow for 256 descriptor (cmd and data) entries per pipe */
4351	/* Allow for 512 (QCE_MAX_NUM_DSCR) descriptor (cmd and data) entries per pipe */
4352 (struct sps_iovec *)iovec_vaddr;
4353 pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec_phys =
4354 virt_to_phys(pce_dev->ce_request_info[i].
4355 ce_sps.in_transfer.iovec);
4356 iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE;
4357 iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE;
4358 pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec =
4359 (struct sps_iovec *)iovec_vaddr;
4360 pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec_phys =
4361 virt_to_phys(pce_dev->ce_request_info[i].
4362 ce_sps.out_transfer.iovec);
4363 iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE;
4364 iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE;
4365 if (pce_dev->support_cmd_dscr)
4366 qce_setup_cmdlistptrs(pce_dev, i, &vaddr);
4367 vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
4368 pce_dev->ce_bam_info.ce_burst_size);
4369 pce_dev->ce_request_info[i].ce_sps.result_dump =
4370 (uintptr_t)vaddr;
4371 pce_dev->ce_request_info[i].ce_sps.result_dump_phy =
4372 GET_PHYS_ADDR((uintptr_t)vaddr);
4373 pce_dev->ce_request_info[i].ce_sps.result =
4374 (struct ce_result_dump_format *)vaddr;
4375 vaddr += CRYPTO_RESULT_DUMP_SIZE;
4376
4377 pce_dev->ce_request_info[i].ce_sps.result_dump_null =
4378 (uintptr_t)vaddr;
4379 pce_dev->ce_request_info[i].ce_sps.result_dump_null_phy =
4380 GET_PHYS_ADDR((uintptr_t)vaddr);
4381 pce_dev->ce_request_info[i].ce_sps.result_null =
4382 (struct ce_result_dump_format *)vaddr;
4383 vaddr += CRYPTO_RESULT_DUMP_SIZE;
4384
4385 pce_dev->ce_request_info[i].ce_sps.ignore_buffer =
4386 (uintptr_t)vaddr;
4387 vaddr += pce_dev->ce_bam_info.ce_burst_size * 2;
4388 }
4389 if ((vaddr - pce_dev->coh_vmem) > pce_dev->memsize ||
4390 iovec_memsize < 0)
4391		panic("qce50: Not enough coherent memory. Allocated %x, need %lx\n",
4392 pce_dev->memsize, (uintptr_t)vaddr -
4393 (uintptr_t)pce_dev->coh_vmem);
4394 return 0;
4395}
4396
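/*
 * Illustrative sketch only: qce_crypto_cfg_be() is a name introduced
 * here for illustration and is not called anywhere. It mirrors the
 * packing qce_init_ce_cfg_val() below performs. Assuming the common
 * 64-byte burst size, beats = (64 >> 3) - 1 = 7 lands in the
 * CRYPTO_REQ_SIZE field, the three interrupt-mask bits are set, and
 * the pipe pair index selects the pipe set; crypto_cfg_le is the same
 * word with the little-endian bit ORed in.
 */
static inline uint32_t __maybe_unused qce_crypto_cfg_be(uint32_t burst,
						uint32_t pipe_pair)
{
	uint32_t beats = (burst >> 3) - 1;

	return (beats << CRYPTO_REQ_SIZE) |
		BIT(CRYPTO_MASK_DOUT_INTR) | BIT(CRYPTO_MASK_DIN_INTR) |
		BIT(CRYPTO_MASK_OP_DONE_INTR) |
		(pipe_pair << CRYPTO_PIPE_SET_SELECT);
}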
4397static int qce_init_ce_cfg_val(struct qce_device *pce_dev)
4398{
4399 uint32_t beats = (pce_dev->ce_bam_info.ce_burst_size >> 3) - 1;
4400 uint32_t pipe_pair = pce_dev->ce_bam_info.pipe_pair_index;
4401
4402 pce_dev->reg.crypto_cfg_be = (beats << CRYPTO_REQ_SIZE) |
4403 BIT(CRYPTO_MASK_DOUT_INTR) | BIT(CRYPTO_MASK_DIN_INTR) |
4404 BIT(CRYPTO_MASK_OP_DONE_INTR) | (0 << CRYPTO_HIGH_SPD_EN_N) |
4405 (pipe_pair << CRYPTO_PIPE_SET_SELECT);
4406
4407 pce_dev->reg.crypto_cfg_le =
4408 (pce_dev->reg.crypto_cfg_be | CRYPTO_LITTLE_ENDIAN_MASK);
4409
4410 /* Initialize encr_cfg register for AES alg */
4411 pce_dev->reg.encr_cfg_aes_cbc_128 =
4412 (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
4413 (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
4414 (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
4415
4416 pce_dev->reg.encr_cfg_aes_cbc_256 =
4417 (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
4418 (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
4419 (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
4420
4421 pce_dev->reg.encr_cfg_aes_ctr_128 =
4422 (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
4423 (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
4424 (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
4425
4426 pce_dev->reg.encr_cfg_aes_ctr_256 =
4427 (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
4428 (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
4429 (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
4430
4431 pce_dev->reg.encr_cfg_aes_xts_128 =
4432 (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
4433 (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
4434 (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
4435
4436 pce_dev->reg.encr_cfg_aes_xts_256 =
4437 (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
4438 (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
4439 (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
4440
4441 pce_dev->reg.encr_cfg_aes_ecb_128 =
4442 (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
4443 (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
4444 (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
4445
4446 pce_dev->reg.encr_cfg_aes_ecb_256 =
4447 (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
4448 (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
4449 (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
4450
4451 pce_dev->reg.encr_cfg_aes_ccm_128 =
4452 (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
4453 (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
4454 (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE)|
4455 (CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
4456
4457 pce_dev->reg.encr_cfg_aes_ccm_256 =
4458 (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
4459 (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
4460 (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
4461 (CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
4462
4463 /* Initialize encr_cfg register for DES alg */
4464 pce_dev->reg.encr_cfg_des_ecb =
4465 (CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
4466 (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
4467 (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
4468
4469 pce_dev->reg.encr_cfg_des_cbc =
4470 (CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
4471 (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
4472 (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
4473
4474 pce_dev->reg.encr_cfg_3des_ecb =
4475 (CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
4476 (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
4477 (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
4478
4479 pce_dev->reg.encr_cfg_3des_cbc =
4480 (CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
4481 (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
4482 (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
4483
4484 /* Initialize encr_cfg register for kasumi/snow3g alg */
4485 pce_dev->reg.encr_cfg_kasumi =
4486 (CRYPTO_ENCR_ALG_KASUMI << CRYPTO_ENCR_ALG);
4487
4488 pce_dev->reg.encr_cfg_snow3g =
4489 (CRYPTO_ENCR_ALG_SNOW_3G << CRYPTO_ENCR_ALG);
4490
4491 /* Initialize auth_cfg register for CMAC alg */
4492 pce_dev->reg.auth_cfg_cmac_128 =
4493 (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
4494 (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
4495 (CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
4496 (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
4497 (CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE);
4498
4499 pce_dev->reg.auth_cfg_cmac_256 =
4500 (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
4501 (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
4502 (CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
4503 (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
4504 (CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE);
4505
4506 /* Initialize auth_cfg register for HMAC alg */
4507 pce_dev->reg.auth_cfg_hmac_sha1 =
4508 (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
4509 (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
4510 (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
4511 (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
4512
4513 pce_dev->reg.auth_cfg_hmac_sha256 =
4514 (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
4515 (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
4516 (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
4517 (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
4518
4519 /* Initialize auth_cfg register for SHA1/256 alg */
4520 pce_dev->reg.auth_cfg_sha1 =
4521 (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
4522 (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
4523 (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
4524 (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
4525
4526 pce_dev->reg.auth_cfg_sha256 =
4527 (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
4528 (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
4529 (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
4530 (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
4531
4532 /* Initialize auth_cfg register for AEAD alg */
4533 pce_dev->reg.auth_cfg_aead_sha1_hmac =
4534 (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
4535 (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
4536 (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
4537 (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
4538
4539 pce_dev->reg.auth_cfg_aead_sha256_hmac =
4540 (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
4541 (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
4542 (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
4543 (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
4544
4545 pce_dev->reg.auth_cfg_aes_ccm_128 =
4546 (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
4547 (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
4548 (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
4549 (CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE) |
4550 ((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
4551 pce_dev->reg.auth_cfg_aes_ccm_128 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
4552
4553 pce_dev->reg.auth_cfg_aes_ccm_256 =
4554 (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
4555 (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
4556 (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
4557 (CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE) |
4558 ((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
4559 pce_dev->reg.auth_cfg_aes_ccm_256 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
4560
4561 /* Initialize auth_cfg register for kasumi/snow3g */
4562 pce_dev->reg.auth_cfg_kasumi =
4563 (CRYPTO_AUTH_ALG_KASUMI << CRYPTO_AUTH_ALG) |
4564 BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
4565 pce_dev->reg.auth_cfg_snow3g =
4566 (CRYPTO_AUTH_ALG_SNOW3G << CRYPTO_AUTH_ALG) |
4567 BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
4568 return 0;
4569}
4570
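/*
 * CCM decrypt get-around: on targets where no_get_around is set but
 * no_ccm_mac_status_get_around is not, a decrypt request is followed
 * by the null-cipher command list plus one burst-size pass-through
 * buffer on the consumer pipe, with a matching ignore buffer and a
 * second (null) result dump on the producer pipe, so the MAC status
 * of the real request can be collected reliably.
 */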
4571static void _qce_ccm_get_around_input(struct qce_device *pce_dev,
4572 struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir)
4573{
4574 struct qce_cmdlist_info *cmdlistinfo;
4575 struct ce_sps_data *pce_sps_data;
4576
4577 pce_sps_data = &preq_info->ce_sps;
4578 if ((dir == QCE_DECRYPT) && pce_dev->no_get_around &&
4579 !(pce_dev->no_ccm_mac_status_get_around)) {
4580 cmdlistinfo = &pce_sps_data->cmdlistptr.cipher_null;
4581 _qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
4582 &pce_sps_data->in_transfer);
4583 _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
4584 pce_dev->ce_bam_info.ce_burst_size,
4585 &pce_sps_data->in_transfer);
4586 _qce_set_flag(&pce_sps_data->in_transfer,
4587 SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD);
4588 }
4589}
4590
4591static void _qce_ccm_get_around_output(struct qce_device *pce_dev,
4592 struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir)
4593{
4594 struct ce_sps_data *pce_sps_data;
4595
4596 pce_sps_data = &preq_info->ce_sps;
4597
4598 if ((dir == QCE_DECRYPT) && pce_dev->no_get_around &&
4599 !(pce_dev->no_ccm_mac_status_get_around)) {
4600 _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
4601 pce_dev->ce_bam_info.ce_burst_size,
4602 &pce_sps_data->out_transfer);
4603 _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump_null),
4604 CRYPTO_RESULT_DUMP_SIZE, &pce_sps_data->out_transfer);
4605 }
4606}
4607
4608/* QCE_DUMMY_REQ */
4609static void qce_dummy_complete(void *cookie, unsigned char *digest,
4610 unsigned char *authdata, int ret)
4611{
4612 if (!cookie)
4613 pr_err("invalid cookie\n");
4614}
4615
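/*
 * The dummy request claims the reserved DUMMY_REQ_INDEX slot with
 * atomic_xchg() and pushes a throwaway hash through
 * qce_process_sha_req(pce_dev, NULL); it presumably gives the
 * bunch-mode delay timer armed in select_mode() something to hang a
 * completion interrupt on when clients go quiet.
 */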
4616static int qce_dummy_req(struct qce_device *pce_dev)
4617{
4618 int ret = 0;
4619
4620	if (atomic_xchg(&pce_dev->ce_request_info[DUMMY_REQ_INDEX].
4621			in_use, true))
4622		return -EBUSY;
4623 ret = qce_process_sha_req(pce_dev, NULL);
4624 pce_dev->qce_stats.no_of_dummy_reqs++;
4625 return ret;
4626}
4627
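/*
 * Bunch-mode interrupt cadence used by select_mode() below: the
 * number of queued requests allowed between interrupts grows with
 * request size as (req_len >> 7) + 1, capped at SET_INTR_AT_REQ
 * (e.g. a 256-byte request yields a cadence of 3). qce_intr_cadence()
 * is an illustrative helper introduced here and is not called
 * anywhere.
 */
static inline unsigned int __maybe_unused qce_intr_cadence(
						unsigned int req_len)
{
	unsigned int cadence = (req_len >> 7) + 1;

	return (cadence > SET_INTR_AT_REQ) ? SET_INTR_AT_REQ : cadence;
}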
4628static int select_mode(struct qce_device *pce_dev,
4629 struct ce_request_info *preq_info)
4630{
4631 struct ce_sps_data *pce_sps_data = &preq_info->ce_sps;
4632 unsigned int no_of_queued_req;
4633 unsigned int cadence;
4634
4635 if (!pce_dev->no_get_around) {
4636 _qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
4637 return 0;
4638 }
4639
4640 /*
4641	 * claim ownership of the device
4642 */
4643again:
4644 if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_CLIENT)
4645 != QCE_OWNER_NONE) {
4646 ndelay(40);
4647 goto again;
4648 }
4649 no_of_queued_req = atomic_inc_return(&pce_dev->no_of_queued_req);
4650 if (pce_dev->mode == IN_INTERRUPT_MODE) {
4651 if (no_of_queued_req >= MAX_BUNCH_MODE_REQ) {
4652 pce_dev->mode = IN_BUNCH_MODE;
4653 pr_debug("pcedev %d mode switch to BUNCH\n",
4654 pce_dev->dev_no);
4655 _qce_set_flag(&pce_sps_data->out_transfer,
4656 SPS_IOVEC_FLAG_INT);
4657 pce_dev->intr_cadence = 0;
4658 atomic_set(&pce_dev->bunch_cmd_seq, 1);
4659 atomic_set(&pce_dev->last_intr_seq, 1);
4660 mod_timer(&(pce_dev->timer),
4661 (jiffies + DELAY_IN_JIFFIES));
4662 } else {
4663 _qce_set_flag(&pce_sps_data->out_transfer,
4664 SPS_IOVEC_FLAG_INT);
4665 }
4666 } else {
4667 pce_dev->intr_cadence++;
4668 cadence = (preq_info->req_len >> 7) + 1;
4669 if (cadence > SET_INTR_AT_REQ)
4670 cadence = SET_INTR_AT_REQ;
4671 if (pce_dev->intr_cadence < cadence || ((pce_dev->intr_cadence
4672 == cadence) && pce_dev->cadence_flag))
4673 atomic_inc(&pce_dev->bunch_cmd_seq);
4674 else {
4675 _qce_set_flag(&pce_sps_data->out_transfer,
4676 SPS_IOVEC_FLAG_INT);
4677 pce_dev->intr_cadence = 0;
4678 atomic_set(&pce_dev->bunch_cmd_seq, 0);
4679 atomic_set(&pce_dev->last_intr_seq, 0);
4680 pce_dev->cadence_flag = ~pce_dev->cadence_flag;
4681 }
4682 }
4683
4684 return 0;
4685}
4686
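/*
 * MAC padding in _qce_aead_ccm_req() below: on encrypt the engine
 * emits the MAC padded up to a burst-size multiple, so hw_pad_out =
 * ALIGN(authsize, ce_burst_size) - authsize (e.g. an 8-byte MAC with
 * a 64-byte burst leaves 56 pad bytes to route into the ignore
 * buffer); on decrypt the whole authsize tag is steered into the
 * ignore buffer instead.
 */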
4687static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req)
4688{
4689 int rc = 0;
4690 struct qce_device *pce_dev = (struct qce_device *) handle;
4691 struct aead_request *areq = (struct aead_request *) q_req->areq;
4692 uint32_t authsize = q_req->authsize;
4693 uint32_t totallen_in, out_len;
4694 uint32_t hw_pad_out = 0;
4695 int ce_burst_size;
4696 struct qce_cmdlist_info *cmdlistinfo = NULL;
4697 int req_info = -1;
4698 struct ce_request_info *preq_info;
4699 struct ce_sps_data *pce_sps_data;
4700
4701 req_info = qce_alloc_req_info(pce_dev);
4702 if (req_info < 0)
4703 return -EBUSY;
4704 preq_info = &pce_dev->ce_request_info[req_info];
4705 pce_sps_data = &preq_info->ce_sps;
4706
4707 ce_burst_size = pce_dev->ce_bam_info.ce_burst_size;
4708 totallen_in = areq->cryptlen + q_req->assoclen;
4709 if (q_req->dir == QCE_ENCRYPT) {
4710 q_req->cryptlen = areq->cryptlen;
4711 out_len = areq->cryptlen + authsize;
4712 hw_pad_out = ALIGN(authsize, ce_burst_size) - authsize;
4713 } else {
4714 q_req->cryptlen = areq->cryptlen - authsize;
4715 out_len = q_req->cryptlen;
4716 hw_pad_out = authsize;
4717 }
4718
4719 /*
4720	 * For crypto 5.0, which has a burst size alignment requirement
4721	 * for data descriptors, the agent above (qcrypto) prepares the
4722	 * src scatter list with memory starting with the associated
4723	 * data, followed by the data stream to be ciphered.
4724	 * The destination scatter list points to the same data area
4725	 * as the source.
4727 */
4728 if (pce_dev->ce_bam_info.minor_version == 0)
4729 preq_info->src_nents = count_sg(areq->src, totallen_in);
4730 else
4731 preq_info->src_nents = count_sg(areq->src, areq->cryptlen +
4732 areq->assoclen);
4733
4734 if (q_req->assoclen) {
4735 preq_info->assoc_nents = count_sg(q_req->asg, q_req->assoclen);
4736
4737 /* formatted associated data input */
4738 qce_dma_map_sg(pce_dev->pdev, q_req->asg,
4739 preq_info->assoc_nents, DMA_TO_DEVICE);
4740 preq_info->asg = q_req->asg;
4741 } else {
4742 preq_info->assoc_nents = 0;
4743 preq_info->asg = NULL;
4744 }
4745 /* cipher input */
4746 qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
4747 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
4748 DMA_TO_DEVICE);
4749 /* cipher + mac output for encryption */
4750 if (areq->src != areq->dst) {
4751		/*
4752		 * The destination scatter list points to the same data
4753		 * area as the source. Note that on minor version 0 the
4754		 * associated data is passed through at the beginning of
4755		 * the destination area.
4756		 */
4757		preq_info->dst_nents = count_sg(areq->dst,
4758			out_len + areq->assoclen);
4763
4764 qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
4765 DMA_FROM_DEVICE);
4766 } else {
4767 preq_info->dst_nents = preq_info->src_nents;
4768 }
4769
4770 if (pce_dev->support_cmd_dscr) {
4771 cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev, req_info,
4772 q_req);
4773 if (cmdlistinfo == NULL) {
4774 pr_err("Unsupported cipher algorithm %d, mode %d\n",
4775 q_req->alg, q_req->mode);
4776 qce_free_req_info(pce_dev, req_info, false);
4777 return -EINVAL;
4778 }
4779 /* set up crypto device */
4780 rc = _ce_setup_cipher(pce_dev, q_req, totallen_in,
4781 q_req->assoclen, cmdlistinfo);
4782 } else {
4783 /* set up crypto device */
4784 rc = _ce_setup_cipher_direct(pce_dev, q_req, totallen_in,
4785 q_req->assoclen);
4786 }
4787
4788 if (rc < 0)
4789 goto bad;
4790
4791 preq_info->mode = q_req->mode;
4792
4793 /* setup for callback, and issue command to bam */
4794 preq_info->areq = q_req->areq;
4795 preq_info->qce_cb = q_req->qce_cb;
4796 preq_info->dir = q_req->dir;
4797
4798 /* setup xfer type for producer callback handling */
4799 preq_info->xfer_type = QCE_XFER_AEAD;
4800 preq_info->req_len = totallen_in;
4801
4802 _qce_sps_iovec_count_init(pce_dev, req_info);
4803
4804 if (pce_dev->support_cmd_dscr)
4805 _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
4806 &pce_sps_data->in_transfer);
4807
4808 if (pce_dev->ce_bam_info.minor_version == 0) {
		/* CCM with minor version 0 is not handled by this path */
		rc = -EINVAL;
4809		goto bad;
4810 } else {
4811 if (q_req->assoclen && (_qce_sps_add_sg_data(
4812 pce_dev, q_req->asg, q_req->assoclen,
4813 &pce_sps_data->in_transfer)))
4814 goto bad;
4815 if (_qce_sps_add_sg_data_off(pce_dev, areq->src, areq->cryptlen,
4816 areq->assoclen,
4817 &pce_sps_data->in_transfer))
4818 goto bad;
4819 _qce_set_flag(&pce_sps_data->in_transfer,
4820 SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
4821
4822 _qce_ccm_get_around_input(pce_dev, preq_info, q_req->dir);
4823
4824 if (pce_dev->no_get_around)
4825 _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
4826 &pce_sps_data->cmdlistptr.unlock_all_pipes,
4827 &pce_sps_data->in_transfer);
4828
4829		/* Pass through to ignore associated data */
4830 if (_qce_sps_add_data(
4831 GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
4832 q_req->assoclen,
4833 &pce_sps_data->out_transfer))
4834 goto bad;
4835 if (_qce_sps_add_sg_data_off(pce_dev, areq->dst, out_len,
4836 areq->assoclen,
4837 &pce_sps_data->out_transfer))
4838 goto bad;
4839 /* Pass through to ignore hw_pad (padding of the MAC data) */
4840 if (_qce_sps_add_data(
4841 GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
4842 hw_pad_out, &pce_sps_data->out_transfer))
4843 goto bad;
4844 if (pce_dev->no_get_around ||
4845 totallen_in <= SPS_MAX_PKT_SIZE) {
4846 if (_qce_sps_add_data(
4847 GET_PHYS_ADDR(pce_sps_data->result_dump),
4848 CRYPTO_RESULT_DUMP_SIZE,
4849 &pce_sps_data->out_transfer))
4850 goto bad;
4851 pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
4852 } else {
4853 pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
4854 }
4855
4856 _qce_ccm_get_around_output(pce_dev, preq_info, q_req->dir);
4857
4858 select_mode(pce_dev, preq_info);
4859 rc = _qce_sps_transfer(pce_dev, req_info);
4860 cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
4861 }
4862 if (rc)
4863 goto bad;
4864 return 0;
4865
4866bad:
4867 if (preq_info->assoc_nents) {
4868 qce_dma_unmap_sg(pce_dev->pdev, q_req->asg,
4869 preq_info->assoc_nents, DMA_TO_DEVICE);
4870 }
4871 if (preq_info->src_nents) {
4872 qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
4873 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
4874 DMA_TO_DEVICE);
4875 }
4876 if (areq->src != areq->dst) {
4877 qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
4878 DMA_FROM_DEVICE);
4879 }
4880 qce_free_req_info(pce_dev, req_info, false);
4881 return rc;
4882}
4883
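/*
 * Power management: suspend disconnects both BAM pipes; resume zeroes
 * each descriptor FIFO, reconnects both pipes with their saved
 * sps_connect parameters, and re-registers the producer event
 * callback.
 */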
4884static int _qce_suspend(void *handle)
4885{
4886 struct qce_device *pce_dev = (struct qce_device *)handle;
4887 struct sps_pipe *sps_pipe_info;
4888
4889 if (handle == NULL)
4890 return -ENODEV;
4891
4892	sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
4893 sps_disconnect(sps_pipe_info);
4894
4895 sps_pipe_info = pce_dev->ce_bam_info.producer.pipe;
4896 sps_disconnect(sps_pipe_info);
4897
4898	return 0;
4899}
4900
4901static int _qce_resume(void *handle)
4902{
4903 struct qce_device *pce_dev = (struct qce_device *)handle;
4904 struct sps_pipe *sps_pipe_info;
4905 struct sps_connect *sps_connect_info;
4906 int rc;
4907
4908 if (handle == NULL)
4909 return -ENODEV;
4910
4911	sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
4912 sps_connect_info = &pce_dev->ce_bam_info.consumer.connect;
4913 memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
4914 rc = sps_connect(sps_pipe_info, sps_connect_info);
4915 if (rc) {
4916 pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
4917 (uintptr_t)sps_pipe_info, rc);
4918 return rc;
4919 }
4920 sps_pipe_info = pce_dev->ce_bam_info.producer.pipe;
4921 sps_connect_info = &pce_dev->ce_bam_info.producer.connect;
4922 memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
4923 rc = sps_connect(sps_pipe_info, sps_connect_info);
4924 if (rc)
4925 pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
4926 (uintptr_t)sps_pipe_info, rc);
4927
4928 rc = sps_register_event(sps_pipe_info,
4929 &pce_dev->ce_bam_info.producer.event);
4930 if (rc)
4931 pr_err("Producer callback registration failed rc = %d\n", rc);
4932
4933	return rc;
4934}
4935
4936struct qce_pm_table qce_pm_table = {_qce_suspend, _qce_resume};
4937EXPORT_SYMBOL(qce_pm_table);
4938
4939int qce_aead_req(void *handle, struct qce_req *q_req)
4940{
4941 struct qce_device *pce_dev = (struct qce_device *)handle;
4942 struct aead_request *areq;
4943 uint32_t authsize;
4944 struct crypto_aead *aead;
4945 uint32_t ivsize;
4946 uint32_t totallen;
4947 int rc = 0;
4948 struct qce_cmdlist_info *cmdlistinfo = NULL;
4949 int req_info = -1;
4950 struct ce_sps_data *pce_sps_data;
4951 struct ce_request_info *preq_info;
4952
4953 if (q_req->mode == QCE_MODE_CCM)
4954 return _qce_aead_ccm_req(handle, q_req);
4955
4956 req_info = qce_alloc_req_info(pce_dev);
4957 if (req_info < 0)
4958 return -EBUSY;
4959 preq_info = &pce_dev->ce_request_info[req_info];
4960 pce_sps_data = &preq_info->ce_sps;
4961 areq = (struct aead_request *) q_req->areq;
4962 aead = crypto_aead_reqtfm(areq);
4963 ivsize = crypto_aead_ivsize(aead);
4964 q_req->ivsize = ivsize;
4965 authsize = q_req->authsize;
4966 if (q_req->dir == QCE_ENCRYPT)
4967 q_req->cryptlen = areq->cryptlen;
4968 else
4969 q_req->cryptlen = areq->cryptlen - authsize;
4970
4971 if (q_req->cryptlen > UINT_MAX - areq->assoclen) {
4972		pr_err("Integer overflow on total aead req length.\n");
		qce_free_req_info(pce_dev, req_info, false);
4973		return -EINVAL;
4974 }
4975
4976 totallen = q_req->cryptlen + areq->assoclen;
4977
4978 if (pce_dev->support_cmd_dscr) {
4979 cmdlistinfo = _ce_get_aead_cmdlistinfo(pce_dev,
4980 req_info, q_req);
4981 if (cmdlistinfo == NULL) {
4982 pr_err("Unsupported aead ciphering algorithm %d, mode %d, ciphering key length %d, auth digest size %d\n",
4983 q_req->alg, q_req->mode, q_req->encklen,
4984 q_req->authsize);
4985 qce_free_req_info(pce_dev, req_info, false);
4986 return -EINVAL;
4987 }
4988 /* set up crypto device */
4989 rc = _ce_setup_aead(pce_dev, q_req, totallen,
4990 areq->assoclen, cmdlistinfo);
4991 if (rc < 0) {
4992 qce_free_req_info(pce_dev, req_info, false);
4993 return -EINVAL;
4994 }
4995 }
4996
4997 /*
4998	 * For crypto 5.0, which has a burst size alignment requirement
4999	 * for data descriptors, the agent above (qcrypto) prepares the
5000	 * src scatter list with memory starting with the associated
5001	 * data, followed by the iv and the data stream to be ciphered.
5003 */
5004 preq_info->src_nents = count_sg(areq->src, totallen);
5005
5006
5007 /* cipher input */
5008 qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
5009 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
5010 DMA_TO_DEVICE);
5011 /* cipher output for encryption */
5012 if (areq->src != areq->dst) {
5013 preq_info->dst_nents = count_sg(areq->dst, totallen);
5014
5015 qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
5016 DMA_FROM_DEVICE);
5017 }
5018
5019
5020 /* setup for callback, and issue command to bam */
5021 preq_info->areq = q_req->areq;
5022 preq_info->qce_cb = q_req->qce_cb;
5023 preq_info->dir = q_req->dir;
5024 preq_info->asg = NULL;
5025
5026 /* setup xfer type for producer callback handling */
5027 preq_info->xfer_type = QCE_XFER_AEAD;
5028 preq_info->req_len = totallen;
5029
5030 _qce_sps_iovec_count_init(pce_dev, req_info);
5031
5032 if (pce_dev->support_cmd_dscr) {
5033 _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
5034 &pce_sps_data->in_transfer);
5035 } else {
5036 rc = _ce_setup_aead_direct(pce_dev, q_req, totallen,
5037 areq->assoclen);
5038 if (rc)
5039 goto bad;
5040 }
5041
5042 preq_info->mode = q_req->mode;
5043
5044 if (pce_dev->ce_bam_info.minor_version == 0) {
5045 if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen,
5046 &pce_sps_data->in_transfer))
5047 goto bad;
5048
5049 _qce_set_flag(&pce_sps_data->in_transfer,
5050 SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
5051
5052 if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
5053 &pce_sps_data->out_transfer))
5054 goto bad;
5055 if (totallen > SPS_MAX_PKT_SIZE) {
5056 _qce_set_flag(&pce_sps_data->out_transfer,
5057 SPS_IOVEC_FLAG_INT);
5058 pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
5059 } else {
5060 if (_qce_sps_add_data(GET_PHYS_ADDR(
5061 pce_sps_data->result_dump),
5062 CRYPTO_RESULT_DUMP_SIZE,
5063 &pce_sps_data->out_transfer))
5064 goto bad;
5065 _qce_set_flag(&pce_sps_data->out_transfer,
5066 SPS_IOVEC_FLAG_INT);
5067 pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
5068 }
5069 rc = _qce_sps_transfer(pce_dev, req_info);
5070 } else {
5071 if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen,
5072 &pce_sps_data->in_transfer))
5073 goto bad;
5074 _qce_set_flag(&pce_sps_data->in_transfer,
5075 SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
5076
5077 if (pce_dev->no_get_around)
5078 _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
5079 &pce_sps_data->cmdlistptr.unlock_all_pipes,
5080 &pce_sps_data->in_transfer);
5081
5082 if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
5083 &pce_sps_data->out_transfer))
5084 goto bad;
5085
5086 if (pce_dev->no_get_around || totallen <= SPS_MAX_PKT_SIZE) {
5087 if (_qce_sps_add_data(
5088 GET_PHYS_ADDR(pce_sps_data->result_dump),
5089 CRYPTO_RESULT_DUMP_SIZE,
5090 &pce_sps_data->out_transfer))
5091 goto bad;
5092 pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
5093 } else {
5094 pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
5095 }
5096 select_mode(pce_dev, preq_info);
5097 rc = _qce_sps_transfer(pce_dev, req_info);
5098 cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
5099 }
5100 if (rc)
5101 goto bad;
5102 return 0;
5103
5104bad:
5105 if (preq_info->src_nents)
5106 qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
5107 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
5108 DMA_TO_DEVICE);
5109 if (areq->src != areq->dst)
5110 qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
5111 DMA_FROM_DEVICE);
5112 qce_free_req_info(pce_dev, req_info, false);
5113
5114 return rc;
5115}
5116EXPORT_SYMBOL(qce_aead_req);
5117
int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
{
	int rc = 0;
	struct qce_device *pce_dev = (struct qce_device *) handle;
	struct ablkcipher_request *areq = (struct ablkcipher_request *)
						c_req->areq;
	struct qce_cmdlist_info *cmdlistinfo = NULL;
	int req_info = -1;
	struct ce_sps_data *pce_sps_data;
	struct ce_request_info *preq_info;

	req_info = qce_alloc_req_info(pce_dev);
	if (req_info < 0)
		return -EBUSY;
	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;

	preq_info->src_nents = 0;
	preq_info->dst_nents = 0;

	/* cipher input */
	preq_info->src_nents = count_sg(areq->src, areq->nbytes);

	qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);
	/* cipher output */
	if (areq->src != areq->dst) {
		preq_info->dst_nents = count_sg(areq->dst, areq->nbytes);
		qce_dma_map_sg(pce_dev->pdev, areq->dst,
				preq_info->dst_nents, DMA_FROM_DEVICE);
	} else {
		preq_info->dst_nents = preq_info->src_nents;
	}
	preq_info->dir = c_req->dir;
	if ((pce_dev->ce_bam_info.minor_version == 0) &&
			(preq_info->dir == QCE_DECRYPT) &&
			(c_req->mode == QCE_MODE_CBC)) {
		memcpy(preq_info->dec_iv, (unsigned char *)
			sg_virt(areq->src) + areq->src->length - 16,
			NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE);
	}

	/* set up crypto device */
	if (pce_dev->support_cmd_dscr) {
		cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev,
							req_info, c_req);
		if (cmdlistinfo == NULL) {
			pr_err("Unsupported cipher algorithm %d, mode %d\n",
						c_req->alg, c_req->mode);
			qce_free_req_info(pce_dev, req_info, false);
			return -EINVAL;
		}
		rc = _ce_setup_cipher(pce_dev, c_req, areq->nbytes, 0,
							cmdlistinfo);
	} else {
		rc = _ce_setup_cipher_direct(pce_dev, c_req, areq->nbytes, 0);
	}
	if (rc < 0)
		goto bad;

	preq_info->mode = c_req->mode;

	/* setup for client callback, and issue command to BAM */
	preq_info->areq = areq;
	preq_info->qce_cb = c_req->qce_cb;

	/* setup xfer type for producer callback handling */
	preq_info->xfer_type = QCE_XFER_CIPHERING;
	preq_info->req_len = areq->nbytes;

	_qce_sps_iovec_count_init(pce_dev, req_info);
	if (pce_dev->support_cmd_dscr)
		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
					&pce_sps_data->in_transfer);
	if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
					&pce_sps_data->in_transfer))
		goto bad;
	_qce_set_flag(&pce_sps_data->in_transfer,
			SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);

	if (pce_dev->no_get_around)
		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
			&pce_sps_data->cmdlistptr.unlock_all_pipes,
			&pce_sps_data->in_transfer);

	if (_qce_sps_add_sg_data(pce_dev, areq->dst, areq->nbytes,
					&pce_sps_data->out_transfer))
		goto bad;
	if (pce_dev->no_get_around || areq->nbytes <= SPS_MAX_PKT_SIZE) {
		pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
		if (_qce_sps_add_data(
				GET_PHYS_ADDR(pce_sps_data->result_dump),
				CRYPTO_RESULT_DUMP_SIZE,
				&pce_sps_data->out_transfer))
			goto bad;
	} else {
		pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
	}

	select_mode(pce_dev, preq_info);
	rc = _qce_sps_transfer(pce_dev, req_info);
	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
	if (rc)
		goto bad;

	return 0;
bad:
	if (areq->src != areq->dst) {
		if (preq_info->dst_nents) {
			qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
					preq_info->dst_nents, DMA_FROM_DEVICE);
		}
	}
	if (preq_info->src_nents) {
		qce_dma_unmap_sg(pce_dev->pdev, areq->src,
				preq_info->src_nents,
				(areq->src == areq->dst) ?
					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	}
	qce_free_req_info(pce_dev, req_info, false);
	return rc;
}
EXPORT_SYMBOL(qce_ablk_cipher_req);

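/*
 * qce_process_sha_req() - queue one hashing request on the crypto
 * engine.  Passing a NULL sreq selects the driver's internal dummy
 * request (see setup_dummy_req()), which is submitted with the
 * interrupt flag forced on.  Since a zero-length transfer (ZLT) does
 * not work on BAM-NDP, one burst from the ignore buffer is queued when
 * the request carries no data.
 *
 * Returns 0 on success, -EBUSY when no request slot is free, or a
 * negative errno on setup failure.
 */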
int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	int rc;
	struct ahash_request *areq;
	struct qce_cmdlist_info *cmdlistinfo = NULL;
	int req_info = -1;
	struct ce_sps_data *pce_sps_data;
	struct ce_request_info *preq_info;
	bool is_dummy = false;

	if (!sreq) {
		sreq = &(pce_dev->dummyreq.sreq);
		req_info = DUMMY_REQ_INDEX;
		is_dummy = true;
	} else {
		req_info = qce_alloc_req_info(pce_dev);
		if (req_info < 0)
			return -EBUSY;
	}

	areq = (struct ahash_request *)sreq->areq;
	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;

	preq_info->src_nents = count_sg(sreq->src, sreq->size);
	qce_dma_map_sg(pce_dev->pdev, sreq->src, preq_info->src_nents,
							DMA_TO_DEVICE);

	if (pce_dev->support_cmd_dscr) {
		cmdlistinfo = _ce_get_hash_cmdlistinfo(pce_dev, req_info, sreq);
		if (cmdlistinfo == NULL) {
			pr_err("Unsupported hash algorithm %d\n", sreq->alg);
			qce_free_req_info(pce_dev, req_info, false);
			return -EINVAL;
		}
		rc = _ce_setup_hash(pce_dev, sreq, cmdlistinfo);
	} else {
		rc = _ce_setup_hash_direct(pce_dev, sreq);
	}
	if (rc < 0)
		goto bad;

	preq_info->areq = areq;
	preq_info->qce_cb = sreq->qce_cb;

	/* setup xfer type for producer callback handling */
	preq_info->xfer_type = QCE_XFER_HASHING;
	preq_info->req_len = sreq->size;

	_qce_sps_iovec_count_init(pce_dev, req_info);

	if (pce_dev->support_cmd_dscr)
		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
					&pce_sps_data->in_transfer);
	if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
						&pce_sps_data->in_transfer))
		goto bad;

	/* always ensure there is input data. ZLT does not work for bam-ndp */
	if (!areq->nbytes)
		_qce_sps_add_data(
			GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
			pce_dev->ce_bam_info.ce_burst_size,
			&pce_sps_data->in_transfer);
	_qce_set_flag(&pce_sps_data->in_transfer,
					SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
	if (pce_dev->no_get_around)
		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
			&pce_sps_data->cmdlistptr.unlock_all_pipes,
			&pce_sps_data->in_transfer);

	if (_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
					CRYPTO_RESULT_DUMP_SIZE,
					&pce_sps_data->out_transfer))
		goto bad;

	if (is_dummy) {
		_qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
		rc = _qce_sps_transfer(pce_dev, req_info);
	} else {
		select_mode(pce_dev, preq_info);
		rc = _qce_sps_transfer(pce_dev, req_info);
		cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
	}
	if (rc)
		goto bad;
	return 0;
bad:
	if (preq_info->src_nents) {
		qce_dma_unmap_sg(pce_dev->pdev, sreq->src,
				preq_info->src_nents, DMA_TO_DEVICE);
	}
	qce_free_req_info(pce_dev, req_info, false);
	return rc;
}
EXPORT_SYMBOL(qce_process_sha_req);

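/*
 * qce_f8_req() - run one OTA F8 (confidentiality) ciphering request on
 * the KASUMI or SNOW 3G core selected by req->algorithm.  Key-stream
 * generation (data_in == NULL) is rejected, and in-place operation
 * (data_in == data_out) uses a single bidirectional DMA mapping.
 * Completion is delivered through qce_cb with @cookie.
 */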
int qce_f8_req(void *handle, struct qce_f8_req *req,
			void *cookie, qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	bool key_stream_mode;
	dma_addr_t dst;
	int rc;
	struct qce_cmdlist_info *cmdlistinfo;
	int req_info = -1;
	struct ce_request_info *preq_info;
	struct ce_sps_data *pce_sps_data;

	req_info = qce_alloc_req_info(pce_dev);
	if (req_info < 0)
		return -EBUSY;
	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;

	switch (req->algorithm) {
	case QCE_OTA_ALGO_KASUMI:
		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g;
		break;
	default:
		qce_free_req_info(pce_dev, req_info, false);
		return -EINVAL;
	}

	key_stream_mode = (req->data_in == NULL);

	/* key stream mode is not supported */
	if (key_stream_mode || (req->bearer >= QCE_OTA_MAX_BEARER)) {
		qce_free_req_info(pce_dev, req_info, false);
		return -EINVAL;
	}

	/* F8 cipher input */
	preq_info->phy_ota_src = dma_map_single(pce_dev->pdev,
					req->data_in, req->data_len,
					(req->data_in == req->data_out) ?
					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* F8 cipher output */
	if (req->data_in != req->data_out) {
		dst = dma_map_single(pce_dev->pdev, req->data_out,
				req->data_len, DMA_FROM_DEVICE);
		preq_info->phy_ota_dst = dst;
	} else {
		/* in place ciphering */
		dst = preq_info->phy_ota_src;
		preq_info->phy_ota_dst = 0;
	}
	preq_info->ota_size = req->data_len;

	/* set up crypto device */
	if (pce_dev->support_cmd_dscr)
		rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0,
				req->data_len, cmdlistinfo);
	else
		rc = _ce_f8_setup_direct(pce_dev, req, key_stream_mode, 1, 0,
				req->data_len);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to sps */
	preq_info->areq = cookie;
	preq_info->qce_cb = qce_cb;

	/* setup xfer type for producer callback handling */
	preq_info->xfer_type = QCE_XFER_F8;
	preq_info->req_len = req->data_len;

	_qce_sps_iovec_count_init(pce_dev, req_info);

	if (pce_dev->support_cmd_dscr)
		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
					&pce_sps_data->in_transfer);

	_qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->data_len,
					&pce_sps_data->in_transfer);

	_qce_set_flag(&pce_sps_data->in_transfer,
			SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);

	_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
			&pce_sps_data->cmdlistptr.unlock_all_pipes,
			&pce_sps_data->in_transfer);

	_qce_sps_add_data((uint32_t)dst, req->data_len,
					&pce_sps_data->out_transfer);

	_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
			CRYPTO_RESULT_DUMP_SIZE,
			&pce_sps_data->out_transfer);

	select_mode(pce_dev, preq_info);
	rc = _qce_sps_transfer(pce_dev, req_info);
	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
	if (rc)
		goto bad;
	return 0;
bad:
	if (preq_info->phy_ota_dst != 0)
		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst,
				req->data_len, DMA_FROM_DEVICE);
	if (preq_info->phy_ota_src != 0)
		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
				req->data_len,
				(req->data_in == req->data_out) ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	qce_free_req_info(pce_dev, req_info, false);
	return rc;
}
EXPORT_SYMBOL(qce_f8_req);

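/*
 * qce_f8_multi_pkt_req() - run F8 ciphering over num_pkt equally sized
 * packets laid out back to back in memory.  The whole region of
 * num_pkt * data_len bytes is mapped and transferred in one shot;
 * cipher_start and cipher_size bound the ciphered bytes within each
 * packet.
 */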
int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq,
			void *cookie, qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	uint16_t num_pkt = mreq->num_pkt;
	uint16_t cipher_start = mreq->cipher_start;
	uint16_t cipher_size = mreq->cipher_size;
	struct qce_f8_req *req = &mreq->qce_f8_req;
	uint32_t total;
	dma_addr_t dst = 0;
	int rc = 0;
	struct qce_cmdlist_info *cmdlistinfo;
	int req_info = -1;
	struct ce_request_info *preq_info;
	struct ce_sps_data *pce_sps_data;

	req_info = qce_alloc_req_info(pce_dev);
	if (req_info < 0)
		return -EBUSY;
	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;

	switch (req->algorithm) {
	case QCE_OTA_ALGO_KASUMI:
		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g;
		break;
	default:
		qce_free_req_info(pce_dev, req_info, false);
		return -EINVAL;
	}

	total = num_pkt * req->data_len;

	/* F8 cipher input */
	preq_info->phy_ota_src = dma_map_single(pce_dev->pdev,
				req->data_in, total,
				(req->data_in == req->data_out) ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* F8 cipher output */
	if (req->data_in != req->data_out) {
		dst = dma_map_single(pce_dev->pdev, req->data_out, total,
						DMA_FROM_DEVICE);
		preq_info->phy_ota_dst = dst;
	} else {
		/* in place ciphering */
		dst = preq_info->phy_ota_src;
		preq_info->phy_ota_dst = 0;
	}

	preq_info->ota_size = total;

	/* set up crypto device */
	if (pce_dev->support_cmd_dscr)
		rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start,
			cipher_size, cmdlistinfo);
	else
		rc = _ce_f8_setup_direct(pce_dev, req, false, num_pkt,
			cipher_start, cipher_size);
	if (rc)
		goto bad;

	/* setup for callback, and issue command to sps */
	preq_info->areq = cookie;
	preq_info->qce_cb = qce_cb;

	/* setup xfer type for producer callback handling */
	preq_info->xfer_type = QCE_XFER_F8;
	preq_info->req_len = total;

	_qce_sps_iovec_count_init(pce_dev, req_info);

	if (pce_dev->support_cmd_dscr)
		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
					&pce_sps_data->in_transfer);

	_qce_sps_add_data((uint32_t)preq_info->phy_ota_src, total,
					&pce_sps_data->in_transfer);
	_qce_set_flag(&pce_sps_data->in_transfer,
			SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);

	_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
			&pce_sps_data->cmdlistptr.unlock_all_pipes,
			&pce_sps_data->in_transfer);

	_qce_sps_add_data((uint32_t)dst, total,
					&pce_sps_data->out_transfer);

	_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
			CRYPTO_RESULT_DUMP_SIZE,
			&pce_sps_data->out_transfer);

	select_mode(pce_dev, preq_info);
	rc = _qce_sps_transfer(pce_dev, req_info);
	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);

	if (rc == 0)
		return 0;
bad:
	if (preq_info->phy_ota_dst)
		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst, total,
				DMA_FROM_DEVICE);
	dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src, total,
				(req->data_in == req->data_out) ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	qce_free_req_info(pce_dev, req_info, false);
	return rc;
}
EXPORT_SYMBOL(qce_f8_multi_pkt_req);

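/*
 * qce_f9_req() - run one OTA F9 (integrity) request over req->message
 * on the KASUMI or SNOW 3G core selected by req->algorithm.  The result
 * dump area is queued on the producer pipe so the completion path can
 * read back the computed MAC; completion is delivered through qce_cb
 * with @cookie.
 */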
int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie,
			qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	int rc;
	struct qce_cmdlist_info *cmdlistinfo;
	int req_info = -1;
	struct ce_sps_data *pce_sps_data;
	struct ce_request_info *preq_info;

	req_info = qce_alloc_req_info(pce_dev);
	if (req_info < 0)
		return -EBUSY;
	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;
	switch (req->algorithm) {
	case QCE_OTA_ALGO_KASUMI:
		cmdlistinfo = &pce_sps_data->cmdlistptr.f9_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
		cmdlistinfo = &pce_sps_data->cmdlistptr.f9_snow3g;
		break;
	default:
		qce_free_req_info(pce_dev, req_info, false);
		return -EINVAL;
	}

	preq_info->phy_ota_src = dma_map_single(pce_dev->pdev, req->message,
			req->msize, DMA_TO_DEVICE);

	preq_info->ota_size = req->msize;

	if (pce_dev->support_cmd_dscr)
		rc = _ce_f9_setup(pce_dev, req, cmdlistinfo);
	else
		rc = _ce_f9_setup_direct(pce_dev, req);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to sps */
	preq_info->areq = cookie;
	preq_info->qce_cb = qce_cb;

	/* setup xfer type for producer callback handling */
	preq_info->xfer_type = QCE_XFER_F9;
	preq_info->req_len = req->msize;

	_qce_sps_iovec_count_init(pce_dev, req_info);
	if (pce_dev->support_cmd_dscr)
		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
					&pce_sps_data->in_transfer);
	_qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->msize,
					&pce_sps_data->in_transfer);
	_qce_set_flag(&pce_sps_data->in_transfer,
					SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);

	_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
			&pce_sps_data->cmdlistptr.unlock_all_pipes,
			&pce_sps_data->in_transfer);

	_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
					CRYPTO_RESULT_DUMP_SIZE,
					&pce_sps_data->out_transfer);

	select_mode(pce_dev, preq_info);
	rc = _qce_sps_transfer(pce_dev, req_info);
	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
	if (rc)
		goto bad;
	return 0;
bad:
	dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
				req->msize, DMA_TO_DEVICE);
	qce_free_req_info(pce_dev, req_info, false);
	return rc;
}
EXPORT_SYMBOL(qce_f9_req);

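/*
 * __qce_get_device_tree_data() - populate pce_dev from the device tree
 * node: feature flags (shared CE, HW key, software-fallback algorithms,
 * clock management), BAM pipe pair, EE and IRQ, CE device and HW
 * instance numbers, the operating frequency, and the "crypto-base" and
 * "crypto-bam-base" register regions.  Optional properties fall back to
 * defaults; missing mandatory ones fail with a negative errno.
 */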
static int __qce_get_device_tree_data(struct platform_device *pdev,
		struct qce_device *pce_dev)
{
	struct resource *resource;
	int rc = 0;

	pce_dev->is_shared = of_property_read_bool((&pdev->dev)->of_node,
				"qcom,ce-hw-shared");
	pce_dev->support_hw_key = of_property_read_bool((&pdev->dev)->of_node,
				"qcom,ce-hw-key");

	pce_dev->use_sw_aes_cbc_ecb_ctr_algo =
			of_property_read_bool((&pdev->dev)->of_node,
				"qcom,use-sw-aes-cbc-ecb-ctr-algo");
	pce_dev->use_sw_aead_algo =
			of_property_read_bool((&pdev->dev)->of_node,
				"qcom,use-sw-aead-algo");
	pce_dev->use_sw_aes_xts_algo =
			of_property_read_bool((&pdev->dev)->of_node,
				"qcom,use-sw-aes-xts-algo");
	pce_dev->use_sw_ahash_algo =
			of_property_read_bool((&pdev->dev)->of_node,
				"qcom,use-sw-ahash-algo");
	pce_dev->use_sw_hmac_algo =
			of_property_read_bool((&pdev->dev)->of_node,
				"qcom,use-sw-hmac-algo");
	pce_dev->use_sw_aes_ccm_algo =
			of_property_read_bool((&pdev->dev)->of_node,
				"qcom,use-sw-aes-ccm-algo");
	pce_dev->support_clk_mgmt_sus_res = of_property_read_bool(
		(&pdev->dev)->of_node, "qcom,clk-mgmt-sus-res");
	pce_dev->support_only_core_src_clk = of_property_read_bool(
		(&pdev->dev)->of_node, "qcom,support-core-clk-only");
	pce_dev->request_bw_before_clk = of_property_read_bool(
		(&pdev->dev)->of_node, "qcom,request-bw-before-clk");

	if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,bam-pipe-pair",
				&pce_dev->ce_bam_info.pipe_pair_index)) {
		pr_err("Failed to get BAM pipe pair information.\n");
		return -EINVAL;
	}
	if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,ce-device",
				&pce_dev->ce_bam_info.ce_device)) {
		pr_err("Failed to get CE device information.\n");
		return -EINVAL;
	}
	if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,ce-hw-instance",
				&pce_dev->ce_bam_info.ce_hw_instance)) {
		pr_err("Failed to get CE hw instance information.\n");
		return -EINVAL;
	}
	if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,bam-ee",
				&pce_dev->ce_bam_info.bam_ee)) {
		pr_info("BAM Apps EE is not defined, setting to default 1\n");
		pce_dev->ce_bam_info.bam_ee = 1;
	}
	if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,ce-opp-freq",
				&pce_dev->ce_opp_freq_hz)) {
		pr_info("CE operating frequency is not defined, setting to default 100MHz\n");
		pce_dev->ce_opp_freq_hz = CE_CLK_100MHZ;
	}

	if (of_property_read_bool((&pdev->dev)->of_node, "qcom,smmu-s1-bypass"))
		pce_dev->bypass_s1_smmu = true;

	pce_dev->ce_bam_info.dest_pipe_index =
			2 * pce_dev->ce_bam_info.pipe_pair_index;
	pce_dev->ce_bam_info.src_pipe_index =
			pce_dev->ce_bam_info.dest_pipe_index + 1;

	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
							"crypto-base");
	if (resource) {
		pce_dev->phy_iobase = resource->start;
		pce_dev->iobase = ioremap_nocache(resource->start,
					resource_size(resource));
		if (!pce_dev->iobase) {
			pr_err("Cannot map CRYPTO io memory\n");
			return -ENOMEM;
		}
	} else {
		pr_err("CRYPTO HW mem unavailable.\n");
		return -ENODEV;
	}

	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
							"crypto-bam-base");
	if (resource) {
		pce_dev->bam_mem = resource->start;
		pce_dev->bam_mem_size = resource_size(resource);
	} else {
		pr_err("CRYPTO BAM mem unavailable.\n");
		rc = -ENODEV;
		goto err_getting_bam_info;
	}

	resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (resource) {
		pce_dev->ce_bam_info.bam_irq = resource->start;
	} else {
		pr_err("CRYPTO BAM IRQ unavailable.\n");
		rc = -ENODEV;	/* was left unset here, so the error path returned 0 */
		goto err_dev;
	}
	return rc;
err_dev:
	if (pce_dev->ce_bam_info.bam_iobase)
		iounmap(pce_dev->ce_bam_info.bam_iobase);

err_getting_bam_info:
	if (pce_dev->iobase)
		iounmap(pce_dev->iobase);

	return rc;
}

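/*
 * __qce_init_clk() - look up the CE clocks ("core_clk_src", "core_clk",
 * "iface_clk" and "bus_clk") and set the source clock to the OPP
 * frequency from the device tree.  Setting the rate is skipped when
 * bandwidth has to be requested before the clocks, and the leaf clocks
 * are skipped entirely when only the core source clock is supported.
 */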
static int __qce_init_clk(struct qce_device *pce_dev)
{
	int rc = 0;

	pce_dev->ce_core_src_clk = clk_get(pce_dev->pdev, "core_clk_src");
	if (!IS_ERR(pce_dev->ce_core_src_clk)) {
		if (pce_dev->request_bw_before_clk)
			goto skip_set_rate;

		rc = clk_set_rate(pce_dev->ce_core_src_clk,
						pce_dev->ce_opp_freq_hz);
		if (rc) {
			pr_err("Unable to set the core src clk @%uMHz.\n",
					pce_dev->ce_opp_freq_hz/CE_CLK_DIV);
			goto exit_put_core_src_clk;
		}
	} else {
		if (pce_dev->support_only_core_src_clk) {
			rc = PTR_ERR(pce_dev->ce_core_src_clk);
			pce_dev->ce_core_src_clk = NULL;
			pr_err("Unable to get CE core src clk\n");
			return rc;
		}
		pr_warn("Unable to get CE core src clk, set to NULL\n");
		pce_dev->ce_core_src_clk = NULL;
	}

skip_set_rate:
	if (pce_dev->support_only_core_src_clk) {
		pce_dev->ce_core_clk = NULL;
		pce_dev->ce_clk = NULL;
		pce_dev->ce_bus_clk = NULL;
	} else {
		pce_dev->ce_core_clk = clk_get(pce_dev->pdev, "core_clk");
		if (IS_ERR(pce_dev->ce_core_clk)) {
			rc = PTR_ERR(pce_dev->ce_core_clk);
			pr_err("Unable to get CE core clk\n");
			goto exit_put_core_src_clk;
		}
		pce_dev->ce_clk = clk_get(pce_dev->pdev, "iface_clk");
		if (IS_ERR(pce_dev->ce_clk)) {
			rc = PTR_ERR(pce_dev->ce_clk);
			pr_err("Unable to get CE interface clk\n");
			goto exit_put_core_clk;
		}

		pce_dev->ce_bus_clk = clk_get(pce_dev->pdev, "bus_clk");
		if (IS_ERR(pce_dev->ce_bus_clk)) {
			rc = PTR_ERR(pce_dev->ce_bus_clk);
			pr_err("Unable to get CE BUS interface clk\n");
			goto exit_put_iface_clk;
		}
	}
	return rc;

exit_put_iface_clk:
	if (pce_dev->ce_clk)
		clk_put(pce_dev->ce_clk);
exit_put_core_clk:
	if (pce_dev->ce_core_clk)
		clk_put(pce_dev->ce_core_clk);
exit_put_core_src_clk:
	if (pce_dev->ce_core_src_clk)
		clk_put(pce_dev->ce_core_src_clk);
	pr_err("Unable to init CE clks, rc = %d\n", rc);
	return rc;
}

static void __qce_deinit_clk(struct qce_device *pce_dev)
{
	if (pce_dev->ce_bus_clk)
		clk_put(pce_dev->ce_bus_clk);
	if (pce_dev->ce_clk)
		clk_put(pce_dev->ce_clk);
	if (pce_dev->ce_core_clk)
		clk_put(pce_dev->ce_core_clk);
	if (pce_dev->ce_core_src_clk)
		clk_put(pce_dev->ce_core_src_clk);
}

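/*
 * qce_enable_clk() - prepare and enable the CE clocks in dependency
 * order (core source, core, interface, bus), unwinding on failure;
 * qce_disable_clk() reverses the order.  Both are exported so clients
 * can gate the engine clocks around request activity.
 */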
int qce_enable_clk(void *handle)
{
	struct qce_device *pce_dev = (struct qce_device *)handle;
	int rc = 0;

	if (pce_dev->ce_core_src_clk) {
		rc = clk_prepare_enable(pce_dev->ce_core_src_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE core src clk\n");
			return rc;
		}
	}

	if (pce_dev->support_only_core_src_clk)
		return rc;

	if (pce_dev->ce_core_clk) {
		rc = clk_prepare_enable(pce_dev->ce_core_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE core clk\n");
			goto exit_disable_core_src_clk;
		}
	}

	if (pce_dev->ce_clk) {
		rc = clk_prepare_enable(pce_dev->ce_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE iface clk\n");
			goto exit_disable_core_clk;
		}
	}

	if (pce_dev->ce_bus_clk) {
		rc = clk_prepare_enable(pce_dev->ce_bus_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE BUS clk\n");
			goto exit_disable_ce_clk;
		}
	}
	return rc;

exit_disable_ce_clk:
	if (pce_dev->ce_clk)
		clk_disable_unprepare(pce_dev->ce_clk);
exit_disable_core_clk:
	if (pce_dev->ce_core_clk)
		clk_disable_unprepare(pce_dev->ce_core_clk);
exit_disable_core_src_clk:
	if (pce_dev->ce_core_src_clk)
		clk_disable_unprepare(pce_dev->ce_core_src_clk);
	return rc;
}
EXPORT_SYMBOL(qce_enable_clk);

int qce_disable_clk(void *handle)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	int rc = 0;

	if (pce_dev->ce_bus_clk)
		clk_disable_unprepare(pce_dev->ce_bus_clk);
	if (pce_dev->ce_clk)
		clk_disable_unprepare(pce_dev->ce_clk);
	if (pce_dev->ce_core_clk)
		clk_disable_unprepare(pce_dev->ce_core_clk);
	if (pce_dev->ce_core_src_clk)
		clk_disable_unprepare(pce_dev->ce_core_src_clk);

	return rc;
}
EXPORT_SYMBOL(qce_disable_clk);

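/*
 * Illustrative only: a minimal sketch of how a client might bracket a
 * burst of requests with the exported clock calls.  "handle" is the
 * value returned by qce_open(); the surrounding function, "my_creq",
 * and the error policy are hypothetical, not part of this driver.
 *
 *	int rc = qce_enable_clk(handle);
 *
 *	if (rc)
 *		return rc;
 *	rc = qce_ablk_cipher_req(handle, &my_creq);
 *	...wait for the qce_cb completion callback...
 *	qce_disable_clk(handle);
 */
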
/* dummy req setup */
static int setup_dummy_req(struct qce_device *pce_dev)
{
	char *input =
	"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopqopqrpqrs";
	int len = DUMMY_REQ_DATA_LEN;

	memcpy(pce_dev->dummyreq_in_buf, input, len);
	sg_set_buf(&pce_dev->dummyreq.sg, pce_dev->dummyreq_in_buf, len);
	sg_mark_end(&pce_dev->dummyreq.sg);

	pce_dev->dummyreq.sreq.alg = QCE_HASH_SHA1;
	pce_dev->dummyreq.sreq.qce_cb = qce_dummy_complete;
	pce_dev->dummyreq.sreq.src = &pce_dev->dummyreq.sg;
	pce_dev->dummyreq.sreq.auth_data[0] = 0;
	pce_dev->dummyreq.sreq.auth_data[1] = 0;
	pce_dev->dummyreq.sreq.auth_data[2] = 0;
	pce_dev->dummyreq.sreq.auth_data[3] = 0;
	pce_dev->dummyreq.sreq.first_blk = 1;
	pce_dev->dummyreq.sreq.last_blk = 1;
	pce_dev->dummyreq.sreq.size = len;
	pce_dev->dummyreq.sreq.areq = &pce_dev->dummyreq.areq;
	pce_dev->dummyreq.sreq.flags = 0;
	pce_dev->dummyreq.sreq.authkey = NULL;

	pce_dev->dummyreq.areq.src = pce_dev->dummyreq.sreq.src;
	pce_dev->dummyreq.areq.nbytes = pce_dev->dummyreq.sreq.size;

	return 0;
}

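/*
 * SMMU handling: when the device tree sets "qcom,smmu-s1-bypass", a
 * stage-1-bypassed ARM IOMMU mapping covering the fixed IOVA window
 * starting at CRYPTO_SMMU_IOVA_START (CRYPTO_SMMU_IOVA_SIZE bytes) is
 * created and the CE device is attached to it;
 * qce_iommu_release_iomapping() drops the mapping again.
 */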
static void qce_iommu_release_iomapping(struct qce_device *pce_dev)
{
	if (pce_dev->smmu_mapping)
		arm_iommu_release_mapping(pce_dev->smmu_mapping);

	pce_dev->smmu_mapping = NULL;
}

static int qce_smmu_init(struct qce_device *pce_dev)
{
	struct dma_iommu_mapping *mapping;
	int s1_bypass = 1;
	int ret = 0;

	mapping = arm_iommu_create_mapping(&platform_bus_type,
		CRYPTO_SMMU_IOVA_START, CRYPTO_SMMU_IOVA_SIZE);
	if (IS_ERR(mapping)) {
		ret = PTR_ERR(mapping);
		pr_err("Create mapping failed, err = %d\n", ret);
		return ret;
	}

	ret = iommu_domain_set_attr(mapping->domain,
				DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
	if (ret < 0) {
		pr_err("Set s1_bypass attribute failed, err = %d\n", ret);
		goto ext_fail_set_attr;
	}

	ret = arm_iommu_attach_device(pce_dev->pdev, mapping);
	if (ret < 0) {
		pr_err("Attach device failed, err = %d\n", ret);
		goto ext_fail_set_attr;
	}
	pce_dev->smmu_mapping = mapping;
	return ret;

ext_fail_set_attr:
	/*
	 * pce_dev->smmu_mapping is not set on these paths, so release the
	 * new mapping directly instead of via qce_iommu_release_iomapping()
	 * (which would have leaked it).
	 */
	arm_iommu_release_mapping(mapping);
	return ret;
}

/* crypto engine open function. */
void *qce_open(struct platform_device *pdev, int *rc)
{
	struct qce_device *pce_dev;
	int i;
	static int pcedev_no = 1;

	pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
	if (!pce_dev) {
		*rc = -ENOMEM;
		pr_err("Cannot allocate memory: %d\n", *rc);
		return NULL;
	}
	pce_dev->pdev = &pdev->dev;

	mutex_lock(&qce_iomap_mutex);
	if (pdev->dev.of_node) {
		*rc = __qce_get_device_tree_data(pdev, pce_dev);
		if (*rc)
			goto err_pce_dev;
	} else {
		*rc = -EINVAL;
		pr_err("Device Node not found.\n");
		goto err_pce_dev;
	}

	for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++)
		atomic_set(&pce_dev->ce_request_info[i].in_use, false);
	pce_dev->ce_request_index = 0;

	pce_dev->memsize = 10 * PAGE_SIZE * MAX_QCE_ALLOC_BAM_REQ;
	pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
			pce_dev->memsize, &pce_dev->coh_pmem, GFP_KERNEL);

	if (pce_dev->coh_vmem == NULL) {
		*rc = -ENOMEM;
		pr_err("Cannot allocate coherent memory for sps data\n");
		goto err_iobase;
	}

	pce_dev->iovec_memsize = TOTAL_IOVEC_SPACE_PER_PIPE *
						MAX_QCE_ALLOC_BAM_REQ * 2;
	pce_dev->iovec_vmem = kzalloc(pce_dev->iovec_memsize, GFP_KERNEL);
	if (pce_dev->iovec_vmem == NULL) {
		*rc = -ENOMEM;	/* was unset: qce_open() returned NULL with *rc == 0 */
		goto err_mem;
	}

	pce_dev->dummyreq_in_buf = kzalloc(DUMMY_REQ_DATA_LEN, GFP_KERNEL);
	if (pce_dev->dummyreq_in_buf == NULL) {
		*rc = -ENOMEM;	/* was unset: qce_open() returned NULL with *rc == 0 */
		goto err_mem;
	}

	*rc = __qce_init_clk(pce_dev);
	if (*rc)
		goto err_mem;
	*rc = qce_enable_clk(pce_dev);
	if (*rc)
		goto err_enable_clk;

	if (pce_dev->bypass_s1_smmu) {
		if (qce_smmu_init(pce_dev)) {
			*rc = -EIO;
			goto err_smmu;
		}
	}

	if (_probe_ce_engine(pce_dev)) {
		*rc = -ENXIO;
		goto err;
	}
	*rc = 0;

	qce_init_ce_cfg_val(pce_dev);
	*rc = qce_sps_init(pce_dev);
	if (*rc)
		goto err;
	qce_setup_ce_sps_data(pce_dev);
	qce_disable_clk(pce_dev);
	setup_dummy_req(pce_dev);
	atomic_set(&pce_dev->no_of_queued_req, 0);
	pce_dev->mode = IN_INTERRUPT_MODE;
	init_timer(&(pce_dev->timer));
	pce_dev->timer.function = qce_multireq_timeout;
	pce_dev->timer.data = (unsigned long)pce_dev;
	pce_dev->timer.expires = jiffies + DELAY_IN_JIFFIES;
	pce_dev->intr_cadence = 0;
	pce_dev->dev_no = pcedev_no;
	pcedev_no++;
	pce_dev->owner = QCE_OWNER_NONE;
	mutex_unlock(&qce_iomap_mutex);
	return pce_dev;
err:
	if (pce_dev->bypass_s1_smmu)
		qce_iommu_release_iomapping(pce_dev);
err_smmu:
	qce_disable_clk(pce_dev);

err_enable_clk:
	__qce_deinit_clk(pce_dev);

err_mem:
	kfree(pce_dev->dummyreq_in_buf);
	kfree(pce_dev->iovec_vmem);
	if (pce_dev->coh_vmem)
		dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
			pce_dev->coh_vmem, pce_dev->coh_pmem);
err_iobase:
	if (pce_dev->iobase)
		iounmap(pce_dev->iobase);
err_pce_dev:
	mutex_unlock(&qce_iomap_mutex);
	kfree(pce_dev);
	return NULL;
}
EXPORT_SYMBOL(qce_open);

/* crypto engine close function. */
int qce_close(void *handle)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;

	if (handle == NULL)
		return -ENODEV;

	mutex_lock(&qce_iomap_mutex);
	qce_enable_clk(pce_dev);
	qce_sps_exit(pce_dev);

	if (pce_dev->iobase)
		iounmap(pce_dev->iobase);
	if (pce_dev->coh_vmem)
		dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
			pce_dev->coh_vmem, pce_dev->coh_pmem);
	kfree(pce_dev->dummyreq_in_buf);
	kfree(pce_dev->iovec_vmem);

	if (pce_dev->bypass_s1_smmu)
		qce_iommu_release_iomapping(pce_dev);

	qce_disable_clk(pce_dev);
	__qce_deinit_clk(pce_dev);
	mutex_unlock(&qce_iomap_mutex);
	kfree(handle);

	return 0;
}
EXPORT_SYMBOL(qce_close);

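/*
 * Illustrative only: the expected open/request/close lifecycle as a
 * sketch.  "pdev" is the client's platform device; "my_sreq", its setup
 * and the completion handling are elided and hypothetical.
 *
 *	int rc;
 *	void *ce = qce_open(pdev, &rc);
 *
 *	if (!ce)
 *		return rc;
 *	qce_enable_clk(ce);
 *	rc = qce_process_sha_req(ce, &my_sreq);
 *	...wait for my_sreq.qce_cb to fire...
 *	qce_disable_clk(ce);
 *	qce_close(ce);
 */
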
#define OTA_SUPPORT_MASK (1 << CRYPTO_ENCR_SNOW3G_SEL |\
				1 << CRYPTO_ENCR_KASUMI_SEL |\
				1 << CRYPTO_AUTH_SNOW3G_SEL |\
				1 << CRYPTO_AUTH_KASUMI_SEL)

int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
{
	struct qce_device *pce_dev = (struct qce_device *)handle;

	if (ce_support == NULL)
		return -EINVAL;

	ce_support->sha1_hmac_20 = false;
	ce_support->sha1_hmac = false;
	ce_support->sha256_hmac = false;
	ce_support->sha_hmac = true;
	ce_support->cmac = true;
	ce_support->aes_key_192 = false;
	ce_support->aes_xts = true;
	if ((pce_dev->engines_avail & OTA_SUPPORT_MASK) == OTA_SUPPORT_MASK)
		ce_support->ota = true;
	else
		ce_support->ota = false;
	ce_support->bam = true;
	ce_support->is_shared = (pce_dev->is_shared == 1) ? true : false;
	ce_support->hw_key = pce_dev->support_hw_key;
	ce_support->aes_ccm = true;
	ce_support->clk_mgmt_sus_res = pce_dev->support_clk_mgmt_sus_res;
	ce_support->req_bw_before_clk = pce_dev->request_bw_before_clk;
	if (pce_dev->ce_bam_info.minor_version)
		ce_support->aligned_only = false;
	else
		ce_support->aligned_only = true;

	ce_support->use_sw_aes_cbc_ecb_ctr_algo =
				pce_dev->use_sw_aes_cbc_ecb_ctr_algo;
	ce_support->use_sw_aead_algo =
				pce_dev->use_sw_aead_algo;
	ce_support->use_sw_aes_xts_algo =
				pce_dev->use_sw_aes_xts_algo;
	ce_support->use_sw_ahash_algo =
				pce_dev->use_sw_ahash_algo;
	ce_support->use_sw_hmac_algo =
				pce_dev->use_sw_hmac_algo;
	ce_support->use_sw_aes_ccm_algo =
				pce_dev->use_sw_aes_ccm_algo;
	ce_support->ce_device = pce_dev->ce_bam_info.ce_device;
	ce_support->ce_hw_instance = pce_dev->ce_bam_info.ce_hw_instance;
	if (pce_dev->no_get_around)
		ce_support->max_request = MAX_QCE_BAM_REQ;
	else
		ce_support->max_request = 1;
	return 0;
}
EXPORT_SYMBOL(qce_hw_support);

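/*
 * qce_dump_req() - debugging aid that logs the in-use state of every
 * request slot and dumps the descriptor FIFOs of the active ones.
 */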
void qce_dump_req(void *handle)
{
	int i;
	bool req_in_use;
	struct qce_device *pce_dev = (struct qce_device *)handle;

	for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
		req_in_use = atomic_read(&pce_dev->ce_request_info[i].in_use);
		pr_info("qce_dump_req %d %d\n", i, req_in_use);
		if (req_in_use)
			_qce_dump_descr_fifos(pce_dev, i);
	}
}
EXPORT_SYMBOL(qce_dump_req);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Crypto Engine driver");