1/*
2 * caam - Freescale FSL CAAM support for ahash functions of crypto API
3 *
4 * Copyright 2011 Freescale Semiconductor, Inc.
5 *
6 * Based on caamalg.c crypto API driver.
7 *
8 * relationship of digest job descriptor or first job descriptor after init to
9 * shared descriptors:
10 *
11 * --------------- ---------------
12 * | JobDesc #1 |-------------------->| ShareDesc |
13 * | *(packet 1) | | (hashKey) |
14 * --------------- | (operation) |
15 * ---------------
16 *
17 * relationship of subsequent job descriptors to shared descriptors:
18 *
19 * --------------- ---------------
20 * | JobDesc #2 |-------------------->| ShareDesc |
21 * | *(packet 2) | |------------->| (hashKey) |
22 * --------------- | |-------->| (operation) |
23 * . | | | (load ctx2) |
24 * . | | ---------------
25 * --------------- | |
26 * | JobDesc #3 |------| |
27 * | *(packet 3) | |
28 * --------------- |
29 * . |
30 * . |
31 * --------------- |
32 * | JobDesc #4 |------------
33 * | *(packet 4) |
34 * ---------------
35 *
36 * The SharedDesc never changes for a connection unless rekeyed, but
37 * each packet will likely be in a different place. So all we need
38 * to know to process the packet is where the input is, where the
39 * output goes, and what context we want to process with. Context is
40 * in the SharedDesc, packet references in the JobDesc.
41 *
42 * So, a job desc looks like:
43 *
44 * ---------------------
45 * | Header |
46 * | ShareDesc Pointer |
47 * | SEQ_OUT_PTR |
48 * | (output buffer) |
49 * | (output length) |
50 * | SEQ_IN_PTR |
51 * | (input buffer) |
52 * | (input length) |
53 * ---------------------
54 */
55
56#include "compat.h"
57
58#include "regs.h"
59#include "intern.h"
60#include "desc_constr.h"
61#include "jr.h"
62#include "error.h"
63#include "sg_sw_sec4.h"
64#include "key_gen.h"
65
66#define CAAM_CRA_PRIORITY 3000
67
68/* max hash key is max split key size */
69#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
70
71#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
72#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
73
74/* length of descriptors text */
75#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
76#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
77#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
78#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
79#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
80#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
81
82#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
83 CAAM_MAX_HASH_KEY_SIZE)
84#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
85
86/* caam context sizes for hashes: running digest + 8-byte message length */
87#define HASH_MSG_LEN 8
88#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
89
90#ifdef DEBUG
91/* for print_hex_dumps with line references */
92#define debug(format, arg...) printk(format, arg)
93#else
94#define debug(format, arg...)
95#endif
96
97
98static struct list_head hash_list;
99
100/* ahash per-session context */
101struct caam_hash_ctx {
102 struct device *jrdev;
103 u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
104 u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
105 u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
106 u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
107 u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
108 dma_addr_t sh_desc_update_dma;
109 dma_addr_t sh_desc_update_first_dma;
110 dma_addr_t sh_desc_fin_dma;
111 dma_addr_t sh_desc_digest_dma;
112 dma_addr_t sh_desc_finup_dma;
113 u32 alg_type;
114 u32 alg_op;
115 u8 key[CAAM_MAX_HASH_KEY_SIZE];
116 dma_addr_t key_dma;
117 int ctx_len;
118 unsigned int split_key_len;
119 unsigned int split_key_pad_len;
120};
121
122/* ahash state */
123struct caam_hash_state {
124 dma_addr_t buf_dma;
125 dma_addr_t ctx_dma;
126 u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
127 int buflen_0;
128 u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
129 int buflen_1;
130 u8 caam_ctx[MAX_CTX_LEN];
131 int (*update)(struct ahash_request *req);
132 int (*final)(struct ahash_request *req);
133 int (*finup)(struct ahash_request *req);
134 int current_buf;
135};
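/*
 * The update/final/finup hooks above form a small state machine: ahash_init()
 * points them at the "first"/"no_ctx" handlers, and once a running digest
 * exists in caam_ctx they are switched to the "_ctx" handlers, so each request
 * is routed to the job descriptor that matches the current state.
 */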
136
137/* Common job descriptor seq in/out ptr routines */
138
139/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
140static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
141 struct caam_hash_state *state,
142 int ctx_len)
143{
144 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
145 ctx_len, DMA_FROM_DEVICE);
146 append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
147}
148
149/* Map req->result, and append seq_out_ptr command that points to it */
150static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
151 u8 *result, int digestsize)
152{
153 dma_addr_t dst_dma;
154
155 dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
156 append_seq_out_ptr(desc, dst_dma, digestsize, 0);
157
158 return dst_dma;
159}
160
161/* Map current buffer in state and put it in link table */
162static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
163 struct sec4_sg_entry *sec4_sg,
164 u8 *buf, int buflen)
165{
166 dma_addr_t buf_dma;
167
168 buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
169 dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
170
171 return buf_dma;
172}
173
174/* Map req->src and put it in link table */
175static inline void src_map_to_sec4_sg(struct device *jrdev,
176 struct scatterlist *src, int src_nents,
177 struct sec4_sg_entry *sec4_sg,
178 bool chained)
179{
180 dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
181 sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
182}
183
184/*
185 * Only put the buffer in the link table if it contains data; a previously
186 * used buffer may still be mapped and must be unmapped first.
187 */
188static inline dma_addr_t
189try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
190 u8 *buf, dma_addr_t buf_dma, int buflen,
191 int last_buflen)
192{
193 if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
194 dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
195 if (buflen)
196 buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
197 else
198 buf_dma = 0;
199
200 return buf_dma;
201}
202
203/* Map state->caam_ctx, and add it to link table */
204static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
205 struct caam_hash_state *state,
206 int ctx_len,
207 struct sec4_sg_entry *sec4_sg,
208 u32 flag)
209{
210 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
211 dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
212}
213
214/* Common shared descriptor commands */
215static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
216{
217 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
218 ctx->split_key_len, CLASS_2 |
219 KEY_DEST_MDHA_SPLIT | KEY_ENC);
220}
221
222/* Append key if it has been set */
223static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
224{
225 u32 *key_jump_cmd;
226
227 init_sh_desc(desc, HDR_SHARE_SERIAL);
228
229 if (ctx->split_key_len) {
230 /* Skip if already shared */
231 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
232 JUMP_COND_SHRD);
233
234 append_key_ahash(desc, ctx);
235
236 set_jump_tgt_here(desc, key_jump_cmd);
237 }
238
239 /* Propagate errors from shared to job descriptor */
240 append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
241}
242
243/*
244 * For ahash read data from seqin following state->caam_ctx,
245 * and write resulting class2 context to seqout, which may be state->caam_ctx
246 * or req->result
247 */
248static inline void ahash_append_load_str(u32 *desc, int digestsize)
249{
250 /* Calculate remaining bytes to read */
251 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
252
253 /* Read remaining bytes */
254 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
255 FIFOLD_TYPE_MSG | KEY_VLF);
256
257 /* Store class2 context bytes */
258 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
259 LDST_SRCDST_BYTE_CONTEXT);
260}
261
262/*
263 * For ahash update, final and finup, import context, read and write to seqout
264 */
265static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
266 int digestsize,
267 struct caam_hash_ctx *ctx)
268{
269 init_sh_desc_key_ahash(desc, ctx);
270
271 /* Import context from software */
272 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
273 LDST_CLASS_2_CCB | ctx->ctx_len);
274
275 /* Class 2 operation */
276 append_operation(desc, op | state | OP_ALG_ENCRYPT);
277
278 /*
279 * Load from buf and/or src and write to req->result or state->context
280 */
281 ahash_append_load_str(desc, digestsize);
282}
283
284/* For ahash first update and digest, read and write to seqout */
285static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
286 int digestsize, struct caam_hash_ctx *ctx)
287{
288 init_sh_desc_key_ahash(desc, ctx);
289
290 /* Class 2 operation */
291 append_operation(desc, op | state | OP_ALG_ENCRYPT);
292
293 /*
294 * Load from buf and/or src and write to req->result or state->context
295 */
296 ahash_append_load_str(desc, digestsize);
297}
298
299static int ahash_set_sh_desc(struct crypto_ahash *ahash)
300{
301 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
302 int digestsize = crypto_ahash_digestsize(ahash);
303 struct device *jrdev = ctx->jrdev;
304 u32 have_key = 0;
305 u32 *desc;
306
307 if (ctx->split_key_len)
308 have_key = OP_ALG_AAI_HMAC_PRECOMP;
309
310 /* ahash_update shared descriptor */
311 desc = ctx->sh_desc_update;
312
313 init_sh_desc(desc, HDR_SHARE_SERIAL);
314
315 /* Import context from software */
316 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
317 LDST_CLASS_2_CCB | ctx->ctx_len);
318
319 /* Class 2 operation */
320 append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
321 OP_ALG_ENCRYPT);
322
323 /* Load data and write to result or context */
324 ahash_append_load_str(desc, ctx->ctx_len);
325
326 ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
327 DMA_TO_DEVICE);
328 if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
329 dev_err(jrdev, "unable to map shared descriptor\n");
330 return -ENOMEM;
331 }
332#ifdef DEBUG
333 print_hex_dump(KERN_ERR,
334 "ahash update shdesc@"__stringify(__LINE__)": ",
335 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
336#endif
337
338 /* ahash_update_first shared descriptor */
339 desc = ctx->sh_desc_update_first;
340
341 ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
342 ctx->ctx_len, ctx);
343
344 ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
345 desc_bytes(desc),
346 DMA_TO_DEVICE);
347 if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
348 dev_err(jrdev, "unable to map shared descriptor\n");
349 return -ENOMEM;
350 }
351#ifdef DEBUG
352 print_hex_dump(KERN_ERR,
353 "ahash update first shdesc@"__stringify(__LINE__)": ",
354 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
355#endif
356
357 /* ahash_final shared descriptor */
358 desc = ctx->sh_desc_fin;
359
360 ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
361 OP_ALG_AS_FINALIZE, digestsize, ctx);
362
363 ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
364 DMA_TO_DEVICE);
365 if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
366 dev_err(jrdev, "unable to map shared descriptor\n");
367 return -ENOMEM;
368 }
369#ifdef DEBUG
370 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
371 DUMP_PREFIX_ADDRESS, 16, 4, desc,
372 desc_bytes(desc), 1);
373#endif
374
375 /* ahash_finup shared descriptor */
376 desc = ctx->sh_desc_finup;
377
378 ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
379 OP_ALG_AS_FINALIZE, digestsize, ctx);
380
381 ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
382 DMA_TO_DEVICE);
383 if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
384 dev_err(jrdev, "unable to map shared descriptor\n");
385 return -ENOMEM;
386 }
387#ifdef DEBUG
388 print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
389 DUMP_PREFIX_ADDRESS, 16, 4, desc,
390 desc_bytes(desc), 1);
391#endif
392
393 /* ahash_digest shared descriptor */
394 desc = ctx->sh_desc_digest;
395
396 ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
397 digestsize, ctx);
398
399 ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
400 desc_bytes(desc),
401 DMA_TO_DEVICE);
402 if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
403 dev_err(jrdev, "unable to map shared descriptor\n");
404 return -ENOMEM;
405 }
406#ifdef DEBUG
407 print_hex_dump(KERN_ERR,
408 "ahash digest shdesc@"__stringify(__LINE__)": ",
409 DUMP_PREFIX_ADDRESS, 16, 4, desc,
410 desc_bytes(desc), 1);
411#endif
412
413 return 0;
414}
415
416static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
417 u32 keylen)
418{
419 return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
420 ctx->split_key_pad_len, key_in, keylen,
421 ctx->alg_op);
422}
423
424/* Digest the key if it is too long, replacing it with a digest-sized key */
425static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
426 u32 *keylen, u8 *key_out, u32 digestsize)
427{
428 struct device *jrdev = ctx->jrdev;
429 u32 *desc;
430 struct split_key_result result;
431 dma_addr_t src_dma, dst_dma;
432 int ret = 0;
433
434 desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
435 if (!desc) {
436 dev_err(jrdev, "unable to allocate key input memory\n");
437 return -ENOMEM;
438 }
439
440 init_job_desc(desc, 0);
441
442 src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
443 DMA_TO_DEVICE);
444 if (dma_mapping_error(jrdev, src_dma)) {
445 dev_err(jrdev, "unable to map key input memory\n");
446 kfree(desc);
447 return -ENOMEM;
448 }
449 dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
450 DMA_FROM_DEVICE);
451 if (dma_mapping_error(jrdev, dst_dma)) {
452 dev_err(jrdev, "unable to map key output memory\n");
453 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
454 kfree(desc);
455 return -ENOMEM;
456 }
457
458 /* Job descriptor to perform unkeyed hash on key_in */
459 append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
460 OP_ALG_AS_INITFINAL);
461 append_seq_in_ptr(desc, src_dma, *keylen, 0);
462 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
463 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
464 append_seq_out_ptr(desc, dst_dma, digestsize, 0);
465 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
466 LDST_SRCDST_BYTE_CONTEXT);
467
468#ifdef DEBUG
469 print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
470 DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
471 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
472 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
473#endif
474
475 result.err = 0;
476 init_completion(&result.completion);
477
478 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
479 if (!ret) {
480 /* in progress */
481 wait_for_completion_interruptible(&result.completion);
482 ret = result.err;
483#ifdef DEBUG
484 print_hex_dump(KERN_ERR,
485 "digested key@"__stringify(__LINE__)": ",
486 DUMP_PREFIX_ADDRESS, 16, 4, key_in,
487 digestsize, 1);
488#endif
489 }
490 *keylen = digestsize;
491
492 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
493 dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
494
495 kfree(desc);
496
497 return ret;
498}
499
500static int ahash_setkey(struct crypto_ahash *ahash,
501 const u8 *key, unsigned int keylen)
502{
503 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
504 static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
505 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
506 struct device *jrdev = ctx->jrdev;
507 int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
508 int digestsize = crypto_ahash_digestsize(ahash);
509 int ret = 0;
510 u8 *hashed_key = NULL;
511
512#ifdef DEBUG
513 printk(KERN_ERR "keylen %d\n", keylen);
514#endif
515
516 if (keylen > blocksize) {
517 hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
518 GFP_DMA);
519 if (!hashed_key)
520 return -ENOMEM;
521 ret = hash_digest_key(ctx, key, &keylen, hashed_key,
522 digestsize);
523 if (ret)
524 goto badkey;
525 key = hashed_key;
526 }
527
528 /* Pick class 2 key length from algorithm submask */
529 ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
530 OP_ALG_ALGSEL_SHIFT] * 2;
531 ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
532
533#ifdef DEBUG
534 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
535 ctx->split_key_len, ctx->split_key_pad_len);
536 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
537 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
538#endif
539
540 ret = gen_split_hash_key(ctx, key, keylen);
541 if (ret)
542 goto badkey;
543
544 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
545 DMA_TO_DEVICE);
546 if (dma_mapping_error(jrdev, ctx->key_dma)) {
547 dev_err(jrdev, "unable to map key i/o memory\n");
548 ret = -ENOMEM;
549 goto map_err;
550 }
551#ifdef DEBUG
552 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
553 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
554 ctx->split_key_pad_len, 1);
555#endif
556
557 ret = ahash_set_sh_desc(ahash);
558 if (ret) {
559 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
560 DMA_TO_DEVICE);
561 }
562
563map_err:
564 kfree(hashed_key);
565 return ret;
566badkey:
567 kfree(hashed_key);
568 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
569 return -EINVAL;
570}
571
572/*
573 * ahash_edesc - s/w-extended ahash descriptor
574 * @dst_dma: physical mapped address of req->result
575 * @sec4_sg_dma: physical mapped address of h/w link table
576 * @chained: if source is chained
577 * @src_nents: number of segments in input scatterlist
578 * @sec4_sg_bytes: length of dma mapped sec4_sg space
579 * @sec4_sg: pointer to h/w link table
580 * @hw_desc: the h/w job descriptor followed by any referenced link tables
581 */
582struct ahash_edesc {
583 dma_addr_t dst_dma;
584 dma_addr_t sec4_sg_dma;
585 bool chained;
586 int src_nents;
587 int sec4_sg_bytes;
588 struct sec4_sg_entry *sec4_sg;
589 u32 hw_desc[0];
590};
591
592static inline void ahash_unmap(struct device *dev,
593 struct ahash_edesc *edesc,
594 struct ahash_request *req, int dst_len)
595{
596 if (edesc->src_nents)
597 dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
598 DMA_TO_DEVICE, edesc->chained);
599 if (edesc->dst_dma)
600 dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
601
602 if (edesc->sec4_sg_bytes)
603 dma_unmap_single(dev, edesc->sec4_sg_dma,
604 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
605}
606
607static inline void ahash_unmap_ctx(struct device *dev,
608 struct ahash_edesc *edesc,
609 struct ahash_request *req, int dst_len, u32 flag)
610{
611 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
612 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
613 struct caam_hash_state *state = ahash_request_ctx(req);
614
615 if (state->ctx_dma)
616 dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
617 ahash_unmap(dev, edesc, req, dst_len);
618}
619
620static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
621 void *context)
622{
623 struct ahash_request *req = context;
624 struct ahash_edesc *edesc;
625 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
626 int digestsize = crypto_ahash_digestsize(ahash);
627#ifdef DEBUG
628 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
629 struct caam_hash_state *state = ahash_request_ctx(req);
630
631 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
632#endif
633
634 edesc = (struct ahash_edesc *)((char *)desc -
635 offsetof(struct ahash_edesc, hw_desc));
636 if (err) {
637 char tmp[CAAM_ERROR_STR_MAX];
638
639 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
640 }
641
642 ahash_unmap(jrdev, edesc, req, digestsize);
643 kfree(edesc);
644
645#ifdef DEBUG
646 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
647 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
648 ctx->ctx_len, 1);
649 if (req->result)
650 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
651 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
652 digestsize, 1);
653#endif
654
655 req->base.complete(&req->base, err);
656}
657
658static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
659 void *context)
660{
661 struct ahash_request *req = context;
662 struct ahash_edesc *edesc;
663 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
664 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
665#ifdef DEBUG
666 struct caam_hash_state *state = ahash_request_ctx(req);
667 int digestsize = crypto_ahash_digestsize(ahash);
668
669 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
670#endif
671
672 edesc = (struct ahash_edesc *)((char *)desc -
673 offsetof(struct ahash_edesc, hw_desc));
674 if (err) {
675 char tmp[CAAM_ERROR_STR_MAX];
676
677 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
678 }
679
680 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
681 kfree(edesc);
682
683#ifdef DEBUG
684 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
685 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
686 ctx->ctx_len, 1);
687 if (req->result)
688 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
689 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
690 digestsize, 1);
691#endif
692
693 req->base.complete(&req->base, err);
694}
695
696static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
697 void *context)
698{
699 struct ahash_request *req = context;
700 struct ahash_edesc *edesc;
701 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
702 int digestsize = crypto_ahash_digestsize(ahash);
703#ifdef DEBUG
704 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
705 struct caam_hash_state *state = ahash_request_ctx(req);
706
707 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
708#endif
709
710 edesc = (struct ahash_edesc *)((char *)desc -
711 offsetof(struct ahash_edesc, hw_desc));
712 if (err) {
713 char tmp[CAAM_ERROR_STR_MAX];
714
715 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
716 }
717
718 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
719 kfree(edesc);
720
721#ifdef DEBUG
722 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
723 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
724 ctx->ctx_len, 1);
725 if (req->result)
726 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
727 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
728 digestsize, 1);
729#endif
730
731 req->base.complete(&req->base, err);
732}
733
734static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
735 void *context)
736{
737 struct ahash_request *req = context;
738 struct ahash_edesc *edesc;
739 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
740 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
741#ifdef DEBUG
742 struct caam_hash_state *state = ahash_request_ctx(req);
743 int digestsize = crypto_ahash_digestsize(ahash);
744
745 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
746#endif
747
748 edesc = (struct ahash_edesc *)((char *)desc -
749 offsetof(struct ahash_edesc, hw_desc));
750 if (err) {
751 char tmp[CAAM_ERROR_STR_MAX];
752
753 dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
754 }
755
756 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
757 kfree(edesc);
758
759#ifdef DEBUG
760 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
761 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
762 ctx->ctx_len, 1);
763 if (req->result)
764 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
765 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
766 digestsize, 1);
767#endif
768
769 req->base.complete(&req->base, err);
770}
771
772/* submit update job descriptor */
773static int ahash_update_ctx(struct ahash_request *req)
774{
775 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
776 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
777 struct caam_hash_state *state = ahash_request_ctx(req);
778 struct device *jrdev = ctx->jrdev;
779 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
780 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
781 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
782 int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
783 u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
784 int *next_buflen = state->current_buf ? &state->buflen_0 :
785 &state->buflen_1, last_buflen;
786 int in_len = *buflen + req->nbytes, to_hash;
787 u32 *sh_desc = ctx->sh_desc_update, *desc;
788 dma_addr_t ptr = ctx->sh_desc_update_dma;
789 int src_nents, sec4_sg_bytes, sec4_sg_src_index;
790 struct ahash_edesc *edesc;
791 bool chained = false;
792 int ret = 0;
793 int sh_len;
794
795 last_buflen = *next_buflen;
796 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
797 to_hash = in_len - *next_buflen;
798
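	/*
	 * Only whole blocks are sent to CAAM; the trailing partial block, if
	 * any, is kept in the next s/w buffer and prepended on the next call.
	 */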
799 if (to_hash) {
800 src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
801 &chained);
802 sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
803 sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
804 sizeof(struct sec4_sg_entry);
805
806 /*
807 * allocate space for base edesc and hw desc commands,
808 * link tables
809 */
810 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
811 sec4_sg_bytes, GFP_DMA | flags);
812 if (!edesc) {
813 dev_err(jrdev,
814 "could not allocate extended descriptor\n");
815 return -ENOMEM;
816 }
817
818 edesc->src_nents = src_nents;
819 edesc->chained = chained;
820 edesc->sec4_sg_bytes = sec4_sg_bytes;
821 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
822 DESC_JOB_IO_LEN;
823 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
824 sec4_sg_bytes,
825 DMA_TO_DEVICE);
826
827 ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
828 edesc->sec4_sg, DMA_BIDIRECTIONAL);
829
830 state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
831 edesc->sec4_sg + 1,
832 buf, state->buf_dma,
833 *buflen, last_buflen);
834
835 if (src_nents) {
836 src_map_to_sec4_sg(jrdev, req->src, src_nents,
837 edesc->sec4_sg + sec4_sg_src_index,
838 chained);
839 if (*next_buflen) {
840 sg_copy_part(next_buf, req->src, to_hash -
841 *buflen, req->nbytes);
842 state->current_buf = !state->current_buf;
843 }
844 } else {
845 (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
846 SEC4_SG_LEN_FIN;
847 }
848
849 sh_len = desc_len(sh_desc);
850 desc = edesc->hw_desc;
851 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
852 HDR_REVERSE);
853
854 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
855 to_hash, LDST_SGF);
856
857 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
858
859#ifdef DEBUG
860 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
861 DUMP_PREFIX_ADDRESS, 16, 4, desc,
862 desc_bytes(desc), 1);
863#endif
864
865 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
866 if (!ret) {
867 ret = -EINPROGRESS;
868 } else {
869 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
870 DMA_BIDIRECTIONAL);
871 kfree(edesc);
872 }
873 } else if (*next_buflen) {
874 sg_copy(buf + *buflen, req->src, req->nbytes);
875 *buflen = *next_buflen;
876 *next_buflen = last_buflen;
877 }
878#ifdef DEBUG
879 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
880 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
881 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
882 DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
883 *next_buflen, 1);
884#endif
885
886 return ret;
887}
888
889static int ahash_final_ctx(struct ahash_request *req)
890{
891 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
892 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
893 struct caam_hash_state *state = ahash_request_ctx(req);
894 struct device *jrdev = ctx->jrdev;
895 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
896 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
897 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
898 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
899 int last_buflen = state->current_buf ? state->buflen_0 :
900 state->buflen_1;
901 u32 *sh_desc = ctx->sh_desc_fin, *desc;
902 dma_addr_t ptr = ctx->sh_desc_fin_dma;
903 int sec4_sg_bytes;
904 int digestsize = crypto_ahash_digestsize(ahash);
905 struct ahash_edesc *edesc;
906 int ret = 0;
907 int sh_len;
908
909 sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
910
911 /* allocate space for base edesc and hw desc commands, link tables */
912 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
913 sec4_sg_bytes, GFP_DMA | flags);
914 if (!edesc) {
915 dev_err(jrdev, "could not allocate extended descriptor\n");
916 return -ENOMEM;
917 }
918
919 sh_len = desc_len(sh_desc);
920 desc = edesc->hw_desc;
921 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
922
923 edesc->sec4_sg_bytes = sec4_sg_bytes;
924 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
925 DESC_JOB_IO_LEN;
926 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
927 sec4_sg_bytes, DMA_TO_DEVICE);
928 edesc->src_nents = 0;
929
930 ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
931 DMA_TO_DEVICE);
932
933 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
934 buf, state->buf_dma, buflen,
935 last_buflen);
936 (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
937
938 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
939 LDST_SGF);
940
941 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
942 digestsize);
943
944#ifdef DEBUG
945 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
946 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
947#endif
948
949 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
950 if (!ret) {
951 ret = -EINPROGRESS;
952 } else {
953 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
954 kfree(edesc);
955 }
956
957 return ret;
958}
959
960static int ahash_finup_ctx(struct ahash_request *req)
961{
962 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
963 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
964 struct caam_hash_state *state = ahash_request_ctx(req);
965 struct device *jrdev = ctx->jrdev;
966 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
967 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
968 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
969 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
970 int last_buflen = state->current_buf ? state->buflen_0 :
971 state->buflen_1;
972 u32 *sh_desc = ctx->sh_desc_finup, *desc;
973 dma_addr_t ptr = ctx->sh_desc_finup_dma;
974 int sec4_sg_bytes, sec4_sg_src_index;
975 int src_nents;
976 int digestsize = crypto_ahash_digestsize(ahash);
977 struct ahash_edesc *edesc;
978 bool chained = false;
979 int ret = 0;
980 int sh_len;
981
982 src_nents = __sg_count(req->src, req->nbytes, &chained);
983 sec4_sg_src_index = 1 + (buflen ? 1 : 0);
984 sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
985 sizeof(struct sec4_sg_entry);
986
987 /* allocate space for base edesc and hw desc commands, link tables */
988 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
989 sec4_sg_bytes, GFP_DMA | flags);
990 if (!edesc) {
991 dev_err(jrdev, "could not allocate extended descriptor\n");
992 return -ENOMEM;
993 }
994
995 sh_len = desc_len(sh_desc);
996 desc = edesc->hw_desc;
997 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
998
999 edesc->src_nents = src_nents;
1000 edesc->chained = chained;
1001 edesc->sec4_sg_bytes = sec4_sg_bytes;
1002 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1003 DESC_JOB_IO_LEN;
1004 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1005 sec4_sg_bytes, DMA_TO_DEVICE);
1006
1007 ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
1008 DMA_TO_DEVICE);
1009
1010 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
1011 buf, state->buf_dma, buflen,
1012 last_buflen);
1013
1014 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
1015 sec4_sg_src_index, chained);
1016
1017 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
1018 buflen + req->nbytes, LDST_SGF);
1019
1020 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1021 digestsize);
1022
1023#ifdef DEBUG
1024 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1025 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1026#endif
1027
1028 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
1029 if (!ret) {
1030 ret = -EINPROGRESS;
1031 } else {
1032 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
1033 kfree(edesc);
1034 }
1035
1036 return ret;
1037}
1038
1039static int ahash_digest(struct ahash_request *req)
1040{
1041 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1042 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1043 struct device *jrdev = ctx->jrdev;
1044 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1045 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1046 u32 *sh_desc = ctx->sh_desc_digest, *desc;
1047 dma_addr_t ptr = ctx->sh_desc_digest_dma;
1048 int digestsize = crypto_ahash_digestsize(ahash);
1049 int src_nents, sec4_sg_bytes;
1050 dma_addr_t src_dma;
1051 struct ahash_edesc *edesc;
1052 bool chained = false;
1053 int ret = 0;
1054 u32 options;
1055 int sh_len;
1056
1057 src_nents = sg_count(req->src, req->nbytes, &chained);
1058 dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
1059 chained);
1060 sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
1061
1062 /* allocate space for base edesc and hw desc commands, link tables */
1063 edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
1064 DESC_JOB_IO_LEN, GFP_DMA | flags);
1065 if (!edesc) {
1066 dev_err(jrdev, "could not allocate extended descriptor\n");
1067 return -ENOMEM;
1068 }
1069 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1070 DESC_JOB_IO_LEN;
1071 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1072 sec4_sg_bytes, DMA_TO_DEVICE);
1073 edesc->src_nents = src_nents;
1074 edesc->chained = chained;
1075
1076 sh_len = desc_len(sh_desc);
1077 desc = edesc->hw_desc;
1078 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1079
1080 if (src_nents) {
1081 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
1082 src_dma = edesc->sec4_sg_dma;
1083 options = LDST_SGF;
1084 } else {
1085 src_dma = sg_dma_address(req->src);
1086 options = 0;
1087 }
1088 append_seq_in_ptr(desc, src_dma, req->nbytes, options);
1089
1090 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1091 digestsize);
1092
1093#ifdef DEBUG
1094 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1095 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1096#endif
1097
1098 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1099 if (!ret) {
1100 ret = -EINPROGRESS;
1101 } else {
1102 ahash_unmap(jrdev, edesc, req, digestsize);
1103 kfree(edesc);
1104 }
1105
1106 return ret;
1107}
1108
1109/* submit ahash final if it is the first job descriptor */
1110static int ahash_final_no_ctx(struct ahash_request *req)
1111{
1112 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1113 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1114 struct caam_hash_state *state = ahash_request_ctx(req);
1115 struct device *jrdev = ctx->jrdev;
1116 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1117 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1118 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1119 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1120 u32 *sh_desc = ctx->sh_desc_digest, *desc;
1121 dma_addr_t ptr = ctx->sh_desc_digest_dma;
1122 int digestsize = crypto_ahash_digestsize(ahash);
1123 struct ahash_edesc *edesc;
1124 int ret = 0;
1125 int sh_len;
1126
1127 /* allocate space for base edesc and hw desc commands, link tables */
1128 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
1129 GFP_DMA | flags);
1130 if (!edesc) {
1131 dev_err(jrdev, "could not allocate extended descriptor\n");
1132 return -ENOMEM;
1133 }
1134
1135 sh_len = desc_len(sh_desc);
1136 desc = edesc->hw_desc;
1137 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1138
1139 state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
1140
1141 append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1142
1143 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1144 digestsize);
1145 edesc->src_nents = 0;
1146
1147#ifdef DEBUG
1148 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1149 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1150#endif
1151
1152 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1153 if (!ret) {
1154 ret = -EINPROGRESS;
1155 } else {
1156 ahash_unmap(jrdev, edesc, req, digestsize);
1157 kfree(edesc);
1158 }
1159
1160 return ret;
1161}
1162
1163/* submit ahash update if it is the first job descriptor after update */
1164static int ahash_update_no_ctx(struct ahash_request *req)
1165{
1166 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1167 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1168 struct caam_hash_state *state = ahash_request_ctx(req);
1169 struct device *jrdev = ctx->jrdev;
1170 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1171 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1172 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1173 int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
1174 u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
1175 int *next_buflen = state->current_buf ? &state->buflen_0 :
1176 &state->buflen_1;
1177 int in_len = *buflen + req->nbytes, to_hash;
1178 int sec4_sg_bytes, src_nents;
1179 struct ahash_edesc *edesc;
1180 u32 *desc, *sh_desc = ctx->sh_desc_update_first;
1181 dma_addr_t ptr = ctx->sh_desc_update_first_dma;
1182 bool chained = false;
1183 int ret = 0;
1184 int sh_len;
1185
1186 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
1187 to_hash = in_len - *next_buflen;
1188
1189 if (to_hash) {
1190 src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
1191 &chained);
1192 sec4_sg_bytes = (1 + src_nents) *
1193 sizeof(struct sec4_sg_entry);
1194
1195 /*
1196 * allocate space for base edesc and hw desc commands,
1197 * link tables
1198 */
1199 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1200 sec4_sg_bytes, GFP_DMA | flags);
1201 if (!edesc) {
1202 dev_err(jrdev,
1203 "could not allocate extended descriptor\n");
1204 return -ENOMEM;
1205 }
1206
1207 edesc->src_nents = src_nents;
1208 edesc->chained = chained;
1209 edesc->sec4_sg_bytes = sec4_sg_bytes;
1210 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1211 DESC_JOB_IO_LEN;
1212 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1213 sec4_sg_bytes,
1214 DMA_TO_DEVICE);
1215
1216 state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
1217 buf, *buflen);
1218 src_map_to_sec4_sg(jrdev, req->src, src_nents,
1219 edesc->sec4_sg + 1, chained);
1220 if (*next_buflen) {
1221 sg_copy_part(next_buf, req->src, to_hash - *buflen,
1222 req->nbytes);
1223 state->current_buf = !state->current_buf;
1224 }
1225
1226 sh_len = desc_len(sh_desc);
1227 desc = edesc->hw_desc;
1228 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
1229 HDR_REVERSE);
1230
1231 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
1232
1233 map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1234
1235#ifdef DEBUG
1236 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1237 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1238 desc_bytes(desc), 1);
1239#endif
1240
1241 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
1242 if (!ret) {
1243 ret = -EINPROGRESS;
1244 state->update = ahash_update_ctx;
1245 state->finup = ahash_finup_ctx;
1246 state->final = ahash_final_ctx;
1247 } else {
1248 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
1249 DMA_TO_DEVICE);
1250 kfree(edesc);
1251 }
1252 } else if (*next_buflen) {
1253 sg_copy(buf + *buflen, req->src, req->nbytes);
1254 *buflen = *next_buflen;
1255 *next_buflen = 0;
1256 }
1257#ifdef DEBUG
1258 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
1259 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
1260 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
1261 DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1262 *next_buflen, 1);
1263#endif
1264
1265 return ret;
1266}
1267
1268/* submit ahash finup if it is the first job descriptor after update */
1269static int ahash_finup_no_ctx(struct ahash_request *req)
1270{
1271 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1272 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1273 struct caam_hash_state *state = ahash_request_ctx(req);
1274 struct device *jrdev = ctx->jrdev;
1275 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1276 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1277 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1278 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
1279 int last_buflen = state->current_buf ? state->buflen_0 :
1280 state->buflen_1;
1281 u32 *sh_desc = ctx->sh_desc_digest, *desc;
1282 dma_addr_t ptr = ctx->sh_desc_digest_dma;
1283 int sec4_sg_bytes, sec4_sg_src_index, src_nents;
1284 int digestsize = crypto_ahash_digestsize(ahash);
1285 struct ahash_edesc *edesc;
1286 bool chained = false;
1287 int sh_len;
1288 int ret = 0;
1289
1290 src_nents = __sg_count(req->src, req->nbytes, &chained);
1291 sec4_sg_src_index = 2;
1292 sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
1293 sizeof(struct sec4_sg_entry);
1294
1295 /* allocate space for base edesc and hw desc commands, link tables */
1296 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1297 sec4_sg_bytes, GFP_DMA | flags);
1298 if (!edesc) {
1299 dev_err(jrdev, "could not allocate extended descriptor\n");
1300 return -ENOMEM;
1301 }
1302
1303 sh_len = desc_len(sh_desc);
1304 desc = edesc->hw_desc;
1305 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
1306
1307 edesc->src_nents = src_nents;
1308 edesc->chained = chained;
1309 edesc->sec4_sg_bytes = sec4_sg_bytes;
1310 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1311 DESC_JOB_IO_LEN;
1312 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1313 sec4_sg_bytes, DMA_TO_DEVICE);
1314
1315 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
1316 state->buf_dma, buflen,
1317 last_buflen);
1318
1319 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
1320 chained);
1321
1322 append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
1323 req->nbytes, LDST_SGF);
1324
1325 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1326 digestsize);
1327
1328#ifdef DEBUG
1329 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1330 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
1331#endif
1332
1333 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
1334 if (!ret) {
1335 ret = -EINPROGRESS;
1336 } else {
1337 ahash_unmap(jrdev, edesc, req, digestsize);
1338 kfree(edesc);
1339 }
1340
1341 return ret;
1342}
1343
1344/* submit first update job descriptor after init */
1345static int ahash_update_first(struct ahash_request *req)
1346{
1347 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1348 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1349 struct caam_hash_state *state = ahash_request_ctx(req);
1350 struct device *jrdev = ctx->jrdev;
1351 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1352 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1353 u8 *next_buf = state->buf_0 + state->current_buf *
1354 CAAM_MAX_HASH_BLOCK_SIZE;
1355 int *next_buflen = &state->buflen_0 + state->current_buf;
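	/*
	 * Note: the arithmetic above only reaches buf_1/buflen_1 if they are
	 * laid out contiguously after buf_0/buflen_0, which the
	 * ____cacheline_aligned members do not guarantee. In practice
	 * current_buf is 0 here because ahash_init() has just run.
	 */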
1356 int to_hash;
1357 u32 *sh_desc = ctx->sh_desc_update_first, *desc;
1358 dma_addr_t ptr = ctx->sh_desc_update_first_dma;
1359 int sec4_sg_bytes, src_nents;
1360 dma_addr_t src_dma;
1361 u32 options;
1362 struct ahash_edesc *edesc;
1363 bool chained = false;
1364 int ret = 0;
1365 int sh_len;
1366
1367 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
1368 1);
1369 to_hash = req->nbytes - *next_buflen;
1370
1371 if (to_hash) {
1372 src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
1373 &chained);
1374 dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1375 DMA_TO_DEVICE, chained);
1376 sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
1377
1378 /*
1379 * allocate space for base edesc and hw desc commands,
1380 * link tables
1381 */
1382 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
1383 sec4_sg_bytes, GFP_DMA | flags);
1384 if (!edesc) {
1385 dev_err(jrdev,
1386 "could not allocate extended descriptor\n");
1387 return -ENOMEM;
1388 }
1389
1390 edesc->src_nents = src_nents;
1391 edesc->chained = chained;
1392 edesc->sec4_sg_bytes = sec4_sg_bytes;
1393 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
1394 DESC_JOB_IO_LEN;
1395 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1396 sec4_sg_bytes,
1397 DMA_TO_DEVICE);
1398
1399 if (src_nents) {
1400 sg_to_sec4_sg_last(req->src, src_nents,
1401 edesc->sec4_sg, 0);
1402 src_dma = edesc->sec4_sg_dma;
1403 options = LDST_SGF;
1404 } else {
1405 src_dma = sg_dma_address(req->src);
1406 options = 0;
1407 }
1408
1409 if (*next_buflen)
1410 sg_copy_part(next_buf, req->src, to_hash, req->nbytes);
1411
1412 sh_len = desc_len(sh_desc);
1413 desc = edesc->hw_desc;
1414 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
1415 HDR_REVERSE);
1416
1417 append_seq_in_ptr(desc, src_dma, to_hash, options);
1418
1419 map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1420
1421#ifdef DEBUG
1422 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
1423 DUMP_PREFIX_ADDRESS, 16, 4, desc,
1424 desc_bytes(desc), 1);
1425#endif
1426
1427 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
1428 req);
1429 if (!ret) {
1430 ret = -EINPROGRESS;
1431 state->update = ahash_update_ctx;
1432 state->finup = ahash_finup_ctx;
1433 state->final = ahash_final_ctx;
1434 } else {
1435 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
1436 DMA_TO_DEVICE);
1437 kfree(edesc);
1438 }
1439 } else if (*next_buflen) {
1440 state->update = ahash_update_no_ctx;
1441 state->finup = ahash_finup_no_ctx;
1442 state->final = ahash_final_no_ctx;
1443 sg_copy(next_buf, req->src, req->nbytes);
1444 }
1445#ifdef DEBUG
1446 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
1447 DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
1448 *next_buflen, 1);
1449#endif
1450
1451 return ret;
1452}
1453
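/*
 * finup as the very first job descriptor (no prior update, so nothing is
 * buffered and no context exists yet) is simply a one-shot digest of the
 * request, so it reuses ahash_digest().
 */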
1454static int ahash_finup_first(struct ahash_request *req)
1455{
1456 return ahash_digest(req);
1457}
1458
1459static int ahash_init(struct ahash_request *req)
1460{
1461 struct caam_hash_state *state = ahash_request_ctx(req);
1462
1463 state->update = ahash_update_first;
1464 state->finup = ahash_finup_first;
1465 state->final = ahash_final_no_ctx;
1466
1467 state->current_buf = 0;
1468
1469 return 0;
1470}
1471
1472static int ahash_update(struct ahash_request *req)
1473{
1474 struct caam_hash_state *state = ahash_request_ctx(req);
1475
1476 return state->update(req);
1477}
1478
1479static int ahash_finup(struct ahash_request *req)
1480{
1481 struct caam_hash_state *state = ahash_request_ctx(req);
1482
1483 return state->finup(req);
1484}
1485
1486static int ahash_final(struct ahash_request *req)
1487{
1488 struct caam_hash_state *state = ahash_request_ctx(req);
1489
1490 return state->final(req);
1491}
1492
1493static int ahash_export(struct ahash_request *req, void *out)
1494{
1495 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1496 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1497 struct caam_hash_state *state = ahash_request_ctx(req);
1498
1499 memcpy(out, ctx, sizeof(struct caam_hash_ctx));
1500 memcpy(out + sizeof(struct caam_hash_ctx), state,
1501 sizeof(struct caam_hash_state));
1502 return 0;
1503}
1504
1505static int ahash_import(struct ahash_request *req, const void *in)
1506{
1507 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1508 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1509 struct caam_hash_state *state = ahash_request_ctx(req);
1510
1511 memcpy(ctx, in, sizeof(struct caam_hash_ctx));
1512 memcpy(state, in + sizeof(struct caam_hash_ctx),
1513 sizeof(struct caam_hash_state));
1514 return 0;
1515}
1516
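/*
 * One template per base algorithm: each entry carries the names of both the
 * unkeyed hash (e.g. "sha1") and its hmac() counterpart (e.g. "hmac(sha1)"),
 * plus the CAAM algorithm selector (alg_type) and the HMAC split-key
 * operation (alg_op) used when building descriptors and keys.
 */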
1517struct caam_hash_template {
1518 char name[CRYPTO_MAX_ALG_NAME];
1519 char driver_name[CRYPTO_MAX_ALG_NAME];
1520 char hmac_name[CRYPTO_MAX_ALG_NAME];
1521 char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1522 unsigned int blocksize;
1523 struct ahash_alg template_ahash;
1524 u32 alg_type;
1525 u32 alg_op;
1526};
1527
1528/* ahash descriptors */
1529static struct caam_hash_template driver_hash[] = {
1530 {
1531 .name = "sha1",
1532 .driver_name = "sha1-caam",
1533 .hmac_name = "hmac(sha1)",
1534 .hmac_driver_name = "hmac-sha1-caam",
1535 .blocksize = SHA1_BLOCK_SIZE,
1536 .template_ahash = {
1537 .init = ahash_init,
1538 .update = ahash_update,
1539 .final = ahash_final,
1540 .finup = ahash_finup,
1541 .digest = ahash_digest,
1542 .export = ahash_export,
1543 .import = ahash_import,
1544 .setkey = ahash_setkey,
1545 .halg = {
1546 .digestsize = SHA1_DIGEST_SIZE,
1547 },
1548 },
1549 .alg_type = OP_ALG_ALGSEL_SHA1,
1550 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1551 }, {
1552 .name = "sha224",
1553 .driver_name = "sha224-caam",
1554 .hmac_name = "hmac(sha224)",
1555 .hmac_driver_name = "hmac-sha224-caam",
1556 .blocksize = SHA224_BLOCK_SIZE,
1557 .template_ahash = {
1558 .init = ahash_init,
1559 .update = ahash_update,
1560 .final = ahash_final,
1561 .finup = ahash_finup,
1562 .digest = ahash_digest,
1563 .export = ahash_export,
1564 .import = ahash_import,
1565 .setkey = ahash_setkey,
1566 .halg = {
1567 .digestsize = SHA224_DIGEST_SIZE,
1568 },
1569 },
1570 .alg_type = OP_ALG_ALGSEL_SHA224,
1571 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1572 }, {
1573 .name = "sha256",
1574 .driver_name = "sha256-caam",
1575 .hmac_name = "hmac(sha256)",
1576 .hmac_driver_name = "hmac-sha256-caam",
1577 .blocksize = SHA256_BLOCK_SIZE,
1578 .template_ahash = {
1579 .init = ahash_init,
1580 .update = ahash_update,
1581 .final = ahash_final,
1582 .finup = ahash_finup,
1583 .digest = ahash_digest,
1584 .export = ahash_export,
1585 .import = ahash_import,
1586 .setkey = ahash_setkey,
1587 .halg = {
1588 .digestsize = SHA256_DIGEST_SIZE,
1589 },
1590 },
1591 .alg_type = OP_ALG_ALGSEL_SHA256,
1592 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1593 }, {
Yuan Kangb0e09ba2012-06-22 19:48:48 -05001594 .name = "sha384",
1595 .driver_name = "sha384-caam",
1596 .hmac_name = "hmac(sha384)",
1597 .hmac_driver_name = "hmac-sha384-caam",
Yuan Kang045e3672012-06-22 19:48:47 -05001598 .blocksize = SHA384_BLOCK_SIZE,
1599 .template_ahash = {
1600 .init = ahash_init,
1601 .update = ahash_update,
1602 .final = ahash_final,
1603 .finup = ahash_finup,
1604 .digest = ahash_digest,
1605 .export = ahash_export,
1606 .import = ahash_import,
1607 .setkey = ahash_setkey,
1608 .halg = {
1609 .digestsize = SHA384_DIGEST_SIZE,
1610 },
1611 },
1612 .alg_type = OP_ALG_ALGSEL_SHA384,
1613 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1614 }, {
Yuan Kangb0e09ba2012-06-22 19:48:48 -05001615 .name = "sha512",
1616 .driver_name = "sha512-caam",
1617 .hmac_name = "hmac(sha512)",
1618 .hmac_driver_name = "hmac-sha512-caam",
Yuan Kang045e3672012-06-22 19:48:47 -05001619 .blocksize = SHA512_BLOCK_SIZE,
1620 .template_ahash = {
1621 .init = ahash_init,
1622 .update = ahash_update,
1623 .final = ahash_final,
1624 .finup = ahash_finup,
1625 .digest = ahash_digest,
1626 .export = ahash_export,
1627 .import = ahash_import,
1628 .setkey = ahash_setkey,
1629 .halg = {
1630 .digestsize = SHA512_DIGEST_SIZE,
1631 },
1632 },
1633 .alg_type = OP_ALG_ALGSEL_SHA512,
1634 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1635 }, {
Yuan Kangb0e09ba2012-06-22 19:48:48 -05001636 .name = "md5",
1637 .driver_name = "md5-caam",
1638 .hmac_name = "hmac(md5)",
1639 .hmac_driver_name = "hmac-md5-caam",
Yuan Kang045e3672012-06-22 19:48:47 -05001640 .blocksize = MD5_BLOCK_WORDS * 4,
1641 .template_ahash = {
1642 .init = ahash_init,
1643 .update = ahash_update,
1644 .final = ahash_final,
1645 .finup = ahash_finup,
1646 .digest = ahash_digest,
1647 .export = ahash_export,
1648 .import = ahash_import,
1649 .setkey = ahash_setkey,
1650 .halg = {
1651 .digestsize = MD5_DIGEST_SIZE,
1652 },
1653 },
1654 .alg_type = OP_ALG_ALGSEL_MD5,
1655 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1656 },
1657};
1658
1659struct caam_hash_alg {
1660 struct list_head entry;
Yuan Kang045e3672012-06-22 19:48:47 -05001661 int alg_type;
1662 int alg_op;
1663 struct ahash_alg ahash_alg;
1664};
1665
1666static int caam_hash_cra_init(struct crypto_tfm *tfm)
1667{
1668 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1669 struct crypto_alg *base = tfm->__crt_alg;
1670 struct hash_alg_common *halg =
1671 container_of(base, struct hash_alg_common, base);
1672 struct ahash_alg *alg =
1673 container_of(halg, struct ahash_alg, halg);
1674 struct caam_hash_alg *caam_hash =
1675 container_of(alg, struct caam_hash_alg, ahash_alg);
1676 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
Yuan Kang045e3672012-06-22 19:48:47 -05001677 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
1678 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1679 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1680 HASH_MSG_LEN + 32,
1681 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1682 HASH_MSG_LEN + 64,
1683 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
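	/*
	 * The bare 32 and 64 above are intentional: for SHA-224 and SHA-384
	 * the MDHA running digest is the full untruncated SHA-256/SHA-512
	 * state (32 and 64 bytes), which is larger than their final digest
	 * sizes of 28 and 48 bytes.
	 */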
Yuan Kang045e3672012-06-22 19:48:47 -05001684 int ret = 0;
1685
1686 /*
Ruchika Guptacfc6f112013-10-25 12:01:03 +05301687	 * Get a job ring from the Job Ring driver to ensure in-order
Yuan Kang045e3672012-06-22 19:48:47 -05001688 * crypto request processing per tfm
1689 */
Ruchika Guptacfc6f112013-10-25 12:01:03 +05301690 ctx->jrdev = caam_jr_alloc();
1691 if (IS_ERR(ctx->jrdev)) {
1692 pr_err("Job Ring Device allocation for transform failed\n");
1693 return PTR_ERR(ctx->jrdev);
1694 }
Yuan Kang045e3672012-06-22 19:48:47 -05001695 /* copy descriptor header template value */
1696 ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1697 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
1698
1699 ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
1700 OP_ALG_ALGSEL_SHIFT];
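	/*
	 * Worked example (assuming the usual CAAM ALGSEL encoding, where the
	 * low nibble of the ALGSEL field counts MD5..SHA512 in the same
	 * order as runninglen[] above): for sha256 and hmac(sha256) the
	 * sub-field selects index 3, so ctx->ctx_len = HASH_MSG_LEN +
	 * SHA256_DIGEST_SIZE = 8 + 32 = 40 bytes of running digest plus
	 * message-length count.
	 */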
1701
1702 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1703 sizeof(struct caam_hash_state));
1704
1705 ret = ahash_set_sh_desc(ahash);
1706
1707 return ret;
1708}
1709
1710static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1711{
1712 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1713
1714 if (ctx->sh_desc_update_dma &&
1715 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
1716 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
1717 desc_bytes(ctx->sh_desc_update),
1718 DMA_TO_DEVICE);
1719 if (ctx->sh_desc_update_first_dma &&
1720 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
1721 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
1722 desc_bytes(ctx->sh_desc_update_first),
1723 DMA_TO_DEVICE);
1724 if (ctx->sh_desc_fin_dma &&
1725 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
1726 dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
1727 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
1728 if (ctx->sh_desc_digest_dma &&
1729 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
1730 dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
1731 desc_bytes(ctx->sh_desc_digest),
1732 DMA_TO_DEVICE);
1733 if (ctx->sh_desc_finup_dma &&
1734 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
1735 dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
1736 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
Ruchika Guptacfc6f112013-10-25 12:01:03 +05301737
1738 caam_jr_free(ctx->jrdev);
Yuan Kang045e3672012-06-22 19:48:47 -05001739}
1740
1741static void __exit caam_algapi_hash_exit(void)
1742{
Yuan Kang045e3672012-06-22 19:48:47 -05001743 struct caam_hash_alg *t_alg, *n;
1744
Ruchika Guptacfc6f112013-10-25 12:01:03 +05301745 if (!hash_list.next)
Yuan Kang045e3672012-06-22 19:48:47 -05001746 return;
1747
Ruchika Guptacfc6f112013-10-25 12:01:03 +05301748 list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
Yuan Kang045e3672012-06-22 19:48:47 -05001749 crypto_unregister_ahash(&t_alg->ahash_alg);
1750 list_del(&t_alg->entry);
1751 kfree(t_alg);
1752 }
1753}
1754
1755static struct caam_hash_alg *
Ruchika Guptacfc6f112013-10-25 12:01:03 +05301756caam_hash_alloc(struct caam_hash_template *template,
Yuan Kangb0e09ba2012-06-22 19:48:48 -05001757 bool keyed)
Yuan Kang045e3672012-06-22 19:48:47 -05001758{
1759 struct caam_hash_alg *t_alg;
1760 struct ahash_alg *halg;
1761 struct crypto_alg *alg;
1762
1763 t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
1764 if (!t_alg) {
Ruchika Guptacfc6f112013-10-25 12:01:03 +05301765 pr_err("failed to allocate t_alg\n");
Yuan Kang045e3672012-06-22 19:48:47 -05001766 return ERR_PTR(-ENOMEM);
1767 }
1768
1769 t_alg->ahash_alg = template->template_ahash;
1770 halg = &t_alg->ahash_alg;
1771 alg = &halg->halg.base;
1772
Yuan Kangb0e09ba2012-06-22 19:48:48 -05001773 if (keyed) {
1774 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1775 template->hmac_name);
1776 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1777 template->hmac_driver_name);
1778 } else {
1779 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1780 template->name);
1781 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1782 template->driver_name);
1783 }
Yuan Kang045e3672012-06-22 19:48:47 -05001784 alg->cra_module = THIS_MODULE;
1785 alg->cra_init = caam_hash_cra_init;
1786 alg->cra_exit = caam_hash_cra_exit;
1787 alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1788 alg->cra_priority = CAAM_CRA_PRIORITY;
1789 alg->cra_blocksize = template->blocksize;
1790 alg->cra_alignmask = 0;
1791 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
1792 alg->cra_type = &crypto_ahash_type;
1793
1794 t_alg->alg_type = template->alg_type;
1795 t_alg->alg_op = template->alg_op;
Yuan Kang045e3672012-06-22 19:48:47 -05001796
1797 return t_alg;
1798}
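/*
 * Usage note (illustrative): with the template table above,
 * caam_hash_alloc(&driver_hash[2], false) yields an ahash named "sha256"
 * with driver name "sha256-caam", while passing true yields "hmac(sha256)"
 * / "hmac-sha256-caam"; both register at CAAM_CRA_PRIORITY.
 * caam_algapi_hash_init() below does exactly this for every table entry.
 */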
1799
1800static int __init caam_algapi_hash_init(void)
1801{
Yuan Kang045e3672012-06-22 19:48:47 -05001802 int i = 0, err = 0;
1803
Ruchika Guptacfc6f112013-10-25 12:01:03 +05301804 INIT_LIST_HEAD(&hash_list);
Yuan Kang045e3672012-06-22 19:48:47 -05001805
1806 /* register crypto algorithms the device supports */
1807 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1808 /* TODO: check if h/w supports alg */
1809 struct caam_hash_alg *t_alg;
1810
Yuan Kangb0e09ba2012-06-22 19:48:48 -05001811 /* register hmac version */
Ruchika Guptacfc6f112013-10-25 12:01:03 +05301812 t_alg = caam_hash_alloc(&driver_hash[i], true);
Yuan Kangb0e09ba2012-06-22 19:48:48 -05001813 if (IS_ERR(t_alg)) {
1814 err = PTR_ERR(t_alg);
Ruchika Guptacfc6f112013-10-25 12:01:03 +05301815 pr_warn("%s alg allocation failed\n",
1816 driver_hash[i].driver_name);
Yuan Kangb0e09ba2012-06-22 19:48:48 -05001817 continue;
1818 }
1819
1820 err = crypto_register_ahash(&t_alg->ahash_alg);
1821 if (err) {
Ruchika Guptacfc6f112013-10-25 12:01:03 +05301822 pr_warn("%s alg registration failed\n",
Yuan Kangb0e09ba2012-06-22 19:48:48 -05001823 t_alg->ahash_alg.halg.base.cra_driver_name);
1824 kfree(t_alg);
1825 } else
Ruchika Guptacfc6f112013-10-25 12:01:03 +05301826 list_add_tail(&t_alg->entry, &hash_list);
Yuan Kangb0e09ba2012-06-22 19:48:48 -05001827
1828 /* register unkeyed version */
Ruchika Guptacfc6f112013-10-25 12:01:03 +05301829 t_alg = caam_hash_alloc(&driver_hash[i], false);
Yuan Kang045e3672012-06-22 19:48:47 -05001830 if (IS_ERR(t_alg)) {
1831 err = PTR_ERR(t_alg);
Ruchika Guptacfc6f112013-10-25 12:01:03 +05301832 pr_warn("%s alg allocation failed\n",
1833 driver_hash[i].driver_name);
Yuan Kang045e3672012-06-22 19:48:47 -05001834 continue;
1835 }
1836
1837 err = crypto_register_ahash(&t_alg->ahash_alg);
1838 if (err) {
Ruchika Guptacfc6f112013-10-25 12:01:03 +05301839 pr_warn("%s alg registration failed\n",
Yuan Kang045e3672012-06-22 19:48:47 -05001840 t_alg->ahash_alg.halg.base.cra_driver_name);
1841 kfree(t_alg);
1842 } else
Ruchika Guptacfc6f112013-10-25 12:01:03 +05301843 list_add_tail(&t_alg->entry, &hash_list);
Yuan Kang045e3672012-06-22 19:48:47 -05001844 }
1845
1846 return err;
1847}
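/*
 * Illustrative sketch (not part of the driver): once this module is loaded
 * and the algorithms are registered, any kernel crypto API user can pick
 * them up by name; at CAAM_CRA_PRIORITY (3000) they normally win over the
 * generic software implementations.  The function name is made up and the
 * request setup/error handling is abbreviated.
 */
static int __maybe_unused example_use_caam_sha256(void)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ... allocate an ahash_request against tfm and hash data ... */

	crypto_free_ahash(tfm);
	return 0;
}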
1848
1849module_init(caam_algapi_hash_init);
1850module_exit(caam_algapi_hash_exit);
1851
1852MODULE_LICENSE("GPL");
1853MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
1854MODULE_AUTHOR("Freescale Semiconductor - NMG");