/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 *                 ---------------                     ---------------
 *                 | JobDesc #1  |-------------------->|  ShareDesc  |
 *                 | *(packet 1) |                     |  (hashKey)  |
 *                 ---------------                     | (operation) |
 *                                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 *                 ---------------                     ---------------
 *                 | JobDesc #2  |-------------------->|  ShareDesc  |
 *                 | *(packet 2) |      |------------->|  (hashKey)  |
 *                 ---------------      |         |--->| (operation) |
 *                       .              |         |    | (load ctx2) |
 *                       .              |         |    ---------------
 *                 ---------------      |         |
 *                 | JobDesc #3  |------|         |
 *                 | *(packet 3) |                |
 *                 ---------------                |
 *                       .                        |
 *                       .                        |
 *                 ---------------                |
 *                 | JobDesc #4  |----------------|
 *                 | *(packet 4) |
 *                 ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
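/*
 * Illustrative sketch only (not part of the driver): using the
 * desc_constr.h helpers the way the request paths below use them, a job
 * descriptor with the layout above would be assembled roughly as
 *
 *	init_job_desc_shared(desc, sh_desc_dma, sh_len,
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
 *	append_seq_in_ptr(desc, src_dma, nbytes, 0);
 *
 * where sh_desc_dma, dst_dma and src_dma stand in for the DMA-mapped
 * shared descriptor, output buffer and input buffer.
 */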

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	struct device *jrdev;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
	dma_addr_t sh_desc_update_dma;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};
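/*
 * Note on the buf_0/buf_1 pair (sketch of the convention used in the
 * request paths below): state->current_buf selects which buffer holds
 * the bytes carried over from the previous request ("buf") and which
 * one collects the tail of the current request ("next_buf"); the flag
 * is toggled once a job has been queued:
 *
 *	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
 *	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
 */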

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}

/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
				      struct scatterlist *src, int src_nents,
				      struct sec4_sg_entry *sec4_sg,
				      bool chained)
{
	dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}

/*
 * Only put the buffer in the link table if it contains data; either way,
 * unmap the buffer if it was mapped on a previous pass.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}

/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For ahash read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}

/*
 * For ahash update, final and finup, import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

/* For ahash firsts and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, keylen,
			     ctx->alg_op);
}

/* Digest the key if it is longer than the algorithm's block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret = 0;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
				     GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto badkey;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto badkey;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto map_err;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}

map_err:
	kfree(hashed_key);
	return ret;
badkey:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
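/*
 * Worked example for the split key sizing above (illustrative, assuming
 * the usual CAAM ALGSEL encoding where the hmac(sha256) submask selects
 * mdpadlen[3]): split_key_len = 32 * 2 = 64 bytes (ipad and opad
 * digests) and split_key_pad_len = ALIGN(64, 16) = 64.
 */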

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @chained: if source is chained
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	bool chained;
	int src_nents;
	int sec4_sg_bytes;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
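/*
 * Layout of one kzalloc'd ahash_edesc as allocated by the request paths
 * below (sketch):
 *
 *	[ struct ahash_edesc | hw_desc (DESC_JOB_IO_LEN) | sec4_sg entries ]
 *
 * which is why edesc->sec4_sg is set to (void *)edesc +
 * sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN.
 */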

static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
				     DMA_TO_DEVICE, edesc->chained);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
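/*
 * The four completion callbacks above differ only in what they unmap:
 * ahash_done() unmaps just the request (the digest went to req->result),
 * while the _bi/_ctx_src/_ctx_dst variants also unmap state->caam_ctx
 * with DMA_BIDIRECTIONAL, DMA_TO_DEVICE and DMA_FROM_DEVICE
 * respectively, matching how the context was mapped for the job.
 */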

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *sh_desc = ctx->sh_desc_update, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_dma;
	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			return ret;

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*next_buflen, *buflen);

		if (src_nents) {
			src_map_to_sec4_sg(jrdev, req->src, src_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   chained);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
				SEC4_SG_LEN_FIN;
		}

		state->current_buf = !state->current_buf;

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (!ret) {
			ret = -EINPROGRESS;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_BIDIRECTIONAL);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}
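/*
 * Worked example for the buffering arithmetic above (illustrative):
 * with a 64-byte block size, *buflen = 10 pending bytes and
 * req->nbytes = 100, in_len = 110, *next_buflen = 110 & 63 = 46 and
 * to_hash = 64, i.e. exactly one full block is sent to CAAM and the
 * 46-byte tail is copied into next_buf for a later request.
 */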

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_fin, *desc;
	dma_addr_t ptr = ctx->sh_desc_fin_dma;
	int sec4_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		return ret;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	(edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_finup, *desc;
	dma_addr_t ptr = ctx->sh_desc_finup_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		return ret;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
			   sec4_sg_src_index, chained);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
			  buflen + req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, sec4_sg_bytes;
	dma_addr_t src_dma;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	u32 options;
	int sh_len;

	src_nents = sg_count(req->src, req->nbytes, &chained);
	dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
			   chained);
	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			  DESC_JOB_IO_LEN;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = src_nents;
	edesc->chained = chained;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}
		src_dma = edesc->sec4_sg_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, options);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
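/*
 * ahash_digest() above covers the one-shot case: nothing is imported
 * from state->caam_ctx; the INITFINAL shared descriptor reads req->src
 * (directly, or through a sec4 link table when it is scattered) and
 * writes the digest straight to req->result.
 */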

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	edesc->sec4_sg_bytes = 0;
	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents;
	struct ahash_edesc *edesc;
	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_bytes = (1 + src_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->dst_dma = 0;

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		src_map_to_sec4_sg(jrdev, req->src, src_nents,
				   edesc->sec4_sg + 1, chained);
		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		state->current_buf = !state->current_buf;

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			return ret;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int sh_len;
	int ret = 0;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
			   chained);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
			  req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
			   &state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int sec4_sg_bytes, src_nents;
	dma_addr_t src_dma;
	u32 options;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
				     &chained);
		dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
				   DMA_TO_DEVICE, chained);
		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->dst_dma = 0;

		if (src_nents) {
			sg_to_sec4_sg_last(req->src, src_nents,
					   edesc->sec4_sg, 0);
			edesc->sec4_sg_dma = dma_map_single(jrdev,
							    edesc->sec4_sg,
							    sec4_sg_bytes,
							    DMA_TO_DEVICE);
			if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
				dev_err(jrdev, "unable to map S/G table\n");
				/* never enqueued; free edesc here */
				kfree(edesc);
				return -ENOMEM;
			}
			src_dma = edesc->sec4_sg_dma;
			options = LDST_SGF;
		} else {
			src_dma = sg_dma_address(req->src);
			options = 0;
		}

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, src_dma, to_hash, options);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret) {
			/* never enqueued; free edesc here */
			kfree(edesc);
			return ret;
		}

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
				      req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}
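
/*
 * Worked example of the split above: for a 100-byte first update on
 * sha256-caam (64-byte block size), *next_buflen = 100 & 63 = 36, so
 * to_hash = 64 bytes go to the CAAM MDHA now and the trailing 36 bytes
 * are stashed in next_buf until more data (or final/finup) arrives.
 * Feeding the hardware only block-size multiples is what allows the
 * running context to be reloaded cleanly by the next job.
 */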

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

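/*
 * With no data buffered yet, finup on a fresh state degenerates to a
 * one-shot digest, hence ahash_finup_first() above. ahash_init() itself
 * submits nothing to hardware: it only seeds the per-request state
 * machine, whose update/finup/final hooks are rewritten once a running
 * context exists (see ahash_update_first), and the thin wrappers below
 * simply dispatch through the current state.
 */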
static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct caam_hash_ctx));
	memcpy(out + sizeof(struct caam_hash_ctx), state,
	       sizeof(struct caam_hash_state));
	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct caam_hash_ctx));
	memcpy(state, in + sizeof(struct caam_hash_ctx),
	       sizeof(struct caam_hash_state));
	return 0;
}
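
/*
 * Caveat on the export/import pair above: the blob is a raw copy of
 * caam_hash_ctx followed by caam_hash_state, so it embeds the job ring
 * device pointer and shared-descriptor DMA handles. It is therefore
 * only meaningful when imported on the same running system into a tfm
 * of the same algorithm; it is not a portable serialization format.
 */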

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
	u32 alg_op;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};
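
/*
 * Each template above is registered twice by caam_algapi_hash_init():
 * once keyed (e.g. "hmac(sha256)" / "hmac-sha256-caam") and once
 * unkeyed ("sha256" / "sha256-caam"), both at CAAM_CRA_PRIORITY so they
 * outrank the software implementations. Consumers then bind to them
 * through the usual crypto API, e.g. (illustrative only, error handling
 * omitted):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 */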

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	int alg_op;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
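	/*
	 * The bare 32 and 64 above are not typos: SHA-224 and SHA-384
	 * carry the full SHA-256/SHA-512 internal state (32/64 bytes)
	 * while running, even though their final digests are truncated.
	 */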
	int ret = 0;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}
	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	ret = ahash_set_sh_desc(ahash);

	return ret;
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}
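
/*
 * Each shared descriptor above is unmapped only if its DMA handle was
 * actually set up, hence the paired non-zero and !dma_mapping_error
 * checks: ahash_set_sh_desc() may have failed partway through cra_init,
 * leaving some handles unpopulated.
 */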

static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template, bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}
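
/*
 * caam_hash_alloc() copies the template's ahash_alg into a fresh
 * caam_hash_alg so the keyed and unkeyed registrations of the same
 * algorithm get independent crypto_alg instances; CRYPTO_ALG_ASYNC
 * advertises that results come back through completion callbacks.
 */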

static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 cha_inst, cha_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;
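	/*
	 * (LP256 MD blocks implement at most SHA-256, so the digestsize
	 * check in the registration loop below skips the SHA-384/512
	 * templates on such parts.)
	 */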

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");