/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |         |--->| (operation) |
 *       .              |         |    | (load ctx2) |
 *       .              |         |    ---------------
 * ---------------      |         |
 * | JobDesc #3  |------|         |
 * | *(packet 3) |                |
 * ---------------                |
 *       .                        |
 *       .                        |
 * ---------------                |
 * | JobDesc #4  |----------------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
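/*
 * In terms of the desc_constr.h helpers used throughout this file, a job
 * descriptor of the shape above is assembled roughly as follows (an
 * illustrative sketch only, not a verbatim excerpt from this driver):
 *
 *      init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *                           HDR_SHARE_DEFER | HDR_REVERSE);
 *      append_seq_out_ptr(desc, dst_dma, dst_len, 0);
 *      append_seq_in_ptr(desc, src_dma, src_len, options);
 */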

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	struct device *jrdev;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};
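/*
 * Note: each shared descriptor above is DMA-mapped individually and sits on
 * its own cacheline (as does the first dma_addr_t), presumably so that a
 * buffer the device reads via DMA never shares a cacheline with fields the
 * CPU may write concurrently on non-coherent platforms.
 */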

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};
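/*
 * buf_0/buf_1 double-buffer partial blocks between requests: current_buf
 * selects the buffer holding already-queued data while the tail of the new
 * request is copied into the other buffer, and the update paths flip
 * current_buf once a job has been submitted.
 */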

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
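/*
 * caam_export_state is the serialized form of an in-flight hash, presumably
 * consumed by the ahash .export/.import hooks defined later in this file: it
 * carries only the active partial-block buffer, the running CAAM context and
 * the next-step callbacks rather than the whole caam_hash_state.
 */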

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}

/*
 * Only put the buffer in the link table if it contains data. Either way, a
 * buffer mapped on a previous pass may still be mapped and must be unmapped
 * first.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}

/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For ahash read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}

/*
 * For ahash update, final and finup, import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

/* For ahash firsts and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
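/*
 * The two helpers above cover the two shared-descriptor shapes this driver
 * needs: ahash_ctx_data_to_out() first seq-loads the running context (for the
 * update/final/finup descriptors), while ahash_data_to_out() starts the
 * operation from scratch (for the update_first and digest descriptors).
 */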

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
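/*
 * Summary of the five shared descriptors built above: update runs
 * OP_ALG_AS_UPDATE and stores ctx_len bytes of running context, update_first
 * runs OP_ALG_AS_INIT, fin and finup run OP_ALG_AS_FINALIZE and store
 * digestsize bytes, and digest runs OP_ALG_AS_INITFINAL.
 */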

static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, keylen,
			     ctx->alg_op);
}

/* Digest the hash key if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
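/*
 * The pre-hashing above follows the usual HMAC convention (RFC 2104): a key
 * longer than the underlying block size is first shortened to its digest,
 * and that digest is then used as the actual HMAC key.
 */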

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc_array(digestsize,
					   sizeof(*hashed_key),
					   GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto bad_free_key;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto error_free_key;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}
 error_free_key:
	kfree(hashed_key);
	return ret;
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
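/*
 * For reference, callers reach the setkey/digest paths above through the
 * generic crypto API. A minimal sketch, with error handling elided and the
 * algorithm name, key and source scatterlist (sgl, nbytes) being
 * illustrative assumptions rather than anything defined in this file:
 *
 *      struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *      struct ahash_request *req;
 *      u8 key[32], out[SHA256_DIGEST_SIZE];
 *
 *      crypto_ahash_setkey(tfm, key, sizeof(key));
 *      req = ahash_request_alloc(tfm, GFP_KERNEL);
 *      ahash_request_set_callback(req, 0, NULL, NULL);
 *      ahash_request_set_crypt(req, sgl, out, nbytes);
 *      crypto_ahash_digest(req);      (completes via ahash_done() below)
 */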

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};

static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}

static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

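/*
 * The four completion callbacks above are identical apart from how the
 * running context is unmapped: ahash_done() leaves it alone, ahash_done_bi()
 * unmaps it bidirectionally, and the _ctx_src/_ctx_dst variants unmap it
 * to-device/from-device respectively, each recovering the edesc from the job
 * descriptor address via offsetof().
 */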
/*
 * Allocate an extended descriptor, which contains the hardware descriptor
 * and space for a hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}
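/*
 * Note the fast path above: a single mapped segment with no prepended
 * buffer/context entry is handed to SEQ_IN_PTR directly, while anything else
 * goes through a DMA-mapped sec4 scatter/gather table (LDST_SGF).
 */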

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;
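
	/*
	 * Worked example (SHA-256, 64-byte block): with 10 bytes already
	 * buffered and req->nbytes == 100, in_len == 110, so
	 * *next_buflen == 110 % 64 == 46 bytes are held back for the next
	 * call and to_hash == 64 bytes are submitted now.
	 */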

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
				cpu_to_caam32(SEC4_SG_LEN_FIN);
		}

		state->current_buf = !state->current_buf;

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
		cpu_to_caam32(SEC4_SG_LEN_FIN);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		goto unmap;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;

}

/* submit ahash update if it is the first job descriptor after update */
1273static int ahash_update_no_ctx(struct ahash_request *req)
1274{
1275 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1276 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1277 struct caam_hash_state *state = ahash_request_ctx(req);
1278 struct device *jrdev = ctx->jrdev;
1279 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1280 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1281 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
1282 int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
1283 u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
1284 int *next_buflen = state->current_buf ? &state->buflen_0 :
1285 &state->buflen_1;
1286 int in_len = *buflen + req->nbytes, to_hash;
Russell Kingbc13c692016-08-08 18:05:03 +01001287 int sec4_sg_bytes, src_nents, mapped_nents;
Yuan Kang045e3672012-06-22 19:48:47 -05001288 struct ahash_edesc *edesc;
Russell King30a43b42016-08-08 18:05:13 +01001289 u32 *desc;
Yuan Kang045e3672012-06-22 19:48:47 -05001290 int ret = 0;
Yuan Kang045e3672012-06-22 19:48:47 -05001291
1292 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
1293 to_hash = in_len - *next_buflen;
1294
1295 if (to_hash) {
LABBE Corentin13fb8fd2015-09-23 13:55:27 +02001296 src_nents = sg_nents_for_len(req->src,
Russell King3d5a2db2016-08-08 18:04:31 +01001297 req->nbytes - *next_buflen);
LABBE Corentinf9970c22015-11-04 21:13:38 +01001298 if (src_nents < 0) {
1299 dev_err(jrdev, "Invalid number of src SG.\n");
1300 return src_nents;
1301 }
Russell Kingbc13c692016-08-08 18:05:03 +01001302
1303 if (src_nents) {
1304 mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1305 DMA_TO_DEVICE);
1306 if (!mapped_nents) {
1307 dev_err(jrdev, "unable to DMA map source\n");
1308 return -ENOMEM;
1309 }
1310 } else {
1311 mapped_nents = 0;
1312 }
1313
1314 sec4_sg_bytes = (1 + mapped_nents) *
Yuan Kang045e3672012-06-22 19:48:47 -05001315 sizeof(struct sec4_sg_entry);
1316
1317 /*
1318 * allocate space for base edesc and hw desc commands,
1319 * link tables
1320 */
Russell King30a43b42016-08-08 18:05:13 +01001321 edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
1322 ctx->sh_desc_update_first,
1323 ctx->sh_desc_update_first_dma,
1324 flags);
Yuan Kang045e3672012-06-22 19:48:47 -05001325 if (!edesc) {
Russell Kingbc13c692016-08-08 18:05:03 +01001326 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
Yuan Kang045e3672012-06-22 19:48:47 -05001327 return -ENOMEM;
1328 }
1329
1330 edesc->src_nents = src_nents;
1331 edesc->sec4_sg_bytes = sec4_sg_bytes;
Horia Geanta76b99082014-07-11 15:34:54 +03001332 edesc->dst_dma = 0;
Yuan Kang045e3672012-06-22 19:48:47 -05001333
1334 state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
1335 buf, *buflen);
Russell Kingbc13c692016-08-08 18:05:03 +01001336 sg_to_sec4_sg_last(req->src, mapped_nents,
1337 edesc->sec4_sg + 1, 0);
1338
Yuan Kang045e3672012-06-22 19:48:47 -05001339 if (*next_buflen) {
Cristian Stoica307fd5432014-08-14 13:51:56 +03001340 scatterwalk_map_and_copy(next_buf, req->src,
1341 to_hash - *buflen,
1342 *next_buflen, 0);
Yuan Kang045e3672012-06-22 19:48:47 -05001343 }
1344
		state->current_buf = !state->current_buf;

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

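		/*
		 * The running digest now lives in CAAM context memory, so
		 * later stages switch to the context-aware handlers.
		 */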
		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

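	/*
	 * With no running context to restore, finup amounts to a one-shot
	 * digest over the buffered bytes followed by req->src, hence the
	 * digest shared descriptor below.
	 */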
	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
			   &state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

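		/*
		 * A single mapped segment can be referenced directly from
		 * the job descriptor, so a link table is only requested for
		 * the multi-segment case.
		 */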
		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->dst_dma = 0;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}
static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

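/*
 * Import mirrors export: only the software-visible request state (the
 * running CAAM context, the pending partial-block buffer and the stage
 * handlers) crosses the boundary, so a request can be frozen with
 * export and resumed later from another request.
 */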
static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
	u32 alg_op;
};

/* ahash algorithm templates */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	int alg_op;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}
	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

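	/*
	 * ctx_len is the hardware context saved/restored between jobs: the
	 * running digest plus the 8-byte message-length word (HASH_MSG_LEN).
	 * SHA-224/384 carry their parents' full 32- and 64-byte internal
	 * state rather than the truncated digest.
	 */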
	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));
	return ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
1922
1923static struct caam_hash_alg *
Ruchika Guptacfc6f112013-10-25 12:01:03 +05301924caam_hash_alloc(struct caam_hash_template *template,
Yuan Kangb0e09ba2012-06-22 19:48:48 -05001925 bool keyed)
Yuan Kang045e3672012-06-22 19:48:47 -05001926{
1927 struct caam_hash_alg *t_alg;
1928 struct ahash_alg *halg;
1929 struct crypto_alg *alg;
1930
Fabio Estevam9c4f9732015-08-21 13:52:00 -03001931 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
Yuan Kang045e3672012-06-22 19:48:47 -05001932 if (!t_alg) {
Ruchika Guptacfc6f112013-10-25 12:01:03 +05301933 pr_err("failed to allocate t_alg\n");
Yuan Kang045e3672012-06-22 19:48:47 -05001934 return ERR_PTR(-ENOMEM);
1935 }
1936
1937 t_alg->ahash_alg = template->template_ahash;
1938 halg = &t_alg->ahash_alg;
1939 alg = &halg->halg.base;
1940
Yuan Kangb0e09ba2012-06-22 19:48:48 -05001941 if (keyed) {
1942 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1943 template->hmac_name);
1944 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1945 template->hmac_driver_name);
1946 } else {
1947 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1948 template->name);
1949 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1950 template->driver_name);
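		/* plain (unkeyed) hashes take no key; drop the setkey hook */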
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}

static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 cha_inst, cha_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");