/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
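
/*
 * For illustration only - a minimal sketch (error handling and DMA mapping
 * omitted; sh_desc, sh_desc_dma, dst_dma, src_dma and src_len are assumed
 * to be set up by the caller) of how such a job descriptor is built with
 * the inline constructors from desc_constr.h:
 *
 *	u32 desc[DESC_JOB_IO_LEN / sizeof(u32)];
 *
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
 *	append_seq_in_ptr(desc, src_dma, src_len, 0);
 *
 * i.e. exactly the Header / ShareDesc pointer / SEQ_OUT_PTR / SEQ_IN_PTR
 * layout shown above; ahash_edesc_alloc() below builds the same header.
 */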

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(3 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 bytes of message length */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
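/*
 * e.g. a SHA-256 transform presumably runs with ctx_len = HASH_MSG_LEN +
 * SHA256_DIGEST_SIZE = 8 + 32 = 40 bytes; MAX_CTX_LEN is sized for the
 * largest case, SHA-512.
 */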

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif


static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	struct device *jrdev;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

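/* s/w hash state snapshot, presumably backing the ahash .export/.import ops */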
struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

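/*
 * buf_0/buf_1 in struct caam_hash_state double-buffer partial blocks:
 * not-yet-hashed bytes live in the "current" buffer, the bytes left over
 * by the job being built are staged in the "alternate" buffer, and the
 * completion callbacks flip the two via switch_buf().
 */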
static inline void switch_buf(struct caam_hash_state *state)
{
	state->current_buf ^= 1;
}

static inline u8 *current_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *alt_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_0 : state->buf_1;
}

static inline int *current_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
}

static inline int *alt_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = *current_buflen(state);

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

/*
 * For ahash update, final and finup (import_ctx = true)
 *     import context, read and write to seqout
 * For ahash update_first and digest (import_ctx = false)
 *     read and write to seqout
 */
static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
				     struct caam_hash_ctx *ctx, bool import_ctx)
{
	u32 op = ctx->adata.algtype;
	u32 *skip_key_load;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Append key if it has been set; ahash update excluded */
	if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
		/* Skip key loading if already shared */
		skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					    JUMP_COND_SHRD);

		append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
				  ctx->adata.keylen, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);

		set_jump_tgt_here(desc, skip_key_load);

		op |= OP_ALG_AAI_HMAC_PRECOMP;
	}

	/* If needed, import context from software */
	if (import_ctx)
		append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
				LDST_SRCDST_BYTE_CONTEXT);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 * Calculate remaining bytes to read
	 */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);
	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

/* Digest the key if it is longer than the hash block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc_array(digestsize,
					   sizeof(*hashed_key),
					   GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
			    CAAM_MAX_HASH_KEY_SIZE);
	if (ret)
		goto bad_free_key;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad, 1);
#endif

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};
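
/*
 * The edesc and its trailing link table come from a single allocation,
 * kzalloc(sizeof(*edesc) + sg_num * sizeof(struct sec4_sg_entry)) in
 * ahash_edesc_alloc(), so sec4_sg[] lands immediately after hw_desc[].
 */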

static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req, dst_len);
}

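/*
 * Job completion callbacks, one per descriptor flavour: ahash_done for
 * jobs that only write req->result, ahash_done_bi for updates that read
 * and rewrite the context, ahash_done_ctx_src for final/finup jobs that
 * read the context in, and ahash_done_ctx_dst for first jobs that write
 * the new context out (the DMA direction passed to ahash_unmap_ctx()
 * mirrors this).
 */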
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

/*
 * Allocate an extended descriptor, which contains the hardware descriptor
 * and space for a hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}

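/*
 * Append the SEQ IN PTR command for the request's source data: a single
 * mapped segment with no prepended buf/ctx entry is referenced directly,
 * anything else goes through a sec4 S/G table built in the edesc.
 */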
static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state), last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

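	/*
	 * Hash only whole blocks: the trailing in_len % blocksize bytes
	 * are carried over in the alternate buffer for a later
	 * update/final call.
	 */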
	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int buflen = *current_buflen(state);
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		goto unmap;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->dst_dma = 0;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
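		/*
		 * First job queued: the running context now lives in
		 * state->caam_ctx, so steer subsequent requests to the
		 * *_ctx variants.
		 */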
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

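	/* S/G table: entry 0 is the buffered tail, entries 1.. are req->src */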
	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int to_hash;
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->dst_dma = 0;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

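		/* stash the sub-blocksize tail; the next update hashes it first */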
		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

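		/*
		 * The running digest now lives in the CAAM context; switch
		 * this request over to the ctx-based handlers.
		 */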
		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
		switch_buf(state);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

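/*
 * The exported .update/.finup/.final hooks below simply dispatch through
 * the per-request state, which ahash_init() and the job handlers re-point
 * as data flows in.
 */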
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

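/*
 * export/import snapshot the software side of a request: the pending
 * buffer, the caam_ctx running digest and the state-machine pointers.
 */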
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};

/* ahash algorithm templates */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	},
};

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
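	/* (SHA-224 and SHA-384 run on the 256- and 512-bit internal state) */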
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

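	/*
	 * Map all four shared descriptors with a single DMA mapping; the
	 * per-descriptor handles below are fixed offsets into it.
	 */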
	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_dma),
					DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first);
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin);
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));
	return ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx,
					sh_desc_update_dma),
			       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}

static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 cha_inst, cha_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");
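
/*
 * Illustrative sketch only, not part of the driver: a minimal example of
 * how a kernel caller might exercise one of the hashes registered above
 * through the generic ahash API. The helper name and the choice of
 * "sha256" are ours; it assumes a sleepable context, a DMA-able (non-stack)
 * data buffer, and the crypto_wait_req() helpers from <linux/crypto.h>
 * (plus <crypto/hash.h> and <linux/scatterlist.h>). Kept under #if 0 so it
 * never builds as part of this file.
 */
#if 0
static int caamhash_demo_digest(const void *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* binds to the highest-priority sha256 provider, e.g. sha256-caam */
	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* async submission; crypto_wait_req() absorbs -EINPROGRESS */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_tfm:
	crypto_free_ahash(tfm);
	return err;
}
#endif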