blob: 215858a829c3f9792a99ba63de988caa164144fa [file] [log] [blame]
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001/*
2 * Cryptographic API.
3 *
4 * Support for ATMEL SHA1/SHA256 HW acceleration.
5 *
6 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
7 * Author: Nicolas Royer <nicolas@eukrea.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
12 *
13 * Some ideas are from omap-sham.c drivers.
14 */
15
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/err.h>
21#include <linux/clk.h>
22#include <linux/io.h>
23#include <linux/hw_random.h>
24#include <linux/platform_device.h>
25
26#include <linux/device.h>
Nicolas Royerebc82ef2012-07-01 19:19:46 +020027#include <linux/init.h>
28#include <linux/errno.h>
29#include <linux/interrupt.h>
Nicolas Royerebc82ef2012-07-01 19:19:46 +020030#include <linux/irq.h>
Nicolas Royerebc82ef2012-07-01 19:19:46 +020031#include <linux/scatterlist.h>
32#include <linux/dma-mapping.h>
Nicolas Ferreabfe7ae2013-10-15 15:36:34 +020033#include <linux/of_device.h>
Nicolas Royerebc82ef2012-07-01 19:19:46 +020034#include <linux/delay.h>
35#include <linux/crypto.h>
36#include <linux/cryptohash.h>
37#include <crypto/scatterwalk.h>
38#include <crypto/algapi.h>
39#include <crypto/sha.h>
40#include <crypto/hash.h>
41#include <crypto/internal/hash.h>
Nicolas Royerd4905b32013-02-20 17:10:26 +010042#include <linux/platform_data/crypto-atmel.h>
Nicolas Royerebc82ef2012-07-01 19:19:46 +020043#include "atmel-sha-regs.h"
44
45/* SHA flags */
46#define SHA_FLAGS_BUSY BIT(0)
47#define SHA_FLAGS_FINAL BIT(1)
48#define SHA_FLAGS_DMA_ACTIVE BIT(2)
49#define SHA_FLAGS_OUTPUT_READY BIT(3)
50#define SHA_FLAGS_INIT BIT(4)
51#define SHA_FLAGS_CPU BIT(5)
52#define SHA_FLAGS_DMA_READY BIT(6)
53
54#define SHA_FLAGS_FINUP BIT(16)
55#define SHA_FLAGS_SG BIT(17)
56#define SHA_FLAGS_SHA1 BIT(18)
Nicolas Royerd4905b32013-02-20 17:10:26 +010057#define SHA_FLAGS_SHA224 BIT(19)
58#define SHA_FLAGS_SHA256 BIT(20)
59#define SHA_FLAGS_SHA384 BIT(21)
60#define SHA_FLAGS_SHA512 BIT(22)
61#define SHA_FLAGS_ERROR BIT(23)
62#define SHA_FLAGS_PAD BIT(24)
Nicolas Royerebc82ef2012-07-01 19:19:46 +020063
64#define SHA_OP_UPDATE 1
65#define SHA_OP_FINAL 2
66
67#define SHA_BUFFER_LEN PAGE_SIZE
68
69#define ATMEL_SHA_DMA_THRESHOLD 56
70
/*
 * Hardware capabilities of a given SHA IP revision; filled in at probe
 * time and consulted to pick DMA mode and supported algorithms.
 */
struct atmel_sha_caps {
	bool	has_dma;	/* dmaengine slave channel usable (else PDC) */
	bool	has_dualbuff;	/* PDC dual-buffer mode available */
	bool	has_sha224;	/* SHA-224 supported by this revision */
	bool	has_sha_384_512;	/* SHA-384/SHA-512 supported */
};
Nicolas Royerebc82ef2012-07-01 19:19:46 +020077
78struct atmel_sha_dev;
79
80struct atmel_sha_reqctx {
81 struct atmel_sha_dev *dd;
82 unsigned long flags;
83 unsigned long op;
84
Nicolas Royerd4905b32013-02-20 17:10:26 +010085 u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
86 u64 digcnt[2];
Nicolas Royerebc82ef2012-07-01 19:19:46 +020087 size_t bufcnt;
88 size_t buflen;
89 dma_addr_t dma_addr;
90
91 /* walk state */
92 struct scatterlist *sg;
93 unsigned int offset; /* offset in current sg */
94 unsigned int total; /* total request */
95
Nicolas Royerd4905b32013-02-20 17:10:26 +010096 size_t block_size;
97
Nicolas Royerebc82ef2012-07-01 19:19:46 +020098 u8 buffer[0] __aligned(sizeof(u32));
99};
100
/* Per-tfm context: caches the device this transform is bound to. */
struct atmel_sha_ctx {
	struct atmel_sha_dev	*dd;	/* set on first atmel_sha_init() */

	unsigned long		flags;	/* currently unused here; reserved */
};
106
Nicolas Royerd4905b32013-02-20 17:10:26 +0100107#define ATMEL_SHA_QUEUE_LENGTH 50
108
/* dmaengine channel plus its slave configuration for the input path. */
struct atmel_sha_dma {
	struct dma_chan		*chan;
	struct dma_slave_config dma_conf;
};
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200113
/* Per-device state for one SHA hardware instance. */
struct atmel_sha_dev {
	struct list_head	list;	/* link in atmel_sha.dev_list */
	unsigned long		phys_base;	/* physical register base */
	struct device		*dev;
	struct clk		*iclk;	/* peripheral clock */
	int			irq;
	void __iomem		*io_base;	/* mapped register window */

	spinlock_t		lock;	/* protects queue and flags */
	int			err;	/* last hardware error, if any */
	struct tasklet_struct	done_task;	/* bottom half completion */

	unsigned long		flags;	/* SHA_FLAGS_* device state bits */
	struct crypto_queue	queue;	/* pending ahash requests */
	struct ahash_request	*req;	/* request currently on the hardware */

	struct atmel_sha_dma	dma_lch_in;	/* input DMA channel */

	struct atmel_sha_caps	caps;	/* features of this IP revision */

	u32	hw_version;	/* raw version register, for diagnostics */
};
136
/* Driver-global registry of probed devices. */
struct atmel_sha_drv {
	struct list_head	dev_list;	/* all probed atmel_sha_dev */
	spinlock_t		lock;		/* protects dev_list */
};

/* Single global instance; devices register here at probe time. */
static struct atmel_sha_drv atmel_sha = {
	.dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
};
146
/* Read a 32-bit register at @offset from the device register window. */
static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}
151
/* Write @value to the 32-bit register at @offset. */
static inline void atmel_sha_write(struct atmel_sha_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}
157
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200158static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
159{
160 size_t count;
161
162 while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
163 count = min(ctx->sg->length - ctx->offset, ctx->total);
164 count = min(count, ctx->buflen - ctx->bufcnt);
165
166 if (count <= 0)
167 break;
168
169 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
170 ctx->offset, count, 0);
171
172 ctx->bufcnt += count;
173 ctx->offset += count;
174 ctx->total -= count;
175
176 if (ctx->offset == ctx->sg->length) {
177 ctx->sg = sg_next(ctx->sg);
178 if (ctx->sg)
179 ctx->offset = 0;
180 else
181 ctx->total = 0;
182 }
183 }
184
185 return 0;
186}
187
/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 * The bit "1" is appended at the end of the message followed by
 * "padlen-1" zero bits. Then a 64 bits block (SHA1/SHA224/SHA256) or
 * 128 bits block (SHA384/SHA512) equal to the message length in bits
 * is appended.
 *
 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
 * - if message length < 56 bytes then padlen = 56 - message length
 * - else padlen = 64 + 56 - message length
 *
 * For SHA384/SHA512, padlen is calculated as follows:
 * - if message length < 112 bytes then padlen = 112 - message length
 * - else padlen = 128 + 112 - message length
 */
/*
 * Append MD-style padding (0x80, zeros, big-endian bit length) to
 * ctx->buffer so the total message is block-aligned.  @length is the
 * number of bytes that will still be sent separately (DMA fast path)
 * and must be accounted for in the length field.
 */
static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
{
	unsigned int index, padlen;
	u64 bits[2];
	u64 size[2];

	/*
	 * Total message length in bytes as a 128-bit value:
	 * digcnt (already hashed) + bufcnt (buffered) + length (pending),
	 * with a manual carry into size[1] on each 64-bit overflow.
	 */
	size[0] = ctx->digcnt[0];
	size[1] = ctx->digcnt[1];

	size[0] += ctx->bufcnt;
	if (size[0] < ctx->bufcnt)
		size[1]++;

	size[0] += length;
	if (size[0] < length)
		size[1]++;

	/* Length in bits, big-endian; bits[0] holds the high 64 bits. */
	bits[1] = cpu_to_be64(size[0] << 3);
	bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);

	if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) {
		/* 128-byte blocks: pad to 112 mod 128, then 16 length bytes. */
		index = ctx->bufcnt & 0x7f;
		padlen = (index < 112) ? (112 - index) : ((128+112) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
		ctx->bufcnt += padlen + 16;
		ctx->flags |= SHA_FLAGS_PAD;
	} else {
		/* 64-byte blocks: pad to 56 mod 64, then 8 length bytes. */
		index = ctx->bufcnt & 0x3f;
		padlen = (index < 56) ? (56 - index) : ((64+56) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
		ctx->bufcnt += padlen + 8;
		ctx->flags |= SHA_FLAGS_PAD;
	}
}
242
243static int atmel_sha_init(struct ahash_request *req)
244{
245 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
246 struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
247 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
248 struct atmel_sha_dev *dd = NULL;
249 struct atmel_sha_dev *tmp;
250
251 spin_lock_bh(&atmel_sha.lock);
252 if (!tctx->dd) {
253 list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
254 dd = tmp;
255 break;
256 }
257 tctx->dd = dd;
258 } else {
259 dd = tctx->dd;
260 }
261
262 spin_unlock_bh(&atmel_sha.lock);
263
264 ctx->dd = dd;
265
266 ctx->flags = 0;
267
268 dev_dbg(dd->dev, "init: digest size: %d\n",
269 crypto_ahash_digestsize(tfm));
270
Nicolas Royerd4905b32013-02-20 17:10:26 +0100271 switch (crypto_ahash_digestsize(tfm)) {
272 case SHA1_DIGEST_SIZE:
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200273 ctx->flags |= SHA_FLAGS_SHA1;
Nicolas Royerd4905b32013-02-20 17:10:26 +0100274 ctx->block_size = SHA1_BLOCK_SIZE;
275 break;
276 case SHA224_DIGEST_SIZE:
277 ctx->flags |= SHA_FLAGS_SHA224;
278 ctx->block_size = SHA224_BLOCK_SIZE;
279 break;
280 case SHA256_DIGEST_SIZE:
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200281 ctx->flags |= SHA_FLAGS_SHA256;
Nicolas Royerd4905b32013-02-20 17:10:26 +0100282 ctx->block_size = SHA256_BLOCK_SIZE;
283 break;
284 case SHA384_DIGEST_SIZE:
285 ctx->flags |= SHA_FLAGS_SHA384;
286 ctx->block_size = SHA384_BLOCK_SIZE;
287 break;
288 case SHA512_DIGEST_SIZE:
289 ctx->flags |= SHA_FLAGS_SHA512;
290 ctx->block_size = SHA512_BLOCK_SIZE;
291 break;
292 default:
293 return -EINVAL;
294 break;
295 }
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200296
297 ctx->bufcnt = 0;
Nicolas Royerd4905b32013-02-20 17:10:26 +0100298 ctx->digcnt[0] = 0;
299 ctx->digcnt[1] = 0;
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200300 ctx->buflen = SHA_BUFFER_LEN;
301
302 return 0;
303}
304
/*
 * Program mode and control registers for the current request.
 * @dma selects PDC/DMA transfer mode versus CPU (register) feeding.
 */
static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 valcr = 0, valmr = SHA_MR_MODE_AUTO;

	if (likely(dma)) {
		/*
		 * PDC path (no dmaengine channel): completion is signalled
		 * by the TXBUFE interrupt instead of DATRDY.
		 */
		if (!dd->caps.has_dma)
			atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
		valmr = SHA_MR_MODE_PDC;
		if (dd->caps.has_dualbuff)
			valmr |= SHA_MR_DUALBUFF;
	} else {
		/* CPU mode: interrupt when the input register is ready. */
		atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
	}

	/* Select the algorithm from the per-request flags. */
	if (ctx->flags & SHA_FLAGS_SHA1)
		valmr |= SHA_MR_ALGO_SHA1;
	else if (ctx->flags & SHA_FLAGS_SHA224)
		valmr |= SHA_MR_ALGO_SHA224;
	else if (ctx->flags & SHA_FLAGS_SHA256)
		valmr |= SHA_MR_ALGO_SHA256;
	else if (ctx->flags & SHA_FLAGS_SHA384)
		valmr |= SHA_MR_ALGO_SHA384;
	else if (ctx->flags & SHA_FLAGS_SHA512)
		valmr |= SHA_MR_ALGO_SHA512;

	/* Setting CR_FIRST only for the first iteration */
	if (!(ctx->digcnt[0] || ctx->digcnt[1]))
		valcr = SHA_CR_FIRST;

	atmel_sha_write(dd, SHA_CR, valcr);
	atmel_sha_write(dd, SHA_MR, valmr);
}
338
339static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
340 size_t length, int final)
341{
342 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
343 int count, len32;
344 const u32 *buffer = (const u32 *)buf;
345
Nicolas Royerd4905b32013-02-20 17:10:26 +0100346 dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
347 ctx->digcnt[1], ctx->digcnt[0], length, final);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200348
349 atmel_sha_write_ctrl(dd, 0);
350
351 /* should be non-zero before next lines to disable clocks later */
Nicolas Royerd4905b32013-02-20 17:10:26 +0100352 ctx->digcnt[0] += length;
353 if (ctx->digcnt[0] < length)
354 ctx->digcnt[1]++;
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200355
356 if (final)
357 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
358
359 len32 = DIV_ROUND_UP(length, sizeof(u32));
360
361 dd->flags |= SHA_FLAGS_CPU;
362
363 for (count = 0; count < len32; count++)
364 atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);
365
366 return -EINPROGRESS;
367}
368
369static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
370 size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
371{
372 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
373 int len32;
374
Nicolas Royerd4905b32013-02-20 17:10:26 +0100375 dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
376 ctx->digcnt[1], ctx->digcnt[0], length1, final);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200377
378 len32 = DIV_ROUND_UP(length1, sizeof(u32));
379 atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
380 atmel_sha_write(dd, SHA_TPR, dma_addr1);
381 atmel_sha_write(dd, SHA_TCR, len32);
382
383 len32 = DIV_ROUND_UP(length2, sizeof(u32));
384 atmel_sha_write(dd, SHA_TNPR, dma_addr2);
385 atmel_sha_write(dd, SHA_TNCR, len32);
386
387 atmel_sha_write_ctrl(dd, 1);
388
389 /* should be non-zero before next lines to disable clocks later */
Nicolas Royerd4905b32013-02-20 17:10:26 +0100390 ctx->digcnt[0] += length1;
391 if (ctx->digcnt[0] < length1)
392 ctx->digcnt[1]++;
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200393
394 if (final)
395 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
396
397 dd->flags |= SHA_FLAGS_DMA_ACTIVE;
398
399 /* Start DMA transfer */
400 atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);
401
402 return -EINPROGRESS;
403}
404
/* dmaengine completion callback: re-arm DATRDY to catch the digest. */
static void atmel_sha_dma_callback(void *data)
{
	struct atmel_sha_dev *dd = data;

	/* dma_lch_in - completed - wait DATRDY */
	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
}
412
413static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
414 size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
415{
416 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
417 struct dma_async_tx_descriptor *in_desc;
418 struct scatterlist sg[2];
419
420 dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
421 ctx->digcnt[1], ctx->digcnt[0], length1, final);
422
423 if (ctx->flags & (SHA_FLAGS_SHA1 | SHA_FLAGS_SHA224 |
424 SHA_FLAGS_SHA256)) {
425 dd->dma_lch_in.dma_conf.src_maxburst = 16;
426 dd->dma_lch_in.dma_conf.dst_maxburst = 16;
427 } else {
428 dd->dma_lch_in.dma_conf.src_maxburst = 32;
429 dd->dma_lch_in.dma_conf.dst_maxburst = 32;
430 }
431
432 dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
433
434 if (length2) {
435 sg_init_table(sg, 2);
436 sg_dma_address(&sg[0]) = dma_addr1;
437 sg_dma_len(&sg[0]) = length1;
438 sg_dma_address(&sg[1]) = dma_addr2;
439 sg_dma_len(&sg[1]) = length2;
440 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
441 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
442 } else {
443 sg_init_table(sg, 1);
444 sg_dma_address(&sg[0]) = dma_addr1;
445 sg_dma_len(&sg[0]) = length1;
446 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
447 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
448 }
449 if (!in_desc)
450 return -EINVAL;
451
452 in_desc->callback = atmel_sha_dma_callback;
453 in_desc->callback_param = dd;
454
455 atmel_sha_write_ctrl(dd, 1);
456
457 /* should be non-zero before next lines to disable clocks later */
458 ctx->digcnt[0] += length1;
459 if (ctx->digcnt[0] < length1)
460 ctx->digcnt[1]++;
461
462 if (final)
463 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
464
465 dd->flags |= SHA_FLAGS_DMA_ACTIVE;
466
467 /* Start DMA transfer */
468 dmaengine_submit(in_desc);
469 dma_async_issue_pending(dd->dma_lch_in.chan);
470
471 return -EINPROGRESS;
472}
473
474static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
475 size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
476{
477 if (dd->caps.has_dma)
478 return atmel_sha_xmit_dma(dd, dma_addr1, length1,
479 dma_addr2, length2, final);
480 else
481 return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
482 dma_addr2, length2, final);
483}
484
/*
 * CPU path for a final update: buffer all remaining data, append
 * padding, and push the whole padded block through the registers.
 */
static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;

	atmel_sha_append_sg(ctx);
	atmel_sha_fill_padding(ctx, 0);
	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;	/* buffer is consumed by the transfer below */

	return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}
497
498static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
499 struct atmel_sha_reqctx *ctx,
500 size_t length, int final)
501{
502 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
Nicolas Royerd4905b32013-02-20 17:10:26 +0100503 ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200504 if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
505 dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen +
Nicolas Royerd4905b32013-02-20 17:10:26 +0100506 ctx->block_size);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200507 return -EINVAL;
508 }
509
510 ctx->flags &= ~SHA_FLAGS_SG;
511
512 /* next call does not fail... so no unmap in the case of error */
Nicolas Royerd4905b32013-02-20 17:10:26 +0100513 return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200514}
515
516static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
517{
518 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
519 unsigned int final;
520 size_t count;
521
522 atmel_sha_append_sg(ctx);
523
524 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
525
Nicolas Royerd4905b32013-02-20 17:10:26 +0100526 dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: 0x%llx 0x%llx, final: %d\n",
527 ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200528
529 if (final)
530 atmel_sha_fill_padding(ctx, 0);
531
Ludovic Desroches00992862015-04-07 17:45:04 +0800532 if (final || (ctx->bufcnt == ctx->buflen)) {
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200533 count = ctx->bufcnt;
534 ctx->bufcnt = 0;
535 return atmel_sha_xmit_dma_map(dd, ctx, count, final);
536 }
537
538 return 0;
539}
540
541static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
542{
543 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
544 unsigned int length, final, tail;
545 struct scatterlist *sg;
546 unsigned int count;
547
548 if (!ctx->total)
549 return 0;
550
551 if (ctx->bufcnt || ctx->offset)
552 return atmel_sha_update_dma_slow(dd);
553
Nicolas Royerd4905b32013-02-20 17:10:26 +0100554 dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %u, total: %u\n",
555 ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200556
557 sg = ctx->sg;
558
559 if (!IS_ALIGNED(sg->offset, sizeof(u32)))
560 return atmel_sha_update_dma_slow(dd);
561
Nicolas Royerd4905b32013-02-20 17:10:26 +0100562 if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
563 /* size is not ctx->block_size aligned */
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200564 return atmel_sha_update_dma_slow(dd);
565
566 length = min(ctx->total, sg->length);
567
568 if (sg_is_last(sg)) {
569 if (!(ctx->flags & SHA_FLAGS_FINUP)) {
Nicolas Royerd4905b32013-02-20 17:10:26 +0100570 /* not last sg must be ctx->block_size aligned */
571 tail = length & (ctx->block_size - 1);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200572 length -= tail;
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200573 }
574 }
575
576 ctx->total -= length;
577 ctx->offset = length; /* offset where to start slow */
578
579 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
580
581 /* Add padding */
582 if (final) {
Nicolas Royerd4905b32013-02-20 17:10:26 +0100583 tail = length & (ctx->block_size - 1);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200584 length -= tail;
585 ctx->total += tail;
586 ctx->offset = length; /* offset where to start slow */
587
588 sg = ctx->sg;
589 atmel_sha_append_sg(ctx);
590
591 atmel_sha_fill_padding(ctx, length);
592
593 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
Nicolas Royerd4905b32013-02-20 17:10:26 +0100594 ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200595 if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
596 dev_err(dd->dev, "dma %u bytes error\n",
Nicolas Royerd4905b32013-02-20 17:10:26 +0100597 ctx->buflen + ctx->block_size);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200598 return -EINVAL;
599 }
600
601 if (length == 0) {
602 ctx->flags &= ~SHA_FLAGS_SG;
603 count = ctx->bufcnt;
604 ctx->bufcnt = 0;
Nicolas Royerd4905b32013-02-20 17:10:26 +0100605 return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200606 0, final);
607 } else {
608 ctx->sg = sg;
609 if (!dma_map_sg(dd->dev, ctx->sg, 1,
610 DMA_TO_DEVICE)) {
611 dev_err(dd->dev, "dma_map_sg error\n");
612 return -EINVAL;
613 }
614
615 ctx->flags |= SHA_FLAGS_SG;
616
617 count = ctx->bufcnt;
618 ctx->bufcnt = 0;
Nicolas Royerd4905b32013-02-20 17:10:26 +0100619 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200620 length, ctx->dma_addr, count, final);
621 }
622 }
623
624 if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
625 dev_err(dd->dev, "dma_map_sg error\n");
626 return -EINVAL;
627 }
628
629 ctx->flags |= SHA_FLAGS_SG;
630
631 /* next call does not fail... so no unmap in the case of error */
Nicolas Royerd4905b32013-02-20 17:10:26 +0100632 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200633 0, final);
634}
635
/*
 * Tear down the DMA mappings created by the update paths and advance
 * the sg walk state past the bytes just transferred.
 */
static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);

	if (ctx->flags & SHA_FLAGS_SG) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		/* Current sg entry fully consumed: step to the next one. */
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
		/* The padding buffer was chained as a second DMA buffer. */
		if (ctx->flags & SHA_FLAGS_PAD) {
			dma_unmap_single(dd->dev, ctx->dma_addr,
				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
		}
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
						ctx->block_size, DMA_TO_DEVICE);
	}

	return 0;
}
658
659static int atmel_sha_update_req(struct atmel_sha_dev *dd)
660{
661 struct ahash_request *req = dd->req;
662 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
663 int err;
664
Nicolas Royerd4905b32013-02-20 17:10:26 +0100665 dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
666 ctx->total, ctx->digcnt[1], ctx->digcnt[0]);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200667
668 if (ctx->flags & SHA_FLAGS_CPU)
669 err = atmel_sha_update_cpu(dd);
670 else
671 err = atmel_sha_update_dma_start(dd);
672
673 /* wait for dma completion before can take more data */
Nicolas Royerd4905b32013-02-20 17:10:26 +0100674 dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0%llx\n",
675 err, ctx->digcnt[1], ctx->digcnt[0]);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200676
677 return err;
678}
679
680static int atmel_sha_final_req(struct atmel_sha_dev *dd)
681{
682 struct ahash_request *req = dd->req;
683 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
684 int err = 0;
685 int count;
686
687 if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
688 atmel_sha_fill_padding(ctx, 0);
689 count = ctx->bufcnt;
690 ctx->bufcnt = 0;
691 err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
692 }
693 /* faster to handle last block with cpu */
694 else {
695 atmel_sha_fill_padding(ctx, 0);
696 count = ctx->bufcnt;
697 ctx->bufcnt = 0;
698 err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
699 }
700
701 dev_dbg(dd->dev, "final_req: err: %d\n", err);
702
703 return err;
704}
705
706static void atmel_sha_copy_hash(struct ahash_request *req)
707{
708 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
709 u32 *hash = (u32 *)ctx->digest;
710 int i;
711
Nicolas Royerd4905b32013-02-20 17:10:26 +0100712 if (ctx->flags & SHA_FLAGS_SHA1)
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200713 for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
714 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
Nicolas Royerd4905b32013-02-20 17:10:26 +0100715 else if (ctx->flags & SHA_FLAGS_SHA224)
716 for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(u32); i++)
717 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
718 else if (ctx->flags & SHA_FLAGS_SHA256)
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200719 for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++)
720 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
Nicolas Royerd4905b32013-02-20 17:10:26 +0100721 else if (ctx->flags & SHA_FLAGS_SHA384)
722 for (i = 0; i < SHA384_DIGEST_SIZE / sizeof(u32); i++)
723 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
724 else
725 for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(u32); i++)
726 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200727}
728
729static void atmel_sha_copy_ready_hash(struct ahash_request *req)
730{
731 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
732
733 if (!req->result)
734 return;
735
Nicolas Royerd4905b32013-02-20 17:10:26 +0100736 if (ctx->flags & SHA_FLAGS_SHA1)
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200737 memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
Nicolas Royerd4905b32013-02-20 17:10:26 +0100738 else if (ctx->flags & SHA_FLAGS_SHA224)
739 memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
740 else if (ctx->flags & SHA_FLAGS_SHA256)
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200741 memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
Nicolas Royerd4905b32013-02-20 17:10:26 +0100742 else if (ctx->flags & SHA_FLAGS_SHA384)
743 memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
744 else
745 memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200746}
747
748static int atmel_sha_finish(struct ahash_request *req)
749{
750 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
751 struct atmel_sha_dev *dd = ctx->dd;
752 int err = 0;
753
Nicolas Royerd4905b32013-02-20 17:10:26 +0100754 if (ctx->digcnt[0] || ctx->digcnt[1])
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200755 atmel_sha_copy_ready_hash(req);
756
Nicolas Royerd4905b32013-02-20 17:10:26 +0100757 dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1],
758 ctx->digcnt[0], ctx->bufcnt);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200759
760 return err;
761}
762
/*
 * Complete the current request: read back the digest on success, mark
 * the error otherwise, release the device and kick the queue for the
 * next request.
 */
static void atmel_sha_finish_req(struct ahash_request *req, int err)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;

	if (!err) {
		atmel_sha_copy_hash(req);
		/* Only publish the result if this was the final transfer. */
		if (SHA_FLAGS_FINAL & dd->flags)
			err = atmel_sha_finish(req);
	} else {
		ctx->flags |= SHA_FLAGS_ERROR;
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
			SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);

	clk_disable_unprepare(dd->iclk);

	if (req->base.complete)
		req->base.complete(&req->base, err);

	/* handle new request */
	tasklet_schedule(&dd->done_task);
}
788
789static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
790{
791 clk_prepare_enable(dd->iclk);
792
Nicolas Royerd4905b32013-02-20 17:10:26 +0100793 if (!(SHA_FLAGS_INIT & dd->flags)) {
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200794 atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200795 dd->flags |= SHA_FLAGS_INIT;
796 dd->err = 0;
797 }
798
799 return 0;
800}
801
/* Low 12 bits of the version register identify the IP revision. */
static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
{
	return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
}
806
/* Probe-time helper: power up, read and log the IP version, power down. */
static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
{
	atmel_sha_hw_init(dd);

	dd->hw_version = atmel_sha_get_version(dd);

	dev_info(dd->dev,
			"version: 0x%x\n", dd->hw_version);

	/* Balance the clk_prepare_enable() done in atmel_sha_hw_init(). */
	clk_disable_unprepare(dd->iclk);
}
818
/*
 * Enqueue @req (may be NULL to just kick the queue) and, if the engine
 * is idle, dequeue and start the next request.  Returns the enqueue
 * status for @req, or 0 when only pumping the queue.
 */
static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_sha_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);

	/* Engine busy: the running request's completion will re-pump. */
	if (SHA_FLAGS_BUSY & dd->flags) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= SHA_FLAGS_BUSY;

	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	/* Tell a backlogged submitter its request is now being processed. */
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->req = req;
	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = atmel_sha_hw_init(dd);

	if (err)
		goto err1;

	if (ctx->op == SHA_OP_UPDATE) {
		err = atmel_sha_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
			/* no final() after finup() */
			err = atmel_sha_final_req(dd);
	} else if (ctx->op == SHA_OP_FINAL) {
		err = atmel_sha_final_req(dd);
	}

err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		atmel_sha_finish_req(req, err);

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return ret;
}
879
/* Record the operation type on the request and hand it to the queue. */
static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct atmel_sha_dev *dd = tctx->dd;

	ctx->op = op;	/* SHA_OP_UPDATE or SHA_OP_FINAL */

	return atmel_sha_handle_queue(dd, req);
}
890
/*
 * ahash .update: stage the request's data.  Short non-final data is
 * only buffered; everything else is queued to the hardware.
 */
static int atmel_sha_update(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & SHA_FLAGS_FINUP) {
		if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
			/* faster to use CPU for short transfers */
			ctx->flags |= SHA_FLAGS_CPU;
	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
		/* Fits in the bounce buffer: just accumulate, no hardware. */
		atmel_sha_append_sg(ctx);
		return 0;
	}
	return atmel_sha_enqueue(req, SHA_OP_UPDATE);
}
912
/*
 * ahash .final: flush any buffered data with padding and produce the
 * digest.  Three cases: buffered data pending (queue a FINAL op),
 * padding not yet emitted (run the final block directly), or padding
 * already done (just copy out the ready hash).
 */
static int atmel_sha_final(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct atmel_sha_dev *dd = tctx->dd;

	int err = 0;

	ctx->flags |= SHA_FLAGS_FINUP;

	if (ctx->flags & SHA_FLAGS_ERROR)
		return 0; /* uncompleted hash is not needed */

	if (ctx->bufcnt) {
		return atmel_sha_enqueue(req, SHA_OP_FINAL);
	} else if (!(ctx->flags & SHA_FLAGS_PAD)) { /* add padding */
		err = atmel_sha_hw_init(dd);
		if (err)
			goto err1;

		/* Bypass the queue: claim the engine and run directly. */
		dd->flags |= SHA_FLAGS_BUSY;
		err = atmel_sha_final_req(dd);
	} else {
		/* copy ready hash (+ finalize hmac) */
		return atmel_sha_finish(req);
	}

err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		atmel_sha_finish_req(req, err);

	return err;
}
947
/*
 * ahash .finup entry point: hash the remaining data and finalize in
 * one call. Implemented as update() followed by final().
 */
static int atmel_sha_finup(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= SHA_FLAGS_FINUP;

	err1 = atmel_sha_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = atmel_sha_final(req);

	/* Report the first error; otherwise final()'s status. */
	return err1 ?: err2;
}
967
/*
 * ahash .digest entry point: one-shot init + finup over the request's
 * source data.
 */
static int atmel_sha_digest(struct ahash_request *req)
{
	int err = atmel_sha_init(req);

	if (err)
		return err;
	return atmel_sha_finup(req);
}
972
/*
 * Transform init: reserve per-request context space. The size is based
 * on the largest supported block size (SHA-512) so the same init can be
 * shared by every algorithm this driver registers.
 */
static int atmel_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct atmel_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA512_BLOCK_SIZE);

	return 0;
}
981
/*
 * SHA-1 and SHA-256: available on every supported version of the IP,
 * so these are registered unconditionally.
 */
static struct ahash_alg sha_1_256_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA1_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha1",
			.cra_driver_name	= "atmel-sha1",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA256_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha256",
			.cra_driver_name	= "atmel-sha256",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
};
1026
/* SHA-224: registered only when dd->caps.has_sha224 is set. */
static struct ahash_alg sha_224_alg = {
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA224_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha224",
			.cra_driver_name	= "atmel-sha224",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA224_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
};
1048
/*
 * SHA-384/SHA-512: registered only when dd->caps.has_sha_384_512 is
 * set. Note the non-zero cra_alignmask (0x3), unlike the other
 * algorithms above — presumably required by the wider data path; keep
 * in sync if the hardware constraint ever changes.
 */
static struct ahash_alg sha_384_512_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA384_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha384",
			.cra_driver_name	= "atmel-sha384",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA384_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA512_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha512",
			.cra_driver_name	= "atmel-sha512",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA512_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
};
1093
/*
 * Tasklet scheduled from the IRQ handler: advances the current request
 * (next DMA chunk) or completes it, outside hard-IRQ context.
 */
static void atmel_sha_done_task(unsigned long data)
{
	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
	int err = 0;

	if (!(SHA_FLAGS_BUSY & dd->flags)) {
		/* No request in flight: try to start a queued one. */
		atmel_sha_handle_queue(dd, NULL);
		return;
	}

	if (SHA_FLAGS_CPU & dd->flags) {
		/* PIO mode: an OUTPUT_READY interrupt means we're done. */
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (SHA_FLAGS_DMA_READY & dd->flags) {
		if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
			dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
			atmel_sha_update_dma_stop(dd);
			if (dd->err) {
				err = dd->err;
				goto finish;
			}
		}
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			/* hash or semi-hash ready */
			dd->flags &= ~(SHA_FLAGS_DMA_READY |
						SHA_FLAGS_OUTPUT_READY);
			/* Kick off the next DMA chunk, if any remains. */
			err = atmel_sha_update_dma_start(dd);
			if (err != -EINPROGRESS)
				goto finish;
		}
	}
	return;

finish:
	/* finish current request */
	atmel_sha_finish_req(dd->req, err);
}
1133
/*
 * Hard-IRQ handler: acknowledge/mask the interrupt sources we observed,
 * record the completion state in dd->flags, and defer the real work to
 * the done_task tasklet.
 */
static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
{
	struct atmel_sha_dev *sha_dd = dev_id;
	u32 reg;

	reg = atmel_sha_read(sha_dd, SHA_ISR);
	if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
		/* Disable the sources that fired; re-enabled when the
		 * next operation is programmed. */
		atmel_sha_write(sha_dd, SHA_IDR, reg);
		if (SHA_FLAGS_BUSY & sha_dd->flags) {
			sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
			if (!(SHA_FLAGS_CPU & sha_dd->flags))
				sha_dd->flags |= SHA_FLAGS_DMA_READY;
			tasklet_schedule(&sha_dd->done_task);
		} else {
			dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
		}
		return IRQ_HANDLED;
	}

	/* Not ours (IRQF_SHARED line). */
	return IRQ_NONE;
}
1155
1156static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
1157{
1158 int i;
1159
Nicolas Royerd4905b32013-02-20 17:10:26 +01001160 for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
1161 crypto_unregister_ahash(&sha_1_256_algs[i]);
1162
1163 if (dd->caps.has_sha224)
1164 crypto_unregister_ahash(&sha_224_alg);
1165
1166 if (dd->caps.has_sha_384_512) {
1167 for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
1168 crypto_unregister_ahash(&sha_384_512_algs[i]);
1169 }
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001170}
1171
1172static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
1173{
1174 int err, i, j;
1175
Nicolas Royerd4905b32013-02-20 17:10:26 +01001176 for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
1177 err = crypto_register_ahash(&sha_1_256_algs[i]);
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001178 if (err)
Nicolas Royerd4905b32013-02-20 17:10:26 +01001179 goto err_sha_1_256_algs;
1180 }
1181
1182 if (dd->caps.has_sha224) {
1183 err = crypto_register_ahash(&sha_224_alg);
1184 if (err)
1185 goto err_sha_224_algs;
1186 }
1187
1188 if (dd->caps.has_sha_384_512) {
1189 for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
1190 err = crypto_register_ahash(&sha_384_512_algs[i]);
1191 if (err)
1192 goto err_sha_384_512_algs;
1193 }
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001194 }
1195
1196 return 0;
1197
Nicolas Royerd4905b32013-02-20 17:10:26 +01001198err_sha_384_512_algs:
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001199 for (j = 0; j < i; j++)
Nicolas Royerd4905b32013-02-20 17:10:26 +01001200 crypto_unregister_ahash(&sha_384_512_algs[j]);
1201 crypto_unregister_ahash(&sha_224_alg);
1202err_sha_224_algs:
1203 i = ARRAY_SIZE(sha_1_256_algs);
1204err_sha_1_256_algs:
1205 for (j = 0; j < i; j++)
1206 crypto_unregister_ahash(&sha_1_256_algs[j]);
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001207
1208 return err;
1209}
1210
Nicolas Royerd4905b32013-02-20 17:10:26 +01001211static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
1212{
1213 struct at_dma_slave *sl = slave;
1214
1215 if (sl && sl->dma_dev == chan->device->dev) {
1216 chan->private = sl;
1217 return true;
1218 } else {
1219 return false;
1220 }
1221}
1222
/*
 * Acquire and configure the memory-to-device DMA channel that feeds
 * data into the SHA input register (SHA_REG_DIN). Returns 0 on success
 * or -ENOMEM if no channel could be obtained.
 */
static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
				struct crypto_platform_data *pdata)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask_in;

	/* Try to grab DMA channel */
	dma_cap_zero(mask_in);
	dma_cap_set(DMA_SLAVE, mask_in);

	/*
	 * DT name is "tx" because the channel transmits data to the
	 * peripheral (mem-to-dev), even though the platform-data slave
	 * info comes from the rxdata field — NOTE(review): confirm this
	 * pairing against the board/DT bindings.
	 */
	dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
			atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
	if (!dd->dma_lch_in.chan) {
		dev_warn(dd->dev, "no DMA channel available\n");
		return err;
	}

	/* One 32-bit word per burst into the fixed SHA_REG_DIN address. */
	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		SHA_REG_DIN(0);
	dd->dma_lch_in.dma_conf.src_maxburst = 1;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	return 0;
}
1253
/* Release the DMA channel acquired by atmel_sha_dma_init(). */
static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
}
1258
1259static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
1260{
1261
1262 dd->caps.has_dma = 0;
1263 dd->caps.has_dualbuff = 0;
1264 dd->caps.has_sha224 = 0;
1265 dd->caps.has_sha_384_512 = 0;
1266
1267 /* keep only major version number */
1268 switch (dd->hw_version & 0xff0) {
Leilei Zhao141824d2015-04-07 17:45:03 +08001269 case 0x420:
1270 dd->caps.has_dma = 1;
1271 dd->caps.has_dualbuff = 1;
1272 dd->caps.has_sha224 = 1;
1273 dd->caps.has_sha_384_512 = 1;
1274 break;
Nicolas Royerd4905b32013-02-20 17:10:26 +01001275 case 0x410:
1276 dd->caps.has_dma = 1;
1277 dd->caps.has_dualbuff = 1;
1278 dd->caps.has_sha224 = 1;
1279 dd->caps.has_sha_384_512 = 1;
1280 break;
1281 case 0x400:
1282 dd->caps.has_dma = 1;
1283 dd->caps.has_dualbuff = 1;
1284 dd->caps.has_sha224 = 1;
1285 break;
1286 case 0x320:
1287 break;
1288 default:
1289 dev_warn(dd->dev,
1290 "Unmanaged sha version, set minimum capabilities\n");
1291 break;
1292 }
1293}
1294
#if defined(CONFIG_OF)
/* Device-tree match table: at91sam9g46-compatible SHA blocks. */
static const struct of_device_id atmel_sha_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-sha" },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);

/*
 * Build a crypto_platform_data instance from the device-tree node when
 * no board-file platform data was supplied. All allocations are
 * devm-managed, so nothing needs explicit freeing on the error paths.
 * Returns ERR_PTR(-EINVAL) without a DT node, ERR_PTR(-ENOMEM) on
 * allocation failure.
 */
static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct crypto_platform_data *pdata;

	if (!np) {
		dev_err(&pdev->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	pdata->dma_slave = devm_kzalloc(&pdev->dev,
					sizeof(*(pdata->dma_slave)),
					GFP_KERNEL);
	if (!pdata->dma_slave) {
		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
		return ERR_PTR(-ENOMEM);
	}

	return pdata;
}
#else /* CONFIG_OF */
/* Without OF support there is no DT fallback for platform data. */
static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
{
	return ERR_PTR(-EINVAL);
}
#endif
1335
/*
 * Probe: allocate per-device state, wire up IRQ, clock and MMIO, detect
 * hardware capabilities, optionally set up DMA, then register the
 * supported hash algorithms. Resources are released in reverse order
 * through the goto-cleanup chain on failure.
 */
static int atmel_sha_probe(struct platform_device *pdev)
{
	struct atmel_sha_dev *sha_dd;
	struct crypto_platform_data *pdata;
	struct device *dev = &pdev->dev;
	struct resource *sha_res;
	unsigned long sha_phys_size;
	int err;

	sha_dd = devm_kzalloc(&pdev->dev, sizeof(struct atmel_sha_dev),
				GFP_KERNEL);
	if (sha_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto sha_dd_err;
	}

	sha_dd->dev = dev;

	platform_set_drvdata(pdev, sha_dd);

	INIT_LIST_HEAD(&sha_dd->list);

	tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
					(unsigned long)sha_dd);

	crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);

	/* -1 marks "no IRQ requested yet" for remove()'s free_irq guard. */
	sha_dd->irq = -1;

	/* Get the base address */
	sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!sha_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	sha_dd->phys_base = sha_res->start;
	sha_phys_size = resource_size(sha_res);

	/* Get the IRQ */
	sha_dd->irq = platform_get_irq(pdev, 0);
	if (sha_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = sha_dd->irq;
		goto res_err;
	}

	err = request_irq(sha_dd->irq, atmel_sha_irq, IRQF_SHARED, "atmel-sha",
			sha_dd);
	if (err) {
		dev_err(dev, "unable to request sha irq.\n");
		goto res_err;
	}

	/* Initializing the clock */
	sha_dd->iclk = clk_get(&pdev->dev, "sha_clk");
	if (IS_ERR(sha_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(sha_dd->iclk);
		goto clk_err;
	}

	sha_dd->io_base = ioremap(sha_dd->phys_base, sha_phys_size);
	if (!sha_dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto sha_io_err;
	}

	/* Read the IP revision, then derive the capability flags from it. */
	atmel_sha_hw_version_init(sha_dd);

	atmel_sha_get_cap(sha_dd);

	if (sha_dd->caps.has_dma) {
		/* Prefer board platform data; fall back to device tree. */
		pdata = pdev->dev.platform_data;
		if (!pdata) {
			pdata = atmel_sha_of_init(pdev);
			if (IS_ERR(pdata)) {
				dev_err(&pdev->dev, "platform data not available\n");
				err = PTR_ERR(pdata);
				goto err_pdata;
			}
		}
		if (!pdata->dma_slave) {
			err = -ENXIO;
			goto err_pdata;
		}
		err = atmel_sha_dma_init(sha_dd, pdata);
		if (err)
			goto err_sha_dma;

		dev_info(dev, "using %s for DMA transfers\n",
				dma_chan_name(sha_dd->dma_lch_in.chan));
	}

	/* Publish the device before registering algs so requests can find it. */
	spin_lock(&atmel_sha.lock);
	list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
	spin_unlock(&atmel_sha.lock);

	err = atmel_sha_register_algs(sha_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel SHA1/SHA256%s%s\n",
			sha_dd->caps.has_sha224 ? "/SHA224" : "",
			sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : "");

	return 0;

err_algs:
	spin_lock(&atmel_sha.lock);
	list_del(&sha_dd->list);
	spin_unlock(&atmel_sha.lock);
	if (sha_dd->caps.has_dma)
		atmel_sha_dma_cleanup(sha_dd);
err_sha_dma:
err_pdata:
	iounmap(sha_dd->io_base);
sha_io_err:
	clk_put(sha_dd->iclk);
clk_err:
	free_irq(sha_dd->irq, sha_dd);
res_err:
	tasklet_kill(&sha_dd->done_task);
sha_dd_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}
1466
Greg Kroah-Hartman49cfe4d2012-12-21 13:14:09 -08001467static int atmel_sha_remove(struct platform_device *pdev)
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001468{
1469 static struct atmel_sha_dev *sha_dd;
1470
1471 sha_dd = platform_get_drvdata(pdev);
1472 if (!sha_dd)
1473 return -ENODEV;
1474 spin_lock(&atmel_sha.lock);
1475 list_del(&sha_dd->list);
1476 spin_unlock(&atmel_sha.lock);
1477
1478 atmel_sha_unregister_algs(sha_dd);
1479
1480 tasklet_kill(&sha_dd->done_task);
1481
Nicolas Royerd4905b32013-02-20 17:10:26 +01001482 if (sha_dd->caps.has_dma)
1483 atmel_sha_dma_cleanup(sha_dd);
1484
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001485 iounmap(sha_dd->io_base);
1486
1487 clk_put(sha_dd->iclk);
1488
1489 if (sha_dd->irq >= 0)
1490 free_irq(sha_dd->irq, sha_dd);
1491
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001492 return 0;
1493}
1494
/* Platform driver glue; DT matching compiles in only with CONFIG_OF. */
static struct platform_driver atmel_sha_driver = {
	.probe		= atmel_sha_probe,
	.remove		= atmel_sha_remove,
	.driver		= {
		.name	= "atmel_sha",
		.of_match_table	= of_match_ptr(atmel_sha_dt_ids),
	},
};

module_platform_driver(atmel_sha_driver);

MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");