/*
 * Cryptographic API.
 *
 * Support for ATMEL SHA1/SHA256 HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from the omap-sham.c driver.
 */


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/crypto-atmel.h>
#include "atmel-sha-regs.h"

/* SHA flags */
#define SHA_FLAGS_BUSY			BIT(0)
#define SHA_FLAGS_FINAL			BIT(1)
#define SHA_FLAGS_DMA_ACTIVE		BIT(2)
#define SHA_FLAGS_OUTPUT_READY		BIT(3)
#define SHA_FLAGS_INIT			BIT(4)
#define SHA_FLAGS_CPU			BIT(5)
#define SHA_FLAGS_DMA_READY		BIT(6)

#define SHA_FLAGS_FINUP			BIT(16)
#define SHA_FLAGS_SG			BIT(17)
#define SHA_FLAGS_ALGO_MASK		GENMASK(22, 18)
#define SHA_FLAGS_SHA1			BIT(18)
#define SHA_FLAGS_SHA224		BIT(19)
#define SHA_FLAGS_SHA256		BIT(20)
#define SHA_FLAGS_SHA384		BIT(21)
#define SHA_FLAGS_SHA512		BIT(22)
#define SHA_FLAGS_ERROR			BIT(23)
#define SHA_FLAGS_PAD			BIT(24)
#define SHA_FLAGS_RESTORE		BIT(25)

#define SHA_OP_UPDATE	1
#define SHA_OP_FINAL	2

#define SHA_BUFFER_LEN		PAGE_SIZE

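/*
 * Requests below this size are hashed by the CPU: per the comments in
 * atmel_sha_update() and atmel_sha_final_req(), short transfers complete
 * faster on the CPU than the cost of setting up a DMA transfer.
 */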
#define ATMEL_SHA_DMA_THRESHOLD		56

struct atmel_sha_caps {
	bool	has_dma;
	bool	has_dualbuff;
	bool	has_sha224;
	bool	has_sha_384_512;
	bool	has_uihv;
};

struct atmel_sha_dev;

struct atmel_sha_reqctx {
	struct atmel_sha_dev	*dd;
	unsigned long		flags;
	unsigned long		op;

	u8	digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
	u64	digcnt[2];
	size_t	bufcnt;
	size_t	buflen;
	dma_addr_t	dma_addr;

	/* walk state */
	struct scatterlist	*sg;
	unsigned int		offset;	/* offset in current sg */
	unsigned int		total;	/* total request */

	size_t	block_size;

	/* must be last: the data buffer is allocated right after the struct */
	u8	buffer[0] __aligned(sizeof(u32));
};

struct atmel_sha_ctx {
	struct atmel_sha_dev	*dd;

	unsigned long		flags;
};

#define ATMEL_SHA_QUEUE_LENGTH	50

struct atmel_sha_dma {
	struct dma_chan			*chan;
	struct dma_slave_config		dma_conf;
};

struct atmel_sha_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	struct clk		*iclk;
	int			irq;
	void __iomem		*io_base;

	spinlock_t		lock;
	int			err;
	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	struct atmel_sha_dma	dma_lch_in;

	struct atmel_sha_caps	caps;

	u32	hw_version;
};

struct atmel_sha_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_sha_drv atmel_sha = {
	.dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
};

static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_sha_write(struct atmel_sha_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
{
	size_t count;

	while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
		count = min(ctx->sg->length - ctx->offset, ctx->total);
		count = min(count, ctx->buflen - ctx->bufcnt);

		if (count <= 0) {
			/*
			 * Check if count <= 0 because the buffer is full or
			 * because the sg length is 0. In the latter case,
			 * check if there is another sg in the list: a
			 * zero-length sg doesn't necessarily mean the end of
			 * the sg list.
			 */
			if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
				ctx->sg = sg_next(ctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
			ctx->offset, count, 0);

		ctx->bufcnt += count;
		ctx->offset += count;
		ctx->total -= count;

		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}

/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 * The bit "1" is appended at the end of the message, followed by
 * "padlen-1" zero bits. Then a 64 bit block (SHA1/SHA224/SHA256) or
 * 128 bit block (SHA384/SHA512) equal to the message length in bits
 * is appended.
 *
 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
 * - if message length < 56 bytes then padlen = 56 - message length
 * - else padlen = 64 + 56 - message length
 *
 * For SHA384/SHA512, padlen is calculated as follows:
 * - if message length < 112 bytes then padlen = 112 - message length
 * - else padlen = 128 + 112 - message length
 */
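/*
 * Worked example (not from the datasheet, simply the formulas above applied
 * to SHA-256): a 3-byte message gives index = 3, so padlen = 56 - 3 = 53.
 * The padded block is then 3 + 53 + 8 = 64 bytes: the message, 0x80,
 * 52 zero bytes, and the 64-bit big-endian bit length (24).
 */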
static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
{
	unsigned int index, padlen;
	u64 bits[2];
	u64 size[2];

	size[0] = ctx->digcnt[0];
	size[1] = ctx->digcnt[1];

	size[0] += ctx->bufcnt;
	if (size[0] < ctx->bufcnt)
		size[1]++;

	size[0] += length;
	if (size[0] < length)
		size[1]++;

	bits[1] = cpu_to_be64(size[0] << 3);
	bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);

	if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) {
		index = ctx->bufcnt & 0x7f;
		padlen = (index < 112) ? (112 - index) : ((128+112) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
		ctx->bufcnt += padlen + 16;
		ctx->flags |= SHA_FLAGS_PAD;
	} else {
		index = ctx->bufcnt & 0x3f;
		padlen = (index < 56) ? (56 - index) : ((64+56) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
		ctx->bufcnt += padlen + 8;
		ctx->flags |= SHA_FLAGS_PAD;
	}
}

static int atmel_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = NULL;
	struct atmel_sha_dev *tmp;

	spin_lock_bh(&atmel_sha.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}

	spin_unlock_bh(&atmel_sha.lock);

	ctx->dd = dd;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA1;
		ctx->block_size = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA224;
		ctx->block_size = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA256;
		ctx->block_size = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA384;
		ctx->block_size = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA512;
		ctx->block_size = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->digcnt[0] = 0;
	ctx->digcnt[1] = 0;
	ctx->buflen = SHA_BUFFER_LEN;

	return 0;
}

static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 valmr = SHA_MR_MODE_AUTO;
	unsigned int i, hashsize = 0;

	if (likely(dma)) {
		if (!dd->caps.has_dma)
			atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
		valmr = SHA_MR_MODE_PDC;
		if (dd->caps.has_dualbuff)
			valmr |= SHA_MR_DUALBUFF;
	} else {
		atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
	}

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		valmr |= SHA_MR_ALGO_SHA1;
		hashsize = SHA1_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA224:
		valmr |= SHA_MR_ALGO_SHA224;
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA256:
		valmr |= SHA_MR_ALGO_SHA256;
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA384:
		valmr |= SHA_MR_ALGO_SHA384;
		hashsize = SHA512_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA512:
		valmr |= SHA_MR_ALGO_SHA512;
		hashsize = SHA512_DIGEST_SIZE;
		break;

	default:
		break;
	}

	/* Setting CR_FIRST only for the first iteration */
	if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
	} else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
		const u32 *hash = (const u32 *)ctx->digest;

		/*
		 * Restore the hardware context: update the User Initialize
		 * Hash Value (UIHV) with the value saved when the latest
		 * 'update' operation completed on this very same crypto
		 * request.
		 */
		ctx->flags &= ~SHA_FLAGS_RESTORE;
		atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
		for (i = 0; i < hashsize / sizeof(u32); ++i)
			atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]);
		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
		valmr |= SHA_MR_UIHV;
	}
	/*
	 * WARNING: If the UIHV feature is not available, the hardware CANNOT
	 * process concurrent requests: the internal registers used to store
	 * the hash/digest are still set to the partial digest output values
	 * computed during the latest round.
	 */
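	/*
	 * The saved value comes from atmel_sha_copy_hash(), which reads the
	 * partial digest back from SHA_REG_DIGEST() and sets
	 * SHA_FLAGS_RESTORE once an 'update' operation completes.
	 */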

	atmel_sha_write(dd, SHA_MR, valmr);
}

static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length, final);

	atmel_sha_write_ctrl(dd, 0);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length;
	if (ctx->digcnt[0] < length)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dd->flags |= SHA_FLAGS_CPU;

	for (count = 0; count < len32; count++)
		atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);

	return -EINPROGRESS;
}

static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int len32;

	dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length1, final);

	len32 = DIV_ROUND_UP(length1, sizeof(u32));
	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
	atmel_sha_write(dd, SHA_TPR, dma_addr1);
	atmel_sha_write(dd, SHA_TCR, len32);

	len32 = DIV_ROUND_UP(length2, sizeof(u32));
	atmel_sha_write(dd, SHA_TNPR, dma_addr2);
	atmel_sha_write(dd, SHA_TNCR, len32);

	atmel_sha_write_ctrl(dd, 1);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length1;
	if (ctx->digcnt[0] < length1)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= SHA_FLAGS_DMA_ACTIVE;

	/* Start DMA transfer */
	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);

	return -EINPROGRESS;
}

static void atmel_sha_dma_callback(void *data)
{
	struct atmel_sha_dev *dd = data;

	/* dma_lch_in - completed - wait DATRDY */
	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
}

static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	struct dma_async_tx_descriptor	*in_desc;
	struct scatterlist sg[2];

	dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length1, final);

	dd->dma_lch_in.dma_conf.src_maxburst = 16;
	dd->dma_lch_in.dma_conf.dst_maxburst = 16;

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);

	if (length2) {
		sg_init_table(sg, 2);
		sg_dma_address(&sg[0]) = dma_addr1;
		sg_dma_len(&sg[0]) = length1;
		sg_dma_address(&sg[1]) = dma_addr2;
		sg_dma_len(&sg[1]) = length2;
		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	} else {
		sg_init_table(sg, 1);
		sg_dma_address(&sg[0]) = dma_addr1;
		sg_dma_len(&sg[0]) = length1;
		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}
	if (!in_desc)
		return -EINVAL;

	in_desc->callback = atmel_sha_dma_callback;
	in_desc->callback_param = dd;

	atmel_sha_write_ctrl(dd, 1);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length1;
	if (ctx->digcnt[0] < length1)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= SHA_FLAGS_DMA_ACTIVE;

	/* Start DMA transfer */
	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return -EINPROGRESS;
}

static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	if (dd->caps.has_dma)
		return atmel_sha_xmit_dma(dd, dma_addr1, length1,
				dma_addr2, length2, final);
	else
		return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
				dma_addr2, length2, final);
}

static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;

	atmel_sha_append_sg(ctx);
	atmel_sha_fill_padding(ctx, 0);
	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}

static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
					struct atmel_sha_reqctx *ctx,
					size_t length, int final)
{
	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen +
				ctx->block_size);
		return -EINVAL;
	}

	ctx->flags &= ~SHA_FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
}

static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int final;
	size_t count;

	atmel_sha_append_sg(ctx);

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: 0x%llx 0x%llx, final: %d\n",
		ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);

	if (final)
		atmel_sha_fill_padding(ctx, 0);

	if (final || (ctx->bufcnt == ctx->buflen)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		return atmel_sha_xmit_dma_map(dd, ctx, count, final);
	}

	return 0;
}

static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length, final, tail;
	struct scatterlist *sg;
	unsigned int count;

	if (!ctx->total)
		return 0;

	if (ctx->bufcnt || ctx->offset)
		return atmel_sha_update_dma_slow(dd);

	dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %u, total: %u\n",
		ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);

	sg = ctx->sg;

	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
		return atmel_sha_update_dma_slow(dd);

	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
		/* size is not ctx->block_size aligned */
		return atmel_sha_update_dma_slow(dd);

	length = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & SHA_FLAGS_FINUP)) {
			/* not last sg must be ctx->block_size aligned */
			tail = length & (ctx->block_size - 1);
			length -= tail;
		}
	}

	ctx->total -= length;
	ctx->offset = length; /* offset where to start slow */

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	/* Add padding */
	if (final) {
		tail = length & (ctx->block_size - 1);
		length -= tail;
		ctx->total += tail;
		ctx->offset = length; /* offset where to start slow */

		sg = ctx->sg;
		atmel_sha_append_sg(ctx);

		atmel_sha_fill_padding(ctx, length);

		ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
			ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
		if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
			dev_err(dd->dev, "dma %u bytes error\n",
				ctx->buflen + ctx->block_size);
			return -EINVAL;
		}

		if (length == 0) {
			ctx->flags &= ~SHA_FLAGS_SG;
			count = ctx->bufcnt;
			ctx->bufcnt = 0;
			return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
					0, final);
		} else {
			ctx->sg = sg;
			if (!dma_map_sg(dd->dev, ctx->sg, 1,
				DMA_TO_DEVICE)) {
				dev_err(dd->dev, "dma_map_sg error\n");
				return -EINVAL;
			}

			ctx->flags |= SHA_FLAGS_SG;

			count = ctx->bufcnt;
			ctx->bufcnt = 0;
			return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
					length, ctx->dma_addr, count, final);
		}
	}

	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	ctx->flags |= SHA_FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
					0, final);
}

static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);

	if (ctx->flags & SHA_FLAGS_SG) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
		if (ctx->flags & SHA_FLAGS_PAD) {
			dma_unmap_single(dd->dev, ctx->dma_addr,
				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
		}
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
						ctx->block_size, DMA_TO_DEVICE);
	}

	return 0;
}

static int atmel_sha_update_req(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
		ctx->total, ctx->digcnt[1], ctx->digcnt[0]);

	if (ctx->flags & SHA_FLAGS_CPU)
		err = atmel_sha_update_cpu(dd);
	else
		err = atmel_sha_update_dma_start(dd);

	/* wait for dma completion before we can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n",
			err, ctx->digcnt[1], ctx->digcnt[0]);

	return err;
}

static int atmel_sha_final_req(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err = 0;
	int count;

	if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
		atmel_sha_fill_padding(ctx, 0);
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
	} else {
		/* faster to handle last block with cpu */
		atmel_sha_fill_padding(ctx, 0);
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
	}

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}

static void atmel_sha_copy_hash(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	unsigned int i, hashsize;

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		hashsize = SHA1_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA224:
	case SHA_FLAGS_SHA256:
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA384:
	case SHA_FLAGS_SHA512:
		hashsize = SHA512_DIGEST_SIZE;
		break;

	default:
		/* Should not happen... */
		return;
	}

	for (i = 0; i < hashsize / sizeof(u32); ++i)
		hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
	ctx->flags |= SHA_FLAGS_RESTORE;
}

static void atmel_sha_copy_ready_hash(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return;

	if (ctx->flags & SHA_FLAGS_SHA1)
		memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
	else if (ctx->flags & SHA_FLAGS_SHA224)
		memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
	else if (ctx->flags & SHA_FLAGS_SHA256)
		memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
	else if (ctx->flags & SHA_FLAGS_SHA384)
		memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
	else
		memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
}

static int atmel_sha_finish(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;

	if (ctx->digcnt[0] || ctx->digcnt[1])
		atmel_sha_copy_ready_hash(req);

	dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1],
		ctx->digcnt[0], ctx->bufcnt);

	return 0;
}

static void atmel_sha_finish_req(struct ahash_request *req, int err)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;

	if (!err) {
		atmel_sha_copy_hash(req);
		if (SHA_FLAGS_FINAL & dd->flags)
			err = atmel_sha_finish(req);
	} else {
		ctx->flags |= SHA_FLAGS_ERROR;
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
			SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);

	clk_disable_unprepare(dd->iclk);

	if (req->base.complete)
		req->base.complete(&req->base, err);

	/* handle new request */
	tasklet_schedule(&dd->queue_task);
}

static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
{
	int err;

	err = clk_prepare_enable(dd->iclk);
	if (err)
		return err;

	if (!(SHA_FLAGS_INIT & dd->flags)) {
		atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
		dd->flags |= SHA_FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
{
	return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
}

static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
{
	atmel_sha_hw_init(dd);

	dd->hw_version = atmel_sha_get_version(dd);

	dev_info(dd->dev,
			"version: 0x%x\n", dd->hw_version);

	clk_disable_unprepare(dd->iclk);
}

static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_sha_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);

	if (SHA_FLAGS_BUSY & dd->flags) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= SHA_FLAGS_BUSY;

	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->req = req;
	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = atmel_sha_hw_init(dd);

	if (err)
		goto err1;

	if (ctx->op == SHA_OP_UPDATE) {
		err = atmel_sha_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
			/* no final() after finup() */
			err = atmel_sha_final_req(dd);
	} else if (ctx->op == SHA_OP_FINAL) {
		err = atmel_sha_final_req(dd);
	}

err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		atmel_sha_finish_req(req, err);

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return ret;
}

static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct atmel_sha_dev *dd = tctx->dd;

	ctx->op = op;

	return atmel_sha_handle_queue(dd, req);
}

static int atmel_sha_update(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & SHA_FLAGS_FINUP) {
		if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
			/* faster to use CPU for short transfers */
			ctx->flags |= SHA_FLAGS_CPU;
	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
		atmel_sha_append_sg(ctx);
		return 0;
	}
	return atmel_sha_enqueue(req, SHA_OP_UPDATE);
}

static int atmel_sha_final(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct atmel_sha_dev *dd = tctx->dd;

	int err = 0;

	ctx->flags |= SHA_FLAGS_FINUP;

	if (ctx->flags & SHA_FLAGS_ERROR)
		return 0; /* uncompleted hash is not needed */

	if (ctx->bufcnt) {
		return atmel_sha_enqueue(req, SHA_OP_FINAL);
	} else if (!(ctx->flags & SHA_FLAGS_PAD)) { /* add padding */
		err = atmel_sha_hw_init(dd);
		if (err)
			goto err1;

		dd->req = req;
		dd->flags |= SHA_FLAGS_BUSY;
		err = atmel_sha_final_req(dd);
	} else {
		/* copy ready hash (+ finalize hmac) */
		return atmel_sha_finish(req);
	}

err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		atmel_sha_finish_req(req, err);

	return err;
}

static int atmel_sha_finup(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= SHA_FLAGS_FINUP;

	err1 = atmel_sha_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = atmel_sha_final(req);

	return err1 ?: err2;
}

static int atmel_sha_digest(struct ahash_request *req)
{
	return atmel_sha_init(req) ?: atmel_sha_finup(req);
}

static int atmel_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct atmel_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA512_BLOCK_SIZE);

	return 0;
}

static struct ahash_alg sha_1_256_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA1_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha1",
			.cra_driver_name	= "atmel-sha1",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA256_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha256",
			.cra_driver_name	= "atmel-sha256",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
};

static struct ahash_alg sha_224_alg = {
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA224_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha224",
			.cra_driver_name	= "atmel-sha224",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA224_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
};

static struct ahash_alg sha_384_512_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA384_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha384",
			.cra_driver_name	= "atmel-sha384",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA384_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA512_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha512",
			.cra_driver_name	= "atmel-sha512",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA512_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
};

static void atmel_sha_queue_task(unsigned long data)
{
	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;

	atmel_sha_handle_queue(dd, NULL);
}

static void atmel_sha_done_task(unsigned long data)
{
	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
	int err = 0;

	if (SHA_FLAGS_CPU & dd->flags) {
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (SHA_FLAGS_DMA_READY & dd->flags) {
		if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
			dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
			atmel_sha_update_dma_stop(dd);
			if (dd->err) {
				err = dd->err;
				goto finish;
			}
		}
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			/* hash or semi-hash ready */
			dd->flags &= ~(SHA_FLAGS_DMA_READY |
						SHA_FLAGS_OUTPUT_READY);
			err = atmel_sha_update_dma_start(dd);
			if (err != -EINPROGRESS)
				goto finish;
		}
	}
	return;

finish:
	/* finish current request */
	atmel_sha_finish_req(dd->req, err);
}

static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
{
	struct atmel_sha_dev *sha_dd = dev_id;
	u32 reg;

	reg = atmel_sha_read(sha_dd, SHA_ISR);
	if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
		atmel_sha_write(sha_dd, SHA_IDR, reg);
		if (SHA_FLAGS_BUSY & sha_dd->flags) {
			sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
			if (!(SHA_FLAGS_CPU & sha_dd->flags))
				sha_dd->flags |= SHA_FLAGS_DMA_READY;
			tasklet_schedule(&sha_dd->done_task);
		} else {
			dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
		}
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
		crypto_unregister_ahash(&sha_1_256_algs[i]);

	if (dd->caps.has_sha224)
		crypto_unregister_ahash(&sha_224_alg);

	if (dd->caps.has_sha_384_512) {
		for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
			crypto_unregister_ahash(&sha_384_512_algs[i]);
	}
}

static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
		err = crypto_register_ahash(&sha_1_256_algs[i]);
		if (err)
			goto err_sha_1_256_algs;
	}

	if (dd->caps.has_sha224) {
		err = crypto_register_ahash(&sha_224_alg);
		if (err)
			goto err_sha_224_algs;
	}

	if (dd->caps.has_sha_384_512) {
		for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
			err = crypto_register_ahash(&sha_384_512_algs[i]);
			if (err)
				goto err_sha_384_512_algs;
		}
	}

	return 0;

err_sha_384_512_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_384_512_algs[j]);
	crypto_unregister_ahash(&sha_224_alg);
err_sha_224_algs:
	i = ARRAY_SIZE(sha_1_256_algs);
err_sha_1_256_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_1_256_algs[j]);

	return err;
}

static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *sl = slave;

	if (sl && sl->dma_dev == chan->device->dev) {
		chan->private = sl;
		return true;
	} else {
		return false;
	}
}

static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
				struct crypto_platform_data *pdata)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask_in;

	/* Try to grab DMA channel */
	dma_cap_zero(mask_in);
	dma_cap_set(DMA_SLAVE, mask_in);

	dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
			atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
	if (!dd->dma_lch_in.chan) {
		dev_warn(dd->dev, "no DMA channel available\n");
		return err;
	}

	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		SHA_REG_DIN(0);
	dd->dma_lch_in.dma_conf.src_maxburst = 1;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	return 0;
}

static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
}

static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
{
	dd->caps.has_dma = 0;
	dd->caps.has_dualbuff = 0;
	dd->caps.has_sha224 = 0;
	dd->caps.has_sha_384_512 = 0;
	dd->caps.has_uihv = 0;

	/* keep only major version number */
	switch (dd->hw_version & 0xff0) {
	case 0x510:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		dd->caps.has_sha_384_512 = 1;
		dd->caps.has_uihv = 1;
		break;
	case 0x420:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		dd->caps.has_sha_384_512 = 1;
		dd->caps.has_uihv = 1;
		break;
	case 0x410:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		dd->caps.has_sha_384_512 = 1;
		break;
	case 0x400:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		break;
	case 0x320:
		break;
	default:
		dev_warn(dd->dev,
				"Unmanaged sha version, set minimum capabilities\n");
		break;
	}
}

#if defined(CONFIG_OF)
static const struct of_device_id atmel_sha_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-sha" },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);

static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct crypto_platform_data *pdata;

	if (!np) {
		dev_err(&pdev->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	pdata->dma_slave = devm_kzalloc(&pdev->dev,
					sizeof(*(pdata->dma_slave)),
					GFP_KERNEL);
	if (!pdata->dma_slave) {
		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
		return ERR_PTR(-ENOMEM);
	}

	return pdata;
}
#else /* CONFIG_OF */
static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
{
	return ERR_PTR(-EINVAL);
}
#endif

static int atmel_sha_probe(struct platform_device *pdev)
{
	struct atmel_sha_dev *sha_dd;
	struct crypto_platform_data *pdata;
	struct device *dev = &pdev->dev;
	struct resource *sha_res;
	int err;

	sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
	if (sha_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto sha_dd_err;
	}

	sha_dd->dev = dev;

	platform_set_drvdata(pdev, sha_dd);

	INIT_LIST_HEAD(&sha_dd->list);
	spin_lock_init(&sha_dd->lock);

	tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
					(unsigned long)sha_dd);
	tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task,
					(unsigned long)sha_dd);

	crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);

	sha_dd->irq = -1;

	/* Get the base address */
	sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!sha_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	sha_dd->phys_base = sha_res->start;

	/* Get the IRQ */
	sha_dd->irq = platform_get_irq(pdev, 0);
	if (sha_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = sha_dd->irq;
		goto res_err;
	}

	err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
			       IRQF_SHARED, "atmel-sha", sha_dd);
	if (err) {
		dev_err(dev, "unable to request sha irq.\n");
		goto res_err;
	}

	/* Initializing the clock */
	sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk");
	if (IS_ERR(sha_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(sha_dd->iclk);
		goto res_err;
	}

	/* devm_ioremap_resource() returns an ERR_PTR, never NULL */
	sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
	if (IS_ERR(sha_dd->io_base)) {
		dev_err(dev, "can't ioremap\n");
		err = PTR_ERR(sha_dd->io_base);
		goto res_err;
	}

	atmel_sha_hw_version_init(sha_dd);

	atmel_sha_get_cap(sha_dd);

	if (sha_dd->caps.has_dma) {
		pdata = pdev->dev.platform_data;
		if (!pdata) {
			pdata = atmel_sha_of_init(pdev);
			if (IS_ERR(pdata)) {
				dev_err(&pdev->dev, "platform data not available\n");
				err = PTR_ERR(pdata);
				goto res_err;
			}
		}
		if (!pdata->dma_slave) {
			err = -ENXIO;
			goto res_err;
		}
		err = atmel_sha_dma_init(sha_dd, pdata);
		if (err)
			goto err_sha_dma;

		dev_info(dev, "using %s for DMA transfers\n",
				dma_chan_name(sha_dd->dma_lch_in.chan));
	}

	spin_lock(&atmel_sha.lock);
	list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
	spin_unlock(&atmel_sha.lock);

	err = atmel_sha_register_algs(sha_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel SHA1/SHA256%s%s\n",
			sha_dd->caps.has_sha224 ? "/SHA224" : "",
			sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : "");

	return 0;

err_algs:
	spin_lock(&atmel_sha.lock);
	list_del(&sha_dd->list);
	spin_unlock(&atmel_sha.lock);
	if (sha_dd->caps.has_dma)
		atmel_sha_dma_cleanup(sha_dd);
err_sha_dma:
res_err:
	tasklet_kill(&sha_dd->queue_task);
	tasklet_kill(&sha_dd->done_task);
sha_dd_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

static int atmel_sha_remove(struct platform_device *pdev)
{
	struct atmel_sha_dev *sha_dd;

	sha_dd = platform_get_drvdata(pdev);
	if (!sha_dd)
		return -ENODEV;
	spin_lock(&atmel_sha.lock);
	list_del(&sha_dd->list);
	spin_unlock(&atmel_sha.lock);

	atmel_sha_unregister_algs(sha_dd);

	tasklet_kill(&sha_dd->queue_task);
	tasklet_kill(&sha_dd->done_task);

	if (sha_dd->caps.has_dma)
		atmel_sha_dma_cleanup(sha_dd);

	/*
	 * The MMIO mapping, clock and IRQ are devm-managed (see probe), so
	 * they must not be released by hand here.
	 */

	return 0;
}

static struct platform_driver atmel_sha_driver = {
	.probe		= atmel_sha_probe,
	.remove		= atmel_sha_remove,
	.driver		= {
		.name	= "atmel_sha",
		.of_match_table	= of_match_ptr(atmel_sha_dt_ids),
	},
};

module_platform_driver(atmel_sha_driver);

MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");