/*
 * Cryptographic API.
 *
 * Support for ATMEL SHA1/SHA256 HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from the omap-sham.c driver.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/crypto-atmel.h>
#include "atmel-sha-regs.h"

/* SHA flags */
#define SHA_FLAGS_BUSY			BIT(0)
#define SHA_FLAGS_FINAL			BIT(1)
#define SHA_FLAGS_DMA_ACTIVE		BIT(2)
#define SHA_FLAGS_OUTPUT_READY		BIT(3)
#define SHA_FLAGS_INIT			BIT(4)
#define SHA_FLAGS_CPU			BIT(5)
#define SHA_FLAGS_DMA_READY		BIT(6)

/* bits[11:8] are reserved. */
#define SHA_FLAGS_ALGO_MASK		SHA_MR_ALGO_MASK
#define SHA_FLAGS_SHA1			SHA_MR_ALGO_SHA1
#define SHA_FLAGS_SHA256		SHA_MR_ALGO_SHA256
#define SHA_FLAGS_SHA384		SHA_MR_ALGO_SHA384
#define SHA_FLAGS_SHA512		SHA_MR_ALGO_SHA512
#define SHA_FLAGS_SHA224		SHA_MR_ALGO_SHA224
#define SHA_FLAGS_HMAC			SHA_MR_HMAC
#define SHA_FLAGS_HMAC_SHA1		(SHA_FLAGS_HMAC | SHA_FLAGS_SHA1)
#define SHA_FLAGS_HMAC_SHA256		(SHA_FLAGS_HMAC | SHA_FLAGS_SHA256)
#define SHA_FLAGS_HMAC_SHA384		(SHA_FLAGS_HMAC | SHA_FLAGS_SHA384)
#define SHA_FLAGS_HMAC_SHA512		(SHA_FLAGS_HMAC | SHA_FLAGS_SHA512)
#define SHA_FLAGS_HMAC_SHA224		(SHA_FLAGS_HMAC | SHA_FLAGS_SHA224)
#define SHA_FLAGS_MODE_MASK		(SHA_FLAGS_HMAC | SHA_FLAGS_ALGO_MASK)

#define SHA_FLAGS_FINUP			BIT(16)
#define SHA_FLAGS_SG			BIT(17)
#define SHA_FLAGS_ERROR			BIT(23)
#define SHA_FLAGS_PAD			BIT(24)
#define SHA_FLAGS_RESTORE		BIT(25)
#define SHA_FLAGS_IDATAR0		BIT(26)
#define SHA_FLAGS_WAIT_DATARDY		BIT(27)

#define SHA_OP_INIT	0
#define SHA_OP_UPDATE	1
#define SHA_OP_FINAL	2
#define SHA_OP_DIGEST	3

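/*
 * Sizing notes (informative): with the common 4 KiB PAGE_SIZE,
 * SHA_BUFFER_LEN evaluates to 256 bytes; the request context buffer is
 * allocated as SHA_BUFFER_LEN + SHA512_BLOCK_SIZE so that padding always
 * fits. Final blocks shorter than ATMEL_SHA_DMA_THRESHOLD bytes are fed
 * to the IP by the CPU rather than by DMA.
 */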
#define SHA_BUFFER_LEN		(PAGE_SIZE / 16)

#define ATMEL_SHA_DMA_THRESHOLD		56

struct atmel_sha_caps {
	bool	has_dma;
	bool	has_dualbuff;
	bool	has_sha224;
	bool	has_sha_384_512;
	bool	has_uihv;
	bool	has_hmac;
};

struct atmel_sha_dev;

/*
 * .statesize = sizeof(struct atmel_sha_reqctx) must be <= PAGE_SIZE / 8 as
 * tested by the ahash_prepare_alg() function.
 */
struct atmel_sha_reqctx {
	struct atmel_sha_dev	*dd;
	unsigned long		flags;
	unsigned long		op;

	u8	digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
	u64	digcnt[2];
	size_t	bufcnt;
	size_t	buflen;
	dma_addr_t	dma_addr;

	/* walk state */
	struct scatterlist	*sg;
	unsigned int		offset;	/* offset in current sg */
	unsigned int		total;	/* total request */

	size_t	block_size;
	size_t	hash_size;

	u8 buffer[SHA_BUFFER_LEN + SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};

typedef int (*atmel_sha_fn_t)(struct atmel_sha_dev *);

struct atmel_sha_ctx {
	struct atmel_sha_dev	*dd;
	atmel_sha_fn_t		start;

	unsigned long		flags;
};

#define ATMEL_SHA_QUEUE_LENGTH	50

struct atmel_sha_dma {
	struct dma_chan			*chan;
	struct dma_slave_config		dma_conf;
	struct scatterlist		*sg;
	int				nents;
	unsigned int			last_sg_length;
};

struct atmel_sha_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	struct clk		*iclk;
	int			irq;
	void __iomem		*io_base;

	spinlock_t		lock;
	int			err;
	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;
	bool			is_async;
	atmel_sha_fn_t		resume;
	atmel_sha_fn_t		cpu_transfer_complete;

	struct atmel_sha_dma	dma_lch_in;

	struct atmel_sha_caps	caps;

	struct scatterlist	tmp;

	u32	hw_version;
};

struct atmel_sha_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_sha_drv atmel_sha = {
	.dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
};

static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_sha_write(struct atmel_sha_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

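/*
 * Common completion path: clear the transient state flags, release the
 * peripheral clock and, for asynchronously handled requests, invoke the
 * crypto API completion callback; finally kick the queue tasklet so any
 * pending request can be started.
 */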
static inline int atmel_sha_complete(struct atmel_sha_dev *dd, int err)
{
	struct ahash_request *req = dd->req;

	dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
		       SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);

	clk_disable(dd->iclk);

	if (dd->is_async && req->base.complete)
		req->base.complete(&req->base, err);

	/* handle new request */
	tasklet_schedule(&dd->queue_task);

	return err;
}

static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
{
	size_t count;

	while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
		count = min(ctx->sg->length - ctx->offset, ctx->total);
		count = min(count, ctx->buflen - ctx->bufcnt);

		if (count <= 0) {
			/*
			 * Check if count <= 0 because the buffer is full or
			 * because the sg length is 0. In the latter case,
			 * check if there is another sg in the list: a
			 * zero-length sg doesn't necessarily mean the end of
			 * the sg list.
			 */
			if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
				ctx->sg = sg_next(ctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
			ctx->offset, count, 0);

		ctx->bufcnt += count;
		ctx->offset += count;
		ctx->total -= count;

		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}

/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 * The bit "1" is appended at the end of the message followed by
 * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
 * 128-bit block (SHA384/SHA512) equal to the message length in bits
 * is appended.
 *
 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
 * - if message length < 56 bytes then padlen = 56 - message length
 * - else padlen = 64 + 56 - message length
 *
 * For SHA384/SHA512, padlen is calculated as follows:
 * - if message length < 112 bytes then padlen = 112 - message length
 * - else padlen = 128 + 112 - message length
 */
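/*
 * Worked example (illustrative): for a 3-byte SHA-1 message, index = 3,
 * so padlen = 56 - 3 = 53: one 0x80 byte, 52 zero bytes, then the 8-byte
 * big-endian bit length (24), for a padded total of 64 bytes.
 */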
static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
{
	unsigned int index, padlen;
	u64 bits[2];
	u64 size[2];

	size[0] = ctx->digcnt[0];
	size[1] = ctx->digcnt[1];

	size[0] += ctx->bufcnt;
	if (size[0] < ctx->bufcnt)
		size[1]++;

	size[0] += length;
	if (size[0] < length)
		size[1]++;

	bits[1] = cpu_to_be64(size[0] << 3);
	bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA384:
	case SHA_FLAGS_SHA512:
		index = ctx->bufcnt & 0x7f;
		padlen = (index < 112) ? (112 - index) : ((128+112) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
		ctx->bufcnt += padlen + 16;
		ctx->flags |= SHA_FLAGS_PAD;
		break;

	default:
		index = ctx->bufcnt & 0x3f;
		padlen = (index < 56) ? (56 - index) : ((64+56) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
		ctx->bufcnt += padlen + 8;
		ctx->flags |= SHA_FLAGS_PAD;
		break;
	}
}

static struct atmel_sha_dev *atmel_sha_find_dev(struct atmel_sha_ctx *tctx)
{
	struct atmel_sha_dev *dd = NULL;
	struct atmel_sha_dev *tmp;

	spin_lock_bh(&atmel_sha.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}

	spin_unlock_bh(&atmel_sha.lock);

	return dd;
}

static int atmel_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = atmel_sha_find_dev(tctx);

	ctx->dd = dd;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA1;
		ctx->block_size = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA224;
		ctx->block_size = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA256;
		ctx->block_size = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA384;
		ctx->block_size = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA512;
		ctx->block_size = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->digcnt[0] = 0;
	ctx->digcnt[1] = 0;
	ctx->buflen = SHA_BUFFER_LEN;

	return 0;
}

static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 valmr = SHA_MR_MODE_AUTO;
	unsigned int i, hashsize = 0;

	if (likely(dma)) {
		if (!dd->caps.has_dma)
			atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
		valmr = SHA_MR_MODE_PDC;
		if (dd->caps.has_dualbuff)
			valmr |= SHA_MR_DUALBUFF;
	} else {
		atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
	}

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		valmr |= SHA_MR_ALGO_SHA1;
		hashsize = SHA1_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA224:
		valmr |= SHA_MR_ALGO_SHA224;
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA256:
		valmr |= SHA_MR_ALGO_SHA256;
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA384:
		valmr |= SHA_MR_ALGO_SHA384;
		hashsize = SHA512_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA512:
		valmr |= SHA_MR_ALGO_SHA512;
		hashsize = SHA512_DIGEST_SIZE;
		break;

	default:
		break;
	}

	/* Setting CR_FIRST only for the first iteration */
	if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
	} else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
		const u32 *hash = (const u32 *)ctx->digest;

		/*
		 * Restore the hardware context: update the User Initialize
		 * Hash Value (UIHV) with the value saved when the latest
		 * 'update' operation completed on this very same crypto
		 * request.
		 */
		ctx->flags &= ~SHA_FLAGS_RESTORE;
		atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
		for (i = 0; i < hashsize / sizeof(u32); ++i)
			atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]);
		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
		valmr |= SHA_MR_UIHV;
	}
	/*
	 * WARNING: If the UIHV feature is not available, the hardware CANNOT
	 * process concurrent requests: the internal registers used to store
	 * the hash/digest are still set to the partial digest output values
	 * computed during the latest round.
	 */

	atmel_sha_write(dd, SHA_MR, valmr);
}

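/*
 * Run resume() immediately if the Input Data Registers are already ready
 * to accept new data; otherwise record the callback and enable the
 * DATARDY interrupt so the done tasklet resumes the transfer later.
 */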
static inline int atmel_sha_wait_for_data_ready(struct atmel_sha_dev *dd,
						atmel_sha_fn_t resume)
{
	u32 isr = atmel_sha_read(dd, SHA_ISR);

	if (unlikely(isr & SHA_INT_DATARDY))
		return resume(dd);

	dd->resume = resume;
	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
	return -EINPROGRESS;
}

static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length, final);

	atmel_sha_write_ctrl(dd, 0);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length;
	if (ctx->digcnt[0] < length)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dd->flags |= SHA_FLAGS_CPU;

	for (count = 0; count < len32; count++)
		atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);

	return -EINPROGRESS;
}

static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int len32;

	dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length1, final);

	len32 = DIV_ROUND_UP(length1, sizeof(u32));
	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
	atmel_sha_write(dd, SHA_TPR, dma_addr1);
	atmel_sha_write(dd, SHA_TCR, len32);

	len32 = DIV_ROUND_UP(length2, sizeof(u32));
	atmel_sha_write(dd, SHA_TNPR, dma_addr2);
	atmel_sha_write(dd, SHA_TNCR, len32);

	atmel_sha_write_ctrl(dd, 1);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length1;
	if (ctx->digcnt[0] < length1)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= SHA_FLAGS_DMA_ACTIVE;

	/* Start DMA transfer */
	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);

	return -EINPROGRESS;
}

static void atmel_sha_dma_callback(void *data)
{
	struct atmel_sha_dev *dd = data;

	dd->is_async = true;

	/* dma_lch_in - completed - wait DATRDY */
	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
}

static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	struct dma_async_tx_descriptor	*in_desc;
	struct scatterlist sg[2];

	dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length1, final);

	dd->dma_lch_in.dma_conf.src_maxburst = 16;
	dd->dma_lch_in.dma_conf.dst_maxburst = 16;

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);

	if (length2) {
		sg_init_table(sg, 2);
		sg_dma_address(&sg[0]) = dma_addr1;
		sg_dma_len(&sg[0]) = length1;
		sg_dma_address(&sg[1]) = dma_addr2;
		sg_dma_len(&sg[1]) = length2;
		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	} else {
		sg_init_table(sg, 1);
		sg_dma_address(&sg[0]) = dma_addr1;
		sg_dma_len(&sg[0]) = length1;
		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}
	if (!in_desc)
		return atmel_sha_complete(dd, -EINVAL);

	in_desc->callback = atmel_sha_dma_callback;
	in_desc->callback_param = dd;

	atmel_sha_write_ctrl(dd, 1);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length1;
	if (ctx->digcnt[0] < length1)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= SHA_FLAGS_DMA_ACTIVE;

	/* Start DMA transfer */
	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return -EINPROGRESS;
}

static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	if (dd->caps.has_dma)
		return atmel_sha_xmit_dma(dd, dma_addr1, length1,
				dma_addr2, length2, final);
	else
		return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
				dma_addr2, length2, final);
}

static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;

	atmel_sha_append_sg(ctx);
	atmel_sha_fill_padding(ctx, 0);
	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}

static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
					struct atmel_sha_reqctx *ctx,
					size_t length, int final)
{
	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen +
				ctx->block_size);
		return atmel_sha_complete(dd, -EINVAL);
	}

	ctx->flags &= ~SHA_FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
}

static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int final;
	size_t count;

	atmel_sha_append_sg(ctx);

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: 0x%llx 0x%llx, final: %d\n",
		ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);

	if (final)
		atmel_sha_fill_padding(ctx, 0);

	if (final || (ctx->bufcnt == ctx->buflen)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		return atmel_sha_xmit_dma_map(dd, ctx, count, final);
	}

	return 0;
}

static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length, final, tail;
	struct scatterlist *sg;
	unsigned int count;

	if (!ctx->total)
		return 0;

	if (ctx->bufcnt || ctx->offset)
		return atmel_sha_update_dma_slow(dd);

	dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %u, total: %u\n",
		ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);

	sg = ctx->sg;

	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
		return atmel_sha_update_dma_slow(dd);

	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
		/* size is not ctx->block_size aligned */
		return atmel_sha_update_dma_slow(dd);

	length = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & SHA_FLAGS_FINUP)) {
			/* not last sg must be ctx->block_size aligned */
			tail = length & (ctx->block_size - 1);
			length -= tail;
		}
	}

	ctx->total -= length;
	ctx->offset = length; /* offset where to start slow */

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	/* Add padding */
	if (final) {
		tail = length & (ctx->block_size - 1);
		length -= tail;
		ctx->total += tail;
		ctx->offset = length; /* offset where to start slow */

		sg = ctx->sg;
		atmel_sha_append_sg(ctx);

		atmel_sha_fill_padding(ctx, length);

		ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
			ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
		if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
			dev_err(dd->dev, "dma %u bytes error\n",
				ctx->buflen + ctx->block_size);
			return atmel_sha_complete(dd, -EINVAL);
		}

		if (length == 0) {
			ctx->flags &= ~SHA_FLAGS_SG;
			count = ctx->bufcnt;
			ctx->bufcnt = 0;
			return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
					0, final);
		} else {
			ctx->sg = sg;
			if (!dma_map_sg(dd->dev, ctx->sg, 1,
				DMA_TO_DEVICE)) {
				dev_err(dd->dev, "dma_map_sg error\n");
				return atmel_sha_complete(dd, -EINVAL);
			}

			ctx->flags |= SHA_FLAGS_SG;

			count = ctx->bufcnt;
			ctx->bufcnt = 0;
			return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
					length, ctx->dma_addr, count, final);
		}
	}

	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return atmel_sha_complete(dd, -EINVAL);
	}

	ctx->flags |= SHA_FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
			0, final);
}

static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);

	if (ctx->flags & SHA_FLAGS_SG) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
		if (ctx->flags & SHA_FLAGS_PAD) {
			dma_unmap_single(dd->dev, ctx->dma_addr,
				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
		}
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
						ctx->block_size, DMA_TO_DEVICE);
	}

	return 0;
}

static int atmel_sha_update_req(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
		ctx->total, ctx->digcnt[1], ctx->digcnt[0]);

	if (ctx->flags & SHA_FLAGS_CPU)
		err = atmel_sha_update_cpu(dd);
	else
		err = atmel_sha_update_dma_start(dd);

	/* wait for dma completion before can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n",
			err, ctx->digcnt[1], ctx->digcnt[0]);

	return err;
}

static int atmel_sha_final_req(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err = 0;
	int count;

	if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
		atmel_sha_fill_padding(ctx, 0);
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
	} else {
		/* faster to handle last block with cpu */
		atmel_sha_fill_padding(ctx, 0);
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
	}

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}

static void atmel_sha_copy_hash(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	unsigned int i, hashsize;

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		hashsize = SHA1_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA224:
	case SHA_FLAGS_SHA256:
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA384:
	case SHA_FLAGS_SHA512:
		hashsize = SHA512_DIGEST_SIZE;
		break;

	default:
		/* Should not happen... */
		return;
	}

	for (i = 0; i < hashsize / sizeof(u32); ++i)
		hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
	ctx->flags |= SHA_FLAGS_RESTORE;
}

static void atmel_sha_copy_ready_hash(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return;

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	default:
	case SHA_FLAGS_SHA1:
		memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
		break;

	case SHA_FLAGS_SHA224:
		memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
		break;

	case SHA_FLAGS_SHA256:
		memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
		break;

	case SHA_FLAGS_SHA384:
		memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
		break;

	case SHA_FLAGS_SHA512:
		memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
		break;
	}
}

static int atmel_sha_finish(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;

	if (ctx->digcnt[0] || ctx->digcnt[1])
		atmel_sha_copy_ready_hash(req);

	dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1],
		ctx->digcnt[0], ctx->bufcnt);

	return 0;
}

static void atmel_sha_finish_req(struct ahash_request *req, int err)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;

	if (!err) {
		atmel_sha_copy_hash(req);
		if (SHA_FLAGS_FINAL & dd->flags)
			err = atmel_sha_finish(req);
	} else {
		ctx->flags |= SHA_FLAGS_ERROR;
	}

	/* atomic operation is not needed here */
	(void)atmel_sha_complete(dd, err);
}

static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
{
	int err;

	err = clk_enable(dd->iclk);
	if (err)
		return err;

	if (!(SHA_FLAGS_INIT & dd->flags)) {
		atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
		dd->flags |= SHA_FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
{
	return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
}

static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
{
	atmel_sha_hw_init(dd);

	dd->hw_version = atmel_sha_get_version(dd);

	dev_info(dd->dev,
			"version: 0x%x\n", dd->hw_version);

	clk_disable(dd->iclk);
}

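/*
 * Enqueue the given request (if any) and, when the engine is idle,
 * dequeue the next one and run its context start() callback. The return
 * value reports the enqueue status of the caller's own request; when a
 * different request is dequeued it runs asynchronously and its errors
 * are reported through the completion callback instead.
 */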
static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_sha_ctx *ctx;
	unsigned long flags;
	bool start_async;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);

	if (SHA_FLAGS_BUSY & dd->flags) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= SHA_FLAGS_BUSY;

	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(async_req->tfm);

	dd->req = ahash_request_cast(async_req);
	start_async = (dd->req != req);
	dd->is_async = start_async;

	/* WARNING: ctx->start() MAY change dd->is_async. */
	err = ctx->start(dd);
	return (start_async) ? ret : err;
}

static int atmel_sha_done(struct atmel_sha_dev *dd);

static int atmel_sha_start(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = atmel_sha_hw_init(dd);
	if (err)
		goto err1;

	dd->resume = atmel_sha_done;
	if (ctx->op == SHA_OP_UPDATE) {
		err = atmel_sha_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
			/* no final() after finup() */
			err = atmel_sha_final_req(dd);
	} else if (ctx->op == SHA_OP_FINAL) {
		err = atmel_sha_final_req(dd);
	}

err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		atmel_sha_finish_req(req, err);

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return err;
}

static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct atmel_sha_dev *dd = tctx->dd;

	ctx->op = op;

	return atmel_sha_handle_queue(dd, req);
}

static int atmel_sha_update(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & SHA_FLAGS_FINUP) {
		if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
			/* faster to use CPU for short transfers */
			ctx->flags |= SHA_FLAGS_CPU;
	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
		atmel_sha_append_sg(ctx);
		return 0;
	}
	return atmel_sha_enqueue(req, SHA_OP_UPDATE);
}

static int atmel_sha_final(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= SHA_FLAGS_FINUP;

	if (ctx->flags & SHA_FLAGS_ERROR)
		return 0; /* uncompleted hash is not needed */

	if (ctx->flags & SHA_FLAGS_PAD)
		/* copy ready hash (+ finalize hmac) */
		return atmel_sha_finish(req);

	return atmel_sha_enqueue(req, SHA_OP_FINAL);
}

static int atmel_sha_finup(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= SHA_FLAGS_FINUP;

	err1 = atmel_sha_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = atmel_sha_final(req);

	return err1 ?: err2;
}

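/*
 * digest() is init() followed by finup(); the GNU "?:" extension yields
 * the init() error when it is non-zero, otherwise the finup() result.
 */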
static int atmel_sha_digest(struct ahash_request *req)
{
	return atmel_sha_init(req) ?: atmel_sha_finup(req);
}

static int atmel_sha_export(struct ahash_request *req, void *out)
{
	const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(*ctx));
	return 0;
}

static int atmel_sha_import(struct ahash_request *req, const void *in)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(*ctx));
	return 0;
}

static int atmel_sha_cra_init(struct crypto_tfm *tfm)
{
	struct atmel_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct atmel_sha_reqctx));
	ctx->start = atmel_sha_start;

	return 0;
}

static struct ahash_alg sha_1_256_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha1",
			.cra_driver_name	= "atmel-sha1",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha256",
			.cra_driver_name	= "atmel-sha256",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
};

static struct ahash_alg sha_224_alg = {
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA224_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha224",
			.cra_driver_name	= "atmel-sha224",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA224_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
};

static struct ahash_alg sha_384_512_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA384_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha384",
			.cra_driver_name	= "atmel-sha384",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA384_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA512_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha512",
			.cra_driver_name	= "atmel-sha512",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA512_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
};

static void atmel_sha_queue_task(unsigned long data)
{
	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;

	atmel_sha_handle_queue(dd, NULL);
}

static int atmel_sha_done(struct atmel_sha_dev *dd)
{
	int err = 0;

	if (SHA_FLAGS_CPU & dd->flags) {
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (SHA_FLAGS_DMA_READY & dd->flags) {
		if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
			dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
			atmel_sha_update_dma_stop(dd);
			if (dd->err) {
				err = dd->err;
				goto finish;
			}
		}
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			/* hash or semi-hash ready */
			dd->flags &= ~(SHA_FLAGS_DMA_READY |
						SHA_FLAGS_OUTPUT_READY);
			err = atmel_sha_update_dma_start(dd);
			if (err != -EINPROGRESS)
				goto finish;
		}
	}
	return err;

finish:
	/* finish current request */
	atmel_sha_finish_req(dd->req, err);

	return err;
}

static void atmel_sha_done_task(unsigned long data)
{
	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;

	dd->is_async = true;
	(void)dd->resume(dd);
}

static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
{
	struct atmel_sha_dev *sha_dd = dev_id;
	u32 reg;

	reg = atmel_sha_read(sha_dd, SHA_ISR);
	if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
		atmel_sha_write(sha_dd, SHA_IDR, reg);
		if (SHA_FLAGS_BUSY & sha_dd->flags) {
			sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
			if (!(SHA_FLAGS_CPU & sha_dd->flags))
				sha_dd->flags |= SHA_FLAGS_DMA_READY;
			tasklet_schedule(&sha_dd->done_task);
		} else {
			dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
		}
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

/* DMA transfer functions */

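/*
 * Besides validating that every segment is suitable for DMA,
 * atmel_sha_dma_check_aligned() rounds the last sg length up to a
 * multiple of 4 bytes and records the original value so that
 * atmel_sha_dma_callback2() can restore it once the transfer completes.
 */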
static bool atmel_sha_dma_check_aligned(struct atmel_sha_dev *dd,
					struct scatterlist *sg,
					size_t len)
{
	struct atmel_sha_dma *dma = &dd->dma_lch_in;
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	size_t bs = ctx->block_size;
	int nents;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		/*
		 * This is the last sg, the only one that is allowed to
		 * have an unaligned length.
		 */
		if (len <= sg->length) {
			dma->nents = nents + 1;
			dma->last_sg_length = sg->length;
			sg->length = ALIGN(len, sizeof(u32));
			return true;
		}

		/* All other sg lengths MUST be aligned to the block size. */
		if (!IS_ALIGNED(sg->length, bs))
			return false;

		len -= sg->length;
	}

	return false;
}

static void atmel_sha_dma_callback2(void *data)
{
	struct atmel_sha_dev *dd = data;
	struct atmel_sha_dma *dma = &dd->dma_lch_in;
	struct scatterlist *sg;
	int nents;

	dmaengine_terminate_all(dma->chan);
	dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);

	sg = dma->sg;
	for (nents = 0; nents < dma->nents - 1; ++nents)
		sg = sg_next(sg);
	sg->length = dma->last_sg_length;

	dd->is_async = true;
	(void)atmel_sha_wait_for_data_ready(dd, dd->resume);
}

static int atmel_sha_dma_start(struct atmel_sha_dev *dd,
			       struct scatterlist *src,
			       size_t len,
			       atmel_sha_fn_t resume)
{
	struct atmel_sha_dma *dma = &dd->dma_lch_in;
	struct dma_slave_config *config = &dma->dma_conf;
	struct dma_chan *chan = dma->chan;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	unsigned int sg_len;
	int err;

	dd->resume = resume;

	/*
	 * dma->nents has already been initialized by
	 * atmel_sha_dma_check_aligned().
	 */
	dma->sg = src;
	sg_len = dma_map_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
	if (!sg_len) {
		err = -ENOMEM;
		goto exit;
	}

	config->src_maxburst = 16;
	config->dst_maxburst = 16;
	err = dmaengine_slave_config(chan, config);
	if (err)
		goto unmap_sg;

	desc = dmaengine_prep_slave_sg(chan, dma->sg, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		err = -ENOMEM;
		goto unmap_sg;
	}

	desc->callback = atmel_sha_dma_callback2;
	desc->callback_param = dd;
	cookie = dmaengine_submit(desc);
	err = dma_submit_error(cookie);
	if (err)
		goto unmap_sg;

	dma_async_issue_pending(chan);

	return -EINPROGRESS;

unmap_sg:
	dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
exit:
	return atmel_sha_complete(dd, err);
}


/* CPU transfer functions */

static int atmel_sha_cpu_transfer(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	const u32 *words = (const u32 *)ctx->buffer;
	size_t i, num_words;
	u32 isr, din, din_inc;

	din_inc = (ctx->flags & SHA_FLAGS_IDATAR0) ? 0 : 1;
	for (;;) {
		/* Write data into the Input Data Registers. */
		num_words = DIV_ROUND_UP(ctx->bufcnt, sizeof(u32));
		for (i = 0, din = 0; i < num_words; ++i, din += din_inc)
			atmel_sha_write(dd, SHA_REG_DIN(din), words[i]);

		ctx->offset += ctx->bufcnt;
		ctx->total -= ctx->bufcnt;

		if (!ctx->total)
			break;

		/*
		 * Prepare next block:
		 * Fill ctx->buffer now with the next data to be written into
		 * IDATARx: it gives time for the SHA hardware to process
		 * the current data so the SHA_INT_DATARDY flag might be set
		 * in SHA_ISR when polling this register at the beginning of
		 * the next loop.
		 */
		ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total);
		scatterwalk_map_and_copy(ctx->buffer, ctx->sg,
					 ctx->offset, ctx->bufcnt, 0);

		/* Wait for hardware to be ready again. */
		isr = atmel_sha_read(dd, SHA_ISR);
		if (!(isr & SHA_INT_DATARDY)) {
			/* Not ready yet. */
			dd->resume = atmel_sha_cpu_transfer;
			atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
			return -EINPROGRESS;
		}
	}

	if (unlikely(!(ctx->flags & SHA_FLAGS_WAIT_DATARDY)))
		return dd->cpu_transfer_complete(dd);

	return atmel_sha_wait_for_data_ready(dd, dd->cpu_transfer_complete);
}

static int atmel_sha_cpu_start(struct atmel_sha_dev *dd,
			       struct scatterlist *sg,
			       unsigned int len,
			       bool idatar0_only,
			       bool wait_data_ready,
			       atmel_sha_fn_t resume)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!len)
		return resume(dd);

	ctx->flags &= ~(SHA_FLAGS_IDATAR0 | SHA_FLAGS_WAIT_DATARDY);

	if (idatar0_only)
		ctx->flags |= SHA_FLAGS_IDATAR0;

	if (wait_data_ready)
		ctx->flags |= SHA_FLAGS_WAIT_DATARDY;

	ctx->sg = sg;
	ctx->total = len;
	ctx->offset = 0;

	/* Prepare the first block to be written. */
	ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total);
	scatterwalk_map_and_copy(ctx->buffer, ctx->sg,
				 ctx->offset, ctx->bufcnt, 0);

	dd->cpu_transfer_complete = resume;
	return atmel_sha_cpu_transfer(dd);
}

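/*
 * Hash a small contiguous buffer entirely by CPU in auto mode. With
 * auto_padding the hardware pads the message itself, so SHA_MSR and
 * SHA_BCR are programmed with the real message length; otherwise the
 * caller must supply block-aligned data and msglen is left at 0.
 */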
static int atmel_sha_cpu_hash(struct atmel_sha_dev *dd,
			      const void *data, unsigned int datalen,
			      bool auto_padding,
			      atmel_sha_fn_t resume)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	u32 msglen = (auto_padding) ? datalen : 0;
	u32 mr = SHA_MR_MODE_AUTO;

	if (!(IS_ALIGNED(datalen, ctx->block_size) || auto_padding))
		return atmel_sha_complete(dd, -EINVAL);

	mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK);
	atmel_sha_write(dd, SHA_MR, mr);
	atmel_sha_write(dd, SHA_MSR, msglen);
	atmel_sha_write(dd, SHA_BCR, msglen);
	atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);

	sg_init_one(&dd->tmp, data, datalen);
	return atmel_sha_cpu_start(dd, &dd->tmp, datalen, false, true, resume);
}

1561
1562/* hmac functions */
1563
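/*
 * HMAC is defined (RFC 2104) as:
 *
 *   HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))
 *
 * where K' is the key zero-padded (or pre-hashed, if longer than a
 * block) to the block size of H, ipad is the byte 0x36 and opad the
 * byte 0x5c repeated over one block. The code below precomputes the
 * intermediate hashes of (K' ^ ipad) and (K' ^ opad) once per key so
 * the hardware can restore them as initial hash values for every
 * request.
 */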
struct atmel_sha_hmac_key {
	bool valid;
	unsigned int keylen;
	u8 buffer[SHA512_BLOCK_SIZE];
	u8 *keydup;
};

static inline void atmel_sha_hmac_key_init(struct atmel_sha_hmac_key *hkey)
{
	memset(hkey, 0, sizeof(*hkey));
}

static inline void atmel_sha_hmac_key_release(struct atmel_sha_hmac_key *hkey)
{
	kfree(hkey->keydup);
	memset(hkey, 0, sizeof(*hkey));
}

static inline int atmel_sha_hmac_key_set(struct atmel_sha_hmac_key *hkey,
					 const u8 *key,
					 unsigned int keylen)
{
	atmel_sha_hmac_key_release(hkey);

	if (keylen > sizeof(hkey->buffer)) {
		hkey->keydup = kmemdup(key, keylen, GFP_KERNEL);
		if (!hkey->keydup)
			return -ENOMEM;

	} else {
		memcpy(hkey->buffer, key, keylen);
	}

	hkey->valid = true;
	hkey->keylen = keylen;
	return 0;
}

static inline bool atmel_sha_hmac_key_get(const struct atmel_sha_hmac_key *hkey,
					  const u8 **key,
					  unsigned int *keylen)
{
	if (!hkey->valid)
		return false;

	*keylen = hkey->keylen;
	*key = (hkey->keydup) ? hkey->keydup : hkey->buffer;
	return true;
}


struct atmel_sha_hmac_ctx {
	struct atmel_sha_ctx base;

	struct atmel_sha_hmac_key hkey;
	u32 ipad[SHA512_BLOCK_SIZE / sizeof(u32)];
	u32 opad[SHA512_BLOCK_SIZE / sizeof(u32)];
	atmel_sha_fn_t resume;
};

static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd,
				atmel_sha_fn_t resume);
static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd,
				      const u8 *key, unsigned int keylen);
static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd);
static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd);
static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd);
static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd);

static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd);
static int atmel_sha_hmac_final(struct atmel_sha_dev *dd);
static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd);
static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd);

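/*
 * Key setup runs as a chain of completions on the device:
 * atmel_sha_hmac_setup() -> [atmel_sha_hmac_prehash_key() if the key is
 * longer than a block] -> atmel_sha_hmac_compute_ipad_hash() ->
 * atmel_sha_hmac_compute_opad_hash() -> atmel_sha_hmac_setup_done(),
 * which finally hands control back to the caller-provided resume().
 */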
static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd,
				atmel_sha_fn_t resume)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
	unsigned int keylen;
	const u8 *key;
	size_t bs;

	hmac->resume = resume;
	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		ctx->block_size = SHA1_BLOCK_SIZE;
		ctx->hash_size = SHA1_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA224:
		ctx->block_size = SHA224_BLOCK_SIZE;
		ctx->hash_size = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA256:
		ctx->block_size = SHA256_BLOCK_SIZE;
		ctx->hash_size = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA384:
		ctx->block_size = SHA384_BLOCK_SIZE;
		ctx->hash_size = SHA512_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA512:
		ctx->block_size = SHA512_BLOCK_SIZE;
		ctx->hash_size = SHA512_DIGEST_SIZE;
		break;

	default:
		return atmel_sha_complete(dd, -EINVAL);
	}
	bs = ctx->block_size;

	if (likely(!atmel_sha_hmac_key_get(&hmac->hkey, &key, &keylen)))
		return resume(dd);

	/* Compute K' from K. */
	if (unlikely(keylen > bs))
		return atmel_sha_hmac_prehash_key(dd, key, keylen);

	/* Prepare ipad. */
	memcpy((u8 *)hmac->ipad, key, keylen);
	memset((u8 *)hmac->ipad + keylen, 0, bs - keylen);
	return atmel_sha_hmac_compute_ipad_hash(dd);
}

static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd,
				      const u8 *key, unsigned int keylen)
{
	return atmel_sha_cpu_hash(dd, key, keylen, true,
				  atmel_sha_hmac_prehash_key_done);
}

static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	size_t ds = crypto_ahash_digestsize(tfm);
	size_t bs = ctx->block_size;
	size_t i, num_words = ds / sizeof(u32);

	/* Prepare ipad. */
	for (i = 0; i < num_words; ++i)
		hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
	memset((u8 *)hmac->ipad + ds, 0, bs - ds);
	return atmel_sha_hmac_compute_ipad_hash(dd);
}

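/*
 * At this point hmac->ipad holds K' (the zero-padded key, or its
 * digest). XOR it with 0x36 bytes to build (K' ^ ipad) and with 0x5c
 * bytes to build (K' ^ opad); the constants are replicated across each
 * 32-bit word so one u32 XOR processes four bytes at a time.
 */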
static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	size_t bs = ctx->block_size;
	size_t i, num_words = bs / sizeof(u32);

	memcpy(hmac->opad, hmac->ipad, bs);
	for (i = 0; i < num_words; ++i) {
		hmac->ipad[i] ^= 0x36363636;
		hmac->opad[i] ^= 0x5c5c5c5c;
	}

	return atmel_sha_cpu_hash(dd, hmac->ipad, bs, false,
				  atmel_sha_hmac_compute_opad_hash);
}

static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	size_t bs = ctx->block_size;
	size_t hs = ctx->hash_size;
	size_t i, num_words = hs / sizeof(u32);

	for (i = 0; i < num_words; ++i)
		hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
	return atmel_sha_cpu_hash(dd, hmac->opad, bs, false,
				  atmel_sha_hmac_setup_done);
}

static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	size_t hs = ctx->hash_size;
	size_t i, num_words = hs / sizeof(u32);

	for (i = 0; i < num_words; ++i)
		hmac->opad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
	atmel_sha_hmac_key_release(&hmac->hkey);
	return hmac->resume(dd);
}

static int atmel_sha_hmac_start(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err;

	err = atmel_sha_hw_init(dd);
	if (err)
		return atmel_sha_complete(dd, err);

	switch (ctx->op) {
	case SHA_OP_INIT:
		err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_init_done);
		break;

	case SHA_OP_UPDATE:
		dd->resume = atmel_sha_done;
		err = atmel_sha_update_req(dd);
		break;

	case SHA_OP_FINAL:
		dd->resume = atmel_sha_hmac_final;
		err = atmel_sha_final_req(dd);
		break;

	case SHA_OP_DIGEST:
		err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_digest2);
		break;

	default:
		return atmel_sha_complete(dd, -EINVAL);
	}

	return err;
}

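/*
 * .setkey only stores a copy of the key: the (K' ^ ipad) and
 * (K' ^ opad) hashes are computed lazily, on the device, the next time
 * an init or digest request runs (see atmel_sha_hmac_setup()).
 */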
static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
				 unsigned int keylen)
{
	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);

	if (atmel_sha_hmac_key_set(&hmac->hkey, key, keylen)) {
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	return 0;
}

static int atmel_sha_hmac_init(struct ahash_request *req)
{
	int err;

	err = atmel_sha_init(req);
	if (err)
		return err;

	return atmel_sha_enqueue(req, SHA_OP_INIT);
}

static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
	size_t bs = ctx->block_size;
	size_t hs = ctx->hash_size;

	ctx->bufcnt = 0;
	ctx->digcnt[0] = bs;
	ctx->digcnt[1] = 0;
	ctx->flags |= SHA_FLAGS_RESTORE;
	memcpy(ctx->digest, hmac->ipad, hs);
	return atmel_sha_complete(dd, 0);
}

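/*
 * Finish SHA((K' ^ opad) || d): the precomputed hash of (K' ^ opad) is
 * restored as the initial hash value (UIHV), then the inner digest d is
 * fed as the message. SHA_MSR is set to bs + ds so that the automatic
 * padding accounts for the opad block that was already absorbed.
 */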
static int atmel_sha_hmac_final(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
	u32 *digest = (u32 *)ctx->digest;
	size_t ds = crypto_ahash_digestsize(tfm);
	size_t bs = ctx->block_size;
	size_t hs = ctx->hash_size;
	size_t i, num_words;
	u32 mr;

	/* Save d = SHA((K' ^ ipad) || msg). */
	num_words = ds / sizeof(u32);
	for (i = 0; i < num_words; ++i)
		digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));

	/* Restore context to finish computing SHA((K' ^ opad) || d). */
	atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
	num_words = hs / sizeof(u32);
	for (i = 0; i < num_words; ++i)
		atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);

	mr = SHA_MR_MODE_AUTO | SHA_MR_UIHV;
	mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK);
	atmel_sha_write(dd, SHA_MR, mr);
	atmel_sha_write(dd, SHA_MSR, bs + ds);
	atmel_sha_write(dd, SHA_BCR, ds);
	atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);

	sg_init_one(&dd->tmp, digest, ds);
	return atmel_sha_cpu_start(dd, &dd->tmp, ds, false, true,
				   atmel_sha_hmac_final_done);
}

static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd)
{
	/*
	 * req->result might not be sizeof(u32) aligned, so copy the
	 * digest into ctx->digest[] before memcpy()ing the data into
	 * req->result.
	 */
	atmel_sha_copy_hash(dd->req);
	atmel_sha_copy_ready_hash(dd->req);
	return atmel_sha_complete(dd, 0);
}

static int atmel_sha_hmac_digest(struct ahash_request *req)
{
	int err;

	err = atmel_sha_init(req);
	if (err)
		return err;

	return atmel_sha_enqueue(req, SHA_OP_DIGEST);
}

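/*
 * One-shot HMAC: load the precomputed (K' ^ ipad) and (K' ^ opad)
 * hashes into the initial and expected hash value banks, then let the
 * hardware HMAC mode chain the inner and outer SHA computations over
 * the whole message in a single pass, by DMA when the request is large
 * and suitably aligned, by CPU writes otherwise.
 */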
static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
	size_t hs = ctx->hash_size;
	size_t i, num_words = hs / sizeof(u32);
	bool use_dma = false;
	u32 mr;

	/* Special case for empty message. */
	if (!req->nbytes)
		return atmel_sha_complete(dd, -EINVAL); /* TODO: handle empty messages */

	/* Check DMA threshold and alignment. */
	if (req->nbytes > ATMEL_SHA_DMA_THRESHOLD &&
	    atmel_sha_dma_check_aligned(dd, req->src, req->nbytes))
		use_dma = true;

	/* Write both initial hash values to compute a HMAC. */
	atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
	for (i = 0; i < num_words; ++i)
		atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]);

	atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV);
	for (i = 0; i < num_words; ++i)
		atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);

	/* Write the Mode, Message Size, Bytes Count then Control Registers. */
	mr = (SHA_MR_HMAC | SHA_MR_DUALBUFF);
	mr |= ctx->flags & SHA_FLAGS_ALGO_MASK;
	if (use_dma)
		mr |= SHA_MR_MODE_IDATAR0;
	else
		mr |= SHA_MR_MODE_AUTO;
	atmel_sha_write(dd, SHA_MR, mr);

	atmel_sha_write(dd, SHA_MSR, req->nbytes);
	atmel_sha_write(dd, SHA_BCR, req->nbytes);

	atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);

	/* Process data. */
	if (use_dma)
		return atmel_sha_dma_start(dd, req->src, req->nbytes,
					   atmel_sha_hmac_final_done);

	return atmel_sha_cpu_start(dd, req->src, req->nbytes, false, true,
				   atmel_sha_hmac_final_done);
}

static int atmel_sha_hmac_cra_init(struct crypto_tfm *tfm)
{
	struct atmel_sha_hmac_ctx *hmac = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct atmel_sha_reqctx));
	hmac->base.start = atmel_sha_hmac_start;
	atmel_sha_hmac_key_init(&hmac->hkey);

	return 0;
}

static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct atmel_sha_hmac_ctx *hmac = crypto_tfm_ctx(tfm);

	atmel_sha_hmac_key_release(&hmac->hkey);
}

static struct ahash_alg sha_hmac_algs[] = {
{
	.init		= atmel_sha_hmac_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.digest		= atmel_sha_hmac_digest,
	.setkey		= atmel_sha_hmac_setkey,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "hmac(sha1)",
			.cra_driver_name	= "atmel-hmac-sha1",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_hmac_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_hmac_cra_init,
			.cra_exit		= atmel_sha_hmac_cra_exit,
		}
	}
},
{
	.init		= atmel_sha_hmac_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.digest		= atmel_sha_hmac_digest,
	.setkey		= atmel_sha_hmac_setkey,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA224_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "hmac(sha224)",
			.cra_driver_name	= "atmel-hmac-sha224",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA224_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_hmac_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_hmac_cra_init,
			.cra_exit		= atmel_sha_hmac_cra_exit,
		}
	}
},
{
	.init		= atmel_sha_hmac_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.digest		= atmel_sha_hmac_digest,
	.setkey		= atmel_sha_hmac_setkey,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "hmac(sha256)",
			.cra_driver_name	= "atmel-hmac-sha256",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_hmac_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_hmac_cra_init,
			.cra_exit		= atmel_sha_hmac_cra_exit,
		}
	}
},
{
	.init		= atmel_sha_hmac_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.digest		= atmel_sha_hmac_digest,
	.setkey		= atmel_sha_hmac_setkey,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA384_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "hmac(sha384)",
			.cra_driver_name	= "atmel-hmac-sha384",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA384_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_hmac_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_hmac_cra_init,
			.cra_exit		= atmel_sha_hmac_cra_exit,
		}
	}
},
{
	.init		= atmel_sha_hmac_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.digest		= atmel_sha_hmac_digest,
	.setkey		= atmel_sha_hmac_setkey,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA512_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "hmac(sha512)",
			.cra_driver_name	= "atmel-hmac-sha512",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA512_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_hmac_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_hmac_cra_init,
			.cra_exit		= atmel_sha_hmac_cra_exit,
		}
	}
},
};
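
/*
 * Usage sketch (illustrative only, not part of this driver): a kernel
 * client would reach these algorithms through the generic ahash API,
 * e.g. for "hmac(sha256)":
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_callback(req, 0, my_done_cb, my_ctx);
 *	ahash_request_set_crypt(req, src_sg, out_digest, nbytes);
 *	err = crypto_ahash_digest(req);	// -EINPROGRESS when async
 *	...
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 *
 * where my_done_cb, my_ctx, key, src_sg, out_digest and nbytes are
 * caller-supplied.
 */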

static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
{
	int i;

	if (dd->caps.has_hmac)
		for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++)
			crypto_unregister_ahash(&sha_hmac_algs[i]);

	for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
		crypto_unregister_ahash(&sha_1_256_algs[i]);

	if (dd->caps.has_sha224)
		crypto_unregister_ahash(&sha_224_alg);

	if (dd->caps.has_sha_384_512) {
		for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
			crypto_unregister_ahash(&sha_384_512_algs[i]);
	}
}

static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
		err = crypto_register_ahash(&sha_1_256_algs[i]);
		if (err)
			goto err_sha_1_256_algs;
	}

	if (dd->caps.has_sha224) {
		err = crypto_register_ahash(&sha_224_alg);
		if (err)
			goto err_sha_224_algs;
	}

	if (dd->caps.has_sha_384_512) {
		for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
			err = crypto_register_ahash(&sha_384_512_algs[i]);
			if (err)
				goto err_sha_384_512_algs;
		}
	}

	if (dd->caps.has_hmac) {
		for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++) {
			err = crypto_register_ahash(&sha_hmac_algs[i]);
			if (err)
				goto err_sha_hmac_algs;
		}
	}

	return 0;

	/* i = ARRAY_SIZE(sha_hmac_algs); */
err_sha_hmac_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_hmac_algs[j]);
	i = ARRAY_SIZE(sha_384_512_algs);
err_sha_384_512_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_384_512_algs[j]);
	crypto_unregister_ahash(&sha_224_alg);
err_sha_224_algs:
	i = ARRAY_SIZE(sha_1_256_algs);
err_sha_1_256_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_1_256_algs[j]);

	return err;
}

static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *sl = slave;

	if (sl && sl->dma_dev == chan->device->dev) {
		chan->private = sl;
		return true;
	} else {
		return false;
	}
}

static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
			      struct crypto_platform_data *pdata)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask_in;

	/* Try to grab DMA channel */
	dma_cap_zero(mask_in);
	dma_cap_set(DMA_SLAVE, mask_in);

	dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
			atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
	if (!dd->dma_lch_in.chan) {
		dev_warn(dd->dev, "no DMA channel available\n");
		return err;
	}

	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		SHA_REG_DIN(0);
	dd->dma_lch_in.dma_conf.src_maxburst = 1;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	return 0;
}

static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
}

static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
{
	dd->caps.has_dma = 0;
	dd->caps.has_dualbuff = 0;
	dd->caps.has_sha224 = 0;
	dd->caps.has_sha_384_512 = 0;
	dd->caps.has_uihv = 0;
	dd->caps.has_hmac = 0;

	/* keep only major version number */
	switch (dd->hw_version & 0xff0) {
	case 0x510:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		dd->caps.has_sha_384_512 = 1;
		dd->caps.has_uihv = 1;
		dd->caps.has_hmac = 1;
		break;
	case 0x420:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		dd->caps.has_sha_384_512 = 1;
		dd->caps.has_uihv = 1;
		break;
	case 0x410:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		dd->caps.has_sha_384_512 = 1;
		break;
	case 0x400:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		break;
	case 0x320:
		break;
	default:
		dev_warn(dd->dev,
			 "Unmanaged sha version, set minimum capabilities\n");
		break;
	}
}

#if defined(CONFIG_OF)
static const struct of_device_id atmel_sha_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-sha" },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);

static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct crypto_platform_data *pdata;

	if (!np) {
		dev_err(&pdev->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	pdata->dma_slave = devm_kzalloc(&pdev->dev,
					sizeof(*(pdata->dma_slave)),
					GFP_KERNEL);
	if (!pdata->dma_slave) {
		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
		return ERR_PTR(-ENOMEM);
	}

	return pdata;
}
#else /* CONFIG_OF */
static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
{
	return ERR_PTR(-EINVAL);
}
#endif

static int atmel_sha_probe(struct platform_device *pdev)
{
	struct atmel_sha_dev *sha_dd;
	struct crypto_platform_data *pdata;
	struct device *dev = &pdev->dev;
	struct resource *sha_res;
	int err;

	sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
	if (sha_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto sha_dd_err;
	}

	sha_dd->dev = dev;

	platform_set_drvdata(pdev, sha_dd);

	INIT_LIST_HEAD(&sha_dd->list);
	spin_lock_init(&sha_dd->lock);

	tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
		     (unsigned long)sha_dd);
	tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task,
		     (unsigned long)sha_dd);

	crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);

	sha_dd->irq = -1;

	/* Get the base address */
	sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!sha_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	sha_dd->phys_base = sha_res->start;

	/* Get the IRQ */
	sha_dd->irq = platform_get_irq(pdev, 0);
	if (sha_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = sha_dd->irq;
		goto res_err;
	}

	err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
			       IRQF_SHARED, "atmel-sha", sha_dd);
	if (err) {
		dev_err(dev, "unable to request sha irq.\n");
		goto res_err;
	}

	/* Initializing the clock */
	sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk");
	if (IS_ERR(sha_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(sha_dd->iclk);
		goto res_err;
	}

	sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
	if (IS_ERR(sha_dd->io_base)) {
		dev_err(dev, "can't ioremap\n");
		err = PTR_ERR(sha_dd->io_base);
		goto res_err;
	}

	err = clk_prepare(sha_dd->iclk);
	if (err)
		goto res_err;

	atmel_sha_hw_version_init(sha_dd);

	atmel_sha_get_cap(sha_dd);

	if (sha_dd->caps.has_dma) {
		pdata = pdev->dev.platform_data;
		if (!pdata) {
			pdata = atmel_sha_of_init(pdev);
			if (IS_ERR(pdata)) {
				dev_err(&pdev->dev, "platform data not available\n");
				err = PTR_ERR(pdata);
				goto iclk_unprepare;
			}
		}
		if (!pdata->dma_slave) {
			err = -ENXIO;
			goto iclk_unprepare;
		}
		err = atmel_sha_dma_init(sha_dd, pdata);
		if (err)
			goto err_sha_dma;

		dev_info(dev, "using %s for DMA transfers\n",
			 dma_chan_name(sha_dd->dma_lch_in.chan));
	}

	spin_lock(&atmel_sha.lock);
	list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
	spin_unlock(&atmel_sha.lock);

	err = atmel_sha_register_algs(sha_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel SHA1/SHA256%s%s\n",
		 sha_dd->caps.has_sha224 ? "/SHA224" : "",
		 sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : "");

	return 0;

err_algs:
	spin_lock(&atmel_sha.lock);
	list_del(&sha_dd->list);
	spin_unlock(&atmel_sha.lock);
	if (sha_dd->caps.has_dma)
		atmel_sha_dma_cleanup(sha_dd);
err_sha_dma:
iclk_unprepare:
	clk_unprepare(sha_dd->iclk);
res_err:
	tasklet_kill(&sha_dd->queue_task);
	tasklet_kill(&sha_dd->done_task);
sha_dd_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

static int atmel_sha_remove(struct platform_device *pdev)
{
	struct atmel_sha_dev *sha_dd;

	sha_dd = platform_get_drvdata(pdev);
	if (!sha_dd)
		return -ENODEV;
	spin_lock(&atmel_sha.lock);
	list_del(&sha_dd->list);
	spin_unlock(&atmel_sha.lock);

	atmel_sha_unregister_algs(sha_dd);

	tasklet_kill(&sha_dd->queue_task);
	tasklet_kill(&sha_dd->done_task);

	if (sha_dd->caps.has_dma)
		atmel_sha_dma_cleanup(sha_dd);

	clk_unprepare(sha_dd->iclk);

	return 0;
}

static struct platform_driver atmel_sha_driver = {
	.probe		= atmel_sha_probe,
	.remove		= atmel_sha_remove,
	.driver		= {
		.name	= "atmel_sha",
		.of_match_table	= of_match_ptr(atmel_sha_dt_ids),
	},
};

module_platform_driver(atmel_sha_driver);

MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");