blob: 63b09e01075c9a5c34abc1c294e562909f4cf550 [file] [log] [blame]
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001/*
2 * Cryptographic API.
3 *
4 * Support for ATMEL SHA1/SHA256 HW acceleration.
5 *
6 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
7 * Author: Nicolas Royer <nicolas@eukrea.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
12 *
13 * Some ideas are from omap-sham.c drivers.
14 */
15
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/err.h>
21#include <linux/clk.h>
22#include <linux/io.h>
23#include <linux/hw_random.h>
24#include <linux/platform_device.h>
25
26#include <linux/device.h>
Nicolas Royerebc82ef2012-07-01 19:19:46 +020027#include <linux/init.h>
28#include <linux/errno.h>
29#include <linux/interrupt.h>
Nicolas Royerebc82ef2012-07-01 19:19:46 +020030#include <linux/irq.h>
Nicolas Royerebc82ef2012-07-01 19:19:46 +020031#include <linux/scatterlist.h>
32#include <linux/dma-mapping.h>
Nicolas Ferreabfe7ae2013-10-15 15:36:34 +020033#include <linux/of_device.h>
Nicolas Royerebc82ef2012-07-01 19:19:46 +020034#include <linux/delay.h>
35#include <linux/crypto.h>
36#include <linux/cryptohash.h>
37#include <crypto/scatterwalk.h>
38#include <crypto/algapi.h>
39#include <crypto/sha.h>
40#include <crypto/hash.h>
41#include <crypto/internal/hash.h>
Nicolas Royerd4905b32013-02-20 17:10:26 +010042#include <linux/platform_data/crypto-atmel.h>
Nicolas Royerebc82ef2012-07-01 19:19:46 +020043#include "atmel-sha-regs.h"
44
/* SHA flags (used in dd->flags and/or ctx->flags) */
#define SHA_FLAGS_BUSY			BIT(0)	/* a request is being processed */
#define	SHA_FLAGS_FINAL			BIT(1)	/* last block has been submitted */
#define SHA_FLAGS_DMA_ACTIVE	BIT(2)	/* a PDC/DMA transfer is running */
#define SHA_FLAGS_OUTPUT_READY	BIT(3)
#define SHA_FLAGS_INIT			BIT(4)	/* IP has been soft-reset once */
#define SHA_FLAGS_CPU			BIT(5)	/* data is fed by PIO, not DMA */
#define SHA_FLAGS_DMA_READY		BIT(6)

#define SHA_FLAGS_FINUP		BIT(16)	/* final() is implied by this update() */
#define SHA_FLAGS_SG		BIT(17)	/* data was DMA-mapped from the sg list */
#define SHA_FLAGS_ALGO_MASK	GENMASK(22, 18)	/* one SHA_FLAGS_SHA* bit below */
#define SHA_FLAGS_SHA1		BIT(18)
#define SHA_FLAGS_SHA224	BIT(19)
#define SHA_FLAGS_SHA256	BIT(20)
#define SHA_FLAGS_SHA384	BIT(21)
#define SHA_FLAGS_SHA512	BIT(22)
#define SHA_FLAGS_ERROR		BIT(23)
#define SHA_FLAGS_PAD		BIT(24)	/* final padding already appended */
#define SHA_FLAGS_RESTORE	BIT(25)	/* ctx->digest holds a state restorable via UIHV */

/* ctx->op values */
#define SHA_OP_UPDATE	1
#define SHA_OP_FINAL	2

#define SHA_BUFFER_LEN		(PAGE_SIZE / 16)

/* below this byte count the final block is fed by CPU rather than DMA */
#define ATMEL_SHA_DMA_THRESHOLD		56
72
/* Capabilities of a given SHA IP revision, filled at probe time. */
struct atmel_sha_caps {
	bool	has_dma;		/* dmaengine channel (vs. PDC) */
	bool	has_dualbuff;		/* dual input buffer support */
	bool	has_sha224;
	bool	has_sha_384_512;
	bool	has_uihv;		/* User Initialize Hash Value registers */
};

struct atmel_sha_dev;
82
/*
 * Exported/imported hash state.
 * .statesize = sizeof(struct atmel_sha_state) must be <= PAGE_SIZE / 8 as
 * tested by the ahash_prepare_alg() function.
 */
struct atmel_sha_state {
	u8	digest[SHA512_DIGEST_SIZE];	/* partial digest */
	u8	buffer[SHA_BUFFER_LEN];		/* not-yet-hashed data */
	u64	digcnt[2];			/* 128-bit count of hashed bytes */
	size_t	bufcnt;				/* bytes used in buffer[] */
};
93
/* Per-request driver state (ahash request context). */
struct atmel_sha_reqctx {
	struct atmel_sha_dev	*dd;	/* device processing this request */
	unsigned long	flags;		/* SHA_FLAGS_* for this request */
	unsigned long	op;		/* SHA_OP_UPDATE or SHA_OP_FINAL */

	/* partial digest, also used to restore the hw state via UIHV */
	u8	digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
	u64	digcnt[2];	/* 128-bit count of bytes already hashed */
	size_t	bufcnt;		/* bytes currently held in buffer[] */
	size_t	buflen;		/* capacity of buffer[] (SHA_BUFFER_LEN) */
	dma_addr_t	dma_addr;	/* DMA mapping of buffer[] when in use */

	/* walk state */
	struct scatterlist	*sg;
	unsigned int	offset;	/* offset in current sg */
	unsigned int	total;	/* total request */

	size_t block_size;	/* block size of the selected algorithm */

	/* trailing data buffer; buflen + block_size bytes are allocated */
	u8 buffer[0] __aligned(sizeof(u32));
};
114
/* Per-transform (tfm) context: remembers the device binding. */
struct atmel_sha_ctx {
	struct atmel_sha_dev	*dd;

	unsigned long		flags;
};

#define ATMEL_SHA_QUEUE_LENGTH	50

/* Input DMA channel and its slave configuration. */
struct atmel_sha_dma {
	struct dma_chan			*chan;
	struct dma_slave_config dma_conf;
};
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200127
/* Per-device (platform device) driver state. */
struct atmel_sha_dev {
	struct list_head	list;		/* entry in atmel_sha.dev_list */
	unsigned long		phys_base;
	struct device		*dev;
	struct clk		*iclk;		/* peripheral clock */
	int			irq;
	void __iomem		*io_base;

	spinlock_t		lock;		/* protects queue and flags */
	int			err;
	struct tasklet_struct	done_task;	/* IRQ bottom half */
	struct tasklet_struct	queue_task;	/* dequeues the next request */

	unsigned long		flags;		/* SHA_FLAGS_* device state */
	struct crypto_queue	queue;		/* pending requests */
	struct ahash_request	*req;		/* request being processed */

	struct atmel_sha_dma	dma_lch_in;	/* input data DMA channel */

	struct atmel_sha_caps	caps;		/* per-revision capabilities */

	u32			hw_version;	/* read from SHA_HW_VERSION */
};

/* Driver-global list of probed devices. */
struct atmel_sha_drv {
	struct list_head	dev_list;
	spinlock_t		lock;		/* protects dev_list */
};

static struct atmel_sha_drv atmel_sha = {
	.dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
};
161
/* Read a 32-bit SHA register (relaxed: no extra memory barrier). */
static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}
166
/* Write a 32-bit SHA register (relaxed: no extra memory barrier). */
static inline void atmel_sha_write(struct atmel_sha_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}
172
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200173static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
174{
175 size_t count;
176
177 while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
178 count = min(ctx->sg->length - ctx->offset, ctx->total);
179 count = min(count, ctx->buflen - ctx->bufcnt);
180
Leilei Zhao803eeae2015-04-07 17:45:05 +0800181 if (count <= 0) {
182 /*
183 * Check if count <= 0 because the buffer is full or
184 * because the sg length is 0. In the latest case,
185 * check if there is another sg in the list, a 0 length
186 * sg doesn't necessarily mean the end of the sg list.
187 */
188 if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
189 ctx->sg = sg_next(ctx->sg);
190 continue;
191 } else {
192 break;
193 }
194 }
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200195
196 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
197 ctx->offset, count, 0);
198
199 ctx->bufcnt += count;
200 ctx->offset += count;
201 ctx->total -= count;
202
203 if (ctx->offset == ctx->sg->length) {
204 ctx->sg = sg_next(ctx->sg);
205 if (ctx->sg)
206 ctx->offset = 0;
207 else
208 ctx->total = 0;
209 }
210 }
211
212 return 0;
213}
214
/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 * The bit "1" is appended at the end of the message followed by
 * "padlen-1" zero bits. Then a 64 bit block (SHA1/SHA224/SHA256) or a
 * 128 bit block (SHA384/SHA512) equal to the message length in bits
 * is appended.
 *
 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
 * - if message length < 56 bytes then padlen = 56 - message length
 * - else padlen = 64 + 56 - message length
 *
 * For SHA384/SHA512, padlen is calculated as follows:
 * - if message length < 112 bytes then padlen = 112 - message length
 * - else padlen = 128 + 112 - message length
 */
/* Append the standard SHA padding to ctx->buffer (see comment above). */
static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
{
	unsigned int index, padlen;
	u64 bits[2];
	u64 size[2];

	/*
	 * 128-bit total message length in bytes: already-hashed bytes
	 * (digcnt) + buffered bytes + "length" extra bytes about to be sent.
	 */
	size[0] = ctx->digcnt[0];
	size[1] = ctx->digcnt[1];

	size[0] += ctx->bufcnt;
	if (size[0] < ctx->bufcnt)	/* carry into the high word */
		size[1]++;

	size[0] += length;
	if (size[0] < length)		/* carry into the high word */
		size[1]++;

	/* Message length in bits, big-endian, most significant word first. */
	bits[1] = cpu_to_be64(size[0] << 3);
	bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);

	if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) {
		/* 128-byte blocks with a 16-byte length field at the end. */
		index = ctx->bufcnt & 0x7f;
		padlen = (index < 112) ? (112 - index) : ((128+112) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
		ctx->bufcnt += padlen + 16;
		ctx->flags |= SHA_FLAGS_PAD;
	} else {
		/* 64-byte blocks with an 8-byte length field at the end. */
		index = ctx->bufcnt & 0x3f;
		padlen = (index < 56) ? (56 - index) : ((64+56) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
		ctx->bufcnt += padlen + 8;
		ctx->flags |= SHA_FLAGS_PAD;
	}
}
269
270static int atmel_sha_init(struct ahash_request *req)
271{
272 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
273 struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
274 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
275 struct atmel_sha_dev *dd = NULL;
276 struct atmel_sha_dev *tmp;
277
278 spin_lock_bh(&atmel_sha.lock);
279 if (!tctx->dd) {
280 list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
281 dd = tmp;
282 break;
283 }
284 tctx->dd = dd;
285 } else {
286 dd = tctx->dd;
287 }
288
289 spin_unlock_bh(&atmel_sha.lock);
290
291 ctx->dd = dd;
292
293 ctx->flags = 0;
294
295 dev_dbg(dd->dev, "init: digest size: %d\n",
296 crypto_ahash_digestsize(tfm));
297
Nicolas Royerd4905b32013-02-20 17:10:26 +0100298 switch (crypto_ahash_digestsize(tfm)) {
299 case SHA1_DIGEST_SIZE:
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200300 ctx->flags |= SHA_FLAGS_SHA1;
Nicolas Royerd4905b32013-02-20 17:10:26 +0100301 ctx->block_size = SHA1_BLOCK_SIZE;
302 break;
303 case SHA224_DIGEST_SIZE:
304 ctx->flags |= SHA_FLAGS_SHA224;
305 ctx->block_size = SHA224_BLOCK_SIZE;
306 break;
307 case SHA256_DIGEST_SIZE:
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200308 ctx->flags |= SHA_FLAGS_SHA256;
Nicolas Royerd4905b32013-02-20 17:10:26 +0100309 ctx->block_size = SHA256_BLOCK_SIZE;
310 break;
311 case SHA384_DIGEST_SIZE:
312 ctx->flags |= SHA_FLAGS_SHA384;
313 ctx->block_size = SHA384_BLOCK_SIZE;
314 break;
315 case SHA512_DIGEST_SIZE:
316 ctx->flags |= SHA_FLAGS_SHA512;
317 ctx->block_size = SHA512_BLOCK_SIZE;
318 break;
319 default:
320 return -EINVAL;
321 break;
322 }
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200323
324 ctx->bufcnt = 0;
Nicolas Royerd4905b32013-02-20 17:10:26 +0100325 ctx->digcnt[0] = 0;
326 ctx->digcnt[1] = 0;
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200327 ctx->buflen = SHA_BUFFER_LEN;
328
329 return 0;
330}
331
/*
 * Program the interrupt sources and the mode register before starting an
 * operation.  "dma" selects the data path: PDC/DMA mode (TXBUFE interrupt
 * when no dmaengine channel is used) vs. CPU-fed mode (DATARDY interrupt).
 */
static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 valmr = SHA_MR_MODE_AUTO;
	unsigned int i, hashsize = 0;

	if (likely(dma)) {
		if (!dd->caps.has_dma)
			atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
		valmr = SHA_MR_MODE_PDC;
		if (dd->caps.has_dualbuff)
			valmr |= SHA_MR_DUALBUFF;
	} else {
		atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
	}

	/*
	 * Select the algorithm.  hashsize is the size of the internal state
	 * to restore through the UIHV registers: SHA224 shares SHA256's
	 * 256-bit state and SHA384 shares SHA512's 512-bit state.
	 */
	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		valmr |= SHA_MR_ALGO_SHA1;
		hashsize = SHA1_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA224:
		valmr |= SHA_MR_ALGO_SHA224;
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA256:
		valmr |= SHA_MR_ALGO_SHA256;
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA384:
		valmr |= SHA_MR_ALGO_SHA384;
		hashsize = SHA512_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA512:
		valmr |= SHA_MR_ALGO_SHA512;
		hashsize = SHA512_DIGEST_SIZE;
		break;

	default:
		break;
	}

	/* Setting CR_FIRST only for the first iteration */
	if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
	} else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
		const u32 *hash = (const u32 *)ctx->digest;

		/*
		 * Restore the hardware context: update the User Initialize
		 * Hash Value (UIHV) with the value saved when the latest
		 * 'update' operation completed on this very same crypto
		 * request.
		 */
		ctx->flags &= ~SHA_FLAGS_RESTORE;
		atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
		for (i = 0; i < hashsize / sizeof(u32); ++i)
			atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]);
		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
		valmr |= SHA_MR_UIHV;
	}
	/*
	 * WARNING: If the UIHV feature is not available, the hardware CANNOT
	 * process concurrent requests: the internal registers used to store
	 * the hash/digest are still set to the partial digest output values
	 * computed during the latest round.
	 */

	atmel_sha_write(dd, SHA_MR, valmr);
}
406
407static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
408 size_t length, int final)
409{
410 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
411 int count, len32;
412 const u32 *buffer = (const u32 *)buf;
413
Nicolas Royerd4905b32013-02-20 17:10:26 +0100414 dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
415 ctx->digcnt[1], ctx->digcnt[0], length, final);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200416
417 atmel_sha_write_ctrl(dd, 0);
418
419 /* should be non-zero before next lines to disable clocks later */
Nicolas Royerd4905b32013-02-20 17:10:26 +0100420 ctx->digcnt[0] += length;
421 if (ctx->digcnt[0] < length)
422 ctx->digcnt[1]++;
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200423
424 if (final)
425 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
426
427 len32 = DIV_ROUND_UP(length, sizeof(u32));
428
429 dd->flags |= SHA_FLAGS_CPU;
430
431 for (count = 0; count < len32; count++)
432 atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);
433
434 return -EINPROGRESS;
435}
436
437static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
438 size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
439{
440 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
441 int len32;
442
Nicolas Royerd4905b32013-02-20 17:10:26 +0100443 dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
444 ctx->digcnt[1], ctx->digcnt[0], length1, final);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200445
446 len32 = DIV_ROUND_UP(length1, sizeof(u32));
447 atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
448 atmel_sha_write(dd, SHA_TPR, dma_addr1);
449 atmel_sha_write(dd, SHA_TCR, len32);
450
451 len32 = DIV_ROUND_UP(length2, sizeof(u32));
452 atmel_sha_write(dd, SHA_TNPR, dma_addr2);
453 atmel_sha_write(dd, SHA_TNCR, len32);
454
455 atmel_sha_write_ctrl(dd, 1);
456
457 /* should be non-zero before next lines to disable clocks later */
Nicolas Royerd4905b32013-02-20 17:10:26 +0100458 ctx->digcnt[0] += length1;
459 if (ctx->digcnt[0] < length1)
460 ctx->digcnt[1]++;
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200461
462 if (final)
463 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
464
465 dd->flags |= SHA_FLAGS_DMA_ACTIVE;
466
467 /* Start DMA transfer */
468 atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);
469
470 return -EINPROGRESS;
471}
472
/* dmaengine completion callback: arm DATRDY to finish the operation. */
static void atmel_sha_dma_callback(void *data)
{
	struct atmel_sha_dev *dd = data;

	/* dma_lch_in - completed - wait DATRDY */
	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
}
480
481static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
482 size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
483{
484 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
485 struct dma_async_tx_descriptor *in_desc;
486 struct scatterlist sg[2];
487
488 dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
489 ctx->digcnt[1], ctx->digcnt[0], length1, final);
490
Leilei Zhao3f1992c2015-04-07 17:45:07 +0800491 dd->dma_lch_in.dma_conf.src_maxburst = 16;
492 dd->dma_lch_in.dma_conf.dst_maxburst = 16;
Nicolas Royerd4905b32013-02-20 17:10:26 +0100493
494 dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
495
496 if (length2) {
497 sg_init_table(sg, 2);
498 sg_dma_address(&sg[0]) = dma_addr1;
499 sg_dma_len(&sg[0]) = length1;
500 sg_dma_address(&sg[1]) = dma_addr2;
501 sg_dma_len(&sg[1]) = length2;
502 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
503 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
504 } else {
505 sg_init_table(sg, 1);
506 sg_dma_address(&sg[0]) = dma_addr1;
507 sg_dma_len(&sg[0]) = length1;
508 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
509 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
510 }
511 if (!in_desc)
512 return -EINVAL;
513
514 in_desc->callback = atmel_sha_dma_callback;
515 in_desc->callback_param = dd;
516
517 atmel_sha_write_ctrl(dd, 1);
518
519 /* should be non-zero before next lines to disable clocks later */
520 ctx->digcnt[0] += length1;
521 if (ctx->digcnt[0] < length1)
522 ctx->digcnt[1]++;
523
524 if (final)
525 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
526
527 dd->flags |= SHA_FLAGS_DMA_ACTIVE;
528
529 /* Start DMA transfer */
530 dmaengine_submit(in_desc);
531 dma_async_issue_pending(dd->dma_lch_in.chan);
532
533 return -EINPROGRESS;
534}
535
536static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
537 size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
538{
539 if (dd->caps.has_dma)
540 return atmel_sha_xmit_dma(dd, dma_addr1, length1,
541 dma_addr2, length2, final);
542 else
543 return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
544 dma_addr2, length2, final);
545}
546
/*
 * Gather the remaining request data into ctx->buffer, append the final
 * padding and feed the whole buffer to the hardware by PIO as the last
 * block.
 */
static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;

	atmel_sha_append_sg(ctx);
	atmel_sha_fill_padding(ctx, 0);
	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}
559
560static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
561 struct atmel_sha_reqctx *ctx,
562 size_t length, int final)
563{
564 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
Nicolas Royerd4905b32013-02-20 17:10:26 +0100565 ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200566 if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
567 dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen +
Nicolas Royerd4905b32013-02-20 17:10:26 +0100568 ctx->block_size);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200569 return -EINVAL;
570 }
571
572 ctx->flags &= ~SHA_FLAGS_SG;
573
574 /* next call does not fail... so no unmap in the case of error */
Nicolas Royerd4905b32013-02-20 17:10:26 +0100575 return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200576}
577
578static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
579{
580 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
581 unsigned int final;
582 size_t count;
583
584 atmel_sha_append_sg(ctx);
585
586 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
587
Nicolas Royerd4905b32013-02-20 17:10:26 +0100588 dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: 0x%llx 0x%llx, final: %d\n",
589 ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200590
591 if (final)
592 atmel_sha_fill_padding(ctx, 0);
593
Ludovic Desroches00992862015-04-07 17:45:04 +0800594 if (final || (ctx->bufcnt == ctx->buflen)) {
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200595 count = ctx->bufcnt;
596 ctx->bufcnt = 0;
597 return atmel_sha_xmit_dma_map(dd, ctx, count, final);
598 }
599
600 return 0;
601}
602
603static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
604{
605 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
606 unsigned int length, final, tail;
607 struct scatterlist *sg;
608 unsigned int count;
609
610 if (!ctx->total)
611 return 0;
612
613 if (ctx->bufcnt || ctx->offset)
614 return atmel_sha_update_dma_slow(dd);
615
Nicolas Royerd4905b32013-02-20 17:10:26 +0100616 dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %u, total: %u\n",
617 ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200618
619 sg = ctx->sg;
620
621 if (!IS_ALIGNED(sg->offset, sizeof(u32)))
622 return atmel_sha_update_dma_slow(dd);
623
Nicolas Royerd4905b32013-02-20 17:10:26 +0100624 if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
625 /* size is not ctx->block_size aligned */
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200626 return atmel_sha_update_dma_slow(dd);
627
628 length = min(ctx->total, sg->length);
629
630 if (sg_is_last(sg)) {
631 if (!(ctx->flags & SHA_FLAGS_FINUP)) {
Nicolas Royerd4905b32013-02-20 17:10:26 +0100632 /* not last sg must be ctx->block_size aligned */
633 tail = length & (ctx->block_size - 1);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200634 length -= tail;
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200635 }
636 }
637
638 ctx->total -= length;
639 ctx->offset = length; /* offset where to start slow */
640
641 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
642
643 /* Add padding */
644 if (final) {
Nicolas Royerd4905b32013-02-20 17:10:26 +0100645 tail = length & (ctx->block_size - 1);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200646 length -= tail;
647 ctx->total += tail;
648 ctx->offset = length; /* offset where to start slow */
649
650 sg = ctx->sg;
651 atmel_sha_append_sg(ctx);
652
653 atmel_sha_fill_padding(ctx, length);
654
655 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
Nicolas Royerd4905b32013-02-20 17:10:26 +0100656 ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200657 if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
658 dev_err(dd->dev, "dma %u bytes error\n",
Nicolas Royerd4905b32013-02-20 17:10:26 +0100659 ctx->buflen + ctx->block_size);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200660 return -EINVAL;
661 }
662
663 if (length == 0) {
664 ctx->flags &= ~SHA_FLAGS_SG;
665 count = ctx->bufcnt;
666 ctx->bufcnt = 0;
Nicolas Royerd4905b32013-02-20 17:10:26 +0100667 return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200668 0, final);
669 } else {
670 ctx->sg = sg;
671 if (!dma_map_sg(dd->dev, ctx->sg, 1,
672 DMA_TO_DEVICE)) {
673 dev_err(dd->dev, "dma_map_sg error\n");
674 return -EINVAL;
675 }
676
677 ctx->flags |= SHA_FLAGS_SG;
678
679 count = ctx->bufcnt;
680 ctx->bufcnt = 0;
Nicolas Royerd4905b32013-02-20 17:10:26 +0100681 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200682 length, ctx->dma_addr, count, final);
683 }
684 }
685
686 if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
687 dev_err(dd->dev, "dma_map_sg error\n");
688 return -EINVAL;
689 }
690
691 ctx->flags |= SHA_FLAGS_SG;
692
693 /* next call does not fail... so no unmap in the case of error */
Nicolas Royerd4905b32013-02-20 17:10:26 +0100694 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200695 0, final);
696}
697
/*
 * Undo the DMA mappings created by the update path once the transfer has
 * completed, and advance the sg walk state.  Always returns 0.
 */
static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);

	if (ctx->flags & SHA_FLAGS_SG) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
		/*
		 * When the final padding was sent from ctx->buffer as a
		 * second buffer, its separate mapping must be released too.
		 */
		if (ctx->flags & SHA_FLAGS_PAD) {
			dma_unmap_single(dd->dev, ctx->dma_addr,
				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
		}
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
			ctx->block_size, DMA_TO_DEVICE);
	}

	return 0;
}
720
721static int atmel_sha_update_req(struct atmel_sha_dev *dd)
722{
723 struct ahash_request *req = dd->req;
724 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
725 int err;
726
Nicolas Royerd4905b32013-02-20 17:10:26 +0100727 dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
728 ctx->total, ctx->digcnt[1], ctx->digcnt[0]);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200729
730 if (ctx->flags & SHA_FLAGS_CPU)
731 err = atmel_sha_update_cpu(dd);
732 else
733 err = atmel_sha_update_dma_start(dd);
734
735 /* wait for dma completion before can take more data */
Nicolas Royerd4905b32013-02-20 17:10:26 +0100736 dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0%llx\n",
737 err, ctx->digcnt[1], ctx->digcnt[0]);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200738
739 return err;
740}
741
742static int atmel_sha_final_req(struct atmel_sha_dev *dd)
743{
744 struct ahash_request *req = dd->req;
745 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
746 int err = 0;
747 int count;
748
749 if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
750 atmel_sha_fill_padding(ctx, 0);
751 count = ctx->bufcnt;
752 ctx->bufcnt = 0;
753 err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
754 }
755 /* faster to handle last block with cpu */
756 else {
757 atmel_sha_fill_padding(ctx, 0);
758 count = ctx->bufcnt;
759 ctx->bufcnt = 0;
760 err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
761 }
762
763 dev_dbg(dd->dev, "final_req: err: %d\n", err);
764
765 return err;
766}
767
/*
 * Read the (partial) digest from the hardware into ctx->digest and mark
 * it restorable through the UIHV registers (SHA_FLAGS_RESTORE).
 */
static void atmel_sha_copy_hash(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	unsigned int i, hashsize;

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		hashsize = SHA1_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA224:
	case SHA_FLAGS_SHA256:
		/* SHA224 uses the full 256-bit internal state. */
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA384:
	case SHA_FLAGS_SHA512:
		/* SHA384 uses the full 512-bit internal state. */
		hashsize = SHA512_DIGEST_SIZE;
		break;

	default:
		/* Should not happen... */
		return;
	}

	for (i = 0; i < hashsize / sizeof(u32); ++i)
		hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
	ctx->flags |= SHA_FLAGS_RESTORE;
}
798
799static void atmel_sha_copy_ready_hash(struct ahash_request *req)
800{
801 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
802
803 if (!req->result)
804 return;
805
Nicolas Royerd4905b32013-02-20 17:10:26 +0100806 if (ctx->flags & SHA_FLAGS_SHA1)
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200807 memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
Nicolas Royerd4905b32013-02-20 17:10:26 +0100808 else if (ctx->flags & SHA_FLAGS_SHA224)
809 memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
810 else if (ctx->flags & SHA_FLAGS_SHA256)
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200811 memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
Nicolas Royerd4905b32013-02-20 17:10:26 +0100812 else if (ctx->flags & SHA_FLAGS_SHA384)
813 memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
814 else
815 memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200816}
817
818static int atmel_sha_finish(struct ahash_request *req)
819{
820 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
821 struct atmel_sha_dev *dd = ctx->dd;
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200822
Nicolas Royerd4905b32013-02-20 17:10:26 +0100823 if (ctx->digcnt[0] || ctx->digcnt[1])
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200824 atmel_sha_copy_ready_hash(req);
825
Nicolas Royerd4905b32013-02-20 17:10:26 +0100826 dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1],
827 ctx->digcnt[0], ctx->bufcnt);
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200828
Rahul Pathak871b88a2015-12-14 08:44:19 +0000829 return 0;
Nicolas Royerebc82ef2012-07-01 19:19:46 +0200830}
831
/*
 * Complete the current request: save the hardware digest, finish the
 * request if this was its last operation, release the clock, notify the
 * caller and kick the queue tasklet to start the next pending request.
 */
static void atmel_sha_finish_req(struct ahash_request *req, int err)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;

	if (!err) {
		atmel_sha_copy_hash(req);
		if (SHA_FLAGS_FINAL & dd->flags)
			err = atmel_sha_finish(req);
	} else {
		ctx->flags |= SHA_FLAGS_ERROR;
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
			SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);

	clk_disable_unprepare(dd->iclk);

	if (req->base.complete)
		req->base.complete(&req->base, err);

	/* handle new request */
	tasklet_schedule(&dd->queue_task);
}
857
/*
 * Enable the peripheral clock and, on first use only, soft-reset the IP.
 * Returns 0 on success or the clk_prepare_enable() error.
 */
static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
{
	int err;

	err = clk_prepare_enable(dd->iclk);
	if (err)
		return err;

	if (!(SHA_FLAGS_INIT & dd->flags)) {
		atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
		dd->flags |= SHA_FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}
874
/* The low 12 bits of SHA_HW_VERSION identify the IP revision. */
static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
{
	return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
}
879
880static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
881{
882 atmel_sha_hw_init(dd);
883
884 dd->hw_version = atmel_sha_get_version(dd);
885
886 dev_info(dd->dev,
887 "version: 0x%x\n", dd->hw_version);
888
889 clk_disable_unprepare(dd->iclk);
890}
891
/*
 * Enqueue "req" (may be NULL) and, unless the device is already busy,
 * dequeue the next request and start processing it.
 *
 * Returns the crypto queue status for the enqueued request (e.g.
 * -EINPROGRESS, or -EBUSY for a backlogged request), 0 when called with
 * req == NULL.  The processing result itself is reported through the
 * request's completion callback.
 */
static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_sha_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);

	/* Busy: the current request's completion will re-run the queue. */
	if (SHA_FLAGS_BUSY & dd->flags) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= SHA_FLAGS_BUSY;	/* claim the device */

	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	/* Tell a backlogged request it has entered the hardware queue. */
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->req = req;
	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = atmel_sha_hw_init(dd);

	if (err)
		goto err1;

	if (ctx->op == SHA_OP_UPDATE) {
		err = atmel_sha_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
			/* no final() after finup() */
			err = atmel_sha_final_req(dd);
	} else if (ctx->op == SHA_OP_FINAL) {
		err = atmel_sha_final_req(dd);
	}

err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		atmel_sha_finish_req(req, err);

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return ret;
}
952
953static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
954{
955 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
956 struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
957 struct atmel_sha_dev *dd = tctx->dd;
958
959 ctx->op = op;
960
961 return atmel_sha_handle_queue(dd, req);
962}
963
/*
 * ahash .update entry point: accumulate small fragments in the context
 * buffer, defer larger amounts to the hardware via the request queue.
 */
static int atmel_sha_update(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	/* Describe the incoming scatterlist data for the request context. */
	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & SHA_FLAGS_FINUP) {
		if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
			/* faster to use CPU for short transfers */
			ctx->flags |= SHA_FLAGS_CPU;
	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
		/* Still fits in the context buffer: accumulate and return
		 * without touching the hardware. */
		atmel_sha_append_sg(ctx);
		return 0;
	}
	return atmel_sha_enqueue(req, SHA_OP_UPDATE);
}
985
/*
 * ahash .final entry point: flush any buffered data, apply padding and
 * deliver the digest.
 */
static int atmel_sha_final(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct atmel_sha_dev *dd = tctx->dd;

	int err = 0;

	ctx->flags |= SHA_FLAGS_FINUP;

	if (ctx->flags & SHA_FLAGS_ERROR)
		return 0; /* uncompleted hash is not needed */

	if (ctx->bufcnt) {
		/* Buffered bytes remain: process them through the queue. */
		return atmel_sha_enqueue(req, SHA_OP_FINAL);
	} else if (!(ctx->flags & SHA_FLAGS_PAD)) { /* add padding */
		err = atmel_sha_hw_init(dd);
		if (err)
			goto err1;

		/* Claim the engine directly (no queueing needed). */
		dd->req = req;
		dd->flags |= SHA_FLAGS_BUSY;
		err = atmel_sha_final_req(dd);
	} else {
		/* copy ready hash (+ finalize hmac) */
		return atmel_sha_finish(req);
	}

err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		atmel_sha_finish_req(req, err);

	return err;
}
1021
1022static int atmel_sha_finup(struct ahash_request *req)
1023{
1024 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1025 int err1, err2;
1026
1027 ctx->flags |= SHA_FLAGS_FINUP;
1028
1029 err1 = atmel_sha_update(req);
1030 if (err1 == -EINPROGRESS || err1 == -EBUSY)
1031 return err1;
1032
1033 /*
1034 * final() has to be always called to cleanup resources
1035 * even if udpate() failed, except EINPROGRESS
1036 */
1037 err2 = atmel_sha_final(req);
1038
1039 return err1 ?: err2;
1040}
1041
/* ahash .digest entry point: one-shot init() + finup(). */
static int atmel_sha_digest(struct ahash_request *req)
{
	int err;

	err = atmel_sha_init(req);
	if (err)
		return err;

	return atmel_sha_finup(req);
}
1046
Cyrille Pitchencc831d32016-01-29 17:04:02 +01001047
1048static int atmel_sha_export(struct ahash_request *req, void *out)
1049{
1050 const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1051 struct atmel_sha_state state;
1052
1053 memcpy(state.digest, ctx->digest, SHA512_DIGEST_SIZE);
1054 memcpy(state.buffer, ctx->buffer, ctx->bufcnt);
1055 state.bufcnt = ctx->bufcnt;
1056 state.digcnt[0] = ctx->digcnt[0];
1057 state.digcnt[1] = ctx->digcnt[1];
1058
1059 /* out might be unaligned. */
1060 memcpy(out, &state, sizeof(state));
1061 return 0;
1062}
1063
1064static int atmel_sha_import(struct ahash_request *req, const void *in)
1065{
1066 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1067 struct atmel_sha_state state;
1068
1069 /* in might be unaligned. */
1070 memcpy(&state, in, sizeof(state));
1071
1072 memcpy(ctx->digest, state.digest, SHA512_DIGEST_SIZE);
1073 memcpy(ctx->buffer, state.buffer, state.bufcnt);
1074 ctx->bufcnt = state.bufcnt;
1075 ctx->digcnt[0] = state.digcnt[0];
1076 ctx->digcnt[1] = state.digcnt[1];
1077 return 0;
1078}
1079
Svenning Sørensenbe95f0f2014-12-05 01:18:57 +01001080static int atmel_sha_cra_init(struct crypto_tfm *tfm)
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001081{
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001082 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1083 sizeof(struct atmel_sha_reqctx) +
Nicolas Royerd4905b32013-02-20 17:10:26 +01001084 SHA_BUFFER_LEN + SHA512_BLOCK_SIZE);
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001085
1086 return 0;
1087}
1088
Nicolas Royerd4905b32013-02-20 17:10:26 +01001089static struct ahash_alg sha_1_256_algs[] = {
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001090{
1091 .init = atmel_sha_init,
1092 .update = atmel_sha_update,
1093 .final = atmel_sha_final,
1094 .finup = atmel_sha_finup,
1095 .digest = atmel_sha_digest,
Cyrille Pitchencc831d32016-01-29 17:04:02 +01001096 .export = atmel_sha_export,
1097 .import = atmel_sha_import,
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001098 .halg = {
1099 .digestsize = SHA1_DIGEST_SIZE,
Cyrille Pitchencc831d32016-01-29 17:04:02 +01001100 .statesize = sizeof(struct atmel_sha_state),
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001101 .base = {
1102 .cra_name = "sha1",
1103 .cra_driver_name = "atmel-sha1",
1104 .cra_priority = 100,
Svenning Sørensenbe95f0f2014-12-05 01:18:57 +01001105 .cra_flags = CRYPTO_ALG_ASYNC,
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001106 .cra_blocksize = SHA1_BLOCK_SIZE,
1107 .cra_ctxsize = sizeof(struct atmel_sha_ctx),
1108 .cra_alignmask = 0,
1109 .cra_module = THIS_MODULE,
1110 .cra_init = atmel_sha_cra_init,
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001111 }
1112 }
1113},
1114{
1115 .init = atmel_sha_init,
1116 .update = atmel_sha_update,
1117 .final = atmel_sha_final,
1118 .finup = atmel_sha_finup,
1119 .digest = atmel_sha_digest,
Cyrille Pitchencc831d32016-01-29 17:04:02 +01001120 .export = atmel_sha_export,
1121 .import = atmel_sha_import,
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001122 .halg = {
1123 .digestsize = SHA256_DIGEST_SIZE,
Cyrille Pitchencc831d32016-01-29 17:04:02 +01001124 .statesize = sizeof(struct atmel_sha_state),
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001125 .base = {
1126 .cra_name = "sha256",
1127 .cra_driver_name = "atmel-sha256",
1128 .cra_priority = 100,
Svenning Sørensenbe95f0f2014-12-05 01:18:57 +01001129 .cra_flags = CRYPTO_ALG_ASYNC,
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001130 .cra_blocksize = SHA256_BLOCK_SIZE,
1131 .cra_ctxsize = sizeof(struct atmel_sha_ctx),
1132 .cra_alignmask = 0,
1133 .cra_module = THIS_MODULE,
1134 .cra_init = atmel_sha_cra_init,
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001135 }
1136 }
1137},
1138};
1139
Nicolas Royerd4905b32013-02-20 17:10:26 +01001140static struct ahash_alg sha_224_alg = {
1141 .init = atmel_sha_init,
1142 .update = atmel_sha_update,
1143 .final = atmel_sha_final,
1144 .finup = atmel_sha_finup,
1145 .digest = atmel_sha_digest,
Cyrille Pitchencc831d32016-01-29 17:04:02 +01001146 .export = atmel_sha_export,
1147 .import = atmel_sha_import,
Nicolas Royerd4905b32013-02-20 17:10:26 +01001148 .halg = {
1149 .digestsize = SHA224_DIGEST_SIZE,
Cyrille Pitchencc831d32016-01-29 17:04:02 +01001150 .statesize = sizeof(struct atmel_sha_state),
Nicolas Royerd4905b32013-02-20 17:10:26 +01001151 .base = {
1152 .cra_name = "sha224",
1153 .cra_driver_name = "atmel-sha224",
1154 .cra_priority = 100,
Svenning Sørensenbe95f0f2014-12-05 01:18:57 +01001155 .cra_flags = CRYPTO_ALG_ASYNC,
Nicolas Royerd4905b32013-02-20 17:10:26 +01001156 .cra_blocksize = SHA224_BLOCK_SIZE,
1157 .cra_ctxsize = sizeof(struct atmel_sha_ctx),
1158 .cra_alignmask = 0,
1159 .cra_module = THIS_MODULE,
1160 .cra_init = atmel_sha_cra_init,
Nicolas Royerd4905b32013-02-20 17:10:26 +01001161 }
1162 }
1163};
1164
/* SHA-384/SHA-512: registered only when dd->caps.has_sha_384_512 is set.
 * Note the 0x3 alignmask, unlike the 32-bit-word algorithms above. */
static struct ahash_alg sha_384_512_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA384_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_state),
		.base	= {
			.cra_name		= "sha384",
			.cra_driver_name	= "atmel-sha384",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA384_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA512_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_state),
		.base	= {
			.cra_name		= "sha512",
			.cra_driver_name	= "atmel-sha512",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA512_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
};
1215
Cyrille Pitchenf56809c2016-01-15 15:49:32 +01001216static void atmel_sha_queue_task(unsigned long data)
1217{
1218 struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
1219
1220 atmel_sha_handle_queue(dd, NULL);
1221}
1222
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001223static void atmel_sha_done_task(unsigned long data)
1224{
1225 struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
1226 int err = 0;
1227
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001228 if (SHA_FLAGS_CPU & dd->flags) {
1229 if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
1230 dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
1231 goto finish;
1232 }
1233 } else if (SHA_FLAGS_DMA_READY & dd->flags) {
1234 if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
1235 dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
1236 atmel_sha_update_dma_stop(dd);
1237 if (dd->err) {
1238 err = dd->err;
1239 goto finish;
1240 }
1241 }
1242 if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
1243 /* hash or semi-hash ready */
1244 dd->flags &= ~(SHA_FLAGS_DMA_READY |
1245 SHA_FLAGS_OUTPUT_READY);
1246 err = atmel_sha_update_dma_start(dd);
1247 if (err != -EINPROGRESS)
1248 goto finish;
1249 }
1250 }
1251 return;
1252
1253finish:
1254 /* finish curent request */
1255 atmel_sha_finish_req(dd->req, err);
1256}
1257
/*
 * Interrupt handler: mask the reported events and defer the real work to
 * the done tasklet.  Shared IRQ, so return IRQ_NONE when none of our
 * enabled sources fired.
 */
static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
{
	struct atmel_sha_dev *sha_dd = dev_id;
	u32 reg;

	reg = atmel_sha_read(sha_dd, SHA_ISR);
	if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
		/* Disable the sources that fired; they are re-armed when
		 * the next operation is programmed. */
		atmel_sha_write(sha_dd, SHA_IDR, reg);
		if (SHA_FLAGS_BUSY & sha_dd->flags) {
			sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
			if (!(SHA_FLAGS_CPU & sha_dd->flags))
				sha_dd->flags |= SHA_FLAGS_DMA_READY;
			tasklet_schedule(&sha_dd->done_task);
		} else {
			dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
		}
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
1279
/*
 * Unregister every ahash algorithm registered by atmel_sha_register_algs(),
 * honouring the same capability checks.
 */
static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
		crypto_unregister_ahash(&sha_1_256_algs[i]);

	if (dd->caps.has_sha224)
		crypto_unregister_ahash(&sha_224_alg);

	if (dd->caps.has_sha_384_512) {
		for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
			crypto_unregister_ahash(&sha_384_512_algs[i]);
	}
}
1295
/*
 * Register the ahash algorithms supported by this IP revision.
 *
 * On failure, unwind in reverse: 'i' indexes the entry whose registration
 * failed, so each error label unregisters exactly what was registered.
 */
static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
		err = crypto_register_ahash(&sha_1_256_algs[i]);
		if (err)
			goto err_sha_1_256_algs;
	}

	if (dd->caps.has_sha224) {
		err = crypto_register_ahash(&sha_224_alg);
		if (err)
			goto err_sha_224_algs;
	}

	if (dd->caps.has_sha_384_512) {
		for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
			err = crypto_register_ahash(&sha_384_512_algs[i]);
			if (err)
				goto err_sha_384_512_algs;
		}
	}

	return 0;

err_sha_384_512_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_384_512_algs[j]);
	/* NOTE(review): sha_224_alg is unregistered unconditionally here.
	 * Safe in practice because every revision with has_sha_384_512 also
	 * sets has_sha224 (see atmel_sha_get_cap), but a guard would be
	 * more robust. */
	crypto_unregister_ahash(&sha_224_alg);
err_sha_224_algs:
	/* All sha1/sha256 entries were registered by this point. */
	i = ARRAY_SIZE(sha_1_256_algs);
err_sha_1_256_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_1_256_algs[j]);

	return err;
}
1334
Nicolas Royerd4905b32013-02-20 17:10:26 +01001335static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
1336{
1337 struct at_dma_slave *sl = slave;
1338
1339 if (sl && sl->dma_dev == chan->device->dev) {
1340 chan->private = sl;
1341 return true;
1342 } else {
1343 return false;
1344 }
1345}
1346
/*
 * Acquire the DMA channel feeding the SHA input FIFO and describe the
 * slave transfer: memory -> SHA_REG_DIN, 4-byte accesses, single-beat
 * bursts.
 *
 * Returns 0 on success, -ENOMEM when no channel is available.
 */
static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
				struct crypto_platform_data *pdata)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask_in;

	/* Try to grab DMA channel */
	dma_cap_zero(mask_in);
	dma_cap_set(DMA_SLAVE, mask_in);

	dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
			atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
	if (!dd->dma_lch_in.chan) {
		dev_warn(dd->dev, "no DMA channel available\n");
		return err;
	}

	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		SHA_REG_DIN(0);
	dd->dma_lch_in.dma_conf.src_maxburst = 1;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	return 0;
}
1377
/* Release the DMA channel acquired by atmel_sha_dma_init(). */
static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
}
1382
1383static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
1384{
1385
1386 dd->caps.has_dma = 0;
1387 dd->caps.has_dualbuff = 0;
1388 dd->caps.has_sha224 = 0;
1389 dd->caps.has_sha_384_512 = 0;
Cyrille Pitchen7cee3502016-01-15 15:49:34 +01001390 dd->caps.has_uihv = 0;
Nicolas Royerd4905b32013-02-20 17:10:26 +01001391
1392 /* keep only major version number */
1393 switch (dd->hw_version & 0xff0) {
Cyrille Pitchen507c5cc2016-01-15 15:49:33 +01001394 case 0x510:
1395 dd->caps.has_dma = 1;
1396 dd->caps.has_dualbuff = 1;
1397 dd->caps.has_sha224 = 1;
1398 dd->caps.has_sha_384_512 = 1;
Cyrille Pitchen7cee3502016-01-15 15:49:34 +01001399 dd->caps.has_uihv = 1;
Cyrille Pitchen507c5cc2016-01-15 15:49:33 +01001400 break;
Leilei Zhao141824d2015-04-07 17:45:03 +08001401 case 0x420:
1402 dd->caps.has_dma = 1;
1403 dd->caps.has_dualbuff = 1;
1404 dd->caps.has_sha224 = 1;
1405 dd->caps.has_sha_384_512 = 1;
Cyrille Pitchen7cee3502016-01-15 15:49:34 +01001406 dd->caps.has_uihv = 1;
Leilei Zhao141824d2015-04-07 17:45:03 +08001407 break;
Nicolas Royerd4905b32013-02-20 17:10:26 +01001408 case 0x410:
1409 dd->caps.has_dma = 1;
1410 dd->caps.has_dualbuff = 1;
1411 dd->caps.has_sha224 = 1;
1412 dd->caps.has_sha_384_512 = 1;
1413 break;
1414 case 0x400:
1415 dd->caps.has_dma = 1;
1416 dd->caps.has_dualbuff = 1;
1417 dd->caps.has_sha224 = 1;
1418 break;
1419 case 0x320:
1420 break;
1421 default:
1422 dev_warn(dd->dev,
1423 "Unmanaged sha version, set minimum capabilities\n");
1424 break;
1425 }
1426}
1427
Nicolas Ferreabfe7ae2013-10-15 15:36:34 +02001428#if defined(CONFIG_OF)
/* Device-tree match table: the driver binds to "atmel,at91sam9g46-sha". */
static const struct of_device_id atmel_sha_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-sha" },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
1435
1436static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
1437{
1438 struct device_node *np = pdev->dev.of_node;
1439 struct crypto_platform_data *pdata;
1440
1441 if (!np) {
1442 dev_err(&pdev->dev, "device node not found\n");
1443 return ERR_PTR(-EINVAL);
1444 }
1445
1446 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1447 if (!pdata) {
1448 dev_err(&pdev->dev, "could not allocate memory for pdata\n");
1449 return ERR_PTR(-ENOMEM);
1450 }
1451
1452 pdata->dma_slave = devm_kzalloc(&pdev->dev,
1453 sizeof(*(pdata->dma_slave)),
1454 GFP_KERNEL);
1455 if (!pdata->dma_slave) {
1456 dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
Nicolas Ferreabfe7ae2013-10-15 15:36:34 +02001457 return ERR_PTR(-ENOMEM);
1458 }
1459
1460 return pdata;
1461}
1462#else /* CONFIG_OF */
/* !CONFIG_OF stub: device-tree probing is not available in this build. */
static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
{
	return ERR_PTR(-EINVAL);
}
1467#endif
1468
Greg Kroah-Hartman49cfe4d2012-12-21 13:14:09 -08001469static int atmel_sha_probe(struct platform_device *pdev)
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001470{
1471 struct atmel_sha_dev *sha_dd;
Nicolas Royerd4905b32013-02-20 17:10:26 +01001472 struct crypto_platform_data *pdata;
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001473 struct device *dev = &pdev->dev;
1474 struct resource *sha_res;
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001475 int err;
1476
LABBE Corentinb0e8b342015-10-12 19:47:03 +02001477 sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001478 if (sha_dd == NULL) {
1479 dev_err(dev, "unable to alloc data struct.\n");
1480 err = -ENOMEM;
1481 goto sha_dd_err;
1482 }
1483
1484 sha_dd->dev = dev;
1485
1486 platform_set_drvdata(pdev, sha_dd);
1487
1488 INIT_LIST_HEAD(&sha_dd->list);
Leilei Zhao62728e82015-04-07 17:45:06 +08001489 spin_lock_init(&sha_dd->lock);
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001490
1491 tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
1492 (unsigned long)sha_dd);
Cyrille Pitchenf56809c2016-01-15 15:49:32 +01001493 tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task,
1494 (unsigned long)sha_dd);
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001495
1496 crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
1497
1498 sha_dd->irq = -1;
1499
1500 /* Get the base address */
1501 sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1502 if (!sha_res) {
1503 dev_err(dev, "no MEM resource info\n");
1504 err = -ENODEV;
1505 goto res_err;
1506 }
1507 sha_dd->phys_base = sha_res->start;
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001508
1509 /* Get the IRQ */
1510 sha_dd->irq = platform_get_irq(pdev, 0);
1511 if (sha_dd->irq < 0) {
1512 dev_err(dev, "no IRQ resource info\n");
1513 err = sha_dd->irq;
1514 goto res_err;
1515 }
1516
LABBE Corentinb0e8b342015-10-12 19:47:03 +02001517 err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
1518 IRQF_SHARED, "atmel-sha", sha_dd);
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001519 if (err) {
1520 dev_err(dev, "unable to request sha irq.\n");
1521 goto res_err;
1522 }
1523
1524 /* Initializing the clock */
LABBE Corentinb0e8b342015-10-12 19:47:03 +02001525 sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk");
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001526 if (IS_ERR(sha_dd->iclk)) {
Colin Ian Kingbe208352015-02-28 20:40:10 +00001527 dev_err(dev, "clock initialization failed.\n");
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001528 err = PTR_ERR(sha_dd->iclk);
LABBE Corentinb0e8b342015-10-12 19:47:03 +02001529 goto res_err;
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001530 }
1531
LABBE Corentinb0e8b342015-10-12 19:47:03 +02001532 sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001533 if (!sha_dd->io_base) {
1534 dev_err(dev, "can't ioremap\n");
1535 err = -ENOMEM;
LABBE Corentinb0e8b342015-10-12 19:47:03 +02001536 goto res_err;
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001537 }
1538
Nicolas Royerd4905b32013-02-20 17:10:26 +01001539 atmel_sha_hw_version_init(sha_dd);
1540
1541 atmel_sha_get_cap(sha_dd);
1542
1543 if (sha_dd->caps.has_dma) {
1544 pdata = pdev->dev.platform_data;
1545 if (!pdata) {
Nicolas Ferreabfe7ae2013-10-15 15:36:34 +02001546 pdata = atmel_sha_of_init(pdev);
1547 if (IS_ERR(pdata)) {
1548 dev_err(&pdev->dev, "platform data not available\n");
1549 err = PTR_ERR(pdata);
LABBE Corentinb0e8b342015-10-12 19:47:03 +02001550 goto res_err;
Nicolas Ferreabfe7ae2013-10-15 15:36:34 +02001551 }
1552 }
1553 if (!pdata->dma_slave) {
Nicolas Royerd4905b32013-02-20 17:10:26 +01001554 err = -ENXIO;
LABBE Corentinb0e8b342015-10-12 19:47:03 +02001555 goto res_err;
Nicolas Royerd4905b32013-02-20 17:10:26 +01001556 }
1557 err = atmel_sha_dma_init(sha_dd, pdata);
1558 if (err)
1559 goto err_sha_dma;
Nicolas Ferreabfe7ae2013-10-15 15:36:34 +02001560
1561 dev_info(dev, "using %s for DMA transfers\n",
1562 dma_chan_name(sha_dd->dma_lch_in.chan));
Nicolas Royerd4905b32013-02-20 17:10:26 +01001563 }
1564
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001565 spin_lock(&atmel_sha.lock);
1566 list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
1567 spin_unlock(&atmel_sha.lock);
1568
1569 err = atmel_sha_register_algs(sha_dd);
1570 if (err)
1571 goto err_algs;
1572
Nicolas Ferre1ca5b7d2013-10-15 16:37:44 +02001573 dev_info(dev, "Atmel SHA1/SHA256%s%s\n",
1574 sha_dd->caps.has_sha224 ? "/SHA224" : "",
1575 sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : "");
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001576
1577 return 0;
1578
1579err_algs:
1580 spin_lock(&atmel_sha.lock);
1581 list_del(&sha_dd->list);
1582 spin_unlock(&atmel_sha.lock);
Nicolas Royerd4905b32013-02-20 17:10:26 +01001583 if (sha_dd->caps.has_dma)
1584 atmel_sha_dma_cleanup(sha_dd);
1585err_sha_dma:
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001586res_err:
Cyrille Pitchenf56809c2016-01-15 15:49:32 +01001587 tasklet_kill(&sha_dd->queue_task);
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001588 tasklet_kill(&sha_dd->done_task);
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001589sha_dd_err:
1590 dev_err(dev, "initialization failed.\n");
1591
1592 return err;
1593}
1594
Greg Kroah-Hartman49cfe4d2012-12-21 13:14:09 -08001595static int atmel_sha_remove(struct platform_device *pdev)
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001596{
1597 static struct atmel_sha_dev *sha_dd;
1598
1599 sha_dd = platform_get_drvdata(pdev);
1600 if (!sha_dd)
1601 return -ENODEV;
1602 spin_lock(&atmel_sha.lock);
1603 list_del(&sha_dd->list);
1604 spin_unlock(&atmel_sha.lock);
1605
1606 atmel_sha_unregister_algs(sha_dd);
1607
Cyrille Pitchenf56809c2016-01-15 15:49:32 +01001608 tasklet_kill(&sha_dd->queue_task);
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001609 tasklet_kill(&sha_dd->done_task);
1610
Nicolas Royerd4905b32013-02-20 17:10:26 +01001611 if (sha_dd->caps.has_dma)
1612 atmel_sha_dma_cleanup(sha_dd);
1613
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001614 iounmap(sha_dd->io_base);
1615
1616 clk_put(sha_dd->iclk);
1617
1618 if (sha_dd->irq >= 0)
1619 free_irq(sha_dd->irq, sha_dd);
1620
Nicolas Royerebc82ef2012-07-01 19:19:46 +02001621 return 0;
1622}
1623
/* Platform driver glue; DT matching is compiled in only with CONFIG_OF. */
static struct platform_driver atmel_sha_driver = {
	.probe		= atmel_sha_probe,
	.remove		= atmel_sha_remove,
	.driver		= {
		.name	= "atmel_sha",
		.of_match_table	= of_match_ptr(atmel_sha_dt_ids),
	},
};

module_platform_driver(atmel_sha_driver);

MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");