/*
 * Cryptographic API.
 *
 * Support for OMAP SHA1/MD5 HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from old omap-sha1-md5.c driver.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>

#include <plat/cpu.h>
#include <plat/dma.h>
#include <mach/irqs.h>

#define SHA_REG_DIGEST(x)		(0x00 + ((x) * 0x04))
#define SHA_REG_DIN(x)			(0x1C + ((x) * 0x04))

#define SHA1_MD5_BLOCK_SIZE		SHA1_BLOCK_SIZE
#define MD5_DIGEST_SIZE			16

#define SHA_REG_DIGCNT			0x14

#define SHA_REG_CTRL			0x18
#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
#define SHA_REG_CTRL_ALGO		(1 << 2)
#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)

#define SHA_REG_REV			0x5C
#define SHA_REG_REV_MAJOR		0xF0
#define SHA_REG_REV_MINOR		0x0F

#define SHA_REG_MASK			0x60
#define SHA_REG_MASK_DMA_EN		(1 << 3)
#define SHA_REG_MASK_IT_EN		(1 << 2)
#define SHA_REG_MASK_SOFTRESET		(1 << 1)
#define SHA_REG_AUTOIDLE		(1 << 0)

#define SHA_REG_SYSSTATUS		0x64
#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define DEFAULT_TIMEOUT_INTERVAL	HZ

#define FLAGS_FIRST		0x0001
#define FLAGS_FINUP		0x0002
#define FLAGS_FINAL		0x0004
#define FLAGS_FAST		0x0008
#define FLAGS_SHA1		0x0010
#define FLAGS_DMA_ACTIVE	0x0020
#define FLAGS_OUTPUT_READY	0x0040
#define FLAGS_CLEAN		0x0080
#define FLAGS_INIT		0x0100
#define FLAGS_CPU		0x0200
#define FLAGS_HMAC		0x0400

/* 3rd byte */
#define FLAGS_BUSY		16

#define OP_UPDATE		1
#define OP_FINAL		2

struct omap_sham_dev;

struct omap_sham_reqctx {
	struct omap_sham_dev	*dd;
	unsigned long		flags;
	unsigned long		op;

	u8			digest[SHA1_DIGEST_SIZE];
	size_t			digcnt;
	u8			*buffer;
	size_t			bufcnt;
	size_t			buflen;
	dma_addr_t		dma_addr;

	/* walk state */
	struct scatterlist	*sg;
	unsigned int		offset;	/* offset in current sg */
	unsigned int		total;	/* total request */
};

struct omap_sham_hmac_ctx {
	struct crypto_shash	*shash;
	u8			ipad[SHA1_MD5_BLOCK_SIZE];
	u8			opad[SHA1_MD5_BLOCK_SIZE];
};

struct omap_sham_ctx {
	struct omap_sham_dev	*dd;

	unsigned long		flags;

	/* fallback stuff */
	struct crypto_shash	*fallback;

	struct omap_sham_hmac_ctx base[0];
};

#define OMAP_SHAM_QUEUE_LENGTH	1

struct omap_sham_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	void __iomem		*io_base;
	int			irq;
	struct clk		*iclk;
	spinlock_t		lock;
	int			dma;
	int			dma_lch;
	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;
};

struct omap_sham_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
	unsigned long		flags;
};

static struct omap_sham_drv sham = {
	.dev_list = LIST_HEAD_INIT(sham.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};

static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_sham_write(struct omap_sham_dev *dd,
					u32 offset, u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_sham_read(dd, address);
	val &= ~mask;
	val |= value;
	omap_sham_write(dd, address, val);
}

static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
{
	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;

	while (!(omap_sham_read(dd, offset) & bit)) {
		if (time_is_before_jiffies(timeout))
			return -ETIMEDOUT;
	}

	return 0;
}

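/*
 * Copy the hash between the DIGEST registers and the request context:
 * out != 0 reads the hardware digest into ctx->digest, out == 0 restores
 * a previously saved digest into the hardware.
 */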
static void omap_sham_copy_hash(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	int i;

	if (likely(ctx->flags & FLAGS_SHA1)) {
		/* SHA1 results are in big endian */
		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
			if (out)
				hash[i] = be32_to_cpu(omap_sham_read(ctx->dd,
							SHA_REG_DIGEST(i)));
			else
				omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
						cpu_to_be32(hash[i]));
	} else {
		/* MD5 results are in little endian */
		for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
			if (out)
				hash[i] = le32_to_cpu(omap_sham_read(ctx->dd,
							SHA_REG_DIGEST(i)));
			else
				omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
						cpu_to_le32(hash[i]));
	}
}

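/*
 * Program the module for the next transfer: enable the interface clock and
 * soft-reset the module before the very first block, restore DIGCNT
 * otherwise, then set up the MASK (IRQ/DMA enable) and CTRL registers.
 */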
static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
				 int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val = length << 5, mask;

	if (unlikely(!ctx->digcnt)) {

		clk_enable(dd->iclk);

		if (!(dd->flags & FLAGS_INIT)) {
			omap_sham_write_mask(dd, SHA_REG_MASK,
				SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);

			if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
						SHA_REG_SYSSTATUS_RESETDONE))
				return -ETIMEDOUT;

			dd->flags |= FLAGS_INIT;
		}
	} else {
		omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
	}

	omap_sham_write_mask(dd, SHA_REG_MASK,
		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
	/*
	 * Setting ALGO_CONST only for the first iteration
	 * and CLOSE_HASH only for the last one.
	 */
	if (ctx->flags & FLAGS_SHA1)
		val |= SHA_REG_CTRL_ALGO;
	if (!ctx->digcnt)
		val |= SHA_REG_CTRL_ALGO_CONST;
	if (final)
		val |= SHA_REG_CTRL_CLOSE_HASH;

	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
			SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;

	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);

	return 0;
}

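/* Feed a buffer to the hash module by PIO, one 32-bit word at a time. */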
static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int err, count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	err = omap_sham_write_ctrl(dd, length, final, 0);
	if (err)
		return err;

	if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
		return -ETIMEDOUT;

	ctx->digcnt += length;

	if (final)
		ctx->flags |= FLAGS_FINAL; /* catch last interrupt */

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);

	return -EINPROGRESS;
}

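/*
 * Push a buffer into the DIN FIFO through the system DMA channel; the
 * transfer completes asynchronously and is reaped by the done tasklet.
 */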
static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
			      size_t length, int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int err, len32;

	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	/* flush cache entries related to our page */
	if (dma_addr == ctx->dma_addr)
		dma_sync_single_for_device(dd->dev, dma_addr, length,
					   DMA_TO_DEVICE);

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
			1, OMAP_DMA_SYNC_PACKET, dd->dma,
				OMAP_DMA_DST_SYNC_PREFETCH);

	omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
				dma_addr, 0, 0);

	err = omap_sham_write_ctrl(dd, length, final, 1);
	if (err)
		return err;

	ctx->digcnt += length;

	if (final)
		ctx->flags |= FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= FLAGS_DMA_ACTIVE;

	omap_start_dma(dd->dma_lch);

	return -EINPROGRESS;
}

static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
				const u8 *data, size_t length)
{
	size_t count = min(length, ctx->buflen - ctx->bufcnt);

	count = min(count, ctx->total);
	if (count <= 0)
		return 0;
	memcpy(ctx->buffer + ctx->bufcnt, data, count);
	ctx->bufcnt += count;

	return count;
}

static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
{
	size_t count;

	while (ctx->sg) {
		count = omap_sham_append_buffer(ctx,
				sg_virt(ctx->sg) + ctx->offset,
				ctx->sg->length - ctx->offset);
		if (!count)
			break;
		ctx->offset += count;
		ctx->total -= count;
		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}

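/*
 * "Slow" DMA path: data is gathered into the driver's bounce page and sent
 * out only when the page is full or the request is final.
 */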
static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int final;
	size_t count;

	if (!ctx->total)
		return 0;

	omap_sham_append_sg(ctx);

	final = (ctx->flags & FLAGS_FINUP) && !ctx->total;

	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
					 ctx->bufcnt, ctx->digcnt, final);

	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final);
	}

	return 0;
}

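/*
 * "Fast" DMA path: a single, word-aligned scatterlist entry is DMA-mapped
 * and fed to the module directly, bypassing the bounce buffer.
 */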
static int omap_sham_update_dma_fast(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length;

	ctx->flags |= FLAGS_FAST;

	length = min(ctx->total, sg_dma_len(ctx->sg));
	ctx->total = length;

	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	ctx->total -= length;

	return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1);
}

static int omap_sham_update_cpu(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;

	omap_sham_append_sg(ctx);
	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}

static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	omap_stop_dma(dd->dma_lch);
	if (ctx->flags & FLAGS_FAST)
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);

	return 0;
}

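/*
 * Release per-request resources exactly once (guarded by FLAGS_CLEAN):
 * copy the digest into req->result, drop the clock reference and free the
 * DMA-mapped bounce page.
 */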
static void omap_sham_cleanup(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	unsigned long flags;

	spin_lock_irqsave(&dd->lock, flags);
	if (ctx->flags & FLAGS_CLEAN) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return;
	}
	ctx->flags |= FLAGS_CLEAN;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (ctx->digcnt) {
		clk_disable(dd->iclk);
		memcpy(req->result, ctx->digest, (ctx->flags & FLAGS_SHA1) ?
				SHA1_DIGEST_SIZE : MD5_DIGEST_SIZE);
	}

	if (ctx->dma_addr)
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
				 DMA_TO_DEVICE);

	if (ctx->buffer)
		free_page((unsigned long)ctx->buffer);

	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
}

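/*
 * ahash ->init(): bind the request to a device, reset the request context,
 * allocate and DMA-map a one-page bounce buffer and, for HMAC transforms,
 * preload the buffer with the ipad block.
 */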
static int omap_sham_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = NULL, *tmp;

	spin_lock_bh(&sham.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &sham.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&sham.lock);

	ctx->dd = dd;

	ctx->flags = 0;

	ctx->flags |= FLAGS_FIRST;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
		ctx->flags |= FLAGS_SHA1;

	ctx->bufcnt = 0;
	ctx->digcnt = 0;

	ctx->buflen = PAGE_SIZE;
	ctx->buffer = (void *)__get_free_page(
				(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
				GFP_KERNEL : GFP_ATOMIC);
	if (!ctx->buffer)
		return -ENOMEM;

	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
		free_page((unsigned long)ctx->buffer);
		return -EINVAL;
	}

	if (tctx->flags & FLAGS_HMAC) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;

		memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
		ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
		ctx->flags |= FLAGS_HMAC;
	}

	return 0;
}

static int omap_sham_update_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
		 ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0);

	if (ctx->flags & FLAGS_CPU)
		err = omap_sham_update_cpu(dd);
	else if (ctx->flags & FLAGS_FAST)
		err = omap_sham_update_dma_fast(dd);
	else
		err = omap_sham_update_dma_slow(dd);

	/* wait for DMA completion before we can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);

	return err;
}

static int omap_sham_final_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0, use_dma = 1;

	if (ctx->bufcnt <= 64)
		/* faster to handle last block with cpu */
		use_dma = 0;

	if (use_dma)
		err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1);
	else
		err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);

	ctx->bufcnt = 0;

	if (err != -EINPROGRESS)
		omap_sham_cleanup(req);

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}

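/*
 * Second HMAC pass: hash opad || inner digest with the software shash and
 * store the result back into ctx->digest.
 */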
static int omap_sham_finish_req_hmac(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(bctx->shash)];
	} desc;

	desc.shash.tfm = bctx->shash;
	desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

	return crypto_shash_init(&desc.shash) ?:
	       crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
	       crypto_shash_finup(&desc.shash, ctx->digest, ds, ctx->digest);
}

static void omap_sham_finish_req(struct ahash_request *req, int err)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	if (!err) {
		omap_sham_copy_hash(ctx->dd->req, 1);
		if (ctx->flags & FLAGS_HMAC)
			err = omap_sham_finish_req_hmac(req);
	}

	if (ctx->flags & FLAGS_FINAL)
		omap_sham_cleanup(req);

	clear_bit(FLAGS_BUSY, &ctx->dd->flags);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}

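/*
 * Take the next request off the queue if the device is idle, restore its
 * hash state when the hardware was last used by a different request, and
 * start the update or final step. Requests that complete synchronously are
 * finished here and the queue tasklet is rescheduled.
 */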
static int omap_sham_handle_queue(struct omap_sham_dev *dd)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_sham_reqctx *ctx;
	struct ahash_request *req, *prev_req;
	unsigned long flags;
	int err = 0;

	if (test_and_set_bit(FLAGS_BUSY, &dd->flags))
		return 0;

	spin_lock_irqsave(&dd->lock, flags);
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (!async_req)
		clear_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return 0;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);

	prev_req = dd->req;
	dd->req = req;

	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	if (req != prev_req && ctx->digcnt)
		/* request has changed - restore hash */
		omap_sham_copy_hash(req, 0);

	if (ctx->op == OP_UPDATE) {
		err = omap_sham_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP))
			/* no final() after finup() */
			err = omap_sham_final_req(dd);
	} else if (ctx->op == OP_FINAL) {
		err = omap_sham_final_req(dd);
	}

	if (err != -EINPROGRESS) {
		/* done_task will not finish it, so do it here */
		omap_sham_finish_req(req, err);
		tasklet_schedule(&dd->queue_task);
	}

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return err;
}

static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_dev *dd = tctx->dd;
	unsigned long flags;
	int err;

	ctx->op = op;

	spin_lock_irqsave(&dd->lock, flags);
	err = ahash_enqueue_request(&dd->queue, req);
	spin_unlock_irqrestore(&dd->lock, flags);

	omap_sham_handle_queue(dd);

	return err;
}

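/*
 * ahash ->update(): totals below 9 bytes are only buffered (the hardware
 * cannot close such a short hash), up to one block is handled by PIO, and
 * a single aligned sg entry of a digest-style request may use the fast DMA
 * path; everything else goes through the buffered (slow) DMA path.
 */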
static int omap_sham_update(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & FLAGS_FINUP) {
		if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
			/*
			 * OMAP HW accel works only with buffers >= 9;
			 * will switch to bypass in final().
			 * final() has the same request and data.
			 */
			omap_sham_append_sg(ctx);
			return 0;
		} else if (ctx->bufcnt + ctx->total <= 64) {
			ctx->flags |= FLAGS_CPU;
		} else if (!ctx->bufcnt && sg_is_last(ctx->sg)) {
			/* maybe we can use faster functions */
			int aligned = IS_ALIGNED((u32)ctx->sg->offset,
								sizeof(u32));

			if (aligned && (ctx->flags & FLAGS_FIRST))
				/* digest: first and final */
				ctx->flags |= FLAGS_FAST;

			ctx->flags &= ~FLAGS_FIRST;
		}
	} else if (ctx->bufcnt + ctx->total <= ctx->buflen) {
		/* if not finup -> not fast */
		omap_sham_append_sg(ctx);
		return 0;
	}

	return omap_sham_enqueue(req, OP_UPDATE);
}

static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
				  const u8 *data, unsigned int len, u8 *out)
{
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(shash)];
	} desc;

	desc.shash.tfm = shash;
	desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_shash_digest(&desc.shash, data, len, out);
}

static int omap_sham_final_shash(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	return omap_sham_shash_digest(tctx->fallback, req->base.flags,
				      ctx->buffer, ctx->bufcnt, req->result);
}

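/*
 * ahash ->final(): totals below 9 bytes bypass the hardware and use the
 * software fallback; otherwise any buffered remainder is pushed through
 * the hardware as the closing block.
 */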
static int omap_sham_final(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0;

	ctx->flags |= FLAGS_FINUP;

	/* OMAP HW accel works only with buffers >= 9 */
	/* HMAC is always >= 9 because of ipad */
	if ((ctx->digcnt + ctx->bufcnt) < 9)
		err = omap_sham_final_shash(req);
	else if (ctx->bufcnt)
		return omap_sham_enqueue(req, OP_FINAL);

	omap_sham_cleanup(req);

	return err;
}

static int omap_sham_finup(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= FLAGS_FINUP;

	err1 = omap_sham_update(req);
	if (err1 == -EINPROGRESS)
		return err1;
	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = omap_sham_final(req);

	return err1 ?: err2;
}

static int omap_sham_digest(struct ahash_request *req)
{
	return omap_sham_init(req) ?: omap_sham_finup(req);
}

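/*
 * HMAC ->setkey(): keys longer than the block size are first hashed down
 * to digest size, then ipad/opad are derived by padding the key with
 * zeroes and XORing with 0x36/0x5c. The key is also given to the fallback.
 */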
static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	int err, i;

	err = crypto_shash_setkey(tctx->fallback, key, keylen);
	if (err)
		return err;

	if (keylen > bs) {
		err = omap_sham_shash_digest(bctx->shash,
				crypto_shash_get_flags(bctx->shash),
				key, keylen, bctx->ipad);
		if (err)
			return err;
		keylen = ds;
	} else {
		memcpy(bctx->ipad, key, keylen);
	}

	memset(bctx->ipad + keylen, 0, bs - keylen);
	memcpy(bctx->opad, bctx->ipad, bs);

	for (i = 0; i < bs; i++) {
		bctx->ipad[i] ^= 0x36;
		bctx->opad[i] ^= 0x5c;
	}

	return err;
}

static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("omap-sham: fallback driver '%s' could not be loaded.\n",
		       alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct omap_sham_reqctx));

	if (alg_base) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;

		tctx->flags |= FLAGS_HMAC;
		bctx->shash = crypto_alloc_shash(alg_base, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(bctx->shash)) {
			pr_err("omap-sham: base driver '%s' could not be loaded.\n",
			       alg_base);
			crypto_free_shash(tctx->fallback);
			return PTR_ERR(bctx->shash);
		}
	}

	return 0;
}

static int omap_sham_cra_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, NULL);
}

static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha1");
}

static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "md5");
}

static void omap_sham_cra_exit(struct crypto_tfm *tfm)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;

	if (tctx->flags & FLAGS_HMAC) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		crypto_free_shash(bctx->shash);
	}
}

static struct ahash_alg algs[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "omap-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "md5",
		.cra_driver_name	= "omap-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha1)",
		.cra_driver_name	= "omap-hmac-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha1_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(md5)",
		.cra_driver_name	= "omap-hmac-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_md5_init,
		.cra_exit		= omap_sham_cra_exit,
	}
}
};

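/*
 * Bottom half shared by the IRQ and DMA completion paths: stop or continue
 * the DMA transfer as needed and, unless another transfer was started,
 * finish the current request and pump the queue.
 */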
static void omap_sham_done_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int ready = 1;

	if (ctx->flags & FLAGS_OUTPUT_READY) {
		ctx->flags &= ~FLAGS_OUTPUT_READY;
		ready = 1;
	}

	if (dd->flags & FLAGS_DMA_ACTIVE) {
		dd->flags &= ~FLAGS_DMA_ACTIVE;
		omap_sham_update_dma_stop(dd);
		omap_sham_update_dma_slow(dd);
	}

	if (ready && !(dd->flags & FLAGS_DMA_ACTIVE)) {
		dev_dbg(dd->dev, "update done\n");
		/* finish current request */
		omap_sham_finish_req(req, 0);
		/* start new request */
		omap_sham_handle_queue(dd);
	}
}

static void omap_sham_queue_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;

	omap_sham_handle_queue(dd);
}

static irqreturn_t omap_sham_irq(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	if (!ctx) {
		dev_err(dd->dev, "unknown interrupt.\n");
		return IRQ_HANDLED;
	}

	if (unlikely(ctx->flags & FLAGS_FINAL))
		/* final -> allow device to go to power-saving mode */
		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);

	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
				 SHA_REG_CTRL_OUTPUT_READY);
	omap_sham_read(dd, SHA_REG_CTRL);

	ctx->flags |= FLAGS_OUTPUT_READY;
	tasklet_schedule(&dd->done_task);

	return IRQ_HANDLED;
}

static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
{
	struct omap_sham_dev *dd = data;

	if (likely(lch == dd->dma_lch))
		tasklet_schedule(&dd->done_task);
}

static int omap_sham_dma_init(struct omap_sham_dev *dd)
{
	int err;

	dd->dma_lch = -1;

	err = omap_request_dma(dd->dma, dev_name(dd->dev),
			omap_sham_dma_callback, dd, &dd->dma_lch);
	if (err) {
		dev_err(dd->dev, "Unable to request DMA channel\n");
		return err;
	}
	omap_set_dma_dest_params(dd->dma_lch, 0,
			OMAP_DMA_AMODE_CONSTANT,
			dd->phys_base + SHA_REG_DIN(0), 0, 16);

	omap_set_dma_dest_burst_mode(dd->dma_lch,
			OMAP_DMA_DATA_BURST_16);

	omap_set_dma_src_burst_mode(dd->dma_lch,
			OMAP_DMA_DATA_BURST_4);

	return 0;
}

static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
{
	if (dd->dma_lch >= 0) {
		omap_free_dma(dd->dma_lch);
		dd->dma_lch = -1;
	}
}

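/*
 * Probe: map the MEM/DMA/IRQ resources, set up the DMA channel and the
 * interface clock, report the module revision, add the device to the
 * driver list and register the four ahash algorithms.
 */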
static int __devinit omap_sham_probe(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int err, i, j;

	dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto data_err;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	spin_lock_init(&dd->lock);
	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
	tasklet_init(&dd->queue_task, omap_sham_queue_task, (unsigned long)dd);
	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);

	dd->irq = -1;

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	dd->phys_base = res->start;

	/* Get the DMA */
	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!res) {
		dev_err(dev, "no DMA resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	dd->dma = res->start;

	/* Get the IRQ */
	dd->irq = platform_get_irq(pdev, 0);
	if (dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = dd->irq;
		goto res_err;
	}

	err = request_irq(dd->irq, omap_sham_irq,
			IRQF_TRIGGER_LOW, dev_name(dev), dd);
	if (err) {
		dev_err(dev, "unable to request irq.\n");
		goto res_err;
	}

	err = omap_sham_dma_init(dd);
	if (err)
		goto dma_err;

	/* Initializing the clock */
	dd->iclk = clk_get(dev, "ick");
	if (IS_ERR(dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(dd->iclk);
		goto clk_err;
	}

	dd->io_base = ioremap(dd->phys_base, SZ_4K);
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto io_err;
	}

	clk_enable(dd->iclk);
	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
		(omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4,
		omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR);
	clk_disable(dd->iclk);

	spin_lock(&sham.lock);
	list_add_tail(&dd->list, &sham.dev_list);
	spin_unlock(&sham.lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_ahash(&algs[i]);
		if (err)
			goto err_algs;
	}

	return 0;

err_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&algs[j]);
	iounmap(dd->io_base);
io_err:
	clk_put(dd->iclk);
clk_err:
	omap_sham_dma_cleanup(dd);
dma_err:
	if (dd->irq >= 0)
		free_irq(dd->irq, dd);
res_err:
	kfree(dd);
	dd = NULL;
data_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

static int __devexit omap_sham_remove(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	int i;

	dd = platform_get_drvdata(pdev);
	if (!dd)
		return -ENODEV;
	spin_lock(&sham.lock);
	list_del(&dd->list);
	spin_unlock(&sham.lock);
	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_ahash(&algs[i]);
	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	iounmap(dd->io_base);
	clk_put(dd->iclk);
	omap_sham_dma_cleanup(dd);
	if (dd->irq >= 0)
		free_irq(dd->irq, dd);
	kfree(dd);
	dd = NULL;

	return 0;
}

static struct platform_driver omap_sham_driver = {
	.probe	= omap_sham_probe,
	.remove	= omap_sham_remove,
	.driver	= {
		.name	= "omap-sham",
		.owner	= THIS_MODULE,
	},
};

static int __init omap_sham_mod_init(void)
{
	pr_info("loading %s driver\n", "omap-sham");

	if (!cpu_class_is_omap2() ||
		omap_type() != OMAP2_DEVICE_TYPE_SEC) {
		pr_err("Unsupported cpu\n");
		return -ENODEV;
	}

	return platform_driver_register(&omap_sham_driver);
}

static void __exit omap_sham_mod_exit(void)
{
	platform_driver_unregister(&omap_sham_driver);
}

module_init(omap_sham_mod_init);
module_exit(omap_sham_mod_exit);

MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");