/*
 * Cryptographic API.
 *
 * Support for OMAP SHA1/MD5 HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from old omap-sha1-md5.c driver.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>

#include <plat/cpu.h>
#include <plat/dma.h>
#include <mach/irqs.h>

#define SHA_REG_DIGEST(x)		(0x00 + ((x) * 0x04))
#define SHA_REG_DIN(x)			(0x1C + ((x) * 0x04))

#define SHA1_MD5_BLOCK_SIZE		SHA1_BLOCK_SIZE
#define MD5_DIGEST_SIZE			16

#define SHA_REG_DIGCNT			0x14

#define SHA_REG_CTRL			0x18
#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
#define SHA_REG_CTRL_ALGO		(1 << 2)
#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)

#define SHA_REG_REV			0x5C
#define SHA_REG_REV_MAJOR		0xF0
#define SHA_REG_REV_MINOR		0x0F

#define SHA_REG_MASK			0x60
#define SHA_REG_MASK_DMA_EN		(1 << 3)
#define SHA_REG_MASK_IT_EN		(1 << 2)
#define SHA_REG_MASK_SOFTRESET		(1 << 1)
#define SHA_REG_AUTOIDLE		(1 << 0)

#define SHA_REG_SYSSTATUS		0x64
#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define DEFAULT_TIMEOUT_INTERVAL	HZ

#define FLAGS_FINUP		0x0002
#define FLAGS_FINAL		0x0004
#define FLAGS_SG		0x0008
#define FLAGS_SHA1		0x0010
#define FLAGS_DMA_ACTIVE	0x0020
#define FLAGS_OUTPUT_READY	0x0040
#define FLAGS_CLEAN		0x0080
#define FLAGS_INIT		0x0100
#define FLAGS_CPU		0x0200
#define FLAGS_HMAC		0x0400
#define FLAGS_ERROR		0x0800
#define FLAGS_BUSY		0x1000

#define OP_UPDATE	1
#define OP_FINAL	2

#define OMAP_ALIGN_MASK		(sizeof(u32)-1)
#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))

#define BUFLEN		PAGE_SIZE

struct omap_sham_dev;

struct omap_sham_reqctx {
	struct omap_sham_dev	*dd;
	unsigned long		flags;
	unsigned long		op;

	u8			digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED;
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;
	dma_addr_t		dma_addr;

	/* walk state */
	struct scatterlist	*sg;
	unsigned int		offset;	/* offset in current sg */
	unsigned int		total;	/* total request */

	u8			buffer[0] OMAP_ALIGNED;
};

struct omap_sham_hmac_ctx {
	struct crypto_shash	*shash;
	u8			ipad[SHA1_MD5_BLOCK_SIZE];
	u8			opad[SHA1_MD5_BLOCK_SIZE];
};

struct omap_sham_ctx {
	struct omap_sham_dev	*dd;

	unsigned long		flags;

	/* fallback stuff */
	struct crypto_shash	*fallback;

	struct omap_sham_hmac_ctx base[0];
};

#define OMAP_SHAM_QUEUE_LENGTH	1

struct omap_sham_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	void __iomem		*io_base;
	int			irq;
	struct clk		*iclk;
	spinlock_t		lock;
	int			err;
	int			dma;
	int			dma_lch;
	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;
};

struct omap_sham_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
	unsigned long		flags;
};

static struct omap_sham_drv sham = {
	.dev_list = LIST_HEAD_INIT(sham.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};

static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_sham_write(struct omap_sham_dev *dd,
					u32 offset, u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_sham_read(dd, address);
	val &= ~mask;
	val |= value;
	omap_sham_write(dd, address, val);
}

static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
{
	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;

	while (!(omap_sham_read(dd, offset) & bit)) {
		if (time_is_before_jiffies(timeout))
			return -ETIMEDOUT;
	}

	return 0;
}

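/*
 * Copy the running digest between the driver's context buffer and the
 * SHA_REG_DIGEST registers: out != 0 reads the hash back from the
 * hardware, out == 0 restores a previously saved hash into the hardware.
 */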
static void omap_sham_copy_hash(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	int i;

	/* MD5 is almost unused. So copy sha1 size to reduce code */
	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
		if (out)
			hash[i] = omap_sham_read(ctx->dd,
						SHA_REG_DIGEST(i));
		else
			omap_sham_write(ctx->dd,
					SHA_REG_DIGEST(i), hash[i]);
	}
}

static void omap_sham_copy_ready_hash(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *in = (u32 *)ctx->digest;
	u32 *hash = (u32 *)req->result;
	int i;

	if (!hash)
		return;

	if (likely(ctx->flags & FLAGS_SHA1)) {
		/* SHA1 results are in big endian */
		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
			hash[i] = be32_to_cpu(in[i]);
	} else {
		/* MD5 results are in little endian */
		for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
			hash[i] = le32_to_cpu(in[i]);
	}
}

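/*
 * Enable the interface clock and, on first use, soft-reset the SHA/MD5
 * module and wait for SYSSTATUS.RESETDONE before marking the device
 * initialized.
 */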
static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
	clk_enable(dd->iclk);

	if (!(dd->flags & FLAGS_INIT)) {
		omap_sham_write_mask(dd, SHA_REG_MASK,
			SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);

		if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
					SHA_REG_SYSSTATUS_RESETDONE))
			return -ETIMEDOUT;

		dd->flags |= FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

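/*
 * Program DIGCNT, the interrupt/DMA mask and the CTRL register for the
 * next transfer: the data length goes into the CTRL length field (bits
 * 31:5), ALGO selects SHA1 vs MD5, ALGO_CONST loads the initial digest
 * constants on the first block and CLOSE_HASH pads and closes the hash
 * on the last one.
 */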
static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
				 int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val = length << 5, mask;

	if (likely(ctx->digcnt))
		omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);

	omap_sham_write_mask(dd, SHA_REG_MASK,
		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
	/*
	 * Setting ALGO_CONST only for the first iteration
	 * and CLOSE_HASH only for the last one.
	 */
	if (ctx->flags & FLAGS_SHA1)
		val |= SHA_REG_CTRL_ALGO;
	if (!ctx->digcnt)
		val |= SHA_REG_CTRL_ALGO_CONST;
	if (final)
		val |= SHA_REG_CTRL_CLOSE_HASH;

	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
	       SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;

	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
}

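/*
 * PIO path: wait for INPUT_READY, then feed the buffer into the DIN
 * registers one 32-bit word at a time. Completion is signalled by the
 * OUTPUT_READY interrupt, hence the -EINPROGRESS return.
 */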
static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	omap_sham_write_ctrl(dd, length, final, 0);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt += length;

	if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
		return -ETIMEDOUT;

	if (final)
		ctx->flags |= FLAGS_FINAL; /* catch last interrupt */

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);

	return -EINPROGRESS;
}

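/*
 * DMA path: set up a packet-synchronized transfer of len32 32-bit words
 * from dma_addr into the DIN FIFO and start the channel. The DMA
 * callback and the OUTPUT_READY interrupt drive the rest of the request
 * through the done tasklet.
 */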
static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
			      size_t length, int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int len32;

	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
			1, OMAP_DMA_SYNC_PACKET, dd->dma,
				OMAP_DMA_DST_SYNC_PREFETCH);

	omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
				dma_addr, 0, 0);

	omap_sham_write_ctrl(dd, length, final, 1);

	ctx->digcnt += length;

	if (final)
		ctx->flags |= FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= FLAGS_DMA_ACTIVE;

	omap_start_dma(dd->dma_lch);

	return -EINPROGRESS;
}

static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
				const u8 *data, size_t length)
{
	size_t count = min(length, ctx->buflen - ctx->bufcnt);

	count = min(count, ctx->total);
	if (count <= 0)
		return 0;
	memcpy(ctx->buffer + ctx->bufcnt, data, count);
	ctx->bufcnt += count;

	return count;
}

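/*
 * Walk the request scatterlist and copy as much data as fits into the
 * internal buffer, tracking the offset within the current sg entry so
 * the walk can resume on the next call.
 */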
static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
{
	size_t count;

	while (ctx->sg) {
		count = omap_sham_append_buffer(ctx,
				sg_virt(ctx->sg) + ctx->offset,
				ctx->sg->length - ctx->offset);
		if (!count)
			break;
		ctx->offset += count;
		ctx->total -= count;
		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}

static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
					struct omap_sham_reqctx *ctx,
					size_t length, int final)
{
	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
				       DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
		return -EINVAL;
	}

	ctx->flags &= ~FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
}

static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int final;
	size_t count;

	omap_sham_append_sg(ctx);

	final = (ctx->flags & FLAGS_FINUP) && !ctx->total;

	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
					 ctx->bufcnt, ctx->digcnt, final);

	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		return omap_sham_xmit_dma_map(dd, ctx, count, final);
	}

	return 0;
}

/* Start address alignment */
#define SG_AA(sg)	(IS_ALIGNED(sg->offset, sizeof(u32)))
/* SHA1 block size alignment */
#define SG_SA(sg)	(IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))

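/*
 * Fast path: DMA directly from the request scatterlist when the current
 * sg entry is word aligned and block-size aligned (or is the last one).
 * Anything that does not qualify is routed through
 * omap_sham_update_dma_slow(), which copies data into the bounce buffer
 * first.
 */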
static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length, final, tail;
	struct scatterlist *sg;

	if (!ctx->total)
		return 0;

	if (ctx->bufcnt || ctx->offset)
		return omap_sham_update_dma_slow(dd);

	dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
			ctx->digcnt, ctx->bufcnt, ctx->total);

	sg = ctx->sg;

	if (!SG_AA(sg))
		return omap_sham_update_dma_slow(dd);

	if (!sg_is_last(sg) && !SG_SA(sg))
		/* size is not SHA1_BLOCK_SIZE aligned */
		return omap_sham_update_dma_slow(dd);

	length = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & FLAGS_FINUP)) {
			/* data not closing the hash must be
			 * SHA1_MD5_BLOCK_SIZE aligned */
			tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
			/* without finup() we need one block to close hash */
			if (!tail)
				tail = SHA1_MD5_BLOCK_SIZE;
			length -= tail;
		}
	}

	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	ctx->flags |= FLAGS_SG;

	ctx->total -= length;
	ctx->offset = length; /* offset where to start slow */

	final = (ctx->flags & FLAGS_FINUP) && !ctx->total;

	/* next call does not fail... so no unmap in the case of error */
	return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
}

static int omap_sham_update_cpu(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;

	omap_sham_append_sg(ctx);
	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}

static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	omap_stop_dma(dd->dma_lch);
	if (ctx->flags & FLAGS_SG) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
				 DMA_TO_DEVICE);
	}

	return 0;
}

static void omap_sham_cleanup(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	unsigned long flags;

	spin_lock_irqsave(&dd->lock, flags);
	if (ctx->flags & FLAGS_CLEAN) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return;
	}
	ctx->flags |= FLAGS_CLEAN;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (ctx->digcnt)
		omap_sham_copy_ready_hash(req);

	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
}

static int omap_sham_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = NULL, *tmp;

	spin_lock_bh(&sham.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &sham.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&sham.lock);

	ctx->dd = dd;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
		ctx->flags |= FLAGS_SHA1;

	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->buflen = BUFLEN;

	if (tctx->flags & FLAGS_HMAC) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;

		memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
		ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
		ctx->flags |= FLAGS_HMAC;
	}

	return 0;
}

static int omap_sham_update_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
		 ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0);

	if (ctx->flags & FLAGS_CPU)
		err = omap_sham_update_cpu(dd);
	else
		err = omap_sham_update_dma_start(dd);

	/* wait for dma completion before can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);

	return err;
}

static int omap_sham_final_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0, use_dma = 1;

	if (ctx->bufcnt <= 64)
		/* faster to handle last block with cpu */
		use_dma = 0;

	if (use_dma)
		err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
	else
		err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);

	ctx->bufcnt = 0;

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}

static int omap_sham_finish_req_hmac(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(bctx->shash)];
	} desc;

	desc.shash.tfm = bctx->shash;
	desc.shash.flags = 0;	/* not CRYPTO_TFM_REQ_MAY_SLEEP */

	return crypto_shash_init(&desc.shash) ?:
	       crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
	       crypto_shash_finup(&desc.shash, ctx->digest, ds, ctx->digest);
}

static void omap_sham_finish_req(struct ahash_request *req, int err)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	if (!err) {
		omap_sham_copy_hash(ctx->dd->req, 1);
		if (ctx->flags & FLAGS_HMAC)
			err = omap_sham_finish_req_hmac(req);
	} else {
		ctx->flags |= FLAGS_ERROR;
	}

	if ((ctx->flags & FLAGS_FINAL) || err)
		omap_sham_cleanup(req);

	clk_disable(dd->iclk);
	dd->flags &= ~FLAGS_BUSY;

	if (req->base.complete)
		req->base.complete(&req->base, err);
}

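/*
 * Dequeue and run one request: enqueue the new request (if any), bail
 * out if the device is already busy, otherwise initialize the hardware,
 * program the DMA destination parameters and dispatch the update/final
 * operation. Called from user context and from the tasklets.
 */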
static int omap_sham_handle_queue(struct omap_sham_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog = NULL;
	struct omap_sham_reqctx *ctx;
	struct ahash_request *prev_req;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req) {
		dd->flags |= FLAGS_BUSY;
		backlog = crypto_get_backlog(&dd->queue);
	}
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);

	prev_req = dd->req;
	dd->req = req;

	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = omap_sham_hw_init(dd);
	if (err)
		goto err1;

	omap_set_dma_dest_params(dd->dma_lch, 0,
			OMAP_DMA_AMODE_CONSTANT,
			dd->phys_base + SHA_REG_DIN(0), 0, 16);

	omap_set_dma_dest_burst_mode(dd->dma_lch,
			OMAP_DMA_DATA_BURST_16);

	omap_set_dma_src_burst_mode(dd->dma_lch,
			OMAP_DMA_DATA_BURST_4);

	if (ctx->digcnt)
		/* request has changed - restore hash */
		omap_sham_copy_hash(req, 0);

	if (ctx->op == OP_UPDATE) {
		err = omap_sham_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP))
			/* no final() after finup() */
			err = omap_sham_final_req(dd);
	} else if (ctx->op == OP_FINAL) {
		err = omap_sham_final_req(dd);
	}
err1:
	if (err != -EINPROGRESS) {
		/* done_task will not finish it, so do it here */
		omap_sham_finish_req(req, err);
		tasklet_schedule(&dd->queue_task);
	}

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return ret;
}

static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_dev *dd = tctx->dd;

	ctx->op = op;

	return omap_sham_handle_queue(dd, req);
}

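/*
 * ahash .update entry point. Tiny finup totals below the 9-byte
 * hardware minimum and non-finup data that does not yet fill the bounce
 * buffer are only copied here; otherwise the request is queued for the
 * hardware, using the CPU/PIO path for short finup transfers of at most
 * one block.
 */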
static int omap_sham_update(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & FLAGS_FINUP) {
		if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
			/*
			 * OMAP HW accel works only with buffers >= 9
			 * will switch to bypass in final()
			 * final has the same request and data
			 */
			omap_sham_append_sg(ctx);
			return 0;
		} else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
			/*
			 * faster to use CPU for short transfers
			 */
			ctx->flags |= FLAGS_CPU;
		}
	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
		omap_sham_append_sg(ctx);
		return 0;
	}

	return omap_sham_enqueue(req, OP_UPDATE);
}

static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
				  const u8 *data, unsigned int len, u8 *out)
{
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(shash)];
	} desc;

	desc.shash.tfm = shash;
	desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_shash_digest(&desc.shash, data, len, out);
}

static int omap_sham_final_shash(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	return omap_sham_shash_digest(tctx->fallback, req->base.flags,
				      ctx->buffer, ctx->bufcnt, req->result);
}

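/*
 * ahash .final entry point. Totals below the 9-byte hardware minimum
 * are hashed with the software fallback; buffered data is queued as an
 * OP_FINAL request; an empty buffer or a previous error only triggers
 * cleanup.
 */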
static int omap_sham_final(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0;

	ctx->flags |= FLAGS_FINUP;

	if (!(ctx->flags & FLAGS_ERROR)) {
		/* OMAP HW accel works only with buffers >= 9 */
		/* HMAC is always >= 9 because of ipad */
		if ((ctx->digcnt + ctx->bufcnt) < 9)
			err = omap_sham_final_shash(req);
		else if (ctx->bufcnt)
			return omap_sham_enqueue(req, OP_FINAL);
	}

	omap_sham_cleanup(req);

	return err;
}

static int omap_sham_finup(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= FLAGS_FINUP;

	err1 = omap_sham_update(req);
	if (err1 == -EINPROGRESS)
		return err1;
	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = omap_sham_final(req);

	return err1 ?: err2;
}

static int omap_sham_digest(struct ahash_request *req)
{
	return omap_sham_init(req) ?: omap_sham_finup(req);
}

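/*
 * HMAC key processing: keys longer than the block size are first hashed
 * down to digest size, shorter keys are zero-padded, and the result is
 * XORed with the 0x36/0x5c constants to build the ipad and opad blocks.
 * The key is also forwarded to the software fallback transform.
 */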
static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	int err, i;

	err = crypto_shash_setkey(tctx->fallback, key, keylen);
	if (err)
		return err;

	if (keylen > bs) {
		err = omap_sham_shash_digest(bctx->shash,
				crypto_shash_get_flags(bctx->shash),
				key, keylen, bctx->ipad);
		if (err)
			return err;
		keylen = ds;
	} else {
		memcpy(bctx->ipad, key, keylen);
	}

	memset(bctx->ipad + keylen, 0, bs - keylen);
	memcpy(bctx->opad, bctx->ipad, bs);

	for (i = 0; i < bs; i++) {
		bctx->ipad[i] ^= 0x36;
		bctx->opad[i] ^= 0x5c;
	}

	return err;
}

static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	pr_info("enter\n");

	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("omap-sham: fallback driver '%s' "
				"could not be loaded.\n", alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct omap_sham_reqctx) + BUFLEN);

	if (alg_base) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		tctx->flags |= FLAGS_HMAC;
		bctx->shash = crypto_alloc_shash(alg_base, 0,
						CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(bctx->shash)) {
			pr_err("omap-sham: base driver '%s' "
					"could not be loaded.\n", alg_base);
			crypto_free_shash(tctx->fallback);
			return PTR_ERR(bctx->shash);
		}
	}

	return 0;
}

static int omap_sham_cra_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, NULL);
}

static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha1");
}

static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "md5");
}

static void omap_sham_cra_exit(struct crypto_tfm *tfm)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;

	if (tctx->flags & FLAGS_HMAC) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		crypto_free_shash(bctx->shash);
	}
}

static struct ahash_alg algs[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "omap-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "md5",
		.cra_driver_name	= "omap-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha1)",
		.cra_driver_name	= "omap-hmac-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha1_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(md5)",
		.cra_driver_name	= "omap-hmac-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_md5_init,
		.cra_exit		= omap_sham_cra_exit,
	}
}
};

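/*
 * Done tasklet: runs after an OUTPUT_READY interrupt or a DMA
 * completion. It stops and unmaps the DMA, kicks off the next DMA chunk
 * if data remains, and finishes the request (and restarts the queue)
 * once the transfer is complete or has failed.
 */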
static void omap_sham_done_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int ready = 0, err = 0;

	if (ctx->flags & FLAGS_OUTPUT_READY) {
		ctx->flags &= ~FLAGS_OUTPUT_READY;
		ready = 1;
	}

	if (dd->flags & FLAGS_DMA_ACTIVE) {
		dd->flags &= ~FLAGS_DMA_ACTIVE;
		omap_sham_update_dma_stop(dd);
		if (!dd->err)
			err = omap_sham_update_dma_start(dd);
	}

	err = dd->err ? : err;

	if (err != -EINPROGRESS && (ready || err)) {
		dev_dbg(dd->dev, "update done: err: %d\n", err);
		/* finish current request */
		omap_sham_finish_req(req, err);
		/* start new request */
		omap_sham_handle_queue(dd, NULL);
	}
}

static void omap_sham_queue_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;

	omap_sham_handle_queue(dd, NULL);
}

static irqreturn_t omap_sham_irq(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	if (!ctx) {
		dev_err(dd->dev, "unknown interrupt.\n");
		return IRQ_HANDLED;
	}

	if (unlikely(ctx->flags & FLAGS_FINAL))
		/* final -> allow device to go to power-saving mode */
		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);

	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
				 SHA_REG_CTRL_OUTPUT_READY);
	omap_sham_read(dd, SHA_REG_CTRL);

	ctx->flags |= FLAGS_OUTPUT_READY;
	dd->err = 0;
	tasklet_schedule(&dd->done_task);

	return IRQ_HANDLED;
}

static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
{
	struct omap_sham_dev *dd = data;

	if (ch_status != OMAP_DMA_BLOCK_IRQ) {
		pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
		dd->err = -EIO;
		dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
	}

	tasklet_schedule(&dd->done_task);
}

static int omap_sham_dma_init(struct omap_sham_dev *dd)
{
	int err;

	dd->dma_lch = -1;

	err = omap_request_dma(dd->dma, dev_name(dd->dev),
			omap_sham_dma_callback, dd, &dd->dma_lch);
	if (err) {
		dev_err(dd->dev, "Unable to request DMA channel\n");
		return err;
	}

	return 0;
}

static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
{
	if (dd->dma_lch >= 0) {
		omap_free_dma(dd->dma_lch);
		dd->dma_lch = -1;
	}
}

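/*
 * Platform probe: allocate the device state, map the MEM resource, grab
 * the DMA request line, IRQ and interface clock, report the hardware
 * revision, add the device to the global list and register the four
 * ahash algorithms. Errors unwind in reverse order via the goto labels.
 */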
static int __devinit omap_sham_probe(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int err, i, j;

	dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto data_err;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	spin_lock_init(&dd->lock);
	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
	tasklet_init(&dd->queue_task, omap_sham_queue_task, (unsigned long)dd);
	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);

	dd->irq = -1;

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	dd->phys_base = res->start;

	/* Get the DMA */
	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!res) {
		dev_err(dev, "no DMA resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	dd->dma = res->start;

	/* Get the IRQ */
	dd->irq = platform_get_irq(pdev, 0);
	if (dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = dd->irq;
		goto res_err;
	}

	err = request_irq(dd->irq, omap_sham_irq,
			IRQF_TRIGGER_LOW, dev_name(dev), dd);
	if (err) {
		dev_err(dev, "unable to request irq.\n");
		goto res_err;
	}

	err = omap_sham_dma_init(dd);
	if (err)
		goto dma_err;

	/* Initializing the clock */
	dd->iclk = clk_get(dev, "ick");
	if (IS_ERR(dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(dd->iclk);
		goto clk_err;
	}

	dd->io_base = ioremap(dd->phys_base, SZ_4K);
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto io_err;
	}

	clk_enable(dd->iclk);
	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
		(omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4,
		omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR);
	clk_disable(dd->iclk);

	spin_lock(&sham.lock);
	list_add_tail(&dd->list, &sham.dev_list);
	spin_unlock(&sham.lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_ahash(&algs[i]);
		if (err)
			goto err_algs;
	}

	return 0;

err_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&algs[j]);
	iounmap(dd->io_base);
io_err:
	clk_put(dd->iclk);
clk_err:
	omap_sham_dma_cleanup(dd);
dma_err:
	if (dd->irq >= 0)
		free_irq(dd->irq, dd);
res_err:
	kfree(dd);
	dd = NULL;
data_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

static int __devexit omap_sham_remove(struct platform_device *pdev)
{
	static struct omap_sham_dev *dd;
	int i;

	dd = platform_get_drvdata(pdev);
	if (!dd)
		return -ENODEV;
	spin_lock(&sham.lock);
	list_del(&dd->list);
	spin_unlock(&sham.lock);
	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_ahash(&algs[i]);
	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	iounmap(dd->io_base);
	clk_put(dd->iclk);
	omap_sham_dma_cleanup(dd);
	if (dd->irq >= 0)
		free_irq(dd->irq, dd);
	kfree(dd);
	dd = NULL;

	return 0;
}

static struct platform_driver omap_sham_driver = {
	.probe	= omap_sham_probe,
	.remove	= omap_sham_remove,
	.driver	= {
		.name	= "omap-sham",
		.owner	= THIS_MODULE,
	},
};

static int __init omap_sham_mod_init(void)
{
	pr_info("loading %s driver\n", "omap-sham");

	if (!cpu_class_is_omap2() ||
		omap_type() != OMAP2_DEVICE_TYPE_SEC) {
		pr_err("Unsupported cpu\n");
		return -ENODEV;
	}

	return platform_driver_register(&omap_sham_driver);
}

static void __exit omap_sham_mod_exit(void)
{
	platform_driver_unregister(&omap_sham_driver);
}

module_init(omap_sham_mod_init);
module_exit(omap_sham_mod_exit);

MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");