blob: f54ceb8f5b2403da0bb01a6889bac8975dcf1c68 [file] [log] [blame]
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001/*
2 * Cryptographic API.
3 *
4 * Support for OMAP SHA1/MD5 HW acceleration.
5 *
6 * Copyright (c) 2010 Nokia Corporation
7 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
12 *
13 * Some ideas are from old omap-sha1-md5.c driver.
14 */
15
Mark A. Greerdfd061d2012-12-21 10:04:04 -070016#define OMAP_SHAM_DMA_PRIVATE
17
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +080018#define pr_fmt(fmt) "%s: " fmt, __func__
19
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +080020#include <linux/err.h>
21#include <linux/device.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/errno.h>
25#include <linux/interrupt.h>
26#include <linux/kernel.h>
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +080027#include <linux/irq.h>
28#include <linux/io.h>
29#include <linux/platform_device.h>
30#include <linux/scatterlist.h>
31#include <linux/dma-mapping.h>
Mark A. Greerdfd061d2012-12-21 10:04:04 -070032#include <linux/dmaengine.h>
33#include <linux/omap-dma.h>
Mark A. Greerb359f032012-12-21 10:04:02 -070034#include <linux/pm_runtime.h>
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +080035#include <linux/delay.h>
36#include <linux/crypto.h>
37#include <linux/cryptohash.h>
38#include <crypto/scatterwalk.h>
39#include <crypto/algapi.h>
40#include <crypto/sha.h>
41#include <crypto/hash.h>
42#include <crypto/internal/hash.h>
43
/*
 * Register map of the OMAP SHA1/MD5 accelerator.  SHA_REG_DIGEST() and
 * SHA_REG_DIN() index banks of consecutive 32-bit digest / data-in
 * registers.
 */
#define SHA_REG_DIGEST(x)		(0x00 + ((x) * 0x04))
#define SHA_REG_DIN(x)			(0x1C + ((x) * 0x04))

/* SHA1 and MD5 share the same 64-byte block size */
#define SHA1_MD5_BLOCK_SIZE		SHA1_BLOCK_SIZE
#define MD5_DIGEST_SIZE			16

/* dmaengine destination burst size, and the smallest transfer worth DMA */
#define DST_MAXBURST			16
#define DMA_MIN				(DST_MAXBURST * sizeof(u32))

#define SHA_REG_DIGCNT			0x14

#define SHA_REG_CTRL			0x18
#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
#define SHA_REG_CTRL_ALGO		(1 << 2)
#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)

#define SHA_REG_REV			0x5C
#define SHA_REG_REV_MAJOR		0xF0
#define SHA_REG_REV_MINOR		0x0F

#define SHA_REG_MASK			0x60
#define SHA_REG_MASK_DMA_EN		(1 << 3)
#define SHA_REG_MASK_IT_EN		(1 << 2)
#define SHA_REG_MASK_SOFTRESET		(1 << 1)
#define SHA_REG_AUTOIDLE		(1 << 0)

#define SHA_REG_SYSSTATUS		0x64
#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)

/* upper bound for the omap_sham_wait() busy loop */
#define DEFAULT_TIMEOUT_INTERVAL	HZ

/* mostly device flags */
#define FLAGS_BUSY		0
#define FLAGS_FINAL		1
#define FLAGS_DMA_ACTIVE	2
#define FLAGS_OUTPUT_READY	3
#define FLAGS_INIT		4
#define FLAGS_CPU		5
#define FLAGS_DMA_READY		6
/* context flags */
#define FLAGS_FINUP		16
#define FLAGS_SG		17
#define FLAGS_SHA1		18
#define FLAGS_HMAC		19
#define FLAGS_ERROR		20

/* request context op codes */
#define OP_UPDATE		1
#define OP_FINAL		2

/* DMA requires 32-bit aligned buffers */
#define OMAP_ALIGN_MASK		(sizeof(u32)-1)
#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))

/* size of the per-request staging buffer (trailing member of reqctx) */
#define BUFLEN			PAGE_SIZE
100
struct omap_sham_dev;

/* Per-request state, stored in the ahash request context. */
struct omap_sham_reqctx {
	struct omap_sham_dev	*dd;
	unsigned long		flags;	/* context FLAGS_* bits */
	unsigned long		op;	/* OP_UPDATE or OP_FINAL */

	u8			digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED;
	size_t			digcnt;	/* bytes hashed by the HW so far */
	size_t			bufcnt;	/* bytes currently in buffer[] */
	size_t			buflen;	/* capacity of buffer[] (BUFLEN) */
	dma_addr_t		dma_addr; /* DMA mapping of buffer[] */

	/* walk state */
	struct scatterlist	*sg;
#ifndef OMAP_SHAM_DMA_PRIVATE
	struct scatterlist	sgl;	/* local sg copy with fixed length */
#endif
	unsigned int		offset;	/* offset in current sg */
	unsigned int		total;	/* total request */

	u8			buffer[0] OMAP_ALIGNED;	/* staging buffer */
};
124
/* HMAC state: software fallback shash plus precomputed inner/outer pads. */
struct omap_sham_hmac_ctx {
	struct crypto_shash	*shash;
	u8			ipad[SHA1_MD5_BLOCK_SIZE];
	u8			opad[SHA1_MD5_BLOCK_SIZE];
};
130
/* Per-tfm state; base[] holds HMAC state for hmac(...) algorithms only. */
struct omap_sham_ctx {
	struct omap_sham_dev	*dd;

	unsigned long		flags;	/* tfm-level bits, e.g. FLAGS_HMAC */

	/* fallback stuff */
	struct crypto_shash	*fallback;

	struct omap_sham_hmac_ctx base[0];
};
141
#define OMAP_SHAM_QUEUE_LENGTH	1

/* Per-device (per accelerator instance) state. */
struct omap_sham_dev {
	struct list_head	list;		/* entry in sham.dev_list */
	unsigned long		phys_base;
	struct device		*dev;
	void __iomem		*io_base;
	int			irq;
	spinlock_t		lock;		/* protects queue and flags */
	int			err;		/* sticky error of current req */
#ifdef OMAP_SHAM_DMA_PRIVATE
	int			dma;		/* DMA request line */
	int			dma_lch;	/* private-API DMA channel */
#else
	struct dma_chan		*dma_lch;	/* dmaengine channel */
#endif
	struct tasklet_struct	done_task;

	unsigned long		flags;		/* device FLAGS_* bits */
	struct crypto_queue	queue;
	struct ahash_request	*req;		/* request being processed */
};
164
/* Driver-global state: the list of probed accelerator devices. */
struct omap_sham_drv {
	struct list_head	dev_list;
	spinlock_t		lock;		/* protects dev_list */
	unsigned long		flags;
};

static struct omap_sham_drv sham = {
	.dev_list = LIST_HEAD_INIT(sham.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};
175
176static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
177{
178 return __raw_readl(dd->io_base + offset);
179}
180
181static inline void omap_sham_write(struct omap_sham_dev *dd,
182 u32 offset, u32 value)
183{
184 __raw_writel(value, dd->io_base + offset);
185}
186
187static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
188 u32 value, u32 mask)
189{
190 u32 val;
191
192 val = omap_sham_read(dd, address);
193 val &= ~mask;
194 val |= value;
195 omap_sham_write(dd, address, val);
196}
197
198static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
199{
200 unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;
201
202 while (!(omap_sham_read(dd, offset) & bit)) {
203 if (time_is_before_jiffies(timeout))
204 return -ETIMEDOUT;
205 }
206
207 return 0;
208}
209
/*
 * Copy the intermediate hash between ctx->digest and the HW digest
 * registers.  @out != 0 exports HW -> ctx->digest; @out == 0 restores
 * ctx->digest -> HW (used when the device switches between requests).
 */
static void omap_sham_copy_hash(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	int i;

	/* MD5 is almost unused. So copy sha1 size to reduce code */
	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
		if (out)
			hash[i] = omap_sham_read(ctx->dd,
						SHA_REG_DIGEST(i));
		else
			omap_sham_write(ctx->dd,
					SHA_REG_DIGEST(i), hash[i]);
	}
}
226
227static void omap_sham_copy_ready_hash(struct ahash_request *req)
228{
229 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
230 u32 *in = (u32 *)ctx->digest;
231 u32 *hash = (u32 *)req->result;
232 int i;
233
234 if (!hash)
235 return;
236
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +0300237 if (likely(ctx->flags & BIT(FLAGS_SHA1))) {
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800238 /* SHA1 results are in big endian */
239 for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
Dmitry Kasatkin3c8d7582010-11-19 16:04:27 +0200240 hash[i] = be32_to_cpu(in[i]);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800241 } else {
242 /* MD5 results are in little endian */
243 for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
Dmitry Kasatkin3c8d7582010-11-19 16:04:27 +0200244 hash[i] = le32_to_cpu(in[i]);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800245 }
246}
247
/*
 * Power up the module via runtime PM and, on first use only, issue a
 * soft reset and wait for RESETDONE.  Returns 0, or -ETIMEDOUT if the
 * reset never completes.
 */
static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
	pm_runtime_get_sync(dd->dev);

	if (!test_bit(FLAGS_INIT, &dd->flags)) {
		omap_sham_write_mask(dd, SHA_REG_MASK,
			SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);

		if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
					SHA_REG_SYSSTATUS_RESETDONE))
			return -ETIMEDOUT;

		set_bit(FLAGS_INIT, &dd->flags);
		dd->err = 0;
	}

	return 0;
}
266
267static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800268 int final, int dma)
269{
270 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
271 u32 val = length << 5, mask;
272
Dmitry Kasatkin798eed52010-11-19 16:04:26 +0200273 if (likely(ctx->digcnt))
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800274 omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800275
276 omap_sham_write_mask(dd, SHA_REG_MASK,
277 SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
278 SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
279 /*
280 * Setting ALGO_CONST only for the first iteration
281 * and CLOSE_HASH only for the last one.
282 */
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +0300283 if (ctx->flags & BIT(FLAGS_SHA1))
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800284 val |= SHA_REG_CTRL_ALGO;
285 if (!ctx->digcnt)
286 val |= SHA_REG_CTRL_ALGO_CONST;
287 if (final)
288 val |= SHA_REG_CTRL_CLOSE_HASH;
289
290 mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
291 SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
292
293 omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800294}
295
/*
 * Feed @length bytes to the hash engine via PIO writes to the DIN
 * registers.  Returns -EINPROGRESS (completion is signalled by the
 * OUTPUT_READY interrupt) or -ETIMEDOUT if INPUT_READY never asserts.
 */
static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	omap_sham_write_ctrl(dd, length, final, 0);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt += length;

	if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
		return -ETIMEDOUT;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_CPU, &dd->flags);

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);

	return -EINPROGRESS;
}
326
#ifndef OMAP_SHAM_DMA_PRIVATE
/* dmaengine completion callback: defer processing to the done tasklet. */
static void omap_sham_dma_callback(void *param)
{
	struct omap_sham_dev *dd = param;

	set_bit(FLAGS_DMA_READY, &dd->flags);
	tasklet_schedule(&dd->done_task);
}
#endif
336
/*
 * Start a DMA transfer of @length bytes into the DIN register block.
 * @dma_addr is the already-mapped source; on the dmaengine path @is_sg
 * selects sg-based vs single-buffer submission.  Returns -EINPROGRESS
 * on success (completion arrives via interrupt/callback).
 */
static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
			      size_t length, int final, int is_sg)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
#ifdef OMAP_SHAM_DMA_PRIVATE
	int len32;
#else
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config cfg;
	int len32, ret;
#endif

	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

#ifdef OMAP_SHAM_DMA_PRIVATE
	/* legacy OMAP DMA API: packet-synchronized transfer of len32 words */
	len32 = DIV_ROUND_UP(length, sizeof(u32));

	omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
			1, OMAP_DMA_SYNC_PACKET, dd->dma,
				OMAP_DMA_DST_SYNC_PREFETCH);

	omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
				dma_addr, 0, 0);

#else
	memset(&cfg, 0, sizeof(cfg));

	/* destination is the fixed DIN register, written in 4-byte bursts */
	cfg.dst_addr = dd->phys_base + SHA_REG_DIN(0);
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_maxburst = DST_MAXBURST;

	ret = dmaengine_slave_config(dd->dma_lch, &cfg);
	if (ret) {
		pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
		return ret;
	}

	/* round the length up to a whole number of DMA frames */
	len32 = DIV_ROUND_UP(length, DMA_MIN) * DMA_MIN;

	if (is_sg) {
		/*
		 * The SG entry passed in may not have the 'length' member
		 * set correctly so use a local SG entry (sgl) with the
		 * proper value for 'length' instead.  If this is not done,
		 * the dmaengine may try to DMA the incorrect amount of data.
		 */
		sg_init_table(&ctx->sgl, 1);
		ctx->sgl.page_link = ctx->sg->page_link;
		ctx->sgl.offset = ctx->sg->offset;
		sg_dma_len(&ctx->sgl) = len32;
		sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);

		tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl, 1,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	} else {
		tx = dmaengine_prep_slave_single(dd->dma_lch, dma_addr, len32,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (!tx) {
		dev_err(dd->dev, "prep_slave_sg/single() failed\n");
		return -EINVAL;
	}

	tx->callback = omap_sham_dma_callback;
	tx->callback_param = dd;
#endif

	omap_sham_write_ctrl(dd, length, final, 1);

	ctx->digcnt += length;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);

#ifdef OMAP_SHAM_DMA_PRIVATE
	omap_start_dma(dd->dma_lch);
#else
	dmaengine_submit(tx);
	dma_async_issue_pending(dd->dma_lch);
#endif

	return -EINPROGRESS;
}
424
425static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
426 const u8 *data, size_t length)
427{
428 size_t count = min(length, ctx->buflen - ctx->bufcnt);
429
430 count = min(count, ctx->total);
431 if (count <= 0)
432 return 0;
433 memcpy(ctx->buffer + ctx->bufcnt, data, count);
434 ctx->bufcnt += count;
435
436 return count;
437}
438
/*
 * Walk ctx->sg starting at ctx->offset, copying data into the staging
 * buffer until the buffer is full or the request total is consumed.
 * Updates the walk state (sg/offset/total) so a later call resumes
 * where this one stopped.  Always returns 0.
 */
static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
{
	size_t count;

	while (ctx->sg) {
		count = omap_sham_append_buffer(ctx,
				sg_virt(ctx->sg) + ctx->offset,
				ctx->sg->length - ctx->offset);
		if (!count)
			break;
		ctx->offset += count;
		ctx->total -= count;
		if (ctx->offset == ctx->sg->length) {
			/* current entry exhausted: advance to the next one */
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}
462
Dmitry Kasatkin798eed52010-11-19 16:04:26 +0200463static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
464 struct omap_sham_reqctx *ctx,
465 size_t length, int final)
466{
Mark A. Greerdfd061d2012-12-21 10:04:04 -0700467 int ret;
468
Dmitry Kasatkin798eed52010-11-19 16:04:26 +0200469 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
470 DMA_TO_DEVICE);
471 if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
472 dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
473 return -EINVAL;
474 }
475
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +0300476 ctx->flags &= ~BIT(FLAGS_SG);
Dmitry Kasatkin887c8832010-11-19 16:04:29 +0200477
Mark A. Greerdfd061d2012-12-21 10:04:04 -0700478 ret = omap_sham_xmit_dma(dd, ctx->dma_addr, length, final, 0);
479 if (ret)
480 dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
481 DMA_TO_DEVICE);
482
483 return ret;
Dmitry Kasatkin798eed52010-11-19 16:04:26 +0200484}
485
/*
 * "Slow" DMA path: stage sg data through the bounce buffer and flush
 * it via DMA when the buffer is full or the request is finishing.
 * Returns 0 if data was only buffered, -EINPROGRESS if a transfer was
 * started, or a negative error.
 */
static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int final;
	size_t count;

	omap_sham_append_sg(ctx);

	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;

	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
					 ctx->bufcnt, ctx->digcnt, final);

	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		return omap_sham_xmit_dma_map(dd, ctx, count, final);
	}

	return 0;
}
507
/* Start address alignment: sg offset must be 32-bit aligned for DMA.
 * Arguments are parenthesized so expressions can be passed safely. */
#define SG_AA(sg)	(IS_ALIGNED((sg)->offset, sizeof(u32)))
/* SHA1 block size alignment: sg length must be a block-size multiple */
#define SG_SA(sg)	(IS_ALIGNED((sg)->length, SHA1_MD5_BLOCK_SIZE))
512
513static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800514{
515 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
Dmitry Kasatkin887c8832010-11-19 16:04:29 +0200516 unsigned int length, final, tail;
517 struct scatterlist *sg;
Mark A. Greerdfd061d2012-12-21 10:04:04 -0700518 int ret;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800519
Dmitry Kasatkin887c8832010-11-19 16:04:29 +0200520 if (!ctx->total)
521 return 0;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800522
Dmitry Kasatkin887c8832010-11-19 16:04:29 +0200523 if (ctx->bufcnt || ctx->offset)
524 return omap_sham_update_dma_slow(dd);
525
Mark A. Greerdfd061d2012-12-21 10:04:04 -0700526#ifndef OMAP_SHAM_DMA_PRIVATE
527 /*
528 * Don't use the sg interface when the transfer size is less
529 * than the number of elements in a DMA frame. Otherwise,
530 * the dmaengine infrastructure will calculate that it needs
531 * to transfer 0 frames which ultimately fails.
532 */
533 if (ctx->total < (DST_MAXBURST * sizeof(u32)))
534 return omap_sham_update_dma_slow(dd);
535#endif
536
Dmitry Kasatkin887c8832010-11-19 16:04:29 +0200537 dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
538 ctx->digcnt, ctx->bufcnt, ctx->total);
539
540 sg = ctx->sg;
541
542 if (!SG_AA(sg))
543 return omap_sham_update_dma_slow(dd);
544
545 if (!sg_is_last(sg) && !SG_SA(sg))
546 /* size is not SHA1_BLOCK_SIZE aligned */
547 return omap_sham_update_dma_slow(dd);
548
549 length = min(ctx->total, sg->length);
550
551 if (sg_is_last(sg)) {
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +0300552 if (!(ctx->flags & BIT(FLAGS_FINUP))) {
Dmitry Kasatkin887c8832010-11-19 16:04:29 +0200553 /* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
554 tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
555 /* without finup() we need one block to close hash */
556 if (!tail)
557 tail = SHA1_MD5_BLOCK_SIZE;
558 length -= tail;
559 }
560 }
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800561
562 if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
563 dev_err(dd->dev, "dma_map_sg error\n");
564 return -EINVAL;
565 }
566
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +0300567 ctx->flags |= BIT(FLAGS_SG);
Dmitry Kasatkin887c8832010-11-19 16:04:29 +0200568
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800569 ctx->total -= length;
Dmitry Kasatkin887c8832010-11-19 16:04:29 +0200570 ctx->offset = length; /* offset where to start slow */
571
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +0300572 final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800573
Mark A. Greerdfd061d2012-12-21 10:04:04 -0700574 ret = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final, 1);
575 if (ret)
576 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
577
578 return ret;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800579}
580
581static int omap_sham_update_cpu(struct omap_sham_dev *dd)
582{
583 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
584 int bufcnt;
585
586 omap_sham_append_sg(ctx);
587 bufcnt = ctx->bufcnt;
588 ctx->bufcnt = 0;
589
590 return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
591}
592
/*
 * Stop the DMA channel and release whichever mapping (direct sg or
 * staging buffer) the finished transfer used; for the sg case also
 * advance the walk state.  Always returns 0.
 */
static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

#ifdef OMAP_SHAM_DMA_PRIVATE
	omap_stop_dma(dd->dma_lch);
#else
	dmaengine_terminate_all(dd->dma_lch);
#endif

	if (ctx->flags & BIT(FLAGS_SG)) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			/* entry fully consumed: move on to the next one */
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
				 DMA_TO_DEVICE);
	}

	return 0;
}
617
/*
 * ahash .init: bind the request (and, once, the tfm) to a device,
 * reset the request context and, for HMAC, preload the staging buffer
 * with the inner pad.  Always returns 0.
 */
static int omap_sham_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = NULL, *tmp;

	spin_lock_bh(&sham.lock);
	if (!tctx->dd) {
		/* first use of this tfm: pick the first probed device */
		list_for_each_entry(tmp, &sham.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&sham.lock);

	ctx->dd = dd;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
		ctx->flags |= BIT(FLAGS_SHA1);

	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->buflen = BUFLEN;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;

		/* HMAC starts by hashing the block-sized inner pad */
		memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
		ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
		ctx->flags |= BIT(FLAGS_HMAC);
	}

	return 0;

}
662
663static int omap_sham_update_req(struct omap_sham_dev *dd)
664{
665 struct ahash_request *req = dd->req;
666 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
667 int err;
668
669 dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +0300670 ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800671
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +0300672 if (ctx->flags & BIT(FLAGS_CPU))
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800673 err = omap_sham_update_cpu(dd);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800674 else
Dmitry Kasatkin887c8832010-11-19 16:04:29 +0200675 err = omap_sham_update_dma_start(dd);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800676
677 /* wait for dma completion before can take more data */
678 dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
679
680 return err;
681}
682
/*
 * Transfer the last buffered chunk with CLOSE_HASH set.  Small tails
 * go via PIO, since DMA setup would cost more than the copy.
 */
static int omap_sham_final_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0, use_dma = 1;

	if (ctx->bufcnt <= DMA_MIN)
		/* faster to handle last block with cpu */
		use_dma = 0;

	if (use_dma)
		err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
	else
		err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);

	ctx->bufcnt = 0;

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}
704
/*
 * Outer HMAC pass, done in software with the fallback shash:
 * hash(opad || inner_digest), written back over req->result.
 */
static int omap_sham_finish_hmac(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	/* on-stack shash descriptor sized for the fallback algorithm */
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(bctx->shash)];
	} desc;

	desc.shash.tfm = bctx->shash;
	desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

	return crypto_shash_init(&desc.shash) ?:
	       crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
	       crypto_shash_finup(&desc.shash, req->result, ds, req->result);
}
723
/*
 * Finalize a completed request: export the digest to req->result and,
 * for HMAC, run the outer hash over it.
 */
static int omap_sham_finish(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int err = 0;

	if (ctx->digcnt) {
		omap_sham_copy_ready_hash(req);
		if (ctx->flags & BIT(FLAGS_HMAC))
			err = omap_sham_finish_hmac(req);
	}

	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);

	return err;
}
740
/*
 * Common completion path: capture the hash (on success), clear the
 * busy/state bits, drop the runtime-PM reference, invoke the user
 * completion, and kick the tasklet to start any queued request.
 */
static void omap_sham_finish_req(struct ahash_request *req, int err)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	if (!err) {
		omap_sham_copy_hash(req, 1);
		if (test_bit(FLAGS_FINAL, &dd->flags))
			err = omap_sham_finish(req);
	} else {
		ctx->flags |= BIT(FLAGS_ERROR);
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
			BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));

	pm_runtime_put_sync(dd->dev);

	if (req->base.complete)
		req->base.complete(&req->base, err);

	/* handle new request */
	tasklet_schedule(&dd->done_task);
}
766
/*
 * Enqueue @req (may be NULL when called from the tasklet) and, if the
 * device is idle, dequeue and start the next request.  Returns the
 * enqueue status for @req (-EINPROGRESS/-EBUSY), not the processing
 * result.
 */
static int omap_sham_handle_queue(struct omap_sham_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_sham_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);
	if (test_bit(FLAGS_BUSY, &dd->flags)) {
		/* device busy: the tasklet will pick this up later */
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		set_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->req = req;
	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = omap_sham_hw_init(dd);
	if (err)
		goto err1;

#ifdef OMAP_SHAM_DMA_PRIVATE
	/* legacy DMA setup: constant destination (DIN), 16-word bursts */
	omap_set_dma_dest_params(dd->dma_lch, 0,
			OMAP_DMA_AMODE_CONSTANT,
			dd->phys_base + SHA_REG_DIN(0), 0, 16);

	omap_set_dma_dest_burst_mode(dd->dma_lch,
			OMAP_DMA_DATA_BURST_16);

	omap_set_dma_src_burst_mode(dd->dma_lch,
			OMAP_DMA_DATA_BURST_4);
#endif

	if (ctx->digcnt)
		/* request has changed - restore hash */
		omap_sham_copy_hash(req, 0);

	if (ctx->op == OP_UPDATE) {
		err = omap_sham_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
			/* no final() after finup() */
			err = omap_sham_final_req(dd);
	} else if (ctx->op == OP_FINAL) {
		err = omap_sham_final_req(dd);
	}
err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		omap_sham_finish_req(req, err);

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return ret;
}
838
839static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
840{
841 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
842 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
843 struct omap_sham_dev *dd = tctx->dd;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800844
845 ctx->op = op;
846
Dmitry Kasatkina5d87232010-11-19 16:04:25 +0200847 return omap_sham_handle_queue(dd, req);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800848}
849
850static int omap_sham_update(struct ahash_request *req)
851{
852 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
853
854 if (!req->nbytes)
855 return 0;
856
857 ctx->total = req->nbytes;
858 ctx->sg = req->src;
859 ctx->offset = 0;
860
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +0300861 if (ctx->flags & BIT(FLAGS_FINUP)) {
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800862 if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
863 /*
864 * OMAP HW accel works only with buffers >= 9
865 * will switch to bypass in final()
866 * final has the same request and data
867 */
868 omap_sham_append_sg(ctx);
869 return 0;
Dmitry Kasatkin887c8832010-11-19 16:04:29 +0200870 } else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
871 /*
872 * faster to use CPU for short transfers
873 */
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +0300874 ctx->flags |= BIT(FLAGS_CPU);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800875 }
Dmitry Kasatkin887c8832010-11-19 16:04:29 +0200876 } else if (ctx->bufcnt + ctx->total < ctx->buflen) {
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800877 omap_sham_append_sg(ctx);
878 return 0;
879 }
880
881 return omap_sham_enqueue(req, OP_UPDATE);
882}
883
/*
 * Run a one-shot software digest over @data using the synchronous
 * @shash transform, writing the result to @out.
 *
 * The shash descriptor is built on the stack; its context area is a
 * variable-length array sized by crypto_shash_descsize().  Only the
 * MAY_SLEEP bit of @flags is propagated to the descriptor.
 */
static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
				  const u8 *data, unsigned int len, u8 *out)
{
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(shash)];
	} desc;

	desc.shash.tfm = shash;
	desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_shash_digest(&desc.shash, data, len, out);
}
897
898static int omap_sham_final_shash(struct ahash_request *req)
899{
900 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
901 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
902
903 return omap_sham_shash_digest(tctx->fallback, req->base.flags,
904 ctx->buffer, ctx->bufcnt, req->result);
905}
906
907static int omap_sham_final(struct ahash_request *req)
908{
909 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800910
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +0300911 ctx->flags |= BIT(FLAGS_FINUP);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800912
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +0300913 if (ctx->flags & BIT(FLAGS_ERROR))
Dmitry Kasatkinbf362752011-04-20 13:34:58 +0300914 return 0; /* uncompleted hash is not needed */
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800915
Dmitry Kasatkinbf362752011-04-20 13:34:58 +0300916 /* OMAP HW accel works only with buffers >= 9 */
917 /* HMAC is always >= 9 because ipad == block size */
918 if ((ctx->digcnt + ctx->bufcnt) < 9)
919 return omap_sham_final_shash(req);
920 else if (ctx->bufcnt)
921 return omap_sham_enqueue(req, OP_FINAL);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800922
Dmitry Kasatkinbf362752011-04-20 13:34:58 +0300923 /* copy ready hash (+ finalize hmac) */
924 return omap_sham_finish(req);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800925}
926
927static int omap_sham_finup(struct ahash_request *req)
928{
929 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
930 int err1, err2;
931
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +0300932 ctx->flags |= BIT(FLAGS_FINUP);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800933
934 err1 = omap_sham_update(req);
Markku Kylanpaa455e3382011-04-20 13:34:55 +0300935 if (err1 == -EINPROGRESS || err1 == -EBUSY)
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +0800936 return err1;
937 /*
938 * final() has to be always called to cleanup resources
939 * even if udpate() failed, except EINPROGRESS
940 */
941 err2 = omap_sham_final(req);
942
943 return err1 ?: err2;
944}
945
/* .digest entry point: init followed by finup over the whole request. */
static int omap_sham_digest(struct ahash_request *req)
{
	int err = omap_sham_init(req);

	return err ? err : omap_sham_finup(req);
}
950
951static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
952 unsigned int keylen)
953{
954 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
955 struct omap_sham_hmac_ctx *bctx = tctx->base;
956 int bs = crypto_shash_blocksize(bctx->shash);
957 int ds = crypto_shash_digestsize(bctx->shash);
958 int err, i;
959 err = crypto_shash_setkey(tctx->fallback, key, keylen);
960 if (err)
961 return err;
962
963 if (keylen > bs) {
964 err = omap_sham_shash_digest(bctx->shash,
965 crypto_shash_get_flags(bctx->shash),
966 key, keylen, bctx->ipad);
967 if (err)
968 return err;
969 keylen = ds;
970 } else {
971 memcpy(bctx->ipad, key, keylen);
972 }
973
974 memset(bctx->ipad + keylen, 0, bs - keylen);
975 memcpy(bctx->opad, bctx->ipad, bs);
976
977 for (i = 0; i < bs; i++) {
978 bctx->ipad[i] ^= 0x36;
979 bctx->opad[i] ^= 0x5c;
980 }
981
982 return err;
983}
984
985static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
986{
987 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
988 const char *alg_name = crypto_tfm_alg_name(tfm);
989
990 /* Allocate a fallback and abort if it failed. */
991 tctx->fallback = crypto_alloc_shash(alg_name, 0,
992 CRYPTO_ALG_NEED_FALLBACK);
993 if (IS_ERR(tctx->fallback)) {
994 pr_err("omap-sham: fallback driver '%s' "
995 "could not be loaded.\n", alg_name);
996 return PTR_ERR(tctx->fallback);
997 }
998
999 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
Dmitry Kasatkin798eed52010-11-19 16:04:26 +02001000 sizeof(struct omap_sham_reqctx) + BUFLEN);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001001
1002 if (alg_base) {
1003 struct omap_sham_hmac_ctx *bctx = tctx->base;
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +03001004 tctx->flags |= BIT(FLAGS_HMAC);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001005 bctx->shash = crypto_alloc_shash(alg_base, 0,
1006 CRYPTO_ALG_NEED_FALLBACK);
1007 if (IS_ERR(bctx->shash)) {
1008 pr_err("omap-sham: base driver '%s' "
1009 "could not be loaded.\n", alg_base);
1010 crypto_free_shash(tctx->fallback);
1011 return PTR_ERR(bctx->shash);
1012 }
1013
1014 }
1015
1016 return 0;
1017}
1018
/* tfm init for the plain (non-HMAC) algorithms: no base hash needed. */
static int omap_sham_cra_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, NULL);
}
1023
/* tfm init for hmac(sha1): uses a bare "sha1" shash for key setup. */
static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha1");
}
1028
/* tfm init for hmac(md5): uses a bare "md5" shash for key setup. */
static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "md5");
}
1033
1034static void omap_sham_cra_exit(struct crypto_tfm *tfm)
1035{
1036 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
1037
1038 crypto_free_shash(tctx->fallback);
1039 tctx->fallback = NULL;
1040
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +03001041 if (tctx->flags & BIT(FLAGS_HMAC)) {
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001042 struct omap_sham_hmac_ctx *bctx = tctx->base;
1043 crypto_free_shash(bctx->shash);
1044 }
1045}
1046
/*
 * Algorithms exported to the crypto API: raw SHA1 and MD5 plus their
 * HMAC variants.  All are ASYNC (completed from the tasklet) and
 * NEED_FALLBACK (small totals go through a software shash).
 * Note: MD5's block size equals SHA1's (64 bytes), hence the shared
 * SHA1_BLOCK_SIZE in every entry.
 */
static struct ahash_alg algs[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "omap-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "md5",
		.cra_driver_name	= "omap-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha1)",
		.cra_driver_name	= "omap-hmac-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		/* HMAC entries append the ipad/opad context */
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha1_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(md5)",
		.cra_driver_name	= "omap-hmac-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_md5_init,
		.cra_exit		= omap_sham_cra_exit,
	}
}
};
1145
/*
 * Tasklet bottom half: advances the state machine after an IRQ or DMA
 * completion.  If the device is idle it just pumps the queue; otherwise
 * it either restarts DMA for the next chunk or finishes the request.
 */
static void omap_sham_done_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
	int err = 0;

	/* not busy: nothing in flight, try to start a queued request */
	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
		omap_sham_handle_queue(dd, NULL);
		return;
	}

	if (test_bit(FLAGS_CPU, &dd->flags)) {
		/* PIO path: a ready output means this step is done */
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
			goto finish;
	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
			omap_sham_update_dma_stop(dd);
			if (dd->err) {
				err = dd->err;
				goto finish;
			}
		}
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
			/* hash or semi-hash ready */
			clear_bit(FLAGS_DMA_READY, &dd->flags);
			/* more data pending: kick off the next DMA chunk */
			err = omap_sham_update_dma_start(dd);
			if (err != -EINPROGRESS)
				goto finish;
		}
	}

	/* still in progress; wait for the next IRQ/DMA callback */
	return;

finish:
	dev_dbg(dd->dev, "update done: err: %d\n", err);
	/* finish current request */
	omap_sham_finish_req(dd->req, err);
}
1183
/*
 * Hard-IRQ handler: acknowledges the SHAM interrupt and defers result
 * collection to the done tasklet.
 */
static irqreturn_t omap_sham_irq(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
		/* final -> allow device to go to power-saving mode */
		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);

	/*
	 * Write OUTPUT_READY back to CTRL (presumably this acks/clears the
	 * interrupt condition - see TRM); the read-back flushes the write.
	 */
	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
				 SHA_REG_CTRL_OUTPUT_READY);
	omap_sham_read(dd, SHA_REG_CTRL);

	/* spurious interrupt: no request in flight, nothing to schedule */
	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
		dev_warn(dd->dev, "Interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	set_bit(FLAGS_OUTPUT_READY, &dd->flags);
	tasklet_schedule(&dd->done_task);

	return IRQ_HANDLED;
}
1206
Mark A. Greerdfd061d2012-12-21 10:04:04 -07001207#ifdef OMAP_SHAM_DMA_PRIVATE
/*
 * Legacy (private) DMA completion callback.  Records errors in dd->err
 * and defers all further handling to the done tasklet.
 */
static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
{
	struct omap_sham_dev *dd = data;

	/* anything but a plain block-complete IRQ is a DMA error */
	if (ch_status != OMAP_DMA_BLOCK_IRQ) {
		pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
		dd->err = -EIO;
		clear_bit(FLAGS_INIT, &dd->flags);/* request to re-initialize */
	}

	set_bit(FLAGS_DMA_READY, &dd->flags);
	tasklet_schedule(&dd->done_task);
}
1221
1222static int omap_sham_dma_init(struct omap_sham_dev *dd)
1223{
1224 int err;
1225
1226 dd->dma_lch = -1;
1227
1228 err = omap_request_dma(dd->dma, dev_name(dd->dev),
1229 omap_sham_dma_callback, dd, &dd->dma_lch);
1230 if (err) {
1231 dev_err(dd->dev, "Unable to request DMA channel\n");
1232 return err;
1233 }
Samu Onkalo584db6a2010-09-03 19:20:19 +08001234
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001235 return 0;
1236}
1237
1238static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
1239{
1240 if (dd->dma_lch >= 0) {
1241 omap_free_dma(dd->dma_lch);
1242 dd->dma_lch = -1;
1243 }
1244}
Mark A. Greerdfd061d2012-12-21 10:04:04 -07001245#endif
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001246
1247static int __devinit omap_sham_probe(struct platform_device *pdev)
1248{
1249 struct omap_sham_dev *dd;
1250 struct device *dev = &pdev->dev;
1251 struct resource *res;
Mark A. Greerdfd061d2012-12-21 10:04:04 -07001252#ifndef OMAP_SHAM_DMA_PRIVATE
1253 dma_cap_mask_t mask;
1254 unsigned dma_chan;
1255#endif
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001256 int err, i, j;
1257
1258 dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
1259 if (dd == NULL) {
1260 dev_err(dev, "unable to alloc data struct.\n");
1261 err = -ENOMEM;
1262 goto data_err;
1263 }
1264 dd->dev = dev;
1265 platform_set_drvdata(pdev, dd);
1266
1267 INIT_LIST_HEAD(&dd->list);
1268 spin_lock_init(&dd->lock);
1269 tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001270 crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
1271
1272 dd->irq = -1;
1273
1274 /* Get the base address */
1275 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1276 if (!res) {
1277 dev_err(dev, "no MEM resource info\n");
1278 err = -ENODEV;
1279 goto res_err;
1280 }
1281 dd->phys_base = res->start;
1282
1283 /* Get the DMA */
1284 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1285 if (!res) {
1286 dev_err(dev, "no DMA resource info\n");
1287 err = -ENODEV;
1288 goto res_err;
1289 }
Mark A. Greerdfd061d2012-12-21 10:04:04 -07001290#ifdef OMAP_SHAM_DMA_PRIVATE
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001291 dd->dma = res->start;
Mark A. Greerdfd061d2012-12-21 10:04:04 -07001292#else
1293 dma_chan = res->start;
1294#endif
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001295
1296 /* Get the IRQ */
1297 dd->irq = platform_get_irq(pdev, 0);
1298 if (dd->irq < 0) {
1299 dev_err(dev, "no IRQ resource info\n");
1300 err = dd->irq;
1301 goto res_err;
1302 }
1303
1304 err = request_irq(dd->irq, omap_sham_irq,
1305 IRQF_TRIGGER_LOW, dev_name(dev), dd);
1306 if (err) {
1307 dev_err(dev, "unable to request irq.\n");
1308 goto res_err;
1309 }
1310
Mark A. Greerdfd061d2012-12-21 10:04:04 -07001311#ifdef OMAP_SHAM_DMA_PRIVATE
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001312 err = omap_sham_dma_init(dd);
1313 if (err)
1314 goto dma_err;
Mark A. Greerdfd061d2012-12-21 10:04:04 -07001315#else
1316 dma_cap_zero(mask);
1317 dma_cap_set(DMA_SLAVE, mask);
1318
1319 dd->dma_lch = dma_request_channel(mask, omap_dma_filter_fn, &dma_chan);
1320 if (!dd->dma_lch) {
1321 dev_err(dev, "unable to obtain RX DMA engine channel %u\n",
1322 dma_chan);
1323 err = -ENXIO;
1324 goto dma_err;
1325 }
1326#endif
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001327
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001328 dd->io_base = ioremap(dd->phys_base, SZ_4K);
1329 if (!dd->io_base) {
1330 dev_err(dev, "can't ioremap\n");
1331 err = -ENOMEM;
1332 goto io_err;
1333 }
1334
Mark A. Greerb359f032012-12-21 10:04:02 -07001335 pm_runtime_enable(dev);
1336 pm_runtime_get_sync(dev);
1337
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001338 dev_info(dev, "hw accel on OMAP rev %u.%u\n",
1339 (omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4,
1340 omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR);
Mark A. Greerb359f032012-12-21 10:04:02 -07001341
1342 pm_runtime_put_sync(&pdev->dev);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001343
1344 spin_lock(&sham.lock);
1345 list_add_tail(&dd->list, &sham.dev_list);
1346 spin_unlock(&sham.lock);
1347
1348 for (i = 0; i < ARRAY_SIZE(algs); i++) {
1349 err = crypto_register_ahash(&algs[i]);
1350 if (err)
1351 goto err_algs;
1352 }
1353
1354 return 0;
1355
1356err_algs:
1357 for (j = 0; j < i; j++)
1358 crypto_unregister_ahash(&algs[j]);
1359 iounmap(dd->io_base);
Mark A. Greerb359f032012-12-21 10:04:02 -07001360 pm_runtime_disable(dev);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001361io_err:
Mark A. Greerdfd061d2012-12-21 10:04:04 -07001362#ifdef OMAP_SHAM_DMA_PRIVATE
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001363 omap_sham_dma_cleanup(dd);
Mark A. Greerdfd061d2012-12-21 10:04:04 -07001364#else
1365 dma_release_channel(dd->dma_lch);
1366#endif
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001367dma_err:
1368 if (dd->irq >= 0)
1369 free_irq(dd->irq, dd);
1370res_err:
1371 kfree(dd);
1372 dd = NULL;
1373data_err:
1374 dev_err(dev, "initialization failed.\n");
1375
1376 return err;
1377}
1378
1379static int __devexit omap_sham_remove(struct platform_device *pdev)
1380{
1381 static struct omap_sham_dev *dd;
1382 int i;
1383
1384 dd = platform_get_drvdata(pdev);
1385 if (!dd)
1386 return -ENODEV;
1387 spin_lock(&sham.lock);
1388 list_del(&dd->list);
1389 spin_unlock(&sham.lock);
1390 for (i = 0; i < ARRAY_SIZE(algs); i++)
1391 crypto_unregister_ahash(&algs[i]);
1392 tasklet_kill(&dd->done_task);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001393 iounmap(dd->io_base);
Mark A. Greerb359f032012-12-21 10:04:02 -07001394 pm_runtime_disable(&pdev->dev);
Mark A. Greerdfd061d2012-12-21 10:04:04 -07001395#ifdef OMAP_SHAM_DMA_PRIVATE
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001396 omap_sham_dma_cleanup(dd);
Mark A. Greerdfd061d2012-12-21 10:04:04 -07001397#else
1398 dma_release_channel(dd->dma_lch);
1399#endif
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001400 if (dd->irq >= 0)
1401 free_irq(dd->irq, dd);
1402 kfree(dd);
1403 dd = NULL;
1404
1405 return 0;
1406}
1407
Mark A. Greer3b3f4402012-12-21 10:04:03 -07001408#ifdef CONFIG_PM_SLEEP
/* System-sleep suspend: drop the runtime PM reference so the device
 * may power down (paired with the get in resume below). */
static int omap_sham_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);
	return 0;
}
1414
/* System-sleep resume: re-take the runtime PM reference dropped at
 * suspend time, powering the module back up. */
static int omap_sham_resume(struct device *dev)
{
	pm_runtime_get_sync(dev);
	return 0;
}
1420#endif
1421
/* System sleep PM callbacks (suspend/resume only; no runtime PM ops). */
static const struct dev_pm_ops omap_sham_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap_sham_suspend, omap_sham_resume)
};
1425
/* Platform driver glue; bound by name to the "omap-sham" device. */
static struct platform_driver omap_sham_driver = {
	.probe	= omap_sham_probe,
	.remove	= omap_sham_remove,
	.driver	= {
		.name	= "omap-sham",
		.owner	= THIS_MODULE,
		.pm	= &omap_sham_pm_ops,
	},
};
1435
/* Module entry: register the platform driver (probe runs on bind). */
static int __init omap_sham_mod_init(void)
{
	return platform_driver_register(&omap_sham_driver);
}
1440
/* Module exit: unregister the platform driver (remove runs on unbind). */
static void __exit omap_sham_mod_exit(void)
{
	platform_driver_unregister(&omap_sham_driver);
}
1445
1446module_init(omap_sham_mod_init);
1447module_exit(omap_sham_mod_exit);
1448
1449MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
1450MODULE_LICENSE("GPL v2");
1451MODULE_AUTHOR("Dmitry Kasatkin");