/*
 * Cryptographic API.
 *
 * Support for OMAP SHA1/MD5 HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from old omap-sha1-md5.c driver.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>

#define SHA_REG_DIGEST(x)		(0x00 + ((x) * 0x04))
#define SHA_REG_DIN(x)			(0x1C + ((x) * 0x04))

#define SHA1_MD5_BLOCK_SIZE		SHA1_BLOCK_SIZE
#define MD5_DIGEST_SIZE			16

#define DST_MAXBURST			16
#define DMA_MIN				(DST_MAXBURST * sizeof(u32))

#define SHA_REG_DIGCNT			0x14

#define SHA_REG_CTRL			0x18
#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
#define SHA_REG_CTRL_ALGO		(1 << 2)
#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)

#define SHA_REG_REV			0x5C
#define SHA_REG_REV_MAJOR		0xF0
#define SHA_REG_REV_MINOR		0x0F

#define SHA_REG_MASK			0x60
#define SHA_REG_MASK_DMA_EN		(1 << 3)
#define SHA_REG_MASK_IT_EN		(1 << 2)
#define SHA_REG_MASK_SOFTRESET		(1 << 1)
#define SHA_REG_AUTOIDLE		(1 << 0)

#define SHA_REG_SYSSTATUS		0x64
#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define DEFAULT_TIMEOUT_INTERVAL	HZ

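/*
 * Bit numbers below 16 index dd->flags and are manipulated with atomic
 * bitops (set_bit/test_bit); bits from 16 up are per-request and are
 * used as BIT() masks in ctx->flags, so the two ranges never collide.
 */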
/* mostly device flags */
#define FLAGS_BUSY		0
#define FLAGS_FINAL		1
#define FLAGS_DMA_ACTIVE	2
#define FLAGS_OUTPUT_READY	3
#define FLAGS_INIT		4
#define FLAGS_CPU		5
#define FLAGS_DMA_READY		6
/* context flags */
#define FLAGS_FINUP		16
#define FLAGS_SG		17
#define FLAGS_SHA1		18
#define FLAGS_HMAC		19
#define FLAGS_ERROR		20

#define OP_UPDATE	1
#define OP_FINAL	2

#define OMAP_ALIGN_MASK		(sizeof(u32)-1)
#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))

#define BUFLEN		PAGE_SIZE

struct omap_sham_dev;

struct omap_sham_reqctx {
	struct omap_sham_dev	*dd;
	unsigned long		flags;
	unsigned long		op;

	u8			digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED;
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;
	dma_addr_t		dma_addr;

	/* walk state */
	struct scatterlist	*sg;
	struct scatterlist	sgl;
	unsigned int		offset;	/* offset in current sg */
	unsigned int		total;	/* total request */

	u8			buffer[0] OMAP_ALIGNED;
};

struct omap_sham_hmac_ctx {
	struct crypto_shash	*shash;
	u8			ipad[SHA1_MD5_BLOCK_SIZE];
	u8			opad[SHA1_MD5_BLOCK_SIZE];
};

struct omap_sham_ctx {
	struct omap_sham_dev	*dd;

	unsigned long		flags;

	/* fallback stuff */
	struct crypto_shash	*fallback;

	struct omap_sham_hmac_ctx base[0];
};

#define OMAP_SHAM_QUEUE_LENGTH	1

struct omap_sham_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	void __iomem		*io_base;
	int			irq;
	spinlock_t		lock;
	int			err;
	unsigned int		dma;
	struct dma_chan		*dma_lch;
	struct tasklet_struct	done_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;
};

struct omap_sham_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
	unsigned long		flags;
};

static struct omap_sham_drv sham = {
	.dev_list = LIST_HEAD_INIT(sham.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};

static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_sham_write(struct omap_sham_dev *dd,
					u32 offset, u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_sham_read(dd, address);
	val &= ~mask;
	val |= value;
	omap_sham_write(dd, address, val);
}

static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
{
	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;

	while (!(omap_sham_read(dd, offset) & bit)) {
		if (time_is_before_jiffies(timeout))
			return -ETIMEDOUT;
	}

	return 0;
}

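/*
 * Move the digest between the request context and the DIGEST registers:
 * out != 0 saves the intermediate hash from the IP into ctx->digest,
 * out == 0 restores a saved hash before resuming an interrupted request.
 */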
static void omap_sham_copy_hash(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	int i;

	/* MD5 is almost unused. So copy sha1 size to reduce code */
	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
		if (out)
			hash[i] = omap_sham_read(ctx->dd,
						SHA_REG_DIGEST(i));
		else
			omap_sham_write(ctx->dd,
					SHA_REG_DIGEST(i), hash[i]);
	}
}

static void omap_sham_copy_ready_hash(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *in = (u32 *)ctx->digest;
	u32 *hash = (u32 *)req->result;
	int i;

	if (!hash)
		return;

	if (likely(ctx->flags & BIT(FLAGS_SHA1))) {
		/* SHA1 results are in big endian */
		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
			hash[i] = be32_to_cpu(in[i]);
	} else {
		/* MD5 results are in little endian */
		for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
			hash[i] = le32_to_cpu(in[i]);
	}
}

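/*
 * Power the module up via runtime PM and, on first use only, soft-reset
 * the IP and wait for RESETDONE; FLAGS_INIT records that the reset has
 * already been done for this device.
 */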
static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
	pm_runtime_get_sync(dd->dev);

	if (!test_bit(FLAGS_INIT, &dd->flags)) {
		omap_sham_write_mask(dd, SHA_REG_MASK,
			SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);

		if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
					SHA_REG_SYSSTATUS_RESETDONE))
			return -ETIMEDOUT;

		set_bit(FLAGS_INIT, &dd->flags);
		dd->err = 0;
	}

	return 0;
}

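/*
 * Program one transfer: the byte count goes into the CTRL LENGTH field
 * (bits 31:5), ALGO selects SHA1 vs MD5, ALGO_CONST marks the first
 * block of a hash and CLOSE_HASH the final one; DIGCNT is restored when
 * continuing an earlier request.
 */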
static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
				 int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val = length << 5, mask;

	if (likely(ctx->digcnt))
		omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);

	omap_sham_write_mask(dd, SHA_REG_MASK,
		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
	/*
	 * Setting ALGO_CONST only for the first iteration
	 * and CLOSE_HASH only for the last one.
	 */
	if (ctx->flags & BIT(FLAGS_SHA1))
		val |= SHA_REG_CTRL_ALGO;
	if (!ctx->digcnt)
		val |= SHA_REG_CTRL_ALGO_CONST;
	if (final)
		val |= SHA_REG_CTRL_CLOSE_HASH;

	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
	       SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;

	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
}

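/*
 * PIO path: wait for INPUT_READY, then feed the buffer into the DIN
 * registers one 32-bit word at a time; completion is signalled by the
 * OUTPUT_READY interrupt, hence the -EINPROGRESS return.
 */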
static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	omap_sham_write_ctrl(dd, length, final, 0);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt += length;

	if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
		return -ETIMEDOUT;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_CPU, &dd->flags);

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);

	return -EINPROGRESS;
}

static void omap_sham_dma_callback(void *param)
{
	struct omap_sham_dev *dd = param;

	set_bit(FLAGS_DMA_READY, &dd->flags);
	tasklet_schedule(&dd->done_task);
}

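/*
 * DMA path: configure the slave channel to write 32-bit words into
 * DIN(0) in bursts of DST_MAXBURST words, then submit either a
 * one-entry scatterlist or a single mapped buffer. The transfer length
 * is rounded up to a whole number of DMA frames (DMA_MIN bytes).
 */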
static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
			      size_t length, int final, int is_sg)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config cfg;
	int len32, ret;

	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	memset(&cfg, 0, sizeof(cfg));

	cfg.dst_addr = dd->phys_base + SHA_REG_DIN(0);
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_maxburst = DST_MAXBURST;

	ret = dmaengine_slave_config(dd->dma_lch, &cfg);
	if (ret) {
		pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
		return ret;
	}

	len32 = DIV_ROUND_UP(length, DMA_MIN) * DMA_MIN;

	if (is_sg) {
		/*
		 * The SG entry passed in may not have the 'length' member
		 * set correctly so use a local SG entry (sgl) with the
		 * proper value for 'length' instead.  If this is not done,
		 * the dmaengine may try to DMA the incorrect amount of data.
		 */
		sg_init_table(&ctx->sgl, 1);
		ctx->sgl.page_link = ctx->sg->page_link;
		ctx->sgl.offset = ctx->sg->offset;
		sg_dma_len(&ctx->sgl) = len32;
		sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);

		tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl, 1,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	} else {
		tx = dmaengine_prep_slave_single(dd->dma_lch, dma_addr, len32,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (!tx) {
		dev_err(dd->dev, "prep_slave_sg/single() failed\n");
		return -EINVAL;
	}

	tx->callback = omap_sham_dma_callback;
	tx->callback_param = dd;

	omap_sham_write_ctrl(dd, length, final, 1);

	ctx->digcnt += length;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);

	dmaengine_submit(tx);
	dma_async_issue_pending(dd->dma_lch);

	return -EINPROGRESS;
}

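/*
 * Buffering helpers: append_buffer() copies as much of the caller's
 * data as fits into the word-aligned bounce buffer, append_sg() drains
 * the request scatterlist into that buffer.
 */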
static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
				const u8 *data, size_t length)
{
	size_t count = min(length, ctx->buflen - ctx->bufcnt);

	count = min(count, ctx->total);
	if (count <= 0)
		return 0;
	memcpy(ctx->buffer + ctx->bufcnt, data, count);
	ctx->bufcnt += count;

	return count;
}

static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
{
	size_t count;

	while (ctx->sg) {
		count = omap_sham_append_buffer(ctx,
				sg_virt(ctx->sg) + ctx->offset,
				ctx->sg->length - ctx->offset);
		if (!count)
			break;
		ctx->offset += count;
		ctx->total -= count;
		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}

static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
					struct omap_sham_reqctx *ctx,
					size_t length, int final)
{
	int ret;

	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
				       DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
		return -EINVAL;
	}

	ctx->flags &= ~BIT(FLAGS_SG);

	ret = omap_sham_xmit_dma(dd, ctx->dma_addr, length, final, 0);
	if (ret)
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
				 DMA_TO_DEVICE);

	return ret;
}

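/*
 * Slow DMA path: gather data into the bounce buffer first and send the
 * buffer once it is full or the request is final, at the cost of an
 * extra copy.
 */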
static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int final;
	size_t count;

	omap_sham_append_sg(ctx);

	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;

	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
					 ctx->bufcnt, ctx->digcnt, final);

	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		return omap_sham_xmit_dma_map(dd, ctx, count, final);
	}

	return 0;
}

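/*
 * Fast DMA path: map the current scatterlist entry and hand it to the
 * engine directly, with no copy. This requires a word-aligned entry
 * whose length is block-size aligned (unless it is the last one) and a
 * transfer of at least one full DMA frame; anything smaller or
 * misaligned falls back to the slow buffered path.
 */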
/* Start address alignment */
#define SG_AA(sg)	(IS_ALIGNED(sg->offset, sizeof(u32)))
/* SHA1 block size alignment */
#define SG_SA(sg)	(IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))

static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length, final, tail;
	struct scatterlist *sg;
	int ret;

	if (!ctx->total)
		return 0;

	if (ctx->bufcnt || ctx->offset)
		return omap_sham_update_dma_slow(dd);

	/*
	 * Don't use the sg interface when the transfer size is less
	 * than the number of elements in a DMA frame.  Otherwise,
	 * the dmaengine infrastructure will calculate that it needs
	 * to transfer 0 frames which ultimately fails.
	 */
	if (ctx->total < (DST_MAXBURST * sizeof(u32)))
		return omap_sham_update_dma_slow(dd);

	dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
			ctx->digcnt, ctx->bufcnt, ctx->total);

	sg = ctx->sg;

	if (!SG_AA(sg))
		return omap_sham_update_dma_slow(dd);

	if (!sg_is_last(sg) && !SG_SA(sg))
		/* size is not SHA1_BLOCK_SIZE aligned */
		return omap_sham_update_dma_slow(dd);

	length = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & BIT(FLAGS_FINUP))) {
			/* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
			tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
			/* without finup() we need one block to close hash */
			if (!tail)
				tail = SHA1_MD5_BLOCK_SIZE;
			length -= tail;
		}
	}

	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	ctx->flags |= BIT(FLAGS_SG);

	ctx->total -= length;
	ctx->offset = length; /* offset where to start slow */

	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;

	ret = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final, 1);
	if (ret)
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);

	return ret;
}

static int omap_sham_update_cpu(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;

	omap_sham_append_sg(ctx);
	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}

static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	dmaengine_terminate_all(dd->dma_lch);

	if (ctx->flags & BIT(FLAGS_SG)) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
				 DMA_TO_DEVICE);
	}

	return 0;
}

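/*
 * ahash .init: bind the request to a device, reset the request context
 * and, for HMAC, preload the buffer with the precomputed ipad so the
 * first hardware pass hashes it as the leading block.
 */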
static int omap_sham_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = NULL, *tmp;

	spin_lock_bh(&sham.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &sham.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&sham.lock);

	ctx->dd = dd;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
		ctx->flags |= BIT(FLAGS_SHA1);

	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->buflen = BUFLEN;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;

		memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
		ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
		ctx->flags |= BIT(FLAGS_HMAC);
	}

	return 0;
}

static int omap_sham_update_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
		 ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);

	if (ctx->flags & BIT(FLAGS_CPU))
		err = omap_sham_update_cpu(dd);
	else
		err = omap_sham_update_dma_start(dd);

	/* wait for dma completion before can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);

	return err;
}

static int omap_sham_final_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0, use_dma = 1;

	if (ctx->bufcnt <= DMA_MIN)
		/* faster to handle last block with cpu */
		use_dma = 0;

	if (use_dma)
		err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
	else
		err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);

	ctx->bufcnt = 0;

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}

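/*
 * HMAC outer pass, done in software on the fallback shash:
 * result = H(opad || inner_digest), with the hardware-produced inner
 * digest already sitting in req->result.
 */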
static int omap_sham_finish_hmac(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(bctx->shash)];
	} desc;

	desc.shash.tfm = bctx->shash;
	desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

	return crypto_shash_init(&desc.shash) ?:
	       crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
	       crypto_shash_finup(&desc.shash, req->result, ds, req->result);
}

static int omap_sham_finish(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int err = 0;

	if (ctx->digcnt) {
		omap_sham_copy_ready_hash(req);
		if (ctx->flags & BIT(FLAGS_HMAC))
			err = omap_sham_finish_hmac(req);
	}

	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);

	return err;
}

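/*
 * Complete the current request: on success read back the digest (and
 * finalize the HMAC), then clear the device-state flags, drop the
 * runtime PM reference and call the completion callback; the tasklet
 * is scheduled again to pick up any queued request.
 */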
static void omap_sham_finish_req(struct ahash_request *req, int err)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	if (!err) {
		omap_sham_copy_hash(req, 1);
		if (test_bit(FLAGS_FINAL, &dd->flags))
			err = omap_sham_finish(req);
	} else {
		ctx->flags |= BIT(FLAGS_ERROR);
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
			BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));

	pm_runtime_put_sync(dd->dev);

	if (req->base.complete)
		req->base.complete(&req->base, err);

	/* handle new request */
	tasklet_schedule(&dd->done_task);
}

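/*
 * Enqueue a request (if given) and, unless the device is already busy,
 * dequeue and start the next one. The return value is the crypto-queue
 * status of the enqueued request; errors in starting a dequeued request
 * are reported through that request's own completion.
 */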
static int omap_sham_handle_queue(struct omap_sham_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_sham_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);
	if (test_bit(FLAGS_BUSY, &dd->flags)) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		set_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->req = req;
	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = omap_sham_hw_init(dd);
	if (err)
		goto err1;

	if (ctx->digcnt)
		/* request has changed - restore hash */
		omap_sham_copy_hash(req, 0);

	if (ctx->op == OP_UPDATE) {
		err = omap_sham_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
			/* no final() after finup() */
			err = omap_sham_final_req(dd);
	} else if (ctx->op == OP_FINAL) {
		err = omap_sham_final_req(dd);
	}
err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		omap_sham_finish_req(req, err);

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return ret;
}

static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_dev *dd = tctx->dd;

	ctx->op = op;

	return omap_sham_handle_queue(dd, req);
}

static int omap_sham_update(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & BIT(FLAGS_FINUP)) {
		if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
			/*
			 * OMAP HW accel works only with buffers >= 9
			 * will switch to bypass in final()
			 * final has the same request and data
			 */
			omap_sham_append_sg(ctx);
			return 0;
		} else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
			/*
			 * faster to use CPU for short transfers
			 */
			ctx->flags |= BIT(FLAGS_CPU);
		}
	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
		omap_sham_append_sg(ctx);
		return 0;
	}

	return omap_sham_enqueue(req, OP_UPDATE);
}

static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
				  const u8 *data, unsigned int len, u8 *out)
{
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(shash)];
	} desc;

	desc.shash.tfm = shash;
	desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_shash_digest(&desc.shash, data, len, out);
}

static int omap_sham_final_shash(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	return omap_sham_shash_digest(tctx->fallback, req->base.flags,
				      ctx->buffer, ctx->bufcnt, req->result);
}

static int omap_sham_final(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= BIT(FLAGS_FINUP);

	if (ctx->flags & BIT(FLAGS_ERROR))
		return 0; /* uncompleted hash is not needed */

	/* OMAP HW accel works only with buffers >= 9 */
	/* HMAC is always >= 9 because ipad == block size */
	if ((ctx->digcnt + ctx->bufcnt) < 9)
		return omap_sham_final_shash(req);
	else if (ctx->bufcnt)
		return omap_sham_enqueue(req, OP_FINAL);

	/* copy ready hash (+ finalize hmac) */
	return omap_sham_finish(req);
}

static int omap_sham_finup(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= BIT(FLAGS_FINUP);

	err1 = omap_sham_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;
	/*
	 * final() always has to be called to clean up resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = omap_sham_final(req);

	return err1 ?: err2;
}

static int omap_sham_digest(struct ahash_request *req)
{
	return omap_sham_init(req) ?: omap_sham_finup(req);
}

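/*
 * HMAC setkey: keys longer than the block size are first hashed down to
 * the digest size, shorter ones are zero-padded; the result is expanded
 * into the precomputed ipad (key XOR 0x36) and opad (key XOR 0x5c)
 * blocks.
 */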
static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	int err, i;

	err = crypto_shash_setkey(tctx->fallback, key, keylen);
	if (err)
		return err;

	if (keylen > bs) {
		err = omap_sham_shash_digest(bctx->shash,
				crypto_shash_get_flags(bctx->shash),
				key, keylen, bctx->ipad);
		if (err)
			return err;
		keylen = ds;
	} else {
		memcpy(bctx->ipad, key, keylen);
	}

	memset(bctx->ipad + keylen, 0, bs - keylen);
	memcpy(bctx->opad, bctx->ipad, bs);

	for (i = 0; i < bs; i++) {
		bctx->ipad[i] ^= 0x36;
		bctx->opad[i] ^= 0x5c;
	}

	return err;
}

static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("omap-sham: fallback driver '%s' "
				"could not be loaded.\n", alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct omap_sham_reqctx) + BUFLEN);

	if (alg_base) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		tctx->flags |= BIT(FLAGS_HMAC);
		bctx->shash = crypto_alloc_shash(alg_base, 0,
						CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(bctx->shash)) {
			pr_err("omap-sham: base driver '%s' "
					"could not be loaded.\n", alg_base);
			crypto_free_shash(tctx->fallback);
			return PTR_ERR(bctx->shash);
		}
	}

	return 0;
}

static int omap_sham_cra_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, NULL);
}

static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha1");
}

static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "md5");
}

static void omap_sham_cra_exit(struct crypto_tfm *tfm)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		crypto_free_shash(bctx->shash);
	}
}

static struct ahash_alg algs[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "omap-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "md5",
		.cra_driver_name	= "omap-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha1)",
		.cra_driver_name	= "omap-hmac-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha1_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(md5)",
		.cra_driver_name	= "omap-hmac-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_md5_init,
		.cra_exit		= omap_sham_cra_exit,
	}
}
};

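/*
 * Bottom half shared by the IRQ and DMA callbacks: tear down a finished
 * DMA transfer, restart DMA while the request still has data, and
 * finish the request once OUTPUT_READY has fired; when the device is
 * idle it just kicks the queue.
 */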
static void omap_sham_done_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
	int err = 0;

	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
		omap_sham_handle_queue(dd, NULL);
		return;
	}

	if (test_bit(FLAGS_CPU, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
			goto finish;
	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
			omap_sham_update_dma_stop(dd);
			if (dd->err) {
				err = dd->err;
				goto finish;
			}
		}
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
			/* hash or semi-hash ready */
			clear_bit(FLAGS_DMA_READY, &dd->flags);
			err = omap_sham_update_dma_start(dd);
			if (err != -EINPROGRESS)
				goto finish;
		}
	}

	return;

finish:
	dev_dbg(dd->dev, "update done: err: %d\n", err);
	/* finish current request */
	omap_sham_finish_req(dd->req, err);
}

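/*
 * OUTPUT_READY interrupt handler: acknowledge the interrupt, record
 * that a (partial) digest is available and defer the real work to the
 * tasklet. After the final block the LENGTH field is cleared so the IP
 * may enter its power-saving state.
 */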
static irqreturn_t omap_sham_irq(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
		/* final -> allow device to go to power-saving mode */
		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);

	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
				 SHA_REG_CTRL_OUTPUT_READY);
	omap_sham_read(dd, SHA_REG_CTRL);

	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
		dev_warn(dd->dev, "Interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	set_bit(FLAGS_OUTPUT_READY, &dd->flags);
	tasklet_schedule(&dd->done_task);

	return IRQ_HANDLED;
}

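/*
 * Resource discovery: omap_sham_get_res_of() takes the register window
 * and IRQ from the device tree (the DMA channel is then found by name
 * through dma_request_slave_channel_compat()), while
 * omap_sham_get_res_pdev() uses classic platform resources including an
 * IORESOURCE_DMA request line.
 */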
#ifdef CONFIG_OF
static const struct of_device_id omap_sham_of_match[] = {
	{
		.compatible	= "ti,omap2-sham",
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_sham_of_match);

static int omap_sham_get_res_of(struct omap_sham_dev *dd,
		struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	int err = 0;

	match = of_match_device(of_match_ptr(omap_sham_of_match), dev);
	if (!match) {
		dev_err(dev, "no compatible OF match\n");
		err = -EINVAL;
		goto err;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		err = -EINVAL;
		goto err;
	}

	dd->irq = of_irq_to_resource(node, 0, NULL);
	if (!dd->irq) {
		dev_err(dev, "can't translate OF irq value\n");
		err = -EINVAL;
		goto err;
	}

	dd->dma = -1; /* Dummy value that's unused */

err:
	return err;
}
#else
static const struct of_device_id omap_sham_of_match[] = {};

static int omap_sham_get_res_of(struct omap_sham_dev *dd,
		struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif

static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
		struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	int err = 0;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err;
	}
	memcpy(res, r, sizeof(*res));

	/* Get the IRQ */
	dd->irq = platform_get_irq(pdev, 0);
	if (dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = dd->irq;
		goto err;
	}

	/* Get the DMA */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!r) {
		dev_err(dev, "no DMA resource info\n");
		err = -ENODEV;
		goto err;
	}
	dd->dma = r->start;

err:
	return err;
}

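/*
 * Probe: allocate device state, discover resources (DT or platform),
 * map the registers, grab the IRQ and a slave DMA channel, read the
 * hardware revision under a temporary runtime PM reference, then
 * register the four ahash algorithms.
 */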
static int __devinit omap_sham_probe(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	struct device *dev = &pdev->dev;
	struct resource res;
	dma_cap_mask_t mask;
	int err, i, j;

	dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto data_err;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	spin_lock_init(&dd->lock);
	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);

	err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
			       omap_sham_get_res_pdev(dd, pdev, &res);
	if (err)
		goto res_err;

	dd->io_base = devm_request_and_ioremap(dev, &res);
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto res_err;
	}
	dd->phys_base = res.start;

	err = request_irq(dd->irq, omap_sham_irq,
			IRQF_TRIGGER_LOW, dev_name(dev), dd);
	if (err) {
		dev_err(dev, "unable to request irq.\n");
		goto res_err;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dd->dma_lch = dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
						       &dd->dma, dev, "rx");
	if (!dd->dma_lch) {
		dev_err(dev, "unable to obtain RX DMA engine channel %u\n",
			dd->dma);
		err = -ENXIO;
		goto dma_err;
	}

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
		(omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4,
		omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR);

	pm_runtime_put_sync(&pdev->dev);

	spin_lock(&sham.lock);
	list_add_tail(&dd->list, &sham.dev_list);
	spin_unlock(&sham.lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_ahash(&algs[i]);
		if (err)
			goto err_algs;
	}

	return 0;

err_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&algs[j]);
	pm_runtime_disable(dev);
	dma_release_channel(dd->dma_lch);
dma_err:
	free_irq(dd->irq, dd);
res_err:
	kfree(dd);
	dd = NULL;
data_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

static int __devexit omap_sham_remove(struct platform_device *pdev)
{
	static struct omap_sham_dev *dd;
	int i;

	dd = platform_get_drvdata(pdev);
	if (!dd)
		return -ENODEV;
	spin_lock(&sham.lock);
	list_del(&dd->list);
	spin_unlock(&sham.lock);
	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_ahash(&algs[i]);
	tasklet_kill(&dd->done_task);
	pm_runtime_disable(&pdev->dev);
	dma_release_channel(dd->dma_lch);
	free_irq(dd->irq, dd);
	kfree(dd);
	dd = NULL;

	return 0;
}

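/*
 * System sleep just drops/reacquires the runtime PM reference so the
 * module is powered down across suspend and powered back up on resume.
 */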
#ifdef CONFIG_PM_SLEEP
static int omap_sham_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);
	return 0;
}

static int omap_sham_resume(struct device *dev)
{
	pm_runtime_get_sync(dev);
	return 0;
}
#endif

static const struct dev_pm_ops omap_sham_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap_sham_suspend, omap_sham_resume)
};

static struct platform_driver omap_sham_driver = {
	.probe	= omap_sham_probe,
	.remove	= omap_sham_remove,
	.driver	= {
		.name	= "omap-sham",
		.owner	= THIS_MODULE,
		.pm	= &omap_sham_pm_ops,
		.of_match_table	= omap_sham_of_match,
	},
};

static int __init omap_sham_mod_init(void)
{
	return platform_driver_register(&omap_sham_driver);
}

static void __exit omap_sham_mod_exit(void)
{
	platform_driver_unregister(&omap_sham_driver);
}

module_init(omap_sham_mod_init);
module_exit(omap_sham_mod_exit);

MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");