/*
 * Cryptographic API.
 *
 * Support for OMAP SHA1/MD5 HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from old omap-sha1-md5.c driver.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>

#define SHA1_MD5_BLOCK_SIZE		SHA1_BLOCK_SIZE
#define MD5_DIGEST_SIZE			16

#define DST_MAXBURST			16
#define DMA_MIN				(DST_MAXBURST * sizeof(u32))

#define SHA_REG_IDIGEST(dd, x)		((dd)->pdata->idigest_ofs + ((x) * 0x04))
#define SHA_REG_DIN(dd, x)		((dd)->pdata->din_ofs + ((x) * 0x04))
#define SHA_REG_DIGCNT(dd)		((dd)->pdata->digcnt_ofs)

#define SHA_REG_ODIGEST(x)		(0x00 + ((x) * 0x04))

#define SHA_REG_CTRL			0x18
#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
#define SHA_REG_CTRL_ALGO		(1 << 2)
#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)

#define SHA_REG_REV(dd)			((dd)->pdata->rev_ofs)

#define SHA_REG_MASK(dd)		((dd)->pdata->mask_ofs)
#define SHA_REG_MASK_DMA_EN		(1 << 3)
#define SHA_REG_MASK_IT_EN		(1 << 2)
#define SHA_REG_MASK_SOFTRESET		(1 << 1)
#define SHA_REG_AUTOIDLE		(1 << 0)

#define SHA_REG_SYSSTATUS(dd)		((dd)->pdata->sysstatus_ofs)
#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define SHA_REG_MODE			0x44
#define SHA_REG_MODE_HMAC_OUTER_HASH	(1 << 7)
#define SHA_REG_MODE_HMAC_KEY_PROC	(1 << 5)
#define SHA_REG_MODE_CLOSE_HASH		(1 << 4)
#define SHA_REG_MODE_ALGO_CONSTANT	(1 << 3)
#define SHA_REG_MODE_ALGO_MASK		(3 << 1)
#define SHA_REG_MODE_ALGO_MD5_128	(0 << 1)
#define SHA_REG_MODE_ALGO_SHA1_160	(1 << 1)

#define SHA_REG_LENGTH			0x48

#define SHA_REG_IRQSTATUS		0x118
#define SHA_REG_IRQSTATUS_CTX_RDY	(1 << 3)
#define SHA_REG_IRQSTATUS_PARTHASH_RDY	(1 << 2)
#define SHA_REG_IRQSTATUS_INPUT_RDY	(1 << 1)
#define SHA_REG_IRQSTATUS_OUTPUT_RDY	(1 << 0)

#define SHA_REG_IRQENA			0x11C
#define SHA_REG_IRQENA_CTX_RDY		(1 << 3)
#define SHA_REG_IRQENA_PARTHASH_RDY	(1 << 2)
#define SHA_REG_IRQENA_INPUT_RDY	(1 << 1)
#define SHA_REG_IRQENA_OUTPUT_RDY	(1 << 0)

#define DEFAULT_TIMEOUT_INTERVAL	HZ

/* mostly device flags */
#define FLAGS_BUSY		0
#define FLAGS_FINAL		1
#define FLAGS_DMA_ACTIVE	2
#define FLAGS_OUTPUT_READY	3
#define FLAGS_INIT		4
#define FLAGS_CPU		5
#define FLAGS_DMA_READY		6
#define FLAGS_AUTO_XOR		7
#define FLAGS_BE32_SHA1		8
/* context flags */
#define FLAGS_FINUP		16
#define FLAGS_SG		17

#define FLAGS_MODE_SHIFT	18
#define FLAGS_MODE_MASK		(SHA_REG_MODE_ALGO_MASK \
					<< (FLAGS_MODE_SHIFT - 1))
#define FLAGS_MODE_MD5		(SHA_REG_MODE_ALGO_MD5_128 \
					<< (FLAGS_MODE_SHIFT - 1))
#define FLAGS_MODE_SHA1		(SHA_REG_MODE_ALGO_SHA1_160 \
					<< (FLAGS_MODE_SHIFT - 1))
#define FLAGS_HMAC		20
#define FLAGS_ERROR		21
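
/*
 * Note on the mode bits: FLAGS_MODE_MASK places the hardware algorithm
 * encoding (SHA_REG_MODE_ALGO_*) at bits 18-19 of the context flags. For
 * example, FLAGS_MODE_SHA1 == (1 << 1) << 17 == (1 << 18), so
 * (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT - 1) recovers the
 * value to program into SHA_REG_MODE directly (see
 * omap_sham_write_ctrl_omap4() below).
 */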

#define OP_UPDATE	1
#define OP_FINAL	2

#define OMAP_ALIGN_MASK		(sizeof(u32) - 1)
#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))

#define BUFLEN		PAGE_SIZE

struct omap_sham_dev;

struct omap_sham_reqctx {
	struct omap_sham_dev	*dd;
	unsigned long		flags;
	unsigned long		op;

	u8			digest[SHA256_DIGEST_SIZE] OMAP_ALIGNED;
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;
	dma_addr_t		dma_addr;

	/* walk state */
	struct scatterlist	*sg;
	struct scatterlist	sgl;
	unsigned int		offset;	/* offset in current sg */
	unsigned int		total;	/* total request */

	u8			buffer[0] OMAP_ALIGNED;
};
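
/*
 * The zero-length "buffer" array marks the start of the per-request data
 * buffer: omap_sham_cra_init_alg() reserves sizeof(struct omap_sham_reqctx)
 * plus BUFLEN bytes per request, so ctx->buffer is always BUFLEN (one page)
 * long. Short updates are accumulated here until a chunk worth handing to
 * DMA or the CPU is available.
 */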

struct omap_sham_hmac_ctx {
	struct crypto_shash	*shash;
	u8			ipad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED;
	u8			opad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED;
};

struct omap_sham_ctx {
	struct omap_sham_dev	*dd;

	unsigned long		flags;

	/* fallback stuff */
	struct crypto_shash	*fallback;

	struct omap_sham_hmac_ctx base[0];
};

#define OMAP_SHAM_QUEUE_LENGTH	1

struct omap_sham_pdata {
	unsigned long	flags;
	int		digest_size;

	void		(*copy_hash)(struct ahash_request *req, int out);
	void		(*write_ctrl)(struct omap_sham_dev *dd, size_t length,
				      int final, int dma);
	void		(*trigger)(struct omap_sham_dev *dd, size_t length);
	int		(*poll_irq)(struct omap_sham_dev *dd);
	irqreturn_t	(*intr_hdlr)(int irq, void *dev_id);

	u32		odigest_ofs;
	u32		idigest_ofs;
	u32		din_ofs;
	u32		digcnt_ofs;
	u32		rev_ofs;
	u32		mask_ofs;
	u32		sysstatus_ofs;

	u32		major_mask;
	u32		major_shift;
	u32		minor_mask;
	u32		minor_shift;
};
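
/*
 * omap_sham_pdata bundles what differs between SHA module generations:
 * register offsets, the supported digest size, and function pointers for
 * the control, trigger, IRQ and hash-copy paths. The rest of the driver
 * calls through dd->pdata, which lets the OMAP2/3 and OMAP4 variants share
 * the queueing and DMA logic.
 */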

struct omap_sham_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	void __iomem		*io_base;
	int			irq;
	spinlock_t		lock;
	int			err;
	unsigned int		dma;
	struct dma_chan		*dma_lch;
	struct tasklet_struct	done_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	const struct omap_sham_pdata	*pdata;
};

struct omap_sham_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
	unsigned long		flags;
};

static struct omap_sham_drv sham = {
	.dev_list = LIST_HEAD_INIT(sham.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};

static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_sham_write(struct omap_sham_dev *dd,
					u32 offset, u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_sham_read(dd, address);
	val &= ~mask;
	val |= value;
	omap_sham_write(dd, address, val);
}

static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
{
	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;

	while (!(omap_sham_read(dd, offset) & bit)) {
		if (time_is_before_jiffies(timeout))
			return -ETIMEDOUT;
	}

	return 0;
}
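
/*
 * omap_sham_wait() busy-waits on a status bit for up to
 * DEFAULT_TIMEOUT_INTERVAL (HZ jiffies, i.e. about one second). Its only
 * users are the poll_irq callbacks, called from omap_sham_xmit_cpu() to
 * wait for the input-ready indication before the CPU writes the next words
 * into the DIN registers.
 */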

static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	u32 *hash = (u32 *)ctx->digest;
	int i;

	for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
		if (out)
			hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
		else
			omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
	}
}

static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int i;

	if (ctx->flags & BIT(FLAGS_HMAC)) {
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		u32 *opad = (u32 *)bctx->opad;

		for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
			if (out)
				opad[i] = omap_sham_read(dd,
							SHA_REG_ODIGEST(i));
			else
				omap_sham_write(dd, SHA_REG_ODIGEST(i),
						opad[i]);
		}
	}

	omap_sham_copy_hash_omap2(req, out);
}
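
/*
 * The copy_hash helpers move the intermediate digest between hardware and
 * the request context: out != 0 reads the registers into ctx->digest (after
 * an operation completes), out == 0 restores a saved digest into the
 * registers (when a different request resumes on the engine). The OMAP4
 * variant additionally saves and restores the outer HMAC digest.
 */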

static void omap_sham_copy_ready_hash(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *in = (u32 *)ctx->digest;
	u32 *hash = (u32 *)req->result;
	int i, d, big_endian = 0;

	if (!hash)
		return;

	switch (ctx->flags & FLAGS_MODE_MASK) {
	case FLAGS_MODE_MD5:
		d = MD5_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA1:
		/* OMAP2 SHA1 is big endian */
		if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
			big_endian = 1;
		d = SHA1_DIGEST_SIZE / sizeof(u32);
		break;
	default:
		d = 0;
	}

	if (big_endian)
		for (i = 0; i < d; i++)
			hash[i] = be32_to_cpu(in[i]);
	else
		for (i = 0; i < d; i++)
			hash[i] = le32_to_cpu(in[i]);
}

static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
	pm_runtime_get_sync(dd->dev);

	if (!test_bit(FLAGS_INIT, &dd->flags)) {
		set_bit(FLAGS_INIT, &dd->flags);
		dd->err = 0;
	}

	return 0;
}

static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
				 int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val = length << 5, mask;

	if (likely(ctx->digcnt))
		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);

	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
	/*
	 * Setting ALGO_CONST only for the first iteration
	 * and CLOSE_HASH only for the last one.
	 */
	if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
		val |= SHA_REG_CTRL_ALGO;
	if (!ctx->digcnt)
		val |= SHA_REG_CTRL_ALGO_CONST;
	if (final)
		val |= SHA_REG_CTRL_CLOSE_HASH;

	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
			SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;

	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
}

static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
{
}

static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
{
	return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
}

static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
				 u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_sham_write(dd, offset, *value);
}

static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
				 int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val, mask;

	/*
	 * Setting ALGO_CONST only for the first iteration and
	 * CLOSE_HASH only for the last one. Note that flags mode bits
	 * correspond to algorithm encoding in mode register.
	 */
	val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT - 1);
	if (!ctx->digcnt) {
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
		struct omap_sham_hmac_ctx *bctx = tctx->base;

		val |= SHA_REG_MODE_ALGO_CONSTANT;

		if (ctx->flags & BIT(FLAGS_HMAC)) {
			val |= SHA_REG_MODE_HMAC_KEY_PROC;
			omap_sham_write_n(dd, SHA_REG_ODIGEST(0),
					  (u32 *)bctx->ipad,
					  SHA1_BLOCK_SIZE / sizeof(u32));
			ctx->digcnt += SHA1_BLOCK_SIZE;
		}
	}

	if (final) {
		val |= SHA_REG_MODE_CLOSE_HASH;

		if (ctx->flags & BIT(FLAGS_HMAC))
			val |= SHA_REG_MODE_HMAC_OUTER_HASH;
	}

	mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
	       SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
	       SHA_REG_MODE_HMAC_KEY_PROC;

	dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
	omap_sham_write_mask(dd, SHA_REG_MODE, val, mask);
	omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
			     SHA_REG_MASK_IT_EN |
				     (dma ? SHA_REG_MASK_DMA_EN : 0),
			     SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
}

static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
{
	omap_sham_write(dd, SHA_REG_LENGTH, length);
}

static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
{
	return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
			      SHA_REG_IRQSTATUS_INPUT_RDY);
}
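
/*
 * The two generations program the transfer length differently: OMAP2/3
 * packs the byte count into SHA_REG_CTRL (val = length << 5 in
 * write_ctrl_omap2), so no separate length write is needed and
 * trigger_omap2() is empty; OMAP4 keeps mode and length separate, and
 * trigger_omap4() writing the count into SHA_REG_LENGTH is what kicks off
 * processing on that generation.
 */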

static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	dd->pdata->write_ctrl(dd, length, final, 0);
	dd->pdata->trigger(dd, length);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt += length;

	if (dd->pdata->poll_irq(dd))
		return -ETIMEDOUT;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_CPU, &dd->flags);

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		omap_sham_write(dd, SHA_REG_DIN(dd, count), buffer[count]);

	return -EINPROGRESS;
}

static void omap_sham_dma_callback(void *param)
{
	struct omap_sham_dev *dd = param;

	set_bit(FLAGS_DMA_READY, &dd->flags);
	tasklet_schedule(&dd->done_task);
}

static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
			      size_t length, int final, int is_sg)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config cfg;
	int len32, ret;

	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	memset(&cfg, 0, sizeof(cfg));

	cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_maxburst = DST_MAXBURST;

	ret = dmaengine_slave_config(dd->dma_lch, &cfg);
	if (ret) {
		pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
		return ret;
	}

	len32 = DIV_ROUND_UP(length, DMA_MIN) * DMA_MIN;

	if (is_sg) {
		/*
		 * The SG entry passed in may not have the 'length' member
		 * set correctly so use a local SG entry (sgl) with the
		 * proper value for 'length' instead. If this is not done,
		 * the dmaengine may try to DMA the incorrect amount of data.
		 */
		sg_init_table(&ctx->sgl, 1);
		ctx->sgl.page_link = ctx->sg->page_link;
		ctx->sgl.offset = ctx->sg->offset;
		sg_dma_len(&ctx->sgl) = len32;
		sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);

		tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl, 1,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	} else {
		tx = dmaengine_prep_slave_single(dd->dma_lch, dma_addr, len32,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (!tx) {
		dev_err(dd->dev, "prep_slave_sg/single() failed\n");
		return -EINVAL;
	}

	tx->callback = omap_sham_dma_callback;
	tx->callback_param = dd;

	dd->pdata->write_ctrl(dd, length, final, 1);

	ctx->digcnt += length;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);

	dmaengine_submit(tx);
	dma_async_issue_pending(dd->dma_lch);

	dd->pdata->trigger(dd, length);

	return -EINPROGRESS;
}
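
/*
 * Both xmit paths return -EINPROGRESS on success: completion is always
 * asynchronous, signalled either by the OUTPUT_READY interrupt (CPU path)
 * or by the dmaengine callback above, and finished from the done_task
 * tasklet.
 */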

static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
				const u8 *data, size_t length)
{
	size_t count = min(length, ctx->buflen - ctx->bufcnt);

	count = min(count, ctx->total);
	if (count <= 0)
		return 0;
	memcpy(ctx->buffer + ctx->bufcnt, data, count);
	ctx->bufcnt += count;

	return count;
}

static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
{
	size_t count;

	while (ctx->sg) {
		count = omap_sham_append_buffer(ctx,
				sg_virt(ctx->sg) + ctx->offset,
				ctx->sg->length - ctx->offset);
		if (!count)
			break;
		ctx->offset += count;
		ctx->total -= count;
		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}

static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
					struct omap_sham_reqctx *ctx,
					size_t length, int final)
{
	int ret;

	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
				       DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
		return -EINVAL;
	}

	ctx->flags &= ~BIT(FLAGS_SG);

	ret = omap_sham_xmit_dma(dd, ctx->dma_addr, length, final, 0);
	if (ret != -EINPROGRESS)
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
				 DMA_TO_DEVICE);

	return ret;
}

static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int final;
	size_t count;

	omap_sham_append_sg(ctx);

	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;

	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
					 ctx->bufcnt, ctx->digcnt, final);

	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		return omap_sham_xmit_dma_map(dd, ctx, count, final);
	}

	return 0;
}

/* Start address alignment */
#define SG_AA(sg)	(IS_ALIGNED(sg->offset, sizeof(u32)))
/* SHA1 block size alignment */
#define SG_SA(sg)	(IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))
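
/*
 * For example, an sg entry starting at page offset 2 fails SG_AA() (the
 * engine is fed 32-bit words), and a 100-byte entry fails SG_SA() (not a
 * multiple of the 64-byte SHA1/MD5 block). Either case forces the slow
 * path above, which first copies the data through the aligned context
 * buffer.
 */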

static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length, final, tail;
	struct scatterlist *sg;
	int ret;

	if (!ctx->total)
		return 0;

	if (ctx->bufcnt || ctx->offset)
		return omap_sham_update_dma_slow(dd);

	/*
	 * Don't use the sg interface when the transfer size is less
	 * than the number of elements in a DMA frame. Otherwise,
	 * the dmaengine infrastructure will calculate that it needs
	 * to transfer 0 frames which ultimately fails.
	 */
	if (ctx->total < (DST_MAXBURST * sizeof(u32)))
		return omap_sham_update_dma_slow(dd);

	dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
			ctx->digcnt, ctx->bufcnt, ctx->total);

	sg = ctx->sg;

	if (!SG_AA(sg))
		return omap_sham_update_dma_slow(dd);

	if (!sg_is_last(sg) && !SG_SA(sg))
		/* size is not SHA1_BLOCK_SIZE aligned */
		return omap_sham_update_dma_slow(dd);

	length = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & BIT(FLAGS_FINUP))) {
			/* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
			tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
			/* without finup() we need one block to close hash */
			if (!tail)
				tail = SHA1_MD5_BLOCK_SIZE;
			length -= tail;
		}
	}

	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	ctx->flags |= BIT(FLAGS_SG);

	ctx->total -= length;
	ctx->offset = length; /* offset where to start slow */

	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;

	ret = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final, 1);
	if (ret != -EINPROGRESS)
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);

	return ret;
}

static int omap_sham_update_cpu(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;

	omap_sham_append_sg(ctx);
	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}

static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	dmaengine_terminate_all(dd->dma_lch);

	if (ctx->flags & BIT(FLAGS_SG)) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
				 DMA_TO_DEVICE);
	}

	return 0;
}

static int omap_sham_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = NULL, *tmp;

	spin_lock_bh(&sham.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &sham.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&sham.lock);

	ctx->dd = dd;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case MD5_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_MD5;
		break;
	case SHA1_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA1;
		break;
	}

	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->buflen = BUFLEN;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
			struct omap_sham_hmac_ctx *bctx = tctx->base;

			memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
			ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
		}

		ctx->flags |= BIT(FLAGS_HMAC);
	}

	return 0;
}

static int omap_sham_update_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
		 ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);

	if (ctx->flags & BIT(FLAGS_CPU))
		err = omap_sham_update_cpu(dd);
	else
		err = omap_sham_update_dma_start(dd);

	/* wait for dma completion before can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);

	return err;
}

static int omap_sham_final_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0, use_dma = 1;

	if (ctx->bufcnt <= DMA_MIN)
		/* faster to handle last block with cpu */
		use_dma = 0;

	if (use_dma)
		err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
	else
		err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);

	ctx->bufcnt = 0;

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}

static int omap_sham_finish_hmac(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(bctx->shash)];
	} desc;

	desc.shash.tfm = bctx->shash;
	desc.shash.flags = 0;	/* not CRYPTO_TFM_REQ_MAY_SLEEP */

	return crypto_shash_init(&desc.shash) ?:
	       crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
	       crypto_shash_finup(&desc.shash, req->result, ds, req->result);
}

static int omap_sham_finish(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int err = 0;

	if (ctx->digcnt) {
		omap_sham_copy_ready_hash(req);
		if ((ctx->flags & BIT(FLAGS_HMAC)) &&
				!test_bit(FLAGS_AUTO_XOR, &dd->flags))
			err = omap_sham_finish_hmac(req);
	}

	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);

	return err;
}

static void omap_sham_finish_req(struct ahash_request *req, int err)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	if (!err) {
		dd->pdata->copy_hash(req, 1);
		if (test_bit(FLAGS_FINAL, &dd->flags))
			err = omap_sham_finish(req);
	} else {
		ctx->flags |= BIT(FLAGS_ERROR);
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
			BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));

	pm_runtime_put_sync(dd->dev);

	if (req->base.complete)
		req->base.complete(&req->base, err);

	/* handle new request */
	tasklet_schedule(&dd->done_task);
}

static int omap_sham_handle_queue(struct omap_sham_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_sham_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);
	if (test_bit(FLAGS_BUSY, &dd->flags)) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		set_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->req = req;
	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = omap_sham_hw_init(dd);
	if (err)
		goto err1;

	if (ctx->digcnt)
		/* request has changed - restore hash */
		dd->pdata->copy_hash(req, 0);

	if (ctx->op == OP_UPDATE) {
		err = omap_sham_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
			/* no final() after finup() */
			err = omap_sham_final_req(dd);
	} else if (ctx->op == OP_FINAL) {
		err = omap_sham_final_req(dd);
	}
err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		omap_sham_finish_req(req, err);

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return ret;
}
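
/*
 * The engine processes one request at a time: FLAGS_BUSY is set while a
 * request is on the hardware, and omap_sham_handle_queue() simply enqueues
 * and returns if it is already set. The next request is picked up when
 * omap_sham_finish_req() clears FLAGS_BUSY and reschedules done_task, which
 * calls back into this function with req == NULL.
 */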

static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_dev *dd = tctx->dd;

	ctx->op = op;

	return omap_sham_handle_queue(dd, req);
}

static int omap_sham_update(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & BIT(FLAGS_FINUP)) {
		if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
			/*
			 * OMAP HW accel works only with buffers >= 9
			 * will switch to bypass in final()
			 * final has the same request and data
			 */
			omap_sham_append_sg(ctx);
			return 0;
		} else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
			/*
			 * faster to use CPU for short transfers
			 */
			ctx->flags |= BIT(FLAGS_CPU);
		}
	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
		omap_sham_append_sg(ctx);
		return 0;
	}

	return omap_sham_enqueue(req, OP_UPDATE);
}

static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
				  const u8 *data, unsigned int len, u8 *out)
{
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(shash)];
	} desc;

	desc.shash.tfm = shash;
	desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_shash_digest(&desc.shash, data, len, out);
}

static int omap_sham_final_shash(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	return omap_sham_shash_digest(tctx->fallback, req->base.flags,
				      ctx->buffer, ctx->bufcnt, req->result);
}

static int omap_sham_final(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= BIT(FLAGS_FINUP);

	if (ctx->flags & BIT(FLAGS_ERROR))
		return 0; /* uncompleted hash is not needed */

	/* OMAP HW accel works only with buffers >= 9 */
	/* HMAC is always >= 9 because ipad == block size */
	if ((ctx->digcnt + ctx->bufcnt) < 9)
		return omap_sham_final_shash(req);
	else if (ctx->bufcnt)
		return omap_sham_enqueue(req, OP_FINAL);

	/* copy ready hash (+ finalize hmac) */
	return omap_sham_finish(req);
}

static int omap_sham_finup(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= BIT(FLAGS_FINUP);

	err1 = omap_sham_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;
	/*
	 * final() always has to be called to clean up resources,
	 * even if update() failed, except for EINPROGRESS
	 */
	err2 = omap_sham_final(req);

	return err1 ?: err2;
}

static int omap_sham_digest(struct ahash_request *req)
{
	return omap_sham_init(req) ?: omap_sham_finup(req);
}

static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	struct omap_sham_dev *dd = NULL, *tmp;
	int err, i;

	spin_lock_bh(&sham.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &sham.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&sham.lock);

	err = crypto_shash_setkey(tctx->fallback, key, keylen);
	if (err)
		return err;

	if (keylen > bs) {
		err = omap_sham_shash_digest(bctx->shash,
				crypto_shash_get_flags(bctx->shash),
				key, keylen, bctx->ipad);
		if (err)
			return err;
		keylen = ds;
	} else {
		memcpy(bctx->ipad, key, keylen);
	}

	memset(bctx->ipad + keylen, 0, bs - keylen);

	if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
		memcpy(bctx->opad, bctx->ipad, bs);

		for (i = 0; i < bs; i++) {
			bctx->ipad[i] ^= 0x36;
			bctx->opad[i] ^= 0x5c;
		}
	}

	return err;
}
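
/*
 * setkey() stores the (possibly pre-hashed) key in bctx->ipad. On hardware
 * without FLAGS_AUTO_XOR (OMAP2/3) the standard HMAC pads are derived in
 * software, ipad[i] ^= 0x36 and opad[i] ^= 0x5c, and the outer hash is
 * later computed by omap_sham_finish_hmac(). On OMAP4 the raw key block is
 * kept as-is because the engine performs the key processing itself
 * (SHA_REG_MODE_HMAC_KEY_PROC).
 */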

static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("omap-sham: fallback driver '%s' "
				"could not be loaded.\n", alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct omap_sham_reqctx) + BUFLEN);

	if (alg_base) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		tctx->flags |= BIT(FLAGS_HMAC);
		bctx->shash = crypto_alloc_shash(alg_base, 0,
						CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(bctx->shash)) {
			pr_err("omap-sham: base driver '%s' "
					"could not be loaded.\n", alg_base);
			crypto_free_shash(tctx->fallback);
			return PTR_ERR(bctx->shash);
		}
	}

	return 0;
}

static int omap_sham_cra_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, NULL);
}

static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha1");
}

static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "md5");
}

static void omap_sham_cra_exit(struct crypto_tfm *tfm)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		crypto_free_shash(bctx->shash);
	}
}

static struct ahash_alg algs[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "omap-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "md5",
		.cra_driver_name	= "omap-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha1)",
		.cra_driver_name	= "omap-hmac-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha1_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(md5)",
		.cra_driver_name	= "omap-hmac-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_md5_init,
		.cra_exit		= omap_sham_cra_exit,
	}
}
};

static void omap_sham_done_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
	int err = 0;

	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
		omap_sham_handle_queue(dd, NULL);
		return;
	}

	if (test_bit(FLAGS_CPU, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
			goto finish;
	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
			omap_sham_update_dma_stop(dd);
			if (dd->err) {
				err = dd->err;
				goto finish;
			}
		}
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
			/* hash or semi-hash ready */
			clear_bit(FLAGS_DMA_READY, &dd->flags);
			err = omap_sham_update_dma_start(dd);
			if (err != -EINPROGRESS)
				goto finish;
		}
	}

	return;

finish:
	dev_dbg(dd->dev, "update done: err: %d\n", err);
	/* finish current request */
	omap_sham_finish_req(dd->req, err);
}

static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
{
	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
		dev_warn(dd->dev, "Interrupt when no active requests.\n");
	} else {
		set_bit(FLAGS_OUTPUT_READY, &dd->flags);
		tasklet_schedule(&dd->done_task);
	}

	return IRQ_HANDLED;
}

static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
		/* final -> allow device to go to power-saving mode */
		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);

	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
				 SHA_REG_CTRL_OUTPUT_READY);
	omap_sham_read(dd, SHA_REG_CTRL);

	return omap_sham_irq_common(dd);
}

static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);

	return omap_sham_irq_common(dd);
}

static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
	.flags		= BIT(FLAGS_BE32_SHA1),
	.digest_size	= SHA1_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap2,
	.write_ctrl	= omap_sham_write_ctrl_omap2,
	.trigger	= omap_sham_trigger_omap2,
	.poll_irq	= omap_sham_poll_irq_omap2,
	.intr_hdlr	= omap_sham_irq_omap2,
	.idigest_ofs	= 0x00,
	.din_ofs	= 0x1c,
	.digcnt_ofs	= 0x14,
	.rev_ofs	= 0x5c,
	.mask_ofs	= 0x60,
	.sysstatus_ofs	= 0x64,
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};

#ifdef CONFIG_OF
static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
	.flags		= BIT(FLAGS_AUTO_XOR),
	.digest_size	= SHA256_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap4,
	.write_ctrl	= omap_sham_write_ctrl_omap4,
	.trigger	= omap_sham_trigger_omap4,
	.poll_irq	= omap_sham_poll_irq_omap4,
	.intr_hdlr	= omap_sham_irq_omap4,
	.idigest_ofs	= 0x020,
	.din_ofs	= 0x080,
	.digcnt_ofs	= 0x040,
	.rev_ofs	= 0x100,
	.mask_ofs	= 0x110,
	.sysstatus_ofs	= 0x114,
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};

static const struct of_device_id omap_sham_of_match[] = {
	{
		.compatible	= "ti,omap2-sham",
		.data		= &omap_sham_pdata_omap2,
	},
	{
		.compatible	= "ti,omap4-sham",
		.data		= &omap_sham_pdata_omap4,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_sham_of_match);

static int omap_sham_get_res_of(struct omap_sham_dev *dd,
		struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	int err = 0;

	match = of_match_device(of_match_ptr(omap_sham_of_match), dev);
	if (!match) {
		dev_err(dev, "no compatible OF match\n");
		err = -EINVAL;
		goto err;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		err = -EINVAL;
		goto err;
	}

	dd->irq = of_irq_to_resource(node, 0, NULL);
	if (!dd->irq) {
		dev_err(dev, "can't translate OF irq value\n");
		err = -EINVAL;
		goto err;
	}

	dd->dma = -1; /* Dummy value that's unused */
	dd->pdata = match->data;

err:
	return err;
}
#else
/* !CONFIG_OF stub: probe() only calls this when dev->of_node is set */
static int omap_sham_get_res_of(struct omap_sham_dev *dd,
		struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif

static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
		struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	int err = 0;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err;
	}
	memcpy(res, r, sizeof(*res));

	/* Get the IRQ */
	dd->irq = platform_get_irq(pdev, 0);
	if (dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = dd->irq;
		goto err;
	}

	/* Get the DMA */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!r) {
		dev_err(dev, "no DMA resource info\n");
		err = -ENODEV;
		goto err;
	}
	dd->dma = r->start;

	/* Only OMAP2/3 can be non-DT */
	dd->pdata = &omap_sham_pdata_omap2;

err:
	return err;
}

static int __devinit omap_sham_probe(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	struct device *dev = &pdev->dev;
	struct resource res;
	dma_cap_mask_t mask;
	int err, i, j;
	u32 rev;

	dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto data_err;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	spin_lock_init(&dd->lock);
	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);

	err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
			       omap_sham_get_res_pdev(dd, pdev, &res);
	if (err)
		goto res_err;

	dd->io_base = devm_request_and_ioremap(dev, &res);
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto res_err;
	}
	dd->phys_base = res.start;

	err = request_irq(dd->irq, dd->pdata->intr_hdlr, IRQF_TRIGGER_LOW,
			  dev_name(dev), dd);
	if (err) {
		dev_err(dev, "unable to request irq.\n");
		goto res_err;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dd->dma_lch = dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
						       &dd->dma, dev, "rx");
	if (!dd->dma_lch) {
		dev_err(dev, "unable to obtain RX DMA engine channel %u\n",
			dd->dma);
		err = -ENXIO;
		goto dma_err;
	}

	dd->flags |= dd->pdata->flags;

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);
	rev = omap_sham_read(dd, SHA_REG_REV(dd));
	pm_runtime_put_sync(&pdev->dev);

	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
		(rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
		(rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

	spin_lock(&sham.lock);
	list_add_tail(&dd->list, &sham.dev_list);
	spin_unlock(&sham.lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_ahash(&algs[i]);
		if (err)
			goto err_algs;
	}

	return 0;

err_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&algs[j]);
	pm_runtime_disable(dev);
	dma_release_channel(dd->dma_lch);
dma_err:
	free_irq(dd->irq, dd);
res_err:
	kfree(dd);
	dd = NULL;
data_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

static int __devexit omap_sham_remove(struct platform_device *pdev)
{
	static struct omap_sham_dev *dd;
	int i;

	dd = platform_get_drvdata(pdev);
	if (!dd)
		return -ENODEV;
	spin_lock(&sham.lock);
	list_del(&dd->list);
	spin_unlock(&sham.lock);
	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_ahash(&algs[i]);
	tasklet_kill(&dd->done_task);
	pm_runtime_disable(&pdev->dev);
	dma_release_channel(dd->dma_lch);
	free_irq(dd->irq, dd);
	kfree(dd);
	dd = NULL;

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int omap_sham_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);
	return 0;
}

static int omap_sham_resume(struct device *dev)
{
	pm_runtime_get_sync(dev);
	return 0;
}
#endif

static const struct dev_pm_ops omap_sham_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap_sham_suspend, omap_sham_resume)
};

static struct platform_driver omap_sham_driver = {
	.probe	= omap_sham_probe,
	.remove	= omap_sham_remove,
	.driver	= {
		.name	= "omap-sham",
		.owner	= THIS_MODULE,
		.pm	= &omap_sham_pm_ops,
		.of_match_table	= omap_sham_of_match,
	},
};

static int __init omap_sham_mod_init(void)
{
	return platform_driver_register(&omap_sham_driver);
}

static void __exit omap_sham_mod_exit(void)
{
	platform_driver_unregister(&omap_sham_driver);
}

module_init(omap_sham_mod_init);
module_exit(omap_sham_mod_exit);

MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");