/*
 * Cryptographic API.
 *
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>

#define DST_MAXBURST			4
#define DMA_MIN				(DST_MAXBURST * sizeof(u32))
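/*
 * Note: with 32-bit words, DMA_MIN works out to 16 bytes, i.e. exactly
 * one AES block, so slow-path DMA lengths below are always rounded up
 * to a whole number of AES blocks.
 */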

/* OMAP TRM gives bitfields as start:end, where start is the higher bit
   number. For example 7:0 */
#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
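/*
 * For instance, FLD_MASK(7, 0) expands to 0xff and FLD_VAL(1, 4, 3) to
 * 0x08 (value 1 placed in the 4:3 field), matching the TRM convention
 * described above.
 */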

#define AES_REG_KEY(dd, x)		((dd)->pdata->key_ofs - \
						((x ^ 0x01) * 0x04))
#define AES_REG_IV(dd, x)		((dd)->pdata->iv_ofs + ((x) * 0x04))
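/*
 * The "x ^ 0x01" above swaps each even/odd pair of 32-bit key words:
 * with the OMAP2 layout (key_ofs = 0x1c), index 0 maps to offset 0x18
 * and index 1 to 0x1c, which presumably matches the word order the
 * hardware expects.
 */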

#define AES_REG_CTRL(dd)		((dd)->pdata->ctrl_ofs)
#define AES_REG_CTRL_CTR_WIDTH		(1 << 7)
#define AES_REG_CTRL_CTR		(1 << 6)
#define AES_REG_CTRL_CBC		(1 << 5)
#define AES_REG_CTRL_KEY_SIZE		(3 << 3)
#define AES_REG_CTRL_DIRECTION		(1 << 2)
#define AES_REG_CTRL_INPUT_READY	(1 << 1)
#define AES_REG_CTRL_OUTPUT_READY	(1 << 0)

#define AES_REG_DATA_N(dd, x)		((dd)->pdata->data_ofs + ((x) * 0x04))

#define AES_REG_REV(dd)			((dd)->pdata->rev_ofs)

#define AES_REG_MASK(dd)		((dd)->pdata->mask_ofs)
#define AES_REG_MASK_SIDLE		(1 << 6)
#define AES_REG_MASK_START		(1 << 5)
#define AES_REG_MASK_DMA_OUT_EN		(1 << 3)
#define AES_REG_MASK_DMA_IN_EN		(1 << 2)
#define AES_REG_MASK_SOFTRESET		(1 << 1)
#define AES_REG_AUTOIDLE		(1 << 0)

#define AES_REG_LENGTH_N(x)		(0x54 + ((x) * 0x04))

#define DEFAULT_TIMEOUT		(5*HZ)

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_GIV		BIT(2)

#define FLAGS_INIT		BIT(4)
#define FLAGS_FAST		BIT(5)
#define FLAGS_BUSY		BIT(6)

struct omap_aes_ctx {
	struct omap_aes_dev *dd;

	int		keylen;
	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
	unsigned long	flags;
};

struct omap_aes_reqctx {
	unsigned long mode;
};

#define OMAP_AES_QUEUE_LENGTH	1
#define OMAP_AES_CACHE_SIZE	0

struct omap_aes_pdata {
	void		(*trigger)(struct omap_aes_dev *dd, int length);

	u32		key_ofs;
	u32		iv_ofs;
	u32		ctrl_ofs;
	u32		data_ofs;
	u32		rev_ofs;
	u32		mask_ofs;

	u32		dma_enable_in;
	u32		dma_enable_out;
	u32		dma_start;

	u32		major_mask;
	u32		major_shift;
	u32		minor_mask;
	u32		minor_shift;
};

struct omap_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;
	struct omap_aes_ctx	*ctx;
	struct device		*dev;
	unsigned long		flags;
	int			err;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	struct ablkcipher_request	*req;
	size_t				total;
	struct scatterlist		*in_sg;
	struct scatterlist		in_sgl;
	size_t				in_offset;
	struct scatterlist		*out_sg;
	struct scatterlist		out_sgl;
	size_t				out_offset;

	size_t			buflen;
	void			*buf_in;
	size_t			dma_size;
	int			dma_in;
	struct dma_chan		*dma_lch_in;
	dma_addr_t		dma_addr_in;
	void			*buf_out;
	int			dma_out;
	struct dma_chan		*dma_lch_out;
	dma_addr_t		dma_addr_out;

	const struct omap_aes_pdata	*pdata;
};

/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);

static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
				  u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_aes_read(dd, offset);
	val &= ~mask;
	val |= value;
	omap_aes_write(dd, offset, val);
}
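/*
 * The read-modify-write helper above is used, for example, by
 * omap_aes_dma_stop() to clear only the DMA-enable/start bits of the
 * MASK register while leaving the SIDLE/AUTOIDLE configuration intact.
 */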

static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_aes_write(dd, offset, *value);
}

static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
	/*
	 * Clocks are enabled when a request starts and disabled when it
	 * finishes. There may be long delays between requests, during
	 * which the device might go to OFF mode to save power.
	 */
	pm_runtime_get_sync(dd->dev);

	if (!(dd->flags & FLAGS_INIT)) {
		dd->flags |= FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
	unsigned int key32;
	int i, err;
	u32 val, mask;

	err = omap_aes_hw_init(dd);
	if (err)
		return err;

	key32 = dd->ctx->keylen / sizeof(u32);

	/* it seems a key should always be set even if it has not changed */
	for (i = 0; i < key32; i++) {
		omap_aes_write(dd, AES_REG_KEY(dd, i),
			__le32_to_cpu(dd->ctx->key[i]));
	}

	if ((dd->flags & FLAGS_CBC) && dd->req->info)
		omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);

	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
	if (dd->flags & FLAGS_CBC)
		val |= AES_REG_CTRL_CBC;
	if (dd->flags & FLAGS_ENCRYPT)
		val |= AES_REG_CTRL_DIRECTION;

	mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
	       AES_REG_CTRL_KEY_SIZE;

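	/*
	 * For example, an AES-128 CBC encryption ends up writing
	 * FLD_VAL(1, 4, 3) | AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION,
	 * i.e. 0x08 | 0x20 | 0x04 = 0x2c, into the CTRL register.
	 */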
	omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, mask);

	return 0;
}

static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length)
{
	u32 mask, val;

	val = dd->pdata->dma_start;

	if (dd->dma_lch_out != NULL)
		val |= dd->pdata->dma_enable_out;
	if (dd->dma_lch_in != NULL)
		val |= dd->pdata->dma_enable_in;

	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
	       dd->pdata->dma_start;

	omap_aes_write_mask(dd, AES_REG_MASK(dd), val, mask);
}

static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
{
	omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
	omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);

	omap_aes_dma_trigger_omap2(dd, length);
}

static void omap_aes_dma_stop(struct omap_aes_dev *dd)
{
	u32 mask;

	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
	       dd->pdata->dma_start;

	omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
}

static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
{
	struct omap_aes_dev *dd = NULL, *tmp;

	spin_lock_bh(&list_lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &dev_list, list) {
			/* FIXME: take the first available AES core */
			dd = tmp;
			break;
		}
		ctx->dd = dd;
	} else {
		/* already found before */
		dd = ctx->dd;
	}
	spin_unlock_bh(&list_lock);

	return dd;
}

static void omap_aes_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}

static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dd->dma_lch_out = NULL;
	dd->dma_lch_in = NULL;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
	dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	/* MAP here */
	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen,
					 DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
		dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen,
					  DMA_FROM_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
		dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_out;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dd->dma_lch_in = dma_request_slave_channel_compat(mask,
							  omap_dma_filter_fn,
							  &dd->dma_in,
							  dd->dev, "rx");
	if (!dd->dma_lch_in) {
		dev_err(dd->dev, "Unable to request in DMA channel\n");
		goto err_dma_in;
	}

	dd->dma_lch_out = dma_request_slave_channel_compat(mask,
							   omap_dma_filter_fn,
							   &dd->dma_out,
							   dd->dev, "tx");
	if (!dd->dma_lch_out) {
		dev_err(dd->dev, "Unable to request out DMA channel\n");
		goto err_dma_out;
	}

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in);
err_dma_in:
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
err_map_in:
	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
err_alloc:
	if (err)
		pr_err("error: %d\n", err);
	return err;
}

static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
	dma_release_channel(dd->dma_lch_out);
	dma_release_channel(dd->dma_lch_in);
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
}

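/*
 * Copy nbytes between a scatterlist and a linear buffer, starting
 * 'start' bytes into the scatterlist: with out == 0 data flows from the
 * scatterlist into buf, with out != 0 from buf back into the
 * scatterlist (following the scatterwalk_copychunks() convention).
 */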
static void sg_copy_buf(void *buf, struct scatterlist *sg,
			unsigned int start, unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_advance(&walk, start);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}

static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
		   size_t buflen, size_t total, int out)
{
	unsigned int count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		/*
		 * buflen and total are AES_BLOCK_SIZE aligned, so count
		 * should be as well.
		 */

		sg_copy_buf(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}

static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
		struct scatterlist *in_sg, struct scatterlist *out_sg)
{
	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct omap_aes_dev *dd = ctx->dd;
	struct dma_async_tx_descriptor *tx_in, *tx_out;
	struct dma_slave_config cfg;
	dma_addr_t dma_addr_in = sg_dma_address(in_sg);
	int ret, length = sg_dma_len(in_sg);

	pr_debug("len: %d\n", length);

	dd->dma_size = length;

	if (!(dd->flags & FLAGS_FAST))
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);

	memset(&cfg, 0, sizeof(cfg));

	cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
	cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = DST_MAXBURST;
	cfg.dst_maxburst = DST_MAXBURST;
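	/*
	 * Both channels point at the same AES data port: the IN channel
	 * feeds plaintext/ciphertext into it and the OUT channel drains
	 * the processed blocks from it.
	 */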

	/* IN */
	ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, 1,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_in) {
		dev_err(dd->dev, "IN prep_slave_sg() failed\n");
		return -EINVAL;
	}

	/* No callback necessary */
	tx_in->callback_param = dd;

	/* OUT */
	ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, 1,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_out) {
		dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
		return -EINVAL;
	}

	tx_out->callback = omap_aes_dma_out_callback;
	tx_out->callback_param = dd;

	dmaengine_submit(tx_in);
	dmaengine_submit(tx_out);

	dma_async_issue_pending(dd->dma_lch_in);
	dma_async_issue_pending(dd->dma_lch_out);

	/* start DMA */
	dd->pdata->trigger(dd, length);

	return 0;
}

static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
					crypto_ablkcipher_reqtfm(dd->req));
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;
	struct scatterlist *in_sg, *out_sg;
	int len32;

	pr_debug("total: %zu\n", dd->total);

	if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
		/* check for alignment */
		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));

		fast = in && out;
	}

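	/*
	 * Fast path: single, word-aligned src/dst entries are DMA-mapped
	 * and used directly. Slow path: data is staged through the
	 * pre-mapped buf_in/buf_out bounce buffers instead.
	 */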
	if (fast) {
		count = min(dd->total, sg_dma_len(dd->in_sg));
		count = min(count, sg_dma_len(dd->out_sg));

		if (count != dd->total) {
			pr_err("request length != buffer length\n");
			return -EINVAL;
		}

		pr_debug("fast\n");

		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
			return -EINVAL;
		}

		addr_in = sg_dma_address(dd->in_sg);
		addr_out = sg_dma_address(dd->out_sg);

		in_sg = dd->in_sg;
		out_sg = dd->out_sg;

		dd->flags |= FLAGS_FAST;

	} else {
		/* use cache buffers */
		count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
				dd->buflen, dd->total, 0);

		len32 = DIV_ROUND_UP(count, DMA_MIN) * DMA_MIN;

		/*
		 * The data going into the AES module has been copied
		 * to a local buffer and the data coming out will go
		 * into a local buffer so set up local SG entries for
		 * both.
		 */
		sg_init_table(&dd->in_sgl, 1);
		dd->in_sgl.offset = dd->in_offset;
		sg_dma_len(&dd->in_sgl) = len32;
		sg_dma_address(&dd->in_sgl) = dd->dma_addr_in;

		sg_init_table(&dd->out_sgl, 1);
		dd->out_sgl.offset = dd->out_offset;
		sg_dma_len(&dd->out_sgl) = len32;
		sg_dma_address(&dd->out_sgl) = dd->dma_addr_out;

		in_sg = &dd->in_sgl;
		out_sg = &dd->out_sgl;

		addr_in = dd->dma_addr_in;
		addr_out = dd->dma_addr_out;

		dd->flags &= ~FLAGS_FAST;
	}

	dd->total -= count;

	err = omap_aes_crypt_dma(tfm, in_sg, out_sg);
	if (err) {
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
	}

	return err;
}

static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
	struct ablkcipher_request *req = dd->req;

	pr_debug("err: %d\n", err);

	pm_runtime_put_sync(dd->dev);
	dd->flags &= ~FLAGS_BUSY;

	req->base.complete(&req->base, err);
}

static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
	int err = 0;
	size_t count;

	pr_debug("total: %zu\n", dd->total);

	omap_aes_dma_stop(dd);

	dmaengine_terminate_all(dd->dma_lch_in);
	dmaengine_terminate_all(dd->dma_lch_out);

	if (dd->flags & FLAGS_FAST) {
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
	} else {
		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
					   dd->dma_size, DMA_FROM_DEVICE);

		/* copy data */
		count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out,
				dd->buflen, dd->dma_size, 1);
		if (count != dd->dma_size) {
			err = -EINVAL;
			pr_err("not all data converted: %zu\n", count);
		}
	}

	return err;
}

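/*
 * Enqueue a request (if given) and, unless an operation is already in
 * flight, pull the next request off the queue and start it. Returns the
 * enqueue status (-EINPROGRESS or -EBUSY) rather than the crypto result,
 * which is delivered through the request's completion callback.
 */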
static int omap_aes_handle_queue(struct omap_aes_dev *dd,
			       struct ablkcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_aes_ctx *ctx;
	struct omap_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ablkcipher_enqueue_request(&dd->queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	dd->ctx = ctx;
	ctx->dd = dd;

	err = omap_aes_write_ctrl(dd);
	if (!err)
		err = omap_aes_crypt_dma_start(dd);
	if (err) {
		/* done_task will not finish it, so do it here */
		omap_aes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret; /* return ret, which is enqueue return value */
}

static void omap_aes_done_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
	int err;

	pr_debug("enter\n");

	err = omap_aes_crypt_dma_stop(dd);

	err = dd->err ? : err;

	if (dd->total && !err) {
		err = omap_aes_crypt_dma_start(dd);
		if (!err)
			return; /* DMA started. Not finishing. */
	}

	omap_aes_finish_req(dd, err);
	omap_aes_handle_queue(dd, NULL);

	pr_debug("exit\n");
}

static void omap_aes_queue_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;

	omap_aes_handle_queue(dd, NULL);
}

static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct omap_aes_dev *dd;

	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
		 !!(mode & FLAGS_ENCRYPT),
		 !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		pr_err("request size is not an exact number of AES blocks\n");
		return -EINVAL;
	}

	dd = omap_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return omap_aes_handle_queue(dd, req);
}

/* ********************** ALG API ************************************ */

static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	pr_debug("enter, keylen: %d\n", keylen);

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
	pr_debug("enter\n");

	tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);

	return 0;
}

static void omap_aes_cra_exit(struct crypto_tfm *tfm)
{
	pr_debug("enter\n");
}

/* ********************** ALGS ************************************ */

static struct crypto_alg algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-omap",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_ecb_encrypt,
		.decrypt	= omap_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-omap",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_cbc_encrypt,
		.decrypt	= omap_aes_cbc_decrypt,
	}
}
};
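
/*
 * A minimal usage sketch (illustrative only; names like my_complete and
 * my_data are hypothetical and all error handling is omitted): once
 * registered, these algorithms are reached through the generic
 * ablkcipher API, e.g.:
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, 0, my_complete, my_data);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);
 *
 * The crypto core selects this driver when its priority wins over the
 * other registered "cbc(aes)" implementations.
 */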

static const struct omap_aes_pdata omap_aes_pdata_omap2 = {
	.trigger	= omap_aes_dma_trigger_omap2,
	.key_ofs	= 0x1c,
	.iv_ofs		= 0x20,
	.ctrl_ofs	= 0x30,
	.data_ofs	= 0x34,
	.rev_ofs	= 0x44,
	.mask_ofs	= 0x48,
	.dma_enable_in	= BIT(2),
	.dma_enable_out	= BIT(3),
	.dma_start	= BIT(5),
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};

#ifdef CONFIG_OF
static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
	.trigger	= omap_aes_dma_trigger_omap4,
	.key_ofs	= 0x3c,
	.iv_ofs		= 0x40,
	.ctrl_ofs	= 0x50,
	.data_ofs	= 0x60,
	.rev_ofs	= 0x80,
	.mask_ofs	= 0x84,
	.dma_enable_in	= BIT(5),
	.dma_enable_out	= BIT(6),
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};

static const struct of_device_id omap_aes_of_match[] = {
	{
		.compatible	= "ti,omap2-aes",
		.data		= &omap_aes_pdata_omap2,
	},
	{
		.compatible	= "ti,omap4-aes",
		.data		= &omap_aes_pdata_omap4,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_aes_of_match);

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
		struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	int err = 0;

	match = of_match_device(of_match_ptr(omap_aes_of_match), dev);
	if (!match) {
		dev_err(dev, "no compatible OF match\n");
		err = -EINVAL;
		goto err;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		err = -EINVAL;
		goto err;
	}

	dd->dma_out = -1; /* Dummy value that's unused */
	dd->dma_in = -1;  /* Dummy value that's unused */

	dd->pdata = match->data;

err:
	return err;
}
#else
static const struct of_device_id omap_aes_of_match[] = {
	{},
};

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
		struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif

static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
		struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	int err = 0;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err;
	}
	memcpy(res, r, sizeof(*res));

	/* Get the DMA out channel */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!r) {
		dev_err(dev, "no DMA out resource info\n");
		err = -ENODEV;
		goto err;
	}
	dd->dma_out = r->start;

	/* Get the DMA in channel */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!r) {
		dev_err(dev, "no DMA in resource info\n");
		err = -ENODEV;
		goto err;
	}
	dd->dma_in = r->start;

	/* Only OMAP2/3 can be non-DT */
	dd->pdata = &omap_aes_pdata_omap2;

err:
	return err;
}

static int omap_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_aes_dev *dd;
	struct resource res;
	int err = -ENOMEM, i, j;
	u32 reg;

	dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		goto err_data;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	spin_lock_init(&dd->lock);
	crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);

	err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
			       omap_aes_get_res_pdev(dd, pdev, &res);
	if (err)
		goto err_res;

	dd->io_base = devm_request_and_ioremap(dev, &res);
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto err_res;
	}
	dd->phys_base = res.start;

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	omap_aes_dma_stop(dd);

	reg = omap_aes_read(dd, AES_REG_REV(dd));

	pm_runtime_put_sync(dev);

	dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
		 (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
		 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

	tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
	tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);

	err = omap_aes_dma_init(dd);
	if (err)
		goto err_dma;

	INIT_LIST_HEAD(&dd->list);
	spin_lock(&list_lock);
	list_add_tail(&dd->list, &dev_list);
	spin_unlock(&list_lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		pr_debug("i: %d\n", i);
		err = crypto_register_alg(&algs[i]);
		if (err)
			goto err_algs;
	}

	return 0;
err_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);
	omap_aes_dma_cleanup(dd);
err_dma:
	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	pm_runtime_disable(dev);
err_res:
	kfree(dd);
	dd = NULL;
err_data:
	dev_err(dev, "initialization failed.\n");
	return err;
}

static int omap_aes_remove(struct platform_device *pdev)
{
	struct omap_aes_dev *dd = platform_get_drvdata(pdev);
	int i;

	if (!dd)
		return -ENODEV;

	spin_lock(&list_lock);
	list_del(&dd->list);
	spin_unlock(&list_lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_alg(&algs[i]);

	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	omap_aes_dma_cleanup(dd);
	pm_runtime_disable(dd->dev);
	kfree(dd);
	dd = NULL;

	return 0;
}

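/*
 * System suspend/resume: drop the runtime PM reference on suspend so
 * the module's clocks can be cut, and take it back on resume. This
 * appears intended to balance the pm_runtime_get_sync()/put_sync()
 * calls made while a request is in flight.
 */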
#ifdef CONFIG_PM_SLEEP
static int omap_aes_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);
	return 0;
}

static int omap_aes_resume(struct device *dev)
{
	pm_runtime_get_sync(dev);
	return 0;
}
#endif

static const struct dev_pm_ops omap_aes_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap_aes_suspend, omap_aes_resume)
};

static struct platform_driver omap_aes_driver = {
	.probe	= omap_aes_probe,
	.remove	= omap_aes_remove,
	.driver	= {
		.name	= "omap-aes",
		.owner	= THIS_MODULE,
		.pm	= &omap_aes_pm_ops,
		.of_match_table	= omap_aes_of_match,
	},
};

static int __init omap_aes_mod_init(void)
{
	return platform_driver_register(&omap_aes_driver);
}

static void __exit omap_aes_mod_exit(void)
{
	platform_driver_unregister(&omap_aes_driver);
}

module_init(omap_aes_mod_init);
module_exit(omap_aes_mod_exit);

MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");