/*
 * Cryptographic API.
 *
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>

#define DST_MAXBURST			4
#define DMA_MIN				(DST_MAXBURST * sizeof(u32))

/* OMAP TRM gives bitfields as start:end, where start is the higher bit
   number. For example 7:0 */
#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))

#define AES_REG_KEY(x)		(0x1C - ((x ^ 0x01) * 0x04))
#define AES_REG_IV(x)		(0x20 + ((x) * 0x04))

#define AES_REG_CTRL		0x30
#define AES_REG_CTRL_CTR_WIDTH	(1 << 7)
#define AES_REG_CTRL_CTR	(1 << 6)
#define AES_REG_CTRL_CBC	(1 << 5)
#define AES_REG_CTRL_KEY_SIZE	(3 << 3)
#define AES_REG_CTRL_DIRECTION	(1 << 2)
#define AES_REG_CTRL_INPUT_READY	(1 << 1)
#define AES_REG_CTRL_OUTPUT_READY	(1 << 0)

#define AES_REG_DATA		0x34
#define AES_REG_DATA_N(x)	(0x34 + ((x) * 0x04))

#define AES_REG_REV		0x44
#define AES_REG_REV_MAJOR	0xF0
#define AES_REG_REV_MINOR	0x0F

#define AES_REG_MASK		0x48
#define AES_REG_MASK_SIDLE	(1 << 6)
#define AES_REG_MASK_START	(1 << 5)
#define AES_REG_MASK_DMA_OUT_EN	(1 << 3)
#define AES_REG_MASK_DMA_IN_EN	(1 << 2)
#define AES_REG_MASK_SOFTRESET	(1 << 1)
#define AES_REG_AUTOIDLE	(1 << 0)

#define AES_REG_SYSSTATUS	0x4C
#define AES_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define DEFAULT_TIMEOUT		(5*HZ)

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_GIV		BIT(2)

#define FLAGS_INIT		BIT(4)
#define FLAGS_FAST		BIT(5)
#define FLAGS_BUSY		BIT(6)

struct omap_aes_ctx {
	struct omap_aes_dev *dd;

	int		keylen;
	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
	unsigned long	flags;
};

struct omap_aes_reqctx {
	unsigned long mode;
};

#define OMAP_AES_QUEUE_LENGTH	1
#define OMAP_AES_CACHE_SIZE	0

struct omap_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;
	struct omap_aes_ctx	*ctx;
	struct device		*dev;
	unsigned long		flags;
	int			err;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	struct ablkcipher_request	*req;
	size_t				total;
	struct scatterlist		*in_sg;
	struct scatterlist		in_sgl;
	size_t				in_offset;
	struct scatterlist		*out_sg;
	struct scatterlist		out_sgl;
	size_t				out_offset;

	size_t			buflen;
	void			*buf_in;
	size_t			dma_size;
	int			dma_in;
	struct dma_chan		*dma_lch_in;
	dma_addr_t		dma_addr_in;
	void			*buf_out;
	int			dma_out;
	struct dma_chan		*dma_lch_out;
	dma_addr_t		dma_addr_out;
};

/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);

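/* MMIO helpers: all AES registers are accessed relative to dd->io_base. */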
static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
				  u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_aes_read(dd, offset);
	val &= ~mask;
	val |= value;
	omap_aes_write(dd, offset, val);
}

static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
			     u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_aes_write(dd, offset, *value);
}

static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
	/*
	 * Clocks are enabled when a request starts and disabled when it
	 * finishes. There may be long delays between requests and the
	 * device might go into off mode to save power.
	 */
	pm_runtime_get_sync(dd->dev);

	if (!(dd->flags & FLAGS_INIT)) {
		dd->flags |= FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

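/*
 * Program the hardware for the current request: enable DMA requests for
 * whichever channels were obtained, load the key (and the IV for CBC) and
 * set the key size, mode and direction bits in the control register.
 */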
static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
	unsigned int key32;
	int i, err;
	u32 val, mask;

	err = omap_aes_hw_init(dd);
	if (err)
		return err;

	val = 0;
	if (dd->dma_lch_out != NULL)
		val |= AES_REG_MASK_DMA_OUT_EN;
	if (dd->dma_lch_in != NULL)
		val |= AES_REG_MASK_DMA_IN_EN;

	mask = AES_REG_MASK_DMA_IN_EN | AES_REG_MASK_DMA_OUT_EN;

	omap_aes_write_mask(dd, AES_REG_MASK, val, mask);

	key32 = dd->ctx->keylen / sizeof(u32);

	/* it seems a key should always be set even if it has not changed */
	for (i = 0; i < key32; i++) {
		omap_aes_write(dd, AES_REG_KEY(i),
			__le32_to_cpu(dd->ctx->key[i]));
	}

	if ((dd->flags & FLAGS_CBC) && dd->req->info)
		omap_aes_write_n(dd, AES_REG_IV(0), dd->req->info, 4);

	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
	if (dd->flags & FLAGS_CBC)
		val |= AES_REG_CTRL_CBC;
	if (dd->flags & FLAGS_ENCRYPT)
		val |= AES_REG_CTRL_DIRECTION;

	mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
			AES_REG_CTRL_KEY_SIZE;

	omap_aes_write_mask(dd, AES_REG_CTRL, val, mask);

	return 0;
}

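/*
 * Bind a context to an AES core: the first device on dev_list is taken and
 * cached in the context so subsequent requests reuse the same core.
 */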
static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
{
	struct omap_aes_dev *dd = NULL, *tmp;

	spin_lock_bh(&list_lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &dev_list, list) {
			/* FIXME: take first available aes core */
			dd = tmp;
			break;
		}
		ctx->dd = dd;
	} else {
		/* already found before */
		dd = ctx->dd;
	}
	spin_unlock_bh(&list_lock);

	return dd;
}

static void omap_aes_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}

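/*
 * Allocate the bounce buffers used for unaligned requests, map them for DMA
 * and request the in/out dmaengine channels for the AES data port.
 */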
static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dd->dma_lch_out = NULL;
	dd->dma_lch_in = NULL;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
	dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	/* MAP here */
	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen,
					 DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
		dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen,
					  DMA_FROM_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
		dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_out;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dd->dma_lch_in = dma_request_channel(mask, omap_dma_filter_fn,
					     &dd->dma_in);
	if (!dd->dma_lch_in) {
		dev_err(dd->dev, "Unable to request in DMA channel\n");
		goto err_dma_in;
	}

	dd->dma_lch_out = dma_request_channel(mask, omap_dma_filter_fn,
					      &dd->dma_out);
	if (!dd->dma_lch_out) {
		dev_err(dd->dev, "Unable to request out DMA channel\n");
		goto err_dma_out;
	}

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in);
err_dma_in:
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
err_map_in:
	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
err_alloc:
	if (err)
		pr_err("error: %d\n", err);
	return err;
}

static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
	dma_release_channel(dd->dma_lch_out);
	dma_release_channel(dd->dma_lch_in);
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
}

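/*
 * Copy @nbytes between a linear buffer and a scatterlist, starting @start
 * bytes into the list; @out selects the copy direction (to or from the sg).
 */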
static void sg_copy_buf(void *buf, struct scatterlist *sg,
			unsigned int start, unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_advance(&walk, start);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}

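/*
 * Copy up to @buflen bytes (bounded by @total) between the current
 * scatterlist position and a bounce buffer, advancing *sg and *offset as
 * entries are consumed. Returns the number of bytes actually copied.
 */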
static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
		   size_t buflen, size_t total, int out)
{
	unsigned int count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		/*
		 * buflen and total are AES_BLOCK_SIZE size aligned,
		 * so count should be also aligned
		 */

		sg_copy_buf(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}

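/*
 * Queue one pair of dmaengine slave transfers: memory-to-device into
 * AES_REG_DATA and device-to-memory out of it, then set the START bit so
 * the module begins issuing DMA requests. Completion is signalled from the
 * out-channel callback.
 */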
static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
		struct scatterlist *in_sg, struct scatterlist *out_sg)
{
	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct omap_aes_dev *dd = ctx->dd;
	struct dma_async_tx_descriptor *tx_in, *tx_out;
	struct dma_slave_config cfg;
	dma_addr_t dma_addr_in = sg_dma_address(in_sg);
	int ret, length = sg_dma_len(in_sg);

	pr_debug("len: %d\n", length);

	dd->dma_size = length;

	if (!(dd->flags & FLAGS_FAST))
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);

	memset(&cfg, 0, sizeof(cfg));

	cfg.src_addr = dd->phys_base + AES_REG_DATA;
	cfg.dst_addr = dd->phys_base + AES_REG_DATA;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = DST_MAXBURST;
	cfg.dst_maxburst = DST_MAXBURST;

	/* IN */
	ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, 1,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_in) {
		dev_err(dd->dev, "IN prep_slave_sg() failed\n");
		return -EINVAL;
	}

	/* No callback necessary */
	tx_in->callback_param = dd;

	/* OUT */
	ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, 1,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_out) {
		dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
		return -EINVAL;
	}

	tx_out->callback = omap_aes_dma_out_callback;
	tx_out->callback_param = dd;

	dmaengine_submit(tx_in);
	dmaengine_submit(tx_out);

	dma_async_issue_pending(dd->dma_lch_in);
	dma_async_issue_pending(dd->dma_lch_out);

	/* start DMA or disable idle mode */
	omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
			    AES_REG_MASK_START);

	return 0;
}

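/*
 * Set up the next chunk of the request. If both source and destination are
 * single, word-aligned scatterlist entries, the caller's pages are
 * DMA-mapped directly ("fast" path); otherwise the data is staged through
 * the pre-mapped bounce buffers via sg_copy().
 */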
static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
					crypto_ablkcipher_reqtfm(dd->req));
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;
	struct scatterlist *in_sg, *out_sg;
	int len32;

	pr_debug("total: %zu\n", dd->total);

	if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
		/* check for alignment */
		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));

		fast = in && out;
	}

	if (fast) {
		count = min(dd->total, sg_dma_len(dd->in_sg));
		count = min(count, sg_dma_len(dd->out_sg));

		if (count != dd->total) {
			pr_err("request length != buffer length\n");
			return -EINVAL;
		}

		pr_debug("fast\n");

		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
			return -EINVAL;
		}

		addr_in = sg_dma_address(dd->in_sg);
		addr_out = sg_dma_address(dd->out_sg);

		in_sg = dd->in_sg;
		out_sg = dd->out_sg;

		dd->flags |= FLAGS_FAST;

	} else {
		/* use cache buffers */
		count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
				dd->buflen, dd->total, 0);

		len32 = DIV_ROUND_UP(count, DMA_MIN) * DMA_MIN;

		/*
		 * The data going into the AES module has been copied
		 * to a local buffer and the data coming out will go
		 * into a local buffer so set up local SG entries for
		 * both.
		 */
		sg_init_table(&dd->in_sgl, 1);
		dd->in_sgl.offset = dd->in_offset;
		sg_dma_len(&dd->in_sgl) = len32;
		sg_dma_address(&dd->in_sgl) = dd->dma_addr_in;

		sg_init_table(&dd->out_sgl, 1);
		dd->out_sgl.offset = dd->out_offset;
		sg_dma_len(&dd->out_sgl) = len32;
		sg_dma_address(&dd->out_sgl) = dd->dma_addr_out;

		in_sg = &dd->in_sgl;
		out_sg = &dd->out_sgl;

		addr_in = dd->dma_addr_in;
		addr_out = dd->dma_addr_out;

		dd->flags &= ~FLAGS_FAST;

	}

	dd->total -= count;

	err = omap_aes_crypt_dma(tfm, in_sg, out_sg);
	if (err) {
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
	}

	return err;
}

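/*
 * Complete the current request: drop the runtime PM reference taken in
 * omap_aes_hw_init(), clear the busy flag and invoke the crypto completion
 * callback with the final status.
 */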
static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
	struct ablkcipher_request *req = dd->req;

	pr_debug("err: %d\n", err);

	pm_runtime_put_sync(dd->dev);
	dd->flags &= ~FLAGS_BUSY;

	req->base.complete(&req->base, err);
}

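/*
 * Tear down the DMA transfer that just completed: clear the START bit,
 * terminate both channels and either unmap the caller's scatterlists (fast
 * path) or copy the result out of the bounce buffer.
 */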
static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
	int err = 0;
	size_t count;

	pr_debug("total: %zu\n", dd->total);

	omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START);

	dmaengine_terminate_all(dd->dma_lch_in);
	dmaengine_terminate_all(dd->dma_lch_out);

	if (dd->flags & FLAGS_FAST) {
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
	} else {
		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
					   dd->dma_size, DMA_FROM_DEVICE);

		/* copy data */
		count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out,
				dd->buflen, dd->dma_size, 1);
		if (count != dd->dma_size) {
			err = -EINVAL;
			pr_err("not all data converted: %zu\n", count);
		}
	}

	return err;
}

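/*
 * Enqueue a new request (if any) and, unless the engine is already busy,
 * dequeue the next one, program the hardware and start DMA. Called from
 * omap_aes_crypt() and from the tasklets.
 */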
static int omap_aes_handle_queue(struct omap_aes_dev *dd,
			       struct ablkcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_aes_ctx *ctx;
	struct omap_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ablkcipher_enqueue_request(&dd->queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	dd->ctx = ctx;
	ctx->dd = dd;

	err = omap_aes_write_ctrl(dd);
	if (!err)
		err = omap_aes_crypt_dma_start(dd);
	if (err) {
		/* aes_task will not finish it, so do it here */
		omap_aes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret; /* return ret, which is enqueue return value */
}

static void omap_aes_done_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
	int err;

	pr_debug("enter\n");

	err = omap_aes_crypt_dma_stop(dd);

	err = dd->err ? : err;

	if (dd->total && !err) {
		err = omap_aes_crypt_dma_start(dd);
		if (!err)
			return; /* DMA started. Not finishing. */
	}

	omap_aes_finish_req(dd, err);
	omap_aes_handle_queue(dd, NULL);

	pr_debug("exit\n");
}

static void omap_aes_queue_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;

	omap_aes_handle_queue(dd, NULL);
}

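/*
 * Common entry point for the ECB/CBC encrypt and decrypt operations below:
 * validates the request length, records the requested mode and hands the
 * request to the device queue.
 */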
static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct omap_aes_dev *dd;

	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
		  !!(mode & FLAGS_ENCRYPT),
		  !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		pr_err("request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	dd = omap_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return omap_aes_handle_queue(dd, req);
}

/* ********************** ALG API ************************************ */

static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
		   keylen != AES_KEYSIZE_256)
		return -EINVAL;

	pr_debug("enter, keylen: %d\n", keylen);

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
	pr_debug("enter\n");

	tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);

	return 0;
}

static void omap_aes_cra_exit(struct crypto_tfm *tfm)
{
	pr_debug("enter\n");
}

/* ********************** ALGS ************************************ */

static struct crypto_alg algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-omap",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_ecb_encrypt,
		.decrypt	= omap_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-omap",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_cbc_encrypt,
		.decrypt	= omap_aes_cbc_decrypt,
	}
}
};

#ifdef CONFIG_OF
static const struct of_device_id omap_aes_of_match[] = {
	{
		.compatible	= "ti,omap2-aes",
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_aes_of_match);

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
		struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	int err = 0;

	match = of_match_device(of_match_ptr(omap_aes_of_match), dev);
	if (!match) {
		dev_err(dev, "no compatible OF match\n");
		err = -EINVAL;
		goto err;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		err = -EINVAL;
		goto err;
	}

	dd->dma_out = -1; /* Dummy value that's unused */
	dd->dma_in = -1; /* Dummy value that's unused */

err:
	return err;
}
#else
static const struct of_device_id omap_aes_of_match[] = {
	{},
};

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
		struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif

static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
		struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	int err = 0;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err;
	}
	memcpy(res, r, sizeof(*res));

	/* Get the DMA out channel */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!r) {
		dev_err(dev, "no DMA out resource info\n");
		err = -ENODEV;
		goto err;
	}
	dd->dma_out = r->start;

	/* Get the DMA in channel */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!r) {
		dev_err(dev, "no DMA in resource info\n");
		err = -ENODEV;
		goto err;
	}
	dd->dma_in = r->start;

err:
	return err;
}

static int omap_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_aes_dev *dd;
	struct resource res;
	int err = -ENOMEM, i, j;
	u32 reg;

	dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		goto err_data;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	spin_lock_init(&dd->lock);
	crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);

	err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
			       omap_aes_get_res_pdev(dd, pdev, &res);
	if (err)
		goto err_res;

	dd->io_base = devm_request_and_ioremap(dev, &res);
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto err_res;
	}
	dd->phys_base = res.start;

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	reg = omap_aes_read(dd, AES_REG_REV);
	dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
		 (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR);

	pm_runtime_put_sync(dev);

	tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
	tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);

	err = omap_aes_dma_init(dd);
	if (err)
		goto err_dma;

	INIT_LIST_HEAD(&dd->list);
	spin_lock(&list_lock);
	list_add_tail(&dd->list, &dev_list);
	spin_unlock(&list_lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		pr_debug("i: %d\n", i);
		err = crypto_register_alg(&algs[i]);
		if (err)
			goto err_algs;
	}

	return 0;
err_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);
	omap_aes_dma_cleanup(dd);
err_dma:
	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	pm_runtime_disable(dev);
err_res:
	kfree(dd);
	dd = NULL;
err_data:
	dev_err(dev, "initialization failed.\n");
	return err;
}

static int omap_aes_remove(struct platform_device *pdev)
{
	struct omap_aes_dev *dd = platform_get_drvdata(pdev);
	int i;

	if (!dd)
		return -ENODEV;

	spin_lock(&list_lock);
	list_del(&dd->list);
	spin_unlock(&list_lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_alg(&algs[i]);

	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	omap_aes_dma_cleanup(dd);
	pm_runtime_disable(dd->dev);
	kfree(dd);
	dd = NULL;

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int omap_aes_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);
	return 0;
}

static int omap_aes_resume(struct device *dev)
{
	pm_runtime_get_sync(dev);
	return 0;
}
#endif

static const struct dev_pm_ops omap_aes_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap_aes_suspend, omap_aes_resume)
};

static struct platform_driver omap_aes_driver = {
	.probe	= omap_aes_probe,
	.remove	= omap_aes_remove,
	.driver	= {
		.name	= "omap-aes",
		.owner	= THIS_MODULE,
		.pm	= &omap_aes_pm_ops,
		.of_match_table	= omap_aes_of_match,
	},
};

static int __init omap_aes_mod_init(void)
{
	return platform_driver_register(&omap_aes_driver);
}

static void __exit omap_aes_mod_exit(void)
{
	platform_driver_unregister(&omap_aes_driver);
}

module_init(omap_aes_mod_init);
module_exit(omap_aes_mod_exit);

MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");