blob: 04d3b4309e4c387808496c8c43d39e3cb0aa3330 [file] [log] [blame]
Javier Martin5de88752013-03-01 12:37:53 +01001/*
2 * Cryptographic API.
3 *
4 * Support for SAHARA cryptographic accelerator.
5 *
6 * Copyright (c) 2013 Vista Silicon S.L.
7 * Author: Javier Martin <javier.martin@vista-silicon.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
12 *
13 * Based on omap-aes.c and tegra-aes.c
14 */
15
16#include <crypto/algapi.h>
17#include <crypto/aes.h>
18
19#include <linux/clk.h>
20#include <linux/crypto.h>
21#include <linux/interrupt.h>
22#include <linux/io.h>
23#include <linux/irq.h>
24#include <linux/kernel.h>
Steffen Trumtrarc0c3c892014-12-01 13:26:33 +010025#include <linux/kthread.h>
Javier Martin5de88752013-03-01 12:37:53 +010026#include <linux/module.h>
Steffen Trumtrarc0c3c892014-12-01 13:26:33 +010027#include <linux/mutex.h>
Javier Martin5de88752013-03-01 12:37:53 +010028#include <linux/of.h>
Steffen Trumtrar5ed903b2014-12-01 13:26:32 +010029#include <linux/of_device.h>
Javier Martin5de88752013-03-01 12:37:53 +010030#include <linux/platform_device.h>
31
/* Driver identification and fixed hardware limits. */
#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3 3
#define SAHARA_VERSION_4 4
#define SAHARA_TIMEOUT_MS 1000
#define SAHARA_MAX_HW_DESC 2
#define SAHARA_MAX_HW_LINK 20

/* Software flags kept in sahara_ctx/sahara_dev ->flags. */
#define FLAGS_MODE_MASK 0x000f
#define FLAGS_ENCRYPT BIT(0)
#define FLAGS_CBC BIT(1)
#define FLAGS_NEW_KEY BIT(3)

/* Hardware descriptor header bits (see sahara_aes_key_hdr()). */
#define SAHARA_HDR_BASE 0x00800000
#define SAHARA_HDR_SKHA_ALG_AES 0
#define SAHARA_HDR_SKHA_OP_ENC (1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB (0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC (1 << 3)
#define SAHARA_HDR_FORM_DATA (5 << 16)
#define SAHARA_HDR_FORM_KEY (8 << 16)
#define SAHARA_HDR_LLO (1 << 24)
#define SAHARA_HDR_CHA_SKHA (1 << 28)
#define SAHARA_HDR_CHA_MDHA (2 << 28)
#define SAHARA_HDR_PARITY_BIT (1 << 31)

/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH 1

/* Register offsets and their bit fields. */
#define SAHARA_REG_VERSION 0x00
#define SAHARA_REG_DAR 0x04
#define SAHARA_REG_CONTROL 0x08
#define SAHARA_CONTROL_SET_THROTTLE(x) (((x) & 0xff) << 24)
#define SAHARA_CONTROL_SET_MAXBURST(x) (((x) & 0xff) << 16)
#define SAHARA_CONTROL_RNG_AUTORSD (1 << 7)
#define SAHARA_CONTROL_ENABLE_INT (1 << 4)
#define SAHARA_REG_CMD 0x0C
#define SAHARA_CMD_RESET (1 << 0)
#define SAHARA_CMD_CLEAR_INT (1 << 8)
#define SAHARA_CMD_CLEAR_ERR (1 << 9)
#define SAHARA_CMD_SINGLE_STEP (1 << 10)
#define SAHARA_CMD_MODE_BATCH (1 << 16)
#define SAHARA_CMD_MODE_DEBUG (1 << 18)
#define SAHARA_REG_STATUS 0x10
#define SAHARA_STATUS_GET_STATE(x) ((x) & 0x7)
#define SAHARA_STATE_IDLE 0
#define SAHARA_STATE_BUSY 1
#define SAHARA_STATE_ERR 2
#define SAHARA_STATE_FAULT 3
#define SAHARA_STATE_COMPLETE 4
#define SAHARA_STATE_COMP_FLAG (1 << 2)
#define SAHARA_STATUS_DAR_FULL (1 << 3)
#define SAHARA_STATUS_ERROR (1 << 4)
#define SAHARA_STATUS_SECURE (1 << 5)
#define SAHARA_STATUS_FAIL (1 << 6)
#define SAHARA_STATUS_INIT (1 << 7)
#define SAHARA_STATUS_RNG_RESEED (1 << 8)
#define SAHARA_STATUS_ACTIVE_RNG (1 << 9)
#define SAHARA_STATUS_ACTIVE_MDHA (1 << 10)
#define SAHARA_STATUS_ACTIVE_SKHA (1 << 11)
#define SAHARA_STATUS_MODE_BATCH (1 << 16)
#define SAHARA_STATUS_MODE_DEDICATED (1 << 17)
#define SAHARA_STATUS_MODE_DEBUG (1 << 18)
#define SAHARA_STATUS_GET_ISTATE(x) (((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS 0x14
#define SAHARA_ERRSTATUS_GET_SOURCE(x) ((x) & 0xf)
#define SAHARA_ERRSOURCE_CHA 14
#define SAHARA_ERRSOURCE_DMA 15
#define SAHARA_ERRSTATUS_DMA_DIR (1 << 8)
#define SAHARA_ERRSTATUS_GET_DMASZ(x) (((x) >> 9) & 0x3)
#define SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
#define SAHARA_ERRSTATUS_GET_CHASRC(x) (((x) >> 16) & 0xfff)
#define SAHARA_ERRSTATUS_GET_CHAERR(x) (((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR 0x18
#define SAHARA_REG_CDAR 0x1C
#define SAHARA_REG_IDAR 0x20
/*
 * Hardware descriptor, read by the engine via DMA.  Layout is fixed by
 * the SAHARA hardware: a header word, two (length, pointer) buffer
 * slots, and a physical pointer to the next descriptor (0 = end).
 */
struct sahara_hw_desc {
	u32		hdr;	/* operation header, see SAHARA_HDR_* */
	u32		len1;	/* length of first buffer (bytes) */
	dma_addr_t	p1;	/* physical address of first buffer */
	u32		len2;	/* length of second buffer (bytes) */
	dma_addr_t	p2;	/* physical address of second buffer */
	dma_addr_t	next;	/* physical address of next descriptor */
};
115
/*
 * Hardware link-list entry (scatter/gather element), read by the
 * engine via DMA.  A descriptor's p1/p2 may point at a chain of these.
 */
struct sahara_hw_link {
	u32		len;	/* length of this segment (bytes) */
	dma_addr_t	p;	/* physical address of the data */
	dma_addr_t	next;	/* physical address of next link (0 = end) */
};
121
/* Per-transform (tfm) context. */
struct sahara_ctx {
	unsigned long flags;		/* FLAGS_NEW_KEY etc. */
	int keylen;			/* key length set by ->setkey() */
	u8 key[AES_KEYSIZE_128];	/* only 128-bit keys run on the HW */
	/* software cipher used for key sizes the HW cannot handle */
	struct crypto_ablkcipher *fallback;
};
128
/* Per-request context: the mode flags requested by the caller. */
struct sahara_aes_reqctx {
	unsigned long mode;	/* FLAGS_ENCRYPT / FLAGS_CBC */
};
132
/*
 * Driver state for the single SAHARA instance.  The hardware processes
 * one request at a time; requests are queued and serviced sequentially
 * by a dedicated kthread.
 */
struct sahara_dev {
	struct device		*device;
	unsigned int		version;	/* SAHARA_VERSION_3 or _4 */
	void __iomem		*regs_base;
	struct clk		*clk_ipg;
	struct clk		*clk_ahb;
	struct mutex		queue_mutex;	/* protects ->queue */
	struct task_struct	*kthread;	/* request-servicing worker */
	struct completion	dma_completion;	/* signalled from the IRQ */

	struct sahara_ctx	*ctx;		/* ctx of the active request */
	spinlock_t		lock;
	struct crypto_queue	queue;
	unsigned long		flags;		/* mode of active request */

	/* two chained HW descriptors: [0] key/IV load, [1] data */
	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];

	/* DMA-coherent buffer holding key then IV (see probe) */
	u8			*key_base;
	dma_addr_t		key_phys_base;

	u8			*iv_base;
	dma_addr_t		iv_phys_base;

	/* scatter/gather link entries shared by input and output */
	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];

	size_t			total;		/* bytes in active request */
	struct scatterlist	*in_sg;
	unsigned int		nb_in_sg;
	struct scatterlist	*out_sg;
	unsigned int		nb_out_sg;

	u32			error;		/* set by the IRQ handler */
};
168
/*
 * Single global instance: the hardware runs one operation at a time
 * and the crypto callbacks have no private pointer back to the device.
 */
static struct sahara_dev *dev_ptr;
170
/* Write a 32-bit value to the SAHARA register at offset 'reg'. */
static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->regs_base + reg);
}
175
/* Read the 32-bit SAHARA register at offset 'reg'. */
static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
	return readl(dev->regs_base + reg);
}
180
181static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
182{
183 u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
184 SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
185 SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
186
187 if (dev->flags & FLAGS_CBC) {
188 hdr |= SAHARA_HDR_SKHA_MODE_CBC;
189 hdr ^= SAHARA_HDR_PARITY_BIT;
190 }
191
192 if (dev->flags & FLAGS_ENCRYPT) {
193 hdr |= SAHARA_HDR_SKHA_OP_ENC;
194 hdr ^= SAHARA_HDR_PARITY_BIT;
195 }
196
197 return hdr;
198}
199
200static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
201{
202 return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
203 SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
204}
205
206static int sahara_sg_length(struct scatterlist *sg,
207 unsigned int total)
208{
209 int sg_nb;
210 unsigned int len;
211 struct scatterlist *sg_list;
212
213 sg_nb = 0;
214 sg_list = sg;
215
216 while (total) {
217 len = min(sg_list->length, total);
218
219 sg_nb++;
220 total -= len;
221
222 sg_list = sg_next(sg_list);
223 if (!sg_list)
224 total = 0;
225 }
226
227 return sg_nb;
228}
229
/* Decode table for SAHARA_ERRSTATUS_GET_SOURCE() (4-bit field). */
static char *sahara_err_src[16] = {
	"No error",
	"Header error",
	"Descriptor length error",
	"Descriptor length or pointer error",
	"Link length error",
	"Link pointer error",
	"Input buffer error",
	"Output buffer error",
	"Output buffer starvation",
	"Internal state fault",
	"General descriptor problem",
	"Reserved",
	"Descriptor address error",
	"Link address error",
	"CHA error",
	"DMA error"
};

/* Decode table for SAHARA_ERRSTATUS_GET_DMASZ() (2-bit field). */
static char *sahara_err_dmasize[4] = {
	"Byte transfer",
	"Half-word transfer",
	"Word transfer",
	"Reserved"
};

/* Decode table for SAHARA_ERRSTATUS_GET_DMASRC() (3-bit field). */
static char *sahara_err_dmasrc[8] = {
	"No error",
	"AHB bus error",
	"Internal IP bus error",
	"Parity error",
	"DMA crosses 256 byte boundary",
	"DMA is busy",
	"Reserved",
	"DMA HW error"
};

/* Decode table for the CHA error-source bitfield (see decode_error). */
static char *sahara_cha_errsrc[12] = {
	"Input buffer non-empty",
	"Illegal address",
	"Illegal mode",
	"Illegal data size",
	"Illegal key size",
	"Write during processing",
	"CTX read during processing",
	"HW error",
	"Input buffer disabled/underflow",
	"Output buffer disabled/overflow",
	"DES key parity error",
	"Reserved"
};

/* Decode table for SAHARA_ERRSTATUS_GET_CHAERR() (2-bit field). */
static char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
283
/* Pretty-print the contents of the error-status register to the log. */
static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
	/*
	 * NOTE(review): ffs() is 1-based, so 'chasrc' is the position of
	 * the lowest set bit plus one (0 if the field is empty).  Confirm
	 * against the SAHARA reference manual that this matches the
	 * sahara_cha_errsrc[] indexing.
	 */
	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

	dev_err(dev->device, " - %s.\n", sahara_err_src[source]);

	if (source == SAHARA_ERRSOURCE_DMA) {
		if (error & SAHARA_ERRSTATUS_DMA_DIR)
			dev_err(dev->device, "	* DMA read.\n");
		else
			dev_err(dev->device, "	* DMA write.\n");

		dev_err(dev->device, "	* %s.\n",
		       sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
		dev_err(dev->device, "	* %s.\n",
		       sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
	} else if (source == SAHARA_ERRSOURCE_CHA) {
		dev_err(dev->device, "	* %s.\n",
			sahara_cha_errsrc[chasrc]);
		dev_err(dev->device, "	* %s.\n",
			sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
	}
	dev_err(dev->device, "\n");
}
311
/* State names indexed by SAHARA_STATUS_GET_STATE() minus COMP_FLAG. */
static char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
313
/*
 * Pretty-print the status register to the debug log.  Compiled out
 * unless DEBUG is enabled.
 */
static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
	u8 state;

	if (!IS_ENABLED(DEBUG))
		return;

	state = SAHARA_STATUS_GET_STATE(status);

	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
		__func__, status);

	dev_dbg(dev->device, " - State = %d:\n", state);
	/* bit 2 of the state field flags a completed descriptor */
	if (state & SAHARA_STATE_COMP_FLAG)
		dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");

	dev_dbg(dev->device, " * %s.\n",
		sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

	if (status & SAHARA_STATUS_DAR_FULL)
		dev_dbg(dev->device, " - DAR Full.\n");
	if (status & SAHARA_STATUS_ERROR)
		dev_dbg(dev->device, " - Error.\n");
	if (status & SAHARA_STATUS_SECURE)
		dev_dbg(dev->device, " - Secure.\n");
	if (status & SAHARA_STATUS_FAIL)
		dev_dbg(dev->device, " - Fail.\n");
	if (status & SAHARA_STATUS_RNG_RESEED)
		dev_dbg(dev->device, " - RNG Reseed Request.\n");
	if (status & SAHARA_STATUS_ACTIVE_RNG)
		dev_dbg(dev->device, " - RNG Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_MDHA)
		dev_dbg(dev->device, " - MDHA Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_SKHA)
		dev_dbg(dev->device, " - SKHA Active.\n");

	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, " - Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, " - Decidated Mode.\n"); /* sic: typo in log string kept (runtime output) */
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, " - Debug Mode.\n");

	dev_dbg(dev->device, " - Internal state = 0x%02x\n",
		SAHARA_STATUS_GET_ISTATE(status));

	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
		sahara_read(dev, SAHARA_REG_CDAR));
	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
		sahara_read(dev, SAHARA_REG_IDAR));
}
365
/*
 * Dump both hardware descriptors to the debug log.  Compiled out
 * unless DEBUG is enabled.
 *
 * NOTE(review): dma_addr_t fields are printed with %08x, which assumes
 * a 32-bit dma_addr_t — true on the i.MX targets this driver serves,
 * but verify if building with larger DMA addresses.
 */
static void sahara_dump_descriptors(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
		dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n",
			i, dev->hw_phys_desc[i]);
		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_desc[i]->next);
	}
	dev_dbg(dev->device, "\n");
}
386
/*
 * Dump all hardware link entries to the debug log.  Compiled out
 * unless DEBUG is enabled.  Same %08x/dma_addr_t caveat as
 * sahara_dump_descriptors().
 */
static void sahara_dump_links(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
		dev_dbg(dev->device, "Link (%d) (0x%08x):\n",
			i, dev->hw_phys_link[i]);
		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_link[i]->next);
	}
	dev_dbg(dev->device, "\n");
}
404
Javier Martin5de88752013-03-01 12:37:53 +0100405static int sahara_hw_descriptor_create(struct sahara_dev *dev)
406{
407 struct sahara_ctx *ctx = dev->ctx;
408 struct scatterlist *sg;
409 int ret;
410 int i, j;
411
412 /* Copy new key if necessary */
413 if (ctx->flags & FLAGS_NEW_KEY) {
414 memcpy(dev->key_base, ctx->key, ctx->keylen);
415 ctx->flags &= ~FLAGS_NEW_KEY;
416
417 if (dev->flags & FLAGS_CBC) {
418 dev->hw_desc[0]->len1 = AES_BLOCK_SIZE;
419 dev->hw_desc[0]->p1 = dev->iv_phys_base;
420 } else {
421 dev->hw_desc[0]->len1 = 0;
422 dev->hw_desc[0]->p1 = 0;
423 }
424 dev->hw_desc[0]->len2 = ctx->keylen;
425 dev->hw_desc[0]->p2 = dev->key_phys_base;
426 dev->hw_desc[0]->next = dev->hw_phys_desc[1];
427 }
428 dev->hw_desc[0]->hdr = sahara_aes_key_hdr(dev);
429
430 dev->nb_in_sg = sahara_sg_length(dev->in_sg, dev->total);
431 dev->nb_out_sg = sahara_sg_length(dev->out_sg, dev->total);
432 if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
433 dev_err(dev->device, "not enough hw links (%d)\n",
434 dev->nb_in_sg + dev->nb_out_sg);
435 return -EINVAL;
436 }
437
438 ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
439 DMA_TO_DEVICE);
440 if (ret != dev->nb_in_sg) {
441 dev_err(dev->device, "couldn't map in sg\n");
442 goto unmap_in;
443 }
444 ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
445 DMA_FROM_DEVICE);
446 if (ret != dev->nb_out_sg) {
447 dev_err(dev->device, "couldn't map out sg\n");
448 goto unmap_out;
449 }
450
451 /* Create input links */
452 dev->hw_desc[1]->p1 = dev->hw_phys_link[0];
453 sg = dev->in_sg;
454 for (i = 0; i < dev->nb_in_sg; i++) {
455 dev->hw_link[i]->len = sg->length;
456 dev->hw_link[i]->p = sg->dma_address;
457 if (i == (dev->nb_in_sg - 1)) {
458 dev->hw_link[i]->next = 0;
459 } else {
460 dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
461 sg = sg_next(sg);
462 }
463 }
464
465 /* Create output links */
466 dev->hw_desc[1]->p2 = dev->hw_phys_link[i];
467 sg = dev->out_sg;
468 for (j = i; j < dev->nb_out_sg + i; j++) {
469 dev->hw_link[j]->len = sg->length;
470 dev->hw_link[j]->p = sg->dma_address;
471 if (j == (dev->nb_out_sg + i - 1)) {
472 dev->hw_link[j]->next = 0;
473 } else {
474 dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
475 sg = sg_next(sg);
476 }
477 }
478
479 /* Fill remaining fields of hw_desc[1] */
480 dev->hw_desc[1]->hdr = sahara_aes_data_link_hdr(dev);
481 dev->hw_desc[1]->len1 = dev->total;
482 dev->hw_desc[1]->len2 = dev->total;
483 dev->hw_desc[1]->next = 0;
484
485 sahara_dump_descriptors(dev);
486 sahara_dump_links(dev);
487
Javier Martin5de88752013-03-01 12:37:53 +0100488 sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
489
490 return 0;
491
492unmap_out:
493 dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
494 DMA_TO_DEVICE);
495unmap_in:
496 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
497 DMA_FROM_DEVICE);
498
499 return -EINVAL;
500}
501
Steffen Trumtrarc0c3c892014-12-01 13:26:33 +0100502static int sahara_aes_process(struct ablkcipher_request *req)
Javier Martin5de88752013-03-01 12:37:53 +0100503{
Steffen Trumtrarc0c3c892014-12-01 13:26:33 +0100504 struct sahara_dev *dev = dev_ptr;
Javier Martin5de88752013-03-01 12:37:53 +0100505 struct sahara_ctx *ctx;
506 struct sahara_aes_reqctx *rctx;
Javier Martin5de88752013-03-01 12:37:53 +0100507 int ret;
508
Javier Martin5de88752013-03-01 12:37:53 +0100509 /* Request is ready to be dispatched by the device */
510 dev_dbg(dev->device,
511 "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
512 req->nbytes, req->src, req->dst);
513
514 /* assign new request to device */
Javier Martin5de88752013-03-01 12:37:53 +0100515 dev->total = req->nbytes;
516 dev->in_sg = req->src;
517 dev->out_sg = req->dst;
518
519 rctx = ablkcipher_request_ctx(req);
520 ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
521 rctx->mode &= FLAGS_MODE_MASK;
522 dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
523
524 if ((dev->flags & FLAGS_CBC) && req->info)
525 memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);
526
527 /* assign new context to device */
Javier Martin5de88752013-03-01 12:37:53 +0100528 dev->ctx = ctx;
529
Steffen Trumtrarc0c3c892014-12-01 13:26:33 +0100530 reinit_completion(&dev->dma_completion);
531
Javier Martin5de88752013-03-01 12:37:53 +0100532 ret = sahara_hw_descriptor_create(dev);
Steffen Trumtrarc0c3c892014-12-01 13:26:33 +0100533
534 ret = wait_for_completion_timeout(&dev->dma_completion,
535 msecs_to_jiffies(SAHARA_TIMEOUT_MS));
536 if (!ret) {
537 dev_err(dev->device, "AES timeout\n");
538 return -ETIMEDOUT;
Javier Martin5de88752013-03-01 12:37:53 +0100539 }
Steffen Trumtrarc0c3c892014-12-01 13:26:33 +0100540
541 dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
542 DMA_TO_DEVICE);
543 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
544 DMA_FROM_DEVICE);
545
546 return 0;
Javier Martin5de88752013-03-01 12:37:53 +0100547}
548
/*
 * ->setkey() callback.  128-bit keys are handled by the hardware; 192-
 * and 256-bit keys are delegated to the software fallback cipher.
 * Returns 0 on success, -EINVAL for illegal key sizes, or the
 * fallback's setkey error.
 */
static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	int ret;

	/*
	 * NOTE(review): keylen is recorded before it is validated, so an
	 * illegal size is left in ctx->keylen even though -EINVAL is
	 * returned — confirm no later path trusts it.
	 */
	ctx->keylen = keylen;

	/* SAHARA only supports 128bit keys */
	if (keylen == AES_KEYSIZE_128) {
		memcpy(ctx->key, key, keylen);
		/* defer the actual HW key load to descriptor creation */
		ctx->flags |= FLAGS_NEW_KEY;
		return 0;
	}

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
		return -EINVAL;

	/*
	 * The requested key size is not supported by HW, do a fallback.
	 * Forward our request flags to the fallback tfm first.
	 */
	ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	ctx->fallback->base.crt_flags |=
		(tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
	if (ret) {
		struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);

		/* mirror the fallback's result flags onto our tfm */
		tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm_aux->crt_flags |=
			(ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}
585
/*
 * Common entry point for all four encrypt/decrypt operations: validate
 * the request, queue it, and wake the servicing kthread.  Returns the
 * queueing status (usually -EINPROGRESS; completion is asynchronous).
 */
static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int err = 0;

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	/* The hardware only processes whole AES blocks. */
	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->device,
			"request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	/* Stash the mode; sahara_aes_process() reads it back later. */
	rctx->mode = mode;

	mutex_lock(&dev->queue_mutex);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	mutex_unlock(&dev->queue_mutex);

	/* Kick the worker; it re-checks the queue before sleeping. */
	wake_up_process(dev->kthread);

	return err;
}
611
612static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
613{
614 struct crypto_tfm *tfm =
615 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
616 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
617 crypto_ablkcipher_reqtfm(req));
618 int err;
619
620 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
621 ablkcipher_request_set_tfm(req, ctx->fallback);
622 err = crypto_ablkcipher_encrypt(req);
623 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
624 return err;
625 }
626
627 return sahara_aes_crypt(req, FLAGS_ENCRYPT);
628}
629
630static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
631{
632 struct crypto_tfm *tfm =
633 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
634 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
635 crypto_ablkcipher_reqtfm(req));
636 int err;
637
638 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
639 ablkcipher_request_set_tfm(req, ctx->fallback);
640 err = crypto_ablkcipher_decrypt(req);
641 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
642 return err;
643 }
644
645 return sahara_aes_crypt(req, 0);
646}
647
648static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
649{
650 struct crypto_tfm *tfm =
651 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
652 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
653 crypto_ablkcipher_reqtfm(req));
654 int err;
655
656 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
657 ablkcipher_request_set_tfm(req, ctx->fallback);
658 err = crypto_ablkcipher_encrypt(req);
659 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
660 return err;
661 }
662
663 return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
664}
665
666static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
667{
668 struct crypto_tfm *tfm =
669 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
670 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
671 crypto_ablkcipher_reqtfm(req));
672 int err;
673
674 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
675 ablkcipher_request_set_tfm(req, ctx->fallback);
676 err = crypto_ablkcipher_decrypt(req);
677 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
678 return err;
679 }
680
681 return sahara_aes_crypt(req, FLAGS_CBC);
682}
683
684static int sahara_aes_cra_init(struct crypto_tfm *tfm)
685{
Marek Vasutefa59e22014-05-14 11:41:03 +0200686 const char *name = crypto_tfm_alg_name(tfm);
Javier Martin5de88752013-03-01 12:37:53 +0100687 struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
688
689 ctx->fallback = crypto_alloc_ablkcipher(name, 0,
690 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
691 if (IS_ERR(ctx->fallback)) {
692 pr_err("Error allocating fallback algo %s\n", name);
693 return PTR_ERR(ctx->fallback);
694 }
695
696 tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);
697
698 return 0;
699}
700
701static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
702{
703 struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
704
705 if (ctx->fallback)
706 crypto_free_ablkcipher(ctx->fallback);
707 ctx->fallback = NULL;
708}
709
/*
 * Worker kthread: dequeue requests one at a time, run them on the
 * hardware, and report completion to the submitter.  Sleeps when the
 * queue is empty until sahara_aes_crypt() wakes it.
 */
static int sahara_queue_manage(void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req;
	int ret = 0;

	do {
		/*
		 * Go "sleepy" *before* checking the queue so that a
		 * wake_up_process() arriving between the dequeue and
		 * schedule() is not lost (it resets us to running).
		 */
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&dev->queue_mutex);
		async_req = crypto_dequeue_request(&dev->queue);
		mutex_unlock(&dev->queue_mutex);

		if (async_req) {
			struct ablkcipher_request *req =
				ablkcipher_request_cast(async_req);

			/* Process synchronously, then notify the caller. */
			ret = sahara_aes_process(req);

			async_req->complete(async_req, ret);

			/* Drain the queue before going back to sleep. */
			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}
739
/*
 * Algorithms registered with the crypto API.  Both advertise
 * CRYPTO_ALG_NEED_FALLBACK: key sizes other than 128 bits are handled
 * by the software fallback allocated in sahara_aes_cra_init().
 */
static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "sahara-ecb-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_ecb_encrypt,
		.decrypt	= sahara_aes_ecb_decrypt,
	}
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "sahara-cbc-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_cbc_encrypt,
		.decrypt	= sahara_aes_cbc_decrypt,
	}
}
};
784
/*
 * Interrupt handler: acknowledge the interrupt, record success or
 * failure in dev->error, and wake the worker waiting in
 * sahara_aes_process() via the completion.
 */
static irqreturn_t sahara_irq_handler(int irq, void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

	/* Ack the interrupt and clear any latched error condition. */
	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
		     SAHARA_REG_CMD);

	sahara_decode_status(dev, stat);

	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
		/* Still processing: not our completion interrupt. */
		return IRQ_NONE;
	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
		dev->error = 0;
	} else {
		sahara_decode_error(dev, err);
		dev->error = -EINVAL;
	}

	complete(&dev->dma_completion);

	return IRQ_HANDLED;
}
809
810
811static int sahara_register_algs(struct sahara_dev *dev)
812{
813 int err, i, j;
814
815 for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
816 INIT_LIST_HEAD(&aes_algs[i].cra_list);
817 err = crypto_register_alg(&aes_algs[i]);
818 if (err)
819 goto err_aes_algs;
820 }
821
822 return 0;
823
824err_aes_algs:
825 for (j = 0; j < i; j++)
826 crypto_unregister_alg(&aes_algs[j]);
827
828 return err;
829}
830
831static void sahara_unregister_algs(struct sahara_dev *dev)
832{
833 int i;
834
835 for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
836 crypto_unregister_alg(&aes_algs[i]);
837}
838
/* Legacy (non-devicetree) platform-device match table. */
static struct platform_device_id sahara_platform_ids[] = {
	{ .name = "sahara-imx27" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
844
/* Devicetree match table; the compatible picks the expected HW version. */
static struct of_device_id sahara_dt_ids[] = {
	{ .compatible = "fsl,imx53-sahara" },
	{ .compatible = "fsl,imx27-sahara" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);
Javier Martin5de88752013-03-01 12:37:53 +0100851
852static int sahara_probe(struct platform_device *pdev)
853{
854 struct sahara_dev *dev;
855 struct resource *res;
856 u32 version;
857 int irq;
858 int err;
859 int i;
860
861 dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
862 if (dev == NULL) {
863 dev_err(&pdev->dev, "unable to alloc data struct.\n");
864 return -ENOMEM;
865 }
866
867 dev->device = &pdev->dev;
868 platform_set_drvdata(pdev, dev);
869
870 /* Get the base address */
871 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Jingoo Han9e952752014-02-12 13:23:37 +0900872 dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
873 if (IS_ERR(dev->regs_base))
874 return PTR_ERR(dev->regs_base);
Javier Martin5de88752013-03-01 12:37:53 +0100875
876 /* Get the IRQ */
877 irq = platform_get_irq(pdev, 0);
878 if (irq < 0) {
879 dev_err(&pdev->dev, "failed to get irq resource\n");
880 return irq;
881 }
882
Alexander Shiyan3d6f1d12014-03-10 20:13:32 +0800883 err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
884 0, dev_name(&pdev->dev), dev);
885 if (err) {
Javier Martin5de88752013-03-01 12:37:53 +0100886 dev_err(&pdev->dev, "failed to request irq\n");
Alexander Shiyan3d6f1d12014-03-10 20:13:32 +0800887 return err;
Javier Martin5de88752013-03-01 12:37:53 +0100888 }
889
890 /* clocks */
891 dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
892 if (IS_ERR(dev->clk_ipg)) {
893 dev_err(&pdev->dev, "Could not get ipg clock\n");
894 return PTR_ERR(dev->clk_ipg);
895 }
896
897 dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
898 if (IS_ERR(dev->clk_ahb)) {
899 dev_err(&pdev->dev, "Could not get ahb clock\n");
900 return PTR_ERR(dev->clk_ahb);
901 }
902
903 /* Allocate HW descriptors */
904 dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev,
905 SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
906 &dev->hw_phys_desc[0], GFP_KERNEL);
907 if (!dev->hw_desc[0]) {
908 dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
909 return -ENOMEM;
910 }
911 dev->hw_desc[1] = dev->hw_desc[0] + 1;
912 dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
913 sizeof(struct sahara_hw_desc);
914
915 /* Allocate space for iv and key */
916 dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
917 &dev->key_phys_base, GFP_KERNEL);
918 if (!dev->key_base) {
919 dev_err(&pdev->dev, "Could not allocate memory for key\n");
920 err = -ENOMEM;
921 goto err_key;
922 }
923 dev->iv_base = dev->key_base + AES_KEYSIZE_128;
924 dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
925
926 /* Allocate space for HW links */
927 dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
928 SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
929 &dev->hw_phys_link[0], GFP_KERNEL);
Dan Carpenter393e6612013-08-20 11:51:41 +0300930 if (!dev->hw_link[0]) {
Javier Martin5de88752013-03-01 12:37:53 +0100931 dev_err(&pdev->dev, "Could not allocate hw links\n");
932 err = -ENOMEM;
933 goto err_link;
934 }
935 for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
936 dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
937 sizeof(struct sahara_hw_link);
938 dev->hw_link[i] = dev->hw_link[i - 1] + 1;
939 }
940
941 crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
942
Steffen Trumtrar20ec9d82014-12-01 13:26:31 +0100943 spin_lock_init(&dev->lock);
Steffen Trumtrarc0c3c892014-12-01 13:26:33 +0100944 mutex_init(&dev->queue_mutex);
Steffen Trumtrar20ec9d82014-12-01 13:26:31 +0100945
Javier Martin5de88752013-03-01 12:37:53 +0100946 dev_ptr = dev;
947
Steffen Trumtrarc0c3c892014-12-01 13:26:33 +0100948 dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
949 if (IS_ERR(dev->kthread)) {
950 err = PTR_ERR(dev->kthread);
951 goto err_link;
952 }
Javier Martin5de88752013-03-01 12:37:53 +0100953
Steffen Trumtrarc0c3c892014-12-01 13:26:33 +0100954 init_completion(&dev->dma_completion);
Javier Martin5de88752013-03-01 12:37:53 +0100955
956 clk_prepare_enable(dev->clk_ipg);
957 clk_prepare_enable(dev->clk_ahb);
958
959 version = sahara_read(dev, SAHARA_REG_VERSION);
Steffen Trumtrar5ed903b2014-12-01 13:26:32 +0100960 if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
961 if (version != SAHARA_VERSION_3)
962 err = -ENODEV;
963 } else if (of_device_is_compatible(pdev->dev.of_node,
964 "fsl,imx53-sahara")) {
965 if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
966 err = -ENODEV;
967 version = (version >> 8) & 0xff;
968 }
969 if (err == -ENODEV) {
Javier Martin5de88752013-03-01 12:37:53 +0100970 dev_err(&pdev->dev, "SAHARA version %d not supported\n",
Steffen Trumtrar5ed903b2014-12-01 13:26:32 +0100971 version);
Javier Martin5de88752013-03-01 12:37:53 +0100972 goto err_algs;
973 }
974
Steffen Trumtrar5ed903b2014-12-01 13:26:32 +0100975 dev->version = version;
976
Javier Martin5de88752013-03-01 12:37:53 +0100977 sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
978 SAHARA_REG_CMD);
979 sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
980 SAHARA_CONTROL_SET_MAXBURST(8) |
981 SAHARA_CONTROL_RNG_AUTORSD |
982 SAHARA_CONTROL_ENABLE_INT,
983 SAHARA_REG_CONTROL);
984
985 err = sahara_register_algs(dev);
986 if (err)
987 goto err_algs;
988
989 dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
990
991 return 0;
992
993err_algs:
994 dma_free_coherent(&pdev->dev,
995 SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
996 dev->hw_link[0], dev->hw_phys_link[0]);
997 clk_disable_unprepare(dev->clk_ipg);
998 clk_disable_unprepare(dev->clk_ahb);
Steffen Trumtrarc0c3c892014-12-01 13:26:33 +0100999 kthread_stop(dev->kthread);
Javier Martin5de88752013-03-01 12:37:53 +01001000 dev_ptr = NULL;
1001err_link:
1002 dma_free_coherent(&pdev->dev,
1003 2 * AES_KEYSIZE_128,
1004 dev->key_base, dev->key_phys_base);
1005err_key:
1006 dma_free_coherent(&pdev->dev,
1007 SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1008 dev->hw_desc[0], dev->hw_phys_desc[0]);
1009
1010 return err;
1011}
1012
1013static int sahara_remove(struct platform_device *pdev)
1014{
1015 struct sahara_dev *dev = platform_get_drvdata(pdev);
1016
1017 dma_free_coherent(&pdev->dev,
1018 SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1019 dev->hw_link[0], dev->hw_phys_link[0]);
1020 dma_free_coherent(&pdev->dev,
1021 2 * AES_KEYSIZE_128,
1022 dev->key_base, dev->key_phys_base);
1023 dma_free_coherent(&pdev->dev,
1024 SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1025 dev->hw_desc[0], dev->hw_phys_desc[0]);
1026
Steffen Trumtrarc0c3c892014-12-01 13:26:33 +01001027 kthread_stop(dev->kthread);
Javier Martin5de88752013-03-01 12:37:53 +01001028
1029 sahara_unregister_algs(dev);
1030
1031 clk_disable_unprepare(dev->clk_ipg);
1032 clk_disable_unprepare(dev->clk_ahb);
1033
1034 dev_ptr = NULL;
1035
1036 return 0;
1037}
1038
/* Platform driver glue; matches by devicetree or legacy platform id. */
static struct platform_driver sahara_driver = {
	.probe		= sahara_probe,
	.remove		= sahara_remove,
	.driver		= {
		.name	= SAHARA_NAME,
		.owner	= THIS_MODULE,
		.of_match_table = sahara_dt_ids,
	},
	.id_table = sahara_platform_ids,
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");