/*
 * Cryptographic API.
 *
 * Support for SAHARA cryptographic accelerator.
 *
 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
 * Copyright (c) 2013 Vista Silicon S.L.
 * Author: Javier Martin <javier.martin@vista-silicon.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Based on omap-aes.c and tegra-aes.c
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define SHA_BUFFER_LEN		PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE

#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3	3
#define SAHARA_VERSION_4	4
#define SAHARA_TIMEOUT_MS	1000
#define SAHARA_MAX_HW_DESC	2
#define SAHARA_MAX_HW_LINK	20

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_NEW_KEY		BIT(3)

#define SAHARA_HDR_BASE			0x00800000
#define SAHARA_HDR_SKHA_ALG_AES		0
#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
#define SAHARA_HDR_FORM_DATA		(5 << 16)
#define SAHARA_HDR_FORM_KEY		(8 << 16)
#define SAHARA_HDR_LLO			(1 << 24)
#define SAHARA_HDR_CHA_SKHA		(1 << 28)
#define SAHARA_HDR_CHA_MDHA		(2 << 28)
#define SAHARA_HDR_PARITY_BIT		(1 << 31)
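
/*
 * Bit 31 is the header parity bit. As the header builders below assume,
 * every descriptor header must carry an odd number of set bits, so the
 * AES path XORs this bit whenever it toggles a mode bit and the SHA path
 * sets it when the popcount comes out even.
 */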

#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
#define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
#define SAHARA_HDR_MDHA_HASH		0xA0850000
#define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
#define SAHARA_HDR_MDHA_ALG_SHA1	0
#define SAHARA_HDR_MDHA_ALG_MD5		1
#define SAHARA_HDR_MDHA_ALG_SHA256	2
#define SAHARA_HDR_MDHA_ALG_SHA224	3
#define SAHARA_HDR_MDHA_PDATA		(1 << 2)
#define SAHARA_HDR_MDHA_HMAC		(1 << 3)
#define SAHARA_HDR_MDHA_INIT		(1 << 5)
#define SAHARA_HDR_MDHA_IPAD		(1 << 6)
#define SAHARA_HDR_MDHA_OPAD		(1 << 7)
#define SAHARA_HDR_MDHA_SWAP		(1 << 8)
#define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
#define SAHARA_HDR_MDHA_SSL		(1 << 10)

/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH	1

#define SAHARA_REG_VERSION	0x00
#define SAHARA_REG_DAR		0x04
#define SAHARA_REG_CONTROL	0x08
#define SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
#define SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
#define SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
#define SAHARA_CONTROL_ENABLE_INT	(1 << 4)
#define SAHARA_REG_CMD		0x0C
#define SAHARA_CMD_RESET	(1 << 0)
#define SAHARA_CMD_CLEAR_INT	(1 << 8)
#define SAHARA_CMD_CLEAR_ERR	(1 << 9)
#define SAHARA_CMD_SINGLE_STEP	(1 << 10)
#define SAHARA_CMD_MODE_BATCH	(1 << 16)
#define SAHARA_CMD_MODE_DEBUG	(1 << 18)
#define SAHARA_REG_STATUS	0x10
#define SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
#define SAHARA_STATE_IDLE	0
#define SAHARA_STATE_BUSY	1
#define SAHARA_STATE_ERR	2
#define SAHARA_STATE_FAULT	3
#define SAHARA_STATE_COMPLETE	4
#define SAHARA_STATE_COMP_FLAG	(1 << 2)
#define SAHARA_STATUS_DAR_FULL	(1 << 3)
#define SAHARA_STATUS_ERROR	(1 << 4)
#define SAHARA_STATUS_SECURE	(1 << 5)
#define SAHARA_STATUS_FAIL	(1 << 6)
#define SAHARA_STATUS_INIT	(1 << 7)
#define SAHARA_STATUS_RNG_RESEED	(1 << 8)
#define SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
#define SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
#define SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
#define SAHARA_STATUS_MODE_BATCH	(1 << 16)
#define SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
#define SAHARA_STATUS_MODE_DEBUG	(1 << 18)
#define SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS	0x14
#define SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
#define SAHARA_ERRSOURCE_CHA	14
#define SAHARA_ERRSOURCE_DMA	15
#define SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
#define SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
#define SAHARA_ERRSTATUS_GET_DMASRC(x)	(((x) >> 13) & 0x7)
#define SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
#define SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR	0x18
#define SAHARA_REG_CDAR		0x1C
#define SAHARA_REG_IDAR		0x20

struct sahara_hw_desc {
	u32		hdr;
	u32		len1;
	dma_addr_t	p1;
	u32		len2;
	dma_addr_t	p2;
	dma_addr_t	next;
};

struct sahara_hw_link {
	u32		len;
	dma_addr_t	p;
	dma_addr_t	next;
};
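
/*
 * The driver chains at most two hw descriptors per operation: an optional
 * key/context descriptor followed by a data descriptor whose p1/p2 point
 * at chains of sahara_hw_link entries describing the input and output
 * buffers.
 */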

struct sahara_ctx {
	unsigned long flags;

	/* AES-specific context */
	int keylen;
	u8 key[AES_KEYSIZE_128];
	struct crypto_ablkcipher *fallback;

	/* SHA-specific context */
	struct crypto_shash *shash_fallback;
};

struct sahara_aes_reqctx {
	unsigned long mode;
};

/*
 * struct sahara_sha_reqctx - private data per request
 * @buf: holds data for requests smaller than block_size
 * @rembuf: used to prepare one block_size-aligned request
 * @context: hw-specific context for request. Digest is extracted from this
 * @mode: specifies what type of hw-descriptor needs to be built
 * @digest_size: length of digest for this request
 * @context_size: length of hw-context for this request.
 *                Always digest_size + 4
 * @buf_cnt: number of bytes saved in buf
 * @sg_in_idx: number of hw links
 * @in_sg: scatterlist for input data
 * @in_sg_chain: scatterlists for chained input data
 * @in_sg_chained: specifies if chained scatterlists are used or not
 * @total: total number of bytes for transfer
 * @last: is this the last block
 * @first: is this the first block
 * @active: inside a transfer
 */
struct sahara_sha_reqctx {
	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			context[SHA256_DIGEST_SIZE + 4];
	struct mutex		mutex;
	unsigned int		mode;
	unsigned int		digest_size;
	unsigned int		context_size;
	unsigned int		buf_cnt;
	unsigned int		sg_in_idx;
	struct scatterlist	*in_sg;
	struct scatterlist	in_sg_chain[2];
	bool			in_sg_chained;
	size_t			total;
	unsigned int		last;
	unsigned int		first;
	unsigned int		active;
};

struct sahara_dev {
	struct device		*device;
	unsigned int		version;
	void __iomem		*regs_base;
	struct clk		*clk_ipg;
	struct clk		*clk_ahb;
	struct mutex		queue_mutex;
	struct task_struct	*kthread;
	struct completion	dma_completion;

	struct sahara_ctx	*ctx;
	spinlock_t		lock;
	struct crypto_queue	queue;
	unsigned long		flags;

	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];

	u8			*key_base;
	dma_addr_t		key_phys_base;

	u8			*iv_base;
	dma_addr_t		iv_phys_base;

	u8			*context_base;
	dma_addr_t		context_phys_base;

	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];

	size_t			total;
	struct scatterlist	*in_sg;
	unsigned int		nb_in_sg;
	struct scatterlist	*out_sg;
	unsigned int		nb_out_sg;

	u32			error;
};

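/*
 * There is a single SAHARA unit per SoC; the algorithm callbacks reach the
 * device state through this pointer, which sahara_probe() sets up once.
 */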
static struct sahara_dev *dev_ptr;

static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->regs_base + reg);
}

static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
	return readl(dev->regs_base + reg);
}

static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
		  SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
		  SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;

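	/*
	 * Each additional mode bit flips the header parity, so XOR the
	 * parity bit back in to keep the overall set-bit count odd.
	 */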
	if (dev->flags & FLAGS_CBC) {
		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	if (dev->flags & FLAGS_ENCRYPT) {
		hdr |= SAHARA_HDR_SKHA_OP_ENC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	return hdr;
}

static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
	       SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}

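/*
 * Count how many scatterlist entries are needed to cover 'total' bytes;
 * this bounds the number of hw links a request will consume.
 */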
static int sahara_sg_length(struct scatterlist *sg,
			    unsigned int total)
{
	int sg_nb;
	unsigned int len;
	struct scatterlist *sg_list;

	sg_nb = 0;
	sg_list = sg;

	while (total) {
		len = min(sg_list->length, total);

		sg_nb++;
		total -= len;

		sg_list = sg_next(sg_list);
		if (!sg_list)
			total = 0;
	}

	return sg_nb;
}

static char *sahara_err_src[16] = {
	"No error",
	"Header error",
	"Descriptor length error",
	"Descriptor length or pointer error",
	"Link length error",
	"Link pointer error",
	"Input buffer error",
	"Output buffer error",
	"Output buffer starvation",
	"Internal state fault",
	"General descriptor problem",
	"Reserved",
	"Descriptor address error",
	"Link address error",
	"CHA error",
	"DMA error"
};

static char *sahara_err_dmasize[4] = {
	"Byte transfer",
	"Half-word transfer",
	"Word transfer",
	"Reserved"
};

static char *sahara_err_dmasrc[8] = {
	"No error",
	"AHB bus error",
	"Internal IP bus error",
	"Parity error",
	"DMA crosses 256 byte boundary",
	"DMA is busy",
	"Reserved",
	"DMA HW error"
};

static char *sahara_cha_errsrc[12] = {
	"Input buffer non-empty",
	"Illegal address",
	"Illegal mode",
	"Illegal data size",
	"Illegal key size",
	"Write during processing",
	"CTX read during processing",
	"HW error",
	"Input buffer disabled/underflow",
	"Output buffer disabled/overflow",
	"DES key parity error",
	"Reserved"
};

static char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };

static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

	dev_err(dev->device, " - %s.\n", sahara_err_src[source]);

	if (source == SAHARA_ERRSOURCE_DMA) {
		if (error & SAHARA_ERRSTATUS_DMA_DIR)
			dev_err(dev->device, " * DMA read.\n");
		else
			dev_err(dev->device, " * DMA write.\n");

		dev_err(dev->device, " * %s.\n",
			sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
		dev_err(dev->device, " * %s.\n",
			sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
	} else if (source == SAHARA_ERRSOURCE_CHA) {
		dev_err(dev->device, " * %s.\n",
			sahara_cha_errsrc[chasrc]);
		dev_err(dev->device, " * %s.\n",
			sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
	}
	dev_err(dev->device, "\n");
}

static char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
	u8 state;

	if (!IS_ENABLED(DEBUG))
		return;

	state = SAHARA_STATUS_GET_STATE(status);

	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
		__func__, status);

	dev_dbg(dev->device, " - State = %d:\n", state);
	if (state & SAHARA_STATE_COMP_FLAG)
		dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");

	dev_dbg(dev->device, " * %s.\n",
		sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

	if (status & SAHARA_STATUS_DAR_FULL)
		dev_dbg(dev->device, " - DAR Full.\n");
	if (status & SAHARA_STATUS_ERROR)
		dev_dbg(dev->device, " - Error.\n");
	if (status & SAHARA_STATUS_SECURE)
		dev_dbg(dev->device, " - Secure.\n");
	if (status & SAHARA_STATUS_FAIL)
		dev_dbg(dev->device, " - Fail.\n");
	if (status & SAHARA_STATUS_RNG_RESEED)
		dev_dbg(dev->device, " - RNG Reseed Request.\n");
	if (status & SAHARA_STATUS_ACTIVE_RNG)
		dev_dbg(dev->device, " - RNG Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_MDHA)
		dev_dbg(dev->device, " - MDHA Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_SKHA)
		dev_dbg(dev->device, " - SKHA Active.\n");

	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, " - Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, " - Dedicated Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, " - Debug Mode.\n");

	dev_dbg(dev->device, " - Internal state = 0x%02x\n",
		SAHARA_STATUS_GET_ISTATE(status));

	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
		sahara_read(dev, SAHARA_REG_CDAR));
	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
		sahara_read(dev, SAHARA_REG_IDAR));
}

static void sahara_dump_descriptors(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
		dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n",
			i, dev->hw_phys_desc[i]);
		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_desc[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

static void sahara_dump_links(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
		dev_dbg(dev->device, "Link (%d) (0x%08x):\n",
			i, dev->hw_phys_link[i]);
		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_link[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
	struct sahara_ctx *ctx = dev->ctx;
	struct scatterlist *sg;
	int ret;
	int i, j;
	int idx = 0;

	/* Copy new key if necessary */
	if (ctx->flags & FLAGS_NEW_KEY) {
		memcpy(dev->key_base, ctx->key, ctx->keylen);
		ctx->flags &= ~FLAGS_NEW_KEY;

		if (dev->flags & FLAGS_CBC) {
			dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
			dev->hw_desc[idx]->p1 = dev->iv_phys_base;
		} else {
			dev->hw_desc[idx]->len1 = 0;
			dev->hw_desc[idx]->p1 = 0;
		}
		dev->hw_desc[idx]->len2 = ctx->keylen;
		dev->hw_desc[idx]->p2 = dev->key_phys_base;
		dev->hw_desc[idx]->next = dev->hw_phys_desc[1];

		dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);

		idx++;
	}

	dev->nb_in_sg = sahara_sg_length(dev->in_sg, dev->total);
	dev->nb_out_sg = sahara_sg_length(dev->out_sg, dev->total);
	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			 DMA_TO_DEVICE);
	if (ret != dev->nb_in_sg) {
		dev_err(dev->device, "couldn't map in sg\n");
		return -EINVAL;
	}
	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
			 DMA_FROM_DEVICE);
	if (ret != dev->nb_out_sg) {
		dev_err(dev->device, "couldn't map out sg\n");
		goto unmap_in;
	}

	/* Create input links */
	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
	sg = dev->in_sg;
	for (i = 0; i < dev->nb_in_sg; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	/* Create output links */
	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
	sg = dev->out_sg;
	for (j = i; j < dev->nb_out_sg + i; j++) {
		dev->hw_link[j]->len = sg->length;
		dev->hw_link[j]->p = sg->dma_address;
		if (j == (dev->nb_out_sg + i - 1)) {
			dev->hw_link[j]->next = 0;
		} else {
			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
			sg = sg_next(sg);
		}
	}

	/* Fill remaining fields of hw_desc[1] */
	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
	dev->hw_desc[idx]->len1 = dev->total;
	dev->hw_desc[idx]->len2 = dev->total;
	dev->hw_desc[idx]->next = 0;

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	return 0;

unmap_in:
	/* unmap with the same direction the buffers were mapped with */
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		     DMA_TO_DEVICE);

	return -EINVAL;
}

static int sahara_aes_process(struct ablkcipher_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
	int ret;
	unsigned long timeout;

	/* Request is ready to be dispatched by the device */
	dev_dbg(dev->device,
		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		req->nbytes, req->src, req->dst);

	/* assign new request to device */
	dev->total = req->nbytes;
	dev->in_sg = req->src;
	dev->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	if ((dev->flags & FLAGS_CBC) && req->info)
		memcpy(dev->iv_base, req->info, AES_BLOCK_SIZE);

	/* assign new context to device */
	dev->ctx = ctx;

	reinit_completion(&dev->dma_completion);

	ret = sahara_hw_descriptor_create(dev);
	if (ret)
		return -EINVAL;

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "AES timeout\n");
		return -ETIMEDOUT;
	}

	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		     DMA_FROM_DEVICE);
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		     DMA_TO_DEVICE);

	return 0;
}

static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	int ret;

	ctx->keylen = keylen;

	/* SAHARA only supports 128bit keys */
	if (keylen == AES_KEYSIZE_128) {
		memcpy(ctx->key, key, keylen);
		ctx->flags |= FLAGS_NEW_KEY;
		return 0;
	}

	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
		return -EINVAL;

	/*
	 * The requested key size is not supported by HW, do a fallback.
	 */
	ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	ctx->fallback->base.crt_flags |=
		(tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
	if (ret) {
		struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);

		tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm_aux->crt_flags |=
			(ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int err = 0;

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->device,
			"request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	rctx->mode = mode;

	mutex_lock(&dev->queue_mutex);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return err;
}

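/*
 * Keys the hardware cannot handle (192/256 bit) were programmed into the
 * software fallback tfm by sahara_aes_setkey(), so the four wrappers below
 * simply redirect such requests to that fallback.
 */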
static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_encrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_encrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_CBC);
}

static int sahara_aes_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->fallback = crypto_alloc_ablkcipher(name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->fallback);
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);

	return 0;
}

static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->fallback)
		crypto_free_ablkcipher(ctx->fallback);
	ctx->fallback = NULL;
}

static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
			       struct sahara_sha_reqctx *rctx)
{
	u32 hdr = rctx->mode;

	if (rctx->first) {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
		hdr |= SAHARA_HDR_MDHA_INIT;
	} else {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
	}

	if (rctx->last)
		hdr |= SAHARA_HDR_MDHA_PDATA;

	if (hweight_long(hdr) % 2 == 0)
		hdr |= SAHARA_HDR_PARITY_BIT;

	return hdr;
}

static int sahara_sha_hw_links_create(struct sahara_dev *dev,
				      struct sahara_sha_reqctx *rctx,
				      int start)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	dev->in_sg = rctx->in_sg;

	dev->nb_in_sg = sahara_sg_length(dev->in_sg, rctx->total);
	if (dev->nb_in_sg > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg);
		return -EINVAL;
	}

	if (rctx->in_sg_chained) {
		i = start;
		sg = dev->in_sg;
		while (sg) {
			ret = dma_map_sg(dev->device, sg, 1,
					 DMA_TO_DEVICE);
			if (!ret)
				return -EFAULT;

			dev->hw_link[i]->len = sg->length;
			dev->hw_link[i]->p = sg->dma_address;
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
			i += 1;
		}
		dev->hw_link[i - 1]->next = 0;
	} else {
		sg = dev->in_sg;
		ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
				 DMA_TO_DEVICE);
		if (!ret)
			return -EFAULT;

		for (i = start; i < dev->nb_in_sg + start; i++) {
			dev->hw_link[i]->len = sg->length;
			dev->hw_link[i]->p = sg->dma_address;
			if (i == (dev->nb_in_sg + start - 1)) {
				dev->hw_link[i]->next = 0;
			} else {
				dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
				sg = sg_next(sg);
			}
		}
	}

	return i;
}

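/*
 * Data descriptor aka #8 (first run) or #10 (subsequent runs, must follow
 * a #6 load descriptor)
 *
 * Hashes the data chained at p1 and stores the resulting MDHA context
 * (digest plus message length word) at p2.
 */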
static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
						struct sahara_sha_reqctx *rctx,
						struct ahash_request *req,
						int index)
{
	unsigned int result_len;
	int i = index;

	if (rctx->first)
		/* Create initial descriptor: #8 */
		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
	else
		/* Create hash descriptor: #10. Must follow #6. */
		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

	dev->hw_desc[index]->len1 = rctx->total;
	if (dev->hw_desc[index]->len1 == 0) {
		/* if len1 is 0, p1 must be 0, too */
		dev->hw_desc[index]->p1 = 0;
		rctx->sg_in_idx = 0;
	} else {
		/* Create input links */
		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
		i = sahara_sha_hw_links_create(dev, rctx, index);

		rctx->sg_in_idx = index;
		if (i < 0)
			return i;
	}

	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

	/* Save the context for the next operation */
	result_len = rctx->context_size;
	dev->hw_link[i]->p = dev->context_phys_base;

	dev->hw_link[i]->len = result_len;
	dev->hw_desc[index]->len2 = result_len;

	dev->hw_link[i]->next = 0;

	return 0;
}

/*
 * Load descriptor aka #6
 *
 * To load a previously saved context back to the MDHA unit
 *
 * p1: Saved Context
 * p2: NULL
 *
 */
static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
						   struct sahara_sha_reqctx *rctx,
						   struct ahash_request *req,
						   int index)
{
	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);

	dev->hw_desc[index]->len1 = rctx->context_size;
	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
	dev->hw_desc[index]->len2 = 0;
	dev->hw_desc[index]->p2 = 0;

	dev->hw_link[index]->len = rctx->context_size;
	dev->hw_link[index]->p = dev->context_phys_base;
	dev->hw_link[index]->next = 0;

	return 0;
}

static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
{
	if (!sg || !sg->length)
		return nbytes;

	while (nbytes && sg) {
		if (nbytes <= sg->length) {
			sg->length = nbytes;
			sg_mark_end(sg);
			break;
		}
		nbytes -= sg->length;
		sg = sg_next(sg);
	}

	return nbytes;
}

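/*
 * Stage a request for the hardware: data is collected in rctx->buf until a
 * full block is available, since only the final transfer may be padded by
 * the MDHA unit. Returns 0 when everything was merely buffered and
 * -EINPROGRESS when a block-aligned chunk is ready for processing.
 */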
static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int hash_later;
	unsigned int block_size;
	unsigned int len;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* append bytes from previous operation */
	len = rctx->buf_cnt + req->nbytes;

	/* only the last transfer can be padded in hardware */
	if (!rctx->last && (len < block_size)) {
		/* too little data, save it for the next operation */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
					 0, req->nbytes, 0);
		rctx->buf_cnt += req->nbytes;

		return 0;
	}

	/* add data from previous operation first */
	if (rctx->buf_cnt)
		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

	/* data must always be a multiple of block_size */
	hash_later = rctx->last ? 0 : len & (block_size - 1);
	if (hash_later) {
		unsigned int offset = req->nbytes - hash_later;
		/* Save remaining bytes for later use */
		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
					 hash_later, 0);
	}

	/* nbytes should now be multiple of blocksize */
	req->nbytes = req->nbytes - hash_later;

	sahara_walk_and_recalc(req->src, req->nbytes);

	/* have data from previous operation and current */
	if (rctx->buf_cnt && req->nbytes) {
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);

		sg_chain(rctx->in_sg_chain, 2, req->src);

		rctx->total = req->nbytes + rctx->buf_cnt;
		rctx->in_sg = rctx->in_sg_chain;

		rctx->in_sg_chained = true;
		req->src = rctx->in_sg_chain;
	/* only data from previous operation */
	} else if (rctx->buf_cnt) {
		if (req->src)
			rctx->in_sg = req->src;
		else
			rctx->in_sg = rctx->in_sg_chain;
		/* buf was copied into rembuf above */
		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
		rctx->total = rctx->buf_cnt;
		rctx->in_sg_chained = false;
	/* no data from previous operation */
	} else {
		rctx->in_sg = req->src;
		rctx->total = req->nbytes;
		req->src = rctx->in_sg;
		rctx->in_sg_chained = false;
	}

	/* on next call, we only have the remaining data in the buffer */
	rctx->buf_cnt = hash_later;

	return -EINPROGRESS;
}

static void sahara_sha_unmap_sg(struct sahara_dev *dev,
				struct sahara_sha_reqctx *rctx)
{
	struct scatterlist *sg;

	if (rctx->in_sg_chained) {
		sg = dev->in_sg;
		while (sg) {
			dma_unmap_sg(dev->device, sg, 1, DMA_TO_DEVICE);
			sg = sg_next(sg);
		}
	} else {
		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			     DMA_TO_DEVICE);
	}
}

static int sahara_sha_process(struct ahash_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	int ret;
	unsigned long timeout;

	ret = sahara_sha_prepare_request(req);
	if (!ret)
		return ret;

	if (rctx->first) {
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = 0;
		rctx->first = 0;
	} else {
		memcpy(dev->context_base, rctx->context, rctx->context_size);

		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
		dev->hw_desc[1]->next = 0;
	}

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	reinit_completion(&dev->dma_completion);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "SHA timeout\n");
		return -ETIMEDOUT;
	}

	if (rctx->sg_in_idx)
		sahara_sha_unmap_sg(dev, rctx);

	memcpy(rctx->context, dev->context_base, rctx->context_size);

	if (req->result)
		memcpy(req->result, rctx->context, rctx->digest_size);

	return 0;
}

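/*
 * Single worker thread that serializes all AES and SHA requests: it pops
 * one request at a time off dev->queue (kicking any backlog first) and
 * sleeps once the queue runs empty until sahara_*_enqueue() wakes it up.
 */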
static int sahara_queue_manage(void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req;
	struct crypto_async_request *backlog;
	int ret = 0;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&dev->queue_mutex);
		backlog = crypto_get_backlog(&dev->queue);
		async_req = crypto_dequeue_request(&dev->queue);
		mutex_unlock(&dev->queue_mutex);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (async_req) {
			if (crypto_tfm_alg_type(async_req->tfm) ==
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ahash_request *req =
					ahash_request_cast(async_req);

				ret = sahara_sha_process(req);
			} else {
				struct ablkcipher_request *req =
					ablkcipher_request_cast(async_req);

				ret = sahara_aes_process(req);
			}

			async_req->complete(async_req, ret);

			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}

static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int ret;

	if (!req->nbytes && !last)
		return 0;

	mutex_lock(&rctx->mutex);
	rctx->last = last;

	if (!rctx->active) {
		rctx->active = 1;
		rctx->first = 1;
	}

	mutex_lock(&dev->queue_mutex);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);
	mutex_unlock(&rctx->mutex);

	return ret;
}

static int sahara_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memset(rctx, 0, sizeof(*rctx));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
		rctx->digest_size = SHA1_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
		rctx->digest_size = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	rctx->context_size = rctx->digest_size + 4;
	rctx->active = 0;

	mutex_init(&rctx->mutex);

	return 0;
}

static int sahara_sha_update(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 0);
}

static int sahara_sha_final(struct ahash_request *req)
{
	req->nbytes = 0;
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_finup(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_digest(struct ahash_request *req)
{
	sahara_sha_init(req);

	return sahara_sha_finup(req);
}

static int sahara_sha_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct sahara_ctx));
	memcpy(out + sizeof(struct sahara_ctx), rctx,
	       sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct sahara_ctx));
	memcpy(rctx, in + sizeof(struct sahara_ctx),
	       sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->shash_fallback = crypto_alloc_shash(name, 0,
					CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->shash_fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->shash_fallback);
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sahara_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

	return 0;
}

static void sahara_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->shash_fallback);
	ctx->shash_fallback = NULL;
}

static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "sahara-ecb-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_ecb_encrypt,
		.decrypt	= sahara_aes_ecb_decrypt,
	}
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "sahara-cbc-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_cbc_encrypt,
		.decrypt	= sahara_aes_cbc_decrypt,
	}
}
};

static struct ahash_alg sha_v3_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sahara-sha1",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
		.cra_exit		= sahara_sha_cra_exit,
	}
},
};

static struct ahash_alg sha_v4_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sahara-sha256",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
		.cra_exit		= sahara_sha_cra_exit,
	}
},
};

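/*
 * A finished (or faulted) descriptor chain raises an interrupt; the handler
 * just latches the error state and completes dma_completion, on which the
 * queue thread is waiting. A still-busy engine means the IRQ was not ours.
 */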
static irqreturn_t sahara_irq_handler(int irq, void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
		     SAHARA_REG_CMD);

	sahara_decode_status(dev, stat);

	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
		return IRQ_NONE;
	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
		dev->error = 0;
	} else {
		sahara_decode_error(dev, err);
		dev->error = -EINVAL;
	}

	complete(&dev->dma_completion);

	return IRQ_HANDLED;
}


static int sahara_register_algs(struct sahara_dev *dev)
{
	int err;
	unsigned int i, j, k, l;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		INIT_LIST_HEAD(&aes_algs[i].cra_list);
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
		err = crypto_register_ahash(&sha_v3_algs[k]);
		if (err)
			goto err_sha_v3_algs;
	}

	if (dev->version > SAHARA_VERSION_3)
		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
			err = crypto_register_ahash(&sha_v4_algs[l]);
			if (err)
				goto err_sha_v4_algs;
		}

	return 0;

err_sha_v4_algs:
	for (j = 0; j < l; j++)
		crypto_unregister_ahash(&sha_v4_algs[j]);

err_sha_v3_algs:
	for (j = 0; j < k; j++)
		crypto_unregister_ahash(&sha_v3_algs[j]);

err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}

static void sahara_unregister_algs(struct sahara_dev *dev)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);

	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
		crypto_unregister_ahash(&sha_v3_algs[i]);

	if (dev->version > SAHARA_VERSION_3)
		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
			crypto_unregister_ahash(&sha_v4_algs[i]);
}

static struct platform_device_id sahara_platform_ids[] = {
	{ .name = "sahara-imx27" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);

static struct of_device_id sahara_dt_ids[] = {
	{ .compatible = "fsl,imx53-sahara" },
	{ .compatible = "fsl,imx27-sahara" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);

static int sahara_probe(struct platform_device *pdev)
{
	struct sahara_dev *dev;
	struct resource *res;
	u32 version;
	int irq;
	int err;
	int i;

	dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
	if (dev == NULL) {
		dev_err(&pdev->dev, "unable to alloc data struct.\n");
		return -ENOMEM;
	}

	dev->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dev->regs_base))
		return PTR_ERR(dev->regs_base);

	/* Get the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq resource\n");
		return irq;
	}

	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
			       0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return err;
	}

	/* clocks */
	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(dev->clk_ipg)) {
		dev_err(&pdev->dev, "Could not get ipg clock\n");
		return PTR_ERR(dev->clk_ipg);
	}

	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(dev->clk_ahb)) {
		dev_err(&pdev->dev, "Could not get ahb clock\n");
		return PTR_ERR(dev->clk_ahb);
	}

	/* Allocate HW descriptors */
	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			&dev->hw_phys_desc[0], GFP_KERNEL);
	if (!dev->hw_desc[0]) {
		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
		return -ENOMEM;
	}
	dev->hw_desc[1] = dev->hw_desc[0] + 1;
	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
				sizeof(struct sahara_hw_desc);

	/* Allocate space for iv and key */
	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
					    &dev->key_phys_base, GFP_KERNEL);
	if (!dev->key_base) {
		dev_err(&pdev->dev, "Could not allocate memory for key\n");
		return -ENOMEM;
	}
	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

	/* Allocate space for context: largest digest + message length field */
	dev->context_base = dmam_alloc_coherent(&pdev->dev,
						SHA256_DIGEST_SIZE + 4,
						&dev->context_phys_base,
						GFP_KERNEL);
	if (!dev->context_base) {
		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
		return -ENOMEM;
	}

	/* Allocate space for HW links */
	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			&dev->hw_phys_link[0], GFP_KERNEL);
	if (!dev->hw_link[0]) {
		dev_err(&pdev->dev, "Could not allocate hw links\n");
		return -ENOMEM;
	}
	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
					sizeof(struct sahara_hw_link);
		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
	}

	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

	spin_lock_init(&dev->lock);
	mutex_init(&dev->queue_mutex);

	dev_ptr = dev;

	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
	if (IS_ERR(dev->kthread))
		return PTR_ERR(dev->kthread);

	init_completion(&dev->dma_completion);

	err = clk_prepare_enable(dev->clk_ipg);
	if (err)
		return err;
	err = clk_prepare_enable(dev->clk_ahb);
	if (err)
		goto clk_ipg_disable;

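	/*
	 * On SAHARA v4 (i.MX53) the version number sits in bits 15:8 of the
	 * VERSION register; on v3 (i.MX27) it occupies the low byte.
	 */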
	version = sahara_read(dev, SAHARA_REG_VERSION);
	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
		if (version != SAHARA_VERSION_3)
			err = -ENODEV;
	} else if (of_device_is_compatible(pdev->dev.of_node,
			"fsl,imx53-sahara")) {
		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
			err = -ENODEV;
		version = (version >> 8) & 0xff;
	}
	if (err == -ENODEV) {
		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
			version);
		goto err_algs;
	}

	dev->version = version;

	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
		     SAHARA_REG_CMD);
	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
			SAHARA_CONTROL_SET_MAXBURST(8) |
			SAHARA_CONTROL_RNG_AUTORSD |
			SAHARA_CONTROL_ENABLE_INT,
			SAHARA_REG_CONTROL);

	err = sahara_register_algs(dev);
	if (err)
		goto err_algs;

	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

	return 0;

err_algs:
	kthread_stop(dev->kthread);
	dev_ptr = NULL;
	clk_disable_unprepare(dev->clk_ahb);
clk_ipg_disable:
	clk_disable_unprepare(dev->clk_ipg);

	return err;
}


static int sahara_remove(struct platform_device *pdev)
{
	struct sahara_dev *dev = platform_get_drvdata(pdev);

	kthread_stop(dev->kthread);

	sahara_unregister_algs(dev);

	clk_disable_unprepare(dev->clk_ipg);
	clk_disable_unprepare(dev->clk_ahb);

	dev_ptr = NULL;

	return 0;
}

static struct platform_driver sahara_driver = {
	.probe		= sahara_probe,
	.remove		= sahara_remove,
	.driver		= {
		.name	= SAHARA_NAME,
		.of_match_table = sahara_dt_ids,
	},
	.id_table = sahara_platform_ids,
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");