/*
 * Cryptographic API.
 *
 * Support for SAHARA cryptographic accelerator.
 *
 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
 * Copyright (c) 2013 Vista Silicon S.L.
 * Author: Javier Martin <javier.martin@vista-silicon.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Based on omap-aes.c and tegra-aes.c
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define SHA_BUFFER_LEN		PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE

#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3	3
#define SAHARA_VERSION_4	4
#define SAHARA_TIMEOUT_MS	1000
#define SAHARA_MAX_HW_DESC	2
#define SAHARA_MAX_HW_LINK	20

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_NEW_KEY		BIT(3)

#define SAHARA_HDR_BASE			0x00800000
#define SAHARA_HDR_SKHA_ALG_AES		0
#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
#define SAHARA_HDR_FORM_DATA		(5 << 16)
#define SAHARA_HDR_FORM_KEY		(8 << 16)
#define SAHARA_HDR_LLO			(1 << 24)
#define SAHARA_HDR_CHA_SKHA		(1 << 28)
#define SAHARA_HDR_CHA_MDHA		(2 << 28)
#define SAHARA_HDR_PARITY_BIT		(1 << 31)

#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
#define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
#define SAHARA_HDR_MDHA_HASH		0xA0850000
#define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
#define SAHARA_HDR_MDHA_ALG_SHA1	0
#define SAHARA_HDR_MDHA_ALG_MD5		1
#define SAHARA_HDR_MDHA_ALG_SHA256	2
#define SAHARA_HDR_MDHA_ALG_SHA224	3
#define SAHARA_HDR_MDHA_PDATA		(1 << 2)
#define SAHARA_HDR_MDHA_HMAC		(1 << 3)
#define SAHARA_HDR_MDHA_INIT		(1 << 5)
#define SAHARA_HDR_MDHA_IPAD		(1 << 6)
#define SAHARA_HDR_MDHA_OPAD		(1 << 7)
#define SAHARA_HDR_MDHA_SWAP		(1 << 8)
#define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
#define SAHARA_HDR_MDHA_SSL		(1 << 10)

/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH	1

#define SAHARA_REG_VERSION	0x00
#define SAHARA_REG_DAR		0x04
#define SAHARA_REG_CONTROL	0x08
#define SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
#define SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
#define SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
#define SAHARA_CONTROL_ENABLE_INT	(1 << 4)
#define SAHARA_REG_CMD		0x0C
#define SAHARA_CMD_RESET		(1 << 0)
#define SAHARA_CMD_CLEAR_INT		(1 << 8)
#define SAHARA_CMD_CLEAR_ERR		(1 << 9)
#define SAHARA_CMD_SINGLE_STEP		(1 << 10)
#define SAHARA_CMD_MODE_BATCH		(1 << 16)
#define SAHARA_CMD_MODE_DEBUG		(1 << 18)
#define SAHARA_REG_STATUS	0x10
#define SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
#define SAHARA_STATE_IDLE		0
#define SAHARA_STATE_BUSY		1
#define SAHARA_STATE_ERR		2
#define SAHARA_STATE_FAULT		3
#define SAHARA_STATE_COMPLETE		4
#define SAHARA_STATE_COMP_FLAG		(1 << 2)
#define SAHARA_STATUS_DAR_FULL		(1 << 3)
#define SAHARA_STATUS_ERROR		(1 << 4)
#define SAHARA_STATUS_SECURE		(1 << 5)
#define SAHARA_STATUS_FAIL		(1 << 6)
#define SAHARA_STATUS_INIT		(1 << 7)
#define SAHARA_STATUS_RNG_RESEED	(1 << 8)
#define SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
#define SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
#define SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
#define SAHARA_STATUS_MODE_BATCH	(1 << 16)
#define SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
#define SAHARA_STATUS_MODE_DEBUG	(1 << 18)
#define SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS	0x14
#define SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
#define SAHARA_ERRSOURCE_CHA	14
#define SAHARA_ERRSOURCE_DMA	15
#define SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
#define SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
#define SAHARA_ERRSTATUS_GET_DMASRC(x)	(((x) >> 13) & 0x7)
#define SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
#define SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR	0x18
#define SAHARA_REG_CDAR		0x1C
#define SAHARA_REG_IDAR		0x20

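/*
 * The accelerator fetches these descriptors from coherent memory: hdr
 * selects the operation, (len1, p1) and (len2, p2) reference data
 * buffers or link chains, and next points at the following descriptor
 * (0 terminates the chain).
 */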
struct sahara_hw_desc {
	u32		hdr;
	u32		len1;
	dma_addr_t	p1;
	u32		len2;
	dma_addr_t	p2;
	dma_addr_t	next;
};

struct sahara_hw_link {
	u32		len;
	dma_addr_t	p;
	dma_addr_t	next;
};

struct sahara_ctx {
	unsigned long flags;

	/* AES-specific context */
	int keylen;
	u8 key[AES_KEYSIZE_128];
	struct crypto_ablkcipher *fallback;

	/* SHA-specific context */
	struct crypto_shash *shash_fallback;
};

struct sahara_aes_reqctx {
	unsigned long mode;
};

/*
 * struct sahara_sha_reqctx - private data per request
 * @buf: holds data for requests smaller than block_size
 * @rembuf: used to prepare one block_size-aligned request
 * @context: hw-specific context for request. Digest is extracted from this
 * @mutex: serializes enqueuing of this request's transfers
 * @mode: specifies what type of hw-descriptor needs to be built
 * @digest_size: length of digest for this request
 * @context_size: length of hw-context for this request.
 *                Always digest_size + 4
 * @buf_cnt: number of bytes saved in buf
 * @sg_in_idx: number of hw links
 * @in_sg: scatterlist for input data
 * @in_sg_chain: scatterlists for chained input data
 * @in_sg_chained: specifies if chained scatterlists are used or not
 * @total: total number of bytes for transfer
 * @last: is this the last block
 * @first: is this the first block
 * @active: inside a transfer
 */
struct sahara_sha_reqctx {
	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			context[SHA256_DIGEST_SIZE + 4];
	struct mutex		mutex;
	unsigned int		mode;
	unsigned int		digest_size;
	unsigned int		context_size;
	unsigned int		buf_cnt;
	unsigned int		sg_in_idx;
	struct scatterlist	*in_sg;
	struct scatterlist	in_sg_chain[2];
	bool			in_sg_chained;
	size_t			total;
	unsigned int		last;
	unsigned int		first;
	unsigned int		active;
};

struct sahara_dev {
	struct device		*device;
	unsigned int		version;
	void __iomem		*regs_base;
	struct clk		*clk_ipg;
	struct clk		*clk_ahb;
	struct mutex		queue_mutex;
	struct task_struct	*kthread;
	struct completion	dma_completion;

	struct sahara_ctx	*ctx;
	spinlock_t		lock;
	struct crypto_queue	queue;
	unsigned long		flags;

	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];

	u8			*key_base;
	dma_addr_t		key_phys_base;

	u8			*iv_base;
	dma_addr_t		iv_phys_base;

	u8			*context_base;
	dma_addr_t		context_phys_base;

	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];

	size_t			total;
	struct scatterlist	*in_sg;
	unsigned int		nb_in_sg;
	struct scatterlist	*out_sg;
	unsigned int		nb_out_sg;

	u32			error;
};

static struct sahara_dev *dev_ptr;

static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->regs_base + reg);
}

static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
	return readl(dev->regs_base + reg);
}

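/*
 * Descriptor headers must have odd parity: bit 31 is the parity bit.
 * The base value below already includes it, so every extra mode bit
 * that is ORed in toggles the parity bit to keep the overall number of
 * set bits odd.
 */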
static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;

	if (dev->flags & FLAGS_CBC) {
		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	if (dev->flags & FLAGS_ENCRYPT) {
		hdr |= SAHARA_HDR_SKHA_OP_ENC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	return hdr;
}

static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}

static char *sahara_err_src[16] = {
	"No error",
	"Header error",
	"Descriptor length error",
	"Descriptor length or pointer error",
	"Link length error",
	"Link pointer error",
	"Input buffer error",
	"Output buffer error",
	"Output buffer starvation",
	"Internal state fault",
	"General descriptor problem",
	"Reserved",
	"Descriptor address error",
	"Link address error",
	"CHA error",
	"DMA error"
};

static char *sahara_err_dmasize[4] = {
	"Byte transfer",
	"Half-word transfer",
	"Word transfer",
	"Reserved"
};

static char *sahara_err_dmasrc[8] = {
	"No error",
	"AHB bus error",
	"Internal IP bus error",
	"Parity error",
	"DMA crosses 256 byte boundary",
	"DMA is busy",
	"Reserved",
	"DMA HW error"
};

static char *sahara_cha_errsrc[12] = {
	"Input buffer non-empty",
	"Illegal address",
	"Illegal mode",
	"Illegal data size",
	"Illegal key size",
	"Write during processing",
	"CTX read during processing",
	"HW error",
	"Input buffer disabled/underflow",
	"Output buffer disabled/overflow",
	"DES key parity error",
	"Reserved"
};

static char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };

static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

	dev_err(dev->device, " - %s.\n", sahara_err_src[source]);

	if (source == SAHARA_ERRSOURCE_DMA) {
		if (error & SAHARA_ERRSTATUS_DMA_DIR)
			dev_err(dev->device, "  * DMA read.\n");
		else
			dev_err(dev->device, "  * DMA write.\n");

		dev_err(dev->device, "  * %s.\n",
			sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
		dev_err(dev->device, "  * %s.\n",
			sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
	} else if (source == SAHARA_ERRSOURCE_CHA) {
		dev_err(dev->device, "  * %s.\n",
			sahara_cha_errsrc[chasrc]);
		dev_err(dev->device, "  * %s.\n",
			sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
	}
	dev_err(dev->device, "\n");
}

static char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
	u8 state;

	if (!IS_ENABLED(DEBUG))
		return;

	state = SAHARA_STATUS_GET_STATE(status);

	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
		__func__, status);

	dev_dbg(dev->device, " - State = %d:\n", state);
	if (state & SAHARA_STATE_COMP_FLAG)
		dev_dbg(dev->device, "  * Descriptor completed. IRQ pending.\n");

	dev_dbg(dev->device, "  * %s.\n",
		sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

	if (status & SAHARA_STATUS_DAR_FULL)
		dev_dbg(dev->device, " - DAR Full.\n");
	if (status & SAHARA_STATUS_ERROR)
		dev_dbg(dev->device, " - Error.\n");
	if (status & SAHARA_STATUS_SECURE)
		dev_dbg(dev->device, " - Secure.\n");
	if (status & SAHARA_STATUS_FAIL)
		dev_dbg(dev->device, " - Fail.\n");
	if (status & SAHARA_STATUS_RNG_RESEED)
		dev_dbg(dev->device, " - RNG Reseed Request.\n");
	if (status & SAHARA_STATUS_ACTIVE_RNG)
		dev_dbg(dev->device, " - RNG Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_MDHA)
		dev_dbg(dev->device, " - MDHA Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_SKHA)
		dev_dbg(dev->device, " - SKHA Active.\n");

	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, " - Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, " - Dedicated Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, " - Debug Mode.\n");

	dev_dbg(dev->device, " - Internal state = 0x%02x\n",
		SAHARA_STATUS_GET_ISTATE(status));

	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
		sahara_read(dev, SAHARA_REG_CDAR));
	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
		sahara_read(dev, SAHARA_REG_IDAR));
}

static void sahara_dump_descriptors(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
		dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n",
			i, dev->hw_phys_desc[i]);
		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_desc[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

static void sahara_dump_links(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
		dev_dbg(dev->device, "Link (%d) (0x%08x):\n",
			i, dev->hw_phys_link[i]);
		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_link[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

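/*
 * An AES transfer uses at most two hardware descriptors: an optional
 * key/IV load descriptor (hw_desc[0], built only when a new key was
 * set) and a data descriptor whose p1/p2 point at the input and output
 * link chains:
 *
 *	hw_desc[0] (key hdr)   ->  hw_desc[1] (data hdr)  ->  0
 *	p1 = IV, p2 = key          p1 = in links, p2 = out links
 */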
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
	struct sahara_ctx *ctx = dev->ctx;
	struct scatterlist *sg;
	int ret;
	int i, j;
	int idx = 0;

	/* Copy new key if necessary */
	if (ctx->flags & FLAGS_NEW_KEY) {
		memcpy(dev->key_base, ctx->key, ctx->keylen);
		ctx->flags &= ~FLAGS_NEW_KEY;

		if (dev->flags & FLAGS_CBC) {
			dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
			dev->hw_desc[idx]->p1 = dev->iv_phys_base;
		} else {
			dev->hw_desc[idx]->len1 = 0;
			dev->hw_desc[idx]->p1 = 0;
		}
		dev->hw_desc[idx]->len2 = ctx->keylen;
		dev->hw_desc[idx]->p2 = dev->key_phys_base;
		dev->hw_desc[idx]->next = dev->hw_phys_desc[1];

		dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);

		idx++;
	}

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			 DMA_TO_DEVICE);
	if (ret != dev->nb_in_sg) {
		dev_err(dev->device, "couldn't map in sg\n");
		return -EINVAL;
	}
	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
			 DMA_FROM_DEVICE);
	if (ret != dev->nb_out_sg) {
		dev_err(dev->device, "couldn't map out sg\n");
		goto unmap_in;
	}

	/* Create input links */
	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
	sg = dev->in_sg;
	for (i = 0; i < dev->nb_in_sg; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	/* Create output links */
	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
	sg = dev->out_sg;
	for (j = i; j < dev->nb_out_sg + i; j++) {
		dev->hw_link[j]->len = sg->length;
		dev->hw_link[j]->p = sg->dma_address;
		if (j == (dev->nb_out_sg + i - 1)) {
			dev->hw_link[j]->next = 0;
		} else {
			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
			sg = sg_next(sg);
		}
	}

	/* Fill remaining fields of hw_desc[1] */
	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
	dev->hw_desc[idx]->len1 = dev->total;
	dev->hw_desc[idx]->len2 = dev->total;
	dev->hw_desc[idx]->next = 0;

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	return 0;

unmap_in:
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

	return -EINVAL;
}

static int sahara_aes_process(struct ablkcipher_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
	int ret;
	unsigned long timeout;

	/* Request is ready to be dispatched by the device */
	dev_dbg(dev->device,
		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		req->nbytes, req->src, req->dst);

	/* assign new request to device */
	dev->total = req->nbytes;
	dev->in_sg = req->src;
	dev->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	if ((dev->flags & FLAGS_CBC) && req->info)
		memcpy(dev->iv_base, req->info, AES_BLOCK_SIZE);

	/* assign new context to device */
	dev->ctx = ctx;

	reinit_completion(&dev->dma_completion);

	ret = sahara_hw_descriptor_create(dev);
	if (ret)
		return -EINVAL;

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "AES timeout\n");
		return -ETIMEDOUT;
	}

	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		DMA_FROM_DEVICE);
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

	return 0;
}

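/*
 * The SKHA unit only handles 128-bit AES keys. Larger keys are routed
 * to the software fallback transform allocated in
 * sahara_aes_cra_init(), so setkey just forwards them there.
 */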
static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	int ret;

	ctx->keylen = keylen;

	/* SAHARA only supports 128-bit keys */
	if (keylen == AES_KEYSIZE_128) {
		memcpy(ctx->key, key, keylen);
		ctx->flags |= FLAGS_NEW_KEY;
		return 0;
	}

	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
		return -EINVAL;

	/*
	 * The requested key size is not supported by HW, do a fallback.
	 */
	ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	ctx->fallback->base.crt_flags |=
		(tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
	if (ret) {
		struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);

		tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm_aux->crt_flags |=
			(ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int err = 0;

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->device,
			"request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	rctx->mode = mode;

	mutex_lock(&dev->queue_mutex);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return err;
}

static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_encrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_encrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_CBC);
}

static int sahara_aes_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->fallback = crypto_alloc_ablkcipher(name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->fallback);
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);

	return 0;
}

static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->fallback)
		crypto_free_ablkcipher(ctx->fallback);
	ctx->fallback = NULL;
}

static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
			       struct sahara_sha_reqctx *rctx)
{
	u32 hdr = rctx->mode;

	if (rctx->first) {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
		hdr |= SAHARA_HDR_MDHA_INIT;
	} else {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
	}

	if (rctx->last)
		hdr |= SAHARA_HDR_MDHA_PDATA;

	if (hweight_long(hdr) % 2 == 0)
		hdr |= SAHARA_HDR_PARITY_BIT;

	return hdr;
}

static int sahara_sha_hw_links_create(struct sahara_dev *dev,
				      struct sahara_sha_reqctx *rctx,
				      int start)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	dev->in_sg = rctx->in_sg;

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg);
		return -EINVAL;
	}

	if (rctx->in_sg_chained) {
		i = start;
		sg = dev->in_sg;
		while (sg) {
			ret = dma_map_sg(dev->device, sg, 1,
					 DMA_TO_DEVICE);
			if (!ret)
				return -EFAULT;

			dev->hw_link[i]->len = sg->length;
			dev->hw_link[i]->p = sg->dma_address;
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
			i += 1;
		}
		dev->hw_link[i - 1]->next = 0;
	} else {
		sg = dev->in_sg;
		ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
				 DMA_TO_DEVICE);
		if (!ret)
			return -EFAULT;

		for (i = start; i < dev->nb_in_sg + start; i++) {
			dev->hw_link[i]->len = sg->length;
			dev->hw_link[i]->p = sg->dma_address;
			if (i == (dev->nb_in_sg + start - 1)) {
				dev->hw_link[i]->next = 0;
			} else {
				dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
				sg = sg_next(sg);
			}
		}
	}

	return i;
}

static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
						struct sahara_sha_reqctx *rctx,
						struct ahash_request *req,
						int index)
{
	unsigned int result_len;
	int i = index;

	if (rctx->first)
		/* Create initial descriptor: #8 */
		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
	else
		/* Create hash descriptor: #10. Must follow #6. */
		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

	dev->hw_desc[index]->len1 = rctx->total;
	if (dev->hw_desc[index]->len1 == 0) {
		/* if len1 is 0, p1 must be 0, too */
		dev->hw_desc[index]->p1 = 0;
		rctx->sg_in_idx = 0;
	} else {
		/* Create input links */
		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
		i = sahara_sha_hw_links_create(dev, rctx, index);

		rctx->sg_in_idx = index;
		if (i < 0)
			return i;
	}

	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

	/* Save the context for the next operation */
	result_len = rctx->context_size;
	dev->hw_link[i]->p = dev->context_phys_base;

	dev->hw_link[i]->len = result_len;
	dev->hw_desc[index]->len2 = result_len;

	dev->hw_link[i]->next = 0;

	return 0;
}

/*
 * Load descriptor aka #6
 *
 * To load a previously saved context back to the MDHA unit
 *
 * p1: Saved Context
 * p2: NULL
 *
 */
static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
						   struct sahara_sha_reqctx *rctx,
						   struct ahash_request *req,
						   int index)
{
	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);

	dev->hw_desc[index]->len1 = rctx->context_size;
	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
	dev->hw_desc[index]->len2 = 0;
	dev->hw_desc[index]->p2 = 0;

	dev->hw_link[index]->len = rctx->context_size;
	dev->hw_link[index]->p = dev->context_phys_base;
	dev->hw_link[index]->next = 0;

	return 0;
}

static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
{
	if (!sg || !sg->length)
		return nbytes;

	while (nbytes && sg) {
		if (nbytes <= sg->length) {
			sg->length = nbytes;
			sg_mark_end(sg);
			break;
		}
		nbytes -= sg->length;
		sg = sg_next(sg);
	}

	return nbytes;
}

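/*
 * The MDHA unit can only pad the last transfer, so every intermediate
 * update must be a whole number of blocks. Bytes that do not fill a
 * block are stashed in rctx->buf; on the next update they are replayed
 * from rctx->rembuf, chained in front of the new data via
 * rctx->in_sg_chain.
 */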
static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int hash_later;
	unsigned int block_size;
	unsigned int len;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* append bytes from previous operation */
	len = rctx->buf_cnt + req->nbytes;

	/* only the last transfer can be padded in hardware */
	if (!rctx->last && (len < block_size)) {
		/* too little data, save for next operation */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
					 0, req->nbytes, 0);
		rctx->buf_cnt += req->nbytes;

		return 0;
	}

	/* add data from previous operation first */
	if (rctx->buf_cnt)
		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

	/* data must always be a multiple of block_size */
	hash_later = rctx->last ? 0 : len & (block_size - 1);
	if (hash_later) {
		unsigned int offset = req->nbytes - hash_later;
		/* Save remaining bytes for later use */
		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
					 hash_later, 0);
	}

	/* nbytes should now be multiple of blocksize */
	req->nbytes = req->nbytes - hash_later;

	sahara_walk_and_recalc(req->src, req->nbytes);

	/* have data from previous operation and current */
	if (rctx->buf_cnt && req->nbytes) {
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);

		sg_chain(rctx->in_sg_chain, 2, req->src);

		rctx->total = req->nbytes + rctx->buf_cnt;
		rctx->in_sg = rctx->in_sg_chain;

		rctx->in_sg_chained = true;
		req->src = rctx->in_sg_chain;
	/* only data from previous operation */
	} else if (rctx->buf_cnt) {
		if (req->src)
			rctx->in_sg = req->src;
		else
			rctx->in_sg = rctx->in_sg_chain;
		/* buf was copied into rembuf above */
		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
		rctx->total = rctx->buf_cnt;
		rctx->in_sg_chained = false;
	/* no data from previous operation */
	} else {
		rctx->in_sg = req->src;
		rctx->total = req->nbytes;
		req->src = rctx->in_sg;
		rctx->in_sg_chained = false;
	}

	/* on next call, we only have the remaining data in the buffer */
	rctx->buf_cnt = hash_later;

	return -EINPROGRESS;
}

static void sahara_sha_unmap_sg(struct sahara_dev *dev,
				struct sahara_sha_reqctx *rctx)
{
	struct scatterlist *sg;

	if (rctx->in_sg_chained) {
		sg = dev->in_sg;
		while (sg) {
			dma_unmap_sg(dev->device, sg, 1, DMA_TO_DEVICE);
			sg = sg_next(sg);
		}
	} else {
		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			DMA_TO_DEVICE);
	}
}

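/*
 * A hash request is dispatched as one or two descriptors: the very
 * first transfer uses a single data descriptor (#8), while every later
 * transfer first reloads the saved context with descriptor #6 and then
 * hashes the new data with descriptor #10.
 */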
static int sahara_sha_process(struct ahash_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	int ret;
	unsigned long timeout;

	ret = sahara_sha_prepare_request(req);
	if (!ret)
		return ret;

	if (rctx->first) {
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = 0;
		rctx->first = 0;
	} else {
		memcpy(dev->context_base, rctx->context, rctx->context_size);

		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
		dev->hw_desc[1]->next = 0;
	}

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	reinit_completion(&dev->dma_completion);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "SHA timeout\n");
		return -ETIMEDOUT;
	}

	if (rctx->sg_in_idx)
		sahara_sha_unmap_sg(dev, rctx);

	memcpy(rctx->context, dev->context_base, rctx->context_size);

	if (req->result)
		memcpy(req->result, rctx->context, rctx->digest_size);

	return 0;
}

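/*
 * All requests are funneled through a single kernel thread: the
 * ablkcipher/ahash entry points only enqueue work and wake it up. The
 * hardware processes one descriptor chain at a time
 * (SAHARA_QUEUE_LENGTH == 1), so this thread is what serializes access
 * to the device.
 */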
static int sahara_queue_manage(void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req;
	struct crypto_async_request *backlog;
	int ret = 0;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&dev->queue_mutex);
		backlog = crypto_get_backlog(&dev->queue);
		async_req = crypto_dequeue_request(&dev->queue);
		mutex_unlock(&dev->queue_mutex);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (async_req) {
			if (crypto_tfm_alg_type(async_req->tfm) ==
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ahash_request *req =
					ahash_request_cast(async_req);

				ret = sahara_sha_process(req);
			} else {
				struct ablkcipher_request *req =
					ablkcipher_request_cast(async_req);

				ret = sahara_aes_process(req);
			}

			async_req->complete(async_req, ret);

			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}

static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int ret;

	if (!req->nbytes && !last)
		return 0;

	mutex_lock(&rctx->mutex);
	rctx->last = last;

	if (!rctx->active) {
		rctx->active = 1;
		rctx->first = 1;
	}

	mutex_lock(&dev->queue_mutex);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);
	mutex_unlock(&rctx->mutex);

	return ret;
}

static int sahara_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memset(rctx, 0, sizeof(*rctx));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
		rctx->digest_size = SHA1_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
		rctx->digest_size = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	rctx->context_size = rctx->digest_size + 4;
	rctx->active = 0;

	mutex_init(&rctx->mutex);

	return 0;
}

static int sahara_sha_update(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 0);
}

static int sahara_sha_final(struct ahash_request *req)
{
	req->nbytes = 0;
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_finup(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_digest(struct ahash_request *req)
{
	sahara_sha_init(req);

	return sahara_sha_finup(req);
}

static int sahara_sha_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct sahara_ctx));
	memcpy(out + sizeof(struct sahara_ctx), rctx,
	       sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct sahara_ctx));
	memcpy(rctx, in + sizeof(struct sahara_ctx),
	       sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->shash_fallback = crypto_alloc_shash(name, 0,
					CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->shash_fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->shash_fallback);
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sahara_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

	return 0;
}

static void sahara_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->shash_fallback);
	ctx->shash_fallback = NULL;
}

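/*
 * The transforms below are reached through the generic crypto API, not
 * called directly. A minimal sketch of kernel-side usage (hypothetical
 * buffer names; error handling and the async completion callback are
 * omitted, and the calls may return -EINPROGRESS and finish later):
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *areq;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	areq = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_crypt(areq, src_sg, dst_sg, nbytes, iv);
 *	crypto_ablkcipher_encrypt(areq);
 */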
static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "sahara-ecb-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_ecb_encrypt,
		.decrypt	= sahara_aes_ecb_decrypt,
	}
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "sahara-cbc-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_cbc_encrypt,
		.decrypt	= sahara_aes_cbc_decrypt,
	}
}
};

static struct ahash_alg sha_v3_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sahara-sha1",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
		.cra_exit		= sahara_sha_cra_exit,
	}
},
};

static struct ahash_alg sha_v4_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sahara-sha256",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
		.cra_exit		= sahara_sha_cra_exit,
	}
},
};

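/*
 * Completion IRQ: acknowledge the interrupt, then either report a
 * spurious one (engine still busy), a clean completion, or decode the
 * error register before waking the waiter in the dispatcher thread.
 */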
static irqreturn_t sahara_irq_handler(int irq, void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
		     SAHARA_REG_CMD);

	sahara_decode_status(dev, stat);

	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
		return IRQ_NONE;
	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
		dev->error = 0;
	} else {
		sahara_decode_error(dev, err);
		dev->error = -EINVAL;
	}

	complete(&dev->dma_completion);

	return IRQ_HANDLED;
}

static int sahara_register_algs(struct sahara_dev *dev)
{
	int err;
	unsigned int i, j, k, l;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		INIT_LIST_HEAD(&aes_algs[i].cra_list);
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
		err = crypto_register_ahash(&sha_v3_algs[k]);
		if (err)
			goto err_sha_v3_algs;
	}

	if (dev->version > SAHARA_VERSION_3)
		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
			err = crypto_register_ahash(&sha_v4_algs[l]);
			if (err)
				goto err_sha_v4_algs;
		}

	return 0;

err_sha_v4_algs:
	for (j = 0; j < l; j++)
		crypto_unregister_ahash(&sha_v4_algs[j]);

err_sha_v3_algs:
	for (j = 0; j < k; j++)
		crypto_unregister_ahash(&sha_v3_algs[j]);

err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}

static void sahara_unregister_algs(struct sahara_dev *dev)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);

	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
		crypto_unregister_ahash(&sha_v3_algs[i]);

	if (dev->version > SAHARA_VERSION_3)
		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
			crypto_unregister_ahash(&sha_v4_algs[i]);
}

static struct platform_device_id sahara_platform_ids[] = {
	{ .name = "sahara-imx27" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);

static struct of_device_id sahara_dt_ids[] = {
	{ .compatible = "fsl,imx53-sahara" },
	{ .compatible = "fsl,imx27-sahara" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);

static int sahara_probe(struct platform_device *pdev)
{
	struct sahara_dev *dev;
	struct resource *res;
	u32 version;
	int irq;
	int err;
	int i;

	dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
	if (dev == NULL) {
		dev_err(&pdev->dev, "unable to alloc data struct.\n");
		return -ENOMEM;
	}

	dev->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dev->regs_base))
		return PTR_ERR(dev->regs_base);

	/* Get the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq resource\n");
		return irq;
	}

	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
			       0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return err;
	}

	/* clocks */
	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(dev->clk_ipg)) {
		dev_err(&pdev->dev, "Could not get ipg clock\n");
		return PTR_ERR(dev->clk_ipg);
	}

	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(dev->clk_ahb)) {
		dev_err(&pdev->dev, "Could not get ahb clock\n");
		return PTR_ERR(dev->clk_ahb);
	}

	/* Allocate HW descriptors */
	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			&dev->hw_phys_desc[0], GFP_KERNEL);
	if (!dev->hw_desc[0]) {
		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
		return -ENOMEM;
	}
	dev->hw_desc[1] = dev->hw_desc[0] + 1;
	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
				sizeof(struct sahara_hw_desc);

	/* Allocate space for iv and key */
	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
					    &dev->key_phys_base, GFP_KERNEL);
	if (!dev->key_base) {
		dev_err(&pdev->dev, "Could not allocate memory for key\n");
		return -ENOMEM;
	}
	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

	/* Allocate space for context: largest digest + message length field */
	dev->context_base = dmam_alloc_coherent(&pdev->dev,
						SHA256_DIGEST_SIZE + 4,
						&dev->context_phys_base,
						GFP_KERNEL);
	if (!dev->context_base) {
		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
		return -ENOMEM;
	}

	/* Allocate space for HW links */
	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			&dev->hw_phys_link[0], GFP_KERNEL);
	if (!dev->hw_link[0]) {
		dev_err(&pdev->dev, "Could not allocate hw links\n");
		return -ENOMEM;
	}
	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
					sizeof(struct sahara_hw_link);
		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
	}

	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

	spin_lock_init(&dev->lock);
	mutex_init(&dev->queue_mutex);

	dev_ptr = dev;

	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
	if (IS_ERR(dev->kthread))
		return PTR_ERR(dev->kthread);

	init_completion(&dev->dma_completion);

	err = clk_prepare_enable(dev->clk_ipg);
	if (err)
		return err;
	err = clk_prepare_enable(dev->clk_ahb);
	if (err)
		goto clk_ipg_disable;

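	/*
	 * On i.MX27 (SAHARA v3) the version register holds the version
	 * number directly; on i.MX53 (SAHARA v4) it sits in bits 15:8.
	 */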
	version = sahara_read(dev, SAHARA_REG_VERSION);
	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
		if (version != SAHARA_VERSION_3)
			err = -ENODEV;
	} else if (of_device_is_compatible(pdev->dev.of_node,
			"fsl,imx53-sahara")) {
		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
			err = -ENODEV;
		version = (version >> 8) & 0xff;
	}
	if (err == -ENODEV) {
		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
			version);
		goto err_algs;
	}

	dev->version = version;

	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
		     SAHARA_REG_CMD);
	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
			SAHARA_CONTROL_SET_MAXBURST(8) |
			SAHARA_CONTROL_RNG_AUTORSD |
			SAHARA_CONTROL_ENABLE_INT,
			SAHARA_REG_CONTROL);

	err = sahara_register_algs(dev);
	if (err)
		goto err_algs;

	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

	return 0;

err_algs:
	kthread_stop(dev->kthread);
	dev_ptr = NULL;
	clk_disable_unprepare(dev->clk_ahb);
clk_ipg_disable:
	clk_disable_unprepare(dev->clk_ipg);

	return err;
}

static int sahara_remove(struct platform_device *pdev)
{
	struct sahara_dev *dev = platform_get_drvdata(pdev);

	kthread_stop(dev->kthread);

	sahara_unregister_algs(dev);

	clk_disable_unprepare(dev->clk_ipg);
	clk_disable_unprepare(dev->clk_ahb);

	dev_ptr = NULL;

	return 0;
}

static struct platform_driver sahara_driver = {
	.probe		= sahara_probe,
	.remove		= sahara_remove,
	.driver		= {
		.name	= SAHARA_NAME,
		.of_match_table = sahara_dt_ids,
	},
	.id_table = sahara_platform_ids,
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");