/*
 * Cryptographic API.
 *
 * Support for SAHARA cryptographic accelerator.
 *
 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
 * Copyright (c) 2013 Vista Silicon S.L.
 * Author: Javier Martin <javier.martin@vista-silicon.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Based on omap-aes.c and tegra-aes.c
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define SHA_BUFFER_LEN		PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE

#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3	3
#define SAHARA_VERSION_4	4
#define SAHARA_TIMEOUT_MS	1000
#define SAHARA_MAX_HW_DESC	2
#define SAHARA_MAX_HW_LINK	20

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_NEW_KEY		BIT(3)

#define SAHARA_HDR_BASE			0x00800000
#define SAHARA_HDR_SKHA_ALG_AES		0
#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
#define SAHARA_HDR_FORM_DATA		(5 << 16)
#define SAHARA_HDR_FORM_KEY		(8 << 16)
#define SAHARA_HDR_LLO			(1 << 24)
#define SAHARA_HDR_CHA_SKHA		(1 << 28)
#define SAHARA_HDR_CHA_MDHA		(2 << 28)
#define SAHARA_HDR_PARITY_BIT		(1 << 31)

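/*
 * The SAHARA_HDR_MDHA_* mode words below are complete header templates
 * with correct (odd) parity; sahara_sha_init_hdr() ORs the algorithm
 * and flag bits into them and restores odd parity afterwards.
 */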
#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
#define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
#define SAHARA_HDR_MDHA_HASH		0xA0850000
#define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
#define SAHARA_HDR_MDHA_ALG_SHA1	0
#define SAHARA_HDR_MDHA_ALG_MD5		1
#define SAHARA_HDR_MDHA_ALG_SHA256	2
#define SAHARA_HDR_MDHA_ALG_SHA224	3
#define SAHARA_HDR_MDHA_PDATA		(1 << 2)
#define SAHARA_HDR_MDHA_HMAC		(1 << 3)
#define SAHARA_HDR_MDHA_INIT		(1 << 5)
#define SAHARA_HDR_MDHA_IPAD		(1 << 6)
#define SAHARA_HDR_MDHA_OPAD		(1 << 7)
#define SAHARA_HDR_MDHA_SWAP		(1 << 8)
#define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
#define SAHARA_HDR_MDHA_SSL		(1 << 10)

/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH	1

#define SAHARA_REG_VERSION	0x00
#define SAHARA_REG_DAR		0x04
#define SAHARA_REG_CONTROL	0x08
#define SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
#define SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
#define SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
#define SAHARA_CONTROL_ENABLE_INT	(1 << 4)
#define SAHARA_REG_CMD		0x0C
#define SAHARA_CMD_RESET		(1 << 0)
#define SAHARA_CMD_CLEAR_INT		(1 << 8)
#define SAHARA_CMD_CLEAR_ERR		(1 << 9)
#define SAHARA_CMD_SINGLE_STEP		(1 << 10)
#define SAHARA_CMD_MODE_BATCH		(1 << 16)
#define SAHARA_CMD_MODE_DEBUG		(1 << 18)
#define SAHARA_REG_STATUS	0x10
#define SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
#define SAHARA_STATE_IDLE		0
#define SAHARA_STATE_BUSY		1
#define SAHARA_STATE_ERR		2
#define SAHARA_STATE_FAULT		3
#define SAHARA_STATE_COMPLETE		4
#define SAHARA_STATE_COMP_FLAG		(1 << 2)
#define SAHARA_STATUS_DAR_FULL		(1 << 3)
#define SAHARA_STATUS_ERROR		(1 << 4)
#define SAHARA_STATUS_SECURE		(1 << 5)
#define SAHARA_STATUS_FAIL		(1 << 6)
#define SAHARA_STATUS_INIT		(1 << 7)
#define SAHARA_STATUS_RNG_RESEED	(1 << 8)
#define SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
#define SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
#define SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
#define SAHARA_STATUS_MODE_BATCH	(1 << 16)
#define SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
#define SAHARA_STATUS_MODE_DEBUG	(1 << 18)
#define SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS	0x14
#define SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
#define SAHARA_ERRSOURCE_CHA	14
#define SAHARA_ERRSOURCE_DMA	15
#define SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
#define SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
#define SAHARA_ERRSTATUS_GET_DMASRC(x)	(((x) >> 13) & 0x7)
#define SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
#define SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR	0x18
#define SAHARA_REG_CDAR	0x1C
#define SAHARA_REG_IDAR	0x20

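/*
 * Hardware descriptor and link-table entries. These live in DMA-coherent
 * memory and are walked by the SAHARA DMA engine: the pointer fields
 * (p1, p2, p, next) hold bus addresses, not kernel virtual addresses.
 */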
struct sahara_hw_desc {
	u32	hdr;
	u32	len1;
	u32	p1;
	u32	len2;
	u32	p2;
	u32	next;
};

struct sahara_hw_link {
	u32	len;
	u32	p;
	u32	next;
};

struct sahara_ctx {
	unsigned long flags;

	/* AES-specific context */
	int keylen;
	u8 key[AES_KEYSIZE_128];
	struct crypto_ablkcipher *fallback;

	/* SHA-specific context */
	struct crypto_shash *shash_fallback;
};

struct sahara_aes_reqctx {
	unsigned long mode;
};

/*
 * struct sahara_sha_reqctx - private data per request
 * @buf: holds data for requests smaller than block_size
 * @rembuf: used to prepare one block_size-aligned request
 * @context: hw-specific context for request. Digest is extracted from this
 * @mutex: serializes enqueueing of this request
 * @mode: specifies what type of hw-descriptor needs to be built
 * @digest_size: length of digest for this request
 * @context_size: length of hw-context for this request.
 *                Always digest_size + 4
 * @buf_cnt: number of bytes saved in buf
 * @sg_in_idx: descriptor index holding the input links; non-zero means
 *             input data was DMA-mapped and must be unmapped afterwards
 * @in_sg: scatterlist for input data
 * @in_sg_chain: scatterlists for chained input data
 * @total: total number of bytes for transfer
 * @last: is this the last block
 * @first: is this the first block
 * @active: inside a transfer
 */
struct sahara_sha_reqctx {
	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			context[SHA256_DIGEST_SIZE + 4];
	struct mutex		mutex;
	unsigned int		mode;
	unsigned int		digest_size;
	unsigned int		context_size;
	unsigned int		buf_cnt;
	unsigned int		sg_in_idx;
	struct scatterlist	*in_sg;
	struct scatterlist	in_sg_chain[2];
	size_t			total;
	unsigned int		last;
	unsigned int		first;
	unsigned int		active;
};

struct sahara_dev {
	struct device		*device;
	unsigned int		version;
	void __iomem		*regs_base;
	struct clk		*clk_ipg;
	struct clk		*clk_ahb;
	struct mutex		queue_mutex;
	struct task_struct	*kthread;
	struct completion	dma_completion;

	struct sahara_ctx	*ctx;
	spinlock_t		lock;
	struct crypto_queue	queue;
	unsigned long		flags;

	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];

	u8			*key_base;
	dma_addr_t		key_phys_base;

	u8			*iv_base;
	dma_addr_t		iv_phys_base;

	u8			*context_base;
	dma_addr_t		context_phys_base;

	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];

	size_t			total;
	struct scatterlist	*in_sg;
	int			nb_in_sg;
	struct scatterlist	*out_sg;
	int			nb_out_sg;

	u32			error;
};

static struct sahara_dev *dev_ptr;

static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->regs_base + reg);
}

static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
	return readl(dev->regs_base + reg);
}

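/*
 * Descriptor headers must have odd parity: bit 31 is the parity bit,
 * so every time another header flag is ORed in below, the parity bit
 * is toggled to keep the total number of set bits odd.
 */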
static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;

	if (dev->flags & FLAGS_CBC) {
		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	if (dev->flags & FLAGS_ENCRYPT) {
		hdr |= SAHARA_HDR_SKHA_OP_ENC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	return hdr;
}

static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}

static const char *sahara_err_src[16] = {
	"No error",
	"Header error",
	"Descriptor length error",
	"Descriptor length or pointer error",
	"Link length error",
	"Link pointer error",
	"Input buffer error",
	"Output buffer error",
	"Output buffer starvation",
	"Internal state fault",
	"General descriptor problem",
	"Reserved",
	"Descriptor address error",
	"Link address error",
	"CHA error",
	"DMA error"
};

static const char *sahara_err_dmasize[4] = {
	"Byte transfer",
	"Half-word transfer",
	"Word transfer",
	"Reserved"
};

static const char *sahara_err_dmasrc[8] = {
	"No error",
	"AHB bus error",
	"Internal IP bus error",
	"Parity error",
	"DMA crosses 256 byte boundary",
	"DMA is busy",
	"Reserved",
	"DMA HW error"
};

static const char *sahara_cha_errsrc[12] = {
	"Input buffer non-empty",
	"Illegal address",
	"Illegal mode",
	"Illegal data size",
	"Illegal key size",
	"Write during processing",
	"CTX read during processing",
	"HW error",
	"Input buffer disabled/underflow",
	"Output buffer disabled/overflow",
	"DES key parity error",
	"Reserved"
};

static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };

static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

	dev_err(dev->device, " - %s.\n", sahara_err_src[source]);

	if (source == SAHARA_ERRSOURCE_DMA) {
		if (error & SAHARA_ERRSTATUS_DMA_DIR)
			dev_err(dev->device, " * DMA read.\n");
		else
			dev_err(dev->device, " * DMA write.\n");

		dev_err(dev->device, " * %s.\n",
			sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
		dev_err(dev->device, " * %s.\n",
			sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
	} else if (source == SAHARA_ERRSOURCE_CHA) {
		dev_err(dev->device, " * %s.\n",
			sahara_cha_errsrc[chasrc]);
		dev_err(dev->device, " * %s.\n",
			sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
	}
	dev_err(dev->device, "\n");
}

static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
	u8 state;

	if (!IS_ENABLED(DEBUG))
		return;

	state = SAHARA_STATUS_GET_STATE(status);

	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
		__func__, status);

	dev_dbg(dev->device, " - State = %d:\n", state);
	if (state & SAHARA_STATE_COMP_FLAG)
		dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");

	dev_dbg(dev->device, " * %s.\n",
		sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

	if (status & SAHARA_STATUS_DAR_FULL)
		dev_dbg(dev->device, " - DAR Full.\n");
	if (status & SAHARA_STATUS_ERROR)
		dev_dbg(dev->device, " - Error.\n");
	if (status & SAHARA_STATUS_SECURE)
		dev_dbg(dev->device, " - Secure.\n");
	if (status & SAHARA_STATUS_FAIL)
		dev_dbg(dev->device, " - Fail.\n");
	if (status & SAHARA_STATUS_RNG_RESEED)
		dev_dbg(dev->device, " - RNG Reseed Request.\n");
	if (status & SAHARA_STATUS_ACTIVE_RNG)
		dev_dbg(dev->device, " - RNG Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_MDHA)
		dev_dbg(dev->device, " - MDHA Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_SKHA)
		dev_dbg(dev->device, " - SKHA Active.\n");

	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, " - Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, " - Dedicated Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, " - Debug Mode.\n");

	dev_dbg(dev->device, " - Internal state = 0x%02x\n",
		SAHARA_STATUS_GET_ISTATE(status));

	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
		sahara_read(dev, SAHARA_REG_CDAR));
	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
		sahara_read(dev, SAHARA_REG_IDAR));
}

static void sahara_dump_descriptors(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
			i, &dev->hw_phys_desc[i]);
		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_desc[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

static void sahara_dump_links(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
		dev_dbg(dev->device, "Link (%d) (%pad):\n",
			i, &dev->hw_phys_link[i]);
		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_link[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

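/*
 * AES descriptor chain built below:
 *
 *   hw_desc[0] (key/IV load)            hw_desc[1] (data)
 *     p1 -> IV        (CBC only)          p1 -> hw_link[0..i-1]   -> in_sg
 *     p2 -> key                           p2 -> hw_link[i..i+j-1] -> out_sg
 *     next -> hw_desc[1]                  next = 0
 *
 * If no new key needs to be loaded, the key/IV descriptor is skipped
 * and the data descriptor is built in hw_desc[0] instead (idx == 0).
 */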
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
	struct sahara_ctx *ctx = dev->ctx;
	struct scatterlist *sg;
	int ret;
	int i, j;
	int idx = 0;

	/* Copy new key if necessary */
	if (ctx->flags & FLAGS_NEW_KEY) {
		memcpy(dev->key_base, ctx->key, ctx->keylen);
		ctx->flags &= ~FLAGS_NEW_KEY;

		if (dev->flags & FLAGS_CBC) {
			dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
			dev->hw_desc[idx]->p1 = dev->iv_phys_base;
		} else {
			dev->hw_desc[idx]->len1 = 0;
			dev->hw_desc[idx]->p1 = 0;
		}
		dev->hw_desc[idx]->len2 = ctx->keylen;
		dev->hw_desc[idx]->p2 = dev->key_phys_base;
		dev->hw_desc[idx]->next = dev->hw_phys_desc[1];

		dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);

		idx++;
	}

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
	if (dev->nb_in_sg < 0) {
		dev_err(dev->device, "Invalid numbers of src SG.\n");
		return dev->nb_in_sg;
	}
	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
	if (dev->nb_out_sg < 0) {
		dev_err(dev->device, "Invalid numbers of dst SG.\n");
		return dev->nb_out_sg;
	}
	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			 DMA_TO_DEVICE);
	if (ret != dev->nb_in_sg) {
		dev_err(dev->device, "couldn't map in sg\n");
		return -EINVAL;
	}
	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
			 DMA_FROM_DEVICE);
	if (ret != dev->nb_out_sg) {
		dev_err(dev->device, "couldn't map out sg\n");
		goto unmap_in;
	}

	/* Create input links */
	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
	sg = dev->in_sg;
	for (i = 0; i < dev->nb_in_sg; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	/* Create output links */
	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
	sg = dev->out_sg;
	for (j = i; j < dev->nb_out_sg + i; j++) {
		dev->hw_link[j]->len = sg->length;
		dev->hw_link[j]->p = sg->dma_address;
		if (j == (dev->nb_out_sg + i - 1)) {
			dev->hw_link[j]->next = 0;
		} else {
			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
			sg = sg_next(sg);
		}
	}

	/* Fill remaining fields of the data descriptor */
	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
	dev->hw_desc[idx]->len1 = dev->total;
	dev->hw_desc[idx]->len2 = dev->total;
	dev->hw_desc[idx]->next = 0;

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	return 0;

unmap_in:
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		     DMA_TO_DEVICE);

	return -EINVAL;
}

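/*
 * Runs in the context of the sahara_queue_manage() kthread, so it is
 * allowed to sleep while waiting for the DMA completion.
 */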
static int sahara_aes_process(struct ablkcipher_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
	int ret;
	unsigned long timeout;

	/* Request is ready to be dispatched by the device */
	dev_dbg(dev->device,
		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		req->nbytes, req->src, req->dst);

	/* assign new request to device */
	dev->total = req->nbytes;
	dev->in_sg = req->src;
	dev->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	if ((dev->flags & FLAGS_CBC) && req->info)
		memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);

	/* assign new context to device */
	dev->ctx = ctx;

	reinit_completion(&dev->dma_completion);

	ret = sahara_hw_descriptor_create(dev);
	if (ret)
		return -EINVAL;

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "AES timeout\n");
		return -ETIMEDOUT;
	}

	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		     DMA_FROM_DEVICE);
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		     DMA_TO_DEVICE);

	return 0;
}

static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	int ret;

	ctx->keylen = keylen;

	/* SAHARA only supports 128-bit keys */
	if (keylen == AES_KEYSIZE_128) {
		memcpy(ctx->key, key, keylen);
		ctx->flags |= FLAGS_NEW_KEY;
		return 0;
	}

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
		return -EINVAL;

	/*
	 * The requested key size is not supported by HW, do a fallback.
	 */
	ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	ctx->fallback->base.crt_flags |=
		(tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
	if (ret) {
		struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);

		tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm_aux->crt_flags |=
			(ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int err = 0;

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->device,
			"request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	rctx->mode = mode;

	mutex_lock(&dev->queue_mutex);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return err;
}

static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_encrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_encrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_CBC);
}

static int sahara_aes_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->fallback = crypto_alloc_ablkcipher(name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->fallback);
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);

	return 0;
}

static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->fallback)
		crypto_free_ablkcipher(ctx->fallback);
	ctx->fallback = NULL;
}

static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
			       struct sahara_sha_reqctx *rctx)
{
	u32 hdr = 0;

	hdr = rctx->mode;

	if (rctx->first) {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
		hdr |= SAHARA_HDR_MDHA_INIT;
	} else {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
	}

	if (rctx->last)
		hdr |= SAHARA_HDR_MDHA_PDATA;

	if (hweight_long(hdr) % 2 == 0)
		hdr |= SAHARA_HDR_PARITY_BIT;

	return hdr;
}

static int sahara_sha_hw_links_create(struct sahara_dev *dev,
				      struct sahara_sha_reqctx *rctx,
				      int start)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	dev->in_sg = rctx->in_sg;

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
	if (dev->nb_in_sg < 0) {
		dev_err(dev->device, "Invalid numbers of src SG.\n");
		return dev->nb_in_sg;
	}
	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg);
		return -EINVAL;
	}

	sg = dev->in_sg;
	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
	if (!ret)
		return -EFAULT;

	for (i = start; i < dev->nb_in_sg + start; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg + start - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	return i;
}

static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
						struct sahara_sha_reqctx *rctx,
						struct ahash_request *req,
						int index)
{
	unsigned result_len;
	int i = index;

	if (rctx->first)
		/* Create initial descriptor: #8 */
		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
	else
		/* Create hash descriptor: #10. Must follow #6. */
		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

	dev->hw_desc[index]->len1 = rctx->total;
	if (dev->hw_desc[index]->len1 == 0) {
		/* if len1 is 0, p1 must be 0, too */
		dev->hw_desc[index]->p1 = 0;
		rctx->sg_in_idx = 0;
	} else {
		/* Create input links */
		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
		i = sahara_sha_hw_links_create(dev, rctx, index);

		rctx->sg_in_idx = index;
		if (i < 0)
			return i;
	}

	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

	/* Save the context for the next operation */
	result_len = rctx->context_size;
	dev->hw_link[i]->p = dev->context_phys_base;

	dev->hw_link[i]->len = result_len;
	dev->hw_desc[index]->len2 = result_len;

	dev->hw_link[i]->next = 0;

	return 0;
}

/*
 * Load descriptor aka #6
 *
 * To load a previously saved context back to the MDHA unit
 *
 * p1: Saved Context
 * p2: NULL
 *
 */
static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
						   struct sahara_sha_reqctx *rctx,
						   struct ahash_request *req,
						   int index)
{
	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);

	dev->hw_desc[index]->len1 = rctx->context_size;
	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
	dev->hw_desc[index]->len2 = 0;
	dev->hw_desc[index]->p2 = 0;

	dev->hw_link[index]->len = rctx->context_size;
	dev->hw_link[index]->p = dev->context_phys_base;
	dev->hw_link[index]->next = 0;

	return 0;
}

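/*
 * Walk the scatterlist and shorten it to cover exactly nbytes: the
 * entry that crosses the boundary has its length trimmed and is marked
 * as the end of the list.
 */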
static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
{
	if (!sg || !sg->length)
		return nbytes;

	while (nbytes && sg) {
		if (nbytes <= sg->length) {
			sg->length = nbytes;
			sg_mark_end(sg);
			break;
		}
		nbytes -= sg->length;
		sg = sg_next(sg);
	}

	return nbytes;
}

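/*
 * Stage the request data for the hardware: returns 0 when the bytes
 * were only buffered for a later call (nothing for the hardware to do
 * yet) and -EINPROGRESS when rctx->in_sg is ready for a hardware pass.
 */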
static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int hash_later;
	unsigned int block_size;
	unsigned int len;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* append bytes from previous operation */
	len = rctx->buf_cnt + req->nbytes;

	/* only the last transfer can be padded in hardware */
	if (!rctx->last && (len < block_size)) {
		/* too little data, save for next operation */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
					 0, req->nbytes, 0);
		rctx->buf_cnt += req->nbytes;

		return 0;
	}

	/* add data from previous operation first */
	if (rctx->buf_cnt)
		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

	/* data must always be a multiple of block_size */
	hash_later = rctx->last ? 0 : len & (block_size - 1);
	if (hash_later) {
		unsigned int offset = req->nbytes - hash_later;
		/* Save remaining bytes for later use */
		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
					 hash_later, 0);
	}

	/* nbytes should now be multiple of blocksize */
	req->nbytes = req->nbytes - hash_later;

	sahara_walk_and_recalc(req->src, req->nbytes);

	/* have data from previous operation and current */
	if (rctx->buf_cnt && req->nbytes) {
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);

		sg_chain(rctx->in_sg_chain, 2, req->src);

		rctx->total = req->nbytes + rctx->buf_cnt;
		rctx->in_sg = rctx->in_sg_chain;

		req->src = rctx->in_sg_chain;
	/* only data from previous operation */
	} else if (rctx->buf_cnt) {
		if (req->src)
			rctx->in_sg = req->src;
		else
			rctx->in_sg = rctx->in_sg_chain;
		/* buf was copied into rembuf above */
		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
		rctx->total = rctx->buf_cnt;
	/* no data from previous operation */
	} else {
		rctx->in_sg = req->src;
		rctx->total = req->nbytes;
		req->src = rctx->in_sg;
	}

	/* on next call, we only have the remaining data in the buffer */
	rctx->buf_cnt = hash_later;

	return -EINPROGRESS;
}

static int sahara_sha_process(struct ahash_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	int ret;
	unsigned long timeout;

	ret = sahara_sha_prepare_request(req);
	if (!ret)
		return ret;

	if (rctx->first) {
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = 0;
		rctx->first = 0;
	} else {
		memcpy(dev->context_base, rctx->context, rctx->context_size);

		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
		dev->hw_desc[1]->next = 0;
	}

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	reinit_completion(&dev->dma_completion);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "SHA timeout\n");
		return -ETIMEDOUT;
	}

	if (rctx->sg_in_idx)
		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			     DMA_TO_DEVICE);

	memcpy(rctx->context, dev->context_base, rctx->context_size);

	if (req->result)
		memcpy(req->result, rctx->context, rctx->digest_size);

	return 0;
}

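/*
 * A single kthread serializes all work: AES and SHA requests go through
 * dev->queue and are dispatched here one at a time, matching the
 * hardware's one-request-at-a-time limitation (SAHARA_QUEUE_LENGTH).
 */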
static int sahara_queue_manage(void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req;
	struct crypto_async_request *backlog;
	int ret = 0;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&dev->queue_mutex);
		backlog = crypto_get_backlog(&dev->queue);
		async_req = crypto_dequeue_request(&dev->queue);
		mutex_unlock(&dev->queue_mutex);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (async_req) {
			if (crypto_tfm_alg_type(async_req->tfm) ==
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ahash_request *req =
					ahash_request_cast(async_req);

				ret = sahara_sha_process(req);
			} else {
				struct ablkcipher_request *req =
					ablkcipher_request_cast(async_req);

				ret = sahara_aes_process(req);
			}

			async_req->complete(async_req, ret);

			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}

static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int ret;

	if (!req->nbytes && !last)
		return 0;

	mutex_lock(&rctx->mutex);
	rctx->last = last;

	if (!rctx->active) {
		rctx->active = 1;
		rctx->first = 1;
	}

	mutex_lock(&dev->queue_mutex);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);
	mutex_unlock(&rctx->mutex);

	return ret;
}

static int sahara_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memset(rctx, 0, sizeof(*rctx));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
		rctx->digest_size = SHA1_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
		rctx->digest_size = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	rctx->context_size = rctx->digest_size + 4;
	rctx->active = 0;

	mutex_init(&rctx->mutex);

	return 0;
}

static int sahara_sha_update(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 0);
}

static int sahara_sha_final(struct ahash_request *req)
{
	req->nbytes = 0;
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_finup(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_digest(struct ahash_request *req)
{
	sahara_sha_init(req);

	return sahara_sha_finup(req);
}

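/*
 * The exported state is the tfm context followed by the request
 * context, so a partial hash can be resumed later via import.
 */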
static int sahara_sha_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct sahara_ctx));
	memcpy(out + sizeof(struct sahara_ctx), rctx,
	       sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct sahara_ctx));
	memcpy(rctx, in + sizeof(struct sahara_ctx),
	       sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->shash_fallback = crypto_alloc_shash(name, 0,
					CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->shash_fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->shash_fallback);
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sahara_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

	return 0;
}

static void sahara_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->shash_fallback);
	ctx->shash_fallback = NULL;
}

static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "sahara-ecb-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_ecb_encrypt,
		.decrypt	= sahara_aes_ecb_decrypt,
	}
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "sahara-cbc-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_cbc_encrypt,
		.decrypt	= sahara_aes_cbc_decrypt,
	}
}
};

static struct ahash_alg sha_v3_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sahara-sha1",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
		.cra_exit		= sahara_sha_cra_exit,
	}
},
};

static struct ahash_alg sha_v4_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sahara-sha256",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
		.cra_exit		= sahara_sha_cra_exit,
	}
},
};

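/*
 * One interrupt per dispatched descriptor chain: clear and decode the
 * status, latch any error in dev->error and wake up the waiter.
 */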
static irqreturn_t sahara_irq_handler(int irq, void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
		     SAHARA_REG_CMD);

	sahara_decode_status(dev, stat);

	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
		return IRQ_NONE;
	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
		dev->error = 0;
	} else {
		sahara_decode_error(dev, err);
		dev->error = -EINVAL;
	}

	complete(&dev->dma_completion);

	return IRQ_HANDLED;
}

static int sahara_register_algs(struct sahara_dev *dev)
{
	int err;
	unsigned int i, j, k, l;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		INIT_LIST_HEAD(&aes_algs[i].cra_list);
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
		err = crypto_register_ahash(&sha_v3_algs[k]);
		if (err)
			goto err_sha_v3_algs;
	}

	if (dev->version > SAHARA_VERSION_3)
		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
			err = crypto_register_ahash(&sha_v4_algs[l]);
			if (err)
				goto err_sha_v4_algs;
		}

	return 0;

err_sha_v4_algs:
	for (j = 0; j < l; j++)
		crypto_unregister_ahash(&sha_v4_algs[j]);

err_sha_v3_algs:
	for (j = 0; j < k; j++)
		crypto_unregister_ahash(&sha_v3_algs[j]);

err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}

static void sahara_unregister_algs(struct sahara_dev *dev)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);

	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
		crypto_unregister_ahash(&sha_v3_algs[i]);

	if (dev->version > SAHARA_VERSION_3)
		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
			crypto_unregister_ahash(&sha_v4_algs[i]);
}

static const struct platform_device_id sahara_platform_ids[] = {
	{ .name = "sahara-imx27" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);

static const struct of_device_id sahara_dt_ids[] = {
	{ .compatible = "fsl,imx53-sahara" },
	{ .compatible = "fsl,imx27-sahara" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);

static int sahara_probe(struct platform_device *pdev)
{
	struct sahara_dev *dev;
	struct resource *res;
	u32 version;
	int irq;
	int err;
	int i;

	dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
	if (dev == NULL) {
		dev_err(&pdev->dev, "unable to alloc data struct.\n");
		return -ENOMEM;
	}

	dev->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dev->regs_base))
		return PTR_ERR(dev->regs_base);

	/* Get the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq resource\n");
		return irq;
	}

	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
			       0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return err;
	}

	/* clocks */
	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(dev->clk_ipg)) {
		dev_err(&pdev->dev, "Could not get ipg clock\n");
		return PTR_ERR(dev->clk_ipg);
	}

	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(dev->clk_ahb)) {
		dev_err(&pdev->dev, "Could not get ahb clock\n");
		return PTR_ERR(dev->clk_ahb);
	}

	/* Allocate HW descriptors */
	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			&dev->hw_phys_desc[0], GFP_KERNEL);
	if (!dev->hw_desc[0]) {
		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
		return -ENOMEM;
	}
	dev->hw_desc[1] = dev->hw_desc[0] + 1;
	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
				sizeof(struct sahara_hw_desc);

	/* Allocate space for iv and key */
	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
				&dev->key_phys_base, GFP_KERNEL);
	if (!dev->key_base) {
		dev_err(&pdev->dev, "Could not allocate memory for key\n");
		return -ENOMEM;
	}
	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

	/* Allocate space for context: largest digest + message length field */
	dev->context_base = dmam_alloc_coherent(&pdev->dev,
					SHA256_DIGEST_SIZE + 4,
					&dev->context_phys_base, GFP_KERNEL);
	if (!dev->context_base) {
		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
		return -ENOMEM;
	}

	/* Allocate space for HW links */
	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			&dev->hw_phys_link[0], GFP_KERNEL);
	if (!dev->hw_link[0]) {
		dev_err(&pdev->dev, "Could not allocate hw links\n");
		return -ENOMEM;
	}
	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
					sizeof(struct sahara_hw_link);
		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
	}

	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

	spin_lock_init(&dev->lock);
	mutex_init(&dev->queue_mutex);

	dev_ptr = dev;

	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
	if (IS_ERR(dev->kthread))
		return PTR_ERR(dev->kthread);

	init_completion(&dev->dma_completion);

	err = clk_prepare_enable(dev->clk_ipg);
	if (err)
		return err;
	err = clk_prepare_enable(dev->clk_ahb);
	if (err)
		goto clk_ipg_disable;

	version = sahara_read(dev, SAHARA_REG_VERSION);
	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
		if (version != SAHARA_VERSION_3)
			err = -ENODEV;
	} else if (of_device_is_compatible(pdev->dev.of_node,
			"fsl,imx53-sahara")) {
		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
			err = -ENODEV;
		version = (version >> 8) & 0xff;
	}
	if (err == -ENODEV) {
		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
			version);
		goto err_algs;
	}

	dev->version = version;

	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
		     SAHARA_REG_CMD);
	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
			SAHARA_CONTROL_SET_MAXBURST(8) |
			SAHARA_CONTROL_RNG_AUTORSD |
			SAHARA_CONTROL_ENABLE_INT,
			SAHARA_REG_CONTROL);

	err = sahara_register_algs(dev);
	if (err)
		goto err_algs;

	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

	return 0;

err_algs:
	kthread_stop(dev->kthread);
	dev_ptr = NULL;
	clk_disable_unprepare(dev->clk_ahb);
clk_ipg_disable:
	clk_disable_unprepare(dev->clk_ipg);

	return err;
}

static int sahara_remove(struct platform_device *pdev)
{
	struct sahara_dev *dev = platform_get_drvdata(pdev);

	kthread_stop(dev->kthread);

	sahara_unregister_algs(dev);

	clk_disable_unprepare(dev->clk_ipg);
	clk_disable_unprepare(dev->clk_ahb);

	dev_ptr = NULL;

	return 0;
}

static struct platform_driver sahara_driver = {
	.probe		= sahara_probe,
	.remove		= sahara_remove,
	.driver		= {
		.name	= SAHARA_NAME,
		.of_match_table = sahara_dt_ids,
	},
	.id_table = sahara_platform_ids,
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");