/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 * License: GPLv2
 *
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include "mv_cesa.h"
/*
 * STM:
 *   /---------------------------------------\
 *   |                                       | request complete
 *  \./                                      |
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *                         /°\               |
 *                          |                | more scatter entries
 *                          \________________/
 */
enum engine_status {
	ENGINE_IDLE,
	ENGINE_BUSY,
	ENGINE_W_DEQUEUE,
};

/**
 * struct req_progress - used for every crypt request
 * @src_sg_it: sg iterator for src
 * @dst_sg_it: sg iterator for dst
 * @complete: called once the whole request has been processed
 * @process: callback that programs the engine for the next chunk
 * @sg_src_left: bytes left in src to process (scatter list)
 * @src_start: offset to add to src start position (scatter list)
 * @crypt_len: length of current crypt process
 * @hw_nbytes: total bytes to process in hw for this request
 * @sg_dst_left: bytes left in dst to process in this scatter list
 * @dst_start: offset to add to dst start position (scatter list)
 * @hw_processed_bytes: number of bytes processed by hw for this request
 *
 * sg helpers are used to iterate over the scatterlist. Since the size of
 * the SRAM may be less than the scatter size, this struct is used to keep
 * track of progress within the current scatterlist.
 */
struct req_progress {
	struct sg_mapping_iter src_sg_it;
	struct sg_mapping_iter dst_sg_it;
	void (*complete) (void);
	void (*process) (int is_first);

	/* src mostly */
	int sg_src_left;
	int src_start;
	int crypt_len;
	int hw_nbytes;
	/* dst mostly */
	int sg_dst_left;
	int dst_start;
	int hw_processed_bytes;
};

struct crypto_priv {
	void __iomem *reg;
	void __iomem *sram;
	int irq;
	struct task_struct *queue_th;

	/* the lock protects queue and eng_st */
	spinlock_t lock;
	struct crypto_queue queue;
	enum engine_status eng_st;
	struct crypto_async_request *cur_req;
	struct req_progress p;
	int max_req_size;
	int sram_size;
};

static struct crypto_priv *cpg;

struct mv_ctx {
	u8 aes_enc_key[AES_KEY_LEN];
	u32 aes_dec_key[8];
	int key_len;
	u32 need_calc_aes_dkey;
};

enum crypto_op {
	COP_AES_ECB,
	COP_AES_CBC,
};

struct mv_req_ctx {
	enum crypto_op op;
	int decrypt;
};

static void compute_aes_dec_key(struct mv_ctx *ctx)
{
	struct crypto_aes_ctx gen_aes_key;
	int key_pos;

	if (!ctx->need_calc_aes_dkey)
		return;

	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

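	/*
	 * The engine expects the AES decryption key, i.e. the final round
	 * key(s) of the expanded encryption schedule. key_enc holds
	 * 4 * (rounds + 1) 32-bit words, so the last round key starts at
	 * word 4 * rounds, which works out to key_len + 24 for all three
	 * key sizes (16 + 24 = 40, 24 + 24 = 48, 32 + 24 = 56).
	 */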
	key_pos = ctx->key_len + 24;
	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
	switch (ctx->key_len) {
	case AES_KEYSIZE_256:
		key_pos -= 2;
		/* fall through */
	case AES_KEYSIZE_192:
		key_pos -= 2;
		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
				4 * 4);
		break;
	}
	ctx->need_calc_aes_dkey = 0;
}

static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
		unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

	switch (len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->key_len = len;
	ctx->need_calc_aes_dkey = 1;

	/*
	 * Copy only the supplied key; copying AES_KEY_LEN bytes would read
	 * past the end of the caller's buffer for 128- and 192-bit keys.
	 */
	memcpy(ctx->aes_enc_key, key, len);
	return 0;
}

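/*
 * Gather @len bytes from the source scatterlist into the contiguous
 * buffer @dbuf, resuming at the position that @p recorded during the
 * previous call.
 */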
static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
	int ret;
	void *sbuf;
	int copied = 0;

	while (1) {
		if (!p->sg_src_left) {
			ret = sg_miter_next(&p->src_sg_it);
			BUG_ON(!ret);
			p->sg_src_left = p->src_sg_it.length;
			p->src_start = 0;
		}

		sbuf = p->src_sg_it.addr + p->src_start;

		if (p->sg_src_left <= len - copied) {
			memcpy(dbuf + copied, sbuf, p->sg_src_left);
			copied += p->sg_src_left;
			p->sg_src_left = 0;
			if (copied >= len)
				break;
		} else {
			int copy_len = len - copied;
			memcpy(dbuf + copied, sbuf, copy_len);
			p->src_start += copy_len;
			p->sg_src_left -= copy_len;
			break;
		}
	}
}

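/*
 * Stage the next chunk of input in SRAM. The engine works on the
 * on-chip SRAM buffer rather than on the scatterlist, so at most
 * max_req_size bytes can be processed per run.
 */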
static void setup_data_in(void)
{
	struct req_progress *p = &cpg->p;
	p->crypt_len =
	    min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START,
			p->crypt_len);
}

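/*
 * Program the engine for one chunk of the current request: build the
 * accelerator descriptor in SRAM, load the key (and, on the first
 * chunk of a CBC request, the IV), stage the input data and start the
 * engine. Completion is signalled via interrupt.
 */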
static void mv_process_current_q(int first_block)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct sec_accel_config op;

	switch (req_ctx->op) {
	case COP_AES_ECB:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
		break;
	case COP_AES_CBC:
	default:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
		if (first_block)
			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
		break;
	}
	if (req_ctx->decrypt) {
		op.config |= CFG_DIR_DEC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
				AES_KEY_LEN);
	} else {
		op.config |= CFG_DIR_ENC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
				AES_KEY_LEN);
	}

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		op.config |= CFG_AES_LEN_128;
		break;
	case AES_KEYSIZE_192:
		op.config |= CFG_AES_LEN_192;
		break;
	case AES_KEYSIZE_256:
		op.config |= CFG_AES_LEN_256;
		break;
	}
	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
		ENC_P_DST(SRAM_DATA_OUT_START);
	op.enc_key_p = SRAM_DATA_KEY_P;

	setup_data_in();
	op.enc_len = cpg->p.crypt_len;
	memcpy(cpg->sram + SRAM_CONFIG, &op,
			sizeof(struct sec_accel_config));

	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

	/*
	 * XXX: add timer if the interrupt does not occur for some mystery
	 * reason
	 */
}

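/*
 * Final step of a cipher request: stop the sg iterators and, for CBC,
 * copy the updated IV back to req->info so that a follow-up request
 * can chain off this one.
 */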
static void mv_crypto_algo_completion(void)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	sg_miter_stop(&cpg->p.src_sg_it);
	sg_miter_stop(&cpg->p.dst_sg_it);

	if (req_ctx->op != COP_AES_CBC)
		return;

	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}

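/*
 * Runs in the queue thread after the engine has signalled completion
 * of a chunk: copy the output from SRAM into the destination
 * scatterlist, then either program the next chunk or complete the
 * request.
 */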
static void dequeue_complete_req(void)
{
	struct crypto_async_request *req = cpg->cur_req;
	void *buf;
	int ret;
	int need_copy_len = cpg->p.crypt_len;
	int sram_offset = 0;

	cpg->p.hw_processed_bytes += cpg->p.crypt_len;
	do {
		int dst_copy;

		if (!cpg->p.sg_dst_left) {
			ret = sg_miter_next(&cpg->p.dst_sg_it);
			BUG_ON(!ret);
			cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
			cpg->p.dst_start = 0;
		}

		buf = cpg->p.dst_sg_it.addr;
		buf += cpg->p.dst_start;

		dst_copy = min(need_copy_len, cpg->p.sg_dst_left);

		memcpy(buf,
		       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
		       dst_copy);
		sram_offset += dst_copy;
		cpg->p.sg_dst_left -= dst_copy;
		need_copy_len -= dst_copy;
		cpg->p.dst_start += dst_copy;
	} while (need_copy_len > 0);

	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
		/* process next scatter list entry */
		cpg->eng_st = ENGINE_BUSY;
		cpg->p.process(0);
	} else {
		cpg->p.complete();
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->complete(req, 0);
		local_bh_enable();
	}
}

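/*
 * Count how many scatterlist entries are needed to cover @total_bytes,
 * so that sg_miter_start() can be given an exact entry count.
 */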
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
	int i = 0;
	size_t cur_len;

	while (1) {
		cur_len = sl[i].length;
		++i;
		if (total_bytes > cur_len)
			total_bytes -= cur_len;
		else
			break;
	}

	return i;
}

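/*
 * Make @req the engine's current request and kick off its first
 * chunk. Called from the queue thread with the engine marked BUSY.
 */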
static void mv_enqueue_new_req(struct ablkcipher_request *req)
{
	struct req_progress *p = &cpg->p;
	int num_sgs;

	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	p->hw_nbytes = req->nbytes;
	p->complete = mv_crypto_algo_completion;
	p->process = mv_process_current_q;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	num_sgs = count_sgs(req->dst, req->nbytes);
	sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);

	mv_process_current_q(1);
}

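/*
 * Queue thread: completes finished chunks and feeds new requests to
 * the engine. The task state is set to TASK_INTERRUPTIBLE before the
 * engine state is examined so that a wake_up_process() arriving
 * between the check and schedule() is not lost.
 */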
static int queue_manag(void *data)
{
	cpg->eng_st = ENGINE_IDLE;
	do {
		struct ablkcipher_request *req;
		struct crypto_async_request *async_req = NULL;
		struct crypto_async_request *backlog = NULL;

		__set_current_state(TASK_INTERRUPTIBLE);

		if (cpg->eng_st == ENGINE_W_DEQUEUE)
			dequeue_complete_req();

		spin_lock_irq(&cpg->lock);
		if (cpg->eng_st == ENGINE_IDLE) {
			backlog = crypto_get_backlog(&cpg->queue);
			async_req = crypto_dequeue_request(&cpg->queue);
			if (async_req) {
				BUG_ON(cpg->eng_st != ENGINE_IDLE);
				cpg->eng_st = ENGINE_BUSY;
			}
		}
		spin_unlock_irq(&cpg->lock);

		if (backlog) {
			backlog->complete(backlog, -EINPROGRESS);
			backlog = NULL;
		}

		if (async_req) {
			req = container_of(async_req,
					struct ablkcipher_request, base);
			mv_enqueue_new_req(req);
			async_req = NULL;
		}

		schedule();

	} while (!kthread_should_stop());
	return 0;
}

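/*
 * Common entry point for all cipher operations: enqueue the request
 * under the lock and wake the queue thread, which does the actual
 * work.
 */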
static int mv_handle_req(struct crypto_async_request *req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cpg->lock, flags);
	ret = crypto_enqueue_request(&cpg->queue, req);
	spin_unlock_irqrestore(&cpg->lock, flags);
	wake_up_process(cpg->queue_th);
	return ret;
}

static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}

static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}

static int mv_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
	return 0;
}

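/*
 * Interrupt handler: acknowledge the ACCEL0_DONE interrupt and defer
 * result handling to the queue thread.
 */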
static irqreturn_t crypto_int(int irq, void *priv)
{
	u32 val;

	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
	if (!(val & SEC_INT_ACCEL0_DONE))
		return IRQ_NONE;

	val &= ~SEC_INT_ACCEL0_DONE;
	writel(val, cpg->reg + FPGA_INT_STATUS);
	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
	BUG_ON(cpg->eng_st != ENGINE_BUSY);
	cpg->eng_st = ENGINE_W_DEQUEUE;
	wake_up_process(cpg->queue_th);
	return IRQ_HANDLED;
}

static struct crypto_alg mv_aes_alg_ecb = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "mv-ecb-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_setkey_aes,
			.encrypt = mv_enc_aes_ecb,
			.decrypt = mv_dec_aes_ecb,
		},
	},
};

static struct crypto_alg mv_aes_alg_cbc = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "mv-cbc-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cra_init,
	.cra_u = {
		.ablkcipher = {
			.ivsize = AES_BLOCK_SIZE,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_setkey_aes,
			.encrypt = mv_enc_aes_cbc,
			.decrypt = mv_dec_aes_cbc,
		},
	},
};

static int mv_probe(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct resource *res;
	int irq;
	int ret;

	if (cpg) {
		printk(KERN_ERR "Second crypto dev?\n");
		return -EEXIST;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!res)
		return -ENXIO;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	spin_lock_init(&cp->lock);
	crypto_init_queue(&cp->queue, 50);
	cp->reg = ioremap(res->start, resource_size(res));
	if (!cp->reg) {
		ret = -ENOMEM;
		goto err;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	if (!res) {
		ret = -ENXIO;
		goto err_unmap_reg;
	}
	cp->sram_size = resource_size(res);
	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
	cp->sram = ioremap(res->start, cp->sram_size);
	if (!cp->sram) {
		ret = -ENOMEM;
		goto err_unmap_reg;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0 || irq == NO_IRQ) {
		ret = irq ? irq : -ENXIO;
		goto err_unmap_sram;
	}
	cp->irq = irq;

	platform_set_drvdata(pdev, cp);
	cpg = cp;

	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
	if (IS_ERR(cp->queue_th)) {
		ret = PTR_ERR(cp->queue_th);
		goto err_unmap_sram;
	}

	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
			cp);
	if (ret)
		goto err_thread;

	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);

	ret = crypto_register_alg(&mv_aes_alg_ecb);
	if (ret)
		goto err_irq;

	ret = crypto_register_alg(&mv_aes_alg_cbc);
	if (ret)
		goto err_unreg_ecb;
	return 0;
err_unreg_ecb:
	crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
	free_irq(irq, cp);
err_thread:
	kthread_stop(cp->queue_th);
err_unmap_sram:
	iounmap(cp->sram);
err_unmap_reg:
	iounmap(cp->reg);
err:
	kfree(cp);
	cpg = NULL;
	platform_set_drvdata(pdev, NULL);
	return ret;
}

static int mv_remove(struct platform_device *pdev)
{
	struct crypto_priv *cp = platform_get_drvdata(pdev);

	crypto_unregister_alg(&mv_aes_alg_ecb);
	crypto_unregister_alg(&mv_aes_alg_cbc);
	kthread_stop(cp->queue_th);
	free_irq(cp->irq, cp);
	memset(cp->sram, 0, cp->sram_size);
	iounmap(cp->sram);
	iounmap(cp->reg);
	kfree(cp);
	cpg = NULL;
	return 0;
}

static struct platform_driver marvell_crypto = {
	.probe = mv_probe,
	.remove = mv_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "mv_crypto",
	},
};
MODULE_ALIAS("platform:mv_crypto");

static int __init mv_crypto_init(void)
{
	return platform_driver_register(&marvell_crypto);
}
module_init(mv_crypto_init);

static void __exit mv_crypto_exit(void)
{
	platform_driver_unregister(&marvell_crypto);
}
module_exit(mv_crypto_exit);

MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");