/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 * License: GPLv2
 *
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include "mv_cesa.h"
/*
 * STM:
 *   /---------------------------------------\
 *   |                                       | request complete
 *  \./                                      |
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *         /°\                               |
 *          |                                | more scatter entries
 *          \________________________________/
 */
enum engine_status {
	ENGINE_IDLE,
	ENGINE_BUSY,
	ENGINE_W_DEQUEUE,
};

/**
 * struct req_progress - used for every crypt request
 * @src_sg_it: sg iterator for src
 * @dst_sg_it: sg iterator for dst
 * @sg_src_left: bytes left in src to process (scatter list)
 * @src_start: offset to add to src start position (scatter list)
 * @crypt_len: length of current crypt process
 * @sg_dst_left: bytes left in dst to process in this scatter list
 * @dst_start: offset to add to dst start position (scatter list)
 * @total_req_bytes: total number of bytes processed (request).
 *
 * sg helpers are used to iterate over the scatterlist. Since the size of
 * the SRAM may be less than the scatter size, this struct is used to keep
 * track of progress within the current scatterlist.
 */
struct req_progress {
	struct sg_mapping_iter src_sg_it;
	struct sg_mapping_iter dst_sg_it;

	/* src mostly */
	int sg_src_left;
	int src_start;
	int crypt_len;
	/* dst mostly */
	int sg_dst_left;
	int dst_start;
	int total_req_bytes;
};

struct crypto_priv {
	void __iomem *reg;
	void __iomem *sram;
	int irq;
	struct task_struct *queue_th;

	/* the lock protects queue and eng_st */
	spinlock_t lock;
	struct crypto_queue queue;
	enum engine_status eng_st;
	struct ablkcipher_request *cur_req;
	struct req_progress p;
	int max_req_size;
	int sram_size;
};

static struct crypto_priv *cpg;

struct mv_ctx {
	u8 aes_enc_key[AES_KEY_LEN];
	u32 aes_dec_key[8];
	int key_len;
	u32 need_calc_aes_dkey;
};

enum crypto_op {
	COP_AES_ECB,
	COP_AES_CBC,
};

struct mv_req_ctx {
	enum crypto_op op;
	int decrypt;
};

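/*
 * Derive the decryption key for the engine from the encryption key.
 * The expanded schedule has 4 * (Nr + 1) words and Nr = key_len / 4 + 6,
 * so the last round key starts at word key_len + 24 (key_len in bytes).
 * The engine appears to want that round key first, followed for 192- and
 * 256-bit keys by earlier words of the schedule.
 */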
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
	struct crypto_aes_ctx gen_aes_key;
	int key_pos;

	if (!ctx->need_calc_aes_dkey)
		return;

	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

	key_pos = ctx->key_len + 24;
	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
	switch (ctx->key_len) {
	case AES_KEYSIZE_256:
		key_pos -= 2;
		/* fall through */
	case AES_KEYSIZE_192:
		key_pos -= 2;
		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
				4 * 4);
		break;
	}
	ctx->need_calc_aes_dkey = 0;
}

static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
		unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

	switch (len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->key_len = len;
	ctx->need_calc_aes_dkey = 1;

	/* only len bytes of the caller's buffer are valid */
	memcpy(ctx->aes_enc_key, key, len);
	return 0;
}

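/*
 * Stage the next chunk of the source scatterlist in the SRAM input
 * buffer. A chunk is bounded by the bytes left in the current sg entry
 * and by max_req_size; crypt_len records how much was staged so the
 * dequeue path knows how much to copy back out.
 */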
static void setup_data_in(struct ablkcipher_request *req)
{
	int ret;
	void *buf;

	if (!cpg->p.sg_src_left) {
		ret = sg_miter_next(&cpg->p.src_sg_it);
		BUG_ON(!ret);
		cpg->p.sg_src_left = cpg->p.src_sg_it.length;
		cpg->p.src_start = 0;
	}

	cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);

	buf = cpg->p.src_sg_it.addr;
	buf += cpg->p.src_start;

	memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);

	cpg->p.sg_src_left -= cpg->p.crypt_len;
	cpg->p.src_start += cpg->p.crypt_len;
}

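/*
 * Program one crypt operation: build a sec_accel_config descriptor
 * (mode, direction, key length and the SRAM offsets of key, IV, input
 * and output), stage the next input chunk, copy the descriptor into
 * SRAM and start the engine. Completion is signalled through the
 * SEC_INT_ACCEL0_DONE interrupt.
 */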
static void mv_process_current_q(int first_block)
{
	struct ablkcipher_request *req = cpg->cur_req;
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct sec_accel_config op;

	switch (req_ctx->op) {
	case COP_AES_ECB:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
		break;
	case COP_AES_CBC:
	default:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
		if (first_block)
			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
		break;
	}
	if (req_ctx->decrypt) {
		op.config |= CFG_DIR_DEC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
				AES_KEY_LEN);
	} else {
		op.config |= CFG_DIR_ENC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
				AES_KEY_LEN);
	}

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		op.config |= CFG_AES_LEN_128;
		break;
	case AES_KEYSIZE_192:
		op.config |= CFG_AES_LEN_192;
		break;
	case AES_KEYSIZE_256:
		op.config |= CFG_AES_LEN_256;
		break;
	}
	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
		ENC_P_DST(SRAM_DATA_OUT_START);
	op.enc_key_p = SRAM_DATA_KEY_P;

	setup_data_in(req);
	op.enc_len = cpg->p.crypt_len;
	memcpy(cpg->sram + SRAM_CONFIG, &op,
			sizeof(struct sec_accel_config));

	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

	/*
	 * XXX: add timer if the interrupt does not occur for some mystery
	 * reason
	 */
}

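/*
 * Per-mode completion work. For CBC the engine leaves the last
 * ciphertext block in SRAM_DATA_IV_BUF; copy it back into req->info so
 * that chained requests see the updated IV. ECB needs nothing here.
 */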
static void mv_crypto_algo_completion(void)
{
	struct ablkcipher_request *req = cpg->cur_req;
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	if (req_ctx->op != COP_AES_CBC)
		return;

	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}

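/*
 * Runs in the queue thread after the engine signalled completion: copy
 * the processed chunk from the SRAM output buffer into the destination
 * scatterlist, then either kick off the next chunk or finish the
 * request and complete it towards the crypto API.
 */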
static void dequeue_complete_req(void)
{
	struct ablkcipher_request *req = cpg->cur_req;
	void *buf;
	int sram_offset = 0;
	int ret;

	cpg->p.total_req_bytes += cpg->p.crypt_len;
	do {
		int dst_copy;

		if (!cpg->p.sg_dst_left) {
			ret = sg_miter_next(&cpg->p.dst_sg_it);
			BUG_ON(!ret);
			cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
			cpg->p.dst_start = 0;
		}

		buf = cpg->p.dst_sg_it.addr;
		buf += cpg->p.dst_start;

		dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);

		/* advance within the SRAM output buffer in case the
		 * chunk spans more than one dst scatterlist entry */
		memcpy(buf, cpg->sram + SRAM_DATA_OUT_START + sram_offset,
				dst_copy);
		sram_offset += dst_copy;

		cpg->p.sg_dst_left -= dst_copy;
		cpg->p.crypt_len -= dst_copy;
		cpg->p.dst_start += dst_copy;
	} while (cpg->p.crypt_len > 0);

	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
	if (cpg->p.total_req_bytes < req->nbytes) {
		/* process next scatter list entry */
		cpg->eng_st = ENGINE_BUSY;
		mv_process_current_q(0);
	} else {
		sg_miter_stop(&cpg->p.src_sg_it);
		sg_miter_stop(&cpg->p.dst_sg_it);
		mv_crypto_algo_completion();
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->base.complete(&req->base, 0);
		local_bh_enable();
	}
}

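/* Count how many scatterlist entries are needed to cover total_bytes. */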
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
	int i = 0;

	do {
		/* the last entry may be longer than the bytes still
		 * needed; avoid wrapping the unsigned counter */
		if (sl[i].length >= total_bytes)
			total_bytes = 0;
		else
			total_bytes -= sl[i].length;
		i++;
	} while (total_bytes > 0);

	return i;
}

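/*
 * Begin a new request: reset the progress state, set up mapping
 * iterators over the source and destination scatterlists and hand the
 * first chunk to the engine.
 */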
static void mv_enqueue_new_req(struct ablkcipher_request *req)
{
	int num_sgs;

	cpg->cur_req = req;
	memset(&cpg->p, 0, sizeof(struct req_progress));

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	num_sgs = count_sgs(req->dst, req->nbytes);
	sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
	mv_process_current_q(1);
}

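/*
 * The dispatcher thread. It drives the state machine sketched at the
 * top of this file: drain a finished chunk whenever the interrupt
 * handler has moved the engine to ENGINE_W_DEQUEUE, and pull the next
 * request off the crypto queue whenever the engine is idle. All SRAM
 * copying happens here in process context, not in the interrupt handler.
 */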
static int queue_manag(void *data)
{
	cpg->eng_st = ENGINE_IDLE;
	do {
		struct ablkcipher_request *req;
		struct crypto_async_request *async_req = NULL;
		struct crypto_async_request *backlog = NULL;

		__set_current_state(TASK_INTERRUPTIBLE);

		if (cpg->eng_st == ENGINE_W_DEQUEUE)
			dequeue_complete_req();

		spin_lock_irq(&cpg->lock);
		if (cpg->eng_st == ENGINE_IDLE) {
			backlog = crypto_get_backlog(&cpg->queue);
			async_req = crypto_dequeue_request(&cpg->queue);
			if (async_req) {
				BUG_ON(cpg->eng_st != ENGINE_IDLE);
				cpg->eng_st = ENGINE_BUSY;
			}
		}
		spin_unlock_irq(&cpg->lock);

		if (backlog) {
			backlog->complete(backlog, -EINPROGRESS);
			backlog = NULL;
		}

		if (async_req) {
			req = container_of(async_req,
					struct ablkcipher_request, base);
			mv_enqueue_new_req(req);
			async_req = NULL;
		}

		schedule();

	} while (!kthread_should_stop());
	return 0;
}

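/*
 * Common entry point of the four cipher operations: enqueue the request
 * under the lock and wake the dispatcher thread.
 */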
static int mv_handle_req(struct ablkcipher_request *req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cpg->lock, flags);
	ret = ablkcipher_enqueue_request(&cpg->queue, req);
	spin_unlock_irqrestore(&cpg->lock, flags);
	wake_up_process(cpg->queue_th);
	return ret;
}

static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 0;

	return mv_handle_req(req);
}

static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(req);
}

static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 0;

	return mv_handle_req(req);
}

static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(req);
}

static int mv_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
	return 0;
}

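/*
 * Interrupt handler: acknowledge the accelerator-done interrupt, mark
 * the engine ready for dequeueing and defer the data copying to the
 * queue thread.
 */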
static irqreturn_t crypto_int(int irq, void *priv)
{
	u32 val;

	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
	if (!(val & SEC_INT_ACCEL0_DONE))
		return IRQ_NONE;

	val &= ~SEC_INT_ACCEL0_DONE;
	writel(val, cpg->reg + FPGA_INT_STATUS);
	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
	BUG_ON(cpg->eng_st != ENGINE_BUSY);
	cpg->eng_st = ENGINE_W_DEQUEUE;
	wake_up_process(cpg->queue_th);
	return IRQ_HANDLED;
}

static struct crypto_alg mv_aes_alg_ecb = {
	.cra_name	= "ecb(aes)",
	.cra_driver_name = "mv-ecb-aes",
	.cra_priority	= 300,
	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize	= AES_BLOCK_SIZE,
	.cra_ctxsize	= sizeof(struct mv_ctx),
	.cra_alignmask	= 0,
	.cra_type	= &crypto_ablkcipher_type,
	.cra_module	= THIS_MODULE,
	.cra_init	= mv_cra_init,
	.cra_u		= {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= mv_setkey_aes,
			.encrypt	= mv_enc_aes_ecb,
			.decrypt	= mv_dec_aes_ecb,
		},
	},
};

static struct crypto_alg mv_aes_alg_cbc = {
	.cra_name	= "cbc(aes)",
	.cra_driver_name = "mv-cbc-aes",
	.cra_priority	= 300,
	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize	= AES_BLOCK_SIZE,
	.cra_ctxsize	= sizeof(struct mv_ctx),
	.cra_alignmask	= 0,
	.cra_type	= &crypto_ablkcipher_type,
	.cra_module	= THIS_MODULE,
	.cra_init	= mv_cra_init,
	.cra_u		= {
		.ablkcipher = {
			.ivsize		= AES_BLOCK_SIZE,
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= mv_setkey_aes,
			.encrypt	= mv_enc_aes_cbc,
			.decrypt	= mv_dec_aes_cbc,
		},
	},
};

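/*
 * Map the register window and the SRAM the engine operates on, start
 * the dispatcher thread, hook up the interrupt and register both
 * algorithm variants. Only one device instance is supported since the
 * state lives in the global cpg.
 */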
static int mv_probe(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct resource *res;
	int irq;
	int ret;

	if (cpg) {
		printk(KERN_ERR "Second crypto dev?\n");
		return -EEXIST;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!res)
		return -ENXIO;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	spin_lock_init(&cp->lock);
	crypto_init_queue(&cp->queue, 50);
	cp->reg = ioremap(res->start, res->end - res->start + 1);
	if (!cp->reg) {
		ret = -ENOMEM;
		goto err;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	if (!res) {
		ret = -ENXIO;
		goto err_unmap_reg;
	}
	cp->sram_size = res->end - res->start + 1;
	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
	cp->sram = ioremap(res->start, cp->sram_size);
	if (!cp->sram) {
		ret = -ENOMEM;
		goto err_unmap_reg;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0 || irq == NO_IRQ) {
		ret = irq;
		goto err_unmap_sram;
	}
	cp->irq = irq;

	platform_set_drvdata(pdev, cp);
	cpg = cp;

	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
	if (IS_ERR(cp->queue_th)) {
		ret = PTR_ERR(cp->queue_th);
		goto err_unmap_sram;
	}

	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
			cp);
	if (ret)
		goto err_thread;

	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);

	ret = crypto_register_alg(&mv_aes_alg_ecb);
	if (ret)
		goto err_irq;

	ret = crypto_register_alg(&mv_aes_alg_cbc);
	if (ret)
		goto err_unreg_ecb;
	return 0;
err_unreg_ecb:
	crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
	free_irq(irq, cp);
err_thread:
	kthread_stop(cp->queue_th);
err_unmap_sram:
	iounmap(cp->sram);
err_unmap_reg:
	iounmap(cp->reg);
err:
	kfree(cp);
	cpg = NULL;
	platform_set_drvdata(pdev, NULL);
	return ret;
}

static int mv_remove(struct platform_device *pdev)
{
	struct crypto_priv *cp = platform_get_drvdata(pdev);

	crypto_unregister_alg(&mv_aes_alg_ecb);
	crypto_unregister_alg(&mv_aes_alg_cbc);
	kthread_stop(cp->queue_th);
	free_irq(cp->irq, cp);
	memset(cp->sram, 0, cp->sram_size);
	iounmap(cp->sram);
	iounmap(cp->reg);
	kfree(cp);
	cpg = NULL;
	return 0;
}

static struct platform_driver marvell_crypto = {
	.probe		= mv_probe,
	.remove		= mv_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "mv_crypto",
	},
};
MODULE_ALIAS("platform:mv_crypto");

static int __init mv_crypto_init(void)
{
	return platform_driver_register(&marvell_crypto);
}
module_init(mv_crypto_init);

static void __exit mv_crypto_exit(void)
{
	platform_driver_unregister(&marvell_crypto);
}
module_exit(mv_crypto_exit);

MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");